| code (string, 22–1.05M chars) | apis (list, 1–3.31k entries) | extract_api (string, 75–3.25M chars) |
|---|---|---|
import json
from trac.core import *
from trac.web.api import IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import ITemplateProvider, add_script
from trac.ticket.query import Query, QueryModule
from trac.ticket.model import Ticket
from genshi.builder import tag
from genshi.filters import Transformer
from genshi.input import HTML
from genshi.template import MarkupTemplate
from .api import TicketRelationSystem
import pkg_resources
class SelectTicketPlugin(Component):
implements(IRequestHandler, ITemplateProvider, ITemplateStreamFilter)
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/select_tickets'
def process_request(self, req):
args = req.args
qm = QueryModule(self.env)
template, data, _whatever = qm.process_request(req)
return 'select_tickets.html', data, None
#ITemplateProvider methods
def get_htdocs_dirs(self):
return [('ticketrelation', pkg_resources.resource_filename('ticketrelation', 'htdocs'))]
def get_templates_dirs(self):
return [pkg_resources.resource_filename('ticketrelation', 'templates')]
## ITemplateStreamFilter
def filter_stream(self, req, method, filename, stream, data):
if req.path_info == '/select_tickets':
stream |= Transformer('//div[@id="banner"]').remove()
stream |= Transformer('//div[@id="mainnav"]').remove()
stream |= Transformer('//div[@id="ctxtnav"]').remove()
if (filename == "ticket.html" or filename == 'ticket_preview.html') and 'ticket' in data:
ticket = data['ticket']
trs = TicketRelationSystem(self.env)
            data = {}  # payload for the Vue app below; the template data is no longer needed here
for relation in trs.build_relations().values():
if relation.ticket_type_a == ticket['type']:
stream = self._generate_html(relation, relation.relation_type_a, 'a', stream, ticket, data)
elif relation.ticket_type_b == ticket['type']:
stream = self._generate_html(relation, relation.relation_type_b, 'b', stream, ticket, data)
add_script(req, 'ticketrelation/js/bundle.js')
stream |= Transformer('//body').append(tag.script("""
(function () {
var data = %s;
var app = new Vue({
el: '#properties',
data: {
relation: data,
}
});
})();
""" % json.dumps(data)))
return stream
def _generate_html(self, relation, relation_type, relation_role, stream, ticket, data):
trs = TicketRelationSystem(self.env)
try:
if relation_type == 'one':
if ticket[relation.name + '_' + relation_role] is not None:
stream |= Transformer(
'//input[@id="field-%s_%s"]' % (relation.name, relation_role)) \
.replace(HTML("""
<relation-single id="field-%s_%s" :relation="relation['%s_%s']" />
""" % (relation.name, relation_role, relation.name, relation_role)))
else:
if ticket[relation.name + '_' + relation_role] is not None:
stream |= Transformer(
'//textarea[@id="field-%s_%s"]' % (relation.name, relation_role)) \
.replace(HTML("""
<relation-multi id="field-%s_%s" :relation="relation['%s_%s']" />
""" % (relation.name, relation_role, relation.name, relation_role)))
data[relation.name + '_' + relation_role] = {
'name': relation.name,
'role': relation_role,
'targetType': relation.ticket_type_a if relation_role == 'b' else relation.ticket_type_b,
'value': ticket[relation.name + '_' + relation_role]
}
        except Exception as e:
            self.log.error(e)
        return stream
|
[
"trac.ticket.query.QueryModule",
"genshi.filters.Transformer",
"trac.web.chrome.add_script",
"json.dumps",
"pkg_resources.resource_filename",
"genshi.input.HTML"
] |
[((751, 772), 'trac.ticket.query.QueryModule', 'QueryModule', (['self.env'], {}), '(self.env)\n', (762, 772), False, 'from trac.ticket.query import Query, QueryModule\n'), ((1093, 1155), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""ticketrelation"""', '"""templates"""'], {}), "('ticketrelation', 'templates')\n", (1124, 1155), False, 'import pkg_resources\n'), ((2129, 2175), 'trac.web.chrome.add_script', 'add_script', (['req', '"""ticketrelation/js/bundle.js"""'], {}), "(req, 'ticketrelation/js/bundle.js')\n", (2139, 2175), False, 'from trac.web.chrome import ITemplateProvider, add_script\n'), ((980, 1039), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""ticketrelation"""', '"""htdocs"""'], {}), "('ticketrelation', 'htdocs')\n", (1011, 1039), False, 'import pkg_resources\n'), ((1324, 1358), 'genshi.filters.Transformer', 'Transformer', (['"""//div[@id="banner"]"""'], {}), '(\'//div[@id="banner"]\')\n', (1335, 1358), False, 'from genshi.filters import Transformer\n'), ((1390, 1425), 'genshi.filters.Transformer', 'Transformer', (['"""//div[@id="mainnav"]"""'], {}), '(\'//div[@id="mainnav"]\')\n', (1401, 1425), False, 'from genshi.filters import Transformer\n'), ((1457, 1492), 'genshi.filters.Transformer', 'Transformer', (['"""//div[@id="ctxtnav"]"""'], {}), '(\'//div[@id="ctxtnav"]\')\n', (1468, 1492), False, 'from genshi.filters import Transformer\n'), ((2199, 2220), 'genshi.filters.Transformer', 'Transformer', (['"""//body"""'], {}), "('//body')\n", (2210, 2220), False, 'from genshi.filters import Transformer\n'), ((2530, 2546), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2540, 2546), False, 'import json\n'), ((3004, 3209), 'genshi.input.HTML', 'HTML', (['("""\n <relation-single id="field-%s_%s" :relation="relation[\'%s_%s\']" />\n """\n % (relation.name, relation_role, relation.name, relation_role))'], {}), '(\n """\n <relation-single id="field-%s_%s" :relation="relation[\'%s_%s\']" />\n """\n % (relation.name, relation_role, relation.name, relation_role))\n', (3008, 3209), False, 'from genshi.input import HTML\n'), ((3463, 3667), 'genshi.input.HTML', 'HTML', (['("""\n <relation-multi id="field-%s_%s" :relation="relation[\'%s_%s\']" />\n """\n % (relation.name, relation_role, relation.name, relation_role))'], {}), '(\n """\n <relation-multi id="field-%s_%s" :relation="relation[\'%s_%s\']" />\n """\n % (relation.name, relation_role, relation.name, relation_role))\n', (3467, 3667), False, 'from genshi.input import HTML\n'), ((2869, 2943), 'genshi.filters.Transformer', 'Transformer', (['(\'//input[@id="field-%s_%s"]\' % (relation.name, relation_role))'], {}), '(\'//input[@id="field-%s_%s"]\' % (relation.name, relation_role))\n', (2880, 2943), False, 'from genshi.filters import Transformer\n'), ((3325, 3402), 'genshi.filters.Transformer', 'Transformer', (['(\'//textarea[@id="field-%s_%s"]\' % (relation.name, relation_role))'], {}), '(\'//textarea[@id="field-%s_%s"]\' % (relation.name, relation_role))\n', (3336, 3402), False, 'from genshi.filters import Transformer\n')]
|
from django.conf.urls import patterns, url
urlpatterns = patterns('news.views',
url(r'^$', 'news_list', name='list'),
url(r'^feed/(?P<slug>.*)/$',
'news_list', name='feed'),
url(r'^(?P<slug>.*)/$', 'news_item', name='item'),
)
|
[
"django.conf.urls.url"
] |
[((105, 140), 'django.conf.urls.url', 'url', (['"""^$"""', '"""news_list"""'], {'name': '"""list"""'}), "('^$', 'news_list', name='list')\n", (108, 140), False, 'from django.conf.urls import patterns, url\n'), ((166, 219), 'django.conf.urls.url', 'url', (['"""^feed/(?P<slug>.*)/$"""', '"""news_list"""'], {'name': '"""feed"""'}), "('^feed/(?P<slug>.*)/$', 'news_list', name='feed')\n", (169, 219), False, 'from django.conf.urls import patterns, url\n'), ((272, 320), 'django.conf.urls.url', 'url', (['"""^(?P<slug>.*)/$"""', '"""news_item"""'], {'name': '"""item"""'}), "('^(?P<slug>.*)/$', 'news_item', name='item')\n", (275, 320), False, 'from django.conf.urls import patterns, url\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from lib5c.util.plotting import plotter
@plotter
def plot_pvalue_histogram(data, xlabel='pvalue', **kwargs):
"""
Plots a p-value or q-value distribution.
Parameters
----------
data : np.ndarray
The p-values or q-values to plot.
kwargs : kwargs
Typical plotter kwargs.
Returns
-------
pyplot axis
The axis plotted on.
"""
plt.hist(data, bins=np.linspace(0, 1, 21))
plt.ylabel('number of pixels')
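# Example usage (illustrative, not part of the original module): under the null
# hypothesis p-values are uniform on [0, 1], so the histogram should look flat.
#   plot_pvalue_histogram(np.random.uniform(size=1000))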
|
[
"matplotlib.pyplot.ylabel",
"numpy.linspace"
] |
[((492, 522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of pixels"""'], {}), "('number of pixels')\n", (502, 522), True, 'import matplotlib.pyplot as plt\n'), ((465, 486), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(21)'], {}), '(0, 1, 21)\n', (476, 486), True, 'import numpy as np\n')]
|
from flask_restplus import Namespace
from ..search.resources.search import SearchResource, SearchOptionsResource
from app.api.search.search.resources.simple_search import SimpleSearchResource
api = Namespace('search', description='Search related operations')
api.add_resource(SearchResource, '')
api.add_resource(SearchOptionsResource, '/options')
api.add_resource(SimpleSearchResource, '/simple')
|
[
"flask_restplus.Namespace"
] |
[((200, 260), 'flask_restplus.Namespace', 'Namespace', (['"""search"""'], {'description': '"""Search related operations"""'}), "('search', description='Search related operations')\n", (209, 260), False, 'from flask_restplus import Namespace\n')]
|
from _util import *
###########################################################################################################################################################################################################################################################################################################################
class recorder():
def __init__(self):
# length + coverage frequency
self.IS = { "error" : []
, "stds" : []
, "freq" : {"5" : [], "10" : []}
}
self.DR = { "error" : []
, "stds" : []
, "freq" : {"5" : [], "10" : []}
}
self.TR = {"error" : []
, "stds" : []
, "freq" : {"5" : [], "10" : []}}
self.QR = {"error" : []
, "stds" : []
, "freq" : {"5" : [], "10" : []}}
self.raw_Q = []
self.V_true = []
self.seed = 0
self.instances = []
self.names = ["IS", "DR", "TR", "QR"]
def add_env(self, fqi, fqe):
self.fqi_para = fqi
self.fqe_para = fqe
def update(self, V_true, are = None, are_details = None, dis = False, prec = 2):
if are_details is not None:
raw_Qs, IS_V, DR_V, TR_V, QR_V = are_details
else:
raw_Qs, IS_V, DR_V, TR_V, QR_V = are.raw_Qs, are.IS_V, are.DR_V, are.TR_V, are.QR_V
are.large = []
self.seed += 1
############################################################################################################################################
if dis:
printR("true value: {:.2f} ".format(V_true))
printR("raw Q-value: {:.2f}".format(np.mean(raw_Qs)))
pd.set_option('precision', prec)
printR("IS: est = {:.2f}, sigma = {:.2f}".format(IS_V["V"], IS_V["sigma"]))
display(DF(IS_V["CIs"], index = ["0.05", "0.1"]))
printR("DR: est = {:.2f}, sigma = {:.2f}".format(DR_V["V"], DR_V["sigma"]))
display(DF(DR_V["CIs"], index = ["0.05", "0.1"]))
printR("TR: est = {:.2f}, sigma = {:.2f}".format(TR_V["V"], TR_V["sigma"]))
display(DF(TR_V["CIs"], index = ["0.05", "0.1"]))
printR("QR: est = {:.2f}, sigma = {:.2f}".format(QR_V["V"], QR_V["sigma"]))
display(DF(QR_V["CIs"], index = ["0.05", "0.1"]))
############################ Record results ############################
self.raw_Q.append(np.mean(raw_Qs))
self.V_true.append(V_true)
self.IS["error"].append(IS_V["V"] - V_true)
self.IS["stds"].append(IS_V["sigma"])
self.DR["error"].append(DR_V["V"] - V_true)
self.DR["stds"].append(DR_V["sigma"])
self.TR["error"].append(TR_V["V"] - V_true)
self.TR["stds"].append(TR_V["sigma"])
self.QR["error"].append(QR_V["V"] - V_true)
self.QR["stds"].append(QR_V["sigma"])
for i, alpha in enumerate(["5", "10"]):
self.IS["freq"][alpha].append(IS_V["CIs"][i][0] <= V_true and IS_V["CIs"][i][1] >= V_true)
self.DR["freq"][alpha].append(DR_V["CIs"][i][0] <= V_true and DR_V["CIs"][i][1] >= V_true)
self.TR["freq"][alpha].append(TR_V["CIs"][i][0] <= V_true and TR_V["CIs"][i][1] >= V_true)
self.QR["freq"][alpha].append(QR_V["CIs"][i][0] <= V_true and QR_V["CIs"][i][1] >= V_true)
self.instances.append(are)
if dis:
printG("<<================ Iteration {} DONE ! ================>>".format(self.seed))
self.analyze()
def analyze(self, prec = 3, echo = True):
pd.set_option('precision', prec)
mat = [[ np.sqrt(np.mean(arr(estimator["error"]) ** 2))
, np.mean(np.abs(estimator["error"]))
, np.mean(estimator["error"])
, np.mean(estimator["stds"])
#, np.mean(estimator['freq']['1'])
, np.mean(estimator['freq']['5'])
, np.mean(estimator['freq']['10'])] for estimator in [self.IS, self.DR, self.TR, self.QR]]
df = DF(mat
, columns = ["RMSE", "MAE", "bias", "ave_std", "freq: 0.95", "freq: 0.9"] # "freq: 0.99",
, index = self.names)
error_Q = (arr(self.raw_Q) - arr(self.V_true))
RMSE_Q = np.sqrt(np.mean(error_Q ** 2))
MAE_Q = np.mean(np.abs(error_Q))
bias_Q = np.mean(error_Q)
if echo:
display(df)
print("Q: RMSE = {:.2f}, bias = {:.2f}".format(RMSE_Q, bias_Q))
printR("rep = {}".format(self.seed))
return mat
def save(self, path):
freq = arr([[np.mean(estimator['freq'][alpha])
for estimator in [self.IS, self.DR, self.TR, self.QR]
]
for alpha in ["5", "10"]]) # "1",
res = {"DR" : self.DR, "TR" : self.TR, "QR" : self.QR, "IS" : self.IS
, "raw_Q" : self.raw_Q
, "V_true" : self.V_true
, "RMSE" : arr([np.sqrt(np.mean(arr(estimator["error"]) ** 2))
for estimator in [self.IS, self.DR, self.TR, self.QR]])
, "MAE" : arr([
np.mean(np.abs(estimator["error"]))
for estimator in [self.IS, self.DR, self.TR, self.QR]
])
, "std" : arr([np.mean(estimator["stds"])
for estimator in [self.IS, self.DR, self.TR, self.QR]])
, "freq" : freq
, "hyper": self.hyper}
dump(res, path)
def aggregate(self, results, prec = 3):
n_reps = [len(res["DR"]["error"]) for res in results]
total_rep = sum(n_reps)
# n_reps = arr(n_reps)
# n_weight = n_reps / np.sum(n_reps)
pd.set_option('precision', prec)
RMSE = np.sqrt(np.sum([res["RMSE"] ** 2 * n for n, res in zip(n_reps, results)], 0) / total_rep)
bias = arr([np.sum([np.mean(res[est]["error"]) * n for n, res in zip(n_reps, results)], 0) / total_rep for est in self.names
])
# should deal with this line
est_std = arr([np.sum([np.std(res[est]["error"]) * n for n, res in zip(n_reps, results)], 0) / total_rep for est in self.names
])
MAE = np.sum([res["MAE"] * n for n, res in zip(n_reps, results)], 0) / total_rep
std = np.sum([res["std"] * n for n, res in zip(n_reps, results)], 0) / total_rep
freq = np.stack([res["freq"].T * n for n, res in zip(n_reps, results)], axis = 0)
freq = np.sum(freq, axis = 0) / np.sum(n_reps)
res_array = np.hstack([RMSE[:, np.newaxis]
, MAE[:, np.newaxis]
, bias[:, np.newaxis]
, est_std[:, np.newaxis]
, std[:, np.newaxis] # width
, freq])
res = DF(res_array
, columns = ["RMSE", "MAE", "bias", "std", "ave_std", "freq: 0.95", "freq: 0.9"] # "freq: 0.99",
, index = self.names)
display(res)
RMSE_Q = np.sqrt(np.sum([np.mean((arr(res["raw_Q"]) - arr(res["V_true"])) ** 2) * n for n, res in zip(n_reps, results)]) / total_rep)
MAE_Q = np.sum([np.sum(np.abs(arr(res["raw_Q"]) - arr(res["V_true"]))) for n, res in zip(n_reps, results)]) / total_rep
#np.mean(np.abs(arr(self.raw_Q) - arr(self.V_true)))
print("Q: RMSE = {:.2f}, MAE = {:.2f}".format(RMSE_Q, MAE_Q))
printR("rep = {}".format(total_rep))
return res_array
def print_one_seed(self, V_true, are = None, prec = 3):
from IPython.display import display
raw_Qs, DR_V, TR_V, QR_V = are.raw_Qs, are.DR_V, are.TR_V, are.QR_V
printR("true value: {:.2f} ".format(V_true))
printR("raw Q-value: {:.2f}".format(np.mean(raw_Qs)))
printR("raw IS: {:.2f} with std = {:.2f} ".format(are.IS_V["V"], are.IS_V["sigma"]))
pd.set_option('precision', prec)
printR("DR: est = {:.2f}, sigma = {:.2f}".format(DR_V["V"], DR_V["sigma"]))
display(DF(DR_V["CIs"], index = ["0.05", "0.1"]))
printR("TR: est = {:.2f}, sigma = {:.2f}".format(TR_V["V"], TR_V["sigma"]))
display(DF(TR_V["CIs"], index = ["0.05", "0.1"]))
printR("QR: est = {:.2f}, sigma = {:.2f}".format(QR_V["V"], QR_V["sigma"]))
display(DF(QR_V["CIs"], index = ["0.05", "0.1"]))
|
[
"IPython.display.display"
] |
[((7053, 7065), 'IPython.display.display', 'display', (['res'], {}), '(res)\n', (7060, 7065), False, 'from IPython.display import display\n'), ((4440, 4451), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (4447, 4451), False, 'from IPython.display import display\n')]
|
#PHDF_PATH = '/home/brryan/rpm/phoebus/external/parthenon/scripts/python/'
#PHDF_PATH = '/home/brryan/github/phoebus/external/parthenon/scripts/python/'
#DUMP_NAMES = '/home/brryan/builds/phoebus/torus.out1.*.phdf'
DUMP_NAMES = 'torus.out1.*.phdf'
import argparse
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import shutil
import os
from subprocess import call, DEVNULL
import glob
#sys.path.append(PHDF_PATH)
#import phdf
from parthenon_tools import phdf
import time
from enum import Enum
#plot = "mks"
plot = "cartesian"
# Outer radius to plot or None
rmax = 40
#rmax = None
parser = argparse.ArgumentParser(description='Plot torus')
parser.add_argument('--nfinal', type=int, default=-1, help='dump to plot')
parser.add_argument('--savefig', type=bool, default=False, help='Whether to save figure')
args = parser.parse_args()
# Whether to plot meshblock boundaries
plot_meshblocks = True
h_ = 0.3
a = 0.9375
rh = 1. + np.sqrt(1. - a*a)
nfinal = args.nfinal
dfnams = np.sort(glob.glob(DUMP_NAMES))
#dfnam = dfnams[nfinal]
dfile = phdf.phdf(dfnams[0])
dfile1 = phdf.phdf(dfnams[nfinal])
nblocks = dfile.NumBlocks
meshblocksize = dfile.MeshBlockSize
nb = nblocks
nx = meshblocksize[0]
ny = meshblocksize[1]
nz = meshblocksize[2]
print("File: ", dfnams[nfinal], end="\n\n")
time = dfile1.Time  # note: shadows the `time` module imported above
print("Time: ", time, end="\n\n")
print("Nblocks: ", nblocks)
print(" nx: %i" % nx + " ny: %i" % ny)
print("")
blockbounds = dfile.BlockBounds
dx = (blockbounds[0][1] - blockbounds[0][0])/nx
dy = (blockbounds[0][3] - blockbounds[0][2])/ny
# Get pcolormesh grid for each block
xblock = np.zeros([nblocks, nx+1, ny+1])
yblock = np.zeros([nblocks, nx+1, ny+1])
for n in range(nblocks):
for i in range(nx+1):
for j in range(ny+1):
dx = (blockbounds[n][1] - blockbounds[n][0])/nx
dy = (blockbounds[n][3] - blockbounds[n][2])/ny
xblock[n,i,j] = blockbounds[n][0] + i*dx
yblock[n,i,j] = blockbounds[n][2] + j*dy
# Convert from FMKS to xy
r = np.exp(xblock)
th = np.pi*yblock + ((1. - h_)/2.)*np.sin(2.*np.pi*yblock)
x = r*np.sin(th)
y = r*np.cos(th)
print("Variables:")
for var in dfile.Variables:
print(" " + var)
print("")
# Numblocks, nz, ny, nx
Pg = dfile1.Get("pressure", flatten=False)
#bfield = dfile.Get("p.bfield", flatten=False)
vcon = dfile.Get("p.velocity", flatten=False)
density = dfile1.Get("p.density", flatten=False)
crho = dfile1.Get("c.density", flatten=False)
ug = dfile1.Get("p.energy", flatten=False)
fd = dfile1.Get("flux_divergence", flatten=False)
st = dfile1.Get("src_terms", flatten=False)
v1 = vcon[:,:,:,:,0]
v2 = vcon[:,:,:,:,1]
v3 = vcon[:,:,:,:,2]
Bcon = dfile1.Get("p.bfield", flatten=False)
flatgcov = dfile1.Get("g.c.gcov", flatten=False)
alpha = dfile1.Get("g.c.alpha", flatten=False)
gcov = np.zeros([nb,nz,ny,nx,4,4])
def flatten(m,n):
ind = [[0,1,3,6],[1,2,4,7],[3,4,5,8],[6,7,8,9]]
return ind[m][n]
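# Illustrative check (added note, not in the original script): the packing is
# symmetric, e.g. flatten(1, 2) == flatten(2, 1) == 4, since the 10 stored
# components represent the symmetric 4x4 metric tensor g_{mu,nu}.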
for mu in range(4):
gcov[:,:,:,:,mu,0] = flatgcov[:,:,:,:,flatten(mu,0)]
gcov[:,:,:,:,0,mu] = flatgcov[:,:,:,:,flatten(0,mu)]
for mu in range(1,4):
gcov[:,:,:,:,mu,1] = flatgcov[:,:,:,:,flatten(mu,1)]
gcov[:,:,:,:,1,mu] = flatgcov[:,:,:,:,flatten(1,mu)]
for mu in range(2,4):
gcov[:,:,:,:,mu,2] = flatgcov[:,:,:,:,flatten(mu,2)]
gcov[:,:,:,:,2,mu] = flatgcov[:,:,:,:,flatten(2,mu)]
gcov[:,:,:,:,3,3] = flatgcov[:,:,:,:,flatten(3,3)]
Bcov = np.zeros([nb,nz,ny,nx,3])
vcov = np.zeros([nb,nz,ny,nx,3])
for ii in range(3):
for jj in range(3):
Bcov[:,:,:,:,ii] += gcov[:,:,:,:,ii+1,jj+1]*Bcon[:,:,:,:,jj]
vcov[:,:,:,:,ii] += gcov[:,:,:,:,ii+1,jj+1]*vcon[:,:,:,:,jj]
Bsq = np.zeros([nb,nz,ny,nx])
Bdv = np.zeros([nb,nz,ny,nx])
vsq = np.zeros([nb,nz,ny,nx])
Gamma = np.zeros([nb,nz,ny,nx])
for ii in range(3):
Bsq[:,:,:,:] += Bcon[:,:,:,:,ii]*Bcov[:,:,:,:,ii]
Bdv[:,:,:,:] += Bcon[:,:,:,:,ii]*vcov[:,:,:,:,ii]
vsq[:,:,:,:] += vcon[:,:,:,:,ii]*vcov[:,:,:,:,ii]
Gamma[:,:,:,:] = 1./np.sqrt(1 - vsq[:,:,:,:])
b0 = Gamma*Bdv/alpha
bsq = (Bsq + alpha**2*b0**2)/Gamma**2
beta = 2.*Pg/(bsq + 1.e-20)
#b1 = bfield[:,:,:,:,0]
#b2 = bfield[:,:,:,:,1]
#b3 = bfield[:,:,:,:,2]
#b2 = b1*b1 + b2*b2 + b3*b3
#beta = 2*Pg/(b2 + 1.e-100)
#fig = plt.figure()
#ax = plt.gca()
#ax.plot(density[3,0,:,64])
#print(density[3,:,:,64])
#plt.show()
#sys.exit()
var = density
#var = ug
vmin = -5
vmax = 0
#var1 = dfile1.Get("p.density", flatten=False)
#var1 = dfile1.Get("p.energy", flatten=False)
var1 = density
#var = np.fabs(v1)
#vmin=-4
#vmax=0
#var = beta
#vmin = -2
#vmax = 2
mblw = 0.5
def myplot(myvar, n, vmin=vmin, vmax=vmax, uselog=True, cmap='jet',label=None):
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax = axes[n]
#ax = axes
for nb in range(nblocks):
if plot == "mks":
im = ax.pcolormesh(xblock[nb,:,:], yblock[nb,:,:], np.log10(myvar[nb,0].transpose()),
vmin=vmin, vmax=vmax, cmap=cmap)
elif plot == "cartesian":
if uselog:
im = ax.pcolormesh(x[nb,:,:], y[nb,:,:], np.log10(myvar[nb,0].transpose()),
vmin=vmin, vmax=vmax, cmap=cmap)
else:
im = ax.pcolormesh(x[nb,:,:], y[nb,:,:], myvar[nb,0].transpose(),
vmin=vmin, vmax=vmax, cmap=cmap)
if plot_meshblocks:
ax.plot(x[nb,0,:], y[nb,0,:], color='k', linewidth=mblw, linestyle='--')
ax.plot(x[nb,-1,:], y[nb,-1,:], color='k', linewidth=mblw, linestyle='--')
ax.plot(x[nb,:,0], y[nb,:,0], color='k', linewidth=mblw, linestyle='--')
ax.plot(x[nb,:,-1], y[nb,:,-1], color='k', linewidth=mblw, linestyle='--')
if rmax is not None:
ax.set_xlim([0,rmax])
ax.set_ylim([-rmax,rmax])
else:
print("Plotting coordinates \"" + plot + "\" unknown")
sys.exit()
if plot == "cartesian":
ax.set_aspect('equal')
ax.set_xlabel('x')
ax.set_ylabel('y')
# Draw black hole
bh = plt.Circle((0, 0), rh, color='k')
ax.add_patch(bh)
if label is not None:
ax.set_title(label)
if n > 0:
ax.set_yticklabels([])
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im, cax=cax, orientation='vertical')
fig, axes = plt.subplots(1, 2, figsize=(8,8))
myplot(var1,0,label='density')
myplot(beta,1,vmin=-3,vmax=3,uselog=True,cmap='RdBu',label='plasma beta')
if args.savefig:
plt.savefig('frame_%08d.png' % args.nfinal, bbox_inches='tight')
else:
plt.show()
|
[
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"glob.glob",
"matplotlib.pyplot.Circle",
"sys.exit",
"parthenon_tools.phdf.phdf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((644, 693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot torus"""'}), "(description='Plot torus')\n", (667, 693), False, 'import argparse\n'), ((1092, 1112), 'parthenon_tools.phdf.phdf', 'phdf.phdf', (['dfnams[0]'], {}), '(dfnams[0])\n', (1101, 1112), False, 'from parthenon_tools import phdf\n'), ((1122, 1147), 'parthenon_tools.phdf.phdf', 'phdf.phdf', (['dfnams[nfinal]'], {}), '(dfnams[nfinal])\n', (1131, 1147), False, 'from parthenon_tools import phdf\n'), ((1645, 1680), 'numpy.zeros', 'np.zeros', (['[nblocks, nx + 1, ny + 1]'], {}), '([nblocks, nx + 1, ny + 1])\n', (1653, 1680), True, 'import numpy as np\n'), ((1686, 1721), 'numpy.zeros', 'np.zeros', (['[nblocks, nx + 1, ny + 1]'], {}), '([nblocks, nx + 1, ny + 1])\n', (1694, 1721), True, 'import numpy as np\n'), ((2026, 2040), 'numpy.exp', 'np.exp', (['xblock'], {}), '(xblock)\n', (2032, 2040), True, 'import numpy as np\n'), ((2817, 2849), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx, 4, 4]'], {}), '([nb, nz, ny, nx, 4, 4])\n', (2825, 2849), True, 'import numpy as np\n'), ((3391, 3420), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx, 3]'], {}), '([nb, nz, ny, nx, 3])\n', (3399, 3420), True, 'import numpy as np\n'), ((3424, 3453), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx, 3]'], {}), '([nb, nz, ny, nx, 3])\n', (3432, 3453), True, 'import numpy as np\n'), ((3629, 3655), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx]'], {}), '([nb, nz, ny, nx])\n', (3637, 3655), True, 'import numpy as np\n'), ((3659, 3685), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx]'], {}), '([nb, nz, ny, nx])\n', (3667, 3685), True, 'import numpy as np\n'), ((3689, 3715), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx]'], {}), '([nb, nz, ny, nx])\n', (3697, 3715), True, 'import numpy as np\n'), ((3721, 3747), 'numpy.zeros', 'np.zeros', (['[nb, nz, ny, nx]'], {}), '([nb, nz, ny, nx])\n', (3729, 3747), True, 'import numpy as np\n'), ((6146, 6180), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 8)'}), '(1, 2, figsize=(8, 8))\n', (6158, 6180), True, 'import matplotlib.pyplot as plt\n'), ((979, 999), 'numpy.sqrt', 'np.sqrt', (['(1.0 - a * a)'], {}), '(1.0 - a * a)\n', (986, 999), True, 'import numpy as np\n'), ((1037, 1058), 'glob.glob', 'glob.glob', (['DUMP_NAMES'], {}), '(DUMP_NAMES)\n', (1046, 1058), False, 'import glob\n'), ((2106, 2116), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (2112, 2116), True, 'import numpy as np\n'), ((2123, 2133), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (2129, 2133), True, 'import numpy as np\n'), ((3941, 3969), 'numpy.sqrt', 'np.sqrt', (['(1 - vsq[:, :, :, :])'], {}), '(1 - vsq[:, :, :, :])\n', (3948, 3969), True, 'import numpy as np\n'), ((5844, 5877), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', 'rh'], {'color': '"""k"""'}), "((0, 0), rh, color='k')\n", (5854, 5877), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6022), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (6018, 6022), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((6305, 6369), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('frame_%08d.png' % args.nfinal)"], {'bbox_inches': '"""tight"""'}), "('frame_%08d.png' % args.nfinal, bbox_inches='tight')\n", (6316, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6378, 6388), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6386, 6388), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2104), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * yblock)'], {}), '(2.0 * np.pi * yblock)\n', (2082, 2104), True, 'import numpy as np\n'), ((5710, 5720), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5718, 5720), False, 'import sys\n')]
|
# Generated by Django 3.2.9 on 2021-11-18 11:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("podcasts", "0097_rename_schedule_modifier_podcast_frequency_modifier"),
]
operations = [
migrations.RemoveField(
model_name="podcast",
name="podcastindex",
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((261, 326), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""podcast"""', 'name': '"""podcastindex"""'}), "(model_name='podcast', name='podcastindex')\n", (283, 326), False, 'from django.db import migrations\n')]
|
import json
import hmac
import hashlib
import requests
import datetime
import uuid
from requests.auth import AuthBase
class NiceHashAuth(AuthBase):
def __init__(self, fname = None, api_secret = None, api_key = None, org_id = None):
        # default to None so the asserts below fail cleanly when nothing is supplied
        self.api_secret = None
        self.api_key = None
        self.org_id = None
        if fname is not None:
with open(fname) as f:
keys = json.load(f)
self.api_secret = keys['api_secret']
self.api_key = keys['api_key']
self.org_id = keys['org_id']
if api_secret is not None:
self.api_secret = api_secret
if api_key is not None:
self.api_key = api_key
if org_id is not None:
self.org_id = org_id
assert self.api_secret is not None
assert self.api_key is not None
assert self.org_id is not None
def make_timestamp(self):
        # current UTC time in ms as an integer, expressed as a string
return str(round(datetime.datetime.now(tz=datetime.timezone.utc).timestamp() * 1000.0))
def make_nonce(self):
# random long string
return str(uuid.uuid4())
def __call__(self, request):
timestamp = self.make_timestamp()
nonce = self.make_nonce()
empty = bytearray('\x00', 'utf-8')
comps = request.path_url.split('?', 1)
url = comps[0]
query = '' if len(comps) == 1 else comps[1]
body = bytearray(self.api_key, 'utf-8') + empty
body += bytearray(timestamp, 'utf-8') + empty
body += bytearray(nonce, 'utf-8') + empty + empty
body += bytearray(self.org_id, 'utf-8') + empty + empty
body += bytearray(request.method, 'utf-8') + empty
body += bytearray(url, 'utf-8') + empty + bytearray(query, 'utf-8')
if request.body:
body += empty + request.body
digest = hmac.new(bytearray(self.api_secret, 'utf-8'), body, hashlib.sha256).hexdigest()
request.headers.update({
'X-Time': timestamp,
'X-Nonce': nonce,
'X-Organization-Id': self.org_id,
'X-Auth': f'{self.api_key}:{digest}',
})
return request
def checkNHTime():
    '''Check that our clock is within 5 minutes of the nicehash clock (no point in continuing
    if it isn't).'''
nhtime = float(requests.get('https://api2.nicehash.com/api/v2/time').json()['serverTime'])
nhtime /= 1000.0
nhtime = datetime.datetime.fromtimestamp(nhtime, tz=datetime.timezone.utc)
mytime = datetime.datetime.now(tz=datetime.timezone.utc)
delta = nhtime - mytime
assert delta <= datetime.timedelta(minutes=5) and delta >= datetime.timedelta(minutes=-5), \
f"timedelta is too great: {delta}"
|
[
"json.load",
"uuid.uuid4",
"datetime.timedelta",
"requests.get",
"datetime.datetime.fromtimestamp",
"datetime.datetime.now"
] |
[((2460, 2525), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['nhtime'], {'tz': 'datetime.timezone.utc'}), '(nhtime, tz=datetime.timezone.utc)\n', (2491, 2525), False, 'import datetime\n'), ((2540, 2587), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (2561, 2587), False, 'import datetime\n'), ((1108, 1120), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1118, 1120), False, 'import uuid\n'), ((2638, 2667), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (2656, 2667), False, 'import datetime\n'), ((2681, 2711), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(-5)'}), '(minutes=-5)\n', (2699, 2711), False, 'import datetime\n'), ((339, 351), 'json.load', 'json.load', (['f'], {}), '(f)\n', (348, 351), False, 'import json\n'), ((2348, 2401), 'requests.get', 'requests.get', (['"""https://api2.nicehash.com/api/v2/time"""'], {}), "('https://api2.nicehash.com/api/v2/time')\n", (2360, 2401), False, 'import requests\n'), ((958, 1005), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (979, 1005), False, 'import datetime\n')]
|
import numpy as np
# Python3 program to find the element
# closest to a given target.
# Returns element closest to target in arr[]
def findClosest(arr, n, target):
# Corner cases
if (target <= arr[0][0]):
return 0
if (target >= arr[n - 1][0]):
return n - 1
# Doing binary search
i = 0
j = n
mid = 0
while (i < j):
mid = (i + j) // 2
if (arr[mid][0] == target):
return mid
# If target is less than array
# element, then search in left
if (target < arr[mid][0]):
# If target is greater than previous
# to mid, return closest of two
if (mid > 0 and target > arr[mid - 1][0]):
return getClosest(arr, mid - 1, mid, target)
# Repeat for left half
j = mid
# If target is greater than mid
else:
if (mid < n - 1 and target < arr[mid + 1][0]):
return getClosest(arr, mid, mid + 1, target)
# update i
i = mid + 1
# Only single element left after search
return mid
# Method to compare which of the two values is closer.
# We find the closest by taking the difference
# between the target and both values. It assumes
# that val2 is greater than val1 and target lies
# between these two.
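# Worked example (added for clarity): for target=7 between val1=5 and val2=10,
# 7-5=2 < 10-7=3, so getClosest returns ind1 (the index of 5).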
def getClosest(arr, ind1, ind2, target):
val1 = arr[ind1][0]
val2 = arr[ind2][0]
if (target - val1 >= val2 - target):
return ind2
else:
return ind1
def get_bound(arr, N, s,e):
f1 = get_bound_util(arr, N, s, True)
f2 = get_bound_util(arr, N, e, False)
return f1,f2
def get_bound_util(arr, N, X, is_start):
if is_start:
idx = findClosest(arr, N, X)
# idx = 0
if idx==0:
return np.zeros(60)
else:
return arr[idx-1][1:]
else:
idx = findClosest(arr, arr.shape[0], X)
# idx = N-1
return arr[idx][1:]
if __name__ == '__main__':
    # the (arr, N, X, is_start) argument pattern matches get_bound_util's signature;
    # calling get_bound here would crash on plain lists (arr.shape in the else branch)
    gb = get_bound_util([[4], [5], [10], [12], [18], [20]], 6, 20, True)
print(gb)
|
[
"numpy.zeros"
] |
[((1791, 1803), 'numpy.zeros', 'np.zeros', (['(60)'], {}), '(60)\n', (1799, 1803), True, 'import numpy as np\n')]
|
"""
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-and-fredo-cm-number-theory-97942213/
Given two weights of a and b units, in how many different ways can you achieve a weight of d units using only the given
weights? Any of the given weights can be used any number of times (including 0 times).
Input - Output:
The first line of input consists of an integer T denoting the number of test cases.
Each test case consists of only one line containing three space separated integers a, b and d.
For each test case, print the answer in a separate line.
Sample input:
4
2 3 7
4 10 6
6 14 0
2 3 6
Sample Output:
1
0
1
2
"""
"""
This is considered to be a common problem in number theory. However, even if the solution might seem simple at first,
the full Mathematical concept and all the proofs need study to be fully understood. Here, we will directly use the tools
without the proofs. The question can be translated into the following: Find all the possible x and y pairs for which
a*x + b*y = d (1). This is a linear diophantine equation and has solutions if and only if d % GCD(a, b) = 0. With simple
words, it would how many a's and b's can we add together to get d? Is there only one way to do it or more? If a = b = 0
and d!=0 then there are no solutions. If a=b=c=0 then there are infinite solutions. To solve this equation we use the
extended Euclidean algorithm to solve a*x' + b*'y = GCD(a, b). Then, an initial integer solution for (1) is going to be
x0 = x`*d/GCD(a, b) and y0 = y'*d/GCD(a,b). From this initial solution we can generate all the other solutions which
will be x = x0 + k*b/GCD(a, b) and y = y0 - k*a/GCD(a, b). We now need to find k so that we only have non negative
solutions.
0 <= x0 + k*b/GCD(a, b) => -x0 <= k*b/GCD(a, b) => -x'*d/GCD(a, b) <= k*b/GCD(a, b) => k >= -x'*d/b (2)
0 <= y0 - k*a/GCD(a, b) => y0 >= k*a/GCD(a, b) => y'*d/GCD(a, b) >= k*a/GCD(a, b) => k <= y'*d/a (3)
From (2) and (3) we have: -x'*d/b <= k <= y'*d/a
So, the final answer will be: number_of_solutions = floor(y'*d/a) - ceil(-x'*d/b) + 1. Why floor, ceil and + 1 you may
ask? Since k must be integer (don't get confused with the integer solution) it means that if -x'*d/b is a float number,
for example 2.2, then k >= 2.2 and the first integer that's bigger than 2.2 is 3. We follow the exact same logic to
understand the ceil. Finally, we add +1 because we already find an initial solution, x0, y0 that we didn't count.
O(log(min(A, B)) to find x', y' and GCD(a, b).
Final complexity: O(log(min(A, B))
"""
def extended_euclidean(a, b):
if b == 0:
return a, 1, 0
gcd, x1, y1 = extended_euclidean(b, a % b)
x = y1
y = x1 - (a // b) * y1
return gcd, x, y
from sys import stdin, stdout
from math import ceil, floor
t = int(stdin.readline())
for _ in range(t):
a, b, d = map(int, stdin.readline().split())
gcd, x, y = extended_euclidean(a, b)
if d % gcd != 0:
stdout.write(str(0) + "\n")
continue
first = ceil((-x)*d/b)
second = floor(y*d/a)
if first <= second:
stdout.write(str(second - first + 1) + "\n")
else:
stdout.write(str(0) + "\n")
|
[
"math.floor",
"sys.stdin.readline",
"math.ceil"
] |
[((2800, 2816), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (2814, 2816), False, 'from sys import stdin, stdout\n'), ((3014, 3030), 'math.ceil', 'ceil', (['(-x * d / b)'], {}), '(-x * d / b)\n', (3018, 3030), False, 'from math import ceil, floor\n'), ((3042, 3058), 'math.floor', 'floor', (['(y * d / a)'], {}), '(y * d / a)\n', (3047, 3058), False, 'from math import ceil, floor\n'), ((2860, 2876), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (2874, 2876), False, 'from sys import stdin, stdout\n')]
|
import unittest
from signals.generators.ios.ios_template_methods import iOSTemplateMethods
from signals.parser.fields import Field
from signals.parser.api import GetAPI, API, PatchAPI
from tests.utils import create_dynamic_schema
class iOSTemplateMethodsTestCase(unittest.TestCase):
def test_get_url_name(self):
self.assertEqual(iOSTemplateMethods.get_url_name("/post/:id/favorite/"), "PostWithIdFavorite")
def test_method_name(self):
api = GetAPI("post/", {
"response": {
"200+": "$postResponse"
}
})
self.assertEqual(iOSTemplateMethods.method_name(api), "PostWithSuccess")
api = GetAPI("post/:id/", {
"response": {
"200+": "$postResponse"
}
})
self.assertEqual(iOSTemplateMethods.method_name(api), "PostWithTheID")
objects_json = {
'$postRequest': {"body": "string", "title": "string"},
'$postResponse': {"body": "string", "title": "string"}
}
urls_json = [
{
"url": "post/:id/",
"patch": {
"request": "$postRequest",
"response": {
"200+": "$postResponse"
}
}
}
]
schema = create_dynamic_schema(objects_json, urls_json)
self.assertEqual(iOSTemplateMethods.method_name(schema.urls[0].patch), "PostWithBody")
def test_content_type(self):
api = GetAPI("post/", {
"response": {
"200+": "$postResponse"
}
})
self.assertEqual(iOSTemplateMethods.content_type(api), "RKMIMETypeJSON")
api.content_type = API.CONTENT_TYPE_FORM
self.assertEqual(iOSTemplateMethods.content_type(api), "RKMIMETypeFormURLEncoded")
def test_media_field_check(self):
fields = [
Field("video", ["video"]),
Field("image", ["image"])
]
media_field_statement = iOSTemplateMethods.media_field_check(fields)
self.assertEqual(media_field_statement, "video != nil || image != nil")
|
[
"signals.generators.ios.ios_template_methods.iOSTemplateMethods.content_type",
"signals.generators.ios.ios_template_methods.iOSTemplateMethods.get_url_name",
"signals.parser.fields.Field",
"tests.utils.create_dynamic_schema",
"signals.parser.api.GetAPI",
"signals.generators.ios.ios_template_methods.iOSTemplateMethods.method_name",
"signals.generators.ios.ios_template_methods.iOSTemplateMethods.media_field_check"
] |
[((468, 524), 'signals.parser.api.GetAPI', 'GetAPI', (['"""post/"""', "{'response': {'200+': '$postResponse'}}"], {}), "('post/', {'response': {'200+': '$postResponse'}})\n", (474, 524), False, 'from signals.parser.api import GetAPI, API, PatchAPI\n'), ((673, 733), 'signals.parser.api.GetAPI', 'GetAPI', (['"""post/:id/"""', "{'response': {'200+': '$postResponse'}}"], {}), "('post/:id/', {'response': {'200+': '$postResponse'}})\n", (679, 733), False, 'from signals.parser.api import GetAPI, API, PatchAPI\n'), ((1344, 1390), 'tests.utils.create_dynamic_schema', 'create_dynamic_schema', (['objects_json', 'urls_json'], {}), '(objects_json, urls_json)\n', (1365, 1390), False, 'from tests.utils import create_dynamic_schema\n'), ((1534, 1590), 'signals.parser.api.GetAPI', 'GetAPI', (['"""post/"""', "{'response': {'200+': '$postResponse'}}"], {}), "('post/', {'response': {'200+': '$postResponse'}})\n", (1540, 1590), False, 'from signals.parser.api import GetAPI, API, PatchAPI\n'), ((2042, 2086), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.media_field_check', 'iOSTemplateMethods.media_field_check', (['fields'], {}), '(fields)\n', (2078, 2086), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((343, 397), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.get_url_name', 'iOSTemplateMethods.get_url_name', (['"""/post/:id/favorite/"""'], {}), "('/post/:id/favorite/')\n", (374, 397), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((602, 637), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.method_name', 'iOSTemplateMethods.method_name', (['api'], {}), '(api)\n', (632, 637), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((811, 846), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.method_name', 'iOSTemplateMethods.method_name', (['api'], {}), '(api)\n', (841, 846), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((1416, 1468), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.method_name', 'iOSTemplateMethods.method_name', (['schema.urls[0].patch'], {}), '(schema.urls[0].patch)\n', (1446, 1468), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((1668, 1704), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.content_type', 'iOSTemplateMethods.content_type', (['api'], {}), '(api)\n', (1699, 1704), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((1799, 1835), 'signals.generators.ios.ios_template_methods.iOSTemplateMethods.content_type', 'iOSTemplateMethods.content_type', (['api'], {}), '(api)\n', (1830, 1835), False, 'from signals.generators.ios.ios_template_methods import iOSTemplateMethods\n'), ((1935, 1960), 'signals.parser.fields.Field', 'Field', (['"""video"""', "['video']"], {}), "('video', ['video'])\n", (1940, 1960), False, 'from signals.parser.fields import Field\n'), ((1974, 1999), 'signals.parser.fields.Field', 'Field', (['"""image"""', "['image']"], {}), "('image', ['image'])\n", (1979, 1999), False, 'from signals.parser.fields import Field\n')]
|
import numpy as np
import tensorflow as tf
import tfops_short as Z
class model:
def __init__(self, sess, hps, train_iterator, data_init):
# === Define session
self.sess = sess
self.hps = hps
# === Input tensors
with tf.name_scope('input'):
s_shape = [None, hps.n_bins, 1]
self.s_placeholder = tf.compat.v1.placeholder(tf.float32, s_shape, name='spectra')
self.lr_placeholder = tf.compat.v1.placeholder(tf.float32, None, name='learning_rate')
self.train_iterator = train_iterator
            z_shape = [None, hps.n_bins // 2 ** (hps.n_levels + 1), 4]  # integer division keeps placeholder dims integral
            self.z_placeholder = tf.compat.v1.placeholder(tf.float32, z_shape, name='latent_rep')
            intermediate_z_shapes = [[None, hps.n_bins // 2 ** (i + 1), 2] for i in range(1, hps.n_levels)]
self.intermediate_z_placeholders = [
tf.compat.v1.placeholder(tf.float32, shape)
for shape in intermediate_z_shapes
]
# === Loss and optimizer
self.optimizer, self.loss, self.stats = self._create_optimizer()
# === Encoding and decoding
self.z, self.logpx, self.intermediate_zs = self._create_encoder(self.s_placeholder)
self.s = self._create_decoder(self.z_placeholder)
self.s_from_intermediate_zs = self._create_decoder(self.z_placeholder,
self.intermediate_z_placeholders)
# === Initialize
sess.run(tf.compat.v1.global_variables_initializer()) # not sure if more initialization is needed?
# === Saving and restoring
with tf.device('/cpu:0'):
saver = tf.compat.v1.train.Saver()
self.save = lambda path: saver.save(sess, path, write_meta_graph=False)
self.restore = lambda path: saver.restore(sess, path)
def _create_optimizer(self):
'''Set up optimizer to train on input train_iterator and learning rate.'''
_, logpx, _ = self._create_encoder(self.train_iterator)
bits_x = -logpx / (np.log(2.) * self.hps.n_bins) # bits per subpixel
with tf.compat.v1.variable_scope('optimizer', reuse=tf.compat.v1.AUTO_REUSE):
loss = tf.reduce_mean(bits_x)
stats = tf.stack([tf.reduce_mean(loss)])
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_placeholder).minimize(loss)
return optimizer, loss, stats
def _create_encoder(self, x):
'''Set up encoder tensors to pipe input spectra x to a latent representation
Args:
x: input tensor with shape [?, n_bins, 1], either a placeholder or data stream
Returns:
z: output tensor, contains the fully compressed latent representation
logpx: tensor with shape [?,], the log likelihood of each spectrum
intermediate_zs: list of tensors, the components dropped after splits
'''
logpx = tf.zeros_like(x, dtype='float32')[:, 0, 0] # zeros tensor with shape (batch_size)
intermediate_zs = []
z = Z.squeeze(x - .5, 4) # preprocess the input
with tf.compat.v1.variable_scope('model', reuse=tf.compat.v1.AUTO_REUSE):
for i in range(self.hps.n_levels):
for j in range(self.hps.depth):
z, logpx = self._flow_step('flow-level{}-depth{}'.format(i, j), z, logpx)
if i < self.hps.n_levels - 1:
z1, z2 = Z.split(z)
intermediate_prior = self._create_prior(z2)
logpx += intermediate_prior.logp(z2)
intermediate_zs.append(z2)
z = Z.squeeze(z1, 2)
prior = self._create_prior(z)
logpx += prior.logp(z)
return z, logpx, intermediate_zs
def _create_decoder(self, z, intermediate_zs=None):
'''Set up decoder tensors to generate spectra from latent representation.
Args:
z: tensor where shape matches final latent representation.
intermediate_zs: optional list of tensors, components removed during encoder splits.
Returns:
x: tensor with shape [?, n_bins, 1], spectra constructed from z.
'''
with tf.compat.v1.variable_scope('model', reuse=tf.compat.v1.AUTO_REUSE):
for i in reversed(range(self.hps.n_levels)):
if i < self.hps.n_levels - 1:
z1 = Z.unsqueeze(z, 2)
if intermediate_zs is None:
intermediate_prior = self._create_prior(z1)
z2 = intermediate_prior.sample()
else:
z2 = intermediate_zs[i]
z = Z.unsplit(z1, z2)
for j in reversed(range(self.hps.depth)):
z = self._reverse_flow_step('flow-level{}-depth{}'.format(i, j), z)
x = Z.unsqueeze(z + .5, 4) # post-process spectra
return x
def _flow_step(self, name, z, logdet):
with tf.compat.v1.variable_scope(name):
z, logdet = Z.actnorm('actnorm', z, logdet)
z, logdet = Z.invertible_1x1_conv('invconv', z, logdet, reverse=False)
z1, z2 = Z.split(z)
z2 += Z.f('f', z1, self.hps.width)
z = Z.unsplit(z1, z2)
return z, logdet
def _reverse_flow_step(self, name, z):
with tf.compat.v1.variable_scope(name):
z1, z2 = Z.split(z)
z2 -= Z.f('f', z1, self.hps.width)
z = Z.unsplit(z1, z2)
z, _ = Z.invertible_1x1_conv('invconv', z, 0, reverse=True)
z = Z.actnorm_reverse('actnorm', z)
return z
def _create_prior(self, z):
'''Create a unit normal Gaussian object with same shape as z.'''
mu = tf.zeros_like(z, dtype='float32')
logs = tf.zeros_like(z, dtype='float32')
return Z.gaussian_diag(mu, logs)
def train(self, lr):
'''Run one training batch to optimize the network with learning rate lr.
Returns:
stats: statistics created in _create_optimizer. probably contains loss.
'''
_, stats = self.sess.run([self.optimizer, self.stats], {self.lr_placeholder: lr})
return stats
def encode(self, s):
return self.sess.run([self.z, self.intermediate_zs], {self.s_placeholder: s})
def decode(self, z, intermediate_zs=None):
'''Decode a latent representation with optional intermediate components.
Returns:
spectra, from z and intermediate zs. If no intermediate zs are provided, sample them
randomly from unit normal distributions.
'''
feed_dict = {self.z_placeholder: z}
if intermediate_zs is None:
return self.sess.run(self.s, feed_dict)
else:
for i in range(len(intermediate_zs)):
feed_dict[self.intermediate_z_placeholders[i]] = intermediate_zs[i]
return self.sess.run(self.s_from_intermediate_zs, feed_dict)
def get_likelihood(self, s):
return self.sess.run(self.logpx, {self.s_placeholder: s})
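# Illustrative construction (a sketch with hypothetical hyperparameters; `hps`
# is assumed to be any object exposing n_bins, n_levels, depth and width):
#   sess = tf.compat.v1.Session()
#   m = model(sess, hps, train_iterator, data_init=None)
#   stats = m.train(lr=1e-4)          # one optimizer step
#   z, intermediate_zs = m.encode(s)  # s: batch of spectra, shape [?, n_bins, 1]
#   s_rec = m.decode(z, intermediate_zs)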
|
[
"tensorflow.zeros_like",
"tfops_short.f",
"tfops_short.squeeze",
"tfops_short.invertible_1x1_conv",
"tfops_short.unsplit",
"tfops_short.gaussian_diag",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.placeholder",
"tensorflow.name_scope",
"tfops_short.actnorm",
"tfops_short.split",
"tensorflow.compat.v1.train.Saver",
"tensorflow.reduce_mean",
"tfops_short.actnorm_reverse",
"numpy.log",
"tensorflow.device",
"tfops_short.unsqueeze",
"tensorflow.train.AdamOptimizer"
] |
[((3096, 3117), 'tfops_short.squeeze', 'Z.squeeze', (['(x - 0.5)', '(4)'], {}), '(x - 0.5, 4)\n', (3105, 3117), True, 'import tfops_short as Z\n'), ((5841, 5874), 'tensorflow.zeros_like', 'tf.zeros_like', (['z'], {'dtype': '"""float32"""'}), "(z, dtype='float32')\n", (5854, 5874), True, 'import tensorflow as tf\n'), ((5890, 5923), 'tensorflow.zeros_like', 'tf.zeros_like', (['z'], {'dtype': '"""float32"""'}), "(z, dtype='float32')\n", (5903, 5923), True, 'import tensorflow as tf\n'), ((5939, 5964), 'tfops_short.gaussian_diag', 'Z.gaussian_diag', (['mu', 'logs'], {}), '(mu, logs)\n', (5954, 5964), True, 'import tfops_short as Z\n'), ((263, 285), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (276, 285), True, 'import tensorflow as tf\n'), ((364, 425), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', 's_shape'], {'name': '"""spectra"""'}), "(tf.float32, s_shape, name='spectra')\n", (388, 425), True, 'import tensorflow as tf\n'), ((461, 525), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', 'None'], {'name': '"""learning_rate"""'}), "(tf.float32, None, name='learning_rate')\n", (485, 525), True, 'import tensorflow as tf\n'), ((674, 738), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', 'z_shape'], {'name': '"""latent_rep"""'}), "(tf.float32, z_shape, name='latent_rep')\n", (698, 738), True, 'import tensorflow as tf\n'), ((1524, 1567), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (1565, 1567), True, 'import tensorflow as tf\n'), ((1663, 1682), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (1672, 1682), True, 'import tensorflow as tf\n'), ((1704, 1730), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {}), '()\n', (1728, 1730), True, 'import tensorflow as tf\n'), ((2154, 2225), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""optimizer"""'], {'reuse': 'tf.compat.v1.AUTO_REUSE'}), "('optimizer', reuse=tf.compat.v1.AUTO_REUSE)\n", (2181, 2225), True, 'import tensorflow as tf\n'), ((2246, 2268), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['bits_x'], {}), '(bits_x)\n', (2260, 2268), True, 'import tensorflow as tf\n'), ((2973, 3006), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {'dtype': '"""float32"""'}), "(x, dtype='float32')\n", (2986, 3006), True, 'import tensorflow as tf\n'), ((3153, 3220), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""model"""'], {'reuse': 'tf.compat.v1.AUTO_REUSE'}), "('model', reuse=tf.compat.v1.AUTO_REUSE)\n", (3180, 3220), True, 'import tensorflow as tf\n'), ((4270, 4337), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""model"""'], {'reuse': 'tf.compat.v1.AUTO_REUSE'}), "('model', reuse=tf.compat.v1.AUTO_REUSE)\n", (4297, 4337), True, 'import tensorflow as tf\n'), ((4936, 4959), 'tfops_short.unsqueeze', 'Z.unsqueeze', (['(z + 0.5)', '(4)'], {}), '(z + 0.5, 4)\n', (4947, 4959), True, 'import tfops_short as Z\n'), ((5060, 5093), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['name'], {}), '(name)\n', (5087, 5093), True, 'import tensorflow as tf\n'), ((5119, 5150), 'tfops_short.actnorm', 'Z.actnorm', (['"""actnorm"""', 'z', 'logdet'], {}), "('actnorm', z, logdet)\n", (5128, 5150), True, 'import tfops_short as Z\n'), ((5175, 5233), 'tfops_short.invertible_1x1_conv', 'Z.invertible_1x1_conv', (['"""invconv"""', 'z', 'logdet'], {'reverse': '(False)'}), "('invconv', z, logdet, reverse=False)\n", (5196, 5233), True, 'import tfops_short as Z\n'), ((5255, 5265), 'tfops_short.split', 'Z.split', (['z'], {}), '(z)\n', (5262, 5265), True, 'import tfops_short as Z\n'), ((5284, 5312), 'tfops_short.f', 'Z.f', (['"""f"""', 'z1', 'self.hps.width'], {}), "('f', z1, self.hps.width)\n", (5287, 5312), True, 'import tfops_short as Z\n'), ((5329, 5346), 'tfops_short.unsplit', 'Z.unsplit', (['z1', 'z2'], {}), '(z1, z2)\n', (5338, 5346), True, 'import tfops_short as Z\n'), ((5433, 5466), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['name'], {}), '(name)\n', (5460, 5466), True, 'import tensorflow as tf\n'), ((5489, 5499), 'tfops_short.split', 'Z.split', (['z'], {}), '(z)\n', (5496, 5499), True, 'import tfops_short as Z\n'), ((5518, 5546), 'tfops_short.f', 'Z.f', (['"""f"""', 'z1', 'self.hps.width'], {}), "('f', z1, self.hps.width)\n", (5521, 5546), True, 'import tfops_short as Z\n'), ((5563, 5580), 'tfops_short.unsplit', 'Z.unsplit', (['z1', 'z2'], {}), '(z1, z2)\n', (5572, 5580), True, 'import tfops_short as Z\n'), ((5600, 5652), 'tfops_short.invertible_1x1_conv', 'Z.invertible_1x1_conv', (['"""invconv"""', 'z', '(0)'], {'reverse': '(True)'}), "('invconv', z, 0, reverse=True)\n", (5621, 5652), True, 'import tfops_short as Z\n'), ((5669, 5700), 'tfops_short.actnorm_reverse', 'Z.actnorm_reverse', (['"""actnorm"""', 'z'], {}), "('actnorm', z)\n", (5686, 5700), True, 'import tfops_short as Z\n'), ((906, 949), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', 'shape'], {}), '(tf.float32, shape)\n', (930, 949), True, 'import tensorflow as tf\n'), ((2089, 2100), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2095, 2100), True, 'import numpy as np\n'), ((2299, 2319), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2313, 2319), True, 'import tensorflow as tf\n'), ((2346, 2403), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr_placeholder'}), '(learning_rate=self.lr_placeholder)\n', (2368, 2403), True, 'import tensorflow as tf\n'), ((3486, 3496), 'tfops_short.split', 'Z.split', (['z'], {}), '(z)\n', (3493, 3496), True, 'import tfops_short as Z\n'), ((3689, 3705), 'tfops_short.squeeze', 'Z.squeeze', (['z1', '(2)'], {}), '(z1, 2)\n', (3698, 3705), True, 'import tfops_short as Z\n'), ((4467, 4484), 'tfops_short.unsqueeze', 'Z.unsqueeze', (['z', '(2)'], {}), '(z, 2)\n', (4478, 4484), True, 'import tfops_short as Z\n'), ((4756, 4773), 'tfops_short.unsplit', 'Z.unsplit', (['z1', 'z2'], {}), '(z1, z2)\n', (4765, 4773), True, 'import tfops_short as Z\n')]
|
import random
from functools import wraps
from .ipa import Client
class IPAAdmin(object):
__WRAPPED_METHODS = ("user_add", "user_show", "user_mod", "group_add_member")
__WRAPPED_METHODS_TESTING = (
"user_del",
"group_add",
"group_del",
"group_add_member_manager",
"pwpolicy_add",
"otptoken_add",
"otptoken_del",
"otptoken_find",
)
def __init__(self, app):
self.__username = app.config['FREEIPA_ADMIN_USER']
self.__password = app.config['FREEIPA_ADMIN_PASSWORD']
app.config['FREEIPA_ADMIN_USER'] = '***'
app.config['FREEIPA_ADMIN_PASSWORD'] = '***' # nosec
self.__app = app
# Attempt to obtain an administrative IPA session
def __maybe_ipa_admin_session(self):
self.__client = Client(
random.choice(self.__app.config['FREEIPA_SERVERS']),
verify_ssl=self.__app.config['FREEIPA_CACERT'],
)
self.__client.login(self.__username, self.__password)
self.__client._request('ping')
return self.__client
def __wrap_method(self, method_name):
@wraps(getattr(Client, method_name))
def wrapper(*args, **kwargs):
ipa = self.__maybe_ipa_admin_session()
ipa_method = getattr(ipa, method_name)
res = ipa_method(*args, **kwargs)
ipa.logout()
return res
return wrapper
def __getattr__(self, name):
wrapped_methods = list(self.__WRAPPED_METHODS)
if self.__app.config['TESTING']: # pragma: no cover
wrapped_methods.extend(self.__WRAPPED_METHODS_TESTING)
if name in wrapped_methods:
return self.__wrap_method(name)
raise AttributeError(name)
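# Example usage (a sketch; assumes a Flask-style app whose config defines the
# FREEIPA_* keys used above):
#   admin = IPAAdmin(app)
#   admin.user_show('alice')  # opens an admin session, runs user_show, then logs out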
|
[
"random.choice"
] |
[((839, 890), 'random.choice', 'random.choice', (["self.__app.config['FREEIPA_SERVERS']"], {}), "(self.__app.config['FREEIPA_SERVERS'])\n", (852, 890), False, 'import random\n')]
|
""" Test Object Tracking
This script receives a .tsv file as input which has already been labelled
and runs the four selected objects tracking algorithm on all videos.
The target object that is being gazed at by the person is presented in blue.
Parameters
----------
tsv_path : str, optional
Path to tsv file containing dataset information (default is "benchmarks/gaze.tsv")
tracker_type : str, optional
Tracking algorithm (default is "CSRT", possible values are ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'])
use_gpu : bool, optional
Whether to use a gpu (default is False)
write_video : bool, optional
Whether to save the processed video via OpenCV (default is True)
"""
# External imports
from sacred import Experiment
from sacred.observers import FileStorageObserver
from collections import namedtuple
from adam_visual_perception import ObjectTracker
import pandas as pd
import numpy as np
import sys
import os
ex = Experiment()
@ex.config
def my_config():
tsv_path = "benchmarks/gaze.tsv"
tracker_type = "CSRT"
use_gpu = False
write_video = True
@ex.automain
def main(_config):
args = namedtuple('GenericDict', _config.keys())(**_config)
# Setting the random seed
np.random.seed(args.seed)
# Load tsv
if not os.path.isfile(args.tsv_path):
raise Exception("The path to tsv file cannot be found at {}.".format(args.tsv_path))
df = pd.read_csv(args.tsv_path, sep='\t')
    # Define a tracker
ot = ObjectTracker(
tracker_type=args.tracker_type,
use_gpu=args.use_gpu,
detect_objects=False,
write_video=args.write_video,
)
for index, row in df.iterrows():
if len(row) == 6:
filename, o1, o2, o3, o4, label = row
print("Started {}".format(filename))
objs = [o1, o2, o3, o4]
bboxes = []
            for bbox in objs:
                if pd.isna(bbox):  # robust NaN check; 'bbox is np.nan' can miss float NaNs
                    print("Skipping {}. No bounding boxes are provided".format(filename))
                    break
                bbox = tuple(map(int, bbox.strip("()").split(', ')))
                bboxes.append(bbox)
            else:
                # run tracking only when all four bounding boxes were parsed
                ot.get_four_bboxes(filename, bboxes, label, args.write_video)
else:
print("Error: Did you forget to run the object labeling script?")
sys.exit()
print("Done!")
|
[
"numpy.random.seed",
"pandas.read_csv",
"os.path.isfile",
"adam_visual_perception.ObjectTracker",
"sacred.Experiment",
"sys.exit"
] |
[((967, 979), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (977, 979), False, 'from sacred import Experiment\n'), ((1248, 1273), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1262, 1273), True, 'import numpy as np\n'), ((1435, 1471), 'pandas.read_csv', 'pd.read_csv', (['args.tsv_path'], {'sep': '"""\t"""'}), "(args.tsv_path, sep='\\t')\n", (1446, 1471), True, 'import pandas as pd\n'), ((1504, 1627), 'adam_visual_perception.ObjectTracker', 'ObjectTracker', ([], {'tracker_type': 'args.tracker_type', 'use_gpu': 'args.use_gpu', 'detect_objects': '(False)', 'write_video': 'args.write_video'}), '(tracker_type=args.tracker_type, use_gpu=args.use_gpu,\n detect_objects=False, write_video=args.write_video)\n', (1517, 1627), False, 'from adam_visual_perception import ObjectTracker\n'), ((1301, 1330), 'os.path.isfile', 'os.path.isfile', (['args.tsv_path'], {}), '(args.tsv_path)\n', (1315, 1330), False, 'import os\n'), ((2355, 2365), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2363, 2365), False, 'import sys\n')]
|
import os
def validate_helm_chart(helm_chart):
os.system(f'helm lint {helm_chart}')
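# A checked variant (a sketch, not part of the original helper): subprocess.run
# makes the lint outcome inspectable instead of discarding os.system's status.
import subprocess
def validate_helm_chart_checked(helm_chart):
    # True when `helm lint` exits with status 0
    result = subprocess.run(['helm', 'lint', helm_chart])
    return result.returncode == 0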
|
[
"os.system"
] |
[((52, 88), 'os.system', 'os.system', (['f"""helm lint {helm_chart}"""'], {}), "(f'helm lint {helm_chart}')\n", (61, 88), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import os
from shutil import rmtree
from tempfile import mkdtemp
import unittest
from pelican import Pelican
from pelican.settings import read_settings
from pelican.tests.support import mute
from fontawesome_markdown import FontAwesomeExtension
import pelicanfly
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))
INPUT_PATH = os.path.join(CURRENT_DIR, "content")
class TestPelicanfly(unittest.TestCase):
def setUp(self):
self.temp_path = mkdtemp(prefix='pelicanfly.')
        pelicanfly_path, _ = os.path.split(pelicanfly.__file__)  # split, not join: we need the package directory
self.pelicanfly_static = os.path.join(pelicanfly_path, 'static')
self.settings = read_settings(path=None,
override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'PLUGINS': [pelicanfly]})
self.pelican = Pelican(self.settings)
mute(True)(self.pelican.run)()
def tearDown(self):
rmtree(self.temp_path)
def test_add_markdown_plugin(self):
        added = any([isinstance(x, FontAwesomeExtension)
for x in self.pelican.settings['MD_EXTENSIONS']])
self.assertTrue(added)
def test_add_static_paths(self):
theme = self.pelican.settings['THEME_STATIC_PATHS']
self.assertTrue(self.pelicanfly_static in theme)
def test_markdown_plugin(self):
        with open(os.path.join(self.temp_path, 'pages', 'a-sample-page.html'), 'r') as sample_output:
            self.assertTrue('<i class="fa fa-bug"></i>' in sample_output.read())
def test_assets_exist(self):
for static_dir in ['css', 'fonts']:
static_path = os.path.join(self.pelicanfly_static, static_dir)
for static_file in os.listdir(static_path):
in_theme = os.path.join(self.temp_path, 'theme',
static_dir, static_file)
self.assertTrue(os.path.exists(in_theme))
|
[
"os.path.abspath",
"pelican.settings.read_settings",
"os.path.exists",
"pelican.tests.support.mute",
"tempfile.mkdtemp",
"pelican.Pelican",
"shutil.rmtree",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((428, 464), 'os.path.join', 'os.path.join', (['CURRENT_DIR', '"""content"""'], {}), "(CURRENT_DIR, 'content')\n", (440, 464), False, 'import os\n'), ((320, 345), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (335, 345), False, 'import os\n'), ((377, 412), 'os.path.join', 'os.path.join', (['CURRENT_DIR', '"""output"""'], {}), "(CURRENT_DIR, 'output')\n", (389, 412), False, 'import os\n'), ((554, 583), 'tempfile.mkdtemp', 'mkdtemp', ([], {'prefix': '"""pelicanfly."""'}), "(prefix='pelicanfly.')\n", (561, 583), False, 'from tempfile import mkdtemp\n'), ((695, 734), 'os.path.join', 'os.path.join', (['pelicanfly_path', '"""static"""'], {}), "(pelicanfly_path, 'static')\n", (707, 734), False, 'import os\n'), ((759, 875), 'pelican.settings.read_settings', 'read_settings', ([], {'path': 'None', 'override': "{'PATH': INPUT_PATH, 'OUTPUT_PATH': self.temp_path, 'PLUGINS': [pelicanfly]}"}), "(path=None, override={'PATH': INPUT_PATH, 'OUTPUT_PATH': self.\n temp_path, 'PLUGINS': [pelicanfly]})\n", (772, 875), False, 'from pelican.settings import read_settings\n'), ((1059, 1081), 'pelican.Pelican', 'Pelican', (['self.settings'], {}), '(self.settings)\n', (1066, 1081), False, 'from pelican import Pelican\n'), ((1167, 1189), 'shutil.rmtree', 'rmtree', (['self.temp_path'], {}), '(self.temp_path)\n', (1173, 1189), False, 'from shutil import rmtree\n'), ((626, 660), 'os.path.split', 'os.path.split', (['pelicanfly.__file__'], {}), '(pelicanfly.__file__)\n', (639, 660), False, 'import os\n'), ((1614, 1673), 'os.path.join', 'os.path.join', (['self.temp_path', '"""pages"""', '"""a-sample-page.html"""'], {}), "(self.temp_path, 'pages', 'a-sample-page.html')\n", (1626, 1673), False, 'import os\n'), ((1861, 1909), 'os.path.join', 'os.path.join', (['self.pelicanfly_static', 'static_dir'], {}), '(self.pelicanfly_static, static_dir)\n', (1873, 1909), False, 'import os\n'), ((1941, 1964), 'os.listdir', 'os.listdir', (['static_path'], {}), '(static_path)\n', (1951, 1964), False, 'import os\n'), ((1090, 1100), 'pelican.tests.support.mute', 'mute', (['(True)'], {}), '(True)\n', (1094, 1100), False, 'from pelican.tests.support import mute\n'), ((1993, 2055), 'os.path.join', 'os.path.join', (['self.temp_path', '"""theme"""', 'static_dir', 'static_file'], {}), "(self.temp_path, 'theme', static_dir, static_file)\n", (2005, 2055), False, 'import os\n'), ((2128, 2152), 'os.path.exists', 'os.path.exists', (['in_theme'], {}), '(in_theme)\n', (2142, 2152), False, 'import os\n')]
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name="onifw",
version="1.13",
description="pentest framework",
author="w0bos",
author_email="<EMAIL>",
packages=["packaging"]
)
|
[
"distutils.core.setup"
] |
[((57, 194), 'distutils.core.setup', 'setup', ([], {'name': '"""onifw"""', 'version': '"""1.13"""', 'description': '"""pentest framework"""', 'author': '"""w0bos"""', 'author_email': '"""<EMAIL>"""', 'packages': "['packaging']"}), "(name='onifw', version='1.13', description='pentest framework', author\n ='w0bos', author_email='<EMAIL>', packages=['packaging'])\n", (62, 194), False, 'from distutils.core import setup\n')]
|
"""
This is an example of how to add the occupancy into the model.
"""
from cobs import Model
Model.set_energyplus_folder("D:\\Software\\EnergyPlus\\")
mode = 1
model = Model(idf_file_name="../data/buildings/5ZoneAirCooled.idf",
weather_file="../data/weathers/USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw",)
# Example of how to check the required fields for a new component
print(model.get_sub_configuration("Output:Variable"))
# --------------------------------------------------------------------------
# Sample of adding occupancy movement manually
# --------------------------------------------------------------------------
if mode == 1:
# Setup values for new components
occu_schedule_values = {"Name": "Test_Schedule",
"Schedule Type Limits Name": "Fraction",
"Field 1": "Through: 12/31",
"Field 2": "For: Alldays",
"Field 3": "Until: 05:00",
"Field 4": "0",
"Field 5": "Until 09:00",
"Field 6": "0.5",
"Field 7": "Until 17:00",
"Field 8": "0.95",
"Field 9": "Until 24:00",
"Field 10": "0.5"}
activity_values = {"Name": "Test_Activity_Schedule",
"Schedule Type Limits Name": "Any Number",
"Field 1": "Through:12/31",
"Field 2": "For: Alldays",
"Field 3": "Until 24:00",
"Field 4": "117"}
work_efficiency = {"Name": "Test_Work_Schedule",
"Schedule Type Limits Name": "Fraction",
"Field 1": "Through:12/31",
"Field 2": "For: Alldays",
"Field 3": "Until 24:00",
"Field 4": "0.1"}
cloth_schedule = {"Name": "Test_Cloth_Schedule",
"Schedule Type Limits Name": "Fraction",
"Field 1": "Through:12/31",
"Field 2": "For: Alldays",
"Field 3": "Until 24:00",
"Field 4": "0.9"}
air_velocity = {"Name": "Test_Air_Velocity",
"Schedule Type Limits Name": "Fraction",
"Field 1": "Through:12/31",
"Field 2": "For: Alldays",
"Field 3": "Until 24:00",
"Field 4": "0.25"}
people_values = {"Name": "Test",
"Zone or ZoneList Name": "SPACE1-1",
"Number of People Schedule Name": "Test_Schedule",
"Number of People": 5,
"Activity Level Schedule Name": "Test_Activity_Schedule",
"Work Efficiency Schedule Name": "Test_Work_Schedule",
"Clothing Insulation Schedule Name": "Test_Cloth_Schedule",
"Air Velocity Schedule Name": "Test_Air_Velocity",
"Thermal Comfort Model 1 Type": "Fanger"}
print(model.add_configuration("Schedule:Compact", values=occu_schedule_values))
print(model.add_configuration("Schedule:Compact", values=activity_values))
print(model.add_configuration("Schedule:Compact", values=work_efficiency))
print(model.add_configuration("Schedule:Compact", values=cloth_schedule))
print(model.add_configuration("Schedule:Compact", values=air_velocity))
print(model.add_configuration("People", values=people_values))
print(model.add_configuration("Output:Variable", values={"Variable Name": "Zone Thermal Comfort Fanger Model PMV",
"Reporting_Frequency": "timestep"}))
# --------------------------------------------------------------------------
# Sample of adding occupancy using OccupancyGenerator
# --------------------------------------------------------------------------
elif mode == 2:
from cobs import OccupancyGenerator as OG
OG(model).generate_daily_schedule(add_to_model=True)
# Example of check what is available for the state value
print(model.get_current_state_values())
if __name__ == '__main__':
state = model.reset()
while not model.is_terminate():
print(state)
state = model.step(list())
print("Done")
|
[
"cobs.Model",
"cobs.Model.set_energyplus_folder",
"cobs.OccupancyGenerator"
] |
[((96, 153), 'cobs.Model.set_energyplus_folder', 'Model.set_energyplus_folder', (['"""D:\\\\Software\\\\EnergyPlus\\\\"""'], {}), "('D:\\\\Software\\\\EnergyPlus\\\\')\n", (123, 153), False, 'from cobs import Model\n'), ((173, 315), 'cobs.Model', 'Model', ([], {'idf_file_name': '"""../data/buildings/5ZoneAirCooled.idf"""', 'weather_file': '"""../data/weathers/USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw"""'}), "(idf_file_name='../data/buildings/5ZoneAirCooled.idf', weather_file=\n '../data/weathers/USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw')\n", (178, 315), False, 'from cobs import Model\n'), ((4120, 4129), 'cobs.OccupancyGenerator', 'OG', (['model'], {}), '(model)\n', (4122, 4129), True, 'from cobs import OccupancyGenerator as OG\n')]
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import logging
import os
import re
import threading
import time
from collections import namedtuple
import psutil
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.util.dirutil import safe_open
logger = logging.getLogger(__name__)
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and
re-used for the given jvm args and classpath on subsequent runs.
"""
class Endpoint(namedtuple('Endpoint', ['exe', 'fingerprint', 'pid', 'port'])):
"""The coordinates for a nailgun server controlled by NailgunExecutor."""
@classmethod
def parse(cls, endpoint):
"""Parses an endpoint from a string of the form exe:fingerprint:pid:port"""
components = endpoint.split(':')
if len(components) != 4:
raise ValueError('Invalid endpoint spec {}'.format(endpoint))
exe, fingerprint, pid, port = components
return cls(exe, fingerprint, int(pid), int(port))
# Used to identify we own a given java nailgun server
_PANTS_NG_ARG_PREFIX = b'-Dpants.buildroot'
_PANTS_NG_ARG = b'{0}={1}'.format(_PANTS_NG_ARG_PREFIX, get_buildroot())
_PANTS_FINGERPRINT_ARG_PREFIX = b'-Dpants.nailgun.fingerprint='
@staticmethod
def _check_pid(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
@staticmethod
def create_owner_arg(workdir):
# Currently the owner is identified via the full path to the workdir.
return b'-Dpants.nailgun.owner={0}'.format(workdir)
@classmethod
def _create_fingerprint_arg(cls, fingerprint):
return cls._PANTS_FINGERPRINT_ARG_PREFIX + fingerprint
@classmethod
def parse_fingerprint_arg(cls, args):
for arg in args:
components = arg.split(cls._PANTS_FINGERPRINT_ARG_PREFIX)
if len(components) == 2 and components[0] == '':
return components[1]
return None
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
digest.update(''.join(sorted(jvm_options)))
digest.update(''.join(sorted(classpath))) # TODO(<NAME>): hash classpath contents?
digest.update(repr(java_version))
return digest.hexdigest()
@staticmethod
def _log_kill(pid, port=None):
port_desc = ' port:{0}'.format(port if port else '')
logger.info('killing ng server @ pid:{pid}{port}'.format(pid=pid, port=port_desc))
@classmethod
def _find_ngs(cls, everywhere=False):
def cmdline_matches(cmdline):
if everywhere:
return any(filter(lambda arg: arg.startswith(cls._PANTS_NG_ARG_PREFIX), cmdline))
else:
return cls._PANTS_NG_ARG in cmdline
for proc in psutil.process_iter():
try:
if b'java' == proc.name and cmdline_matches(proc.cmdline):
yield proc
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
@classmethod
def killall(cls, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True`` Kills all pants-started nailguns on this machine; otherwise
restricts the nailguns killed to those started for the current build root.
"""
success = True
for proc in cls._find_ngs(everywhere=everywhere):
try:
cls._log_kill(proc.pid)
proc.kill()
except (psutil.AccessDenied, psutil.NoSuchProcess):
success = False
return success
@staticmethod
def _find_ng_listen_port(proc):
for connection in proc.get_connections(kind=b'tcp'):
if connection.status == b'LISTEN':
host, port = connection.laddr
return port
return None
@classmethod
def _find(cls, workdir):
owner_arg = cls.create_owner_arg(workdir)
for proc in cls._find_ngs(everywhere=False):
try:
if owner_arg in proc.cmdline:
fingerprint = cls.parse_fingerprint_arg(proc.cmdline)
port = cls._find_ng_listen_port(proc)
exe = proc.cmdline[0]
if fingerprint and port:
return cls.Endpoint(exe, fingerprint, proc.pid, port)
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
return None
def __init__(self, workdir, nailgun_classpath, distribution=None, ins=None):
super(NailgunExecutor, self).__init__(distribution=distribution)
self._nailgun_classpath = maybe_list(nailgun_classpath)
if not isinstance(workdir, string_types):
raise ValueError('Workdir must be a path string, given {workdir}'.format(workdir=workdir))
self._workdir = workdir
self._ng_out = os.path.join(workdir, 'stdout')
self._ng_err = os.path.join(workdir, 'stderr')
self._ins = ins
def _runner(self, classpath, main, jvm_options, args, cwd=None):
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, cwd=None):
nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr)
try:
logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd))
return nailgun(main, cwd, *args)
except nailgun.NailgunError as e:
self.kill()
raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'
.format(ng_desc=nailgun, main=main, args=' '.join(args), msg=e))
return Runner()
def kill(self):
"""Kills the nailgun server owned by this executor if its currently running."""
endpoint = self._get_nailgun_endpoint()
if endpoint:
self._log_kill(endpoint.pid, endpoint.port)
try:
os.kill(endpoint.pid, 9)
except OSError:
pass
def _get_nailgun_endpoint(self):
endpoint = self._find(self._workdir)
if endpoint:
logger.debug('Found ng server launched with {endpoint}'.format(endpoint=repr(endpoint)))
return endpoint
def _find_and_stat_nailgun_server(self, new_fingerprint):
endpoint = self._get_nailgun_endpoint()
running = endpoint and self._check_pid(endpoint.pid)
updated = endpoint and endpoint.fingerprint != new_fingerprint
updated = updated or (endpoint and endpoint.exe != self._distribution.java)
return endpoint, running, updated
_nailgun_spawn_lock = threading.Lock()
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr):
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
endpoint, running, updated = self._find_and_stat_nailgun_server(new_fingerprint)
if running and not updated:
return self._create_ngclient(endpoint.port, stdout, stderr)
with self._nailgun_spawn_lock:
endpoint, running, updated = self._find_and_stat_nailgun_server(new_fingerprint)
if running and not updated:
return self._create_ngclient(endpoint.port, stdout, stderr)
if running and updated:
logger.debug('Killing ng server launched with {endpoint}'.format(endpoint=repr(endpoint)))
self.kill()
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr)
# 'NGServer started on 127.0.0.1, port 53785.'
_PARSE_NG_PORT = re.compile('.*\s+port\s+(\d+)\.$')
def _parse_nailgun_port(self, line):
match = self._PARSE_NG_PORT.match(line)
if not match:
raise NailgunClient.NailgunError('Failed to determine spawned ng port from response'
' line: {line}'.format(line=line))
return int(match.group(1))
def _await_nailgun_server(self, stdout, stderr, debug_desc):
# TODO(<NAME>) Make these cmdline/config parameters once we have a global way to fetch
# the global options scope.
nailgun_timeout_seconds = 10
max_socket_connect_attempts = 5
nailgun = None
port_parse_start = time.time()
with safe_open(self._ng_out, 'r') as ng_out:
while not nailgun:
started = ng_out.readline()
if started.find('Listening for transport dt_socket at address:') >= 0:
nailgun_timeout_seconds = 60
logger.warn('Timeout extended to {timeout} seconds for debugger to attach to ng server.'
.format(timeout=nailgun_timeout_seconds))
started = ng_out.readline()
if started:
port = self._parse_nailgun_port(started)
nailgun = self._create_ngclient(port, stdout, stderr)
logger.debug('Detected ng server up on port {port}'.format(port=port))
elif time.time() - port_parse_start > nailgun_timeout_seconds:
raise NailgunClient.NailgunError(
'Failed to read ng output after {sec} seconds.\n {desc}'
.format(sec=nailgun_timeout_seconds, desc=debug_desc))
attempt = 0
while nailgun:
sock = nailgun.try_connect()
if sock:
sock.close()
endpoint = self._get_nailgun_endpoint()
if endpoint:
logger.debug('Connected to ng server launched with {endpoint}'
.format(endpoint=repr(endpoint)))
else:
raise NailgunClient.NailgunError('Failed to connect to ng server.')
return nailgun
elif attempt > max_socket_connect_attempts:
raise nailgun.NailgunError('Failed to connect to ng output after {count} connect attempts'
.format(count=max_socket_connect_attempts))
attempt += 1
logger.debug('Failed to connect on attempt {count}'.format(count=attempt))
time.sleep(0.1)
def _create_ngclient(self, port, stdout, stderr):
return NailgunClient(port=port, ins=self._ins, out=stdout, err=stderr, workdir=get_buildroot())
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr):
logger.debug('No ng server found with fingerprint {fingerprint}, spawning...'
.format(fingerprint=fingerprint))
with safe_open(self._ng_out, 'w'):
pass # truncate
pid = os.fork()
if pid != 0:
      # In the parent process - block on ng being up for connections
return self._await_nailgun_server(stdout, stderr,
'jvm_options={jvm_options} classpath={classpath}'
.format(jvm_options=jvm_options, classpath=classpath))
os.setsid()
in_fd = open('/dev/null', 'r')
out_fd = safe_open(self._ng_out, 'w')
err_fd = safe_open(self._ng_err, 'w')
java = SubprocessExecutor(self._distribution)
jvm_options = jvm_options + [self._PANTS_NG_ARG,
self.create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint)]
process = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=in_fd,
stdout=out_fd,
stderr=err_fd,
close_fds=True)
logger.debug('Spawned ng server with fingerprint {fingerprint} @ {pid}'
.format(fingerprint=fingerprint, pid=process.pid))
# Prevents finally blocks and atexit handlers from being executed, unlike sys.exit(). We
# don't want to execute finally blocks because we might, e.g., clean up tempfiles that the
# parent still needs.
os._exit(0)
def __str__(self):
return 'NailgunExecutor({dist}, server={endpoint})' \
.format(dist=self._distribution, endpoint=self._get_nailgun_endpoint())
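# Illustrative call pattern (an assumption based on the Runner interface above,
# not an example shipped with pants; classpath/main are placeholders):
#   executor = NailgunExecutor(workdir, nailgun_classpath)
#   runner = executor.runner(classpath, 'com.example.Main', args=['--version'])
#   result = runner.run()   # proxies the call through a pooled nailgun server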
|
[
"psutil.process_iter",
"hashlib.sha1",
"pants.base.build_environment.get_buildroot",
"pants.util.dirutil.safe_open",
"time.time",
"threading.Lock",
"os.kill",
"time.sleep",
"os._exit",
"os.setsid",
"collections.namedtuple",
"os.fork",
"pants.java.executor.SubprocessExecutor",
"pants.java.nailgun_client.NailgunClient.NailgunError",
"twitter.common.collections.maybe_list",
"os.path.join",
"logging.getLogger",
"re.compile"
] |
[((729, 756), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (746, 756), False, 'import logging\n'), ((1239, 1300), 'collections.namedtuple', 'namedtuple', (['"""Endpoint"""', "['exe', 'fingerprint', 'pid', 'port']"], {}), "('Endpoint', ['exe', 'fingerprint', 'pid', 'port'])\n", (1249, 1300), False, 'from collections import namedtuple\n'), ((7528, 7544), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (7542, 7544), False, 'import threading\n'), ((8488, 8526), 're.compile', 're.compile', (['""".*\\\\s+port\\\\s+(\\\\d+)\\\\.$"""'], {}), "('.*\\\\s+port\\\\s+(\\\\d+)\\\\.$')\n", (8498, 8526), False, 'import re\n'), ((1915, 1930), 'pants.base.build_environment.get_buildroot', 'get_buildroot', ([], {}), '()\n', (1928, 1930), False, 'from pants.base.build_environment import get_buildroot\n'), ((3130, 3144), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (3142, 3144), False, 'import hashlib\n'), ((3817, 3838), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (3836, 3838), False, 'import psutil\n'), ((5464, 5493), 'twitter.common.collections.maybe_list', 'maybe_list', (['nailgun_classpath'], {}), '(nailgun_classpath)\n', (5474, 5493), False, 'from twitter.common.collections import maybe_list\n'), ((5686, 5717), 'os.path.join', 'os.path.join', (['workdir', '"""stdout"""'], {}), "(workdir, 'stdout')\n", (5698, 5717), False, 'import os\n'), ((5737, 5768), 'os.path.join', 'os.path.join', (['workdir', '"""stderr"""'], {}), "(workdir, 'stderr')\n", (5749, 5768), False, 'import os\n'), ((9119, 9130), 'time.time', 'time.time', ([], {}), '()\n', (9128, 9130), False, 'import time\n'), ((11247, 11256), 'os.fork', 'os.fork', ([], {}), '()\n', (11254, 11256), False, 'import os\n'), ((11587, 11598), 'os.setsid', 'os.setsid', ([], {}), '()\n', (11596, 11598), False, 'import os\n'), ((11647, 11675), 'pants.util.dirutil.safe_open', 'safe_open', (['self._ng_out', '"""w"""'], {}), "(self._ng_out, 'w')\n", (11656, 11675), False, 'from pants.util.dirutil import safe_open\n'), ((11689, 11717), 'pants.util.dirutil.safe_open', 'safe_open', (['self._ng_err', '"""w"""'], {}), "(self._ng_err, 'w')\n", (11698, 11717), False, 'from pants.util.dirutil import safe_open\n'), ((11730, 11768), 'pants.java.executor.SubprocessExecutor', 'SubprocessExecutor', (['self._distribution'], {}), '(self._distribution)\n', (11748, 11768), False, 'from pants.java.executor import Executor, SubprocessExecutor\n'), ((12685, 12696), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (12693, 12696), False, 'import os\n'), ((2054, 2069), 'os.kill', 'os.kill', (['pid', '(0)'], {}), '(pid, 0)\n', (2061, 2069), False, 'import os\n'), ((9140, 9168), 'pants.util.dirutil.safe_open', 'safe_open', (['self._ng_out', '"""r"""'], {}), "(self._ng_out, 'r')\n", (9149, 9168), False, 'from pants.util.dirutil import safe_open\n'), ((10782, 10797), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10792, 10797), False, 'import time\n'), ((11183, 11211), 'pants.util.dirutil.safe_open', 'safe_open', (['self._ng_out', '"""w"""'], {}), "(self._ng_out, 'w')\n", (11192, 11211), False, 'from pants.util.dirutil import safe_open\n'), ((6887, 6911), 'os.kill', 'os.kill', (['endpoint.pid', '(9)'], {}), '(endpoint.pid, 9)\n', (6894, 6911), False, 'import os\n'), ((10934, 10949), 'pants.base.build_environment.get_buildroot', 'get_buildroot', ([], {}), '()\n', (10947, 10949), False, 'from pants.base.build_environment import get_buildroot\n'), ((10363, 10424), 'pants.java.nailgun_client.NailgunClient.NailgunError', 
'NailgunClient.NailgunError', (['"""Failed to connect to ng server."""'], {}), "('Failed to connect to ng server.')\n", (10389, 10424), False, 'from pants.java.nailgun_client import NailgunClient\n'), ((9789, 9800), 'time.time', 'time.time', ([], {}), '()\n', (9798, 9800), False, 'import time\n')]
|
from conans import ConanFile, CMake
from conans.tools import unzip, download
import os
import shutil
class WebsocketppConan(ConanFile):
name = "websocketpp"
boost_version = "1.68.0"
openssl_version = "1.1.1"
zlib_version = "1.2.11"
with open(os.path.join(os.path.dirname(os.path.realpath(
__file__)), "VERSION.txt"), 'r') as version_file:
version = version_file.read()
settings = "os", "arch", "compiler", "build_type"
description = "C++ websocket client/server library"
generators = "cmake", "virtualenv"
requires = f"boost/{boost_version}@kapilsh/release", \
f"openssl/{openssl_version}@kapilsh/release"
exports = "VERSION.txt"
url = "https://github.com/zaphoyd/websocketpp"
license = "http://eigen.tuxfamily.org/index.php?title=Main_Page#License"
github_url = "https://github.com/zaphoyd/websocketpp/archive"
def source(self):
tar_file = "{}.tar.gz".format(self.version)
download("{}/{}".format(self.github_url, tar_file), tar_file)
unzip(tar_file)
os.unlink(tar_file)
shutil.move(f"websocketpp-{self.version}", "websocketpp")
def build(self):
cmake = CMake(self)
cmake.definitions['BUILD_TESTS'] = False
cmake.definitions['BUILD_EXAMPLES'] = False
cmake.configure(source_folder="websocketpp")
cmake.build()
cmake.install()
def package(self):
pass
def package_info(self):
self.cpp_info.includedirs = ['include']
self.env_info.CPATH.append("{}/include".format(self.package_folder))
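# Illustrative local build (an assumption; the user/channel mirrors the ones in
# `requires` above, nothing in this recipe mandates them):
#   conan create . kapilsh/release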
|
[
"os.unlink",
"os.path.realpath",
"conans.CMake",
"shutil.move",
"conans.tools.unzip"
] |
[((1059, 1074), 'conans.tools.unzip', 'unzip', (['tar_file'], {}), '(tar_file)\n', (1064, 1074), False, 'from conans.tools import unzip, download\n'), ((1083, 1102), 'os.unlink', 'os.unlink', (['tar_file'], {}), '(tar_file)\n', (1092, 1102), False, 'import os\n'), ((1111, 1168), 'shutil.move', 'shutil.move', (['f"""websocketpp-{self.version}"""', '"""websocketpp"""'], {}), "(f'websocketpp-{self.version}', 'websocketpp')\n", (1122, 1168), False, 'import shutil\n'), ((1207, 1218), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1212, 1218), False, 'from conans import ConanFile, CMake\n'), ((294, 320), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (310, 320), False, 'import os\n')]
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 15:45:33 2019
@author: ejreidelbach
:DESCRIPTION:
:REQUIRES:
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import os
import pandas as pd
import pathlib
import tqdm
import collections
import matplotlib.pyplot as plt
import math
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def function_name(var1, var2):
'''
Purpose: Stuff goes here
Inputs
------
var1 : type
description
var2 : type
description
Outputs
-------
var1 : type
description
var2 : type
description
'''
def bucketize(point, bucket_size):
"""floor the point to the next lower multiple of bucket_size"""
return bucket_size * math.floor(point / bucket_size)
def make_histogram(points, bucket_size):
"""buckets the points and counts how many in each bucket"""
    return collections.Counter(bucketize(point, bucket_size) for point in points)
def plot_histogram(points, bucket_size, title=""):
histogram = make_histogram(points, bucket_size)
plt.bar(histogram.keys(), histogram.values(), width=bucket_size)
plt.title(title)
plt.show()
def plot_scatter(xs, ys1, ys2):
plt.scatter(xs, ys1, marker='.', color='black', label='ys1')
plt.scatter(xs, ys2, marker='.', color='gray', label='ys2')
plt.xlabel('xs')
plt.ylabel('ys')
plt.legend(loc=9)
plt.show()
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def de_mean(x):
"""translate x by subtracting its mean (so the result has mean 0)"""
x_bar = mean(x)
return [x_i - x_bar for x_i in x]
def mean(x):
return sum(x) / len(x)
def variance(x):
"""assumes x has at least two elements"""
n = len(x)
deviations = de_mean(x)
return sum_of_squares(deviations) / (n - 1)
def get_column(A, j):
return [A_i[j] for A_i in A]
def standard_deviation(x):
return math.sqrt(variance(x))
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def covariance(x, y):
n = len(x)
return dot(de_mean(x), de_mean(y)) / (n - 1)
def correlation(x, y):
stdev_x = standard_deviation(x)
stdev_y = standard_deviation(y)
if stdev_x > 0 and stdev_y > 0:
return covariance(x, y) / stdev_x / stdev_y
else:
return 0 # if no variation, correlation is zero
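# Illustrative sanity checks (not part of the original script): a perfectly
# linear pair correlates to ~1.0, while a constant series has zero variation
# and therefore zero correlation by the guard above.
assert abs(correlation([1, 2, 3, 4], [2, 4, 6, 8]) - 1.0) < 1e-9
assert correlation([1, 2, 3, 4], [5, 5, 5, 5]) == 0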
#==============================================================================
# Working Code
#==============================================================================
#from aggregate_CFBStats_by_category import aggregate_data_by_category
# Set the project working directory
#path_project = pathlib.Path(__file__).resolve().parents[2]
#os.chdir(path_project)
path_dir = pathlib.Path('/home/ejreidelbach/Projects/kagglePUBG/data/raw')
os.chdir(path_dir)
df = pd.read_csv('train_V2.csv', nrows=10000)
plot_histogram(df['kills'], 1)
plot_scatter(df.index, df['kills'], df['winPlacePerc'])
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"math.floor",
"pathlib.Path",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.chdir"
] |
[((3421, 3484), 'pathlib.Path', 'pathlib.Path', (['"""/home/ejreidelbach/Projects/kagglePUBG/data/raw"""'], {}), "('/home/ejreidelbach/Projects/kagglePUBG/data/raw')\n", (3433, 3484), False, 'import pathlib\n'), ((3485, 3503), 'os.chdir', 'os.chdir', (['path_dir'], {}), '(path_dir)\n', (3493, 3503), False, 'import os\n'), ((3510, 3550), 'pandas.read_csv', 'pd.read_csv', (['"""train_V2.csv"""'], {'nrows': '(10000)'}), "('train_V2.csv', nrows=10000)\n", (3521, 3550), True, 'import pandas as pd\n'), ((1662, 1678), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1671, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1691, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1795), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys1'], {'marker': '"""."""', 'color': '"""black"""', 'label': '"""ys1"""'}), "(xs, ys1, marker='.', color='black', label='ys1')\n", (1746, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1859), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys2'], {'marker': '"""."""', 'color': '"""gray"""', 'label': '"""ys2"""'}), "(xs, ys2, marker='.', color='gray', label='ys2')\n", (1811, 1859), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""xs"""'], {}), "('xs')\n", (1875, 1881), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ys"""'], {}), "('ys')\n", (1896, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1907, 1924), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(9)'}), '(loc=9)\n', (1917, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1929, 1939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1937, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1277, 1308), 'math.floor', 'math.floor', (['(point / bucket_size)'], {}), '(point / bucket_size)\n', (1287, 1308), False, 'import math\n')]
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import cache_control
from django.views.generic import RedirectView
from django.views.i18n import JavaScriptCatalog
from moj_irat.views import HealthcheckView, PingJsonView
from mtp_common.analytics import genericised_pageview
from mtp_common.auth import views as auth_views
from mtp_common.auth.exceptions import Unauthorized
from mtp_common.metrics.views import metrics_view
from mtp_auth.views import AcceptRequestView
from user_admin.views import UserCreationView, UserUpdateView
from views import FAQView
def login_view(request):
return auth_views.login(request, template_name='mtp_auth/login.html', extra_context={
'start_page_url': settings.START_PAGE_URL,
'google_analytics_pageview': genericised_pageview(request, _('Sign in')),
})
def root_view(request):
if not (request.can_access_prisoner_location or request.can_access_security):
raise Unauthorized() # middleware causes user to be logged-out
if request.can_access_prisoner_location and not request.can_access_security:
return redirect(reverse('location_file_upload'))
return redirect(reverse('security:dashboard'))
# NB: API settings has certain Noms Ops URLs which will need to be updated
# if they change: settings, feedback, and notifications
urlpatterns = i18n_patterns(
url(r'^$', root_view, name='root'),
url(r'^prisoner-location/', include('prisoner_location_admin.urls')),
url(r'^settings/', include('settings.urls')),
url(r'^faq/', FAQView.as_view(), name='faq'),
url(r'^feedback/', include('feedback.urls')),
url(r'^', include('mtp_auth.urls')),
url(r'^login/$', login_view, name='login'),
url(
r'^logout/$', auth_views.logout, {
'template_name': 'mtp_auth/login.html',
'next_page': reverse_lazy('login'),
}, name='logout'
),
url(
r'^password_change/$', auth_views.password_change, {
'template_name': 'mtp_common/auth/password_change.html',
'cancel_url': reverse_lazy(settings.LOGIN_REDIRECT_URL),
}, name='password_change'
),
url(
r'^create_password/$', auth_views.password_change_with_code, {
'template_name': 'mtp_common/auth/password_change_with_code.html',
'cancel_url': reverse_lazy(settings.LOGIN_REDIRECT_URL),
}, name='password_change_with_code'
),
url(
r'^password_change_done/$', auth_views.password_change_done, {
'template_name': 'mtp_common/auth/password_change_done.html',
'cancel_url': reverse_lazy(settings.LOGIN_REDIRECT_URL),
}, name='password_change_done'
),
url(
r'^reset-password/$', auth_views.reset_password, {
'password_change_url': reverse_lazy('password_change_with_code'),
'template_name': 'mtp_common/auth/reset-password.html',
'cancel_url': reverse_lazy(settings.LOGIN_REDIRECT_URL),
}, name='reset_password'
),
url(
r'^reset-password-done/$', auth_views.reset_password_done, {
'template_name': 'mtp_common/auth/reset-password-done.html',
'cancel_url': reverse_lazy(settings.LOGIN_REDIRECT_URL),
}, name='reset_password_done'
),
url(
r'^email_change/$', auth_views.email_change, {
'cancel_url': reverse_lazy('settings'),
}, name='email_change'
),
url(r'^', include('security.urls', namespace='security')),
# Override mtp_common.user_admin's /users/new/ view
url(r'^users/new/$', UserCreationView.as_view(), name='new-user'),
# Override mtp_common.user_admin's /users/{ID}/edit/ view
url(r'^users/(?P<username>[^/]+)/edit/$', UserUpdateView.as_view(), name='edit-user'),
url(r'^', include('mtp_common.user_admin.urls')),
url(
r'^users/request/(?P<account_request>\d+)/accept/$',
AcceptRequestView.as_view(),
name='accept-request'
),
url(r'^js-i18n.js$', cache_control(public=True, max_age=86400)(JavaScriptCatalog.as_view()), name='js-i18n'),
url(r'^404.html$', lambda request: TemplateResponse(request, 'mtp_common/errors/404.html', status=404)),
url(r'^500.html$', lambda request: TemplateResponse(request, 'mtp_common/errors/500.html', status=500)),
)
urlpatterns += [
url(r'^ping.json$', PingJsonView.as_view(
build_date_key='APP_BUILD_DATE',
commit_id_key='APP_GIT_COMMIT',
version_number_key='APP_BUILD_TAG',
), name='ping_json'),
url(r'^healthcheck.json$', HealthcheckView.as_view(), name='healthcheck_json'),
url(r'^metrics.txt$', metrics_view, name='prometheus_metrics'),
url(r'^favicon.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'images/favicon.ico', permanent=True)),
url(r'^robots.txt$', lambda request: HttpResponse('User-agent: *\nDisallow: /', content_type='text/plain')),
url(r'^\.well-known/security\.txt$', RedirectView.as_view(
url='https://raw.githubusercontent.com/ministryofjustice/security-guidance'
'/main/contact/vulnerability-disclosure-security.txt',
permanent=True,
)),
]
handler404 = 'mtp_common.views.page_not_found'
handler500 = 'mtp_common.views.server_error'
handler400 = 'mtp_common.views.bad_request'
|
[
"views.FAQView.as_view",
"django.conf.urls.include",
"django.http.HttpResponse",
"django.utils.translation.gettext_lazy",
"django.urls.reverse_lazy",
"django.views.i18n.JavaScriptCatalog.as_view",
"user_admin.views.UserUpdateView.as_view",
"django.views.decorators.cache.cache_control",
"django.template.response.TemplateResponse",
"django.urls.reverse",
"moj_irat.views.HealthcheckView.as_view",
"user_admin.views.UserCreationView.as_view",
"django.conf.urls.url",
"mtp_common.auth.exceptions.Unauthorized",
"moj_irat.views.PingJsonView.as_view",
"mtp_auth.views.AcceptRequestView.as_view",
"django.views.generic.RedirectView.as_view"
] |
[((1690, 1723), 'django.conf.urls.url', 'url', (['"""^$"""', 'root_view'], {'name': '"""root"""'}), "('^$', root_view, name='root')\n", (1693, 1723), False, 'from django.conf.urls import include, url\n'), ((1998, 2039), 'django.conf.urls.url', 'url', (['"""^login/$"""', 'login_view'], {'name': '"""login"""'}), "('^login/$', login_view, name='login')\n", (2001, 2039), False, 'from django.conf.urls import include, url\n'), ((4946, 5007), 'django.conf.urls.url', 'url', (['"""^metrics.txt$"""', 'metrics_view'], {'name': '"""prometheus_metrics"""'}), "('^metrics.txt$', metrics_view, name='prometheus_metrics')\n", (4949, 5007), False, 'from django.conf.urls import include, url\n'), ((1277, 1291), 'mtp_common.auth.exceptions.Unauthorized', 'Unauthorized', ([], {}), '()\n', (1289, 1291), False, 'from mtp_common.auth.exceptions import Unauthorized\n'), ((1493, 1522), 'django.urls.reverse', 'reverse', (['"""security:dashboard"""'], {}), "('security:dashboard')\n", (1500, 1522), False, 'from django.urls import reverse, reverse_lazy\n'), ((1758, 1797), 'django.conf.urls.include', 'include', (['"""prisoner_location_admin.urls"""'], {}), "('prisoner_location_admin.urls')\n", (1765, 1797), False, 'from django.conf.urls import include, url\n'), ((1823, 1847), 'django.conf.urls.include', 'include', (['"""settings.urls"""'], {}), "('settings.urls')\n", (1830, 1847), False, 'from django.conf.urls import include, url\n'), ((1869, 1886), 'views.FAQView.as_view', 'FAQView.as_view', ([], {}), '()\n', (1884, 1886), False, 'from views import FAQView\n'), ((1924, 1948), 'django.conf.urls.include', 'include', (['"""feedback.urls"""'], {}), "('feedback.urls')\n", (1931, 1948), False, 'from django.conf.urls import include, url\n'), ((1966, 1990), 'django.conf.urls.include', 'include', (['"""mtp_auth.urls"""'], {}), "('mtp_auth.urls')\n", (1973, 1990), False, 'from django.conf.urls import include, url\n'), ((3779, 3825), 'django.conf.urls.include', 'include', (['"""security.urls"""'], {'namespace': '"""security"""'}), "('security.urls', namespace='security')\n", (3786, 3825), False, 'from django.conf.urls import include, url\n'), ((3910, 3936), 'user_admin.views.UserCreationView.as_view', 'UserCreationView.as_view', ([], {}), '()\n', (3934, 3936), False, 'from user_admin.views import UserCreationView, UserUpdateView\n'), ((4064, 4088), 'user_admin.views.UserUpdateView.as_view', 'UserUpdateView.as_view', ([], {}), '()\n', (4086, 4088), False, 'from user_admin.views import UserCreationView, UserUpdateView\n'), ((4123, 4160), 'django.conf.urls.include', 'include', (['"""mtp_common.user_admin.urls"""'], {}), "('mtp_common.user_admin.urls')\n", (4130, 4160), False, 'from django.conf.urls import include, url\n'), ((4241, 4268), 'mtp_auth.views.AcceptRequestView.as_view', 'AcceptRequestView.as_view', ([], {}), '()\n', (4266, 4268), False, 'from mtp_auth.views import AcceptRequestView\n'), ((4685, 4811), 'moj_irat.views.PingJsonView.as_view', 'PingJsonView.as_view', ([], {'build_date_key': '"""APP_BUILD_DATE"""', 'commit_id_key': '"""APP_GIT_COMMIT"""', 'version_number_key': '"""APP_BUILD_TAG"""'}), "(build_date_key='APP_BUILD_DATE', commit_id_key=\n 'APP_GIT_COMMIT', version_number_key='APP_BUILD_TAG')\n", (4705, 4811), False, 'from moj_irat.views import HealthcheckView, PingJsonView\n'), ((4889, 4914), 'moj_irat.views.HealthcheckView.as_view', 'HealthcheckView.as_view', ([], {}), '()\n', (4912, 4914), False, 'from moj_irat.views import HealthcheckView, PingJsonView\n'), ((5037, 5125), 
'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': "(settings.STATIC_URL + 'images/favicon.ico')", 'permanent': '(True)'}), "(url=settings.STATIC_URL + 'images/favicon.ico',\n permanent=True)\n", (5057, 5125), False, 'from django.views.generic import RedirectView\n'), ((5278, 5452), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""https://raw.githubusercontent.com/ministryofjustice/security-guidance/main/contact/vulnerability-disclosure-security.txt"""', 'permanent': '(True)'}), "(url=\n 'https://raw.githubusercontent.com/ministryofjustice/security-guidance/main/contact/vulnerability-disclosure-security.txt'\n , permanent=True)\n", (5298, 5452), False, 'from django.views.generic import RedirectView\n'), ((1440, 1471), 'django.urls.reverse', 'reverse', (['"""location_file_upload"""'], {}), "('location_file_upload')\n", (1447, 1471), False, 'from django.urls import reverse, reverse_lazy\n'), ((2171, 2192), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""login"""'], {}), "('login')\n", (2183, 2192), False, 'from django.urls import reverse, reverse_lazy\n'), ((2391, 2432), 'django.urls.reverse_lazy', 'reverse_lazy', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (2403, 2432), False, 'from django.urls import reverse, reverse_lazy\n'), ((2660, 2701), 'django.urls.reverse_lazy', 'reverse_lazy', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (2672, 2701), False, 'from django.urls import reverse, reverse_lazy\n'), ((2934, 2975), 'django.urls.reverse_lazy', 'reverse_lazy', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (2946, 2975), False, 'from django.urls import reverse, reverse_lazy\n'), ((3126, 3167), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""password_change_with_code"""'], {}), "('password_change_with_code')\n", (3138, 3167), False, 'from django.urls import reverse, reverse_lazy\n'), ((3263, 3304), 'django.urls.reverse_lazy', 'reverse_lazy', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (3275, 3304), False, 'from django.urls import reverse, reverse_lazy\n'), ((3523, 3564), 'django.urls.reverse_lazy', 'reverse_lazy', (['settings.LOGIN_REDIRECT_URL'], {}), '(settings.LOGIN_REDIRECT_URL)\n', (3535, 3564), False, 'from django.urls import reverse, reverse_lazy\n'), ((3701, 3725), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""settings"""'], {}), "('settings')\n", (3713, 3725), False, 'from django.urls import reverse, reverse_lazy\n'), ((4333, 4374), 'django.views.decorators.cache.cache_control', 'cache_control', ([], {'public': '(True)', 'max_age': '(86400)'}), '(public=True, max_age=86400)\n', (4346, 4374), False, 'from django.views.decorators.cache import cache_control\n'), ((4375, 4402), 'django.views.i18n.JavaScriptCatalog.as_view', 'JavaScriptCatalog.as_view', ([], {}), '()\n', (4400, 4402), False, 'from django.views.i18n import JavaScriptCatalog\n'), ((4462, 4529), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""mtp_common/errors/404.html"""'], {'status': '(404)'}), "(request, 'mtp_common/errors/404.html', status=404)\n", (4478, 4529), False, 'from django.template.response import TemplateResponse\n'), ((4571, 4638), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', '"""mtp_common/errors/500.html"""'], {'status': '(500)'}), "(request, 'mtp_common/errors/500.html', status=500)\n", (4587, 4638), False, 'from django.template.response import 
TemplateResponse\n'), ((5165, 5237), 'django.http.HttpResponse', 'HttpResponse', (['"""User-agent: *\nDisallow: /"""'], {'content_type': '"""text/plain"""'}), '("""User-agent: *\nDisallow: /""", content_type=\'text/plain\')\n', (5177, 5237), False, 'from django.http import HttpResponse\n'), ((1133, 1145), 'django.utils.translation.gettext_lazy', '_', (['"""Sign in"""'], {}), "('Sign in')\n", (1134, 1145), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
#! /usr/bin/env python
"""
Copyright 2015-2018 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import os
class VideoReader(cv2.VideoCapture):
'''Read a video in batches.
Parameters
----------
path: str
Path to the video file.
batch_size: int, default = 1
Batch size for reading frames
framerate: float, default = None
Video framerate for determining timestamps
for each frame. If None, timestamps will
equal frame number.
gray: bool, default = False
If gray, return only the middle channel
'''
def __init__(self, path, batch_size=1, framerate=None, gray=False):
if isinstance(path, str):
if os.path.exists(path):
super(VideoReader, self).__init__(path)
self.path = path
else:
raise ValueError('file or path does not exist')
else:
raise TypeError('path must be str')
self.batch_size = batch_size
self.n_frames = int(self.get(cv2.CAP_PROP_FRAME_COUNT))
if framerate:
self.timestep = 1. / framerate
else:
self.timestep = 1.
self.idx = 0
self.fps = self.get(cv2.CAP_PROP_FPS)
        self.height = int(self.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.width = int(self.get(cv2.CAP_PROP_FRAME_WIDTH))
self.shape = (self.height, self.width)
self.finished = False
self.gray = gray
self._read = super(VideoReader, self).read
def read(self):
''' Read one frame
Returns
-------
frame: array
Image is returned of the frame if a frame exists.
Otherwise, return None.
'''
ret, frame = self._read()
if ret:
if self.gray:
frame = frame[..., 1][..., None]
self.idx += 1
return self.idx - 1, frame
else:
self.finished = True
return None
def read_batch(self):
''' Read in a batch of frames.
Returns
-------
        frames: array
            A batch of frames from the video.
        frames_idx: array
            The frame indices for the batch.
        timestamps: array
            Timestamps (frame index * timestep) for the batch.
'''
frames = []
frames_idx = []
for idx in range(self.batch_size):
frame = self.read()
if frame is not None and not self.finished:
frame_idx, frame = frame
frames.append(frame)
frames_idx.append(frame_idx)
empty = len(frames) == 0
if not empty:
frames = np.stack(frames)
frames_idx = np.array(frames_idx)
timestamps = frames_idx * self.timestep
return frames, frames_idx, timestamps
else:
return None
def close(self):
''' Close the VideoReader.
Returns
-------
bool
Returns True if successfully closed.
'''
self.release()
return not self.isOpened()
def __len__(self):
return int(np.ceil(self.n_frames / float(self.batch_size)))
def __getitem__(self, index):
if self.finished:
raise StopIteration
if isinstance(index, (int, np.integer)):
idx0 = index * self.batch_size
if self.idx != idx0:
self.set(cv2.CAP_PROP_POS_FRAMES, idx0 - 1)
self.idx = idx0
else:
raise NotImplementedError
return self.read_batch()
def __next__(self):
if self.finished:
raise StopIteration
else:
return self.read_batch()
def __del__(self):
self.close()
@property
def current_frame(self):
return int(self.get(cv2.CAP_PROP_POS_FRAMES))
@property
def current_time(self):
return self.get(cv2.CAP_PROP_POS_MSEC)
@property
def percent_finished(self):
return self.get(cv2.CAP_PROP_POS_AVI_RATIO) * 100
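# Illustrative usage (an assumption; 'sample.mp4' is a placeholder path):
# iterate a video in batches of 8 frames until the reader is exhausted.
if __name__ == '__main__':
    reader = VideoReader('sample.mp4', batch_size=8, framerate=30.0)
    while not reader.finished:
        batch = reader.read_batch()
        if batch is None:
            break
        frames, frames_idx, timestamps = batch
        print(frames.shape, timestamps[0])
    reader.close()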
|
[
"numpy.stack",
"os.path.exists",
"numpy.array"
] |
[((1231, 1251), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1245, 1251), False, 'import os\n'), ((3132, 3148), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (3140, 3148), True, 'import numpy as np\n'), ((3174, 3194), 'numpy.array', 'np.array', (['frames_idx'], {}), '(frames_idx)\n', (3182, 3194), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2019-2021 The EDID JSON Tools authors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# We need this stub of a script to be able to handle `pip install --editable .`
import setuptools
setuptools.setup()
|
[
"setuptools.setup"
] |
[((241, 259), 'setuptools.setup', 'setuptools.setup', ([], {}), '()\n', (257, 259), False, 'import setuptools\n')]
|
import os
import sys
from distutils.core import setup
from Cython.Build import cythonize
CYTHON_DEBUG = bool(os.getenv('CYTHON_DEBUG', ''))
build_dir = sys.argv.pop()
script_name = sys.argv.pop()
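# Calling convention (inferred from the two pops above, not documented in the
# original): the last two command-line arguments are consumed here, e.g.
#   python this_setup.py build_ext <script_name> <build_dir>
# and whatever remains in sys.argv is passed through to distutils' setup().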
setup(
ext_modules=cythonize(
script_name,
build_dir=build_dir,
quiet=not CYTHON_DEBUG,
annotate=CYTHON_DEBUG,
compiler_directives={"language_level": 3},
)
)
|
[
"Cython.Build.cythonize",
"os.getenv",
"sys.argv.pop"
] |
[((156, 170), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (168, 170), False, 'import sys\n'), ((185, 199), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (197, 199), False, 'import sys\n'), ((112, 141), 'os.getenv', 'os.getenv', (['"""CYTHON_DEBUG"""', '""""""'], {}), "('CYTHON_DEBUG', '')\n", (121, 141), False, 'import os\n'), ((224, 361), 'Cython.Build.cythonize', 'cythonize', (['script_name'], {'build_dir': 'build_dir', 'quiet': '(not CYTHON_DEBUG)', 'annotate': 'CYTHON_DEBUG', 'compiler_directives': "{'language_level': 3}"}), "(script_name, build_dir=build_dir, quiet=not CYTHON_DEBUG,\n annotate=CYTHON_DEBUG, compiler_directives={'language_level': 3})\n", (233, 361), False, 'from Cython.Build import cythonize\n')]
|
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
from django.conf import settings
from django.db import models
from django.urls import reverse
from .base import CommonModel
# from .content import Content
log = logging.getLogger(__name__)
class Namespace(CommonModel):
"""
Represents the aggregation of multiple namespaces across providers.
"""
class Meta:
ordering = ('name',)
owners = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='namespaces',
editable=True,
)
avatar_url = models.CharField(
max_length=256,
blank=True,
null=True,
verbose_name="Avatar URL"
)
location = models.CharField(
max_length=256,
blank=True,
null=True,
verbose_name="Location"
)
company = models.CharField(
max_length=256,
blank=True,
null=True,
verbose_name="Company Name"
)
email = models.CharField(
max_length=256,
blank=True,
null=True,
verbose_name="Email Address"
)
html_url = models.CharField(
max_length=256,
blank=True,
null=True,
verbose_name="Web Site URL"
)
is_vendor = models.BooleanField(default=False)
def get_absolute_url(self):
return reverse('api:v1:namespace_detail', args=(self.pk,))
@property
def content_counts(self):
        # FIXME: just stubbed out until Content is a thing
return 31
# return Content.objects \
# .filter(namespace=self.pk) \
# .values('content_type__name') \
# .annotate(count=models.Count('content_type__name')) \
# .order_by('content_type__name')
def is_owner(self, user):
log.warning('Namespace.is_owner is stubbed. FIXME! user=%s', user)
return True
# return self.owners.filter(pk=user.pk).exists()
|
[
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.urls.reverse",
"logging.getLogger"
] |
[((859, 886), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (876, 886), False, 'import logging\n'), ((1067, 1161), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['settings.AUTH_USER_MODEL'], {'related_name': '"""namespaces"""', 'editable': '(True)'}), "(settings.AUTH_USER_MODEL, related_name='namespaces',\n editable=True)\n", (1089, 1161), False, 'from django.db import models\n'), ((1207, 1294), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Avatar URL"""'}), "(max_length=256, blank=True, null=True, verbose_name=\n 'Avatar URL')\n", (1223, 1294), False, 'from django.db import models\n'), ((1343, 1428), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Location"""'}), "(max_length=256, blank=True, null=True, verbose_name='Location'\n )\n", (1359, 1428), False, 'from django.db import models\n'), ((1476, 1565), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Company Name"""'}), "(max_length=256, blank=True, null=True, verbose_name=\n 'Company Name')\n", (1492, 1565), False, 'from django.db import models\n'), ((1611, 1701), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Email Address"""'}), "(max_length=256, blank=True, null=True, verbose_name=\n 'Email Address')\n", (1627, 1701), False, 'from django.db import models\n'), ((1750, 1839), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Web Site URL"""'}), "(max_length=256, blank=True, null=True, verbose_name=\n 'Web Site URL')\n", (1766, 1839), False, 'from django.db import models\n'), ((1890, 1924), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1909, 1924), False, 'from django.db import models\n'), ((1973, 2024), 'django.urls.reverse', 'reverse', (['"""api:v1:namespace_detail"""'], {'args': '(self.pk,)'}), "('api:v1:namespace_detail', args=(self.pk,))\n", (1980, 2024), False, 'from django.urls import reverse\n')]
|
#!/usr/bin/env python3
if __name__ == '__main__':
from cassandra.rockets import RandomRocket
from cassandra.simulation import Simulation
from cassandra.physics.integrators import RK4
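    # simulation length and RK4 step size (units assumed to be seconds, per cassandra's conventions)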
SIM_TIME = 8
SIM_TIMESTEP = 0.01
rocket = RandomRocket()
integrator = RK4()
simulation = Simulation(rocket, integrator)
simulation.run(SIM_TIME, SIM_TIMESTEP)
simulation.animate()
|
[
"cassandra.rockets.RandomRocket",
"cassandra.physics.integrators.RK4",
"cassandra.simulation.Simulation"
] |
[((241, 255), 'cassandra.rockets.RandomRocket', 'RandomRocket', ([], {}), '()\n', (253, 255), False, 'from cassandra.rockets import RandomRocket\n'), ((271, 276), 'cassandra.physics.integrators.RK4', 'RK4', ([], {}), '()\n', (274, 276), False, 'from cassandra.physics.integrators import RK4\n'), ((293, 323), 'cassandra.simulation.Simulation', 'Simulation', (['rocket', 'integrator'], {}), '(rocket, integrator)\n', (303, 323), False, 'from cassandra.simulation import Simulation\n')]
|
import os
import random
import torch
# from torch.autograd import Variable
from torchvision import transforms as T
from PIL import Image, ImageDraw, ImageFont
class IMGProcess(object):
def __init__(self, source,
use_cuda=True,
img_path="imgs",
batch_size=100,
img_size=416,
confidence=0.5,
rebuild=True,
result="result"):
self.colors = source["pallete"]
self.num_classes = source["num_classes"]
self.classes = source["classes"]
self.confidence = confidence
self.rebuild = rebuild
self.result = result
self.use_cuda = use_cuda
self.img_size = img_size
self.font = ImageFont.truetype("arial.ttf", 15)
self.imgs = [os.path.join(img_path, img)
for img in os.listdir(img_path)]
self.sents_size = len(self.imgs)
self.bsz = min(batch_size, len(self.imgs))
self._step = 0
self._stop_step = self.sents_size // self.bsz
def _encode(self, x):
# convert the image to network input size and a tensor
encode = T.Compose([T.Resize((self.img_size, self.img_size)),
T.ToTensor()])
return encode(x)
def img2Var(self, imgs):
        # keep the opened batch separate so self.imgs keeps holding the file paths
        self.cur_imgs = imgs = [Image.open(img).convert('RGB') for img in imgs]
imgs_dim = torch.FloatTensor([img.size for img in imgs]).repeat(1, 2)
with torch.no_grad():
tensors = [self._encode(img).unsqueeze(0) for img in imgs]
vs = torch.cat(tensors, 0)
if self.use_cuda:
vs = vs.cuda()
return vs, imgs_dim
def predict(self, prediction, nms_conf=0.4):
"""
prediction:
0:3 - x, y, h, w
4 - confidence
5: - class score
"""
def iou(box1, box2):
x1, y1 = box1[:, 0], box1[:, 1]
b1_w, b1_h = box1[:, 2] - x1 + .1, box1[:, 3] - y1 + .1
            x2, y2 = box2[:, 0], box2[:, 1]
b2_w, b2_h = box2[:, 2] - x2 + .1, box2[:, 3] - y2 + .1
end_x = torch.min(x1 + b1_w, x2 + b2_w)
start_x = torch.max(x1, x2)
end_y = torch.min(y1 + b1_h, y2 + b2_h)
start_y = torch.max(y1, y2)
            # clamp to zero so disjoint boxes yield IoU 0 instead of a spurious value
            a = (end_x - start_x).clamp(min=0) * (end_y - start_y).clamp(min=0)
            return a / (b1_w * b1_h + b2_w * b2_h - a)
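        # zero out every detection whose objectness score falls below the confidence threshold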
conf_mask = (prediction[:, :, 4] >
self.confidence).float().unsqueeze(2)
prediction = prediction * conf_mask
# create a tensor the same size as prediction
box_corner = prediction.new(*prediction.size())
box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2] / 2)
box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3] / 2)
box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2] / 2)
box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3] / 2)
prediction[:, :, :4] = box_corner[:, :, :4]
outputs = []
for index in range(prediction.size(0)):
image_pred = prediction[index] # [10647, 85]
max_score, max_index = torch.max(
image_pred[:, 5:], 1, keepdim=True)
image_pred = torch.cat(
(image_pred[:, :5], max_score, max_index.float()), 1) # [10647, 7]
non_zero_ind = (torch.nonzero(image_pred[:, 4])).view(-1)
if non_zero_ind.size(0) == 0:
continue
image_pred_ = image_pred[non_zero_ind, :]
img_classes = torch.unique(image_pred_[:, -1])
objects, img_preds = [], []
name = self.this_img_names[index].split("/")[-1]
for c in img_classes:
c_mask = image_pred_ * \
(image_pred_[:, -1] == c).float().unsqueeze(1)
class_mask_ind = torch.nonzero(c_mask[:, -2]).squeeze()
image_pred_class = image_pred_[class_mask_ind].view(-1, 7)
_, conf_sort_index = torch.sort(
image_pred_class[:, 4], descending=True)
image_pred_class = image_pred_class[conf_sort_index]
for i in range(image_pred_class.size(0) - 1):
try:
ious = iou(image_pred_class[i].unsqueeze(
0), image_pred_class[i + 1:])
except IndexError:
break
iou_mask = (ious < nms_conf).float().unsqueeze(1)
image_pred_class[i + 1:] *= iou_mask
non_zero_ind = torch.nonzero(
image_pred_class[:, 4]).squeeze()
image_pred_class = image_pred_class[non_zero_ind].view(
-1, 7)
img_preds.append(image_pred_class)
objects += [self.classes[int(x[-1])] for x in image_pred_class]
outputs.append((name, objects))
img_preds = torch.cat(img_preds, dim=0)
if self.rebuild:
self.tensor2img(img_preds, index, name)
return outputs
def tensor2img(self, tensor, index, name):
imgs_dim = self.imgs_dim[index] / self.img_size
        img = self.cur_imgs[index]
draw = ImageDraw.Draw(img)
# print(imgs_dim)
# print(tensor)
# if tensor.is_cuda():
# tensor.to_cpu()
tensor[:, :4] = tensor.cpu()[:, :4].clamp_(0, self.img_size) * imgs_dim
for t in tensor:
s_x, s_y, e_x, e_y = list(map(int, t[:4]))
label = self.classes[int(t[-1])]
color = random.choice(self.colors)
draw.rectangle([s_x, s_y, e_x, e_y], outline=color)
draw.text([s_x, s_y], label, fill=color, font=self.font)
del draw
img.save(os.path.join(self.result, "res_{}".format(name)))
def __iter__(self):
return self
def __next__(self):
if self._step == self._stop_step:
self._step = 0
raise StopIteration()
_s = self._step * self.bsz
self._step += 1
self.this_img_names = self.imgs[_s:_s + self.bsz]
vs, self.imgs_dim = self.img2Var(self.this_img_names)
return vs
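# Illustrative usage sketch (assumed wiring, not part of the original file):
# `source` and `model` would come from the surrounding detection pipeline.
#   processor = IMGProcess(source, img_path="imgs", rebuild=True)
#   for batch in processor:                      # batched image tensors
#       detections = processor.predict(model(batch))
#       print(detections)                        # [(file_name, [labels, ...]), ...]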
|
[
"os.listdir",
"torch.unique",
"torch.FloatTensor",
"torch.cat",
"random.choice",
"torchvision.transforms.ToTensor",
"PIL.ImageFont.truetype",
"PIL.Image.open",
"torch.nonzero",
"torch.max",
"torch.sort",
"PIL.ImageDraw.Draw",
"torch.no_grad",
"os.path.join",
"torch.min",
"torchvision.transforms.Resize"
] |
[((763, 798), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(15)'], {}), "('arial.ttf', 15)\n", (781, 798), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5356, 5375), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (5370, 5375), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((820, 847), 'os.path.join', 'os.path.join', (['img_path', 'img'], {}), '(img_path, img)\n', (832, 847), False, 'import os\n'), ((1498, 1513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1511, 1513), False, 'import torch\n'), ((1603, 1624), 'torch.cat', 'torch.cat', (['tensors', '(0)'], {}), '(tensors, 0)\n', (1612, 1624), False, 'import torch\n'), ((2171, 2202), 'torch.min', 'torch.min', (['(x1 + b1_w)', '(x2 + b2_w)'], {}), '(x1 + b1_w, x2 + b2_w)\n', (2180, 2202), False, 'import torch\n'), ((2225, 2242), 'torch.max', 'torch.max', (['x1', 'x2'], {}), '(x1, x2)\n', (2234, 2242), False, 'import torch\n'), ((2264, 2295), 'torch.min', 'torch.min', (['(y1 + b1_h)', '(y2 + b2_h)'], {}), '(y1 + b1_h, y2 + b2_h)\n', (2273, 2295), False, 'import torch\n'), ((2318, 2335), 'torch.max', 'torch.max', (['y1', 'y2'], {}), '(y1, y2)\n', (2327, 2335), False, 'import torch\n'), ((3234, 3279), 'torch.max', 'torch.max', (['image_pred[:, 5:]', '(1)'], {'keepdim': '(True)'}), '(image_pred[:, 5:], 1, keepdim=True)\n', (3243, 3279), False, 'import torch\n'), ((3637, 3669), 'torch.unique', 'torch.unique', (['image_pred_[:, -1]'], {}), '(image_pred_[:, -1])\n', (3649, 3669), False, 'import torch\n'), ((5068, 5095), 'torch.cat', 'torch.cat', (['img_preds'], {'dim': '(0)'}), '(img_preds, dim=0)\n', (5077, 5095), False, 'import torch\n'), ((5713, 5739), 'random.choice', 'random.choice', (['self.colors'], {}), '(self.colors)\n', (5726, 5739), False, 'import random\n'), ((880, 900), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (890, 900), False, 'import os\n'), ((1189, 1229), 'torchvision.transforms.Resize', 'T.Resize', (['(self.img_size, self.img_size)'], {}), '((self.img_size, self.img_size))\n', (1197, 1229), True, 'from torchvision import transforms as T\n'), ((1259, 1271), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1269, 1271), True, 'from torchvision import transforms as T\n'), ((1425, 1470), 'torch.FloatTensor', 'torch.FloatTensor', (['[img.size for img in imgs]'], {}), '([img.size for img in imgs])\n', (1442, 1470), False, 'import torch\n'), ((4100, 4151), 'torch.sort', 'torch.sort', (['image_pred_class[:, 4]'], {'descending': '(True)'}), '(image_pred_class[:, 4], descending=True)\n', (4110, 4151), False, 'import torch\n'), ((1358, 1373), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (1368, 1373), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3446, 3477), 'torch.nonzero', 'torch.nonzero', (['image_pred[:, 4]'], {}), '(image_pred[:, 4])\n', (3459, 3477), False, 'import torch\n'), ((3948, 3976), 'torch.nonzero', 'torch.nonzero', (['c_mask[:, -2]'], {}), '(c_mask[:, -2])\n', (3961, 3976), False, 'import torch\n'), ((4687, 4724), 'torch.nonzero', 'torch.nonzero', (['image_pred_class[:, 4]'], {}), '(image_pred_class[:, 4])\n', (4700, 4724), False, 'import torch\n')]
|
import re
def asSeconds(data, default=None):
r"""
Convert string to seconds. The following input is accepted:
* A humanly readable time (e.g. "1d").
* A SLURM time string (e.g. "1-00:00:00").
* A time string (e.g. "24:00:00").
* ``int`` or ``float``: interpreted as seconds.
:arguments:
**data** (``<str>`` | ``<float>`` | ``<int>``)
The input string
(number are equally accepted; they are directly interpreted as seconds).
:options:
**default** ([``None``] | ``<int>``)
Value to return if the conversion fails.
:returns:
``<int>``
Number of seconds as integer (or default value if the conversion fails).
"""
# convert int -> int (implicitly assume that the input is in seconds)
if isinstance(data, int):
return data
    # convert float -> int (implicitly assume that the input is in seconds)
if isinstance(data, float):
return int(data)
# convert SLURM time string (e.g. "1-00:00:00")
if re.match(r"^[0-9]*\-[0-9]*\:[0-9]*\:[0-9]*$", data):
# - initialize number of days, hours, minutes, seconds
t = [0, 0, 0, 0]
# - split days
if len(data.split("-")) > 1:
t[0], data = data.split("-")
# - split hours:minutes:seconds (all optional)
data = data.split(":")
# - fill from seconds -> minutes (if present) -> hours (if present)
for i in range(len(data)):
t[-1 * (i + 1)] = data[-1 * (i + 1)]
# - return seconds
return int(t[0]) * 24 * 60 * 60 + int(t[1]) * 60 * 60 + int(t[2]) * 60 + int(t[3])
# convert time string in hours (e.g. "24:00:00")
if re.match(r"^[0-9]*\:[0-9]*\:[0-9]*$", data):
# - initialize number of hours, minutes, seconds
t = [0, 0, 0]
        # - split hours:minutes:seconds
        data = data.split(":")
        # - fill from seconds -> minutes -> hours
        for i in range(len(data)):
            t[-1 * (i + 1)] = data[-1 * (i + 1)]
# - return seconds
return int(t[0]) * 60 * 60 + int(t[1]) * 60 + int(t[2])
# convert time string in minutes (e.g. "12:34")
if re.match(r"^[0-9]*\:[0-9]*$", data):
# - initialize number of minutes, seconds
t = [0, 0]
        # - split minutes:seconds
        data = data.split(":")
        # - fill from seconds -> minutes
        for i in range(len(data)):
            t[-1 * (i + 1)] = data[-1 * (i + 1)]
# - return seconds
return int(t[0]) * 60 + int(t[1])
# convert humanly readable time (e.g. "1d")
if re.match(r"^[0-9]*\.?[0-9]*[a-zA-Z]$", data):
if data[-1] == "d":
return int(float(data[:-1]) * float(60 * 60 * 24))
elif data[-1] == "h":
return int(float(data[:-1]) * float(60 * 60))
elif data[-1] == "m":
return int(float(data[:-1]) * float(60))
elif data[-1] == "s":
return int(float(data[:-1]) * float(1))
# one last try (implicitly assume that the input is in seconds)
try:
return int(data)
except BaseException:
pass
# all conversions failed: return default value
return default
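# Illustrative conversions (assumed values, consistent with the docstring above):
#   asSeconds("1-00:00:00") == asSeconds("24:00:00") == asSeconds("1d") == 86400
#   asSeconds("not-a-time") is None   # all conversions fail -> default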
def asUnit(data, unit, precision):
r"""
Convert to rich-string with a certain unit and precision. The output is e.g. ``"1.1d"``.
:arguments:
**data** (``<int>`` | ``<float>``)
Numerical value (e.g. ``1.1``).
**unit** (``<str>``)
The unit (e.g. ``"d"``).
**precision** (``<int>``)
The precision with which to print (e.g. ``1``).
:returns:
``<str>``
The rich-string.
"""
if precision:
return f"{{0:.{precision:d}f}}{{1:s}}".format(data, unit)
if abs(round(data)) < 10.0:
return f"{data:.1f}{unit:s}"
else:
return f"{round(data):.0f}{unit:s}"
def asHuman(data, precision=None):
r"""
Convert to string that has the biggest possible unit.
For example: ``100`` (seconds) -> ``"1.7m"``.
:arguments:
**data** (``<str>`` | ``<float>`` | ``<int>``)
A time, see ``GooseSLURM.duration.asSeconds`` for conversion.
**precision** (``<int>``)
The precision with which to print. By default a precision of one is used for
        ``|value| < 10``,
while a precision of zero is used otherwise.
:returns:
``<str>``
The rich-string.
"""
data = asSeconds(data)
if data is None:
return ""
units = (
(24 * 60 * 60, "d"),
(60 * 60, "h"),
(60, "m"),
(1, "s"),
)
for val, unit in units:
if abs(data) >= val:
return asUnit(float(data) / float(val), unit, precision)
return asUnit(float(data), "s", precision)
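# e.g. asHuman(100) -> "1.7m" and asHuman(90000) -> "1.0d" (illustrative values)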
def asSlurm(data):
r"""
Convert to a SLURM time string. For example ``"1d"`` -> ``"1-00:00:00"``.
:arguments:
**data** (``<str>`` | ``<float>`` | ``<int>``)
A time, see ``GooseSLURM.duration.asSeconds`` for conversion.
:returns:
``<str>``
The rich-string.
"""
data = asSeconds(data)
if data is None:
return ""
s = int(data % 60)
data = (data - s) / 60
m = int(data % 60)
data = (data - m) / 60
h = int(data % 24)
data = (data - h) / 24
d = int(data)
return "%d-%02d:%02d:%02d" % (d, h, m, s)
|
[
"re.match"
] |
[((1059, 1112), 're.match', 're.match', (['"""^[0-9]*\\\\-[0-9]*\\\\:[0-9]*\\\\:[0-9]*$"""', 'data'], {}), "('^[0-9]*\\\\-[0-9]*\\\\:[0-9]*\\\\:[0-9]*$', data)\n", (1067, 1112), False, 'import re\n'), ((1727, 1771), 're.match', 're.match', (['"""^[0-9]*\\\\:[0-9]*\\\\:[0-9]*$"""', 'data'], {}), "('^[0-9]*\\\\:[0-9]*\\\\:[0-9]*$', data)\n", (1735, 1771), False, 'import re\n'), ((2237, 2272), 're.match', 're.match', (['"""^[0-9]*\\\\:[0-9]*$"""', 'data'], {}), "('^[0-9]*\\\\:[0-9]*$', data)\n", (2245, 2272), False, 'import re\n'), ((2703, 2747), 're.match', 're.match', (['"""^[0-9]*\\\\.?[0-9]*[a-zA-Z]$"""', 'data'], {}), "('^[0-9]*\\\\.?[0-9]*[a-zA-Z]$', data)\n", (2711, 2747), False, 'import re\n')]
|
#!/usr/bin/env python
import spider_base_selenium
class MySpider(spider_base_selenium.Spider):
def __init__(self):
self.urls = [
'http://www.baidu.com',
'http://www.bing.com',
'http://www.weibo.com',
]
def parse(self, response):
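        # get_response() presumably exposes the underlying Selenium driver,
        # so .title is the <title> of the fetched page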
print(response.get_response().title)
if __name__ == '__main__':
my_spider = MySpider()
engine = spider_base_selenium.Engine()
engine.run(my_spider)
|
[
"spider_base_selenium.Engine"
] |
[((405, 434), 'spider_base_selenium.Engine', 'spider_base_selenium.Engine', ([], {}), '()\n', (432, 434), False, 'import spider_base_selenium\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'AzureActiveDirectoryArgs',
'ClientCertificateArgs',
'EndpointRangeDescriptionArgs',
'LoadBalancingRuleArgs',
'SettingsParameterDescriptionArgs',
'SettingsSectionDescriptionArgs',
'SkuArgs',
'SubResourceArgs',
'VMSSExtensionArgs',
'VaultCertificateArgs',
'VaultSecretGroupArgs',
]
@pulumi.input_type
class AzureActiveDirectoryArgs:
def __init__(__self__, *,
client_application: Optional[pulumi.Input[str]] = None,
cluster_application: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None):
"""
The settings to enable AAD authentication on the cluster.
:param pulumi.Input[str] client_application: Azure active directory client application id.
:param pulumi.Input[str] cluster_application: Azure active directory cluster application id.
:param pulumi.Input[str] tenant_id: Azure active directory tenant id.
"""
if client_application is not None:
pulumi.set(__self__, "client_application", client_application)
if cluster_application is not None:
pulumi.set(__self__, "cluster_application", cluster_application)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="clientApplication")
def client_application(self) -> Optional[pulumi.Input[str]]:
"""
Azure active directory client application id.
"""
return pulumi.get(self, "client_application")
@client_application.setter
def client_application(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_application", value)
@property
@pulumi.getter(name="clusterApplication")
def cluster_application(self) -> Optional[pulumi.Input[str]]:
"""
Azure active directory cluster application id.
"""
return pulumi.get(self, "cluster_application")
@cluster_application.setter
def cluster_application(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_application", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Azure active directory tenant id.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class ClientCertificateArgs:
def __init__(__self__, *,
is_admin: pulumi.Input[bool],
common_name: Optional[pulumi.Input[str]] = None,
issuer_thumbprint: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None):
"""
Client Certificate definition.
:param pulumi.Input[bool] is_admin: Whether the certificate is admin or not.
:param pulumi.Input[str] common_name: Certificate Common name.
:param pulumi.Input[str] issuer_thumbprint: Issuer thumbprint for the certificate. Only used together with CommonName.
:param pulumi.Input[str] thumbprint: Certificate Thumbprint.
"""
pulumi.set(__self__, "is_admin", is_admin)
if common_name is not None:
pulumi.set(__self__, "common_name", common_name)
if issuer_thumbprint is not None:
pulumi.set(__self__, "issuer_thumbprint", issuer_thumbprint)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="isAdmin")
def is_admin(self) -> pulumi.Input[bool]:
"""
Whether the certificate is admin or not.
"""
return pulumi.get(self, "is_admin")
@is_admin.setter
def is_admin(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_admin", value)
@property
@pulumi.getter(name="commonName")
def common_name(self) -> Optional[pulumi.Input[str]]:
"""
Certificate Common name.
"""
return pulumi.get(self, "common_name")
@common_name.setter
def common_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "common_name", value)
@property
@pulumi.getter(name="issuerThumbprint")
def issuer_thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
Issuer thumbprint for the certificate. Only used together with CommonName.
"""
return pulumi.get(self, "issuer_thumbprint")
@issuer_thumbprint.setter
def issuer_thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "issuer_thumbprint", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
Certificate Thumbprint.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
@pulumi.input_type
class EndpointRangeDescriptionArgs:
def __init__(__self__, *,
end_port: pulumi.Input[int],
start_port: pulumi.Input[int]):
"""
Port range details
:param pulumi.Input[int] end_port: End port of a range of ports
:param pulumi.Input[int] start_port: Starting port of a range of ports
"""
pulumi.set(__self__, "end_port", end_port)
pulumi.set(__self__, "start_port", start_port)
@property
@pulumi.getter(name="endPort")
def end_port(self) -> pulumi.Input[int]:
"""
End port of a range of ports
"""
return pulumi.get(self, "end_port")
@end_port.setter
def end_port(self, value: pulumi.Input[int]):
pulumi.set(self, "end_port", value)
@property
@pulumi.getter(name="startPort")
def start_port(self) -> pulumi.Input[int]:
"""
Starting port of a range of ports
"""
return pulumi.get(self, "start_port")
@start_port.setter
def start_port(self, value: pulumi.Input[int]):
pulumi.set(self, "start_port", value)
@pulumi.input_type
class LoadBalancingRuleArgs:
def __init__(__self__, *,
backend_port: pulumi.Input[int],
frontend_port: pulumi.Input[int],
probe_protocol: pulumi.Input[Union[str, 'ProbeProtocol']],
protocol: pulumi.Input[Union[str, 'Protocol']],
probe_request_path: Optional[pulumi.Input[str]] = None):
"""
Describes a load balancing rule.
:param pulumi.Input[int] backend_port: The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
:param pulumi.Input[int] frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534.
:param pulumi.Input[Union[str, 'ProbeProtocol']] probe_protocol: the reference to the load balancer probe used by the load balancing rule.
:param pulumi.Input[Union[str, 'Protocol']] protocol: The reference to the transport protocol used by the load balancing rule.
:param pulumi.Input[str] probe_request_path: The probe request path. Only supported for HTTP/HTTPS probes.
"""
pulumi.set(__self__, "backend_port", backend_port)
pulumi.set(__self__, "frontend_port", frontend_port)
pulumi.set(__self__, "probe_protocol", probe_protocol)
pulumi.set(__self__, "protocol", protocol)
if probe_request_path is not None:
pulumi.set(__self__, "probe_request_path", probe_request_path)
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> pulumi.Input[int]:
"""
The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535.
"""
return pulumi.get(self, "backend_port")
@backend_port.setter
def backend_port(self, value: pulumi.Input[int]):
pulumi.set(self, "backend_port", value)
@property
@pulumi.getter(name="frontendPort")
def frontend_port(self) -> pulumi.Input[int]:
"""
The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534.
"""
return pulumi.get(self, "frontend_port")
@frontend_port.setter
def frontend_port(self, value: pulumi.Input[int]):
pulumi.set(self, "frontend_port", value)
@property
@pulumi.getter(name="probeProtocol")
def probe_protocol(self) -> pulumi.Input[Union[str, 'ProbeProtocol']]:
"""
the reference to the load balancer probe used by the load balancing rule.
"""
return pulumi.get(self, "probe_protocol")
@probe_protocol.setter
def probe_protocol(self, value: pulumi.Input[Union[str, 'ProbeProtocol']]):
pulumi.set(self, "probe_protocol", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[Union[str, 'Protocol']]:
"""
The reference to the transport protocol used by the load balancing rule.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[Union[str, 'Protocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="probeRequestPath")
def probe_request_path(self) -> Optional[pulumi.Input[str]]:
"""
The probe request path. Only supported for HTTP/HTTPS probes.
"""
return pulumi.get(self, "probe_request_path")
@probe_request_path.setter
def probe_request_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "probe_request_path", value)
@pulumi.input_type
class SettingsParameterDescriptionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Describes a parameter in fabric settings of the cluster.
:param pulumi.Input[str] name: The parameter name of fabric setting.
:param pulumi.Input[str] value: The parameter value of fabric setting.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The parameter name of fabric setting.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The parameter value of fabric setting.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SettingsSectionDescriptionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]):
"""
Describes a section in the fabric settings of the cluster.
:param pulumi.Input[str] name: The section name of the fabric settings.
:param pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]] parameters: The collection of parameters in the section.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The section name of the fabric settings.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]:
"""
The collection of parameters in the section.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
Sku definition
:param pulumi.Input[str] name: Sku Name.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Sku Name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SubResourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Azure resource identifier.
:param pulumi.Input[str] id: Azure resource identifier.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Azure resource identifier.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class VMSSExtensionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
publisher: pulumi.Input[str],
type: pulumi.Input[str],
type_handler_version: pulumi.Input[str],
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
settings: Optional[Any] = None):
"""
Specifies set of extensions that should be installed onto the virtual machines.
:param pulumi.Input[str] name: The name of the extension.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[str] force_update_tag: If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
:param Any settings: Json formatted public settings for the extension.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "publisher", publisher)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "type_handler_version", type_handler_version)
if auto_upgrade_minor_version is not None:
pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
if force_update_tag is not None:
pulumi.set(__self__, "force_update_tag", force_update_tag)
if protected_settings is not None:
pulumi.set(__self__, "protected_settings", protected_settings)
if provision_after_extensions is not None:
pulumi.set(__self__, "provision_after_extensions", provision_after_extensions)
if settings is not None:
pulumi.set(__self__, "settings", settings)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the extension.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def publisher(self) -> pulumi.Input[str]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: pulumi.Input[str]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Specifies the type of the extension; an example is "CustomScriptExtension".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> pulumi.Input[str]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
@type_handler_version.setter
def type_handler_version(self, value: pulumi.Input[str]):
pulumi.set(self, "type_handler_version", value)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@auto_upgrade_minor_version.setter
def auto_upgrade_minor_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_upgrade_minor_version", value)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> Optional[pulumi.Input[str]]:
"""
If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@force_update_tag.setter
def force_update_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "force_update_tag", value)
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> Optional[Any]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@protected_settings.setter
def protected_settings(self, value: Optional[Any]):
pulumi.set(self, "protected_settings", value)
@property
@pulumi.getter(name="provisionAfterExtensions")
def provision_after_extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Collection of extension names after which this extension needs to be provisioned.
"""
return pulumi.get(self, "provision_after_extensions")
@provision_after_extensions.setter
def provision_after_extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provision_after_extensions", value)
@property
@pulumi.getter
def settings(self) -> Optional[Any]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[Any]):
pulumi.set(self, "settings", value)
@pulumi.input_type
class VaultCertificateArgs:
def __init__(__self__, *,
certificate_store: pulumi.Input[str],
certificate_url: pulumi.Input[str]):
"""
Describes a single certificate reference in a Key Vault, and where the certificate should reside on the VM.
:param pulumi.Input[str] certificate_store: For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account. <br><br>For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted.
:param pulumi.Input[str] certificate_url: This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8: <br><br> {<br> "data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br> "password":"<<PASSWORD>>"<br>}
"""
pulumi.set(__self__, "certificate_store", certificate_store)
pulumi.set(__self__, "certificate_url", certificate_url)
@property
@pulumi.getter(name="certificateStore")
def certificate_store(self) -> pulumi.Input[str]:
"""
For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account. <br><br>For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted.
"""
return pulumi.get(self, "certificate_store")
@certificate_store.setter
def certificate_store(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_store", value)
@property
@pulumi.getter(name="certificateUrl")
def certificate_url(self) -> pulumi.Input[str]:
"""
This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8: <br><br> {<br> "data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br> "password":"<<PASSWORD>>"<br>}
"""
return pulumi.get(self, "certificate_url")
@certificate_url.setter
def certificate_url(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_url", value)
@pulumi.input_type
class VaultSecretGroupArgs:
def __init__(__self__, *,
source_vault: pulumi.Input['SubResourceArgs'],
vault_certificates: pulumi.Input[Sequence[pulumi.Input['VaultCertificateArgs']]]):
"""
Specifies set of certificates that should be installed onto the virtual machines.
:param pulumi.Input['SubResourceArgs'] source_vault: The relative URL of the Key Vault containing all of the certificates in VaultCertificates.
:param pulumi.Input[Sequence[pulumi.Input['VaultCertificateArgs']]] vault_certificates: The list of key vault references in SourceVault which contain certificates.
"""
pulumi.set(__self__, "source_vault", source_vault)
pulumi.set(__self__, "vault_certificates", vault_certificates)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SubResourceArgs']:
"""
The relative URL of the Key Vault containing all of the certificates in VaultCertificates.
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SubResourceArgs']):
pulumi.set(self, "source_vault", value)
@property
@pulumi.getter(name="vaultCertificates")
def vault_certificates(self) -> pulumi.Input[Sequence[pulumi.Input['VaultCertificateArgs']]]:
"""
The list of key vault references in SourceVault which contain certificates.
"""
return pulumi.get(self, "vault_certificates")
@vault_certificates.setter
def vault_certificates(self, value: pulumi.Input[Sequence[pulumi.Input['VaultCertificateArgs']]]):
pulumi.set(self, "vault_certificates", value)
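# Illustrative usage sketch (assumed values; not part of the generated file):
#   rule = LoadBalancingRuleArgs(backend_port=19000, frontend_port=19000,
#                                probe_protocol="tcp", protocol="tcp")
#   aad = AzureActiveDirectoryArgs(tenant_id="<tenant-guid>")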
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set"
] |
[((1688, 1727), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clientApplication"""'}), "(name='clientApplication')\n", (1701, 1727), False, 'import pulumi\n'), ((2101, 2141), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterApplication"""'}), "(name='clusterApplication')\n", (2114, 2141), False, 'import pulumi\n'), ((2521, 2551), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""tenantId"""'}), "(name='tenantId')\n", (2534, 2551), False, 'import pulumi\n'), ((3971, 4000), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""isAdmin"""'}), "(name='isAdmin')\n", (3984, 4000), False, 'import pulumi\n'), ((4301, 4333), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""commonName"""'}), "(name='commonName')\n", (4314, 4333), False, 'import pulumi\n'), ((4651, 4689), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""issuerThumbprint"""'}), "(name='issuerThumbprint')\n", (4664, 4689), False, 'import pulumi\n'), ((5902, 5931), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""endPort"""'}), "(name='endPort')\n", (5915, 5931), False, 'import pulumi\n'), ((6218, 6249), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""startPort"""'}), "(name='startPort')\n", (6231, 6249), False, 'import pulumi\n'), ((8102, 8135), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""backendPort"""'}), "(name='backendPort')\n", (8115, 8135), False, 'import pulumi\n'), ((8512, 8546), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""frontendPort"""'}), "(name='frontendPort')\n", (8525, 8546), False, 'import pulumi\n'), ((8976, 9011), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""probeProtocol"""'}), "(name='probeProtocol')\n", (8989, 9011), False, 'import pulumi\n'), ((9803, 9841), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""probeRequestPath"""'}), "(name='probeRequestPath')\n", (9816, 9841), False, 'import pulumi\n'), ((17591, 17631), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""typeHandlerVersion"""'}), "(name='typeHandlerVersion')\n", (17604, 17631), False, 'import pulumi\n'), ((17994, 18039), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""autoUpgradeMinorVersion"""'}), "(name='autoUpgradeMinorVersion')\n", (18007, 18039), False, 'import pulumi\n'), ((18640, 18676), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""forceUpdateTag"""'}), "(name='forceUpdateTag')\n", (18653, 18676), False, 'import pulumi\n'), ((19159, 19198), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""protectedSettings"""'}), "(name='protectedSettings')\n", (19172, 19198), False, 'import pulumi\n'), ((19615, 19661), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""provisionAfterExtensions"""'}), "(name='provisionAfterExtensions')\n", (19628, 19661), False, 'import pulumi\n'), ((21953, 21991), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""certificateStore"""'}), "(name='certificateStore')\n", (21966, 21991), False, 'import pulumi\n'), ((22737, 22773), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""certificateUrl"""'}), "(name='certificateUrl')\n", (22750, 22773), False, 'import pulumi\n'), ((24351, 24384), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sourceVault"""'}), "(name='sourceVault')\n", (24364, 24384), False, 'import pulumi\n'), ((24781, 24820), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""vaultCertificates"""'}), "(name='vaultCertificates')\n", (24794, 24820), False, 'import pulumi\n'), ((1886, 1924), 'pulumi.get', 'pulumi.get', (['self', '"""client_application"""'], {}), "(self, 'client_application')\n", (1896, 1924), False, 'import pulumi\n'), ((2035, 2080), 'pulumi.set', 'pulumi.set', (['self', '"""client_application"""', 'value'], {}), "(self, 'client_application', value)\n", (2045, 2080), False, 'import pulumi\n'), ((2302, 2341), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_application"""'], {}), "(self, 'cluster_application')\n", (2312, 2341), False, 'import pulumi\n'), ((2454, 2500), 'pulumi.set', 'pulumi.set', (['self', '"""cluster_application"""', 'value'], {}), "(self, 'cluster_application', value)\n", (2464, 2500), False, 'import pulumi\n'), ((2689, 2718), 'pulumi.get', 'pulumi.get', (['self', '"""tenant_id"""'], {}), "(self, 'tenant_id')\n", (2699, 2718), False, 'import pulumi\n'), ((2811, 2847), 'pulumi.set', 'pulumi.set', (['self', '"""tenant_id"""', 'value'], {}), "(self, 'tenant_id', value)\n", (2821, 2847), False, 'import pulumi\n'), ((3602, 3644), 'pulumi.set', 'pulumi.set', (['__self__', '"""is_admin"""', 'is_admin'], {}), "(__self__, 'is_admin', is_admin)\n", (3612, 3644), False, 'import pulumi\n'), ((4135, 4163), 'pulumi.get', 'pulumi.get', (['self', '"""is_admin"""'], {}), "(self, 'is_admin')\n", (4145, 4163), False, 'import pulumi\n'), ((4245, 4280), 'pulumi.set', 'pulumi.set', (['self', '"""is_admin"""', 'value'], {}), "(self, 'is_admin', value)\n", (4255, 4280), False, 'import pulumi\n'), ((4464, 4495), 'pulumi.get', 'pulumi.get', (['self', '"""common_name"""'], {}), "(self, 'common_name')\n", (4474, 4495), False, 'import pulumi\n'), ((4592, 4630), 'pulumi.set', 'pulumi.set', (['self', '"""common_name"""', 'value'], {}), "(self, 'common_name', value)\n", (4602, 4630), False, 'import pulumi\n'), ((4876, 4913), 'pulumi.get', 'pulumi.get', (['self', '"""issuer_thumbprint"""'], {}), "(self, 'issuer_thumbprint')\n", (4886, 4913), False, 'import pulumi\n'), ((5022, 5066), 'pulumi.set', 'pulumi.set', (['self', '"""issuer_thumbprint"""', 'value'], {}), "(self, 'issuer_thumbprint', value)\n", (5032, 5066), False, 'import pulumi\n'), ((5229, 5259), 'pulumi.get', 'pulumi.get', (['self', '"""thumbprint"""'], {}), "(self, 'thumbprint')\n", (5239, 5259), False, 'import pulumi\n'), ((5354, 5391), 'pulumi.set', 'pulumi.set', (['self', '"""thumbprint"""', 'value'], {}), "(self, 'thumbprint', value)\n", (5364, 5391), False, 'import pulumi\n'), ((5784, 5826), 'pulumi.set', 'pulumi.set', (['__self__', '"""end_port"""', 'end_port'], {}), "(__self__, 'end_port', end_port)\n", (5794, 5826), False, 'import pulumi\n'), ((5835, 5881), 'pulumi.set', 'pulumi.set', (['__self__', '"""start_port"""', 'start_port'], {}), "(__self__, 'start_port', start_port)\n", (5845, 5881), False, 'import pulumi\n'), ((6053, 6081), 'pulumi.get', 'pulumi.get', (['self', '"""end_port"""'], {}), "(self, 'end_port')\n", (6063, 6081), False, 'import pulumi\n'), ((6162, 6197), 'pulumi.set', 'pulumi.set', (['self', '"""end_port"""', 'value'], {}), "(self, 'end_port', value)\n", (6172, 6197), False, 'import pulumi\n'), ((6378, 6408), 'pulumi.get', 'pulumi.get', (['self', '"""start_port"""'], {}), "(self, 'start_port')\n", (6388, 6408), False, 'import pulumi\n'), ((6493, 6530), 'pulumi.set', 'pulumi.set', (['self', '"""start_port"""', 'value'], {}), "(self, 'start_port', value)\n", (6503, 6530), False, 'import pulumi\n'), ((7738, 7788), 'pulumi.set', 'pulumi.set', (['__self__', '"""backend_port"""', 'backend_port'], {}), "(__self__, 'backend_port', backend_port)\n", (7748, 7788), False, 'import pulumi\n'), ((7797, 7849), 'pulumi.set', 'pulumi.set', (['__self__', '"""frontend_port"""', 'frontend_port'], {}), "(__self__, 'frontend_port', frontend_port)\n", (7807, 7849), False, 'import pulumi\n'), ((7858, 7912), 'pulumi.set', 'pulumi.set', (['__self__', '"""probe_protocol"""', 'probe_protocol'], {}), "(__self__, 'probe_protocol', probe_protocol)\n", (7868, 7912), False, 'import pulumi\n'), ((7921, 7963), 'pulumi.set', 'pulumi.set', (['__self__', '"""protocol"""', 'protocol'], {}), "(__self__, 'protocol', protocol)\n", (7931, 7963), False, 'import pulumi\n'), ((8331, 8363), 'pulumi.get', 'pulumi.get', (['self', '"""backend_port"""'], {}), "(self, 'backend_port')\n", (8341, 8363), False, 'import pulumi\n'), ((8452, 8491), 'pulumi.set', 'pulumi.set', (['self', '"""backend_port"""', 'value'], {}), "(self, 'backend_port', value)\n", (8462, 8491), False, 'import pulumi\n'), ((8791, 8824), 'pulumi.get', 'pulumi.get', (['self', '"""frontend_port"""'], {}), "(self, 'frontend_port')\n", (8801, 8824), False, 'import pulumi\n'), ((8915, 8955), 'pulumi.set', 'pulumi.set', (['self', '"""frontend_port"""', 'value'], {}), "(self, 'frontend_port', value)\n", (8925, 8955), False, 'import pulumi\n'), ((9208, 9242), 'pulumi.get', 'pulumi.get', (['self', '"""probe_protocol"""'], {}), "(self, 'probe_protocol')\n", (9218, 9242), False, 'import pulumi\n'), ((9359, 9400), 'pulumi.set', 'pulumi.set', (['self', '"""probe_protocol"""', 'value'], {}), "(self, 'probe_protocol', value)\n", (9369, 9400), False, 'import pulumi\n'), ((9619, 9647), 'pulumi.get', 'pulumi.get', (['self', '"""protocol"""'], {}), "(self, 'protocol')\n", (9629, 9647), False, 'import pulumi\n'), ((9747, 9782), 'pulumi.set', 'pulumi.set', (['self', '"""protocol"""', 'value'], {}), "(self, 'protocol', value)\n", (9757, 9782), False, 'import pulumi\n'), ((10016, 10054), 'pulumi.get', 'pulumi.get', (['self', '"""probe_request_path"""'], {}), "(self, 'probe_request_path')\n", (10026, 10054), False, 'import pulumi\n'), ((10165, 10210), 'pulumi.set', 'pulumi.set', (['self', '"""probe_request_path"""', 'value'], {}), "(self, 'probe_request_path', value)\n", (10175, 10210), False, 'import pulumi\n'), ((10641, 10675), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (10651, 10675), False, 'import pulumi\n'), ((10684, 10720), 'pulumi.set', 'pulumi.set', (['__self__', '"""value"""', 'value'], {}), "(__self__, 'value', value)\n", (10694, 10720), False, 'import pulumi\n'), ((10881, 10905), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (10891, 10905), False, 'import pulumi\n'), ((10978, 11009), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (10988, 11009), False, 'import pulumi\n'), ((11172, 11197), 'pulumi.get', 'pulumi.get', (['self', '"""value"""'], {}), "(self, 'value')\n", (11182, 11197), False, 'import pulumi\n'), ((11272, 11304), 'pulumi.set', 'pulumi.set', (['self', '"""value"""', 'value'], {}), "(self, 'value', value)\n", (11282, 11304), False, 'import pulumi\n'), ((11864, 11898), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (11874, 11898), False, 'import pulumi\n'), ((11907, 11953), 'pulumi.set', 'pulumi.set', (['__self__', '"""parameters"""', 'parameters'], {}), "(__self__, 'parameters', parameters)\n", (11917, 11953), False, 'import pulumi\n'), ((12117, 12141), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (12127, 12141), False, 'import pulumi\n'), ((12214, 12245), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (12224, 12245), False, 'import pulumi\n'), ((12474, 12504), 'pulumi.get', 'pulumi.get', (['self', '"""parameters"""'], {}), "(self, 'parameters')\n", (12484, 12504), False, 'import pulumi\n'), ((12644, 12681), 'pulumi.set', 'pulumi.set', (['self', '"""parameters"""', 'value'], {}), "(self, 'parameters', value)\n", (12654, 12681), False, 'import pulumi\n'), ((12895, 12929), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (12905, 12929), False, 'import pulumi\n'), ((13062, 13086), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (13072, 13086), False, 'import pulumi\n'), ((13159, 13190), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (13169, 13190), False, 'import pulumi\n'), ((13673, 13695), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (13683, 13695), False, 'import pulumi\n'), ((13774, 13803), 'pulumi.set', 'pulumi.set', (['self', '"""id"""', 'value'], {}), "(self, 'id', value)\n", (13784, 13803), False, 'import pulumi\n'), ((15837, 15871), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (15847, 15871), False, 'import pulumi\n'), ((15880, 15924), 'pulumi.set', 'pulumi.set', (['__self__', '"""publisher"""', 'publisher'], {}), "(__self__, 'publisher', publisher)\n", (15890, 15924), False, 'import pulumi\n'), ((15933, 15967), 'pulumi.set', 'pulumi.set', (['__self__', '"""type"""', 'type'], {}), "(__self__, 'type', type)\n", (15943, 15967), False, 'import pulumi\n'), ((15976, 16042), 'pulumi.set', 'pulumi.set', (['__self__', '"""type_handler_version"""', 'type_handler_version'], {}), "(__self__, 'type_handler_version', type_handler_version)\n", (15986, 16042), False, 'import pulumi\n'), ((16794, 16818), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (16804, 16818), False, 'import pulumi\n'), ((16891, 16922), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (16901, 16922), False, 'import pulumi\n'), ((17095, 17124), 'pulumi.get', 'pulumi.get', (['self', '"""publisher"""'], {}), "(self, 'publisher')\n", (17105, 17124), False, 'import pulumi\n'), ((17207, 17243), 'pulumi.set', 'pulumi.set', (['self', '"""publisher"""', 'value'], {}), "(self, 'publisher', value)\n", (17217, 17243), False, 'import pulumi\n'), ((17442, 17466), 'pulumi.get', 'pulumi.get', (['self', '"""type"""'], {}), "(self, 'type')\n", (17452, 17466), False, 'import pulumi\n'), ((17539, 17570), 'pulumi.set', 'pulumi.set', (['self', '"""type"""', 'value'], {}), "(self, 'type', value)\n", (17549, 17570), False, 'import pulumi\n'), ((17781, 17821), 'pulumi.get', 'pulumi.get', (['self', '"""type_handler_version"""'], {}), "(self, 'type_handler_version')\n", (17791, 17821), False, 'import pulumi\n'), ((17926, 17973), 'pulumi.set', 'pulumi.set', (['self', '"""type_handler_version"""', 'value'], {}), "(self, 'type_handler_version', value)\n", (17936, 17973), False, 'import pulumi\n'), ((18392, 18438), 'pulumi.get', 'pulumi.get', (['self', '"""auto_upgrade_minor_version"""'], {}), "(self, 'auto_upgrade_minor_version')\n", (18402, 18438), False, 'import pulumi\n'), ((18566, 18619), 'pulumi.set', 'pulumi.set', (['self', '"""auto_upgrade_minor_version"""', 'value'], {}), "(self, 'auto_upgrade_minor_version', value)\n", (18576, 18619), False, 'import pulumi\n'), ((18952, 18988), 'pulumi.get', 'pulumi.get', (['self', '"""force_update_tag"""'], {}), "(self, 'force_update_tag')\n", (18962, 18988), False, 'import pulumi\n'), ((19095, 19138), 'pulumi.set', 'pulumi.set', (['self', '"""force_update_tag"""', 'value'], {}), "(self, 'force_update_tag', value)\n", (19105, 19138), False, 'import pulumi\n'), ((19414, 19452), 'pulumi.get', 'pulumi.get', (['self', '"""protected_settings"""'], {}), "(self, 'protected_settings')\n", (19424, 19452), False, 'import pulumi\n'), ((19549, 19594), 'pulumi.set', 'pulumi.set', (['self', '"""protected_settings"""', 'value'], {}), "(self, 'protected_settings', value)\n", (19559, 19594), False, 'import pulumi\n'), ((19888, 19934), 'pulumi.get', 'pulumi.get', (['self', '"""provision_after_extensions"""'], {}), "(self, 'provision_after_extensions')\n", (19898, 19934), False, 'import pulumi\n'), ((20085, 20138), 'pulumi.set', 'pulumi.set', (['self', '"""provision_after_extensions"""', 'value'], {}), "(self, 'provision_after_extensions', value)\n", (20095, 20138), False, 'import pulumi\n'), ((20311, 20339), 'pulumi.get', 'pulumi.get', (['self', '"""settings"""'], {}), "(self, 'settings')\n", (20321, 20339), False, 'import pulumi\n'), ((20416, 20451), 'pulumi.set', 'pulumi.set', (['self', '"""settings"""', 'value'], {}), "(self, 'settings', value)\n", (20426, 20451), False, 'import pulumi\n'), ((21807, 21867), 'pulumi.set', 'pulumi.set', (['__self__', '"""certificate_store"""', 'certificate_store'], {}), "(__self__, 'certificate_store', certificate_store)\n", (21817, 21867), False, 'import pulumi\n'), ((21876, 21932), 'pulumi.set', 'pulumi.set', (['__self__', '"""certificate_url"""', 'certificate_url'], {}), "(__self__, 'certificate_url', certificate_url)\n", (21886, 21932), False, 'import pulumi\n'), ((22536, 22573), 'pulumi.get', 'pulumi.get', (['self', '"""certificate_store"""'], {}), "(self, 'certificate_store')\n", (22546, 22573), False, 'import pulumi\n'), ((22672, 22716), 'pulumi.set', 'pulumi.set', (['self', '"""certificate_store"""', 'value'], {}), "(self, 'certificate_store', value)\n", (22682, 22716), False, 'import pulumi\n'), ((23347, 23382), 'pulumi.get', 'pulumi.get', (['self', '"""certificate_url"""'], {}), "(self, 'certificate_url')\n", (23357, 23382), False, 'import pulumi\n'), ((23477, 23519), 'pulumi.set', 'pulumi.set', (['self', '"""certificate_url"""', 'value'], {}), "(self, 'certificate_url', value)\n", (23487, 23519), False, 'import pulumi\n'), ((24209, 24259), 'pulumi.set', 'pulumi.set', (['__self__', '"""source_vault"""', 'source_vault'], {}), "(__self__, 'source_vault', source_vault)\n", (24219, 24259), False, 'import pulumi\n'), ((24268, 24330), 'pulumi.set', 'pulumi.set', (['__self__', '"""vault_certificates"""', 'vault_certificates'], {}), "(__self__, 'vault_certificates', vault_certificates)\n", (24278, 24330), False, 'import pulumi\n'), ((24586, 24618), 'pulumi.get', 'pulumi.get', (['self', '"""source_vault"""'], {}), "(self, 'source_vault')\n", (24596, 24618), False, 'import pulumi\n'), ((24721, 24760), 'pulumi.set', 'pulumi.set', (['self', '"""source_vault"""', 'value'], {}), "(self, 'source_vault', value)\n", (24731, 24760), False, 'import pulumi\n'), ((25042, 25080), 'pulumi.get', 'pulumi.get', (['self', '"""vault_certificates"""'], {}), "(self, 'vault_certificates')\n", (25052, 25080), False, 'import pulumi\n'), ((25224, 25269), 'pulumi.set', 'pulumi.set', (['self', '"""vault_certificates"""', 'value'], {}), "(self, 'vault_certificates', value)\n", (25234, 25269), False, 'import pulumi\n'), ((1393, 1455), 'pulumi.set', 'pulumi.set', (['__self__', '"""client_application"""', 'client_application'], {}), "(__self__, 'client_application', client_application)\n", (1403, 1455), False, 'import pulumi\n'), ((1512, 1576), 'pulumi.set', 'pulumi.set', (['__self__', '"""cluster_application"""', 'cluster_application'], {}), "(__self__, 'cluster_application', cluster_application)\n", (1522, 1576), False, 'import pulumi\n'), ((1623, 1667), 'pulumi.set', 'pulumi.set', (['__self__', '"""tenant_id"""', 'tenant_id'], {}), "(__self__, 'tenant_id', tenant_id)\n", (1633, 1667), False, 'import pulumi\n'), ((3693, 3741), 'pulumi.set', 'pulumi.set', (['__self__', '"""common_name"""', 'common_name'], {}), "(__self__, 'common_name', common_name)\n", (3703, 3741), False, 'import pulumi\n'), ((3796, 3856), 'pulumi.set', 'pulumi.set', (['__self__', '"""issuer_thumbprint"""', 'issuer_thumbprint'], {}), "(__self__, 'issuer_thumbprint', issuer_thumbprint)\n", (3806, 3856), False, 'import pulumi\n'), ((3904, 3950), 'pulumi.set', 'pulumi.set', (['__self__', '"""thumbprint"""', 'thumbprint'], {}), "(__self__, 'thumbprint', thumbprint)\n", (3914, 3950), False, 'import pulumi\n'), ((8019, 8081), 'pulumi.set', 'pulumi.set', (['__self__', '"""probe_request_path"""', 'probe_request_path'], {}), "(__self__, 'probe_request_path', probe_request_path)\n", (8029, 8081), False, 'import pulumi\n'), ((13485, 13515), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (13495, 13515), False, 'import pulumi\n'), ((16106, 16184), 'pulumi.set', 'pulumi.set', (['__self__', '"""auto_upgrade_minor_version"""', 'auto_upgrade_minor_version'], {}), "(__self__, 'auto_upgrade_minor_version', auto_upgrade_minor_version)\n", (16116, 16184), False, 'import pulumi\n'), ((16238, 16296), 'pulumi.set', 'pulumi.set', (['__self__', '"""force_update_tag"""', 'force_update_tag'], {}), "(__self__, 'force_update_tag', force_update_tag)\n", (16248, 16296), False, 'import pulumi\n'), ((16352, 16414), 'pulumi.set', 'pulumi.set', (['__self__', '"""protected_settings"""', 'protected_settings'], {}), "(__self__, 'protected_settings', protected_settings)\n", (16362, 16414), False, 'import pulumi\n'), ((16478, 16556), 'pulumi.set', 'pulumi.set', (['__self__', '"""provision_after_extensions"""', 'provision_after_extensions'], {}), "(__self__, 'provision_after_extensions', provision_after_extensions)\n", (16488, 16556), False, 'import pulumi\n'), ((16602, 16644), 'pulumi.set', 'pulumi.set', (['__self__', '"""settings"""', 'settings'], {}), "(__self__, 'settings', settings)\n", (16612, 16644), False, 'import pulumi\n')]
|
from package import redact_ex
from package import solve_explicit_ode
import numpy as np
EXERCISE_01 = """\
Make a program that is able to graphically solve the equation
\u2202T/\u2202t - \u03B1 \u2202\u00B2T/\u2202x\u00B2 = 0 using the Forward in Time, Centered in Space (FTCS)
scheme with Dirichlet boundary conditions u_0 = 0 and u_L = 10.
+ Consider different initial conditions.
+ Consider a new boundary condition: u_L = sin(t/2)
+ Consider null flux boundary conditions.\
"""
redact_ex(EXERCISE_01, 1)
# computation parameters
slices = 20
itern = 1000
plot_frequency = 0.05
# differentiation parameters
deltat = 1e-3
deltax = 1e-1
# problem parameters
alpha = 1
# helper variable
s = alpha*deltat/deltax**2
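# stability note (added; not part of the original exercise code): von Neumann
# analysis requires s = alpha*deltat/deltax**2 <= 1/2 for the explicit FTCS
# scheme to remain stable
assert s <= 0.5, "FTCS unstable: s = %g" % s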
# grid creation and initial conditions
tprev = np.zeros(slices+1); tpprev = np.zeros(slices+1)
tprev[0] = 0; tpprev[0] = 0
tprev[slices] = 10; tpprev[slices] = 10
initial_conditions = [tprev, tpprev]
# boundary conditions
def boundary_conditions(lap, ngr, grid):
slices = len(grid[0])-1
ngr[0] = 0
ngr[slices] = 10
egrid = [ngr] + [r for r in grid]
return egrid
# differentiation scheme
def ftcs(ngr, grid, s = s):
slices = len(grid[0])-1
for vl in range(1, slices):
ngr[vl] = \
grid[0][vl] + s*(grid[0][vl+1] - 2*grid[0][vl] + grid[0][vl-1])
return ngr
print("Computing...", end='\n\n')
solve_explicit_ode(ftcs, initial_conditions, boundary_conditions,
slices, itern, plot_frequency)
otprev = otpprev = np.zeros(slices+1)  # note: both names bind the same array object here
otprev[8:12] = 5; otpprev[8:12] = 5
otprev[0] = 0; otpprev[0] = 0
otprev[slices] = 10; otpprev[slices] = 10
oinitial_conditions = [otprev, otpprev]
print("+ Computing...", end='\n\n')
solve_explicit_ode(ftcs, oinitial_conditions, boundary_conditions,
slices, itern, plot_frequency)
deltat = 1e-2
def oboundary_conditions(lap, ngr, grid, deltat = deltat):
slices = len(grid[0])-1
ngr[0] = 0
    ngr[slices] = 10 * abs(np.cos((lap+1)*deltat/2))  # note: implemented as 10*|cos(t/2)| rather than the sin(t/2) stated in the prompt
egrid = [ngr] + [r for r in grid]
return egrid
# need to re-create the initial_conditions arrays
tprev = np.zeros(slices+1); tpprev = np.zeros(slices+1)
tprev[0] = 0; tpprev[0] = 0
tprev[slices] = 10; tpprev[slices] = 10
initial_conditions = [tprev, tpprev]
print("+ Computing...", end='\n\n')
solve_explicit_ode(ftcs, initial_conditions, oboundary_conditions,
slices, itern, plot_frequency)
deltat = 1e-3
def oboundary_conditions(lap, ngr, grid, deltat = deltat):
slices = len(grid[0])-1
ngr[0] = ngr[1]
ngr[slices] = ngr[slices-1]
egrid = [ngr] + [r for r in grid]
return egrid
otprev = otpprev = np.zeros(slices+1)  # note: otprev and otpprev alias the same array
otprev[8:12] = 5; otpprev[8:12] = 5
otprev[0] = 0; otpprev[0] = 0
oinitial_conditions = [otprev, otpprev]
print("+ Computing...", end='\n\n')
solve_explicit_ode(ftcs, oinitial_conditions, oboundary_conditions,
slices, itern, plot_frequency)
|
[
"package.solve_explicit_ode",
"package.redact_ex",
"numpy.zeros",
"numpy.cos"
] |
[((487, 512), 'package.redact_ex', 'redact_ex', (['EXERCISE_01', '(1)'], {}), '(EXERCISE_01, 1)\n', (496, 512), False, 'from package import redact_ex\n'), ((772, 792), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (780, 792), True, 'import numpy as np\n'), ((801, 821), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (809, 821), True, 'import numpy as np\n'), ((1377, 1477), 'package.solve_explicit_ode', 'solve_explicit_ode', (['ftcs', 'initial_conditions', 'boundary_conditions', 'slices', 'itern', 'plot_frequency'], {}), '(ftcs, initial_conditions, boundary_conditions, slices,\n itern, plot_frequency)\n', (1395, 1477), False, 'from package import solve_explicit_ode\n'), ((1499, 1519), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (1507, 1519), True, 'import numpy as np\n'), ((1704, 1805), 'package.solve_explicit_ode', 'solve_explicit_ode', (['ftcs', 'oinitial_conditions', 'boundary_conditions', 'slices', 'itern', 'plot_frequency'], {}), '(ftcs, oinitial_conditions, boundary_conditions, slices,\n itern, plot_frequency)\n', (1722, 1805), False, 'from package import solve_explicit_ode\n'), ((2095, 2115), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (2103, 2115), True, 'import numpy as np\n'), ((2124, 2144), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (2132, 2144), True, 'import numpy as np\n'), ((2287, 2388), 'package.solve_explicit_ode', 'solve_explicit_ode', (['ftcs', 'initial_conditions', 'oboundary_conditions', 'slices', 'itern', 'plot_frequency'], {}), '(ftcs, initial_conditions, oboundary_conditions, slices,\n itern, plot_frequency)\n', (2305, 2388), False, 'from package import solve_explicit_ode\n'), ((2623, 2643), 'numpy.zeros', 'np.zeros', (['(slices + 1)'], {}), '(slices + 1)\n', (2631, 2643), True, 'import numpy as np\n'), ((2787, 2889), 'package.solve_explicit_ode', 'solve_explicit_ode', (['ftcs', 'oinitial_conditions', 'oboundary_conditions', 'slices', 'itern', 'plot_frequency'], {}), '(ftcs, oinitial_conditions, oboundary_conditions, slices,\n itern, plot_frequency)\n', (2805, 2889), False, 'from package import solve_explicit_ode\n'), ((1953, 1983), 'numpy.cos', 'np.cos', (['((lap + 1) * deltat / 2)'], {}), '((lap + 1) * deltat / 2)\n', (1959, 1983), True, 'import numpy as np\n')]
|
import itertools
import regex as re
import numpy as np
# seed is fixed for reproducibility
np.random.seed(7)
from tensorflow import set_random_seed
set_random_seed(7)
from unidecode import unidecode
from delft.utilities.Tokenizer import tokenizeAndFilterSimple
from delft.utilities.bert.run_classifier_delft import DataProcessor
import delft.utilities.bert.tokenization as tokenization
from delft.utilities.bert.run_classifier_delft import InputExample
special_character_removal = re.compile(r'[^A-Za-z\.\-\?\!\,\#\@\% ]',re.IGNORECASE)
def to_vector_single(text, embeddings, maxlen=300):
"""
    Given a string, tokenize it, then convert it to a sequence of word embedding
    vectors using the provided embeddings, introducing <PAD> and <UNK> padding
    token vectors when appropriate
"""
tokens = tokenizeAndFilterSimple(clean_text(text))
window = tokens[-maxlen:]
# TBD: use better initializers (uniform, etc.)
    x = np.zeros((maxlen, embeddings.embed_size))
# TBD: padding should be left and which vector do we use for padding?
# and what about masking padding later for RNN?
for i, word in enumerate(window):
x[i,:] = embeddings.get_word_vector(word).astype('float32')
return x
def to_vector_elmo(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on ELMo contextualized embeddings
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_only_ELMo(subtokens)
"""
if use_token_dump:
return embeddings.get_sentence_vector_ELMo_with_token_dump(tokens)
"""
def to_vector_bert(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the BERT contextualized embeddings, introducing
padding token when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
subtokens.append(local_tokens)
vector = embeddings.get_sentence_vector_only_BERT(subtokens)
return vector
def to_vector_simple_with_elmo(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the concatenation of the provided static embeddings and
the ELMo contextualized embeddings, introducing <PAD> and <UNK>
    padding token vectors when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
if len(tokens[i]) < maxlen:
for i in range(0, maxlen-len(tokens[i])):
local_tokens.append(" ")
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_with_ELMo(subtokens)
def to_vector_simple_with_bert(tokens, embeddings, maxlen=300, lowercase=False, num_norm=False):
"""
Given a list of tokens convert it to a sequence of word embedding
vectors based on the concatenation of the provided static embeddings and
    the BERT contextualized embeddings, introducing padding token vectors
when appropriate
"""
subtokens = []
for i in range(0, len(tokens)):
local_tokens = []
for j in range(0, min(len(tokens[i]), maxlen)):
if lowercase:
local_tokens.append(lower(tokens[i][j]))
else:
local_tokens.append(tokens[i][j])
if len(tokens[i]) < maxlen:
for i in range(0, maxlen-len(tokens[i])):
local_tokens.append(" ")
subtokens.append(local_tokens)
return embeddings.get_sentence_vector_with_BERT(subtokens)
def clean_text(text):
x_ascii = unidecode(text)
x_clean = special_character_removal.sub('',x_ascii)
return x_clean
def lower(word):
return word.lower()
def normalize_num(word):
return re.sub(r'[0-90123456789]', r'0', word)
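# e.g. normalize_num("ABC-123") -> "ABC-000"; the character class above maps
# both ASCII and fullwidth digits to '0'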
class BERT_classifier_processor(DataProcessor):
"""
BERT data processor for classification
"""
def __init__(self, labels=None, x_train=None, y_train=None, x_test=None, y_test=None):
self.list_classes = labels
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
def get_train_examples(self, x_train=None, y_train=None):
"""See base class."""
if x_train is not None:
self.x_train = x_train
if y_train is not None:
self.y_train = y_train
examples, _ = self.create_examples(self.x_train, self.y_train)
return examples
def get_labels(self):
"""See base class."""
return self.list_classes
def get_test_examples(self, x_test=None, y_test=None):
"""See base class."""
if x_test is not None:
self.x_test = x_test
if y_test is not None:
self.y_test = y_test
examples, results = self.create_examples(self.x_test, self.y_test)
return examples, results
def create_examples(self, x_s, y_s=None):
examples = []
valid_classes = np.zeros((y_s.shape[0],len(self.list_classes)))
accumul = 0
for (i, x) in enumerate(x_s):
y = y_s[i]
guid = i
text_a = tokenization.convert_to_unicode(x)
#the_class = self._rewrite_classes(y, i)
ind, = np.where(y == 1)
the_class = self.list_classes[ind[0]]
if the_class is None:
#print(text_a)
continue
if the_class not in self.list_classes:
#the_class = 'other'
continue
label = tokenization.convert_to_unicode(the_class)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
valid_classes[accumul] = y
accumul += 1
return examples, valid_classes
def create_inputs(self, x_s, dummy_label='dummy'):
examples = []
# dummy label to avoid breaking the bert base code
label = tokenization.convert_to_unicode(dummy_label)
for (i, x) in enumerate(x_s):
guid = i
text_a = tokenization.convert_to_unicode(x)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
|
[
"unidecode.unidecode",
"numpy.random.seed",
"regex.compile",
"numpy.zeros",
"tensorflow.set_random_seed",
"regex.sub",
"numpy.where",
"delft.utilities.bert.tokenization.convert_to_unicode",
"delft.utilities.bert.run_classifier_delft.InputExample"
] |
[((91, 108), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (105, 108), True, 'import numpy as np\n'), ((148, 166), 'tensorflow.set_random_seed', 'set_random_seed', (['(7)'], {}), '(7)\n', (163, 166), False, 'from tensorflow import set_random_seed\n'), ((483, 546), 'regex.compile', 're.compile', (['"""[^A-Za-z\\\\.\\\\-\\\\?\\\\!\\\\,\\\\#\\\\@\\\\% ]"""', 're.IGNORECASE'], {}), "('[^A-Za-z\\\\.\\\\-\\\\?\\\\!\\\\,\\\\#\\\\@\\\\% ]', re.IGNORECASE)\n", (493, 546), True, 'import regex as re\n'), ((949, 990), 'numpy.zeros', 'np.zeros', (['(maxlen, embeddings.embed_size)'], {}), '((maxlen, embeddings.embed_size))\n', (957, 990), True, 'import numpy as np\n'), ((4468, 4483), 'unidecode.unidecode', 'unidecode', (['text'], {}), '(text)\n', (4477, 4483), False, 'from unidecode import unidecode\n'), ((4641, 4677), 'regex.sub', 're.sub', (['"""[0-90123456789]"""', '"""0"""', 'word'], {}), "('[0-90123456789]', '0', word)\n", (4647, 4677), True, 'import regex as re\n'), ((6829, 6873), 'delft.utilities.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['dummy_label'], {}), '(dummy_label)\n', (6860, 6873), True, 'import delft.utilities.bert.tokenization as tokenization\n'), ((6037, 6071), 'delft.utilities.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['x'], {}), '(x)\n', (6068, 6071), True, 'import delft.utilities.bert.tokenization as tokenization\n'), ((6144, 6160), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (6152, 6160), True, 'import numpy as np\n'), ((6434, 6476), 'delft.utilities.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['the_class'], {}), '(the_class)\n', (6465, 6476), True, 'import delft.utilities.bert.tokenization as tokenization\n'), ((6954, 6988), 'delft.utilities.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['x'], {}), '(x)\n', (6985, 6988), True, 'import delft.utilities.bert.tokenization as tokenization\n'), ((6505, 6569), 'delft.utilities.bert.run_classifier_delft.InputExample', 'InputExample', ([], {'guid': 'guid', 'text_a': 'text_a', 'text_b': 'None', 'label': 'label'}), '(guid=guid, text_a=text_a, text_b=None, label=label)\n', (6517, 6569), False, 'from delft.utilities.bert.run_classifier_delft import InputExample\n'), ((7018, 7082), 'delft.utilities.bert.run_classifier_delft.InputExample', 'InputExample', ([], {'guid': 'guid', 'text_a': 'text_a', 'text_b': 'None', 'label': 'label'}), '(guid=guid, text_a=text_a, text_b=None, label=label)\n', (7030, 7082), False, 'from delft.utilities.bert.run_classifier_delft import InputExample\n')]
|
import dlib
if dlib.cuda.get_num_devices()>=1:
print("Enabeling CUDA")
dlib.DLIB_USE_CUDA = True
dlib.USE_AVX_INSTRUCTIONS = True
dlib.DLIB_USE_BLAS, dlib.DLIB_USE_LAPACK, dlib.USE_NEON_INSTRUCTIONS = True, True, True  # note: these flags report compile-time build options; reassigning them does not reconfigure the compiled library
print(dlib.DLIB_USE_CUDA, dlib.USE_AVX_INSTRUCTIONS, dlib.DLIB_USE_BLAS, dlib.DLIB_USE_LAPACK, dlib.USE_NEON_INSTRUCTIONS)
import face_recognition
import os
import cv2
KNOWN_FACES_DIR = 'private'
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn' # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model
class FaceDetector():
def __init__(self):
super().__init__()
self.known_faces, self.known_names = self.explore_known_faces()
print(len(self.known_faces), len(self.known_names))
def explore_known_faces(self):
print('Loading known faces...')
known_faces = []
known_names = []
        # We organize known faces as subfolders of KNOWN_FACES_DIR
# Each subfolder's name becomes our label (name)
for name in os.listdir(KNOWN_FACES_DIR):
# Next we load every file of faces of known person
for filename in os.listdir(f'{KNOWN_FACES_DIR}/{name}'):
# Load an image
image = face_recognition.load_image_file(f'{KNOWN_FACES_DIR}/{name}/{filename}')
# Get 128-dimension face encoding
# Always returns a list of found faces, for this purpose we take first face only (assuming one face per image as you can't be twice on one image)
try:
encoding = face_recognition.face_encodings(image)[0]
# Append encodings and name
known_faces.append(encoding)
known_names.append(name)
except Exception as e:
os.remove(f'{KNOWN_FACES_DIR}/{name}/{filename}')
print(str(e), f"\nremoved {KNOWN_FACES_DIR}/{name}/{filename}")
return known_faces, known_names
def get_face(self, image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
locations = face_recognition.face_locations(image, model=MODEL)
encodings = face_recognition.face_encodings(image, locations)
for face_encoding, face_location in zip(encodings, locations):
results = face_recognition.compare_faces(self.known_faces, face_encoding, TOLERANCE)
match = None
if True in results: # If at least one is true, get a name of first of found labels
match = self.known_names[results.index(True)]
# print(f' - {match} from {results}')
# Each location contains positions in order: top, right, bottom, left
top_left = [face_location[0], face_location[3]]
bottom_right = [face_location[2], face_location[1]]
return top_left, bottom_right, match
return [0,0],[0,0], "Unknown"
|
[
"os.remove",
"face_recognition.compare_faces",
"cv2.cvtColor",
"face_recognition.face_encodings",
"dlib.cuda.get_num_devices",
"face_recognition.face_locations",
"face_recognition.load_image_file",
"os.listdir"
] |
[((15, 42), 'dlib.cuda.get_num_devices', 'dlib.cuda.get_num_devices', ([], {}), '()\n', (40, 42), False, 'import dlib\n'), ((1085, 1112), 'os.listdir', 'os.listdir', (['KNOWN_FACES_DIR'], {}), '(KNOWN_FACES_DIR)\n', (1095, 1112), False, 'import os\n'), ((2109, 2147), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2121, 2147), False, 'import cv2\n'), ((2168, 2219), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image'], {'model': 'MODEL'}), '(image, model=MODEL)\n', (2199, 2219), False, 'import face_recognition\n'), ((2240, 2289), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['image', 'locations'], {}), '(image, locations)\n', (2271, 2289), False, 'import face_recognition\n'), ((1206, 1245), 'os.listdir', 'os.listdir', (['f"""{KNOWN_FACES_DIR}/{name}"""'], {}), "(f'{KNOWN_FACES_DIR}/{name}')\n", (1216, 1245), False, 'import os\n'), ((2384, 2458), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['self.known_faces', 'face_encoding', 'TOLERANCE'], {}), '(self.known_faces, face_encoding, TOLERANCE)\n', (2414, 2458), False, 'import face_recognition\n'), ((1304, 1376), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['f"""{KNOWN_FACES_DIR}/{name}/{filename}"""'], {}), "(f'{KNOWN_FACES_DIR}/{name}/{filename}')\n", (1336, 1376), False, 'import face_recognition\n'), ((1642, 1680), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['image'], {}), '(image)\n', (1673, 1680), False, 'import face_recognition\n'), ((1886, 1935), 'os.remove', 'os.remove', (['f"""{KNOWN_FACES_DIR}/{name}/{filename}"""'], {}), "(f'{KNOWN_FACES_DIR}/{name}/{filename}')\n", (1895, 1935), False, 'import os\n')]
|
from discord.ext import commands
bot = commands.Bot(command_prefix=',')
@bot.command()
async def react(ctx, id, emoji):
message = await ctx.fetch_message(id)
await message.add_reaction(emoji)
# Usage: ,react [MESSAGE_ID] [EMOJI]
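# Note (added): the snippet defines the command but never starts the bot; a
# real deployment would end with bot.run(<token>) using an actual bot token.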
|
[
"discord.ext.commands.Bot"
] |
[((41, 73), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '""","""'}), "(command_prefix=',')\n", (53, 73), False, 'from discord.ext import commands\n')]
|
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* Binary Execution Wrappers **************************************************************
from psyneulink.core.globals.utilities import NodeRole
import copy, ctypes
from collections import defaultdict
import numpy as np
from .builder_context import *
from . import helpers, jit_engine
from .debug import debug_env
__all__ = ['CompExecution', 'FuncExecution', 'MechExecution']
def _convert_ctype_to_python(x):
if isinstance(x, ctypes.Structure):
return [_convert_ctype_to_python(getattr(x, field_name)) for field_name, _ in x._fields_]
if isinstance(x, ctypes.Array):
return [_convert_ctype_to_python(num) for num in x]
if isinstance(x, ctypes.c_double):
return x.value
if isinstance(x, (float, int)):
return x
assert False, "Don't know how to convert: {}".format(x)
def _tupleize(x):
try:
return tuple(_tupleize(y) for y in x)
except TypeError:
return x if x is not None else tuple()
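# Quick sanity examples for the helpers above (added):
#   _convert_ctype_to_python(ctypes.c_double(1.5)) -> 1.5
#   _tupleize([[1, 2], [3]]) -> ((1, 2), (3,)); _tupleize(None) -> ()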
class CUDAExecution:
def __init__(self, buffers=['param_struct', 'context_struct']):
for b in buffers:
setattr(self, "_buffer_cuda_" + b, None)
self._uploaded_bytes = 0
self._downloaded_bytes = 0
self.__cuda_out_buf = None
self.__debug_env = debug_env
self.__vo_ty = None
def __del__(self):
if "cuda_data" in self.__debug_env:
try:
name = self._bin_func.name
except:
name = self._composition.name
print("{} CUDA uploaded: {}".format(name, self._uploaded_bytes))
print("{} CUDA downloaded: {}".format(name, self._downloaded_bytes))
@property
def _vo_ty(self):
if self.__vo_ty is None:
self.__vo_ty = self._bin_func.byref_arg_types[3]
if len(self._execution_ids) > 1:
self.__vo_ty = self.__vo_ty * len(self._execution_ids)
return self.__vo_ty
def _get_ctype_bytes(self, data):
# Return dummy buffer. CUDA does not handle 0 size well.
if ctypes.sizeof(data) == 0:
return bytearray(b'aaaa')
return bytearray(data)
def upload_ctype(self, data):
self._uploaded_bytes += ctypes.sizeof(data)
return jit_engine.pycuda.driver.to_device(self._get_ctype_bytes(data))
def download_ctype(self, source, ty):
self._downloaded_bytes += ctypes.sizeof(ty)
out_buf = bytearray(ctypes.sizeof(ty))
jit_engine.pycuda.driver.memcpy_dtoh(out_buf, source)
return ty.from_buffer(out_buf)
def __getattr__(self, attribute):
if not attribute.startswith("_cuda"):
return getattr(super(), attribute)
private_attr = "_buffer" + attribute
if getattr(self, private_attr) is None:
new_buffer = self.upload_ctype(getattr(self, attribute[5:]))
setattr(self, private_attr, new_buffer)
return getattr(self, private_attr)
@property
def _cuda_out_buf(self):
if self.__cuda_out_buf is None:
size = ctypes.sizeof(self._vo_ty)
self.__cuda_out_buf = jit_engine.pycuda.driver.mem_alloc(size)
return self.__cuda_out_buf
def cuda_execute(self, variable):
# Create input parameter
new_var = np.asfarray(variable)
data_in = jit_engine.pycuda.driver.In(new_var)
self._uploaded_bytes += new_var.nbytes
self._bin_func.cuda_call(self._cuda_param_struct,
self._cuda_context_struct,
data_in, self._cuda_out_buf,
threads=len(self._execution_ids))
# Copy the result from the device
ct_res = self.download_ctype(self._cuda_out_buf, self._vo_ty)
return _convert_ctype_to_python(ct_res)
class FuncExecution(CUDAExecution):
def __init__(self, component, execution_ids=[None]):
super().__init__()
self._bin_func = component._llvmBinFunction
self._execution_ids = execution_ids
self._component = component
par_struct_ty, ctx_struct_ty, vi_ty, vo_ty = self._bin_func.byref_arg_types
if len(execution_ids) > 1:
self._bin_multirun = self._bin_func.get_multi_run()
par_struct_ty = par_struct_ty * len(execution_ids)
ctx_struct_ty = ctx_struct_ty * len(execution_ids)
vo_ty = vo_ty * len(execution_ids)
vi_ty = vi_ty * len(execution_ids)
par_initializer = (component._get_param_initializer(ex_id) for ex_id in execution_ids)
ctx_initializer = (component._get_context_initializer(ex_id) for ex_id in execution_ids)
self.__param_struct = par_struct_ty(*par_initializer)
self.__context_struct = ctx_struct_ty(*ctx_initializer)
self._ct_len = ctypes.c_int(len(execution_ids))
self._ct_vo = vo_ty()
self._vi_ty = vi_ty
def _get_compilation_param(self, name, initializer, arg, execution_id):
param = getattr(self._component._compilation_data, name)
struct = param.get(execution_id)
if struct is None:
initializer = getattr(self._component, initializer)(execution_id)
struct_ty = self._bin_func.byref_arg_types[arg]
struct = struct_ty(*initializer)
param.set(struct, execution_context=execution_id)
return struct
@property
def _param_struct(self):
if len(self._execution_ids) > 1:
return self.__param_struct
return self._get_compilation_param('parameter_struct', '_get_param_initializer', 0, self._execution_ids[0])
@property
def _context_struct(self):
if len(self._execution_ids) > 1:
return self.__context_struct
return self._get_compilation_param('context_struct', '_get_context_initializer', 1, self._execution_ids[0])
def execute(self, variable):
new_variable = np.asfarray(variable)
if len(self._execution_ids) > 1:
            # wrap_call casts the arguments so we only need contiguous data
# layout
ct_vi = np.ctypeslib.as_ctypes(new_variable)
self._bin_multirun.wrap_call(self._param_struct,
self._context_struct,
ct_vi, self._ct_vo, self._ct_len)
else:
ct_vi = new_variable.ctypes.data_as(ctypes.POINTER(self._vi_ty))
self._bin_func(ctypes.byref(self._param_struct),
ctypes.byref(self._context_struct),
ct_vi, ctypes.byref(self._ct_vo))
return _convert_ctype_to_python(self._ct_vo)
class MechExecution(FuncExecution):
def execute(self, variable):
# convert to 3d. we always assume that:
# a) the input is vector of input states
# b) input states take vector of projection outputs
# c) projection output is a vector (even 1 element vector)
new_var = np.asfarray([np.atleast_2d(x) for x in variable])
return super().execute(new_var)
class CompExecution(CUDAExecution):
    def __init__(self, composition, execution_ids=[None]):
super().__init__(buffers=['context_struct', 'param_struct', 'data_struct', 'conditions'])
self._composition = composition
self._execution_ids = execution_ids
self.__bin_exec_func = None
self.__bin_exec_multi_func = None
self.__bin_func = None
self.__bin_run_func = None
self.__bin_run_multi_func = None
self.__debug_env = debug_env
self.__frozen_vals = None
# TODO: Consolidate these
if len(execution_ids) > 1:
# At least the input_CIM wrapper should be generated
input_cim_fn = composition._get_node_wrapper(composition.input_CIM)._llvm_function
# Input structures
# TODO: Use the compiled version to get these
c_context = _convert_llvm_ir_to_ctype(input_cim_fn.args[0].type.pointee)
c_param = _convert_llvm_ir_to_ctype(input_cim_fn.args[1].type.pointee)
c_data = _convert_llvm_ir_to_ctype(input_cim_fn.args[3].type.pointee)
c_context = c_context * len(execution_ids)
c_param = c_param * len(execution_ids)
c_data = c_data * len(execution_ids)
ctx_initializer = (composition._get_context_initializer(ex_id) for ex_id in execution_ids)
par_initializer = (composition._get_param_initializer(ex_id) for ex_id in execution_ids)
data_initializer = (composition._get_data_initializer(ex_id) for ex_id in execution_ids)
# Instantiate structures
self.__context_struct = c_context(*ctx_initializer)
self.__param_struct = c_param(*par_initializer)
self.__data_struct = c_data(*data_initializer)
self.__conds = None
self._ct_len = ctypes.c_int(len(execution_ids))
@property
def _bin_func(self):
if self.__bin_func is not None:
assert len(self._execution_ids) == 1
return self.__bin_func
if self.__bin_exec_func is not None:
return self.__bin_exec_func
if self.__bin_run_func is not None:
return self.__bin_run_func
assert False, "Binary function not set for execution!"
def _set_bin_node(self, node):
assert node in self._composition._all_nodes
self.__bin_func = self._composition._get_bin_node(node)
@property
def _conditions(self):
if len(self._execution_ids) > 1:
if self.__conds is None:
cond_type = self._bin_func.byref_arg_types[4] * len(self._execution_ids)
gen = helpers.ConditionGenerator(None, self._composition)
cond_initializer = (gen.get_condition_initializer() for _ in self._execution_ids)
self.__conds = cond_type(*cond_initializer)
return self.__conds
conds = self._composition._compilation_data.scheduler_conditions.get(self._execution_ids[0])
if conds is None:
cond_type = self._bin_func.byref_arg_types[4]
gen = helpers.ConditionGenerator(None, self._composition)
cond_initializer = gen.get_condition_initializer()
conds = cond_type(*cond_initializer)
self._composition._compilation_data.scheduler_conditions.set(conds, execution_context=self._execution_ids[0])
return conds
def _get_compilation_param(self, name, initializer, arg, execution_id):
param = getattr(self._composition._compilation_data, name)
struct = param.get(execution_id)
if struct is None:
initializer = getattr(self._composition, initializer)(execution_id)
struct_ty = self._bin_func.byref_arg_types[arg]
struct = struct_ty(*initializer)
param.set(struct, execution_context=execution_id)
return struct
@property
def _param_struct(self):
if len(self._execution_ids) > 1:
return self.__param_struct
return self._get_compilation_param('parameter_struct', '_get_param_initializer', 1, self._execution_ids[0])
@property
def _context_struct(self):
if len(self._execution_ids) > 1:
return self.__context_struct
return self._get_compilation_param('context_struct', '_get_context_initializer', 0, self._execution_ids[0])
@property
def _data_struct(self):
if len(self._execution_ids) > 1:
return self.__data_struct
# Run wrapper changed argument order
arg = 2 if len(self._bin_func.byref_arg_types) > 5 else 3
return self._get_compilation_param('data_struct', '_get_data_initializer', arg, self._execution_ids[0])
@_data_struct.setter
def _data_struct(self, data_struct):
if len(self._execution_ids) > 1:
self.__data_struct = data_struct
else:
self._composition._compilation_data.data_struct.set(data_struct, execution_context = self._execution_ids[0])
def _extract_node_struct(self, node, data):
field = data._fields_[0][0]
res_struct = getattr(data, field)
index = self._composition._get_node_index(node)
field = res_struct._fields_[index][0]
res_struct = getattr(res_struct, field)
return _convert_ctype_to_python(res_struct)
def extract_node_struct(self, node, struct):
if len(self._execution_ids) > 1:
return [self._extract_node_struct(node, struct[i]) for i, _ in enumerate(self._execution_ids)]
else:
return self._extract_node_struct(node, struct)
def extract_frozen_node_output(self, node):
return self.extract_node_struct(node, self.__frozen_vals)
def extract_node_output(self, node):
return self.extract_node_struct(node, self._data_struct)
def extract_node_state(self, node):
return self.extract_node_struct(node, self._context_struct)
def extract_node_params(self, node):
return self.extract_node_struct(node, self._param_struct)
def insert_node_output(self, node, data):
my_field_name = self._data_struct._fields_[0][0]
my_res_struct = getattr(self._data_struct, my_field_name)
index = self._composition._get_node_index(node)
node_field_name = my_res_struct._fields_[index][0]
setattr(my_res_struct, node_field_name, _tupleize(data))
def _get_input_struct(self, inputs):
origins = self._composition.get_nodes_by_role(NodeRole.INPUT)
# Either node execute or composition execute, either way the
# input_CIM should be ready
bin_input_node = self._composition._get_bin_node(self._composition.input_CIM)
c_input = bin_input_node.byref_arg_types[2]
if len(self._execution_ids) > 1:
c_input = c_input * len(self._execution_ids)
# Read provided input data and separate each input state
if len(self._execution_ids) > 1:
input_data = []
for inp in inputs:
input_data.append(([x] for m in origins for x in inp[m]))
else:
input_data = ([x] for m in origins for x in inputs[m])
return c_input(*_tupleize(input_data))
def freeze_values(self):
self.__frozen_vals = copy.deepcopy(self._data_struct)
    def execute_node(self, node, inputs=None, execution_id=None):
# We need to reconstruct the inputs here if they were not provided.
# This happens during node execution of nested compositions.
if inputs is None and node is self._composition.input_CIM:
# This assumes origin mechanisms are in the same order as
# CIM input states
origins = (n for n in self._composition.get_nodes_by_role(NodeRole.INPUT) for istate in n.input_states)
input_data = ([proj.parameters.value.get(execution_id) for proj in state.all_afferents] for state in node.input_states)
inputs = defaultdict(list)
for n, d in zip(origins, input_data):
inputs[n].append(d[0])
if inputs is not None:
inputs = self._get_input_struct(inputs)
assert inputs is not None or node is not self._composition.input_CIM
# Set bin node to make sure self._*struct works as expected
self._set_bin_node(node)
if node is not self._composition.input_CIM and self.__frozen_vals is None:
self.freeze_values()
self._bin_func.wrap_call(self._context_struct, self._param_struct,
inputs, self.__frozen_vals, self._data_struct)
if "comp_node_debug" in self.__debug_env:
print("RAN: {}. CTX: {}".format(node, self.extract_node_state(node)))
print("RAN: {}. Params: {}".format(node, self.extract_node_params(node)))
print("RAN: {}. Results: {}".format(node, self.extract_node_output(node)))
@property
def _bin_exec_func(self):
if self.__bin_exec_func is None:
self.__bin_exec_func = self._composition._get_bin_execution()
return self.__bin_exec_func
@property
def _bin_exec_multi_func(self):
if self.__bin_exec_multi_func is None:
self.__bin_exec_multi_func = self._bin_exec_func.get_multi_run()
return self.__bin_exec_multi_func
def execute(self, inputs):
inputs = self._get_input_struct(inputs)
if len(self._execution_ids) > 1:
self._bin_exec_multi_func.wrap_call(self._context_struct, self._param_struct,
inputs, self._data_struct, self._conditions, self._ct_len)
else:
self._bin_exec_func.wrap_call(self._context_struct, self._param_struct,
inputs, self._data_struct, self._conditions)
def cuda_execute(self, inputs):
# Create input buffer
inputs = self._get_input_struct(inputs)
data_in = self.upload_ctype(inputs)
self._bin_exec_func.cuda_call(self._cuda_context_struct, self._cuda_param_struct,
data_in, self._cuda_data_struct, self._cuda_conditions,
threads=len(self._execution_ids))
# Copy the data struct from the device
self._data_struct = self.download_ctype(self._cuda_data_struct, self._vo_ty)
# Methods used to accelerate "Run"
def _get_run_input_struct(self, inputs, num_input_sets):
origins = self._composition.get_nodes_by_role(NodeRole.INPUT)
input_type = self._composition._get_bin_run().byref_arg_types[3]
c_input = input_type * num_input_sets
if len(self._execution_ids) > 1:
c_input = c_input * len(self._execution_ids)
run_inputs = []
for inp in inputs:
run_inps = []
# Extract inputs for each trial
for i in range(num_input_sets):
run_inps.append([])
for m in origins:
run_inps[i] += [[v] for v in inp[m][i]]
run_inputs.append(run_inps)
else:
run_inputs = []
# Extract inputs for each trial
for i in range(num_input_sets):
run_inputs.append([])
for m in origins:
run_inputs[i] += [[v] for v in inputs[m][i]]
return c_input(*_tupleize(run_inputs))
@property
def _bin_run_func(self):
if self.__bin_run_func is None:
self.__bin_run_func = self._composition._get_bin_run()
return self.__bin_run_func
@property
def _bin_run_multi_func(self):
if self.__bin_run_multi_func is None:
self.__bin_run_multi_func = self._bin_run_func.get_multi_run()
return self.__bin_run_multi_func
def run(self, inputs, runs, num_input_sets):
inputs = self._get_run_input_struct(inputs, num_input_sets)
ct_vo = self._bin_run_func.byref_arg_types[4] * runs
if len(self._execution_ids) > 1:
ct_vo = ct_vo * len(self._execution_ids)
outputs = ct_vo()
runs_count = ctypes.c_int(runs)
input_count = ctypes.c_int(num_input_sets)
if len(self._execution_ids) > 1:
self._bin_run_multi_func.wrap_call(self._context_struct, self._param_struct,
self._data_struct, inputs, outputs,
runs_count, input_count, self._ct_len)
else:
self._bin_run_func.wrap_call(self._context_struct, self._param_struct,
self._data_struct, inputs, outputs,
runs_count, input_count)
return _convert_ctype_to_python(outputs)
def cuda_run(self, inputs, runs, num_input_sets):
# Create input buffer
inputs = self._get_run_input_struct(inputs, num_input_sets)
data_in = self.upload_ctype(inputs)
# Create output buffer
output_type = (self._bin_run_func.byref_arg_types[4] * runs)
if len(self._execution_ids) > 1:
output_type = output_type * len(self._execution_ids)
output_size = ctypes.sizeof(output_type)
data_out = jit_engine.pycuda.driver.mem_alloc(output_size)
runs_count = jit_engine.pycuda.driver.In(np.int32(runs))
input_count = jit_engine.pycuda.driver.In(np.int32(num_input_sets))
self._uploaded_bytes += 8 # runs_count + input_count
self._bin_run_func.cuda_call(self._cuda_context_struct,
self._cuda_param_struct,
self._cuda_data_struct,
data_in, data_out, runs_count, input_count,
threads=len(self._execution_ids))
# Copy the data struct from the device
ct_out = self.download_ctype(data_out, output_type)
return _convert_ctype_to_python(ct_out)
|
[
"numpy.atleast_2d",
"copy.deepcopy",
"ctypes.c_int",
"numpy.ctypeslib.as_ctypes",
"ctypes.byref",
"ctypes.sizeof",
"numpy.asfarray",
"collections.defaultdict",
"numpy.int32",
"ctypes.POINTER"
] |
[((2832, 2851), 'ctypes.sizeof', 'ctypes.sizeof', (['data'], {}), '(data)\n', (2845, 2851), False, 'import copy, ctypes\n'), ((3008, 3025), 'ctypes.sizeof', 'ctypes.sizeof', (['ty'], {}), '(ty)\n', (3021, 3025), False, 'import copy, ctypes\n'), ((3899, 3920), 'numpy.asfarray', 'np.asfarray', (['variable'], {}), '(variable)\n', (3910, 3920), True, 'import numpy as np\n'), ((6567, 6588), 'numpy.asfarray', 'np.asfarray', (['variable'], {}), '(variable)\n', (6578, 6588), True, 'import numpy as np\n'), ((15007, 15039), 'copy.deepcopy', 'copy.deepcopy', (['self._data_struct'], {}), '(self._data_struct)\n', (15020, 15039), False, 'import copy, ctypes\n'), ((19847, 19865), 'ctypes.c_int', 'ctypes.c_int', (['runs'], {}), '(runs)\n', (19859, 19865), False, 'import copy, ctypes\n'), ((19888, 19916), 'ctypes.c_int', 'ctypes.c_int', (['num_input_sets'], {}), '(num_input_sets)\n', (19900, 19916), False, 'import copy, ctypes\n'), ((20919, 20945), 'ctypes.sizeof', 'ctypes.sizeof', (['output_type'], {}), '(output_type)\n', (20932, 20945), False, 'import copy, ctypes\n'), ((2670, 2689), 'ctypes.sizeof', 'ctypes.sizeof', (['data'], {}), '(data)\n', (2683, 2689), False, 'import copy, ctypes\n'), ((3054, 3071), 'ctypes.sizeof', 'ctypes.sizeof', (['ty'], {}), '(ty)\n', (3067, 3071), False, 'import copy, ctypes\n'), ((3672, 3698), 'ctypes.sizeof', 'ctypes.sizeof', (['self._vo_ty'], {}), '(self._vo_ty)\n', (3685, 3698), False, 'import copy, ctypes\n'), ((6749, 6785), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['new_variable'], {}), '(new_variable)\n', (6771, 6785), True, 'import numpy as np\n'), ((15691, 15708), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15702, 15708), False, 'from collections import defaultdict\n'), ((21064, 21078), 'numpy.int32', 'np.int32', (['runs'], {}), '(runs)\n', (21072, 21078), True, 'import numpy as np\n'), ((21130, 21154), 'numpy.int32', 'np.int32', (['num_input_sets'], {}), '(num_input_sets)\n', (21138, 21154), True, 'import numpy as np\n'), ((7047, 7074), 'ctypes.POINTER', 'ctypes.POINTER', (['self._vi_ty'], {}), '(self._vi_ty)\n', (7061, 7074), False, 'import copy, ctypes\n'), ((7103, 7135), 'ctypes.byref', 'ctypes.byref', (['self._param_struct'], {}), '(self._param_struct)\n', (7115, 7135), False, 'import copy, ctypes\n'), ((7164, 7198), 'ctypes.byref', 'ctypes.byref', (['self._context_struct'], {}), '(self._context_struct)\n', (7176, 7198), False, 'import copy, ctypes\n'), ((7234, 7259), 'ctypes.byref', 'ctypes.byref', (['self._ct_vo'], {}), '(self._ct_vo)\n', (7246, 7259), False, 'import copy, ctypes\n'), ((7642, 7658), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (7655, 7658), True, 'import numpy as np\n')]
|
import cv2
import os
data={"label_name":[],"no_of_video":[], "overall_fps":[]}
def TrainingVideoFiles(video_location, video_name):
    files = os.listdir(video_location)
data['label_name'].append(video_name)
data['no_of_video'].append(len(files))
fps=0
try:
os.mkdir("tf_files/mudras/"+video_name)
    except FileExistsError:
print("Directory Already Exists")
for index,file in enumerate(files):
vidcap = cv2.VideoCapture(video_location+"/"+file)
success, image = vidcap.read()
count = 1
while success:
cv2.imwrite("./tf_files/mudras/%s/image%d_%d.jpg" % (video_name,index, count), image)
success, image = vidcap.read()
print('Saved image',video_name,index,"-", count, end="\r")
count += 1
fps+=count
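        vidcap.release()  # added: free the capture handle before moving to the next file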
data['overall_fps'].append(fps)
if __name__ == '__main__':
for file in os.listdir("./offline_training"):
TrainingVideoFiles("./offline_training/"+file, file)
print("\n\n%12s|%12s|%12s"%("Label Name", "No of Videos", "Overall Fps"))
print("%12s|%12s|%12s"%("="*12,"="*12, "="*12))
for label, vid, fps in zip(data['label_name'],data['no_of_video'], data['overall_fps']):
print("%-12s|%12d|%12d"%(label, vid, fps))
input("Press any key to exit...")
|
[
"cv2.VideoCapture",
"os.mkdir",
"os.listdir",
"cv2.imwrite"
] |
[((949, 981), 'os.listdir', 'os.listdir', (['"""./offline_training"""'], {}), "('./offline_training')\n", (959, 981), False, 'import os\n'), ((318, 359), 'os.mkdir', 'os.mkdir', (["('tf_files/mudras/' + video_name)"], {}), "('tf_files/mudras/' + video_name)\n", (326, 359), False, 'import os\n'), ((477, 522), 'cv2.VideoCapture', 'cv2.VideoCapture', (["(video_location + '/' + file)"], {}), "(video_location + '/' + file)\n", (493, 522), False, 'import cv2\n'), ((173, 199), 'os.listdir', 'os.listdir', (['video_location'], {}), '(video_location)\n', (183, 199), False, 'import os\n'), ((611, 701), 'cv2.imwrite', 'cv2.imwrite', (["('./tf_files/mudras/%s/image%d_%d.jpg' % (video_name, index, count))", 'image'], {}), "('./tf_files/mudras/%s/image%d_%d.jpg' % (video_name, index,\n count), image)\n", (622, 701), False, 'import cv2\n')]
|
from unidecode import unidecode
def compare_name(name_1, name_2):
name_1 = unidecode(name_1).lower()
name_2 = unidecode(name_2).lower()
return name_1 == name_2
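# e.g. compare_name("José", "jose") -> True, since unidecode("José") == "Jose"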
|
[
"unidecode.unidecode"
] |
[((85, 102), 'unidecode.unidecode', 'unidecode', (['name_1'], {}), '(name_1)\n', (94, 102), False, 'from unidecode import unidecode\n'), ((128, 145), 'unidecode.unidecode', 'unidecode', (['name_2'], {}), '(name_2)\n', (137, 145), False, 'from unidecode import unidecode\n')]
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url('^$', views.index, name='index'),
    url('^image/', views.single_image, name='single_image'),
    url('^location/', views.images_by_location, name='location'),
url('^category/', views.images_by_category, name='category'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
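# Note (added): static() only serves media while DEBUG is True; in production
# the web server should serve MEDIA_ROOT directly.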
|
[
"django.conf.urls.static.static",
"django.conf.urls.url"
] |
[((150, 186), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (153, 186), False, 'from django.conf.urls import url\n'), ((190, 245), 'django.conf.urls.url', 'url', (['"""^image/"""', 'views.single_image'], {'name': '"""single_image"""'}), "('^image/', views.single_image, name='single_image')\n", (193, 245), False, 'from django.conf.urls import url\n'), ((249, 309), 'django.conf.urls.url', 'url', (['"""^location/"""', 'views.images_by_location'], {'name': '"""location"""'}), "('^location/', views.images_by_location, name='location')\n", (252, 309), False, 'from django.conf.urls import url\n'), ((313, 373), 'django.conf.urls.url', 'url', (['"""^category/"""', 'views.images_by_category'], {'name': '"""category"""'}), "('^category/', views.images_by_category, name='category')\n", (316, 373), False, 'from django.conf.urls import url\n'), ((415, 476), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (421, 476), False, 'from django.conf.urls.static import static\n')]
|
"""
---
title: Generative Adversarial Networks (GAN)
summary: A simple PyTorch implementation/tutorial of Generative Adversarial Networks (GAN) loss functions.
---
# Generative Adversarial Networks (GAN)
This is an implementation of
[Generative Adversarial Networks](https://arxiv.org/abs/1406.2661).
The generator, $G(\pmb{z}; \theta_g)$ generates samples that match the
distribution of data, while the discriminator, $D(\pmb{x}; \theta_d)$
gives the probability that $\pmb{x}$ came from data rather than $G$.
We train $D$ and $G$ simultaneously on a two-player min-max game with value
function $V(G, D)$.
$$\min_G \max_D V(D, G) =
\mathop{\mathbb{E}}_{\pmb{x} \sim p_{data}(\pmb{x})}
\big[\log D(\pmb{x})\big] +
\mathop{\mathbb{E}}_{\pmb{z} \sim p_{\pmb{z}}(\pmb{z})}
\big[\log (1 - D(G(\pmb{z}))\big]
$$
$p_{data}(\pmb{x})$ is the probability distribution over data,
whilst $p_{\pmb{z}}(\pmb{z})$ probability distribution of $\pmb{z}$, which is set to
gaussian noise.
This file defines the loss functions. [Here](../simple_mnist_experiment.html) is an MNIST example
with two multilayer perceptron for the generator and discriminator.
"""
import torch
import torch.nn as nn
import torch.utils.data
from labml_helpers.module import Module
class DiscriminatorLogitsLoss(Module):
"""
## Discriminator Loss
Discriminator should **ascend** on the gradient,
$$\nabla_{\theta_d} \frac{1}{m} \sum_{i=1}^m \Bigg[
\log D\Big(\pmb{x}^{(i)}\Big) +
\log \Big(1 - D\Big(G\Big(\pmb{z}^{(i)}\Big)\Big)\Big)
\Bigg]$$
$m$ is the mini-batch size and $(i)$ is used to index samples in the mini-batch.
$\pmb{x}$ are samples from $p_{data}$ and $\pmb{z}$ are samples from $p_z$.
"""
def __init__(self, smoothing: float = 0.2):
super().__init__()
# We use PyTorch Binary Cross Entropy Loss, which is
# $-\sum\Big[y \log(\hat{y}) + (1 - y) \log(1 - \hat{y})\Big]$,
# where $y$ are the labels and $\hat{y}$ are the predictions.
# *Note the negative sign*.
# We use labels equal to $1$ for $\pmb{x}$ from $p_{data}$
# and labels equal to $0$ for $\pmb{x}$ from $p_{G}.$
# Then descending on the sum of these is the same as ascending on
# the above gradient.
#
# `BCEWithLogitsLoss` combines softmax and binary cross entropy loss.
self.loss_true = nn.BCEWithLogitsLoss()
self.loss_false = nn.BCEWithLogitsLoss()
# We use label smoothing because it seems to work better in some cases
self.smoothing = smoothing
        # Labels are registered as buffers, with persistence set to `False`.
self.register_buffer('labels_true', _create_labels(256, 1.0 - smoothing, 1.0), False)
self.register_buffer('labels_false', _create_labels(256, 0.0, smoothing), False)
def __call__(self, logits_true: torch.Tensor, logits_false: torch.Tensor):
"""
`logits_true` are logits from $D(\pmb{x}^{(i)})$ and
`logits_false` are logits from $D(G(\pmb{z}^{(i)}))$
"""
if len(logits_true) > len(self.labels_true):
self.register_buffer("labels_true",
_create_labels(len(logits_true), 1.0 - self.smoothing, 1.0, logits_true.device), False)
if len(logits_false) > len(self.labels_false):
self.register_buffer("labels_false",
_create_labels(len(logits_false), 0.0, self.smoothing, logits_false.device), False)
return (self.loss_true(logits_true, self.labels_true[:len(logits_true)]),
self.loss_false(logits_false, self.labels_false[:len(logits_false)]))
class GeneratorLogitsLoss(Module):
"""
## Generator Loss
Generator should **descend** on the gradient,
$$\nabla_{\theta_g} \frac{1}{m} \sum_{i=1}^m \Bigg[
\log \Big(1 - D\Big(G\Big(\pmb{z}^{(i)}\Big)\Big)\Big)
\Bigg]$$
"""
def __init__(self, smoothing: float = 0.2):
super().__init__()
self.loss_true = nn.BCEWithLogitsLoss()
self.smoothing = smoothing
# We use labels equal to $1$ for $\pmb{x}$ from $p_{G}.$
# Then descending on this loss is the same as descending on
# the above gradient.
self.register_buffer('fake_labels', _create_labels(256, 1.0 - smoothing, 1.0), False)
def __call__(self, logits: torch.Tensor):
if len(logits) > len(self.fake_labels):
self.register_buffer("fake_labels",
_create_labels(len(logits), 1.0 - self.smoothing, 1.0, logits.device), False)
return self.loss_true(logits, self.fake_labels[:len(logits)])
def _create_labels(n: int, r1: float, r2: float, device: torch.device = None):
"""
Create smoothed labels
"""
return torch.empty(n, 1, requires_grad=False, device=device).uniform_(r1, r2)
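# Minimal usage sketch (added; `discriminator`, `generator`, `x` and `z` are
# assumed to be defined elsewhere):
#   disc_loss, gen_loss = DiscriminatorLogitsLoss(), GeneratorLogitsLoss()
#   logits_real = discriminator(x)
#   logits_fake = discriminator(generator(z))
#   d_loss = sum(disc_loss(logits_real, logits_fake))
#   g_loss = gen_loss(logits_fake)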
|
[
"torch.nn.BCEWithLogitsLoss",
"torch.empty"
] |
[((2434, 2456), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2454, 2456), True, 'import torch.nn as nn\n'), ((2483, 2505), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2503, 2505), True, 'import torch.nn as nn\n'), ((4081, 4103), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (4101, 4103), True, 'import torch.nn as nn\n'), ((4856, 4909), 'torch.empty', 'torch.empty', (['n', '(1)'], {'requires_grad': '(False)', 'device': 'device'}), '(n, 1, requires_grad=False, device=device)\n', (4867, 4909), False, 'import torch\n')]
|
# Generated by Django 3.0.8 on 2021-02-25 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Gallery', '0010_pickedimage'),
]
operations = [
migrations.RemoveField(
model_name='pickedimage',
name='cover_image',
),
migrations.AddField(
model_name='pickedimage',
name='cover_image',
field=models.ManyToManyField(to='Gallery.ImagesClient'),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.ManyToManyField"
] |
[((228, 296), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""pickedimage"""', 'name': '"""cover_image"""'}), "(model_name='pickedimage', name='cover_image')\n", (250, 296), False, 'from django.db import migrations, models\n'), ((450, 499), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""Gallery.ImagesClient"""'}), "(to='Gallery.ImagesClient')\n", (472, 499), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
# file: DetectronModelMeta.py
# date: 2021-09-23
from detectron2 import model_zoo
from .. import common
class DetectronModelMeta(common.ModelMeta):
def __init__(self,
dataset: str, task: str, model: str, backbone: str
):
self._dataset: str = None
self._task: str = None
self._model: str = None
self._backbone: str = None
self._name: str = None
self._model_url: str = None
self._conf_name: str = None
self.init(dataset, task, model, backbone)
def init(self,
dataset: str, task: str, model: str, backbone: str
) -> None:
self._dataset = dataset
self._task = task
self._model = model
self._backbone = backbone
self._name = self._infer_model_name()
self._model_url = self._infer_model_url()
self._conf_name = self._infer_conf_name()
def _infer_model_name(self) -> str:
return "%s-%s-%s_%s" % (
self._dataset, self._task, self._model, self._backbone)
def _infer_model_url(self) -> str:
sub_path: str = "%s-%s/%s_%s" % (
self._dataset, self._task, self._model, self._backbone)
model_url: str = model_zoo.get_checkpoint_url(sub_path + ".yaml")
return model_url
def _infer_conf_name(self) -> str:
sub_path: str = "%s-%s/%s_%s" % (
self._dataset, self._task, self._model, self._backbone)
return sub_path + ".yaml"
def get_name(self) -> str:
return self._name
def get_model_url(self) -> str:
return self._model_url
def get_conf_name(self) -> str:
return self._conf_name
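# Example (added; this combination maps to a real model-zoo config path):
#   meta = DetectronModelMeta("COCO", "Detection", "faster_rcnn", "R_50_FPN_3x")
#   meta.get_conf_name()  # -> "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"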
|
[
"detectron2.model_zoo.get_checkpoint_url"
] |
[((1286, 1334), 'detectron2.model_zoo.get_checkpoint_url', 'model_zoo.get_checkpoint_url', (["(sub_path + '.yaml')"], {}), "(sub_path + '.yaml')\n", (1314, 1334), False, 'from detectron2 import model_zoo\n')]
|
import fresh_tomatoes
import media
# Create a data structure
alien = media.Movie(
"Alien",
"During its return to the earth, commercial spaceship\
Nostromo intercepts a distress signal from a distant planet.",
"https://image.tmdb.org/t/p/w640/2h00HrZs89SL3tXB4nbkiM7BKHs.jpg",
"https://www.youtube.com/watch?v=jQ5lPt9edzQ")
matrix = media.Movie(
"The Matrix",
"Set in the 22nd century, The Matrix tells the story of a\
computer hacker who joins a group of underground insurgents\
fighting the vast and powerful computers who now rule the earth.",
"https://image.tmdb.org/t/p/w640/hEpWvX6Bp79eLxY1kX5ZZJcme5U.jpg",
"https://www.youtube.com/watch?v=oZ1-M8O70zk")
interstellar = media.Movie(
"Interstellar",
"Interstellar chronicles the adventures of a group of\
explorers who make use of a newly discovered wormhole\
to surpass the limitations on human space travel and\
conquer the vast distances involved in an interstellar voyage.",
"https://image.tmdb.org/t/p/w640/nBNZadXqJSdt05SHLqgT0HuC5Gm.jpg",
"https://www.youtube.com/watch?v=zSWdZVtXT7E")
prometheus = media.Movie(
"Prometheus",
"A team of explorers discover a clue to the origins\
of mankind on Earth, leading them on a journey\
to the darkest corners of the universe.",
"https://image.tmdb.org/t/p/w640/ng8ALjSDhUmwLl7vtjUWIZNQSlt.jpg",
"https://www.youtube.com/watch?v=r-EZC5zn2Fk")
edge = media.Movie(
"Edge of Tomorrow",
"<NAME> is an officer who has never seen\
a day of combat when he is unceremoniously demoted\
and dropped into combat.",
"https://image.tmdb.org/t/p/w640/tpoVEYvm6qcXueZrQYJNRLXL88s.jpg",
"https://www.youtube.com/watch?v=fLe_qO4AE-M")
inception = media.Movie(
"Inception",
"Cobb, a skilled thief who commits corporate espionage\
by infiltrating the subconscious of his targets is offered\
a chance to regain his old life as payment for a task\
considered to be impossible: the implantation of\
another person's idea into a target's subconscious.",
"https://image.tmdb.org/t/p/w640/qmDpIHrmpJINaRKAfWQfftjCdyi.jpg",
"https://www.youtube.com/watch?v=YoHD9XEInc0")
movies = [alien,
matrix,
interstellar,
prometheus,
edge,
inception]
# Run page generator
fresh_tomatoes.open_movies_page(movies)
|
[
"fresh_tomatoes.open_movies_page",
"media.Movie"
] |
[((71, 338), 'media.Movie', 'media.Movie', (['"""Alien"""', '"""During its return to the earth, commercial spaceship Nostromo intercepts a distress signal from a distant planet."""', '"""https://image.tmdb.org/t/p/w640/2h00HrZs89SL3tXB4nbkiM7BKHs.jpg"""', '"""https://www.youtube.com/watch?v=jQ5lPt9edzQ"""'], {}), "('Alien',\n 'During its return to the earth, commercial spaceship Nostromo intercepts a distress signal from a distant planet.'\n , 'https://image.tmdb.org/t/p/w640/2h00HrZs89SL3tXB4nbkiM7BKHs.jpg',\n 'https://www.youtube.com/watch?v=jQ5lPt9edzQ')\n", (82, 338), False, 'import media\n'), ((355, 698), 'media.Movie', 'media.Movie', (['"""The Matrix"""', '"""Set in the 22nd century, The Matrix tells the story of a computer hacker who joins a group of underground insurgents fighting the vast and powerful computers who now rule the earth."""', '"""https://image.tmdb.org/t/p/w640/hEpWvX6Bp79eLxY1kX5ZZJcme5U.jpg"""', '"""https://www.youtube.com/watch?v=oZ1-M8O70zk"""'], {}), "('The Matrix',\n 'Set in the 22nd century, The Matrix tells the story of a computer hacker who joins a group of underground insurgents fighting the vast and powerful computers who now rule the earth.'\n , 'https://image.tmdb.org/t/p/w640/hEpWvX6Bp79eLxY1kX5ZZJcme5U.jpg',\n 'https://www.youtube.com/watch?v=oZ1-M8O70zk')\n", (366, 698), False, 'import media\n'), ((723, 1112), 'media.Movie', 'media.Movie', (['"""Interstellar"""', '"""Interstellar chronicles the adventures of a group of explorers who make use of a newly discovered wormhole to surpass the limitations on human space travel and conquer the vast distances involved in an interstellar voyage."""', '"""https://image.tmdb.org/t/p/w640/nBNZadXqJSdt05SHLqgT0HuC5Gm.jpg"""', '"""https://www.youtube.com/watch?v=zSWdZVtXT7E"""'], {}), "('Interstellar',\n 'Interstellar chronicles the adventures of a group of explorers who make use of a newly discovered wormhole to surpass the limitations on human space travel and conquer the vast distances involved in an interstellar voyage.'\n , 'https://image.tmdb.org/t/p/w640/nBNZadXqJSdt05SHLqgT0HuC5Gm.jpg',\n 'https://www.youtube.com/watch?v=zSWdZVtXT7E')\n", (734, 1112), False, 'import media\n'), ((1137, 1436), 'media.Movie', 'media.Movie', (['"""Prometheus"""', '"""A team of explorers discover a clue to the origins of mankind on Earth, leading them on a journey to the darkest corners of the universe."""', '"""https://image.tmdb.org/t/p/w640/ng8ALjSDhUmwLl7vtjUWIZNQSlt.jpg"""', '"""https://www.youtube.com/watch?v=r-EZC5zn2Fk"""'], {}), "('Prometheus',\n 'A team of explorers discover a clue to the origins of mankind on Earth, leading them on a journey to the darkest corners of the universe.'\n , 'https://image.tmdb.org/t/p/w640/ng8ALjSDhUmwLl7vtjUWIZNQSlt.jpg',\n 'https://www.youtube.com/watch?v=r-EZC5zn2Fk')\n", (1148, 1436), False, 'import media\n'), ((1453, 1736), 'media.Movie', 'media.Movie', (['"""Edge of Tomorrow"""', '"""<NAME> is an officer who has never seen a day of combat when he is unceremoniously demoted and dropped into combat."""', '"""https://image.tmdb.org/t/p/w640/tpoVEYvm6qcXueZrQYJNRLXL88s.jpg"""', '"""https://www.youtube.com/watch?v=fLe_qO4AE-M"""'], {}), "('Edge of Tomorrow',\n '<NAME> is an officer who has never seen a day of combat when he is unceremoniously demoted and dropped into combat.'\n , 'https://image.tmdb.org/t/p/w640/tpoVEYvm6qcXueZrQYJNRLXL88s.jpg',\n 'https://www.youtube.com/watch?v=fLe_qO4AE-M')\n", (1464, 1736), False, 'import media\n'), ((1758, 2192), 'media.Movie', 'media.Movie', 
(['"""Inception"""', '"""Cobb, a skilled thief who commits corporate espionage by infiltrating the subconscious of his targets is offered a chance to regain his old life as payment for a task considered to be impossible: the implantation of another person\'s idea into a target\'s subconscious."""', '"""https://image.tmdb.org/t/p/w640/qmDpIHrmpJINaRKAfWQfftjCdyi.jpg"""', '"""https://www.youtube.com/watch?v=YoHD9XEInc0"""'], {}), '(\'Inception\',\n "Cobb, a skilled thief who commits corporate espionage by infiltrating the subconscious of his targets is offered a chance to regain his old life as payment for a task considered to be impossible: the implantation of another person\'s idea into a target\'s subconscious."\n , \'https://image.tmdb.org/t/p/w640/qmDpIHrmpJINaRKAfWQfftjCdyi.jpg\',\n \'https://www.youtube.com/watch?v=YoHD9XEInc0\')\n', (1769, 2192), False, 'import media\n'), ((2347, 2386), 'fresh_tomatoes.open_movies_page', 'fresh_tomatoes.open_movies_page', (['movies'], {}), '(movies)\n', (2378, 2386), False, 'import fresh_tomatoes\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: entry_meta.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='entry_meta.proto',
package='mit.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x10\x65ntry_meta.proto\x12\x0cmit.protobuf\"&\n\x0c\x46ieldIdArray\x12\x16\n\x0e\x66ield_id_array\x18\x01 \x03(\r\"\xc6\x01\n\tEntryMeta\x12\x41\n\x0e\x65ntry_meta_map\x18\x01 \x03(\x0b\x32).mit.protobuf.EntryMeta.EntryMetaMapEntry\x12\x16\n\x0e\x65mbedding_size\x18\x02 \x01(\r\x12\r\n\x05model\x18\x03 \x01(\t\x1aO\n\x11\x45ntryMetaMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.mit.protobuf.FieldIdArray:\x02\x38\x01\x42)\n\x11org.openmit.entryB\x0f\x45ntryMetaProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
_FIELDIDARRAY = _descriptor.Descriptor(
name='FieldIdArray',
full_name='mit.protobuf.FieldIdArray',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='field_id_array', full_name='mit.protobuf.FieldIdArray.field_id_array', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=72,
)
_ENTRYMETA_ENTRYMETAMAPENTRY = _descriptor.Descriptor(
name='EntryMetaMapEntry',
full_name='mit.protobuf.EntryMeta.EntryMetaMapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='mit.protobuf.EntryMeta.EntryMetaMapEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='mit.protobuf.EntryMeta.EntryMetaMapEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=194,
serialized_end=273,
)
_ENTRYMETA = _descriptor.Descriptor(
name='EntryMeta',
full_name='mit.protobuf.EntryMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='entry_meta_map', full_name='mit.protobuf.EntryMeta.entry_meta_map', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='embedding_size', full_name='mit.protobuf.EntryMeta.embedding_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model', full_name='mit.protobuf.EntryMeta.model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ENTRYMETA_ENTRYMETAMAPENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=273,
)
_ENTRYMETA_ENTRYMETAMAPENTRY.fields_by_name['value'].message_type = _FIELDIDARRAY
_ENTRYMETA_ENTRYMETAMAPENTRY.containing_type = _ENTRYMETA
_ENTRYMETA.fields_by_name['entry_meta_map'].message_type = _ENTRYMETA_ENTRYMETAMAPENTRY
DESCRIPTOR.message_types_by_name['FieldIdArray'] = _FIELDIDARRAY
DESCRIPTOR.message_types_by_name['EntryMeta'] = _ENTRYMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FieldIdArray = _reflection.GeneratedProtocolMessageType('FieldIdArray', (_message.Message,), dict(
DESCRIPTOR = _FIELDIDARRAY,
__module__ = 'entry_meta_pb2'
# @@protoc_insertion_point(class_scope:mit.protobuf.FieldIdArray)
))
_sym_db.RegisterMessage(FieldIdArray)
EntryMeta = _reflection.GeneratedProtocolMessageType('EntryMeta', (_message.Message,), dict(
EntryMetaMapEntry = _reflection.GeneratedProtocolMessageType('EntryMetaMapEntry', (_message.Message,), dict(
DESCRIPTOR = _ENTRYMETA_ENTRYMETAMAPENTRY,
__module__ = 'entry_meta_pb2'
# @@protoc_insertion_point(class_scope:mit.protobuf.EntryMeta.EntryMetaMapEntry)
))
,
DESCRIPTOR = _ENTRYMETA,
__module__ = 'entry_meta_pb2'
# @@protoc_insertion_point(class_scope:mit.protobuf.EntryMeta)
))
_sym_db.RegisterMessage(EntryMeta)
_sym_db.RegisterMessage(EntryMeta.EntryMetaMapEntry)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\021org.openmit.entryB\017EntryMetaProtosP\001\370\001\001'))
_ENTRYMETA_ENTRYMETAMAPENTRY.has_options = True
_ENTRYMETA_ENTRYMETAMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
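# Usage sketch appended at the protoc module-scope insertion point (values are
# illustrative, not part of the generated schema): build an EntryMeta, attach a
# map entry, and round-trip it through serialization.
def _demo_entry_meta():
    meta = EntryMeta()
    meta.embedding_size = 16
    meta.model = 'lr'
    meta.entry_meta_map['user_id'].field_id_array.extend([1, 2, 3])
    return EntryMeta.FromString(meta.SerializeToString())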
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor_pb2.FileOptions",
"google.protobuf.descriptor_pb2.MessageOptions"
] |
[((483, 509), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (507, 509), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((6039, 6067), 'google.protobuf.descriptor_pb2.FileOptions', 'descriptor_pb2.FileOptions', ([], {}), '()\n', (6065, 6067), False, 'from google.protobuf import descriptor_pb2\n'), ((6251, 6282), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', (6280, 6282), False, 'from google.protobuf import descriptor_pb2\n'), ((1376, 1720), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""field_id_array"""', 'full_name': '"""mit.protobuf.FieldIdArray.field_id_array"""', 'index': '(0)', 'number': '(1)', 'type': '(13)', 'cpp_type': '(3)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='field_id_array', full_name=\n 'mit.protobuf.FieldIdArray.field_id_array', index=0, number=1, type=13,\n cpp_type=3, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (1403, 1720), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2545, 2889), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""value"""', 'full_name': '"""mit.protobuf.EntryMeta.EntryMetaMapEntry.value"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='value', full_name=\n 'mit.protobuf.EntryMeta.EntryMetaMapEntry.value', index=1, number=2,\n type=11, cpp_type=10, label=1, has_default_value=False, default_value=\n None, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (2572, 2889), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3010, 3041), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', (3039, 3041), False, 'from google.protobuf import descriptor_pb2\n'), ((3357, 3699), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""entry_meta_map"""', 'full_name': '"""mit.protobuf.EntryMeta.entry_meta_map"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='entry_meta_map', full_name=\n 'mit.protobuf.EntryMeta.entry_meta_map', index=0, number=1, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (3384, 3699), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3724, 4064), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""embedding_size"""', 
'full_name': '"""mit.protobuf.EntryMeta.embedding_size"""', 'index': '(1)', 'number': '(2)', 'type': '(13)', 'cpp_type': '(3)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='embedding_size', full_name=\n 'mit.protobuf.EntryMeta.embedding_size', index=1, number=2, type=13,\n cpp_type=3, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (3751, 4064), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import gimp
from gimp import pdb
from settings import *
from network import *
from image import *
from utils import *
from draw import *
from paint import *
from collections import Counter
import TwitterAPI
import os      # os.listdir/os.path are used by addResource below
import random  # module-qualified random.choice/random.randrange calls below need this
from random import randrange, choice, shuffle
from string import letters
import datetime as dt
from time import sleep
def setTwitterAPIKeys(ACCESS_TOKEN_KEY="NOT_SET",CONSUMER_KEY="NOT_SET",CONSUMER_SECRET="NOT_SET",ACCESS_TOKEN_SECRET="NOT_SET"):
global settings_data
if not ACCESS_TOKEN_KEY == "NOT_SET":
        settings_data['twitter']['access_token_key']=ACCESS_TOKEN_KEY
        settings_data['twitter']['consumer_key']=CONSUMER_KEY
        settings_data['twitter']['consumer_secret']=CONSUMER_SECRET
        settings_data['twitter']['access_token_secret']=ACCESS_TOKEN_SECRET
saveSettings()
def addHashtag(tag):
#add hashtag to settings
global settings_data
settings_data['twitter']['hashtags']=settings_data['twitter']['hashtags']+u' #'+unicode(tag, "utf-8")
saveSettings()
def removeHashtag(tag):
    #remove a hashtag from settings; returns True if it was present
global settings_data
hashtags=map(str, settings_data['twitter']['hashtags'].split('#')[1:])
hashtags=map(str.strip, hashtags)
if tag in hashtags:
hashtags.remove(tag)
rt=''
for hashtag in hashtags:
rt=rt+'#'+hashtag + ' '
rt.strip()
settings_data['twitter']['hashtags']=rt
saveSettings()
return True
else:
return False
def hashtagString(length=140,mode=0):
#return string of hashtags filling given character space
global settings_data
hashtags=settings_data['twitter']['hashtags'].split('#')[1:]
hs=''
ll=[]
for item in hashtags:
if len(item)+2<=length:
ll.append(item)
ll.sort(key=len)
    while len(ll) > 0 and length > len(ll[0]):  # check emptiness first to avoid IndexError
il=[]
for item in ll:
if len(item)+2<=length:
il.append(item)
shuffle(il)
if not len(il)==0:
nh=il.pop()
if len(nh)+2<=length:
length=length-len(nh)-2
hs=hs+'#'+nh.strip()+' '
if nh in ll:
ll.remove(nh)
if len(ll)<1:
return str(hs).strip()
return str(hs).strip()
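# Illustrative usage of the greedy budget fill above: on each pass only tags
# that still fit the remaining character budget are eligible, so a 20-character
# budget might yield '#art #gimp' rather than one long tag (tag values are
# hypothetical, taken from settings_data['twitter']['hashtags']).
def _demo_hashtag_budget():
    return hashtagString(length=20)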
def setDefaultTweet(default_tweet='GIMP-Python tweet!'):
global settings_data
settings_data['twitter']['default_tweet']=unicode(default_tweet, "utf-8")
saveSettings()
def tweetText(opt=0):
global settings_data
now = dt.datetime.now()
updateLocationData()
title = imageTitle(2)
city = settings_data["location"]["city"]
state = settings_data["location"]["state"]
host_name = settings_data["network"]["host_name"]
tempf = settings_data["location"]["tempf"]
weather = settings_data["location"]["weather"]
hashtags = settings_data["twitter"]["hashtags"]
time_stamp = str(dt.datetime.now())
tweet_text = ''
if opt == 0:
tweet_text = title + '\nby ' + settings_data['user']['author'] + '\n' + city + ' ' + state + ' | ' + host_name + '\n' + tempf + 'F ' + weather + '\n' + now.strftime("%A %B %d - %I:%M%p")
elif opt == 1:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4] + '\n'
else:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4]
tweet_text = tweet_text + '\n'+hashtagString(139-len(tweet_text))
return tweet_text
def tweetImage(message,image_file):
global settings_data
CONSUMER_KEY = settings_data['twitter']['consumer_key']
CONSUMER_SECRET = settings_data['twitter']['consumer_secret']
ACCESS_TOKEN_KEY = settings_data['twitter']['access_token_key']
ACCESS_TOKEN_SECRET = settings_data['twitter']['access_token_secret']
api = TwitterAPI.TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    with open(image_file, 'rb') as image_fh:  # context manager; 'file' shadowed a builtin and leaked the handle
        data = image_fh.read()
r = api.request('statuses/update_with_media', {'status':message}, {'media[]':data})
return str(str(r.status_code))
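# Usage sketch (file path and message text are hypothetical; requires valid
# credentials in settings_data['twitter']): tweetImage returns the HTTP status
# code as a string, so '200' signals success.
def _demo_tweet_image():
    return tweetImage('hello from GIMP', '/tmp/art.jpg') == '200'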
def qS():
# quick set up of default size image
imageSetup()
def qXJ(comment=""):
# quick export jpg default with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'art-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.jpg'
if comment=="":
comment=tweetText(0)
saved=saveJPG(settings_data['path']['export_name'],comment)
qXDT(saved[1],comment)
sleep(1)
return saved
def qXP(comment=""):
# quick export png default with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'art-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.png'
image = gimp.image_list()[0]
if comment=="":
comment=tweetText(0)
saved = savePNG(settings_data['path']['export_name'])
qXDT(saved[1],comment)
return saved
def qG(comment="",delay=100):
    # quick export animated gif with unique file name
global settings_data
settings_data['path']['export_name'] = str(settings_data['path']['default_save_path'])+'animation-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.gif'
image = gimp.image_list()[0]
if comment=="":
comment=tweetText(0)
saved = saveGIF(settings_data['path']['export_name'],delay)
qXDT(saved[1],comment)
return saved
def qP(fn=""):
# quick save project
global settings_data
if fn=="":
fn = str(settings_data['path']['project_folder'])+'project-'+dt.datetime.now().strftime('%Y%m%d%H%M%S')+'-'+choice(letters)+choice(letters)+choice(letters)+'.xcf'
saveXCFProject(fn)
def qX(comment=""):
    # quick export to preferred file type
global settings_data
export_modes = {"qXJ" : qXJ,
"qXP" : qXP,
"qG" : qG}
try:
mode=str(settings_data['image']['export_mode'])
return export_modes[mode](comment)
except:
mode='qXJ'
return export_modes[mode](comment)
def qT():
    # generate the tweet text, export via qX(), send the tweet, return results
global settings_data
tweet = tweetText(0)
exported=qX(comment=tweet)
sleep(5)
return (tweetImage(tweet, exported[1]) == '200', tweet)
def qTG():
    # generate the tweet text, export a gif via qG(), send the tweet, return results
global settings_data
tweet = tweetText(0)
exported=qG()
return (tweetImage(tweet, exported[1]) == '200', tweet)
def qXDT(fn,comment=""):
global settings_data
setEXIFTags(fn,{"Copyright":settings_data['user']['author']+" "+dt.datetime.now().strftime('%Y'),
"License":settings_data['image']['license'],
"Comment":comment,
"XPComment":comment,
"Description":comment,
"ImageDescription":comment,
"SEMInfo":comment,
"Artist":settings_data['user']['author'],
"Author":settings_data['user']['author'],
"Software":"GIMP 2.8 Python 2.7 EXIFTool",
"Title":comment[:comment.find('\n')],
"XPTitle":comment[:comment.find('\n')],
"Make":"GIMP",
"Model":"Python",
"Rating":"5"})
def paint():
# Full auto painting
global settings_data
image = gimp.image_list()[0]
height = image.height
width = image.width
x_center = width/2
y_center = height/2
randomBlend()
loop_range = range(0, random.choice((3, 4, 5, 6)))
loop_range.reverse()
title = imageTitle(2)
for x in loop_range:
# 1. add layer
layer_add_par = {'opacity': 100, 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
editLayerMask(0)
drawable = pdb.gimp_image_active_drawable(image)
# 1. paint layer
if random.choice((0, 1, 1, 1)):
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000),
'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'Number': random.choice((5, 10, 50, 100, 300)),
'Length': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'X': random.choice((width / 2, width / 3, width / 4)),
'Y': random.choice((height / 4, height / 3, height / 2))}
drawRays(drawRays_par['Number'], drawRays_par['Length'], drawRays_par['X'], drawRays_par['Y'])
# 1. mask edit
editLayerMask(1)
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'Number': random.choice((5, 10, 50, 100, 300)),
'Length': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'X': random.choice((width / 2, width / 3, width / 4)),
'Y': random.choice((height / 4, height / 3, height / 2))}
drawRays(drawRays_par['Number'], drawRays_par['Length'], drawRays_par['X'], drawRays_par['Y'])
editLayerMask(0)
# 2. add layer
layer_add_par = {'opacity': random.randrange(70, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
editLayerMask(0)
# 2. paint layer
if x % 4 == 0:
drawBars_par = {'Number': random.choice((2, 3, 4, 5, 6, 7, 8, 12, 16, 32, 64, 128)),
'Mode': random.choice((0, 0, 3))}
drawBars(drawBars_par['Number'], drawBars_par['Mode'])
randomBlend()
# 2. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
randomBlend()
editLayerMask(0)
image = gimp.image_list()[0]
# 3. add layer
layer_add_par = {'opacity': random.randrange(55, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
editLayerMask(0)
# 3. paint layer
if random.choice((0, 1, 1, 1, 1)):
drawRays_par = {'rays': random.choice((3, 5, 10, 15, 30, 45)), 'rayLength': random.choice(
(width / 4, width / 3, width / 2, 4 * (width / 5), 3 * (width / 4), 2 * (width / 3))),
'centerX': random.choice((width / 4, width / 3, width / 2, 4 * (width / 5), 3 * (width / 4),
2 * (width / 3))), 'centerY': random.choice(
(height / 4, height / 3, height / 2, 4 * (height / 5), 3 * (height / 4), 2 * (height / 3)))}
drawRays(**drawRays_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
if random.choice((0, 1)):
randomBrush()
if random.choice((0, 1)):
randomDynamics()
if random.choice((0, 1, 1, 1, 1)):
brushSize(50)
drawTree_par = {'x1': random.randrange(width / 4, 3 * (width / 4)),
'y1': random.randrange(height / 4, 3 * (height / 4)), 'angle': random.randrange(0, 360),
'depth': random.randrange(5, 7)}
drawOddTree(**drawTree_par) # x1, y1, angle, depth
if random.choice((0, 1, 1, 1, 1)):
if random.choice((0, 1, 1, 1, 1)):
brushSize(random.randrange(20, (height / 3)))
if random.choice((0, 1, 1, 1, 1)):
brushColor()
drawRays_par = {'rays': random.choice((10, 50, 100)),
'rayLength': random.choice((80, 160, 240, 400, height / 4, height / 3, height / 2)),
'centerX': random.choice(
((x_center + x_center / 2), x_center, x_center / 2, x_center / 3, x_center / 4)),
'centerY': random.choice(
((x_center + x_center / 2), x_center, x_center / 2, x_center / 3, x_center / 4))}
drawRays(**drawRays_par)
# 3. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000), 'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
# 4. add layer
layer_add_par = {'opacity': random.randrange(55, 100), 'msk': 1}
addNewLayer(**layer_add_par)
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
brushSize(-1)
editLayerMask(0)
# 4. paint layer
if random.choice((0, 1, 1, 1, 1)):
drawSin_par = {
'bar_space': random.choice((16, 18, 19, 20, 21, 51, 52, 53, 54, 56, 55, 57, 58, 59)),
'bar_length': random.choice((10, 100, height / 3)),
'mag': random.choice((40, 69, 120, 200, 300, 400, height / 2)),
'x_offset': 0,
'y_offset': random.randrange(height / 12, height)
}
drawSinWave(**drawSin_par)
if random.choice((0, 1, 1, 1, 1)):
drawForest(random.randrange(15, 64), 0)
if random.choice((0, 1, 1, 1, 1)):
# 5. mask edit
editLayerMask(1)
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
plasma_par = {'Image': image, 'Draw': drawable, 'Seed': random.randrange(1, 1000),
'Turbulence': random.choice(
(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1.0, 1.5, 1.1, 1.4, 1.8, 2.0, 2.7))}
pdb.plug_in_plasma(plasma_par['Image'], plasma_par['Draw'], plasma_par['Seed'], plasma_par['Turbulence'])
if random.choice((0, 1)):
fill_par = {
'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomCircleFill(**fill_par)
if random.choice((0, 1)):
fill_par = {
'num': random.choice((1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 8, 9, 12, 24, 64, 128, 512)),
'size': random.randrange(15, height), 'opt': 3, 'sq': random.choice((0, 1))}
randomRectFill(**fill_par)
flatten()
image = gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
canvas_par = {'Image': image, 'Draw': drawable, 'Direction': 1, 'Depth': 1}
pdb.plug_in_apply_canvas(canvas_par['Image'], canvas_par['Draw'], canvas_par['Direction'], canvas_par['Depth'])
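# End-to-end sketch (run inside GIMP's Python-Fu console): new canvas,
# automated layer/mask painting, then export via the preferred exporter.
def _demo_paint_session():
    qS()
    paint()
    return qX()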
def dimensionality(folder='',tweet=0):
# automated creation of dimensionality study piece
global settings_data
if folder == '':
folder = settings_data['path']['art_folder']+"resources/"+random.choice(["photos","fractals","plants","rock"])+"/"
loadDirLayer(folder,9699690)
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def fractalMasking():
# fractal layered wtih tile masks
if random.choice([0,0,0,0,1,1,1]):
image=gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
pdb.gimp_invert(drawable)
for x in range(random.choice([3,6,7,8,9,10])):
addFractal()
tile([random.choice([1,2,3,4,5,6,7,8,12]),random.choice([1,2,3,4,5,6,7,8,12])])
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def randomMasking():
# tile mask from random resources
image=gimp.image_list()[0]
drawable = pdb.gimp_image_active_drawable(image)
if random.choice([0,0,0,0,1,1,1]):
pdb.gimp_invert(drawable)
for x in range(random.choice([13,6,7,8,9,10])):
qRes(opacity=random.choice([13,25,33,50,66,75,85]))
layer_mode_par = {'layer': pdb.gimp_image_get_active_layer(image), 'mode': random.randrange(0, 25)}
pdb.gimp_layer_set_mode(layer_mode_par['layer'], layer_mode_par['mode'])
if 25 > random.randrange(0,100):
tile([random.randrange(1,12),random.randrange(1,12)])
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def hybridMasking(option="SpBGsp", noise=0.3):
# masking resources with lots of options
drawInkBlot()
if 'SpBG' in option:
addSpacePhoto(opacity=50)
if "Re" in option:
applyEffect()
for x in range(4,(10+random.randrange(int(noise*5*-1),int(noise*10)))):
if 'ph'in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'sc' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'sp' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if 'fr' in option:
qRes()
if "Re" in option:
applyEffect()
tile([random.randrange(1,12),random.randrange(1,12)])
if "Re" in option:
editLayerMask(1)
applyEffect()
editLayerMask(0)
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def spikeDif():
# draw spike ball or random rays
spikeBallStack(depth=random.choice([3,3,4,5,6,8,10,12,16,20]))
applyEffect()
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def inkBlot():
# draq basic ink blot
inkBlotStack()
applyEffect()
if random.choice([0,0,0,0,1,1,1]):
flatten()
mirror()
def skeleton(type="",num=10,delay=10,tweet=1,study_name="Multifunctional Study"):
    # take care of exporting and tweeting all images produced
automations = {"spikeDif" : spikeDif,
"inkBlot" : inkBlot,
"hybridMasking" : hybridMasking,
"paint" : paint,
"fractalMasking" : fractalMasking,
"randomMasking" : randomMasking}
for i in range(0,num):
qS()
# ################## #
# This is the nugget #
# ################## #
if type == "":
automation_pick = random.choice(automations.keys())
print(automation_pick)
automations[automation_pick]()
else:
automations[type]()
if tweet:
signImage()
flatten()
tweet=imageTitle(2)+'\n by <NAME>\n'+study_name+'\n'
tweetImage(tweet+hashtagString(len(tweet)),qX()[1])
print(tweet)
closeAll()
sleep(delay)
else:
qX()
closeAll()
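# Example run: five ink-blot pieces exported without tweeting.
def _demo_skeleton_run():
    skeleton(type="inkBlot", num=5, tweet=0)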
def doWeatherPainting():
# draw day
# -draw sun
# -draw clouds
# draw night
# -draw sun
# -draw clouds
print('weather painting?')
def addResource(options=0, resource_type="rock", opacity=90, resource_folder="", scale=[], position=[]):
avoid_folders=['brushes','fonts','gradients','mask','overlays','paths','scraps','signature','stamps','stickers','stock-image','templates','tiles']
if resource_type == "random":
cl=dict(Counter(os.listdir(settings_data['path']['art_folder']+'/resources/'))-Counter(avoid_folders)).keys()
resource_type = random.choice(cl)
if resource_folder == "":
resource_folder = settings_data['path']['art_folder']+'/resources/'+resource_type+'/'
resource_file = ""
resource_files = []
if options == 0:
if resource_type == "":
for file in os.listdir(resource_folder):
if os.path.isdir(resource_folder+file):
for sub_file in os.listdir(resource_folder+file+'/'):
if 'png' in sub_file:
resource_files.append(file+'/'+sub_file)
if 'jpg' in sub_file:
resource_files.append(file+'/'+sub_file)
else:
if 'png' in file:
resource_files.append(file)
if 'jpg' in file:
resource_files.append(file)
else:
for file in os.listdir(resource_folder):
if 'png' in file:
resource_files.append(file)
if 'jpg' in file:
resource_files.append(file)
resource_file = resource_folder+random.choice(resource_files)
loadLayer(resource_file)
image = gimp.image_list()[0]
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_opacity(active_layer, opacity)
if scale==[]:
pdb.gimp_layer_scale(active_layer, image.width, image.height, 0)
else:
pdb.gimp_layer_scale(active_layer, scale[0], scale[1], 0)
if position == []:
pdb.gimp_layer_set_offsets(active_layer, 0, 0)
else:
pdb.gimp_layer_set_offsets(active_layer, position[0], position[1])
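# Example (folder layout assumed as in settings_data): drop one random rock
# texture at 75% opacity, scaled to 256x256 at the canvas origin.
def _demo_add_resource():
    addResource(resource_type="rock", opacity=75, scale=[256, 256], position=[0, 0])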
def qRes(options=0, sticker_type="random", opacity=90, sticker_folder="", scale=[], position=[]):
if sticker_folder == "" and not sticker_type == 'random':
        sticker_folder = settings_data['path']['art_folder']+'/resources/'+sticker_type+'/'  # was the undefined name resource_type
addResource(options, sticker_type, opacity, sticker_folder, scale, position)
def addSticker(options=0, sticker_type="", opacity=90, sticker_folder="", scale=[], position=[]):
if sticker_folder == "":
sticker_folder = settings_data['path']['art_folder']+'/resources/stickers/'
addResource(options, sticker_type, opacity, sticker_folder, scale, position)
def addFractal(options=0, fractal_type="", opacity=90, fractal_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if fractal_folder == "":
fractal_folder = settings_data['path']['art_folder']+'/resources/fractals/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, fractal_type, opacity, fractal_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addPhoto(options=0, photo_type="", opacity=90, photo_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if photo_folder == "":
photo_folder = settings_data['path']['art_folder']+'/resources/photos/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, photo_type, opacity, photo_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addSpacePhoto(options=0, type="", opacity=90, space_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if space_folder == "":
space_folder = settings_data['path']['art_folder']+'/resources/space/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, type, opacity, space_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
def addScriptDrawing(options=0, type="", opacity=90, script_folder="", scale=[], position=[]):
image = gimp.image_list()[0]
if script_folder == "":
script_folder = settings_data['path']['art_folder']+'/resources/script_drawings/'
if position == []:
position = [0,0]
if scale == []:
scale=[image.width, image.height]
addResource(options, type, opacity, script_folder, scale, position)
active_layer = pdb.gimp_image_get_active_layer(image)
pdb.gimp_layer_set_mode(active_layer, random.choice([17,6,15,0,0,0,0,0,0]))
|
[
"gimp.pdb.gimp_image_get_active_layer",
"random.shuffle",
"gimp.pdb.plug_in_plasma",
"random.choice",
"gimp.pdb.gimp_image_active_drawable",
"time.sleep",
"gimp.pdb.gimp_layer_set_opacity",
"TwitterAPI.TwitterAPI",
"gimp.pdb.gimp_layer_set_mode",
"gimp.pdb.plug_in_apply_canvas",
"gimp.pdb.gimp_invert",
"gimp.pdb.gimp_layer_scale",
"collections.Counter",
"datetime.datetime.now",
"gimp.pdb.gimp_layer_set_offsets",
"gimp.image_list"
] |
[((2575, 2592), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2590, 2592), True, 'import datetime as dt\n'), ((3868, 3963), 'TwitterAPI.TwitterAPI', 'TwitterAPI.TwitterAPI', (['CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_TOKEN_KEY', 'ACCESS_TOKEN_SECRET'], {}), '(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY,\n ACCESS_TOKEN_SECRET)\n', (3889, 3963), False, 'import TwitterAPI\n'), ((4654, 4662), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (4659, 4662), False, 'from time import sleep\n'), ((6471, 6479), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (6476, 6479), False, 'from time import sleep\n'), ((17638, 17675), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (17668, 17675), False, 'from gimp import pdb\n'), ((17760, 17875), 'gimp.pdb.plug_in_apply_canvas', 'pdb.plug_in_apply_canvas', (["canvas_par['Image']", "canvas_par['Draw']", "canvas_par['Direction']", "canvas_par['Depth']"], {}), "(canvas_par['Image'], canvas_par['Draw'],\n canvas_par['Direction'], canvas_par['Depth'])\n", (17784, 17875), False, 'from gimp import pdb\n'), ((18809, 18846), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (18839, 18846), False, 'from gimp import pdb\n'), ((24262, 24300), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (24293, 24300), False, 'from gimp import pdb\n'), ((24305, 24354), 'gimp.pdb.gimp_layer_set_opacity', 'pdb.gimp_layer_set_opacity', (['active_layer', 'opacity'], {}), '(active_layer, opacity)\n', (24331, 24354), False, 'from gimp import pdb\n'), ((25768, 25806), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (25799, 25806), False, 'from gimp import pdb\n'), ((26326, 26364), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (26357, 26364), False, 'from gimp import pdb\n'), ((26876, 26914), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (26907, 26914), False, 'from gimp import pdb\n'), ((27443, 27481), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (27474, 27481), False, 'from gimp import pdb\n'), ((2004, 2015), 'random.shuffle', 'shuffle', (['il'], {}), '(il)\n', (2011, 2015), False, 'from random import randrange, choice, shuffle\n'), ((2961, 2978), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2976, 2978), True, 'import datetime as dt\n'), ((4993, 5010), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (5008, 5010), False, 'import gimp\n'), ((5493, 5510), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (5508, 5510), False, 'import gimp\n'), ((7659, 7676), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (7674, 7676), False, 'import gimp\n'), ((8186, 8258), 'gimp.pdb.gimp_layer_set_mode', 'pdb.gimp_layer_set_mode', (["layer_mode_par['layer']", "layer_mode_par['mode']"], {}), "(layer_mode_par['layer'], layer_mode_par['mode'])\n", (8209, 8258), False, 'from gimp import pdb\n'), ((8303, 8340), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (8333, 8340), False, 'from gimp import pdb\n'), ((9353, 9390), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (9383, 9390), False, 'from gimp import pdb\n'), ((9608, 9718), 'gimp.pdb.plug_in_plasma', 
'pdb.plug_in_plasma', (["plasma_par['Image']", "plasma_par['Draw']", "plasma_par['Seed']", "plasma_par['Turbulence']"], {}), "(plasma_par['Image'], plasma_par['Draw'], plasma_par[\n 'Seed'], plasma_par['Turbulence'])\n", (9626, 9718), False, 'from gimp import pdb\n'), ((10493, 10565), 'gimp.pdb.gimp_layer_set_mode', 'pdb.gimp_layer_set_mode', (["layer_mode_par['layer']", "layer_mode_par['mode']"], {}), "(layer_mode_par['layer'], layer_mode_par['mode'])\n", (10516, 10565), False, 'from gimp import pdb\n'), ((10991, 11028), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (11021, 11028), False, 'from gimp import pdb\n'), ((11246, 11356), 'gimp.pdb.plug_in_plasma', 'pdb.plug_in_plasma', (["plasma_par['Image']", "plasma_par['Draw']", "plasma_par['Seed']", "plasma_par['Turbulence']"], {}), "(plasma_par['Image'], plasma_par['Draw'], plasma_par[\n 'Seed'], plasma_par['Turbulence'])\n", (11264, 11356), False, 'from gimp import pdb\n'), ((11685, 11757), 'gimp.pdb.gimp_layer_set_mode', 'pdb.gimp_layer_set_mode', (["layer_mode_par['layer']", "layer_mode_par['mode']"], {}), "(layer_mode_par['layer'], layer_mode_par['mode'])\n", (11708, 11757), False, 'from gimp import pdb\n'), ((15025, 15062), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (15055, 15062), False, 'from gimp import pdb\n'), ((15280, 15390), 'gimp.pdb.plug_in_plasma', 'pdb.plug_in_plasma', (["plasma_par['Image']", "plasma_par['Draw']", "plasma_par['Seed']", "plasma_par['Turbulence']"], {}), "(plasma_par['Image'], plasma_par['Draw'], plasma_par[\n 'Seed'], plasma_par['Turbulence'])\n", (15298, 15390), False, 'from gimp import pdb\n'), ((15635, 15707), 'gimp.pdb.gimp_layer_set_mode', 'pdb.gimp_layer_set_mode', (["layer_mode_par['layer']", "layer_mode_par['mode']"], {}), "(layer_mode_par['layer'], layer_mode_par['mode'])\n", (15658, 15707), False, 'from gimp import pdb\n'), ((17602, 17619), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (17617, 17619), False, 'import gimp\n'), ((18397, 18434), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (18427, 18434), False, 'from gimp import pdb\n'), ((18443, 18468), 'gimp.pdb.gimp_invert', 'pdb.gimp_invert', (['drawable'], {}), '(drawable)\n', (18458, 18468), False, 'from gimp import pdb\n'), ((18773, 18790), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (18788, 18790), False, 'import gimp\n'), ((18894, 18919), 'gimp.pdb.gimp_invert', 'pdb.gimp_invert', (['drawable'], {}), '(drawable)\n', (18909, 18919), False, 'from gimp import pdb\n'), ((19148, 19220), 'gimp.pdb.gimp_layer_set_mode', 'pdb.gimp_layer_set_mode', (["layer_mode_par['layer']", "layer_mode_par['mode']"], {}), "(layer_mode_par['layer'], layer_mode_par['mode'])\n", (19171, 19220), False, 'from gimp import pdb\n'), ((24222, 24239), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (24237, 24239), False, 'import gimp\n'), ((24381, 24445), 'gimp.pdb.gimp_layer_scale', 'pdb.gimp_layer_scale', (['active_layer', 'image.width', 'image.height', '(0)'], {}), '(active_layer, image.width, image.height, 0)\n', (24401, 24445), False, 'from gimp import pdb\n'), ((24464, 24521), 'gimp.pdb.gimp_layer_scale', 'pdb.gimp_layer_scale', (['active_layer', 'scale[0]', 'scale[1]', '(0)'], {}), '(active_layer, scale[0], scale[1], 0)\n', (24484, 24521), False, 'from gimp import pdb\n'), ((24553, 24599), 'gimp.pdb.gimp_layer_set_offsets', 'pdb.gimp_layer_set_offsets', 
(['active_layer', '(0)', '(0)'], {}), '(active_layer, 0, 0)\n', (24579, 24599), False, 'from gimp import pdb\n'), ((24618, 24684), 'gimp.pdb.gimp_layer_set_offsets', 'pdb.gimp_layer_set_offsets', (['active_layer', 'position[0]', 'position[1]'], {}), '(active_layer, position[0], position[1])\n', (24644, 24684), False, 'from gimp import pdb\n'), ((25424, 25441), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (25439, 25441), False, 'import gimp\n'), ((25992, 26009), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (26007, 26009), False, 'import gimp\n'), ((26549, 26566), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (26564, 26566), False, 'import gimp\n'), ((27103, 27120), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (27118, 27120), False, 'import gimp\n'), ((4487, 4502), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (4493, 4502), False, 'from random import randrange, choice, shuffle\n'), ((4958, 4973), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (4964, 4973), False, 'from random import randrange, choice, shuffle\n'), ((5458, 5473), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5464, 5473), False, 'from random import randrange, choice, shuffle\n'), ((8105, 8143), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (8136, 8143), False, 'from gimp import pdb\n'), ((8675, 8785), 'gimp.pdb.plug_in_plasma', 'pdb.plug_in_plasma', (["plasma_par['Image']", "plasma_par['Draw']", "plasma_par['Seed']", "plasma_par['Turbulence']"], {}), "(plasma_par['Image'], plasma_par['Draw'], plasma_par[\n 'Seed'], plasma_par['Turbulence'])\n", (8693, 8785), False, 'from gimp import pdb\n'), ((10412, 10450), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (10443, 10450), False, 'from gimp import pdb\n'), ((10951, 10968), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (10966, 10968), False, 'import gimp\n'), ((11415, 11432), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (11430, 11432), False, 'import gimp\n'), ((11604, 11642), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (11635, 11642), False, 'from gimp import pdb\n'), ((14985, 15002), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (15000, 15002), False, 'import gimp\n'), ((15554, 15592), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (15585, 15592), False, 'from gimp import pdb\n'), ((16509, 16546), 'gimp.pdb.gimp_image_active_drawable', 'pdb.gimp_image_active_drawable', (['image'], {}), '(image)\n', (16539, 16546), False, 'from gimp import pdb\n'), ((16816, 16926), 'gimp.pdb.plug_in_plasma', 'pdb.plug_in_plasma', (["plasma_par['Image']", "plasma_par['Draw']", "plasma_par['Seed']", "plasma_par['Turbulence']"], {}), "(plasma_par['Image'], plasma_par['Draw'], plasma_par[\n 'Seed'], plasma_par['Turbulence'])\n", (16834, 16926), False, 'from gimp import pdb\n'), ((18357, 18374), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (18372, 18374), False, 'import gimp\n'), ((19067, 19105), 'gimp.pdb.gimp_image_get_active_layer', 'pdb.gimp_image_get_active_layer', (['image'], {}), '(image)\n', (19098, 19105), False, 'from gimp import pdb\n'), ((22364, 22376), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (22369, 22376), False, 'from time import sleep\n'), ((4471, 4486), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', 
(4477, 4486), False, 'from random import randrange, choice, shuffle\n'), ((4942, 4957), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (4948, 4957), False, 'from random import randrange, choice, shuffle\n'), ((5442, 5457), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5448, 5457), False, 'from random import randrange, choice, shuffle\n'), ((5901, 5916), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5907, 5916), False, 'from random import randrange, choice, shuffle\n'), ((16465, 16482), 'gimp.image_list', 'gimp.image_list', ([], {}), '()\n', (16480, 16482), False, 'import gimp\n'), ((4455, 4470), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (4461, 4470), False, 'from random import randrange, choice, shuffle\n'), ((4926, 4941), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (4932, 4941), False, 'from random import randrange, choice, shuffle\n'), ((5426, 5441), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5432, 5441), False, 'from random import randrange, choice, shuffle\n'), ((5885, 5900), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5891, 5900), False, 'from random import randrange, choice, shuffle\n'), ((5869, 5884), 'random.choice', 'choice', (['letters'], {}), '(letters)\n', (5875, 5884), False, 'from random import randrange, choice, shuffle\n'), ((6861, 6878), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (6876, 6878), True, 'import datetime as dt\n'), ((22968, 22990), 'collections.Counter', 'Counter', (['avoid_folders'], {}), '(avoid_folders)\n', (22975, 22990), False, 'from collections import Counter\n'), ((4408, 4425), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (4423, 4425), True, 'import datetime as dt\n'), ((4879, 4896), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (4894, 4896), True, 'import datetime as dt\n'), ((5379, 5396), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5394, 5396), True, 'import datetime as dt\n'), ((5822, 5839), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5837, 5839), True, 'import datetime as dt\n')]
|
import asyncio
from email.mime.text import MIMEText
from random import random, choice
from aiosmtplib import SMTP
class CheckerEmail:
"""
class for check email
use :
from CheckerMailPy import CheckerEmail
from email.mime.text import MIMEText
async def async_start() -> None:
checker_mail: CheckerEmail.CheckerEmail = CheckerEmail.CheckerEmail(hostname_mail=smtp_host,
port=smtp_port, password=<PASSWORD>,
login=smtp_login)
check.change_len_code(new_len_code=5)
check.get_random_code()
code: int = check.get_code()
await check.async_send_message()
# or sync code
check.sync_send_message()
"""
def __init__(self, hostname_mail: str, port: int, login: str, password: str, loop=None) -> None:
"""
        :param hostname_mail: SMTP server host
        :param port: SMTP server port
        :param login: SMTP username
        :param password: SMTP password
"""
if loop is None:
loop = asyncio.get_event_loop()
self.host_name: str = hostname_mail
self.port: int = port
self.message: MIMEText
self.len_code: int = 1
self.client = SMTP(password=password, username=login, loop=loop)
def change_len_code(self, new_len_code) -> None:
"""
        :param new_len_code: number of characters in generated codes
:return:
"""
self.len_code = new_len_code
    def get_random_code(self) -> str:
        """
        :return: random numeric code of len_code characters, as a string
        """
code = ""
alphacode = list(str(random()).replace(".", ""))
for i in range(self.len_code):
code += str(choice(alphacode))
return code
def build_message(self, text, from_mail, to, subject) -> None:
"""
        :param text: body of the email
        :param from_mail: sender address
        :param to: recipient address
        :param subject: subject line
:return:
"""
self.message = MIMEText(text)
self.message["From"] = from_mail
self.message["To"] = to
self.message["Subject"] = subject
async def async_send_message(self, start_tls: bool = False, use_tls: bool = False) -> None:
"""
        connect and send the built message asynchronously
:return:
"""
await self.client.connect(hostname=self.host_name, port=self.port, use_tls=use_tls, start_tls=start_tls)
async with self.client:
await self.client.send_message(self.message)
print(200)
def sync_send_message(self, start_tls: bool = False, use_tls: bool = False) -> None:
"""
for sync code
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(self.async_send_message(start_tls=start_tls, use_tls=use_tls))
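# Minimal usage sketch (host, port and credentials are hypothetical):
async def _demo_send_code() -> None:
    checker = CheckerEmail("smtp.example.com", 587, "bot@example.com", "secret")
    checker.change_len_code(6)
    code = checker.get_random_code()
    checker.build_message("Your code: %s" % code, "bot@example.com",
                          "user@example.com", "Verification code")
    await checker.async_send_message(start_tls=True)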
|
[
"asyncio.get_event_loop",
"aiosmtplib.SMTP",
"email.mime.text.MIMEText",
"random.choice",
"random.random"
] |
[((1348, 1398), 'aiosmtplib.SMTP', 'SMTP', ([], {'password': 'password', 'username': 'login', 'loop': 'loop'}), '(password=password, username=login, loop=loop)\n', (1352, 1398), False, 'from aiosmtplib import SMTP\n'), ((2040, 2054), 'email.mime.text.MIMEText', 'MIMEText', (['text'], {}), '(text)\n', (2048, 2054), False, 'from email.mime.text import MIMEText\n'), ((2704, 2728), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2726, 2728), False, 'import asyncio\n'), ((1165, 1189), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1187, 1189), False, 'import asyncio\n'), ((1779, 1796), 'random.choice', 'choice', (['alphacode'], {}), '(alphacode)\n', (1785, 1796), False, 'from random import random, choice\n'), ((1688, 1696), 'random.random', 'random', ([], {}), '()\n', (1694, 1696), False, 'from random import random, choice\n')]
|
import torch
import torch.nn.functional as F
import mxnet as mx
def get_mma_loss(weight):
'''
MMA regularization in PyTorch
:param weight: parameter of a layer in model, out_features * in_features
:return: mma loss
'''
# for convolutional layers, flatten
if weight.dim() > 2:
weight = weight.view(weight.size(0), -1)
# computing cosine similarity: dot product of normalized weight vectors
weight_ = F.normalize(weight, p=2, dim=1)
cosine = torch.matmul(weight_, weight_.t())
    # make sure that the diagonal elements cannot be selected
cosine = cosine - 2. * torch.diag(torch.diag(cosine))
    # maximize the minimum angle
loss = -torch.acos(cosine.max(dim=1)[0].clamp(-0.99999, 0.99999)).mean()
return loss
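# Usage sketch (PyTorch; the 0.07 coefficient is illustrative, not prescribed
# by this module): sum the MMA term over conv/linear layers and add it to the
# task loss during training.
def demo_mma_total_loss(model, task_loss, coeff=0.07):
    mma = sum(get_mma_loss(m.weight) for m in model.modules()
              if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)))
    return task_loss + coeff * mma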
def get_angular_loss_mxsymbol(weight):
'''
MMA regularization in Symbol of MXNet
:param weight: parameter of a layer in model, out_features * in_features
:return: mma loss
'''
# for convolutional layers, flatten
if 'conv' in weight.name:
num_filter = int(weight.attr('num_filter'))
weight = weight.reshape((num_filter, -1))
else:
num_filter = int(weight.attr('num_hidden'))
    # computing cosine similarity: dot product of normalized weight vectors; make sure that the diagonal elements cannot be selected
weight_ = mx.symbol.L2Normalization(weight, mode='instance')
cosine = mx.symbol.linalg.syrk(weight_, alpha=1., transpose=False) - 2. * mx.symbol.eye(num_filter)
    # maximize the minimum angle
    theta = mx.symbol.arccos(mx.symbol.clip(mx.symbol.max(cosine, axis=1), -0.99999, 0.99999))  # clamp as in the PyTorch version to avoid NaN from arccos
loss = -mx.symbol.mean(theta)
return loss
|
[
"mxnet.symbol.linalg.syrk",
"mxnet.symbol.eye",
"mxnet.symbol.mean",
"torch.diag",
"mxnet.symbol.L2Normalization",
"mxnet.symbol.max",
"torch.nn.functional.normalize"
] |
[((447, 478), 'torch.nn.functional.normalize', 'F.normalize', (['weight'], {'p': '(2)', 'dim': '(1)'}), '(weight, p=2, dim=1)\n', (458, 478), True, 'import torch.nn.functional as F\n'), ((1362, 1412), 'mxnet.symbol.L2Normalization', 'mx.symbol.L2Normalization', (['weight'], {'mode': '"""instance"""'}), "(weight, mode='instance')\n", (1387, 1412), True, 'import mxnet as mx\n'), ((1426, 1484), 'mxnet.symbol.linalg.syrk', 'mx.symbol.linalg.syrk', (['weight_'], {'alpha': '(1.0)', 'transpose': '(False)'}), '(weight_, alpha=1.0, transpose=False)\n', (1447, 1484), True, 'import mxnet as mx\n'), ((1579, 1608), 'mxnet.symbol.max', 'mx.symbol.max', (['cosine'], {'axis': '(1)'}), '(cosine, axis=1)\n', (1592, 1608), True, 'import mxnet as mx\n'), ((1622, 1643), 'mxnet.symbol.mean', 'mx.symbol.mean', (['theta'], {}), '(theta)\n', (1636, 1643), True, 'import mxnet as mx\n'), ((1491, 1516), 'mxnet.symbol.eye', 'mx.symbol.eye', (['num_filter'], {}), '(num_filter)\n', (1504, 1516), True, 'import mxnet as mx\n'), ((629, 647), 'torch.diag', 'torch.diag', (['cosine'], {}), '(cosine)\n', (639, 647), False, 'import torch\n')]
|
import json
import math
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Any, Dict, List
import boto3
import boto3.dynamodb.types
from boto3.dynamodb.conditions import Attr
from logzero import logger
def send_messages(messages: List[Dict[str, str]], queue_name: str,
batch_size: int = 10) -> None:
sqs = boto3.resource('sqs')
client = boto3.client('sqs')
queue = sqs.get_queue_by_name(QueueName=queue_name)
entries = [{
'Id': f'{i}',
'MessageBody': json.dumps(msg)
} for i, msg in enumerate(messages)]
num_segments = math.ceil(len(messages)/float(batch_size))
for k in range(num_segments):
i = k*batch_size
j = i+batch_size
client.send_message_batch(
QueueUrl=queue.url,
Entries=entries[i:j]
)
def get_messages(queue_name: str, batch_size: int = 10) -> List[Dict[str, Any]]:
sqs = boto3.resource('sqs')
client = boto3.client('sqs')
queue = sqs.get_queue_by_name(QueueName=queue_name)
response = client.receive_message(QueueUrl=queue.url,
MaxNumberOfMessages=batch_size,
WaitTimeSeconds=0)
return response.get('Messages', [])
def delete_messages(messages: List[Dict[str, Any]], queue_name: str):
sqs = boto3.resource('sqs')
client = boto3.client('sqs')
queue = sqs.get_queue_by_name(QueueName=queue_name)
for message in messages:
client.delete_message(QueueUrl=queue.url,
ReceiptHandle=message['ReceiptHandle'])
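# Typical receive -> process -> delete loop built from the helpers above
# (queue name is hypothetical):
def drain_queue(queue_name: str = 'my-queue') -> None:
    while True:
        batch = get_messages(queue_name)
        if not batch:
            break
        for message in batch:
            logger.info('processing %s', message.get('MessageId'))
        delete_messages(batch, queue_name)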
def store_item(item: Dict[str, Any], table_name: str) -> None:
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table_name)
table.put_item(Item=item)
def get_news_items(news_item_table_name: str, created_at_from: datetime,
created_at_to: datetime) -> List[Dict[str, Any]]:
client = boto3.client('dynamodb')
paginator = client.get_paginator('query')
number_of_queries = math.ceil((created_at_to - created_at_from).total_seconds() / 86400.0) + 1
_now = datetime.now()
partition_keys = [str((_now - timedelta(days=d)).date()) for d in range(number_of_queries)]
operation_parameters_list = [{
'TableName': news_item_table_name,
'IndexName': 'LSI',
'KeyConditionExpression': ('created_at_date = :created_at_date AND '
'created_at BETWEEN :ca_from AND :ca_to'),
'ExpressionAttributeValues': {
':created_at_date': {'S': pk},
':ca_from': {'N': str(created_at_from.timestamp())},
':ca_to': {'N': str(created_at_to.timestamp())},
}
} for pk in partition_keys]
items = []
deserializer = boto3.dynamodb.types.TypeDeserializer()
for operation_parameters in operation_parameters_list:
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
for item in page['Items']:
items.append(deserializer.deserialize({'M': item}))
logger.info('Found {} news items to evaluate'.format(len(items)))
return items
def get_preferences(preference_table_name: str):
client = boto3.client('dynamodb')
paginator = client.get_paginator('query')
operation_parameters = {
'TableName': preference_table_name,
'KeyConditionExpression': 'preference_type = :preference_type',
'ExpressionAttributeValues': {
':preference_type': {'S': 'KEYWORD'}
}
}
items = []
deserializer = boto3.dynamodb.types.TypeDeserializer()
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
for item in page['Items']:
items.append(deserializer.deserialize({'M': item}))
logger.info('Found {} keywords'.format(len(items)))
return items
def store_preference(keyword: str, weight: float, preference_table_name: str):
preference_item = {
'preference_type': 'KEYWORD',
'preference_key': keyword,
'preference_weight': Decimal(weight)
}
return store_item(preference_item, preference_table_name)
def send_notification(msg: str, topic_arn_hint: str, subject: str) -> None:
client = boto3.client('sns')
topics = client.list_topics()['Topics']
topic_arn = get_topic_arn(topics, topic_arn_hint)
client.publish(
Subject=subject,
Message=msg,
TopicArn=topic_arn
)
def get_topic_arn(topics: List[Dict[str, Any]], topic_arn_hint: str) -> str:
for topic in topics:
if topic_arn_hint in topic['TopicArn']:
return topic['TopicArn']
def get_reports(evaluation_report_table_name: str, created_at_from: datetime,
created_at_to: datetime) -> List[Dict[str, Any]]:
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(evaluation_report_table_name)
response = table.scan(
FilterExpression=(
Attr('created_at').gte(Decimal(created_at_from.timestamp())) &
Attr('created_at').lt(Decimal(created_at_to.timestamp()))
)
)
return response['Items']
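# Usage sketch (hypothetical table name): fetch the reports produced in the last 24 hours:
#   reports = get_reports('evaluation-report', datetime.now() - timedelta(days=1), datetime.now())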
|
[
"boto3.client",
"decimal.Decimal",
"json.dumps",
"boto3.resource",
"datetime.timedelta",
"boto3.dynamodb.conditions.Attr",
"boto3.dynamodb.types.TypeDeserializer",
"datetime.datetime.now"
] |
[((368, 389), 'boto3.resource', 'boto3.resource', (['"""sqs"""'], {}), "('sqs')\n", (382, 389), False, 'import boto3\n'), ((403, 422), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (415, 422), False, 'import boto3\n'), ((949, 970), 'boto3.resource', 'boto3.resource', (['"""sqs"""'], {}), "('sqs')\n", (963, 970), False, 'import boto3\n'), ((984, 1003), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (996, 1003), False, 'import boto3\n'), ((1368, 1389), 'boto3.resource', 'boto3.resource', (['"""sqs"""'], {}), "('sqs')\n", (1382, 1389), False, 'import boto3\n'), ((1403, 1422), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (1415, 1422), False, 'import boto3\n'), ((1708, 1734), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (1722, 1734), False, 'import boto3\n'), ((1961, 1985), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {}), "('dynamodb')\n", (1973, 1985), False, 'import boto3\n'), ((2142, 2156), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2154, 2156), False, 'from datetime import datetime, timedelta\n'), ((2798, 2837), 'boto3.dynamodb.types.TypeDeserializer', 'boto3.dynamodb.types.TypeDeserializer', ([], {}), '()\n', (2835, 2837), False, 'import boto3\n'), ((3258, 3282), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {}), "('dynamodb')\n", (3270, 3282), False, 'import boto3\n'), ((3612, 3651), 'boto3.dynamodb.types.TypeDeserializer', 'boto3.dynamodb.types.TypeDeserializer', ([], {}), '()\n', (3649, 3651), False, 'import boto3\n'), ((4301, 4320), 'boto3.client', 'boto3.client', (['"""sns"""'], {}), "('sns')\n", (4313, 4320), False, 'import boto3\n'), ((4868, 4894), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (4882, 4894), False, 'import boto3\n'), ((4126, 4141), 'decimal.Decimal', 'Decimal', (['weight'], {}), '(weight)\n', (4133, 4141), False, 'from decimal import Decimal\n'), ((542, 557), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (552, 557), False, 'import json\n'), ((2191, 2208), 'datetime.timedelta', 'timedelta', ([], {'days': 'd'}), '(days=d)\n', (2200, 2208), False, 'from datetime import datetime, timedelta\n'), ((5019, 5037), 'boto3.dynamodb.conditions.Attr', 'Attr', (['"""created_at"""'], {}), "('created_at')\n", (5023, 5037), False, 'from boto3.dynamodb.conditions import Attr\n'), ((5094, 5112), 'boto3.dynamodb.conditions.Attr', 'Attr', (['"""created_at"""'], {}), "('created_at')\n", (5098, 5112), False, 'from boto3.dynamodb.conditions import Attr\n')]
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
# from geelweb.django.ratings.views import RateView
from .models import post
from django.urls import reverse
from django.shortcuts import get_object_or_404
import random
def home(request):
try:
posts = post.objects.all()
posts = posts[::-1]
one_post = random.randint(0, len(posts)-1)
random_post = posts[one_post]
print(random_post)
# ValueError covers random.randint(0, -1) when no posts exist yet
except (post.DoesNotExist, ValueError):
posts = None
return render(request, 'awwardapp/home.html', locals())
class PostListView(ListView):
model = post
template_name = 'awwardapp/home.html' #<app>/<model> <viewtype>.html
context_object_name = 'posts'
ordering = ['-date_posted']
class PostDetailView(DetailView):
model = post
class PostCreateView(LoginRequiredMixin, CreateView):
model = post
fields = ['title', 'caption', 'image', 'owner', 'url']
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = post
fields = ['title', 'caption']
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.owner:
return True
else:
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = post
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.owner:
return True
else:
return False
def about(request):
return render(request, 'awwardapp/about.html', {'title': 'About'})
# class PostRateView(LoginRequiredMixin, RateView):
# model = Rating
# fields = ['usability', 'design', 'content']
# def form_valid(self, form):
# form.instance.owner = self.request.user
# return super().form_valid(form)
|
[
"django.shortcuts.render"
] |
[((2028, 2087), 'django.shortcuts.render', 'render', (['request', '"""awwardapp/about.html"""', "{'title': 'About'}"], {}), "(request, 'awwardapp/about.html', {'title': 'About'})\n", (2034, 2087), False, 'from django.shortcuts import render\n')]
|
# (c)2020 TeleBot
# You may not use this file without proper authorship and consent from @TeleBotSupport
#
"""
Available command(s)
.sticklol
Generates a random laughing sticker.
"""
import random
from telethon import functions, types, utils
from telebot.utils import admin_cmd
def choser(cmd, pack, blacklist=None):
if blacklist is None:
blacklist = {}
docs = None
@telebot.on(admin_cmd(pattern=rf"{cmd}", outgoing=True))
async def handler(event):
await event.delete()
nonlocal docs
if docs is None:
docs = [
utils.get_input_document(x)
for x in (
await borg(
functions.messages.GetStickerSetRequest(
types.InputStickerSetShortName(pack)
)
)
).documents
if x.id not in blacklist
]
await event.respond(file=random.choice(docs))
choser(
"sticklol",
"TeleBot_LOLPack",
{
3088919966519394666,
3088919966519394334,
3088919966519394334,
3088919966519394334,
},
)
|
[
"telethon.types.InputStickerSetShortName",
"random.choice",
"telethon.utils.get_input_document",
"telebot.utils.admin_cmd"
] |
[((404, 446), 'telebot.utils.admin_cmd', 'admin_cmd', ([], {'pattern': 'f"""{cmd}"""', 'outgoing': '(True)'}), "(pattern=f'{cmd}', outgoing=True)\n", (413, 446), False, 'from telebot.utils import admin_cmd\n'), ((593, 620), 'telethon.utils.get_input_document', 'utils.get_input_document', (['x'], {}), '(x)\n', (617, 620), False, 'from telethon import functions, types, utils\n'), ((975, 994), 'random.choice', 'random.choice', (['docs'], {}), '(docs)\n', (988, 994), False, 'import random\n'), ((773, 809), 'telethon.types.InputStickerSetShortName', 'types.InputStickerSetShortName', (['pack'], {}), '(pack)\n', (803, 809), False, 'from telethon import functions, types, utils\n')]
|
import dash
import os
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import json
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from selenium import webdriver
chrome_exec_shim = "/app/.apt/opt/google/chrome/chrome"
opts = webdriver.ChromeOptions()
opts.binary_location = chrome_exec_shim
opts.add_argument("--no-sandbox");
opts.add_argument("--disable-gpu");
driver = webdriver.Chrome(executable_path=chrome_exec_shim, chrome_options=opts)
import pickle
with open('notebooks/pipeline.pkl', 'rb') as f:
pipeline = pickle.load(f)
from app import app
class Player:
def __init__(self, level, rating, prestige, games_won, qps, medals):
self.level = level
self.rating = rating
self.prestige = prestige
self.qps = qps
self.medals = medals
self.games_won = games_won
class Stats:
def __init__(self, elims=0, dmg_done=0, deaths=0, solo_kills=0):
self.elims = elims
self.dmg_done = dmg_done
self.deaths = deaths
self.solo_kills = solo_kills
class Medals:
def __init__(self, bronze=0, silver=0, gold=0):
self.bronze = bronze
self.silver = silver
self.gold = gold
def create_player(js):
if 'error' in js:
return Player(0,0,0, 0, Stats(), Medals())
if 'quickPlayStats' not in js:
return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
if 'careerStats' not in js['quickPlayStats']:
return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
if js.get('quickPlayStats',{}).get('careerStats',{}) == None or 'allHeroes' not in js.get('quickPlayStats',{}).get('careerStats',{}):
return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
elims = 0
damageDone = 0
deaths = 0
soloKills = 0
if js['quickPlayStats']['careerStats']['allHeroes']['combat'] != None:
if 'eliminations' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
elims = js['quickPlayStats']['careerStats']['allHeroes']['combat']['eliminations']
if 'damageDone' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
damageDone = js['quickPlayStats']['careerStats']['allHeroes']['combat']['damageDone']
if 'deaths' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
deaths = js['quickPlayStats']['careerStats']['allHeroes']['combat']['deaths']
if 'soloKills' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
soloKills = js['quickPlayStats']['careerStats']['allHeroes']['combat']['soloKills']
qps = Stats(elims,damageDone,deaths,soloKills)
medals = Medals(js['quickPlayStats']['awards'].get('medalsBronze'),
js['quickPlayStats']['awards'].get('medalsSilver'),
js['quickPlayStats']['awards'].get('medalsGold'))
return Player(js['level'],js['rating'],js['prestige'], js['quickPlayStats']['games']['won'], qps, medals)
def df_object(p):
item = [p.level,p.rating,p.prestige,p.games_won,p.qps.elims,p.qps.dmg_done,
p.qps.deaths,p.qps.solo_kills,p.medals.bronze,p.medals.silver,p.medals.gold]
return item
def select_player(username):
url = f"https://ow-api.com/v1/stats/pc/us/{username}/complete"
print(url)
response = requests.get(url)
j = json.loads(response.text)
return create_player(j)
##dataframe setup
columns = ['level','rating','prestige','games_won','qps_elims','qps_dmg_done',
'qps_deaths','qps_solo_kills','medals_bronze','medals_silver','medals_gold']
def predict(data):
kd = [i/(1+sum([data.qps_elims,data.qps_deaths])) for i in [data.qps_elims,data.qps_deaths]]
data['kill_ratio'] = kd[0]
data['death_ratio'] = kd[1]
column0 = []
column1 = []
for col in data.columns:
column0.append(col+str(0))
column1.append(col+str(1))
team1 = data.iloc[0:6].mean(axis=0)
team2 = data.iloc[6:12].mean(axis=0)
t1 = 0
t2 = 0
for col in data.columns:
if 'deaths' in col:
if team1[col] > team2[col]:
t1 = t1 - 1
t2 = t2 + 1
else:
t1 = t1 + 1
t2 = t2 - 1
else:
if team1[col] > team2[col]:
t1 = t1 + 1
t2 = t2 - 1
else:
t1 = t1 - 1
t2 = t2 + 1
data1 = dict(zip(column0,team1))
data2 = dict(zip(column1,team2))
data3 = pd.DataFrame([data1,data2])
data4 = pd.DataFrame(data3.max()).T
if np.random.randint(0,100) >= 90:
t1 = t1 + 10
elif np.random.randint(0,100) <= 10:
t2 = t2 + 10
if t1 > t2:
data4['won'] = 0
elif t2 > t1:
data4['won'] = 1
else:
data4['won'] = 0
data4 = data4.fillna(0)
target = 'won'
X_test = data4.drop(columns=target)
return pipeline.predict(X_test)
amount = 12
list_col1_inputs = []
list_col1_inputs.append(
html.H2("Enter Teammate Usernames")
)
for i in range(amount):
if(i == 6):
list_col1_inputs.append(html.H2("Enter Enemy Usernames"))
temp = html.Div(className="container",children=[
dcc.Input(
id='username-'+str(i),
className='userinput',
placeholder='Enter Username',
type='text',
value=''
)
]
)
list_col1_inputs.append(temp)
list_col1_inputs.extend([html.Button('Submit', id='submit'), html.P(id='username_out')])
column1 = dbc.Col(
list_col1_inputs,
md=5,
)
list_col2_inputs = [html.H2('Select Teammates')]
for i in range(amount):
if(i == 6):
list_col2_inputs.append(html.H2("Select Enemies"))
list_col2_inputs.append(html.Div(id='listofusernames'+str(i)))
list_col2_inputs.append(html.Button("Complete",id='complete'))
column2 = dbc.Col(
list_col2_inputs
)
column3 = dbc.Col(
[
html.Div(id='prediction')
]
)
layout = [dbc.Row([column1, column2]), dbc.Row([column3])]
list_of_username_outputs = []
list_of_username_inputs = []
list_of_username_variables= []
list_of_users_input = []
for i in range(amount):
list_of_username_outputs.append(Output('listofusernames'+str(i),'children'))
list_of_username_inputs.append(State('username-'+str(i), 'value'))
list_of_users_input.append(State('user'+str(i), 'value'))
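# One Output/State per text box; Dash passes the State values positionally as *args to the callback.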
@app.callback(list_of_username_outputs,
[Input('submit', 'n_clicks')],
state=list_of_username_inputs
)
def search_players(n_clicks,*args):
if n_clicks != None:
dropdowns = []
for i in range(amount):
driver.get(f"https://www.overbuff.com/search?q={args[i]}")
page_source = driver.page_source
soup = BeautifulSoup(page_source, 'html.parser')  # name a parser explicitly to avoid bs4's guessing warning
players = soup.find_all('a', class_="SearchResult", href=True)
userlist = []
for element in players:
if element.find(class_='player-platform').find(class_="fa fa-windows") is None:
continue
players.remove(element)
user = element['href'][12:]
userlist.append({'label':user,'value':user})
dropdowns.append(dcc.Dropdown(
id='user'+str(i),
options=userlist,
placeholder='Select Player',
value=userlist[0]['value']
))
return dropdowns
@app.callback(Output('prediction','children'),
[Input('complete', 'n_clicks')],
state=list_of_users_input
)
def create_teams(n_clicks,*args):
if n_clicks != None:
team1 = []
team2 = []
teams_dataframe = pd.DataFrame(columns=columns)
for i in range(len(args)):
player = select_player(args[i])
teams_dataframe.loc[len(teams_dataframe), :] = df_object(player)
chance = np.random.random()*100
return f'Chances of you winning this game is {chance}%'
|
[
"pandas.DataFrame",
"json.loads",
"dash_html_components.H2",
"dash_bootstrap_components.Row",
"dash_html_components.Div",
"dash_html_components.Button",
"dash.dependencies.Input",
"dash_bootstrap_components.Col",
"dash_html_components.P",
"pickle.load",
"selenium.webdriver.ChromeOptions",
"numpy.random.randint",
"requests.get",
"selenium.webdriver.Chrome",
"bs4.BeautifulSoup",
"numpy.random.random",
"dash.dependencies.Output"
] |
[((377, 402), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (400, 402), False, 'from selenium import webdriver\n'), ((523, 594), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'chrome_exec_shim', 'chrome_options': 'opts'}), '(executable_path=chrome_exec_shim, chrome_options=opts)\n', (539, 594), False, 'from selenium import webdriver\n'), ((5836, 5867), 'dash_bootstrap_components.Col', 'dbc.Col', (['list_col1_inputs'], {'md': '(5)'}), '(list_col1_inputs, md=5)\n', (5843, 5867), True, 'import dash_bootstrap_components as dbc\n'), ((6170, 6195), 'dash_bootstrap_components.Col', 'dbc.Col', (['list_col2_inputs'], {}), '(list_col2_inputs)\n', (6177, 6195), True, 'import dash_bootstrap_components as dbc\n'), ((673, 687), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (684, 687), False, 'import pickle\n'), ((3520, 3537), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3532, 3537), False, 'import requests\n'), ((3546, 3571), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (3556, 3571), False, 'import json\n'), ((4725, 4753), 'pandas.DataFrame', 'pd.DataFrame', (['[data1, data2]'], {}), '([data1, data2])\n', (4737, 4753), True, 'import pandas as pd\n'), ((5244, 5279), 'dash_html_components.H2', 'html.H2', (['"""Enter Teammate Usernames"""'], {}), "('Enter Teammate Usernames')\n", (5251, 5279), True, 'import dash_html_components as html\n'), ((5900, 5927), 'dash_html_components.H2', 'html.H2', (['"""Select Teammates"""'], {}), "('Select Teammates')\n", (5907, 5927), True, 'import dash_html_components as html\n'), ((6120, 6158), 'dash_html_components.Button', 'html.Button', (['"""Complete"""'], {'id': '"""complete"""'}), "('Complete', id='complete')\n", (6131, 6158), True, 'import dash_html_components as html\n'), ((6297, 6324), 'dash_bootstrap_components.Row', 'dbc.Row', (['[column1, column2]'], {}), '([column1, column2])\n', (6304, 6324), True, 'import dash_bootstrap_components as dbc\n'), ((6326, 6344), 'dash_bootstrap_components.Row', 'dbc.Row', (['[column3]'], {}), '([column3])\n', (6333, 6344), True, 'import dash_bootstrap_components as dbc\n'), ((7819, 7851), 'dash.dependencies.Output', 'Output', (['"""prediction"""', '"""children"""'], {}), "('prediction', 'children')\n", (7825, 7851), False, 'from dash.dependencies import Input, Output, State\n'), ((4805, 4830), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4822, 4830), True, 'import numpy as np\n'), ((5762, 5796), 'dash_html_components.Button', 'html.Button', (['"""Submit"""'], {'id': '"""submit"""'}), "('Submit', id='submit')\n", (5773, 5796), True, 'import dash_html_components as html\n'), ((5797, 5822), 'dash_html_components.P', 'html.P', ([], {'id': '"""username_out"""'}), "(id='username_out')\n", (5803, 5822), True, 'import dash_html_components as html\n'), ((6244, 6269), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""prediction"""'}), "(id='prediction')\n", (6252, 6269), True, 'import dash_html_components as html\n'), ((6754, 6781), 'dash.dependencies.Input', 'Input', (['"""submit"""', '"""n_clicks"""'], {}), "('submit', 'n_clicks')\n", (6759, 6781), False, 'from dash.dependencies import Input, Output, State\n'), ((8062, 8091), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (8074, 8091), True, 'import pandas as pd\n'), ((7867, 7896), 'dash.dependencies.Input', 'Input', (['"""complete"""', '"""n_clicks"""'], {}), "('complete', 'n_clicks')\n", (7872, 7896), False, 'from dash.dependencies import Input, Output, State\n'), ((4867, 4892), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4884, 4892), True, 'import numpy as np\n'), ((5355, 5387), 'dash_html_components.H2', 'html.H2', (['"""Enter Enemy Usernames"""'], {}), "('Enter Enemy Usernames')\n", (5362, 5387), True, 'import dash_html_components as html\n'), ((6002, 6027), 'dash_html_components.H2', 'html.H2', (['"""Select Enemies"""'], {}), "('Select Enemies')\n", (6009, 6027), True, 'import dash_html_components as html\n'), ((7085, 7111), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page_source'], {}), '(page_source)\n', (7098, 7111), False, 'from bs4 import BeautifulSoup\n'), ((8265, 8283), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8281, 8283), True, 'import numpy as np\n')]
|
from framework.utils.common_utils import by_css
from testdata.test_data import url
USERNAME = 'username'
PASSWORD = 'password'
VALID_CREDENTIALS = {USERNAME: "<EMAIL>",
PASSWORD: "<PASSWORD>"}
DATA_WINNERS_ACCOUNT_PAGE = url("/account/")
ORGANIZATION_SECTOR_DROP_DOWN_LIST = by_css("select#id_sector")
|
[
"testdata.test_data.url",
"framework.utils.common_utils.by_css"
] |
[((244, 260), 'testdata.test_data.url', 'url', (['"""/account/"""'], {}), "('/account/')\n", (247, 260), False, 'from testdata.test_data import url\n'), ((298, 324), 'framework.utils.common_utils.by_css', 'by_css', (['"""select#id_sector"""'], {}), "('select#id_sector')\n", (304, 324), False, 'from framework.utils.common_utils import by_css\n')]
|
# Copied from https://github.com/Athesdrake/aiotfm/blob/master/aiotfm/client.py
import sys
import asyncio
import traceback
class InvalidEvent(Exception):
"""Exception thrown when you added an invalid event to the client.
An event is valid only if its name begin by 'on_' and it is coroutine.
"""
class EventBased:
"""A class that implements asynchronous events
"""
def __init__(self):
self._waiters = {}
# wait_for() and dispatch() below need a loop reference to create futures/tasks
self.loop = asyncio.get_event_loop()
def event(self, coro):
"""A decorator that registers an event.
"""
name = coro.__name__
if not name.startswith('on_'):
raise InvalidEvent("'{}' isn't a correct event naming.".format(name))
if not asyncio.iscoroutinefunction(coro):
message = "Couldn't register a non-coroutine function for the event {}.".format(name)
raise InvalidEvent(message)
setattr(self, name, coro)
return coro
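# Usage sketch (hypothetical instance name `bot`): the handler must be a
# coroutine whose name starts with 'on_':
#   @bot.event
#   async def on_ready():
#       print('ready')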
def wait_for(self, event, condition=None, timeout=None, stopPropagation=False):
"""Wait for an event.
:param event: :class:`str` the event name.
:param condition: Optional[`function`] A predicate to check what to wait for.
The arguments must meet the parameters of the event being waited for.
:param timeout: Optional[:class:`int`] the number of seconds before
throwing asyncio.TimeoutError
:param stopPropagation: Optional[:class:`bool`] if True, a matching event is
consumed by this waiter and not propagated to other waiters or handlers.
:return: [`asyncio.Future`](https://docs.python.org/3/library/asyncio-future.html#asyncio.Future)
a future that you must await.
"""
event = event.lower()
future = self.loop.create_future()
if condition is None:
def everything(*a):
return True
condition = everything
if event not in self._waiters:
self._waiters[event] = []
self._waiters[event].append((condition, future, stopPropagation))
return asyncio.wait_for(future, timeout)
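# Usage sketch (hypothetical event name and predicate): block until a matching
# event fires, or raise asyncio.TimeoutError after 5 seconds:
#   args = await bot.wait_for('on_message', condition=lambda m: m.startswith('!'), timeout=5)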
async def _run_event(self, coro, event_name, *args, **kwargs):
"""|coro|
Runs an event and handle the error if any.
:param coro: a coroutine function.
:param event_name: :class:`str` the event's name.
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
:return: :class:`bool` whether the event ran successfully or not
"""
try:
await coro(*args, **kwargs)
return True
# except asyncio.CancelledError:
# raise
except Exception as e:
if hasattr(self, 'on_error'):
await self.on_error(event_name, e, *args, **kwargs)
return False
def dispatch(self, event, *args, **kwargs):
"""Dispatches events
:param event: :class:`str` event's name. (without 'on_')
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
:return: [`Task`](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task)
the _run_event wrapper task
"""
method = 'on_' + event
if method in self._waiters:
to_remove = []
waiters = self._waiters[method]
for i, (cond, fut, stop) in enumerate(waiters):
if fut.cancelled():
to_remove.append(i)
continue
try:
result = bool(cond(*args))
except Exception as e:
fut.set_exception(e)
else:
if result:
fut.set_result(args[0] if len(args) == 1 else args if len(args) > 0 else None)
if stop:
del waiters[i]
return None
to_remove.append(i)
if len(to_remove) == len(waiters):
del self._waiters[method]
else:
for i in to_remove[::-1]:
del waiters[i]
coro = getattr(self, method, None)
if coro is not None:
dispatch = self._run_event(coro, method, *args, **kwargs)
return self.loop.call_soon_threadsafe(
self.loop.create_task,
dispatch
)
async def on_error(self, event, err, *a, **kw):
"""Default on_error event handler. Prints the traceback of the error."""
message = '\nAn error occurred while dispatching the event "{0}":\n\n{2}'
tb = traceback.format_exc(limit=-3)
print(message.format(event, err, tb), file=sys.stderr)
return message.format(event, err, tb)
|
[
"asyncio.wait_for",
"traceback.format_exc",
"asyncio.iscoroutinefunction"
] |
[((1665, 1698), 'asyncio.wait_for', 'asyncio.wait_for', (['future', 'timeout'], {}), '(future, timeout)\n', (1681, 1698), False, 'import asyncio\n'), ((3722, 3752), 'traceback.format_exc', 'traceback.format_exc', ([], {'limit': '(-3)'}), '(limit=-3)\n', (3742, 3752), False, 'import traceback\n'), ((628, 661), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['coro'], {}), '(coro)\n', (655, 661), False, 'import asyncio\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 21:24:37 2019
@author: anilosmantur
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from pylab import rcParams
rcParams['figure.figsize'] = 10, 5
n_samples = 30  # 91
dataNameList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',
'lowBeta','highBeta','lowGamma','midGamma','poorSignal']
featureList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',
'lowBeta','highBeta','lowGamma','midGamma']
labels = ['focus','relax', 'upWord', 'downWord',
'upColor', 'downColor',
'CyanUP','greenDOWN', 'yellowRIGHT', 'BlackLEFT']#,'blink']
labels = ['relax','upColor','CyanUP']
n_label = len(labels)
#label = labels[2]
#count = 0
trainDataDict = dict()
for data in dataNameList:
trainDataDict[data] = []
testDataDict = dict()
for data in dataNameList:
testDataDict[data] = []
def load_data(dataDict, label, count):
for data in dataNameList:
dataDict[data].append(np.load('dataset/{}/{}/{}.npy'.format(label,count,data))[:100])
#n_samples = 10
test_n_samples = int(n_samples/2)
test_size = n_label * int(n_samples/2)
train_n_samples = round(n_samples/2)
train_size = n_label * round(n_samples/2)
#nums = np.arange(n_samples)*2
nums = np.arange(n_samples)
trainNums = np.concatenate([nums[:5],nums[10:15],nums[20:25]])#,nums[31:41], nums[51:61],nums[71:81]])
#trainNums = nums[:5]
np.random.shuffle(trainNums)
testNums = np.concatenate([nums[5:10],nums[15:20],nums[25:30]])#,nums[41:51], nums[61:71],nums[81:91]])
#testNums = nums[5:10]
np.random.shuffle(testNums)
for label in labels:
for i in trainNums:
load_data(trainDataDict,label, i)
for label in labels:
for i in testNums:
load_data(testDataDict,label, i)
for data in dataNameList:
trainDataDict[data] = np.array(trainDataDict[data])
for data in dataNameList:
testDataDict[data] = np.array(testDataDict[data])
#connect features
trainData = []
for data in featureList:
trainData.append(trainDataDict[data])
trainData = np.array(trainData).transpose(1,0,2)
testData = []
for data in featureList:
testData.append(testDataDict[data])
testData = np.array(testData).transpose(1,0,2)
trainData = trainData.astype('float32')
testData = testData.astype('float32')
## normalization needed
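# Each sample is 11 features x 100 timesteps, flattened to a 1100-dim vector before scaling.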
scaler = MinMaxScaler()
print(scaler.fit(trainData.reshape(-1, 1100)))
trainData = scaler.transform(trainData.reshape(-1, 1100))
testData = scaler.transform(testData.reshape(-1, 1100))
trainLabels = []
for i in range(n_label):
trainLabels.append(np.ones(train_n_samples)*i )#,np.ones(15)*2])
trainLabels = np.concatenate(trainLabels)
testLabels = []
for i in range(n_label):
testLabels.append(np.ones(test_n_samples)*i )#,np.ones(15)*2])
testLabels = np.concatenate(testLabels)
from sklearn.model_selection import GridSearchCV
param_grid = {
'n_estimators':[20, 50, 100, 150, 200],
'max_features':['auto', 'sqrt', 'log2'],
'max_depth':[2,3,4],
'criterion':['gini','entropy'],
}
rfc = RandomForestClassifier(random_state=42)
rfc_cv = GridSearchCV(estimator=rfc,param_grid=param_grid,cv=5)
rfc_cv.fit(trainData, trainLabels)
#print('feature : ', dataNameList[i])
print(rfc_cv.best_score_)
print(rfc_cv.best_params_)
preds = np.array(rfc_cv.predict(testData))
scores = metrics.accuracy_score(testLabels, preds)
print('test %: {:6.2f}%'.format(scores*100))
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.GridSearchCV",
"numpy.concatenate",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.MinMaxScaler",
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.random.shuffle"
] |
[((1575, 1595), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (1584, 1595), True, 'import numpy as np\n'), ((1608, 1660), 'numpy.concatenate', 'np.concatenate', (['[nums[:5], nums[10:15], nums[20:25]]'], {}), '([nums[:5], nums[10:15], nums[20:25]])\n', (1622, 1660), True, 'import numpy as np\n'), ((1721, 1749), 'numpy.random.shuffle', 'np.random.shuffle', (['trainNums'], {}), '(trainNums)\n', (1738, 1749), True, 'import numpy as np\n'), ((1761, 1815), 'numpy.concatenate', 'np.concatenate', (['[nums[5:10], nums[15:20], nums[25:30]]'], {}), '([nums[5:10], nums[15:20], nums[25:30]])\n', (1775, 1815), True, 'import numpy as np\n'), ((1877, 1904), 'numpy.random.shuffle', 'np.random.shuffle', (['testNums'], {}), '(testNums)\n', (1894, 1904), True, 'import numpy as np\n'), ((2630, 2644), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2642, 2644), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2932, 2959), 'numpy.concatenate', 'np.concatenate', (['trainLabels'], {}), '(trainLabels)\n', (2946, 2959), True, 'import numpy as np\n'), ((3082, 3108), 'numpy.concatenate', 'np.concatenate', (['testLabels'], {}), '(testLabels)\n', (3096, 3108), True, 'import numpy as np\n'), ((3364, 3403), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (3386, 3403), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3413, 3469), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'rfc', 'param_grid': 'param_grid', 'cv': '(5)'}), '(estimator=rfc, param_grid=param_grid, cv=5)\n', (3425, 3469), False, 'from sklearn.model_selection import GridSearchCV\n'), ((3648, 3689), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['testLabels', 'preds'], {}), '(testLabels, preds)\n', (3670, 3689), False, 'from sklearn import metrics\n'), ((2132, 2161), 'numpy.array', 'np.array', (['trainDataDict[data]'], {}), '(trainDataDict[data])\n', (2140, 2161), True, 'import numpy as np\n'), ((2213, 2241), 'numpy.array', 'np.array', (['testDataDict[data]'], {}), '(testDataDict[data])\n', (2221, 2241), True, 'import numpy as np\n'), ((2355, 2374), 'numpy.array', 'np.array', (['trainData'], {}), '(trainData)\n', (2363, 2374), True, 'import numpy as np\n'), ((2482, 2500), 'numpy.array', 'np.array', (['testData'], {}), '(testData)\n', (2490, 2500), True, 'import numpy as np\n'), ((2872, 2896), 'numpy.ones', 'np.ones', (['train_n_samples'], {}), '(train_n_samples)\n', (2879, 2896), True, 'import numpy as np\n'), ((3024, 3047), 'numpy.ones', 'np.ones', (['test_n_samples'], {}), '(test_n_samples)\n', (3031, 3047), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, print_function
from numpy.testing import TestCase, dec, assert_, run_module_suite
from scipy.weave import inline_tools
class TestInline(TestCase):
"""These are long running tests...
Would be useful to benchmark these things somehow.
"""
@dec.slow
def test_exceptions(self):
a = 3
code = """
if (a < 2)
throw_error(PyExc_ValueError,
"the variable 'a' should not be less than 2");
else
return_val = PyInt_FromLong(a+1);
"""
result = inline_tools.inline(code,['a'])
assert_(result == 4)
## Unfortunately, it is not always possible to catch distutils compiler
## errors, since SystemExit is used. Until that is fixed, these tests
## cannot be run in the same process as the test suite.
## try:
## a = 1
## result = inline_tools.inline(code,['a'])
## assert_(1) # should've thrown a ValueError
## except ValueError:
## pass
## from distutils.errors import DistutilsError, CompileError
## try:
## a = 'string'
## result = inline_tools.inline(code,['a'])
## assert_(1) # should've gotten an error
## except:
## # ?CompileError is the error reported, but catching it doesn't work
## pass
if __name__ == "__main__":
run_module_suite()
|
[
"scipy.weave.inline_tools.inline",
"numpy.testing.assert_",
"numpy.testing.run_module_suite"
] |
[((1475, 1493), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (1491, 1493), False, 'from numpy.testing import TestCase, dec, assert_, run_module_suite\n'), ((632, 664), 'scipy.weave.inline_tools.inline', 'inline_tools.inline', (['code', "['a']"], {}), "(code, ['a'])\n", (651, 664), False, 'from scipy.weave import inline_tools\n'), ((672, 692), 'numpy.testing.assert_', 'assert_', (['(result == 4)'], {}), '(result == 4)\n', (679, 692), False, 'from numpy.testing import TestCase, dec, assert_, run_module_suite\n')]
|
import os
import subprocess
import re
BIN_FFMPEG = 'ffmpeg'
def convert(srcfile, outfile, bit_rate, channels, sample_rate, codec, tags, volume=None, verbose=False):
"""
Converts the source file to the outfile with the proper transformations.
Includes the additional tags.
"""
if srcfile == outfile:
raise Exception('Does not support overwriting file')
if os.path.isfile(outfile):
os.unlink(outfile)
cmd = [
BIN_FFMPEG, '-i', srcfile,
'-vn', '-sn', '-dn',
'-acodec', codec, '-ar', str(sample_rate),
'-ac', str(channels), '-b:a', str(bit_rate),
'-bits_per_raw_sample', '16'
]
if volume:
cmd.append('-filter:a')
cmd.append("volume={0}".format(volume))
for k, v in tags.items():
cmd.append('-metadata')
cmd.append('{0}={1}'.format(k, v))
cmd.append(outfile)
if verbose:
print(' '.join(cmd))
# force bits per sample = 16.
subprocess.run(cmd,
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
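# Usage sketch (hypothetical paths and values):
#   convert('in.flac', 'out.m4a', bit_rate='192k', channels=2, sample_rate=44100,
#           codec='aac', tags={'title': 'Demo'}, volume='-3dB')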
def trim_audio(srcfile, outfile, start_time, end_time):
"""
Trims audio. Start and end time must be in the "hh:mm:ss.nn" format (e.g. 00:01:22.00)
"""
if srcfile == outfile:
raise Exception('Does not support overwriting file')
if os.path.isfile(outfile):
os.unlink(outfile)
cmd = [
BIN_FFMPEG, '-i', srcfile,
#'-movflags', 'use_metadata_tags',
'-map_metadata', '0:g',
'-map_metadata:s:a', '0:g',
'-c', 'copy'
]
if start_time is not None:
cmd.extend(['-ss', start_time])
if end_time is not None:
cmd.extend(['-to', end_time])
cmd.append(outfile)
print('Running "{0}"'.format(' '.join(cmd)))
subprocess.run(cmd,
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
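# Usage sketch (hypothetical paths): keep the audio between 5s and 1m22s:
#   trim_audio('in.m4a', 'out.m4a', '00:00:05.00', '00:01:22.00')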
LINE_MEAN_VOLUME = \
re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] mean_volume: (-?\d+\.?\d*) dB')
LINE_MAX_VOLUME = \
re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] max_volume: (-?\d+\.?\d*) dB')
LINE_HISTOGRAM_VOLUME = \
re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] histogram_(\d+)db: (-?\d+\.?\d*)')
class VolumeLevel(object):
def __init__(self, mean_v, max_v, hist):
self.mean = mean_v
self.max = max_v
self.histogram = hist
def find_volume_levels(srcfile):
"""
"""
cmd = [
BIN_FFMPEG, '-i', srcfile,
'-af', "volumedetect",
'-vn', '-sn', '-dn',
'-f', 'null', os.path.devnull
]
# print('DEBUG running [{0}]'.format(' '.join(cmd)))
proc = subprocess.run(cmd, check=True,
#capture_output=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
encoding='utf-8', errors='ignore')
max_volume = None
mean_volume = None
histogram = {}
for line in proc.stdout.splitlines():
line = line.strip()
# print('DEBUG output [{0}]'.format(line))
m = LINE_MEAN_VOLUME.match(line)
if m:
mean_volume = float(m.group(2))
# print('DEBUG mean_volume = {0} / {1}'.format(m.group(2), mean_volume))
m = LINE_MAX_VOLUME.match(line)
if m:
max_volume = float(m.group(2))
# print('DEBUG max_volume = {0} / {1}'.format(m.group(2), max_volume))
m = LINE_HISTOGRAM_VOLUME.match(line)
if m:
histogram[m.group(2)] = float(m.group(3))
# print('DEBUG histogram {0}db = {1}'.format(m.group(2), m.group(3)))
# histogram starts as {} and is never None; treat an empty histogram as a parse failure
if mean_volume is None or max_volume is None or not histogram:
return None
return VolumeLevel(mean_volume, max_volume, histogram)
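# Usage sketch (hypothetical path): derive a gain that would bring the peak up to 0 dB:
#   levels = find_volume_levels('in.m4a')
#   if levels is not None:
#       gain = '{0}dB'.format(-levels.max)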
|
[
"subprocess.run",
"os.path.isfile",
"os.unlink",
"re.compile"
] |
[((1942, 2041), 're.compile', 're.compile', (['"""\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] mean_volume: (-?\\\\d+\\\\.?\\\\d*) dB"""'], {}), "(\n '\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] mean_volume: (-?\\\\d+\\\\.?\\\\d*) dB'\n )\n", (1952, 2041), False, 'import re\n'), ((2050, 2148), 're.compile', 're.compile', (['"""\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] max_volume: (-?\\\\d+\\\\.?\\\\d*) dB"""'], {}), "(\n '\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] max_volume: (-?\\\\d+\\\\.?\\\\d*) dB'\n )\n", (2060, 2148), False, 'import re\n'), ((2163, 2266), 're.compile', 're.compile', (['"""\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] histogram_(\\\\d+)db: (-?\\\\d+\\\\.?\\\\d*)"""'], {}), "(\n '\\\\[Parsed_volumedetect_\\\\d+ @ ([^\\\\]]+)\\\\] histogram_(\\\\d+)db: (-?\\\\d+\\\\.?\\\\d*)'\n )\n", (2173, 2266), False, 'import re\n'), ((392, 415), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (406, 415), False, 'import os\n'), ((980, 1067), 'subprocess.run', 'subprocess.run', (['cmd'], {'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.PIPE'}), '(cmd, check=True, stdout=subprocess.DEVNULL, stderr=\n subprocess.PIPE)\n', (994, 1067), False, 'import subprocess\n'), ((1349, 1372), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (1363, 1372), False, 'import os\n'), ((1808, 1895), 'subprocess.run', 'subprocess.run', (['cmd'], {'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.PIPE'}), '(cmd, check=True, stdout=subprocess.DEVNULL, stderr=\n subprocess.PIPE)\n', (1822, 1895), False, 'import subprocess\n'), ((2675, 2796), 'subprocess.run', 'subprocess.run', (['cmd'], {'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.\n STDOUT, encoding='utf-8', errors='ignore')\n", (2689, 2796), False, 'import subprocess\n'), ((425, 443), 'os.unlink', 'os.unlink', (['outfile'], {}), '(outfile)\n', (434, 443), False, 'import os\n'), ((1382, 1400), 'os.unlink', 'os.unlink', (['outfile'], {}), '(outfile)\n', (1391, 1400), False, 'import os\n')]
|
import os
ENVIRONMENT = os.environ.get("ENVIRONMENT")
SECRET_KEY = os.environ.get("SECRET_KEY")
ORDNANCE_SURVEY_PLACES_API_KEY = os.environ.get("ORDNANCE_SURVEY_PLACES_API_KEY")
PERMANENT_SESSION_LIFETIME = int(os.environ.get("PERMANENT_SESSION_LIFETIME"))
GA_TRACKING_ID = os.environ.get("GA_TRACKING_ID")
GA_CROSS_DOMAIN_TRACKING_ID = os.environ.get("GA_CROSS_DOMAIN_TRACKING_ID")
# NHS OIDC config
NHS_OIDC_AUTHORITY_URL = os.environ.get("NHS_OIDC_AUTHORITY_URL")
NHS_OIDC_CLIENT_ID = os.environ.get("NHS_OIDC_CLIENT_ID")
NHS_OIDC_REGISTRATION_CALLBACK_URL = os.environ.get("NHS_OIDC_REGISTRATION_CALLBACK_URL")
NHS_OIDC_LOGIN_CALLBACK_URL = os.environ.get("NHS_OIDC_LOGIN_CALLBACK_URL")
NHS_OIDC_LOGIN_PRIVATE_KEY = os.environ.get("NHS_OIDC_LOGIN_PRIVATE_KEY")
# AWS CONFIG
LOCAL_AWS_ENDPOINT_URL = os.environ.get("LOCAL_AWS_ENDPOINT_URL")
LOCAL_SQS_ENDPOINT_URL = os.environ.get("LOCAL_SQS_ENDPOINT_URL")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
DATABASE_SECRET_TAGS = [s.strip() for s in os.environ.get("DATABASE_SECRET_TAGS", "").split(",")]
DATABASE_CLUSTER_PREFIX = os.environ.get("DATABASE_CLUSTER_PREFIX")
AWS_RDS_DATABASE_ARN_OVERRIDE = os.environ.get("AWS_RDS_DATABASE_ARN_OVERRIDE")
AWS_RDS_SECRET_ARN_OVERRIDE = os.environ.get("AWS_RDS_SECRET_ARN_OVERRIDE")
AWS_SQS_QUEUE_URL = os.environ.get("AWS_SQS_QUEUE_URL")
SENTRY_DSN = os.environ.get("SENTRY_DSN")
POSTCODE_TIER_OVERRIDE = os.environ.get("POSTCODE_TIER_OVERRIDE")
SUBMISSION_TRACING_PEPPER = os.environ.get("SUBMISSION_TRACING_PEPPER")
|
[
"os.environ.get"
] |
[((25, 54), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""'], {}), "('ENVIRONMENT')\n", (39, 54), False, 'import os\n'), ((68, 96), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (82, 96), False, 'import os\n'), ((130, 178), 'os.environ.get', 'os.environ.get', (['"""ORDNANCE_SURVEY_PLACES_API_KEY"""'], {}), "('ORDNANCE_SURVEY_PLACES_API_KEY')\n", (144, 178), False, 'import os\n'), ((275, 307), 'os.environ.get', 'os.environ.get', (['"""GA_TRACKING_ID"""'], {}), "('GA_TRACKING_ID')\n", (289, 307), False, 'import os\n'), ((338, 383), 'os.environ.get', 'os.environ.get', (['"""GA_CROSS_DOMAIN_TRACKING_ID"""'], {}), "('GA_CROSS_DOMAIN_TRACKING_ID')\n", (352, 383), False, 'import os\n'), ((429, 469), 'os.environ.get', 'os.environ.get', (['"""NHS_OIDC_AUTHORITY_URL"""'], {}), "('NHS_OIDC_AUTHORITY_URL')\n", (443, 469), False, 'import os\n'), ((491, 527), 'os.environ.get', 'os.environ.get', (['"""NHS_OIDC_CLIENT_ID"""'], {}), "('NHS_OIDC_CLIENT_ID')\n", (505, 527), False, 'import os\n'), ((565, 617), 'os.environ.get', 'os.environ.get', (['"""NHS_OIDC_REGISTRATION_CALLBACK_URL"""'], {}), "('NHS_OIDC_REGISTRATION_CALLBACK_URL')\n", (579, 617), False, 'import os\n'), ((648, 693), 'os.environ.get', 'os.environ.get', (['"""NHS_OIDC_LOGIN_CALLBACK_URL"""'], {}), "('NHS_OIDC_LOGIN_CALLBACK_URL')\n", (662, 693), False, 'import os\n'), ((723, 767), 'os.environ.get', 'os.environ.get', (['"""NHS_OIDC_LOGIN_PRIVATE_KEY"""'], {}), "('NHS_OIDC_LOGIN_PRIVATE_KEY')\n", (737, 767), False, 'import os\n'), ((807, 847), 'os.environ.get', 'os.environ.get', (['"""LOCAL_AWS_ENDPOINT_URL"""'], {}), "('LOCAL_AWS_ENDPOINT_URL')\n", (821, 847), False, 'import os\n'), ((873, 913), 'os.environ.get', 'os.environ.get', (['"""LOCAL_SQS_ENDPOINT_URL"""'], {}), "('LOCAL_SQS_ENDPOINT_URL')\n", (887, 913), False, 'import os\n'), ((927, 955), 'os.environ.get', 'os.environ.get', (['"""AWS_REGION"""'], {}), "('AWS_REGION')\n", (941, 955), False, 'import os\n'), ((973, 1005), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCESS_KEY"""'], {}), "('AWS_ACCESS_KEY')\n", (987, 1005), False, 'import os\n'), ((1030, 1069), 'os.environ.get', 'os.environ.get', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (1044, 1069), False, 'import os\n'), ((1196, 1237), 'os.environ.get', 'os.environ.get', (['"""DATABASE_CLUSTER_PREFIX"""'], {}), "('DATABASE_CLUSTER_PREFIX')\n", (1210, 1237), False, 'import os\n'), ((1271, 1318), 'os.environ.get', 'os.environ.get', (['"""AWS_RDS_DATABASE_ARN_OVERRIDE"""'], {}), "('AWS_RDS_DATABASE_ARN_OVERRIDE')\n", (1285, 1318), False, 'import os\n'), ((1349, 1394), 'os.environ.get', 'os.environ.get', (['"""AWS_RDS_SECRET_ARN_OVERRIDE"""'], {}), "('AWS_RDS_SECRET_ARN_OVERRIDE')\n", (1363, 1394), False, 'import os\n'), ((1415, 1450), 'os.environ.get', 'os.environ.get', (['"""AWS_SQS_QUEUE_URL"""'], {}), "('AWS_SQS_QUEUE_URL')\n", (1429, 1450), False, 'import os\n'), ((1464, 1492), 'os.environ.get', 'os.environ.get', (['"""SENTRY_DSN"""'], {}), "('SENTRY_DSN')\n", (1478, 1492), False, 'import os\n'), ((1519, 1559), 'os.environ.get', 'os.environ.get', (['"""POSTCODE_TIER_OVERRIDE"""'], {}), "('POSTCODE_TIER_OVERRIDE')\n", (1533, 1559), False, 'import os\n'), ((1589, 1632), 'os.environ.get', 'os.environ.get', (['"""SUBMISSION_TRACING_PEPPER"""'], {}), "('SUBMISSION_TRACING_PEPPER')\n", (1603, 1632), False, 'import os\n'), ((212, 256), 'os.environ.get', 'os.environ.get', (['"""PERMANENT_SESSION_LIFETIME"""'], {}), "('PERMANENT_SESSION_LIFETIME')\n", (226, 256), False, 'import os\n'), ((1114, 1156), 'os.environ.get', 'os.environ.get', (['"""DATABASE_SECRET_TAGS"""', '""""""'], {}), "('DATABASE_SECRET_TAGS', '')\n", (1128, 1156), False, 'import os\n')]
|
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.core.paginator import Paginator
from django.db.models import Q, Count, Max
from django.views.generic import ListView, DetailView, TemplateView
from django_filters.views import FilterView
from rest_framework.generics import ListAPIView, RetrieveAPIView
from django_filters import rest_framework as filters
from .models import Author, Work, Character, CharacterInstance, Speech, SpeechCluster
from .serializers import AuthorSerializer, WorkSerializer, CharacterSerializer, CharacterInstanceSerializer, SpeechSerializer, SpeechClusterSerializer
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
CTS_READER = 'https://scaife.perseus.org/reader/'
PAGE_SIZE = 25
# parameter validation
def ValidateParams(request, valid_params):
'''collect valid parameters, check types'''
params = {}
for param, vtype in valid_params:
if param in request.GET:
val = request.GET[param][:256].strip()
if val != '':
try:
params[param] = vtype(val)
except ValueError:
pass
return params
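# Usage sketch (hypothetical query string ?spkr_id=3&foo=bar):
#   ValidateParams(request, [('spkr_id', int)]) -> {'spkr_id': 3}
# Values that fail the type conversion (and unknown params) are silently dropped.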
#
# API filters
#
class AuthorFilter(filters.FilterSet):
class Meta:
model = Author
fields = ['id', 'name', 'wd']
class WorkFilter(filters.FilterSet):
author_id = filters.NumberFilter('author__id')
author_name = filters.CharFilter('author__name')
author_wd = filters.CharFilter('author__wd')
author_urn = filters.CharFilter('author__urn')
class Meta:
model = Work
fields = ['id', 'title', 'wd', 'urn',
'author_name', 'author_id', 'author_wd']
class CharacterFilter(filters.FilterSet):
class Meta:
model = Character
fields = ['id', 'name', 'wd', 'manto', 'gender', 'number', 'being']
class CharacterInstanceFilter(filters.FilterSet):
name = filters.CharFilter('name')
gender = filters.ChoiceFilter('gender',
choices=Character.CharacterGender.choices)
number = filters.ChoiceFilter('number',
choices=Character.CharacterNumber.choices)
being = filters.ChoiceFilter('being',
choices=Character.CharacterBeing.choices)
anon = filters.BooleanFilter('anon')
char_id = filters.NumberFilter('char__id')
char_name = filters.CharFilter('char__name')
char_wd = filters.CharFilter('char__wd')
char_manto = filters.CharFilter('char__manto')
char_gender = filters.ChoiceFilter('char__gender',
choices=Character.CharacterGender.choices)
char_number = filters.ChoiceFilter('char__number',
choices=Character.CharacterNumber.choices)
char_being = filters.ChoiceFilter('char__being',
choices=Character.CharacterBeing.choices)
class Meta:
model = CharacterInstance
fields = ['id', 'name', 'gender', 'number', 'being', 'anon',
'char_id', 'char_name', 'char_wd', 'char_manto',
'char_gender', 'char_number', 'char_being']
class SpeechFilter(filters.FilterSet):
spkr_id = filters.NumberFilter('spkr__char__id')
spkr_name = filters.CharFilter('spkr__name')
spkr_manto = filters.CharFilter('spkr__char__manto')
spkr_wd = filters.CharFilter('spkr__char__wd')
spkr_gender = filters.ChoiceFilter('spkr__gender',
choices=Character.CharacterGender.choices)
spkr_number = filters.ChoiceFilter('spkr__number',
choices=Character.CharacterNumber.choices)
spkr_being = filters.ChoiceFilter('spkr__being',
choices=Character.CharacterBeing.choices)
spkr_anon = filters.BooleanFilter('spkr__anon')
addr_id = filters.NumberFilter('addr__char__id')
addr_name = filters.CharFilter('addr__name')
addr_manto = filters.CharFilter('addr__char__manto')
addr_wd = filters.CharFilter('addr__char__wd')
addr_gender = filters.ChoiceFilter('addr__gender',
choices=Character.CharacterGender.choices)
addr_number = filters.ChoiceFilter('addr__number',
choices=Character.CharacterNumber.choices)
addr_being = filters.ChoiceFilter('addr__being',
choices=Character.CharacterBeing.choices)
addr_anon = filters.BooleanFilter('addr__anon')
spkr_inst = filters.NumberFilter('spkr__id')
addr_inst = filters.NumberFilter('addr__id')
type = filters.ChoiceFilter('type', choices=Speech.SpeechType.choices)
cluster_id = filters.NumberFilter('cluster__id')
work_id = filters.NumberFilter('work__id')
work_title = filters.CharFilter('work__title')
work_urn = filters.CharFilter('work__urn')
work_wd = filters.CharFilter('work__wd')
author_id = filters.NumberFilter('work__author__id')
author_name = filters.CharFilter('work__author__name')
author_wd = filters.CharFilter('work__author__wd')
author_urn = filters.CharFilter('work__author__urn')
class Meta:
model = Speech
fields = ['id',
'spkr_id', 'spkr_name', 'spkr_manto', 'spkr_wd', 'spkr_gender',
'spkr_number', 'spkr_being', 'spkr_anon',
'addr_id', 'addr_name', 'addr_manto', 'addr_wd', 'addr_gender',
'addr_number', 'addr_being', 'addr_anon',
'spkr_inst', 'addr_inst',
'type',
'cluster_id',
'work_id', 'work_title', 'work_urn', 'work_wd',
'author_id', 'author_name', 'author_urn', 'author_wd',
'part']
class SpeechClusterFilter(filters.FilterSet):
class Meta:
model = SpeechCluster
fields = ['id']
#
# API class-based views
#
class AuthorList(ListAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
filterset_class = AuthorFilter
class AuthorDetail(RetrieveAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
class WorkList(ListAPIView):
queryset = Work.objects.all()
serializer_class = WorkSerializer
filterset_class = WorkFilter
class WorkDetail(RetrieveAPIView):
queryset = Work.objects.all()
serializer_class = WorkSerializer
class CharacterList(ListAPIView):
queryset = Character.objects.all()
serializer_class = CharacterSerializer
filterset_class = CharacterFilter
class CharacterDetail(RetrieveAPIView):
queryset = Character.objects.all()
serializer_class = CharacterSerializer
class CharacterInstanceList(ListAPIView):
queryset = CharacterInstance.objects.all()
serializer_class = CharacterInstanceSerializer
filterset_class = CharacterInstanceFilter
class CharacterInstanceDetail(RetrieveAPIView):
queryset = CharacterInstance.objects.all()
serializer_class = CharacterInstanceSerializer
class SpeechList(ListAPIView):
queryset = Speech.objects.all()
serializer_class = SpeechSerializer
filterset_class = SpeechFilter
class SpeechDetail(RetrieveAPIView):
queryset = Speech.objects.all()
serializer_class = SpeechSerializer
class SpeechClusterList(ListAPIView):
queryset = SpeechCluster.objects.all()
serializer_class = SpeechClusterSerializer
filterset_class = SpeechClusterFilter
class SpeechClusterDetail(RetrieveAPIView):
queryset = SpeechCluster.objects.all()
serializer_class = SpeechClusterSerializer
#
# Web frontend class-based views
#
class AppAuthorList(ListView):
model = Author
template_name = 'speechdb/author_list.html'
queryset = Author.objects.all()
paginate_by = PAGE_SIZE
class AppWorkList(ListView):
model = Work
template_name = 'speechdb/work_list.html'
queryset = Work.objects.all()
paginate_by = PAGE_SIZE
class AppCharacterList(ListView):
model = Character
template_name = 'speechdb/character_list.html'
queryset = Character.objects.all()
paginate_by = PAGE_SIZE
_valid_params = [
('name', str),
]
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['search_params'] = self.params.items()
return context
def get_queryset(self):
# collect user search params
self.params = ValidateParams(self.request, self._valid_params)
# construct query
query = []
# speaker by id
if 'name' in self.params:
query.append(Q(name=self.params['name']))
qs = Character.objects.filter(*query).order_by('name')
# calculate some useful counts
qs = qs.annotate(
Count('instances__speeches', distinct=True),
Count('instances__addresses', distinct=True),
)
return qs
class AppCharacterInstanceList(ListView):
model = CharacterInstance
template_name = 'speechdb/characterinstance_list.html'
queryset = CharacterInstance.objects.all()
paginate_by = PAGE_SIZE
_valid_params = [
('name', str),
]
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['search_params'] = self.params.items()
return context
def get_queryset(self):
# collect user search params
self.params = ValidateParams(self.request, self._valid_params)
# construct query
query = []
# speaker by id
if 'name' in self.params:
query.append(Q(char__name=self.params['name']))
qs = CharacterInstance.objects.filter(*query).order_by('char__name')
# calculate some useful counts
qs = qs.annotate(
Count('speeches', distinct=True),
Count('addresses', distinct=True),
)
return qs
class AppCharacterInstanceDetail(DetailView):
model = CharacterInstance
template_name = 'speechdb/characterinstance_detail.html'
context_object_name = 'inst'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['reader'] = CTS_READER
return context
class AppCharacterDetail(DetailView):
model = Character
template_name = 'speechdb/character_detail.html'
context_object_name = 'char'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['reader'] = CTS_READER
return context
class AppSpeechList(ListView):
model = Speech
template_name = 'speechdb/speech_list.html'
paginate_by = PAGE_SIZE
ordering = ['work', 'seq']
_valid_params = [
('spkr_id', int),
('addr_id', int),
('char_id', int),
('char_inst', int),
('spkr_inst', int),
('addr_inst', int),
('cluster_id', int),
('type', str),
('part', int),
('n_parts', int),
('work_id', int),
]
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['reader'] = CTS_READER
context['works'] = Work.objects.all()
context['characters'] = Character.objects.all()
context['speech_types'] = Speech.SpeechType.choices
context['search_params'] = self.params.items()
return context
def get_queryset(self):
# collect user search params
self.params = ValidateParams(self.request, self._valid_params)
# initial set of objects plus annotations
qs = Speech.objects.annotate(Count('cluster__speech'))
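# Count('cluster__speech') annotates each speech with its cluster's size,
# which the 'n_parts' filter below compares against.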
# construct query
query = []
# any participant
if 'char_id' in self.params:
query.append(
Q(spkr__char=self.params['char_id']) |
Q(spkr__disg=self.params['char_id']) |
Q(addr__char=self.params['char_id']) |
Q(addr__disg=self.params['char_id'])
)
# speaker by id
if 'spkr_id' in self.params:
query.append(Q(spkr__char=self.params['spkr_id']) | Q(spkr__disg=self.params['spkr_id']))
# speaker by instance
if 'spkr_inst' in self.params:
query.append(Q(spkr=self.params['spkr_inst']))
# addressee by id
if 'addr_id' in self.params:
query.append(Q(addr__char=self.params['addr_id']) | Q(addr__disg=self.params['addr_id']))
# addressee by instance
if 'addr_inst' in self.params:
query.append(Q(addr=self.params['addr_inst']))
if 'cluster_id' in self.params:
query.append(Q(cluster__pk=self.params['cluster_id']))
if 'type' in self.params:
query.append(Q(type=self.params['type']))
if 'part' in self.params:
query.append(Q(part=self.params['part']))
if 'n_parts' in self.params:
query.append(Q(cluster__speech__count=self.params['n_parts']))
if 'work_id' in self.params:
query.append(Q(cluster__work__pk=self.params['work_id']))
qs = qs.filter(*query)
# a second .order_by() call would replace the first, so order on both fields at once
qs = qs.order_by('work', 'seq')
return qs
class AppSpeechClusterList(ListView):
model = SpeechCluster
template_name = 'speechdb/speechcluster_list.html'
queryset = SpeechCluster.objects.all()
paginate_by = PAGE_SIZE
_valid_params = []
def get_queryset(self):
# collect user search params
self.params = ValidateParams(self.request, self._valid_params)
# construct query
query = []
return SpeechCluster.objects.filter(*query)
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['reader'] = CTS_READER
context['search_params'] = self.params.items()
return context
class AppSpeechClusterDetail(DetailView):
model = SpeechCluster
template_name = 'speechdb/speechcluster_detail.html'
context_object_name = 'cluster'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['reader'] = CTS_READER
return context
class AppIndex(TemplateView):
template_name = 'speechdb/index.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['works'] = Work.objects.all()
context['characters'] = Character.objects.all()
context['speeches'] = Speech.objects.all()
return context
class AppSpeechSearch(TemplateView):
template_name = 'speechdb/speech_search.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['works'] = Work.objects.all()
context['characters'] = Character.objects.all()
context['max_parts'] = Speech.objects.aggregate(Max('part'))['part__max']
context['speech_types'] = Speech.SpeechType.choices
return context
class AppSpeechClusterSearch(TemplateView):
template_name = 'speechdb/speechcluster_search.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['works'] = Work.objects.all()
context['characters'] = Character.objects.all()
context['speech_types'] = Speech.SpeechType.choices
return context
class AppCharacterSearch(TemplateView):
template_name = 'speechdb/character_search.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# add useful info
context['characters'] = Character.objects.all()
return context
|
[
"django.db.models.Max",
"django_filters.rest_framework.CharFilter",
"django_filters.rest_framework.NumberFilter",
"django.db.models.Q",
"django.db.models.Count",
"django_filters.rest_framework.BooleanFilter",
"logging.getLogger",
"django_filters.rest_framework.ChoiceFilter"
] |
[((741, 768), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (758, 768), False, 'import logging\n'), ((1456, 1490), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""author__id"""'], {}), "('author__id')\n", (1476, 1490), True, 'from django_filters import rest_framework as filters\n'), ((1509, 1543), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""author__name"""'], {}), "('author__name')\n", (1527, 1543), True, 'from django_filters import rest_framework as filters\n'), ((1560, 1592), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""author__wd"""'], {}), "('author__wd')\n", (1578, 1592), True, 'from django_filters import rest_framework as filters\n'), ((1610, 1643), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""author__urn"""'], {}), "('author__urn')\n", (1628, 1643), True, 'from django_filters import rest_framework as filters\n'), ((2023, 2049), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""name"""'], {}), "('name')\n", (2041, 2049), True, 'from django_filters import rest_framework as filters\n'), ((2063, 2136), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""gender"""'], {'choices': 'Character.CharacterGender.choices'}), "('gender', choices=Character.CharacterGender.choices)\n", (2083, 2136), True, 'from django_filters import rest_framework as filters\n'), ((2171, 2244), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""number"""'], {'choices': 'Character.CharacterNumber.choices'}), "('number', choices=Character.CharacterNumber.choices)\n", (2191, 2244), True, 'from django_filters import rest_framework as filters\n'), ((2277, 2348), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""being"""'], {'choices': 'Character.CharacterBeing.choices'}), "('being', choices=Character.CharacterBeing.choices)\n", (2297, 2348), True, 'from django_filters import rest_framework as filters\n'), ((2380, 2409), 'django_filters.rest_framework.BooleanFilter', 'filters.BooleanFilter', (['"""anon"""'], {}), "('anon')\n", (2401, 2409), True, 'from django_filters import rest_framework as filters\n'), ((2424, 2456), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""char__id"""'], {}), "('char__id')\n", (2444, 2456), True, 'from django_filters import rest_framework as filters\n'), ((2473, 2505), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""char__name"""'], {}), "('char__name')\n", (2491, 2505), True, 'from django_filters import rest_framework as filters\n'), ((2520, 2550), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""char__wd"""'], {}), "('char__wd')\n", (2538, 2550), True, 'from django_filters import rest_framework as filters\n'), ((2568, 2601), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""char__manto"""'], {}), "('char__manto')\n", (2586, 2601), True, 'from django_filters import rest_framework as filters\n'), ((2620, 2699), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""char__gender"""'], {'choices': 'Character.CharacterGender.choices'}), "('char__gender', choices=Character.CharacterGender.choices)\n", (2640, 2699), True, 'from django_filters import rest_framework as filters\n'), ((2739, 2818), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""char__number"""'], {'choices': 'Character.CharacterNumber.choices'}), 
"('char__number', choices=Character.CharacterNumber.choices)\n", (2759, 2818), True, 'from django_filters import rest_framework as filters\n'), ((2856, 2933), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""char__being"""'], {'choices': 'Character.CharacterBeing.choices'}), "('char__being', choices=Character.CharacterBeing.choices)\n", (2876, 2933), True, 'from django_filters import rest_framework as filters\n'), ((3267, 3305), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""spkr__char__id"""'], {}), "('spkr__char__id')\n", (3287, 3305), True, 'from django_filters import rest_framework as filters\n'), ((3322, 3354), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""spkr__name"""'], {}), "('spkr__name')\n", (3340, 3354), True, 'from django_filters import rest_framework as filters\n'), ((3372, 3411), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""spkr__char__manto"""'], {}), "('spkr__char__manto')\n", (3390, 3411), True, 'from django_filters import rest_framework as filters\n'), ((3426, 3462), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""spkr__char__wd"""'], {}), "('spkr__char__wd')\n", (3444, 3462), True, 'from django_filters import rest_framework as filters\n'), ((3481, 3560), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""spkr__gender"""'], {'choices': 'Character.CharacterGender.choices'}), "('spkr__gender', choices=Character.CharacterGender.choices)\n", (3501, 3560), True, 'from django_filters import rest_framework as filters\n'), ((3600, 3679), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""spkr__number"""'], {'choices': 'Character.CharacterNumber.choices'}), "('spkr__number', choices=Character.CharacterNumber.choices)\n", (3620, 3679), True, 'from django_filters import rest_framework as filters\n'), ((3717, 3794), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""spkr__being"""'], {'choices': 'Character.CharacterBeing.choices'}), "('spkr__being', choices=Character.CharacterBeing.choices)\n", (3737, 3794), True, 'from django_filters import rest_framework as filters\n'), ((3831, 3866), 'django_filters.rest_framework.BooleanFilter', 'filters.BooleanFilter', (['"""spkr__anon"""'], {}), "('spkr__anon')\n", (3852, 3866), True, 'from django_filters import rest_framework as filters\n'), ((3886, 3924), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""addr__char__id"""'], {}), "('addr__char__id')\n", (3906, 3924), True, 'from django_filters import rest_framework as filters\n'), ((3941, 3973), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""addr__name"""'], {}), "('addr__name')\n", (3959, 3973), True, 'from django_filters import rest_framework as filters\n'), ((3991, 4030), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""addr__char__manto"""'], {}), "('addr__char__manto')\n", (4009, 4030), True, 'from django_filters import rest_framework as filters\n'), ((4045, 4081), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""addr__char__wd"""'], {}), "('addr__char__wd')\n", (4063, 4081), True, 'from django_filters import rest_framework as filters\n'), ((4100, 4179), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""addr__gender"""'], {'choices': 'Character.CharacterGender.choices'}), "('addr__gender', choices=Character.CharacterGender.choices)\n", (4120, 4179), True, 'from 
django_filters import rest_framework as filters\n'), ((4219, 4298), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""addr__number"""'], {'choices': 'Character.CharacterNumber.choices'}), "('addr__number', choices=Character.CharacterNumber.choices)\n", (4239, 4298), True, 'from django_filters import rest_framework as filters\n'), ((4336, 4413), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""addr__being"""'], {'choices': 'Character.CharacterBeing.choices'}), "('addr__being', choices=Character.CharacterBeing.choices)\n", (4356, 4413), True, 'from django_filters import rest_framework as filters\n'), ((4450, 4485), 'django_filters.rest_framework.BooleanFilter', 'filters.BooleanFilter', (['"""addr__anon"""'], {}), "('addr__anon')\n", (4471, 4485), True, 'from django_filters import rest_framework as filters\n'), ((4507, 4539), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""spkr__id"""'], {}), "('spkr__id')\n", (4527, 4539), True, 'from django_filters import rest_framework as filters\n'), ((4556, 4588), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""addr__id"""'], {}), "('addr__id')\n", (4576, 4588), True, 'from django_filters import rest_framework as filters\n'), ((4605, 4668), 'django_filters.rest_framework.ChoiceFilter', 'filters.ChoiceFilter', (['"""type"""'], {'choices': 'Speech.SpeechType.choices'}), "('type', choices=Speech.SpeechType.choices)\n", (4625, 4668), True, 'from django_filters import rest_framework as filters\n'), ((4687, 4722), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""cluster__id"""'], {}), "('cluster__id')\n", (4707, 4722), True, 'from django_filters import rest_framework as filters\n'), ((4742, 4774), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""work__id"""'], {}), "('work__id')\n", (4762, 4774), True, 'from django_filters import rest_framework as filters\n'), ((4792, 4825), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__title"""'], {}), "('work__title')\n", (4810, 4825), True, 'from django_filters import rest_framework as filters\n'), ((4841, 4872), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__urn"""'], {}), "('work__urn')\n", (4859, 4872), True, 'from django_filters import rest_framework as filters\n'), ((4887, 4917), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__wd"""'], {}), "('work__wd')\n", (4905, 4917), True, 'from django_filters import rest_framework as filters\n'), ((4939, 4979), 'django_filters.rest_framework.NumberFilter', 'filters.NumberFilter', (['"""work__author__id"""'], {}), "('work__author__id')\n", (4959, 4979), True, 'from django_filters import rest_framework as filters\n'), ((4998, 5038), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__author__name"""'], {}), "('work__author__name')\n", (5016, 5038), True, 'from django_filters import rest_framework as filters\n'), ((5055, 5093), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__author__wd"""'], {}), "('work__author__wd')\n", (5073, 5093), True, 'from django_filters import rest_framework as filters\n'), ((5111, 5150), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', (['"""work__author__urn"""'], {}), "('work__author__urn')\n", (5129, 5150), True, 'from django_filters import rest_framework as filters\n'), ((8889, 8932), 'django.db.models.Count', 'Count', 
(['"""instances__speeches"""'], {'distinct': '(True)'}), "('instances__speeches', distinct=True)\n", (8894, 8932), False, 'from django.db.models import Q, Count, Max\n'), ((8946, 8990), 'django.db.models.Count', 'Count', (['"""instances__addresses"""'], {'distinct': '(True)'}), "('instances__addresses', distinct=True)\n", (8951, 8990), False, 'from django.db.models import Q, Count, Max\n'), ((10056, 10088), 'django.db.models.Count', 'Count', (['"""speeches"""'], {'distinct': '(True)'}), "('speeches', distinct=True)\n", (10061, 10088), False, 'from django.db.models import Q, Count, Max\n'), ((10102, 10135), 'django.db.models.Count', 'Count', (['"""addresses"""'], {'distinct': '(True)'}), "('addresses', distinct=True)\n", (10107, 10135), False, 'from django.db.models import Q, Count, Max\n'), ((12206, 12230), 'django.db.models.Count', 'Count', (['"""cluster__speech"""'], {}), "('cluster__speech')\n", (12211, 12230), False, 'from django.db.models import Q, Count, Max\n'), ((8702, 8729), 'django.db.models.Q', 'Q', ([], {'name': "self.params['name']"}), "(name=self.params['name'])\n", (8703, 8729), False, 'from django.db.models import Q, Count, Max\n'), ((9849, 9882), 'django.db.models.Q', 'Q', ([], {'char__name': "self.params['name']"}), "(char__name=self.params['name'])\n", (9850, 9882), False, 'from django.db.models import Q, Count, Max\n'), ((12894, 12926), 'django.db.models.Q', 'Q', ([], {'spkr': "self.params['spkr_inst']"}), "(spkr=self.params['spkr_inst'])\n", (12895, 12926), False, 'from django.db.models import Q, Count, Max\n'), ((13207, 13239), 'django.db.models.Q', 'Q', ([], {'addr': "self.params['addr_inst']"}), "(addr=self.params['addr_inst'])\n", (13208, 13239), False, 'from django.db.models import Q, Count, Max\n'), ((13315, 13355), 'django.db.models.Q', 'Q', ([], {'cluster__pk': "self.params['cluster_id']"}), "(cluster__pk=self.params['cluster_id'])\n", (13316, 13355), False, 'from django.db.models import Q, Count, Max\n'), ((13425, 13452), 'django.db.models.Q', 'Q', ([], {'type': "self.params['type']"}), "(type=self.params['type'])\n", (13426, 13452), False, 'from django.db.models import Q, Count, Max\n'), ((13522, 13549), 'django.db.models.Q', 'Q', ([], {'part': "self.params['part']"}), "(part=self.params['part'])\n", (13523, 13549), False, 'from django.db.models import Q, Count, Max\n'), ((13622, 13670), 'django.db.models.Q', 'Q', ([], {'cluster__speech__count': "self.params['n_parts']"}), "(cluster__speech__count=self.params['n_parts'])\n", (13623, 13670), False, 'from django.db.models import Q, Count, Max\n'), ((13743, 13786), 'django.db.models.Q', 'Q', ([], {'cluster__work__pk': "self.params['work_id']"}), "(cluster__work__pk=self.params['work_id'])\n", (13744, 13786), False, 'from django.db.models import Q, Count, Max\n'), ((15996, 16007), 'django.db.models.Max', 'Max', (['"""part"""'], {}), "('part')\n", (15999, 16007), False, 'from django.db.models import Q, Count, Max\n'), ((12568, 12604), 'django.db.models.Q', 'Q', ([], {'addr__disg': "self.params['char_id']"}), "(addr__disg=self.params['char_id'])\n", (12569, 12604), False, 'from django.db.models import Q, Count, Max\n'), ((12714, 12750), 'django.db.models.Q', 'Q', ([], {'spkr__char': "self.params['spkr_id']"}), "(spkr__char=self.params['spkr_id'])\n", (12715, 12750), False, 'from django.db.models import Q, Count, Max\n'), ((12753, 12789), 'django.db.models.Q', 'Q', ([], {'spkr__disg': "self.params['spkr_id']"}), "(spkr__disg=self.params['spkr_id'])\n", (12754, 12789), False, 'from django.db.models import Q, Count, 
Max\n'), ((13025, 13061), 'django.db.models.Q', 'Q', ([], {'addr__char': "self.params['addr_id']"}), "(addr__char=self.params['addr_id'])\n", (13026, 13061), False, 'from django.db.models import Q, Count, Max\n'), ((13064, 13100), 'django.db.models.Q', 'Q', ([], {'addr__disg': "self.params['addr_id']"}), "(addr__disg=self.params['addr_id'])\n", (13065, 13100), False, 'from django.db.models import Q, Count, Max\n'), ((12512, 12548), 'django.db.models.Q', 'Q', ([], {'addr__char': "self.params['char_id']"}), "(addr__char=self.params['char_id'])\n", (12513, 12548), False, 'from django.db.models import Q, Count, Max\n'), ((12400, 12436), 'django.db.models.Q', 'Q', ([], {'spkr__char': "self.params['char_id']"}), "(spkr__char=self.params['char_id'])\n", (12401, 12436), False, 'from django.db.models import Q, Count, Max\n'), ((12456, 12492), 'django.db.models.Q', 'Q', ([], {'spkr__disg': "self.params['char_id']"}), "(spkr__disg=self.params['char_id'])\n", (12457, 12492), False, 'from django.db.models import Q, Count, Max\n')]
|
import tensorflow as tf
from tensorflow.python.framework import ops
import os
dot_slash = os.path.dirname(__file__)
# Making roi_pooling_layer available for import as a library
roi_location = os.path.join(dot_slash, "rpl.so")
op_module = tf.load_op_library(roi_location)
roi_pooling_layer = op_module.roi_pooler
# Making nms available for import as a library
nms_location = os.path.join(dot_slash, "nms.so")
nms_module = tf.load_op_library(nms_location)
nms = nms_module.nms
# Making roi_pooling_layer's gradient available for import
roi_grad_location = os.path.join(dot_slash, "rpl_grad.so")
roi_grad_module = tf.load_op_library(roi_grad_location)
roi_pooling_layer_grad = roi_grad_module.roi_pooler_grad
@ops.RegisterGradient("RoiPooler")
def _roi_pool_grad_cc(op, grad):
return [roi_pooling_layer_grad(op.inputs[0], op.inputs[1], op.inputs[2], grad,
op.get_attr("pooled_height"), op.get_attr("pooled_width"),
op.get_attr("feature_stride")), None, None]
# Making iou_labeler available for import
iou_labeler_location = os.path.join(dot_slash, "iou_labeler.so")
iou_labeler_module = tf.load_op_library(iou_labeler_location)
iou_labeler = iou_labeler_module.iou_labeler
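# Hypothetical usage sketch: the argument names and attr values below are
# assumptions inferred from the gradient registration above, not a documented
# signature.
#
#   pooled = roi_pooling_layer(feature_map, rois, roi_indices,
#                              pooled_height=7, pooled_width=7,
#                              feature_stride=16)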
|
[
"tensorflow.load_op_library",
"os.path.dirname",
"os.path.join",
"tensorflow.python.framework.ops.RegisterGradient"
] |
[((91, 116), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'import os\n'), ((194, 227), 'os.path.join', 'os.path.join', (['dot_slash', '"""rpl.so"""'], {}), "(dot_slash, 'rpl.so')\n", (206, 227), False, 'import os\n'), ((240, 272), 'tensorflow.load_op_library', 'tf.load_op_library', (['roi_location'], {}), '(roi_location)\n', (258, 272), True, 'import tensorflow as tf\n'), ((377, 410), 'os.path.join', 'os.path.join', (['dot_slash', '"""nms.so"""'], {}), "(dot_slash, 'nms.so')\n", (389, 410), False, 'import os\n'), ((424, 456), 'tensorflow.load_op_library', 'tf.load_op_library', (['nms_location'], {}), '(nms_location)\n', (442, 456), True, 'import tensorflow as tf\n'), ((558, 596), 'os.path.join', 'os.path.join', (['dot_slash', '"""rpl_grad.so"""'], {}), "(dot_slash, 'rpl_grad.so')\n", (570, 596), False, 'import os\n'), ((615, 652), 'tensorflow.load_op_library', 'tf.load_op_library', (['roi_grad_location'], {}), '(roi_grad_location)\n', (633, 652), True, 'import tensorflow as tf\n'), ((713, 746), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""RoiPooler"""'], {}), "('RoiPooler')\n", (733, 746), False, 'from tensorflow.python.framework import ops\n'), ((1049, 1090), 'os.path.join', 'os.path.join', (['dot_slash', '"""iou_labeler.so"""'], {}), "(dot_slash, 'iou_labeler.so')\n", (1061, 1090), False, 'import os\n'), ((1112, 1152), 'tensorflow.load_op_library', 'tf.load_op_library', (['iou_labeler_location'], {}), '(iou_labeler_location)\n', (1130, 1152), True, 'import tensorflow as tf\n')]
|
from django.views.generic import ListView, DetailView
from eventex.core.models import Speaker, Talk
home = ListView.as_view(template_name='index.html', model=Speaker)
speaker_detail = DetailView.as_view(model=Speaker)
talk_list = ListView.as_view(model=Talk)
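# These module-level callables are meant to be referenced from a URLconf,
# e.g. (hypothetical routes and names, not part of the original file):
#
#   urlpatterns = [
#       path('', home, name='home'),
#       path('speakers/<int:pk>/', speaker_detail, name='speaker_detail'),
#       path('talks/', talk_list, name='talk_list'),
#   ]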
|
[
"django.views.generic.ListView.as_view",
"django.views.generic.DetailView.as_view"
] |
[((109, 168), 'django.views.generic.ListView.as_view', 'ListView.as_view', ([], {'template_name': '"""index.html"""', 'model': 'Speaker'}), "(template_name='index.html', model=Speaker)\n", (125, 168), False, 'from django.views.generic import ListView, DetailView\n'), ((187, 220), 'django.views.generic.DetailView.as_view', 'DetailView.as_view', ([], {'model': 'Speaker'}), '(model=Speaker)\n', (205, 220), False, 'from django.views.generic import ListView, DetailView\n'), ((234, 262), 'django.views.generic.ListView.as_view', 'ListView.as_view', ([], {'model': 'Talk'}), '(model=Talk)\n', (250, 262), False, 'from django.views.generic import ListView, DetailView\n')]
|
from skfda.representation.basis import (
FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor)
import unittest
import numpy as np
class TestBasisEvaluationFourier(unittest.TestCase):
def test_evaluation_simple_fourier(self):
"""Test the evaluation of FDataBasis"""
fourier = Fourier(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array([[8.71, 9.66, 1.84, -4.71, -2.80, 2.71,
2.45, -3.82, -6.66, -0.30, 8.71],
[22.24, 26.48, 10.57, -4.95, -3.58, 6.24,
5.31, -7.69, -13.32, 1.13, 22.24]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_fourier(self):
"""Test the evaluation of a single point FDataBasis"""
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
# Test different ways of call f with a point
res = np.array([-0.903918107989282, -0.267163981229459]
).reshape((2, 1, 1)).round(4)
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case: should a 0-d array be accepted or not?
        # np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_fourier(self):
"""Test the evaluation of the derivative of a FDataBasis"""
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([4.34138447771721, -7.09352774867064, 2.75214327095343,
4.34138447771721, 6.52573053999253,
-4.81336320468984, -1.7123673353027, 6.52573053999253]
).reshape((2, 4, 1)).round(3)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3), res
)
def test_evaluation_grid_fourier(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should be change due to the domain dimension is 1,
but can accept the """
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(f(np.atleast_2d(t), grid=True),
res_test)
# Number of axis different than the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_fourier(self):
"""Test the evaluation of FDataBasis the a matrix of times instead of
a list of times """
fourier = Fourier(domain_range=(0, 1), n_basis=3)
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
        # Test same result as standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
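        # aligned=False gives each sample its own row of evaluation points,
        # so [[1], [1]] evaluates the two samples at t=1 independently.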
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_fourier(self):
"""Test the evaluation of FDataBasis"""
for fourier in (Fourier(domain_range=[(0, 1)], n_basis=3),
Fourier(domain_range=((0, 1),), n_basis=3),
Fourier(domain_range=np.array((0, 1)), n_basis=3),
Fourier(domain_range=np.array([(0, 1)]), n_basis=3)):
coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
[0.01778079, 0.73440271, 0.20148638]])
f = FDataBasis(fourier, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([0.905, 0.147, -1.05, 0.905, 0.303,
0.775, -1.024, 0.303]).reshape((2, 4, 1))
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationBSpline(unittest.TestCase):
def test_evaluation_simple_bspline(self):
"""Test the evaluation of FDataBasis"""
bspline = BSpline(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array([[1, 1.54, 1.99, 2.37, 2.7, 3,
3.3, 3.63, 4.01, 4.46, 5],
[6, 6.54, 6.99, 7.37, 7.7, 8,
8.3, 8.63, 9.01, 9.46, 10]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_bspline(self):
"""Test the evaluation of a single point FDataBasis"""
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
# Test different ways of call f with a point
res = np.array([[0.5696], [0.3104]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case: should a 0-d array be accepted or not?
        # np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_bspline(self):
"""Test the evaluation of the derivative of a FDataBasis"""
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3),
np.array([[2.927, 0.453, -1.229, 0.6],
[4.3, -1.599, 1.016, -2.52]])[..., np.newaxis]
)
def test_evaluation_grid_bspline(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should be change due to the domain dimension is 1,
but can accept the """
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(
f(np.atleast_2d(t), grid=True), res_test)
# Number of axis different than the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_bspline(self):
"""Test the evaluation of FDataBasis the a matrix of times instead of
a list of times """
bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
        # Test same result as standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_bspline(self):
"""Test the evaluation of FDataBasis"""
for bspline in (BSpline(domain_range=[(0, 1)], n_basis=5, order=3),
BSpline(domain_range=((0, 1),), n_basis=5, order=3),
BSpline(domain_range=np.array((0, 1)), n_basis=5,
order=3),
BSpline(domain_range=np.array([(0, 1)]), n_basis=5,
order=3)
):
coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
[0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
f = FDataBasis(bspline, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([[0.001, 0.564, 0.435, 0.33],
[0.018, 0.468, 0.371, 0.12]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
# Check error
with np.testing.assert_raises(ValueError):
BSpline(domain_range=[(0, 1), (0, 1)])
class TestBasisEvaluationMonomial(unittest.TestCase):
def test_evaluation_simple_monomial(self):
"""Test the evaluation of FDataBasis"""
monomial = Monomial(domain_range=(0, 2), n_basis=5)
coefficients = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]])
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 2, 11)
# Results in R package fda
res = np.array(
[[1.00, 1.56, 2.66, 4.79, 8.62, 15.00,
25.00, 39.86, 61.03, 90.14, 129.00],
[6.00, 7.81, 10.91, 16.32, 25.42, 40.00,
62.21, 94.59, 140.08, 201.98, 284.00]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(2), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
def test_evaluation_point_monomial(self):
"""Test the evaluation of a single point FDataBasis"""
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
# Test different ways of call f with a point
res = np.array([[2.75], [1.525]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
np.testing.assert_array_almost_equal(f(0.5).round(4), res)
np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case: should a 0-d array be accepted or not?
        # np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
def test_evaluation_derivative_monomial(self):
"""Test the evaluation of the derivative of a FDataBasis"""
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
f_deriv = f.derivative()
np.testing.assert_array_almost_equal(
f_deriv(t).round(3),
np.array([[2., 4., 6., 8.],
[1.4, 2.267, 3.133, 4.]])[..., np.newaxis]
)
def test_evaluation_grid_monomial(self):
"""Test the evaluation of FDataBasis with the grid option set to
true. Nothing should be change due to the domain dimension is 1,
but can accept the """
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
res_test = f(t)
# Different ways to pass the axes
np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
np.testing.assert_array_almost_equal(
f(np.atleast_2d(t), grid=True), res_test)
# Number of axis different than the domain dimension (1)
with np.testing.assert_raises(ValueError):
f((t, t), grid=True)
def test_evaluation_composed_monomial(self):
"""Test the evaluation of FDataBasis the a matrix of times instead of
a list of times """
monomial = Monomial(domain_range=(0, 1), n_basis=3)
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
        # Test same result as standard evaluation
np.testing.assert_array_almost_equal(f([1]),
f([[1], [1]],
aligned=False))
np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
aligned=False))
# Different evaluation times
t_multiple = [[0, 0.5], [0.2, 0.7]]
np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
f(t_multiple,
aligned=False)[0])
np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
f(t_multiple,
aligned=False)[1])
def test_domain_in_list_monomial(self):
"""Test the evaluation of FDataBasis"""
for monomial in (Monomial(domain_range=[(0, 1)], n_basis=3),
Monomial(domain_range=((0, 1),), n_basis=3),
Monomial(domain_range=np.array((0, 1)), n_basis=3),
Monomial(domain_range=np.array([(0, 1)]), n_basis=3)):
coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
f = FDataBasis(monomial, coefficients)
t = np.linspace(0, 1, 4)
res = np.array([[1., 2., 3.667, 6.],
[0.5, 1.111, 2.011, 3.2]])[..., np.newaxis]
np.testing.assert_array_almost_equal(f(t).round(3), res)
np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationVectorValued(unittest.TestCase):
def test_vector_valued_constant(self):
basis_first = Constant()
basis_second = Constant()
basis = VectorValued([basis_first, basis_second])
fd = FDataBasis(basis=basis, coefficients=[[1, 2], [3, 4]])
self.assertEqual(fd.dim_codomain, 2)
res = np.array([[[1, 2]], [[3, 4]]])
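        # expected shape: (n_samples, n_eval_points, dim_codomain) = (2, 1, 2)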
np.testing.assert_allclose(fd(0), res)
def test_vector_valued_constant_monomial(self):
basis_first = Constant(domain_range=(0, 5))
basis_second = Monomial(n_basis=3, domain_range=(0, 5))
basis = VectorValued([basis_first, basis_second])
fd = FDataBasis(basis=basis, coefficients=[
[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(fd.dim_codomain, 2)
np.testing.assert_allclose(fd.domain_range[0], (0, 5))
res = np.array([[[1, 2], [1, 9], [1, 24]],
[[3, 4], [3, 15], [3, 38]]])
np.testing.assert_allclose(fd([0, 1, 2]), res)
class TestBasisEvaluationTensor(unittest.TestCase):
def test_tensor_monomial_constant(self):
basis = Tensor([Monomial(n_basis=2), Constant()])
fd = FDataBasis(basis=basis, coefficients=[1, 1])
self.assertEqual(fd.dim_domain, 2)
self.assertEqual(fd.dim_codomain, 1)
np.testing.assert_allclose(fd([0., 0.]), [[[1.]]])
np.testing.assert_allclose(fd([0.5, 0.5]), [[[1.5]]])
np.testing.assert_allclose(
fd([(0., 0.), (0.5, 0.5)]), [[[1.0], [1.5]]])
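        # With tensor basis functions {1, x} and coefficients [1, 1], the
        # surface is f(x, y) = 1 + x, which matches the values checked above.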
fd_grid = fd.to_grid()
fd2 = fd_grid.to_basis(basis)
np.testing.assert_allclose(fd.coefficients, fd2.coefficients)
if __name__ == '__main__':
print()
unittest.main()
|
[
"unittest.main",
"skfda.representation.basis.BSpline",
"numpy.testing.assert_raises",
"skfda.representation.basis.Fourier",
"skfda.representation.basis.Constant",
"skfda.representation.basis.VectorValued",
"numpy.array",
"skfda.representation.basis.Monomial",
"numpy.linspace",
"numpy.testing.assert_allclose",
"skfda.representation.basis.FDataBasis",
"numpy.vstack",
"numpy.atleast_2d"
] |
[((18780, 18795), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18793, 18795), False, 'import unittest\n'), ((322, 361), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '(0, 2)', 'n_basis': '(5)'}), '(domain_range=(0, 2), n_basis=5)\n', (329, 361), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((386, 431), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (394, 431), True, 'import numpy as np\n'), ((485, 518), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (495, 518), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((532, 553), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(11)'], {}), '(0, 2, 11)\n', (543, 553), True, 'import numpy as np\n'), ((1128, 1167), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (1135, 1167), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((1192, 1283), 'numpy.array', 'np.array', (['[[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, 0.20148638]]'], {}), '([[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, \n 0.20148638]])\n', (1200, 1283), True, 'import numpy as np\n'), ((1325, 1358), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (1335, 1358), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((2086, 2125), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (2093, 2125), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((2150, 2241), 'numpy.array', 'np.array', (['[[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, 0.20148638]]'], {}), '([[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, \n 0.20148638]])\n', (2158, 2241), True, 'import numpy as np\n'), ((2283, 2316), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (2293, 2316), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((2330, 2350), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (2341, 2350), True, 'import numpy as np\n'), ((2998, 3037), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (3005, 3037), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((3062, 3153), 'numpy.array', 'np.array', (['[[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, 0.20148638]]'], {}), '([[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, \n 0.20148638]])\n', (3070, 3153), True, 'import numpy as np\n'), ((3195, 3228), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (3205, 3228), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, 
Constant, VectorValued, Tensor\n'), ((3241, 3261), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (3252, 3261), True, 'import numpy as np\n'), ((4006, 4045), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (4013, 4045), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((4070, 4161), 'numpy.array', 'np.array', (['[[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, 0.20148638]]'], {}), '([[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, \n 0.20148638]])\n', (4078, 4161), True, 'import numpy as np\n'), ((4203, 4236), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (4213, 4236), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((4249, 4269), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (4260, 4269), True, 'import numpy as np\n'), ((6174, 6213), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 2)', 'n_basis': '(5)'}), '(domain_range=(0, 2), n_basis=5)\n', (6181, 6213), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((6238, 6283), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (6246, 6283), True, 'import numpy as np\n'), ((6337, 6370), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (6347, 6370), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((6384, 6405), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(11)'], {}), '(0, 2, 11)\n', (6395, 6405), True, 'import numpy as np\n'), ((6940, 6988), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=(0, 1), n_basis=5, order=3)\n', (6947, 6988), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((7151, 7184), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (7161, 7184), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((7857, 7905), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=(0, 1), n_basis=5, order=3)\n', (7864, 7905), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((8068, 8101), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (8078, 8101), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((8115, 8135), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (8126, 8135), True, 'import numpy as np\n'), ((8630, 8678), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=(0, 1), n_basis=5, order=3)\n', (8637, 8678), False, 'from 
skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((8841, 8874), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (8851, 8874), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((8887, 8907), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (8898, 8907), True, 'import numpy as np\n'), ((9620, 9668), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=(0, 1), n_basis=5, order=3)\n', (9627, 9668), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((9831, 9864), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (9841, 9864), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((9877, 9897), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (9888, 9897), True, 'import numpy as np\n'), ((12061, 12101), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '(0, 2)', 'n_basis': '(5)'}), '(domain_range=(0, 2), n_basis=5)\n', (12069, 12101), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((12126, 12171), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (12134, 12171), True, 'import numpy as np\n'), ((12225, 12259), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (12235, 12259), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((12273, 12294), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(11)'], {}), '(0, 2, 11)\n', (12284, 12294), True, 'import numpy as np\n'), ((12851, 12891), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (12859, 12891), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((12958, 12992), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (12968, 12992), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((13664, 13704), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (13672, 13704), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((13771, 13805), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (13781, 13805), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((13819, 13839), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (13830, 13839), True, 'import numpy as np\n'), ((14318, 14358), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), 
'(domain_range=(0, 1), n_basis=3)\n', (14326, 14358), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((14425, 14459), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (14435, 14459), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((14472, 14492), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (14483, 14492), True, 'import numpy as np\n'), ((15207, 15247), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '(0, 1)', 'n_basis': '(3)'}), '(domain_range=(0, 1), n_basis=3)\n', (15215, 15247), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((15314, 15348), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (15324, 15348), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((15361, 15381), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (15372, 15381), True, 'import numpy as np\n'), ((17149, 17159), 'skfda.representation.basis.Constant', 'Constant', ([], {}), '()\n', (17157, 17159), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17183, 17193), 'skfda.representation.basis.Constant', 'Constant', ([], {}), '()\n', (17191, 17193), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17211, 17252), 'skfda.representation.basis.VectorValued', 'VectorValued', (['[basis_first, basis_second]'], {}), '([basis_first, basis_second])\n', (17223, 17252), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17267, 17321), 'skfda.representation.basis.FDataBasis', 'FDataBasis', ([], {'basis': 'basis', 'coefficients': '[[1, 2], [3, 4]]'}), '(basis=basis, coefficients=[[1, 2], [3, 4]])\n', (17277, 17321), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17383, 17413), 'numpy.array', 'np.array', (['[[[1, 2]], [[3, 4]]]'], {}), '([[[1, 2]], [[3, 4]]])\n', (17391, 17413), True, 'import numpy as np\n'), ((17538, 17567), 'skfda.representation.basis.Constant', 'Constant', ([], {'domain_range': '(0, 5)'}), '(domain_range=(0, 5))\n', (17546, 17567), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17591, 17631), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)', 'domain_range': '(0, 5)'}), '(n_basis=3, domain_range=(0, 5))\n', (17599, 17631), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17649, 17690), 'skfda.representation.basis.VectorValued', 'VectorValued', (['[basis_first, basis_second]'], {}), '([basis_first, basis_second])\n', (17661, 17690), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17705, 17771), 'skfda.representation.basis.FDataBasis', 'FDataBasis', ([], {'basis': 'basis', 'coefficients': '[[1, 2, 3, 4], [3, 4, 5, 6]]'}), '(basis=basis, 
coefficients=[[1, 2, 3, 4], [3, 4, 5, 6]])\n', (17715, 17771), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((17852, 17906), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fd.domain_range[0]', '(0, 5)'], {}), '(fd.domain_range[0], (0, 5))\n', (17878, 17906), True, 'import numpy as np\n'), ((17922, 17987), 'numpy.array', 'np.array', (['[[[1, 2], [1, 9], [1, 24]], [[3, 4], [3, 15], [3, 38]]]'], {}), '([[[1, 2], [1, 9], [1, 24]], [[3, 4], [3, 15], [3, 38]]])\n', (17930, 17987), True, 'import numpy as np\n'), ((18241, 18285), 'skfda.representation.basis.FDataBasis', 'FDataBasis', ([], {'basis': 'basis', 'coefficients': '[1, 1]'}), '(basis=basis, coefficients=[1, 1])\n', (18251, 18285), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((18673, 18734), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fd.coefficients', 'fd2.coefficients'], {}), '(fd.coefficients, fd2.coefficients)\n', (18699, 18734), True, 'import numpy as np\n'), ((604, 772), 'numpy.array', 'np.array', (['[[8.71, 9.66, 1.84, -4.71, -2.8, 2.71, 2.45, -3.82, -6.66, -0.3, 8.71], [\n 22.24, 26.48, 10.57, -4.95, -3.58, 6.24, 5.31, -7.69, -13.32, 1.13, 22.24]]'], {}), '([[8.71, 9.66, 1.84, -4.71, -2.8, 2.71, 2.45, -3.82, -6.66, -0.3, \n 8.71], [22.24, 26.48, 10.57, -4.95, -3.58, 6.24, 5.31, -7.69, -13.32, \n 1.13, 22.24]])\n', (612, 772), True, 'import numpy as np\n'), ((3762, 3798), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3786, 3798), True, 'import numpy as np\n'), ((5219, 5260), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '[(0, 1)]', 'n_basis': '(3)'}), '(domain_range=[(0, 1)], n_basis=3)\n', (5226, 5260), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((5286, 5328), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'domain_range': '((0, 1),)', 'n_basis': '(3)'}), '(domain_range=((0, 1),), n_basis=3)\n', (5293, 5328), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((5511, 5602), 'numpy.array', 'np.array', (['[[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, 0.20148638]]'], {}), '([[0.00078238, 0.48857741, 0.63971985], [0.01778079, 0.73440271, \n 0.20148638]])\n', (5519, 5602), True, 'import numpy as np\n'), ((5652, 5685), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['fourier', 'coefficients'], {}), '(fourier, coefficients)\n', (5662, 5685), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((5703, 5723), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (5714, 5723), True, 'import numpy as np\n'), ((6456, 6585), 'numpy.array', 'np.array', (['[[1, 1.54, 1.99, 2.37, 2.7, 3, 3.3, 3.63, 4.01, 4.46, 5], [6, 6.54, 6.99, \n 7.37, 7.7, 8, 8.3, 8.63, 9.01, 9.46, 10]]'], {}), '([[1, 1.54, 1.99, 2.37, 2.7, 3, 3.3, 3.63, 4.01, 4.46, 5], [6, 6.54,\n 6.99, 7.37, 7.7, 8, 8.3, 8.63, 9.01, 9.46, 10]])\n', (6464, 6585), True, 'import numpy as np\n'), ((7253, 7283), 'numpy.array', 'np.array', (['[[0.5696], [0.3104]]'], {}), '([[0.5696], [0.3104]])\n', (7261, 7283), True, 'import numpy as np\n'), ((9376, 9412), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), 
'(ValueError)\n', (9400, 9412), True, 'import numpy as np\n'), ((10848, 10898), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '[(0, 1)]', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=[(0, 1)], n_basis=5, order=3)\n', (10855, 10898), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((10924, 10975), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '((0, 1),)', 'n_basis': '(5)', 'order': '(3)'}), '(domain_range=((0, 1),), n_basis=5, order=3)\n', (10931, 10975), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((11411, 11444), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['bspline', 'coefficients'], {}), '(bspline, coefficients)\n', (11421, 11444), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((11462, 11482), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (11473, 11482), True, 'import numpy as np\n'), ((11800, 11836), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (11824, 11836), True, 'import numpy as np\n'), ((11850, 11888), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '[(0, 1), (0, 1)]'}), '(domain_range=[(0, 1), (0, 1)])\n', (11857, 11888), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((12345, 12513), 'numpy.array', 'np.array', (['[[1.0, 1.56, 2.66, 4.79, 8.62, 15.0, 25.0, 39.86, 61.03, 90.14, 129.0], [\n 6.0, 7.81, 10.91, 16.32, 25.42, 40.0, 62.21, 94.59, 140.08, 201.98, 284.0]]'], {}), '([[1.0, 1.56, 2.66, 4.79, 8.62, 15.0, 25.0, 39.86, 61.03, 90.14, \n 129.0], [6.0, 7.81, 10.91, 16.32, 25.42, 40.0, 62.21, 94.59, 140.08, \n 201.98, 284.0]])\n', (12353, 12513), True, 'import numpy as np\n'), ((13061, 13088), 'numpy.array', 'np.array', (['[[2.75], [1.525]]'], {}), '([[2.75], [1.525]])\n', (13069, 13088), True, 'import numpy as np\n'), ((14961, 14997), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (14985, 14997), True, 'import numpy as np\n'), ((16334, 16376), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '[(0, 1)]', 'n_basis': '(3)'}), '(domain_range=[(0, 1)], n_basis=3)\n', (16342, 16376), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((16403, 16446), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'domain_range': '((0, 1),)', 'n_basis': '(3)'}), '(domain_range=((0, 1),), n_basis=3)\n', (16411, 16446), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((16679, 16713), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', 'coefficients'], {}), '(monomial, coefficients)\n', (16689, 16713), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((16731, 16751), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (16742, 16751), True, 'import numpy as np\n'), ((3598, 3614), 'numpy.atleast_2d', 'np.atleast_2d', (['t'], {}), '(t)\n', (3611, 3614), True, 'import numpy as np\n'), ((4551, 4568), 'numpy.vstack', 'np.vstack', (['(t, t)'], {}), '((t, t))\n', (4560, 4568), True, 'import numpy 
as np\n'), ((8261, 8329), 'numpy.array', 'np.array', (['[[2.927, 0.453, -1.229, 0.6], [4.3, -1.599, 1.016, -2.52]]'], {}), '([[2.927, 0.453, -1.229, 0.6], [4.3, -1.599, 1.016, -2.52]])\n', (8269, 8329), True, 'import numpy as np\n'), ((9257, 9273), 'numpy.atleast_2d', 'np.atleast_2d', (['t'], {}), '(t)\n', (9270, 9273), True, 'import numpy as np\n'), ((10179, 10196), 'numpy.vstack', 'np.vstack', (['(t, t)'], {}), '((t, t))\n', (10188, 10196), True, 'import numpy as np\n'), ((11502, 11570), 'numpy.array', 'np.array', (['[[0.001, 0.564, 0.435, 0.33], [0.018, 0.468, 0.371, 0.12]]'], {}), '([[0.001, 0.564, 0.435, 0.33], [0.018, 0.468, 0.371, 0.12]])\n', (11510, 11570), True, 'import numpy as np\n'), ((13965, 14023), 'numpy.array', 'np.array', (['[[2.0, 4.0, 6.0, 8.0], [1.4, 2.267, 3.133, 4.0]]'], {}), '([[2.0, 4.0, 6.0, 8.0], [1.4, 2.267, 3.133, 4.0]])\n', (13973, 14023), True, 'import numpy as np\n'), ((14842, 14858), 'numpy.atleast_2d', 'np.atleast_2d', (['t'], {}), '(t)\n', (14855, 14858), True, 'import numpy as np\n'), ((15663, 15680), 'numpy.vstack', 'np.vstack', (['(t, t)'], {}), '((t, t))\n', (15672, 15680), True, 'import numpy as np\n'), ((16771, 16831), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.667, 6.0], [0.5, 1.111, 2.011, 3.2]]'], {}), '([[1.0, 2.0, 3.667, 6.0], [0.5, 1.111, 2.011, 3.2]])\n', (16779, 16831), True, 'import numpy as np\n'), ((18193, 18212), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (18201, 18212), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((18214, 18224), 'skfda.representation.basis.Constant', 'Constant', ([], {}), '()\n', (18222, 18224), False, 'from skfda.representation.basis import FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor\n'), ((5375, 5391), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (5383, 5391), True, 'import numpy as np\n'), ((5450, 5468), 'numpy.array', 'np.array', (['[(0, 1)]'], {}), '([(0, 1)])\n', (5458, 5468), True, 'import numpy as np\n'), ((5743, 5810), 'numpy.array', 'np.array', (['[0.905, 0.147, -1.05, 0.905, 0.303, 0.775, -1.024, 0.303]'], {}), '([0.905, 0.147, -1.05, 0.905, 0.303, 0.775, -1.024, 0.303])\n', (5751, 5810), True, 'import numpy as np\n'), ((11022, 11038), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (11030, 11038), True, 'import numpy as np\n'), ((11138, 11156), 'numpy.array', 'np.array', (['[(0, 1)]'], {}), '([(0, 1)])\n', (11146, 11156), True, 'import numpy as np\n'), ((16495, 16511), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (16503, 16511), True, 'import numpy as np\n'), ((16572, 16590), 'numpy.array', 'np.array', (['[(0, 1)]'], {}), '([(0, 1)])\n', (16580, 16590), True, 'import numpy as np\n'), ((1427, 1477), 'numpy.array', 'np.array', (['[-0.903918107989282, -0.267163981229459]'], {}), '([-0.903918107989282, -0.267163981229459])\n', (1435, 1477), True, 'import numpy as np\n'), ((1784, 1799), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1792, 1799), True, 'import numpy as np\n'), ((2366, 2531), 'numpy.array', 'np.array', (['[4.34138447771721, -7.09352774867064, 2.75214327095343, 4.34138447771721, \n 6.52573053999253, -4.81336320468984, -1.7123673353027, 6.52573053999253]'], {}), '([4.34138447771721, -7.09352774867064, 2.75214327095343, \n 4.34138447771721, 6.52573053999253, -4.81336320468984, -1.7123673353027,\n 6.52573053999253])\n', (2374, 2531), True, 'import numpy as np\n'), ((7555, 7570), 'numpy.array', 
'np.array', (['[0.5]'], {}), '([0.5])\n', (7563, 7570), True, 'import numpy as np\n'), ((13360, 13375), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (13368, 13375), True, 'import numpy as np\n')]
|
import tensorflow as tf
import time
from IPython.display import display
from utils.label_generator import classifier_label_generator
def frcnn_train_step(model, train_dataset, train_stage, epochs=1, valid_dataset=None, change_lr=False, rpn_lr=None, cls_lr=None):
if change_lr:
if rpn_lr:
tf.keras.backend.set_value(model.rpn.optimizer.learning_rate, rpn_lr)
if cls_lr:
tf.keras.backend.set_value(model.classifier.optimizer.learning_rate, cls_lr)
if train_stage == 1:
print('Train RPNs \n')
model.rpn.trainable = True
model.classifier.trainable = False
elif train_stage == 2:
print('Train Fast R-CNN using the proposals from RPNs \n')
model.rpn.trainable = False
model.rpn.base_model.trainable = True
model.classifier.trainable = True
elif train_stage == 3:
print('Fix the shared convolutional layers and fine-tune unique layers to RPN \n')
model.rpn.trainable = True
model.rpn.base_model.trainable = False
model.classifier.trainable = False
elif train_stage == 4:
print('Fine-tune unique layers to Fast R-CNN \n')
model.rpn.trainable = False
model.classifier.trainable = True
max_step = 'Unknown'
for epoch in range(epochs):
epoch_start = time.time()
print(f"epoch {epoch+1}/{epochs}")
display_loss = display("Training loss at step 0 : 0", display_id=True)
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
start = time.time()
y_cls_rpn, y_reg_rpn, gts = y_batch_train
if train_stage == 1 or train_stage == 3:
result = model.rpn.train_step((x_batch_train, (y_cls_rpn, y_reg_rpn)))
losses = round(float(result['rpn_loss'].numpy()), 5)
else:
scores, rps, feature_map = model.rpn(x_batch_train, training=False)
if train_stage == 2:
model.rpn.train_step((x_batch_train, (y_cls_rpn, y_reg_rpn)))
rps = model.rpn.inverse_bbox_regression(rps)
candidate_area, scores = model.get_candidate((scores, rps, model.n_train_pre_nms))
nms = model.get_nms((candidate_area, scores, model.n_train_post_nms))
box_labels, cls_labels, nms = classifier_label_generator(nms, gts)
rois = model.roipool((feature_map, nms))
result = model.classifier.train_step(((rois, nms), (cls_labels, box_labels)))
losses = round(float(result['classifier_loss'].numpy()), 5)
display_loss.update(f"Training loss at step {step}/{max_step} : {losses} - {round(time.time() - start, 4)}sec/step - {time.strftime('%Hh%Mm%Ss', time.gmtime(time.time()-epoch_start))}/epoch")
max_step = step
display_loss.update(f"Training loss at step {step}/{max_step} : {losses} - {round(time.time()-start, 4)}sec/step - {time.strftime('%Hh%Mm%Ss', time.gmtime(time.time()-epoch_start))}/epoch")
if valid_dataset is not None:
display_loss_valid = display("validation loss : 0", display_id=True)
for x_batch_test, y_batch_test in valid_dataset:
y_cls_rpn, y_reg_rpn, gts = y_batch_test
if train_stage == 1 or train_stage == 3:
result = model.rpn.test_step((x_batch_test, (y_cls_rpn, y_reg_rpn)))
losses = round(float(result['rpn_loss_val'].numpy()), 5)
else:
scores, rps, feature_map = model.rpn.predict(x_batch_test)
rps = model.rpn.inverse_bbox_regression(rps)
candidate_area, scores = model.get_candidate((scores, rps, model.n_test_pre_nms))
nms = model.get_nms((candidate_area, scores, model.n_test_post_nms))
box_labels, cls_labels, nms = classifier_label_generator(nms, gts, valid=True)
rois = model.roipool((feature_map, nms))
result = model.classifier.test_step(((rois, nms), (cls_labels, box_labels)))
losses = round(float(result['classifier_loss_val'].numpy()), 5)
display_loss_valid.update(f"validation loss : {losses}")
return model
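# A minimal usage sketch of the 4-stage alternating training loop above (the
# names `model`, `train_ds` and `val_ds` are illustrative assumptions, not from
# the source; the datasets are expected to yield (image, (rpn_cls, rpn_reg, gts))):
#
#   for stage in (1, 2, 3, 4):
#       model = frcnn_train_step(model, train_ds, train_stage=stage,
#                                epochs=2, valid_dataset=val_ds)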
|
[
"tensorflow.keras.backend.set_value",
"utils.label_generator.classifier_label_generator",
"IPython.display.display",
"time.time"
] |
[((1330, 1341), 'time.time', 'time.time', ([], {}), '()\n', (1339, 1341), False, 'import time\n'), ((1408, 1463), 'IPython.display.display', 'display', (['"""Training loss at step 0 : 0"""'], {'display_id': '(True)'}), "('Training loss at step 0 : 0', display_id=True)\n", (1415, 1463), False, 'from IPython.display import display\n'), ((313, 382), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['model.rpn.optimizer.learning_rate', 'rpn_lr'], {}), '(model.rpn.optimizer.learning_rate, rpn_lr)\n', (339, 382), True, 'import tensorflow as tf\n'), ((414, 490), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['model.classifier.optimizer.learning_rate', 'cls_lr'], {}), '(model.classifier.optimizer.learning_rate, cls_lr)\n', (440, 490), True, 'import tensorflow as tf\n'), ((1562, 1573), 'time.time', 'time.time', ([], {}), '()\n', (1571, 1573), False, 'import time\n'), ((3126, 3173), 'IPython.display.display', 'display', (['"""validation loss : 0"""'], {'display_id': '(True)'}), "('validation loss : 0', display_id=True)\n", (3133, 3173), False, 'from IPython.display import display\n'), ((2363, 2399), 'utils.label_generator.classifier_label_generator', 'classifier_label_generator', (['nms', 'gts'], {}), '(nms, gts)\n', (2389, 2399), False, 'from utils.label_generator import classifier_label_generator\n'), ((3923, 3971), 'utils.label_generator.classifier_label_generator', 'classifier_label_generator', (['nms', 'gts'], {'valid': '(True)'}), '(nms, gts, valid=True)\n', (3949, 3971), False, 'from utils.label_generator import classifier_label_generator\n'), ((2946, 2957), 'time.time', 'time.time', ([], {}), '()\n', (2955, 2957), False, 'import time\n'), ((2722, 2733), 'time.time', 'time.time', ([], {}), '()\n', (2731, 2733), False, 'import time\n'), ((3019, 3030), 'time.time', 'time.time', ([], {}), '()\n', (3028, 3030), False, 'import time\n'), ((2797, 2808), 'time.time', 'time.time', ([], {}), '()\n', (2806, 2808), False, 'import time\n')]
|
from time import time, strftime, sleep
import praw
source = 'the_donald'
dest = 'td_uncensored'
log_file = 'td_bot_log.txt'
reddit = praw.Reddit(
client_id='client_id',
client_secret='client_secret',
password='password',
username='username',
user_agent='linux:td_uncensored:0.1 (by /u/username)'
)
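# The credentials above are placeholders: for password-based authentication like
# this, Reddit expects a "script"-type app, whose client_id/client_secret pair
# comes from the app settings at reddit.com/prefs/apps.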
def log_event(string):
"""Log events and errors"""
with open(log_file, 'a') as log:
log.write('{}\t\t'.format(strftime("%Y-%m-%d\t%H:%M:%S")) + string + '\n')
def cross_post(sub):
"""Create the cross-post"""
if sub.selftext:
reddit.subreddit(dest).submit(title=sub.title, selftext=sub.selftext, send_replies=False)
return
reddit.subreddit(dest).submit(title=sub.title, url=sub.url, send_replies=False)
def main():
"""Stream and cross-post submissions. Exit if more than 5 restarts"""
resets = 0
while resets < 5:
start = time()
try:
for submission in reddit.subreddit(source).stream.submissions():
if not submission.created_utc < start:
cross_post(submission)
except Exception as e:
log_event('Reset\t\t{}: {}'.format(type(e).__name__, e))
resets += 1
sleep(60)
continue
log_event('Stopped\t\tExcessive Restarts\n')
if __name__ == '__main__':
log_event('Start')
main()
|
[
"time.sleep",
"praw.Reddit",
"time.strftime",
"time.time"
] |
[((135, 307), 'praw.Reddit', 'praw.Reddit', ([], {'client_id': '"""client_id"""', 'client_secret': '"""client_secret"""', 'password': '"""password"""', 'username': '"""username"""', 'user_agent': '"""linux:td_uncensored:0.1 (by /u/username)"""'}), "(client_id='client_id', client_secret='client_secret', password=\n 'password', username='username', user_agent=\n 'linux:td_uncensored:0.1 (by /u/username)')\n", (146, 307), False, 'import praw\n'), ((914, 920), 'time.time', 'time', ([], {}), '()\n', (918, 920), False, 'from time import time, strftime, sleep\n'), ((1245, 1254), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (1250, 1254), False, 'from time import time, strftime, sleep\n'), ((449, 479), 'time.strftime', 'strftime', (['"""%Y-%m-%d\t%H:%M:%S"""'], {}), "('%Y-%m-%d\\t%H:%M:%S')\n", (457, 479), False, 'from time import time, strftime, sleep\n')]
|
import pandas as pd
averages_df = employee.groupby(by='department').mean()[['salary']]
merged_df = pd.merge(employee, averages_df, how='inner',
left_on='department', right_on=averages_df.index)
merged_df[['department', 'first_name', 'salary_x', 'salary_y']]
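# Illustrative setup only: the snippet above assumes an `employee` DataFrame is
# already defined; a plausible minimal shape (column names are assumptions):
#
#   employee = pd.DataFrame({
#       'first_name': ['Ann', 'Bob', 'Cara'],
#       'department': ['IT', 'IT', 'HR'],
#       'salary': [100, 80, 90],
#   })
#
# With pandas' default merge suffixes, salary_x is each employee's own salary
# and salary_y is their department's mean salary.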
|
[
"pandas.merge"
] |
[((101, 200), 'pandas.merge', 'pd.merge', (['employee', 'averages_df'], {'how': '"""inner"""', 'left_on': '"""department"""', 'right_on': 'averages_df.index'}), "(employee, averages_df, how='inner', left_on='department', right_on\n =averages_df.index)\n", (109, 200), True, 'import pandas as pd\n')]
|
# Module wrapping the different clustering methods
try:
    # General imports
import numpy as np
import pylab
import sys
import platform
import matplotlib.pyplot as plt
import re
    # Local imports
import kmeans
import rkde
except ImportError as e:
    print(e)
    exit(1)
""" Clustering """
# Clusters the data with the desired method
# Input:
#   - M: the matrix of distances between the objects
#   - methode: a string giving the name of the method (module name)
#   - params: a list of the parameters required by the requested method
#       - kmeans: params = [k, n_iter]
#       - rkde: params = [bandwidth, prob]
# Output:
#   - assign: an array giving, for each integer (object), its cluster number
#   - nb_cluster: the number of clusters formed
def make_clusters(M, methode, params):
    # Dispatch on the module named by `methode` without resorting to eval
    do = getattr(globals()[methode], "do")
    assign, nb_clusters = do(M, params[0], params[1])
return assign, nb_clusters
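# A minimal usage sketch (hedged: 'dist.txt', k=3 and n_iter=100 are illustrative
# assumptions; the kmeans module must expose a do(M, k, n_iter) function):
#
#   M = read_data('dist.txt')                                    # distance matrix
#   assign, nb_clusters = make_clusters(M, 'kmeans', [3, 100])   # k=3, n_iter=100
#   write_cluster('assign_kmeans.txt', assign, 100, 42)          # 42 = seed used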
""" Lecture et affichage de donnees """
# Function for reading from a file
# Input:
#   - file_name: a string giving the name of the file to open
#   - nb_item: number of lines to read (-1 to read everything, default -1)
# Output:
#   - data: a list of lists of floats
def read_data(file_name, nb_item = -1):
f = open(file_name,'r')
data = []
cpt = 0
for line in f:
if (0 <= nb_item and nb_item <= cpt):
break
line = re.split('\s+', line) # '\s' matches whitespace characters
line = [float(x) for x in line if x != '']
data.append(line)
cpt += 1
f.close()
return data
# Function for displaying a scatter plot
# Input:
#   - data: a set of points in the form of an n*2 matrix
#   - assign: an array of size n representing an assignment of [data]
def show(data, assign):
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
nb_clusters = max(assign) + 1
pylab.figure()
    mini = min(min(d[0] for d in data), min(d[1] for d in data))
    maxi = max(max(d[0] for d in data), max(d[1] for d in data))
pylab.xlim([mini, maxi])
pylab.ylim([mini, maxi])
if (nb_clusters < 8):
for i_k in range(nb_clusters):
pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
        for i_k in range(nb_clusters):
            pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
                       [data[i][1] for i in range(len(data)) if assign[i] == i_k],
                       colors[i_k % 7] + symbols[int(i_k / 7)])
pylab.show()
""" Lecture et ecriture d'une assignation """
# Lis un fichier ou est inscrit une assignation.
# Entree :
# - file : adresse et nom du fichier
# Sortie :
# - assign : un vecteur numpy d'entiers
def read_assign(file_name):
f = open(file_name,'r')
assign_tmp = []
i = 0
for line in f:
try:
assign_tmp.append(int(line))
i = i + 1
except ValueError:
continue
f.close()
return np.array(assign_tmp)
# Writes an assignment to a file
# Input:
#   - file_name: path and name of a file
#   - assign: the assignment to write
#   - nb_iter: the number of iterations performed by the algorithm (-1 if it is
#              not based on that principle)
#   - s: the seed used for the clustering
def write_cluster(file_name, assign, nb_iter, s):
nb_data = len(assign)
nb_cluster = max(assign) + 1
f = open(file_name, 'w')
f.write('nb_cluster = ' + str(nb_cluster) + '\n')
f.write('nb_iter = ' + str(nb_iter) + '\n')
f.write('nb_data = ' + str(nb_data) + '\n')
f.write('seed = ' + str(s) + '\n')
for i in assign:
f.write(str(i) + '\n')
f.close()
""" Fonctions non encore retravaillees """
# Fonction pour enregistrer des images :
# data_file = fichier contenant les donnees
# assign_file = fichier cree a partir du clustering et contenant la table d'assignation
# file_figure = nom du fichier dans lequel sera enregistre l'image
# format = nom de l'extention du fichier cree (pdf,svg,png...)
# exemple : save('cercles/cercles.txt', 'cercles_kmeans', 'figure_cercles_kmeans', 'pdf')
def save(data_file, assign_file, file_figure, format):
    data = read_data(data_file)
    assign = read_assign(assign_file)
    nombre_clusters = np.amax(assign) + 1
plt.ioff()
fig = plt.figure()
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
mini = min( min([data[i][0] for i in range(len(data))]), min([data[i][1] for i in range(len(data))]) )
maxi = max( max([data[i][0] for i in range(len(data))]), max([data[i][1] for i in range(len(data))]) )
plt.xlim([mini, maxi])
plt.ylim([mini, maxi])
if (nombre_clusters < 8):
for i_k in range(nombre_clusters):
plt.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
if (nombre_clusters < 85):
for i_k in range(nombre_clusters):
plt.plot( [data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k % 7] + symbols[int(i_k / 7)] )
else:
print("too many clusters")
if (platform.system() == "Windows"):
plt.savefig('C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/'+file_figure+'.'+format)
else:
plt.savefig('../data/Results/'+file_figure+'.'+format)
plt.close(fig)
|
[
"matplotlib.pyplot.xlim",
"pylab.show",
"re.split",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.array",
"pylab.figure",
"pylab.ylim",
"platform.system",
"pylab.xlim",
"matplotlib.pyplot.savefig"
] |
[((2035, 2049), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (2047, 2049), False, 'import pylab\n'), ((2159, 2183), 'pylab.xlim', 'pylab.xlim', (['[mini, maxi]'], {}), '([mini, maxi])\n', (2169, 2183), False, 'import pylab\n'), ((2189, 2213), 'pylab.ylim', 'pylab.ylim', (['[mini, maxi]'], {}), '([mini, maxi])\n', (2199, 2213), False, 'import pylab\n'), ((2758, 2770), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2768, 2770), False, 'import pylab\n'), ((3256, 3276), 'numpy.array', 'np.array', (['assign_tmp'], {}), '(assign_tmp)\n', (3264, 3276), True, 'import numpy as np\n'), ((4658, 4668), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4666, 4668), True, 'import matplotlib.pyplot as plt\n'), ((4680, 4692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4690, 4692), True, 'import matplotlib.pyplot as plt\n'), ((4969, 4991), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[mini, maxi]'], {}), '([mini, maxi])\n', (4977, 4991), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5019), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[mini, maxi]'], {}), '([mini, maxi])\n', (5005, 5019), True, 'import matplotlib.pyplot as plt\n'), ((5915, 5929), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5924, 5929), True, 'import matplotlib.pyplot as plt\n'), ((1516, 1538), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (1524, 1538), False, 'import re\n'), ((5685, 5702), 'platform.system', 'platform.system', ([], {}), '()\n', (5700, 5702), False, 'import platform\n'), ((5727, 5848), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/' +\n file_figure + '.' + format)"], {}), "(\n 'C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/' +\n file_figure + '.' + format)\n", (5738, 5848), True, 'import matplotlib.pyplot as plt\n'), ((5855, 5915), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../data/Results/' + file_figure + '.' + format)"], {}), "('../data/Results/' + file_figure + '.' + format)\n", (5866, 5915), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import time
import subprocess
SOURCE_IMAP_HOST = os.getenv('SOURCE_IMAP_HOST', '')
SOURCE_IMAP_PORT = os.getenv('SOURCE_IMAP_PORT', None)
TARGET_AUTH_FILE = os.getenv('TARGET_AUTH_FILE')
_example_out = r"""
Here is imapsync 1.983 on host 39eb9c59f7a5, a linux system with 0.6/1.9 free GiB of RAM
with Perl 5.28.1 and Mail::IMAPClient 3.42
Transfer started at Sun May 3 12:08:04 2020
PID is 1 my PPID is 0
Load is 0.40 0.09 0.03 4/418 on 2 cores
Current directory is /var/tmp
Real user id is nobody (uid 65534)
Effective user id is nobody (euid 65534)
$RCSfile: imapsync,v $ $Revision: 1.983 $ $Date: 2020/03/19 02:08:12 $
Command line used, run by /usr/bin/perl:
/usr/bin/imapsync --host1 imap.mail.hostpoint.ch --ssl1 --user1 <EMAIL> --password1 <PASSWORD> --gmail2 --user2 <EMAIL> --password2 <PASSWORD> --authmech2 XOAUTH2
Temp directory is /tmp ( to change it use --tmpdir dirpath )
Under docker context so installing only signals to exit
kill -INT 1 # special behavior: call to sub catch_exit
kill -QUIT 1 # special behavior: call to sub catch_exit
kill -TERM 1 # special behavior: call to sub catch_exit
No variable pid_filename
PID file is unset ( to set it, use --pidfile filepath ; to avoid it use --pidfile "" )
Modules version list:
Authen::NTLM 1.09
CGI 4.40
Compress::Zlib 2.074
Crypt::OpenSSL::RSA 0.31
Data::Uniqid 0.12
Digest::HMAC_MD5 1.01
Digest::HMAC_SHA1 1.03
Digest::MD5 2.55
Encode 2.97
Encode::IMAPUTF7 1.05
File::Copy::Recursive 0.44
File::Spec 3.74
Getopt::Long 2.5
HTML::Entities 3.69
IO::Socket 1.39
IO::Socket::INET 1.39
IO::Socket::INET6 2.72
IO::Socket::IP 0.39
IO::Socket::SSL 2.060
IO::Tee 0.65
JSON 4.02
JSON::WebToken 0.10
LWP 6.36
MIME::Base64 3.15
Mail::IMAPClient 3.42
Net::Ping 2.62
Net::SSLeay 1.85
Term::ReadKey 2.38
Test::MockObject 1.20180705
Time::HiRes 1.9759
URI::Escape 3.31
Unicode::String 2.10
( use --no-modulesversion to turn off printing this Perl modules list )
Info: will resync flags for already transferred messages. Use --noresyncflags to not resync flags.
SSL debug mode level is --debugssl 1 (can be set from 0 meaning no debug to 4 meaning max debug)
Host1: SSL default mode is like --sslargs1 "SSL_verify_mode=0", meaning for host1 SSL_VERIFY_NONE, ie, do not check the certificate server.
Host1: Use --sslargs1 SSL_verify_mode=1 to have SSL_VERIFY_PEER, ie, check the certificate server of host1
Host2: SSL default mode is like --sslargs2 "SSL_verify_mode=0", meaning for host2 SSL_VERIFY_NONE, ie, do not check the certificate server.
Host2: Use --sslargs2 SSL_verify_mode=1 to have SSL_VERIFY_PEER, ie, check the certificate server of host2
Info: turned ON syncinternaldates, will set the internal dates (arrival dates) on host2 same as host1.
Host1: will try to use LOGIN authentication on host1
Host2: will try to use XOAUTH2 authentication on host2
Host1: imap connection timeout is 120 seconds
Host2: imap connection timeout is 120 seconds
Host1: IMAP server [imap.mail.hostpoint.ch] port [993] user [<EMAIL>]
Host2: IMAP server [imap.gmail.com] port [993] user [<EMAIL>]
Host1: connecting and login on host1 [imap.mail.hostpoint.ch] port [993] with user [<EMAIL>]
Host1 IP address: 172.16.17.32
Host1 banner: * OK [CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN] Dovecot ready.
Host1 capability before authentication: IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN AUTH
Host1: success login on [imap.mail.hostpoint.ch] with user [<EMAIL>] auth [LOGIN]
Host2: connecting and login on host2 [imap.gmail.com] port [993] with user [<EMAIL>]
Host2 IP address: 192.168.3.11
Host2 banner: * OK Gimap ready for requests from 172.16.31.10 u21mb183888216edq
Host2 capability before authentication: IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 XYZZY SASL-IR AUTH=XOAUTH2 AUTH=PLAIN AUTH=PLAIN-CLIENTTOKEN AUTH=OAUTHBEARER AUTH=XOAUTH AUTH
Host2: imap.gmail.com says it has CAPABILITY for AUTHENTICATE XOAUTH2
Host2: success login on [imap.gmail.com] with user [<EMAIL>] auth [XOAUTH2]
Host1: state Authenticated
Host2: state Authenticated
Host1 capability once authenticated: IMAP4rev1 SASL-IR LOGIN-REFERRALS ID ENABLE IDLE SORT SORT=DISPLAY THREAD=REFERENCES THREAD=REFS THREAD=ORDEREDSUBJECT MULTIAPPEND URL-PARTIAL CATENATE UNSELECT CHILDREN NAMESPACE UIDPLUS LIST-EXTENDED I18NLEVEL=1 CONDSTORE QRESYNC ESEARCH ESORT SEARCHRES WITHIN CONTEXT=SEARCH LIST-STATUS BINARY MOVE SNIPPET=FUZZY PREVIEW=FUZZY LITERAL+ NOTIFY SPECIAL-USE QUOTA THREAD I18NLEVEL CONTEXT SNIPPET PREVIEW
Host2 capability once authenticated: IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 UIDPLUS COMPRESS=DEFLATE ENABLE MOVE CONDSTORE ESEARCH UTF8=ACCEPT LIST-EXTENDED LIST-STATUS LITERAL- SPECIAL-USE APPENDLIMIT=35651584 COMPRESS UTF8 APPENDLIMIT
Host1: found ID capability. Sending/receiving ID, presented in raw IMAP for now.
In order to avoid sending/receiving ID, use option --noid
Sending: 4 ID ("name" "imapsync" "version" "1.983" "os" "linux" "vendor" "Gilles LAMIRAL" "support-url" "https://imapsync.lamiral.info/" "date" "19-Mar-2020 02:08:12 +0000" "side" "host1")
Sent 181 bytes
Read: * ID ("name" "Dovecot")
4 OK ID completed (0.001 + 0.000 secs).
Sending: 4 ID ("name" "imapsync" "version" "1.983" "os" "linux" "vendor" "Gilles LAMIRAL" "support-url" "https://imapsync.lamiral.info/" "date" "19-Mar-2020 02:08:12 +0000" "side" "host2")
Host2: found ID capability. Sending/receiving ID, presented in raw IMAP for now.
In order to avoid sending/receiving ID, use option --noid
Sent 181 bytes
Read: * ID ("name" "GImap" "vendor" "Google, Inc." "support-url" "https://support.google.com/mail" "version" "gmail_imap_200428.12_p0" "remote-host" "172.16.31.10")
4 OK Success
Host2: found quota, presented in raw IMAP
Sending: 5 GETQUOTAROOT INBOX
Sent 22 bytes
Host2: Quota current storage is 460800 bytes. Limit is 32212254720 bytes. So 0.00 % full
Host2: found APPENDLIMIT=35651584 in CAPABILITY (use --appendlimit xxxx to override this automatic setting)
Read: * QUOTAROOT "INBOX" ""
* QUOTA "" (STORAGE 450 31457280)
5 OK Success
Host2: Setting maxsize to 35651584 (min of --maxsize 35651584 and appendlimit 35651584
Host1: found 5 folders.
Host2: found 9 folders.
Host1: guessing separator from folder listing: [/]
Host1: separator given by NAMESPACE: [/]
Host2: guessing separator from folder listing: [/]
Host2: separator given by NAMESPACE: [/]
Host1: guessing prefix from folder listing: []
Host1: prefix given by NAMESPACE: []
Host2: guessing prefix from folder listing: []
Host2: prefix given by NAMESPACE: []
Host1: separator and prefix: [/][]
Host2: separator and prefix: [/][]
Including all folders found by default. Use --subscribed or --folder or --folderrec or --include to select specific folders. Use --exclude to unselect specific folders.
Excluding folders matching pattern \[Gmail\]$
Host1: Checking wanted folders exist. Use --nocheckfoldersexist to avoid this check (shared of public namespace targeted).
Host1: Checking wanted folders are selectable. Use --nocheckselectable to avoid this check.
Turned on automapping folders ( use --noautomap to turn off automapping )
Host1: special Drafts = \Drafts
Host1: special Sent = \Sent
Host1: special Trash = \Trash
Host1: special spam = \Junk
Host2: special [Gmail]/All Mail = \All
Host2: special [Gmail]/Bin = \Trash
Host2: special [Gmail]/Drafts = \Drafts
Host2: special [Gmail]/Sent Mail = \Sent
Host2: special [Gmail]/Spam = \Junk
Host2: special [Gmail]/Starred = \Flagged
++++ Listing folders
All foldernames are presented between brackets like [X] where X is the foldername.
When a foldername contains non-ASCII characters it is presented in the form
[X] = [Y] where
X is the imap foldername you have to use in command line options and
Y is the utf8 output just printed for convenience, to recognize it.
Host1: folders list (first the raw imap format then the [X] = [Y]):
* LIST (\HasNoChildren \UnMarked \Drafts) "/" Drafts
* LIST (\HasNoChildren \UnMarked \Sent) "/" Sent
* LIST (\HasNoChildren \UnMarked \Trash) "/" Trash
* LIST (\HasNoChildren \UnMarked \Junk) "/" spam
* LIST (\HasNoChildren) "/" INBOX
15 OK List completed (0.001 + 0.000 secs).
[Drafts]
[INBOX]
[Sent]
[Trash]
[spam]
Host2: folders list (first the raw imap format then the [X] = [Y]):
* LIST (\HasNoChildren) "/" "INBOX"
* LIST (\HasNoChildren) "/" "Originals"
* LIST (\HasChildren \Noselect) "/" "[Gmail]"
* LIST (\All \HasNoChildren) "/" "[Gmail]/All Mail"
* LIST (\HasNoChildren \Trash) "/" "[Gmail]/Bin"
* LIST (\Drafts \HasNoChildren) "/" "[Gmail]/Drafts"
* LIST (\HasNoChildren \Important) "/" "[Gmail]/Important"
* LIST (\HasNoChildren \Sent) "/" "[Gmail]/Sent Mail"
* LIST (\HasNoChildren \Junk) "/" "[Gmail]/Spam"
* LIST (\Flagged \HasNoChildren) "/" "[Gmail]/Starred"
11 OK Success
[INBOX]
[Originals]
[[Gmail]/All Mail]
[[Gmail]/Bin]
[[Gmail]/Drafts]
[[Gmail]/Important]
[[Gmail]/Sent Mail]
[[Gmail]/Spam]
[[Gmail]/Starred]
Folders in host2 not in host1:
[[Gmail]/Starred]
[[Gmail]/Important]
[[Gmail]/All Mail]
[Originals]
Folders mapping from --automap feature (use --f1f2 to override any mapping):
[spam] -> [[Gmail]/Spam]
[Sent] -> [[Gmail]/Sent Mail]
[Drafts] -> [[Gmail]/Drafts]
[Trash] -> [[Gmail]/Bin]
Checking SEARCH ALL works on both accounts. To avoid that check, use --nochecknoabletosearch
Host1: checking if SEARCH ALL works on INBOX
Host1: folder [INBOX] has 25 messages mentioned by SELECT
Host1: folder [INBOX] has 25 messages found by SEARCH ALL
Host1: folder [INBOX] has the same messages count (25) by SELECT and SEARCH ALL
Host2: checking if SEARCH ALL works on INBOX
Host2: folder [INBOX] has 32 messages mentioned by SELECT
Host2: folder [INBOX] has 32 messages found by SEARCH ALL
Host2: folder [INBOX] has the same messages count (32) by SELECT and SEARCH ALL
Good! SEARCH ALL works on both accounts.
Folders sizes before the synchronization.
You can remove foldersizes listings by using "--nofoldersizes" and "--nofoldersizesatend"
but then you will also lose the ETA (Estimation Time of Arrival) given after each message copy.
Host1 folder 1/5 [Drafts] Size: 0 Messages: 0 Biggest: 0
Host2 folder 1/5 [[Gmail]/Drafts] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 2/5 [INBOX] Size: 393251 Messages: 25 Biggest: 59437
Host2 folder 2/5 [INBOX] Size: 461389 Messages: 32 Biggest: 59437
Host2-Host1 68138 7 0
Host1 folder 3/5 [Sent] Size: 0 Messages: 0 Biggest: 0
Host2 folder 3/5 [[Gmail]/Sent Mail] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 4/5 [Trash] Size: 0 Messages: 0 Biggest: 0
Host2 folder 4/5 [[Gmail]/Bin] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 5/5 [spam] Size: 0 Messages: 0 Biggest: 0
Host2 folder 5/5 [[Gmail]/Spam] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 Nb folders: 5 folders
Host2 Nb folders: 5 folders
Host1 Nb messages: 25 messages
Host2 Nb messages: 32 messages
Host1 Total size: 393251 bytes (384.034 KiB)
Host2 Total size: 461389 bytes (450.575 KiB)
Host1 Biggest message: 59437 bytes (58.044 KiB)
Host2 Biggest message: 59437 bytes (58.044 KiB)
Time spent on sizing: 2.4 seconds
++++ Looping on each one of 5 folders to sync
ETA: Sun May 3 12:08:06 2020 0 s 25/25 msgs left
Folder 1/5 [Drafts] -> [[Gmail]/Drafts]
Host1: folder [Drafts] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Drafts] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Drafts] permanentflags:
Host1: Expunging Drafts
Host1: folder [Drafts] considering 0 messages
Host2: folder [[Gmail]/Drafts] considering 0 messages
Host1: folder [Drafts] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Drafts] selected 0 messages, duplicates 0
Host1: Expunging folder Drafts
ETA: Sun May 3 12:08:09 2020 2 s 25/25 msgs left
Folder 2/5 [INBOX] -> [INBOX]
Host1: folder [INBOX] has 25 messages in total (mentioned by SELECT)
Host2: folder [INBOX] has 32 messages in total (mentioned by SELECT)
Host2: folder [INBOX] permanentflags:
Host1: Expunging INBOX
Host1: folder [INBOX] considering 25 messages
Host2: folder [INBOX] considering 32 messages
Host1: folder [INBOX] selected 25 messages, duplicates 0
Host2: folder [INBOX] selected 32 messages, duplicates 0
Host1: Expunging folder INBOX
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 3/5 [Sent] -> [[Gmail]/Sent Mail]
Host1: folder [Sent] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Sent Mail] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Sent Mail] permanentflags:
Host1: Expunging Sent
Host1: folder [Sent] considering 0 messages
Host2: folder [[Gmail]/Sent Mail] considering 0 messages
Host1: folder [Sent] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Sent Mail] selected 0 messages, duplicates 0
Host1: Expunging folder Sent
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 4/5 [Trash] -> [[Gmail]/Bin]
Host1: folder [Trash] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Bin] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Bin] permanentflags:
Host1: Expunging Trash
Host1: folder [Trash] considering 0 messages
Host2: folder [[Gmail]/Bin] considering 0 messages
Host1: folder [Trash] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Bin] selected 0 messages, duplicates 0
Host1: Expunging folder Trash
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 5/5 [spam] -> [[Gmail]/Spam]
Host1: folder [spam] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Spam] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Spam] permanentflags:
Host1: Expunging spam
Host1: folder [spam] considering 0 messages
Host2: folder [[Gmail]/Spam] considering 0 messages
Host1: folder [spam] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Spam] selected 0 messages, duplicates 0
Host1: Expunging folder spam
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
++++ End looping on each folder
Folders sizes after the synchronization.
You can remove this foldersizes listing by using "--nofoldersizesatend"
Host1 folder 1/5 [Drafts] Size: 0 Messages: 0 Biggest: 0
Host2 folder 1/5 [[Gmail]/Drafts] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 2/5 [INBOX] Size: 393251 Messages: 25 Biggest: 59437
Host2 folder 2/5 [INBOX] Size: 461389 Messages: 32 Biggest: 59437
Host2-Host1 68138 7 0
Host1 folder 3/5 [Sent] Size: 0 Messages: 0 Biggest: 0
Host2 folder 3/5 [[Gmail]/Sent Mail] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 4/5 [Trash] Size: 0 Messages: 0 Biggest: 0
Host2 folder 4/5 [[Gmail]/Bin] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 5/5 [spam] Size: 0 Messages: 0 Biggest: 0
Host2 folder 5/5 [[Gmail]/Spam] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 Nb folders: 5 folders
Host2 Nb folders: 5 folders
Host1 Nb messages: 25 messages
Host2 Nb messages: 32 messages
Host1 Total size: 393251 bytes (384.034 KiB)
Host2 Total size: 461389 bytes (450.575 KiB)
Host1 Biggest message: 59437 bytes (58.044 KiB)
Host2 Biggest message: 59437 bytes (58.044 KiB)
Time spent on sizing: 0.4 seconds
++++ Statistics
Transfer started on : Sun May 3 12:08:04 2020
Transfer ended on : Sun May 3 12:08:07 2020
Transfer time : 3.4 sec
Folders synced : 5/5 synced
Messages transferred : 0
Messages skipped : 25
Messages found duplicate on host1 : 0
Messages found duplicate on host2 : 0
Messages found crossduplicate on host2 : 0
Messages void (noheader) on host1 : 0
Messages void (noheader) on host2 : 0
Messages found in host1 not in host2 : 0 messages
Messages found in host2 not in host1 : 7 messages
Messages deleted on host1 : 0
Messages deleted on host2 : 0
Total bytes transferred : 0 (0.000 KiB)
Total bytes skipped : 393251 (384.034 KiB)
Message rate : 0.0 messages/s
Average bandwidth rate : 0.0 KiB/s
Reconnections to host1 : 0
Reconnections to host2 : 0
Memory consumption at the end : 183.0 MiB (started with 159.8 MiB)
Load end is : 0.40 0.09 0.03 1/418 on 2 cores
Biggest message : 0 bytes (0.000 KiB)
Memory/biggest message ratio : NA
Start difference host2 - host1 : 7 messages, 68138 bytes (66.541 KiB)
Final difference host2 - host1 : 7 messages, 68138 bytes (66.541 KiB)
The sync looks good, all 25 identified messages in host1 are on host2.
There is no unidentified message
The sync is not strict, there are 7 messages in host2 that are not on host1. Use --delete2 to delete them and have a strict sync. (32 identified messages in host2)
Detected 0 errors
This imapsync is up to date. ( local 1.983 >= official 1.977 )( Use --noreleasecheck to avoid this release check. )
Homepage: https://imapsync.lamiral.info/
Exiting with return value 0 (EX_OK: successful termination) 0/50 nb_errors/max_errors
"""
def imapsync(source_username, source_password, target_email):
print(f'Received task with {source_username} {target_email}')
time.sleep(10)
return _example_out
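# Note: SOURCE_IMAP_HOST/PORT and TARGET_AUTH_FILE are read above but not used by
# this stub, which only sleeps and returns the canned log. A real implementation
# would presumably hand them to the imapsync binary via subprocess, roughly:
#
#   subprocess.run(["imapsync", "--host1", SOURCE_IMAP_HOST,
#                   "--user1", source_username, "--password1", source_password,
#                   "--gmail2", "--user2", target_email], capture_output=True)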
|
[
"os.getenv",
"time.sleep"
] |
[((58, 91), 'os.getenv', 'os.getenv', (['"""SOURCE_IMAP_HOST"""', '""""""'], {}), "('SOURCE_IMAP_HOST', '')\n", (67, 91), False, 'import os\n'), ((109, 144), 'os.getenv', 'os.getenv', (['"""SOURCE_IMAP_PORT"""', 'None'], {}), "('SOURCE_IMAP_PORT', None)\n", (118, 144), False, 'import os\n'), ((162, 191), 'os.getenv', 'os.getenv', (['"""TARGET_AUTH_FILE"""'], {}), "('TARGET_AUTH_FILE')\n", (171, 191), False, 'import os\n'), ((20283, 20297), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (20293, 20297), False, 'import time\n')]
|
# Generated by Django 2.2.4 on 2020-04-18 20:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_event'),
]
operations = [
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('mobile', models.CharField(max_length=20)),
('whatsapp', models.CharField(blank=True, max_length=20)),
('telephone', models.CharField(blank=True, max_length=20)),
('email', models.EmailField(blank=True, max_length=254)),
('street', models.CharField(blank=True, max_length=255)),
('complement', models.CharField(blank=True, max_length=100)),
('postalcode', models.CharField(blank=True, max_length=20)),
('dob', models.DateField(blank=True)),
('comment', models.CharField(blank=True, max_length=255)),
('events', models.ManyToManyField(to='core.Event')),
('kollegen', models.ManyToManyField(to='core.Kollege')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['name'],
},
),
]
|
[
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.EmailField",
"django.db.models.AutoField",
"django.db.models.DateField"
] |
[((381, 474), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (397, 474), False, 'from django.db import migrations, models\n'), ((498, 530), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (514, 530), False, 'from django.db import migrations, models\n'), ((560, 591), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (576, 591), False, 'from django.db import migrations, models\n'), ((623, 666), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (639, 666), False, 'from django.db import migrations, models\n'), ((699, 742), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (715, 742), False, 'from django.db import migrations, models\n'), ((771, 816), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)'}), '(blank=True, max_length=254)\n', (788, 816), False, 'from django.db import migrations, models\n'), ((846, 890), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (862, 890), False, 'from django.db import migrations, models\n'), ((924, 968), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (940, 968), False, 'from django.db import migrations, models\n'), ((1002, 1045), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (1018, 1045), False, 'from django.db import migrations, models\n'), ((1072, 1100), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)'}), '(blank=True)\n', (1088, 1100), False, 'from django.db import migrations, models\n'), ((1131, 1175), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (1147, 1175), False, 'from django.db import migrations, models\n'), ((1205, 1244), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""core.Event"""'}), "(to='core.Event')\n", (1227, 1244), False, 'from django.db import migrations, models\n'), ((1276, 1317), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""core.Kollege"""'}), "(to='core.Kollege')\n", (1298, 1317), False, 'from django.db import migrations, models\n'), ((1345, 1441), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1362, 1441), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/python3
import ctypes
import os
import sys
import argparse
import json
from PIL import Image, ImageFont, ImageDraw
org_wall = 'wall.jpg'
new_wall = 'task_wall.jpg'
try:
rf = open('data.json', 'r')
# Initial usage of the script will create a json file with all the default settings and a list to store tasks
except FileNotFoundError:
temp_wf = open('data.json', 'w')
init_data = {'tasks': [], 'xpos': 600, 'colour': 'white', 'fontsize': 30}
json.dump(init_data, temp_wf, indent=4)
temp_wf.close()
rf = open('data.json', 'r')
# Is used throughout this program to access the tasks, settings in json file and update them
data = json.load(rf)
rf.close()
def create_wallpaper():
''' Create a new wallpaper by accessing tasks list in data dict loaded from json file. Is called when tasks are added or removed from json file'''
# Open org_wall. Tasks will be drawn on contents of this image and will be saved to new_wall without affecting org_wall
try:
wall = Image.open(org_wall)
except FileNotFoundError:
print("Name your wallpaper 'wall' and store in script directory")
exit()
draw = ImageDraw.Draw(wall)
fontsize = int(data['fontsize'])
font = ImageFont.truetype('arial.ttf', fontsize)
ypos = 50
xpos = data['xpos']
# Loop over each item in tasks list saved in json data file. The tasks are then drawn on wallpaper.
for task in data['tasks']:
try:
draw.text((xpos, ypos), '{}. {}'.format(task['id'], task['t']), data['colour'], font=font)
except ValueError:
print('The colour or the font you set did not exist')
# ypos is updated by the font size to ensure that two tasks don't draw over each other on the wallpaper.
ypos += fontsize
with open('data.json', 'w') as wf:
json.dump(data, wf, indent=4)
# Save changed image to new_wall, leaving the org_wall unaffected
wall.save(new_wall)
def update_wallpaper(file):
'''Sets a new desktop background'''
SPI_SETDESKWALLPAPER = 20
SPIF_UPDATEINIFILE = 1
if os.path.isfile(file):
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, file, SPIF_UPDATEINIFILE)
else:
print("File does not exist")
def add(added_tasks):
''' Adds tasks to data dict. added_tasks is a list from taking in arguments in the commandline from user.'''
# Following is to get max no of tasks in data so as to assign id to a task by continuing after the largest id.
tasks_len = len(data['tasks'])
for i in range(len(added_tasks)):
# Number each task by adding tasks_len to i, so the id keeps increasing.
cur_id = tasks_len + i
data['tasks'].append({'t': '{}'.format(added_tasks[i]), 'id': cur_id})
# Return updated data dict which will be saved to data.json in main() and then new wallpaper is created with new data
return data
def remove(ids):
    # Iterate over the ids in reverse-sorted order since we remove from data['tasks'] by index.
    # If the args were not reversed, e.g. [2, 5], removing index 2 first would shift the list
    # and cause an abrupt IndexError because index 5 would no longer exist.
    try:
        for i in sorted(ids, reverse=True):
            del data['tasks'][i]
        # Re-assign the ids to tasks as there will be gaps after removing.
        for i, task_dict in enumerate(data['tasks']):
            task_dict['id'] = i
    # If user provides an index that has not been assigned to a task.
    except IndexError:
        print("There does not exist a task with at least one of the indexes you provided")
    # Return the updated data dict so main() can dump it to data.json, mirroring add().
    return data
def clear_tasks():
''' Clears all tasks from data.json and subsequently the wallpaper'''
# Empty the tasks list
data['tasks'] = []
# Dump the updated data dict to the same file from where it was retrieved
with open('data.json', 'w') as wf:
json.dump(data, wf, indent=4)
wf.close()
try:
# Remove new_wall and change wallpaper to original
os.remove(new_wall)
update_wallpaper(os.path.join(os.getcwd(), org_wall))
# If file 'new_wall' is not found, the user has already cleared tasks by removing new_wall
except FileNotFoundError:
print('You have already cleared all your tasks.')
def main():
parser = argparse.ArgumentParser(description='Add or remove tasks to your wallpaper')
parser.add_argument('-a', '--add', nargs='*', help='Add tasks to wallpaper')
parser.add_argument('-c', '--clear', help='Clear all of your tasks', action='store_true')
parser.add_argument('-r', '--remove', nargs='*', type=int, help='Remove a task by id')
parser.add_argument('-m', '--margin', nargs=1, type=int, help='Change left margin')
parser.add_argument('-cl', '--colour', nargs=1, help='Change colour of the tasks')
parser.add_argument('-fs', '--fontsize', nargs=1, help='Change font of the tasks')
args = parser.parse_args()
# Following arguments will always require 3 or more arguments to make a change
if len(sys.argv) > 2:
        # Stores returned data after it has been changed in a function. Is then dumped to json data.
        updated_data = data
        if args.add:
            updated_data = add(args.add)
        if args.remove:
            updated_data = remove(args.remove)
        # nargs=1 yields single-element lists, so unpack the value before storing it.
        if args.margin:
            data['xpos'] = args.margin[0]
        if args.colour:
            data['colour'] = args.colour[0]
        if args.fontsize:
            data['fontsize'] = args.fontsize[0]
with open('data.json', 'w') as wf:
json.dump(updated_data, wf, indent=4)
wf.close()
create_wallpaper()
update_wallpaper(os.path.join(os.getcwd(), new_wall))
if args.clear:
clear_tasks()
if __name__ == "__main__":
main()
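# Example invocations (the script filename is an assumption for illustration):
#
#   python task_wall.py -a "Buy milk" "Write report"   # add two tasks
#   python task_wall.py -r 0 2                         # remove tasks by id
#   python task_wall.py -fs 40 -cl red -a "Call Bob"   # restyle while adding a task
#   python task_wall.py -c                             # clear all tasks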
|
[
"json.dump",
"os.remove",
"json.load",
"argparse.ArgumentParser",
"os.getcwd",
"PIL.Image.open",
"PIL.ImageFont.truetype",
"os.path.isfile",
"PIL.ImageDraw.Draw",
"ctypes.windll.user32.SystemParametersInfoW"
] |
[((662, 675), 'json.load', 'json.load', (['rf'], {}), '(rf)\n', (671, 675), False, 'import json\n'), ((1155, 1175), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['wall'], {}), '(wall)\n', (1169, 1175), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((1224, 1265), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', 'fontsize'], {}), "('arial.ttf', fontsize)\n", (1242, 1265), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((2093, 2113), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2107, 2113), False, 'import os\n'), ((4268, 4344), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Add or remove tasks to your wallpaper"""'}), "(description='Add or remove tasks to your wallpaper')\n", (4291, 4344), False, 'import argparse\n'), ((469, 508), 'json.dump', 'json.dump', (['init_data', 'temp_wf'], {'indent': '(4)'}), '(init_data, temp_wf, indent=4)\n', (478, 508), False, 'import json\n'), ((1003, 1023), 'PIL.Image.open', 'Image.open', (['org_wall'], {}), '(org_wall)\n', (1013, 1023), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((1835, 1864), 'json.dump', 'json.dump', (['data', 'wf'], {'indent': '(4)'}), '(data, wf, indent=4)\n', (1844, 1864), False, 'import json\n'), ((2123, 2220), 'ctypes.windll.user32.SystemParametersInfoW', 'ctypes.windll.user32.SystemParametersInfoW', (['SPI_SETDESKWALLPAPER', '(0)', 'file', 'SPIF_UPDATEINIFILE'], {}), '(SPI_SETDESKWALLPAPER, 0, file,\n SPIF_UPDATEINIFILE)\n', (2165, 2220), False, 'import ctypes\n'), ((3851, 3880), 'json.dump', 'json.dump', (['data', 'wf'], {'indent': '(4)'}), '(data, wf, indent=4)\n', (3860, 3880), False, 'import json\n'), ((3976, 3995), 'os.remove', 'os.remove', (['new_wall'], {}), '(new_wall)\n', (3985, 3995), False, 'import os\n'), ((5525, 5562), 'json.dump', 'json.dump', (['updated_data', 'wf'], {'indent': '(4)'}), '(updated_data, wf, indent=4)\n', (5534, 5562), False, 'import json\n'), ((4034, 4045), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4043, 4045), False, 'import os\n'), ((5652, 5663), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5661, 5663), False, 'import os\n')]
|
from bubblesort import bubblesort
from heapsort import heapsort
from insertionsort import insertionsort
from mergesort import mergesort
from quicksort import quicksort
from radixsort import radixsort
from selectionsort import selectionsort
from timsort import timsort
from timeit import default_timer as timer
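# Note: every sort below is invoked with no arguments, so each imported module is
# presumably a self-contained benchmark that builds and sorts its own test data.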
# BubbleSort
start = timer()
bubblesort()
end = timer()
bubblesort_elapsed_time = end - start
# HeapSort
start = timer()
heapsort()
end = timer()
heapsort_elapsed_time = end - start
# InsertionSort
start = timer()
insertionsort()
end = timer()
insertion_elapsed_time = end - start
# MergeSort
start = timer()
mergesort()
end = timer()
mergesort_elapsed_time = end - start
# QuickSort
start = timer()
quicksort()
end = timer()
quicksort_elapsed_time = end - start
# RadixSort
start = timer()
radixsort()
end = timer()
radixsort_elapsed_time = end - start
# SelectionSort
start = timer()
selectionsort()
end = timer()
selectionsort_elapsed_time = end - start
# TimSort
start = timer()
timsort()
end = timer()
timsort_elapsed_time = end - start
print(f"Elapsed time for bubblesort: {bubblesort_elapsed_time}")
print(f"Elapsed time for heapsort: {heapsort_elapsed_time}")
print(f"Elapsed time for insertionsort: {insertion_elapsed_time}")
print(f"Elapsed time for quicksort: {quicksort_elapsed_time}")
print(f"Elapsed time for radixsort: {radixsort_elapsed_time}")
print(f"Elapsed time for selectionsort: {selectionsort_elapsed_time}")
print(f"Elapsed time for timsort: {timsort_elapsed_time}")
|
[
"insertionsort.insertionsort",
"heapsort.heapsort",
"timeit.default_timer",
"timsort.timsort",
"bubblesort.bubblesort",
"selectionsort.selectionsort",
"mergesort.mergesort",
"radixsort.radixsort",
"quicksort.quicksort"
] |
[((333, 340), 'timeit.default_timer', 'timer', ([], {}), '()\n', (338, 340), True, 'from timeit import default_timer as timer\n'), ((341, 353), 'bubblesort.bubblesort', 'bubblesort', ([], {}), '()\n', (351, 353), False, 'from bubblesort import bubblesort\n'), ((360, 367), 'timeit.default_timer', 'timer', ([], {}), '()\n', (365, 367), True, 'from timeit import default_timer as timer\n'), ((426, 433), 'timeit.default_timer', 'timer', ([], {}), '()\n', (431, 433), True, 'from timeit import default_timer as timer\n'), ((434, 444), 'heapsort.heapsort', 'heapsort', ([], {}), '()\n', (442, 444), False, 'from heapsort import heapsort\n'), ((451, 458), 'timeit.default_timer', 'timer', ([], {}), '()\n', (456, 458), True, 'from timeit import default_timer as timer\n'), ((520, 527), 'timeit.default_timer', 'timer', ([], {}), '()\n', (525, 527), True, 'from timeit import default_timer as timer\n'), ((528, 543), 'insertionsort.insertionsort', 'insertionsort', ([], {}), '()\n', (541, 543), False, 'from insertionsort import insertionsort\n'), ((550, 557), 'timeit.default_timer', 'timer', ([], {}), '()\n', (555, 557), True, 'from timeit import default_timer as timer\n'), ((616, 623), 'timeit.default_timer', 'timer', ([], {}), '()\n', (621, 623), True, 'from timeit import default_timer as timer\n'), ((624, 635), 'mergesort.mergesort', 'mergesort', ([], {}), '()\n', (633, 635), False, 'from mergesort import mergesort\n'), ((642, 649), 'timeit.default_timer', 'timer', ([], {}), '()\n', (647, 649), True, 'from timeit import default_timer as timer\n'), ((708, 715), 'timeit.default_timer', 'timer', ([], {}), '()\n', (713, 715), True, 'from timeit import default_timer as timer\n'), ((716, 727), 'quicksort.quicksort', 'quicksort', ([], {}), '()\n', (725, 727), False, 'from quicksort import quicksort\n'), ((734, 741), 'timeit.default_timer', 'timer', ([], {}), '()\n', (739, 741), True, 'from timeit import default_timer as timer\n'), ((800, 807), 'timeit.default_timer', 'timer', ([], {}), '()\n', (805, 807), True, 'from timeit import default_timer as timer\n'), ((808, 819), 'radixsort.radixsort', 'radixsort', ([], {}), '()\n', (817, 819), False, 'from radixsort import radixsort\n'), ((826, 833), 'timeit.default_timer', 'timer', ([], {}), '()\n', (831, 833), True, 'from timeit import default_timer as timer\n'), ((896, 903), 'timeit.default_timer', 'timer', ([], {}), '()\n', (901, 903), True, 'from timeit import default_timer as timer\n'), ((904, 919), 'selectionsort.selectionsort', 'selectionsort', ([], {}), '()\n', (917, 919), False, 'from selectionsort import selectionsort\n'), ((926, 933), 'timeit.default_timer', 'timer', ([], {}), '()\n', (931, 933), True, 'from timeit import default_timer as timer\n'), ((994, 1001), 'timeit.default_timer', 'timer', ([], {}), '()\n', (999, 1001), True, 'from timeit import default_timer as timer\n'), ((1002, 1011), 'timsort.timsort', 'timsort', ([], {}), '()\n', (1009, 1011), False, 'from timsort import timsort\n'), ((1018, 1025), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1023, 1025), True, 'from timeit import default_timer as timer\n')]
|
import json
import os
from classes.firebase import InitFirebaseConnection
from firebase_admin import db
from classes.google_maps import GoogleMaps
from classes.pharma_consults import PharmaConsults
from datetime import datetime
from sys import argv
print(argv)
if len(argv) < 2 or argv[1] not in ["-fo", "-fa"]:
    raise SystemExit("Invalid command option")
if argv[1] == "-fo":
InitFirebaseConnection()
print("Fetching currently open pharmacies...")
currentlyOpenPharmacies = PharmaConsults.getCurrentlyOpenPharmacies()
    if len(argv) > 2 and argv[2] == "-p":
        if len(currentlyOpenPharmacies) != 0:
print("Updating database...")
try:
now = datetime.now().timestamp()
ref = db.reference("/currently_open")
ref.set(currentlyOpenPharmacies)
ref = db.reference("/last_update")
ref.set(now)
except Exception as e:
raise e
elif argv[1] == "-fa":
# Fetch all pharmacies coordinates
from classes.base_pharmacy import BasePharmacy
with open("./pharmacies.txt", "r") as pharmcies_file:
pharmacies = pharmcies_file.read().strip().split("\n")
links = GoogleMaps.get_meta_links(pharmacies)
    if len(argv) > 2 and argv[2] == "-p":
        # Initialize Firebase before touching the database (as in the -fo branch)
        InitFirebaseConnection()
if os.path.exists("./cached.json"):
with open("./cached.json", "r") as f:
data = json.loads(f.read())
ref = db.reference("/all")
all = ref.get()
if all:
ans = input("Overwrite ? Y/N > ").lower()
if ans == "y":
ref = db.reference()
ref.child("all").set(data)
else:
ref = db.reference()
ref.child("all_pharmacies").set(data)
|
[
"classes.pharma_consults.PharmaConsults.getCurrentlyOpenPharmacies",
"classes.google_maps.GoogleMaps.get_meta_links",
"os.path.exists",
"firebase_admin.db.reference",
"classes.firebase.InitFirebaseConnection",
"datetime.datetime.now"
] |
[((374, 398), 'classes.firebase.InitFirebaseConnection', 'InitFirebaseConnection', ([], {}), '()\n', (396, 398), False, 'from classes.firebase import InitFirebaseConnection\n'), ((480, 523), 'classes.pharma_consults.PharmaConsults.getCurrentlyOpenPharmacies', 'PharmaConsults.getCurrentlyOpenPharmacies', ([], {}), '()\n', (521, 523), False, 'from classes.pharma_consults import PharmaConsults\n'), ((1224, 1261), 'classes.google_maps.GoogleMaps.get_meta_links', 'GoogleMaps.get_meta_links', (['pharmacies'], {}), '(pharmacies)\n', (1249, 1261), False, 'from classes.google_maps import GoogleMaps\n'), ((1318, 1349), 'os.path.exists', 'os.path.exists', (['"""./cached.json"""'], {}), "('./cached.json')\n", (1332, 1349), False, 'import os\n'), ((744, 775), 'firebase_admin.db.reference', 'db.reference', (['"""/currently_open"""'], {}), "('/currently_open')\n", (756, 775), False, 'from firebase_admin import db\n'), ((847, 875), 'firebase_admin.db.reference', 'db.reference', (['"""/last_update"""'], {}), "('/last_update')\n", (859, 875), False, 'from firebase_admin import db\n'), ((1467, 1487), 'firebase_admin.db.reference', 'db.reference', (['"""/all"""'], {}), "('/all')\n", (1479, 1487), False, 'from firebase_admin import db\n'), ((695, 709), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (707, 709), False, 'from datetime import datetime\n'), ((1785, 1799), 'firebase_admin.db.reference', 'db.reference', ([], {}), '()\n', (1797, 1799), False, 'from firebase_admin import db\n'), ((1671, 1685), 'firebase_admin.db.reference', 'db.reference', ([], {}), '()\n', (1683, 1685), False, 'from firebase_admin import db\n')]
|
import json
import os
import random
import subprocess
import time
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Optional
import docker
import pytest
from determined.common.api import bindings
from tests import config as conf
from tests import experiment as exp
from ..cluster.test_users import ADMIN_CREDENTIALS, logged_in_user
def det_deploy(subcommand: List) -> None:
command = [
"det",
"deploy",
"local",
] + subcommand
subprocess.run(command)
def cluster_up(arguments: List) -> None:
command = ["cluster-up", "--no-gpu"]
det_version = conf.DET_VERSION
if det_version is not None:
command += ["--det-version", det_version]
command += arguments
det_deploy(command)
def cluster_down(arguments: List) -> None:
command = ["cluster-down"]
command += arguments
det_deploy(command)
def master_up(arguments: List) -> None:
command = ["master-up"]
det_version = conf.DET_VERSION
if det_version is not None:
command += ["--det-version", det_version]
command += arguments
det_deploy(command)
def master_down(arguments: List) -> None:
command = ["master-down"]
command += arguments
det_deploy(command)
def agent_up(arguments: List, fluent_offset: Optional[int] = None) -> None:
command = ["agent-up", conf.MASTER_IP, "--no-gpu"]
det_version = conf.DET_VERSION
if det_version is not None:
command += ["--det-version", det_version]
command += arguments
if fluent_offset is not None:
with NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
f.write(
f"""
fluent:
port: {24224 + fluent_offset}
container_name: fluent-{fluent_offset}"""
)
det_deploy(command + ["--agent-config-path", tf.name])
else:
det_deploy(command)
def agent_down(arguments: List) -> None:
command = ["agent-down"]
command += arguments
det_deploy(command)
def agent_enable(arguments: List) -> None:
with logged_in_user(ADMIN_CREDENTIALS):
subprocess.run(["det", "-m", conf.make_master_url(), "agent", "enable"] + arguments)
def agent_disable(arguments: List) -> None:
with logged_in_user(ADMIN_CREDENTIALS):
subprocess.run(["det", "-m", conf.make_master_url(), "agent", "disable"] + arguments)
@pytest.mark.det_deploy_local
def test_cluster_down() -> None:
master_host = "localhost"
master_port = "8080"
name = "fixture_down_test"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
cluster_up(["--cluster-name", name])
container_name = name + "_determined-master_1"
client = docker.from_env()
containers = client.containers.list(filters={"name": container_name})
assert len(containers) > 0
cluster_down(["--cluster-name", name])
containers = client.containers.list(filters={"name": container_name})
assert len(containers) == 0
@pytest.mark.det_deploy_local
def test_custom_etc() -> None:
master_host = "localhost"
master_port = "8080"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
etc_path = str(Path(__file__).parent.joinpath("etc/master.yaml").resolve())
cluster_up(["--master-config-path", etc_path])
exp.run_basic_test(
conf.fixtures_path("no_op/single-default-ckpt.yaml"),
conf.fixtures_path("no_op"),
1,
)
assert os.path.exists("/tmp/ckpt-test/")
cluster_down([])
@pytest.mark.det_deploy_local
def test_agent_config_path() -> None:
master_host = "localhost"
master_port = "8080"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
master_up([])
# Config makes it unmodified.
etc_path = str(Path(__file__).parent.joinpath("etc/agent.yaml").resolve())
agent_name = "test-path-agent"
agent_up(["--agent-config-path", etc_path])
client = docker.from_env()
agent_container = client.containers.get(agent_name)
exit_code, out = agent_container.exec_run(["cat", "/etc/determined/agent.yaml"])
assert exit_code == 0
with open(etc_path) as f:
assert f.read() == out.decode("utf-8")
for _ in range(10):
try:
client.containers.get("test-fluent")
break
except docker.errors.NotFound:
print("Waiting for 'test-fluent' container to be created")
time.sleep(10)
else:
pytest.fail("uh-oh, fluent didn't come online")
agent_down(["--agent-name", agent_name])
# Validate CLI flags overwrite config file options.
agent_name += "-2"
agent_up(
["--agent-name", agent_name, "--agent-config-path", etc_path, "--agent-label", "cli-flag"]
)
agent_list = json.loads(subprocess.check_output(["det", "a", "list", "--json"]).decode())
agent_list = [el for el in agent_list if el["id"] == agent_name]
assert len(agent_list) == 1
assert agent_list[0]["label"] == "cli-flag"
agent_down(["--agent-name", agent_name])
master_down([])
@pytest.mark.det_deploy_local
def test_custom_port() -> None:
name = "port_test"
master_host = "localhost"
master_port = "12321"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
arguments = [
"--cluster-name",
name,
"--master-port",
f"{master_port}",
]
cluster_up(arguments)
exp.run_basic_test(
conf.fixtures_path("no_op/single-one-short-step.yaml"),
conf.fixtures_path("no_op"),
1,
)
cluster_down(["--cluster-name", name])
@pytest.mark.det_deploy_local
def test_agents_made() -> None:
master_host = "localhost"
master_port = "8080"
name = "agents_test"
num_agents = 2
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
arguments = [
"--cluster-name",
name,
"--agents",
f"{num_agents}",
]
cluster_up(arguments)
container_names = [name + f"-agent-{i}" for i in range(0, num_agents)]
client = docker.from_env()
for container_name in container_names:
containers = client.containers.list(filters={"name": container_name})
assert len(containers) > 0
cluster_down(["--cluster-name", name])
@pytest.mark.det_deploy_local
def test_master_up_down() -> None:
master_host = "localhost"
master_port = "8080"
name = "determined"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
master_up(["--master-name", name])
container_name = name + "_determined-master_1"
client = docker.from_env()
containers = client.containers.list(filters={"name": container_name})
assert len(containers) > 0
master_down([])
containers = client.containers.list(filters={"name": container_name})
assert len(containers) == 0
@pytest.mark.det_deploy_local
def test_agent_up_down() -> None:
master_host = "localhost"
master_port = "8080"
agent_name = "determined-agent"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
master_up([])
agent_up(["--agent-name", agent_name])
client = docker.from_env()
containers = client.containers.list(filters={"name": agent_name})
assert len(containers) > 0
agent_down(["--agent-name", agent_name])
containers = client.containers.list(filters={"name": agent_name})
assert len(containers) == 0
master_down([])
@pytest.mark.parametrize("steps", [10])
@pytest.mark.parametrize("num_agents", [3, 5])
@pytest.mark.parametrize("should_disconnect", [False, True])
@pytest.mark.det_deploy_local
def test_stress_agents_reconnect(steps: int, num_agents: int, should_disconnect: bool) -> None:
random.seed(42)
master_host = "localhost"
master_port = "8080"
conf.MASTER_IP = master_host
conf.MASTER_PORT = master_port
master_up([])
# Start all agents.
agents_are_up = [True] * num_agents
for i in range(num_agents):
agent_up(["--agent-name", f"agent-{i}"], fluent_offset=i)
time.sleep(3)
for _ in range(steps):
for agent_id, agent_is_up in enumerate(agents_are_up):
            if random.choice([True, False]):  # Coin flip: skip this agent half the time, so statuses change randomly.
continue
if should_disconnect:
                # Can't just randomly deploy up/down, since that can produce Docker container name conflicts.
if agent_is_up:
agent_down(["--agent-name", f"agent-{agent_id}"])
else:
agent_up(["--agent-name", f"agent-{agent_id}"], fluent_offset=agent_id)
agents_are_up[agent_id] = not agents_are_up[agent_id]
else:
if random.choice([True, False]):
agent_disable([f"agent-{agent_id}"])
agents_are_up[agent_id] = False
else:
agent_enable([f"agent-{agent_id}"])
agents_are_up[agent_id] = True
time.sleep(10)
# Validate that our master kept track of the agent reconnect spam.
agent_list = json.loads(
subprocess.check_output(
[
"det",
"agent",
"list",
"--json",
]
).decode()
)
assert sum(agents_are_up) <= len(agent_list)
for agent in agent_list:
agent_id = int(agent["id"].replace("agent-", ""))
assert agents_are_up[agent_id] == agent["enabled"]
# Can we still schedule something?
if any(agents_are_up):
experiment_id = exp.create_experiment(
conf.fixtures_path("no_op/single-one-short-step.yaml"),
conf.fixtures_path("no_op"),
None,
)
exp.wait_for_experiment_state(
experiment_id, bindings.determinedexperimentv1State.STATE_COMPLETED
)
for agent_id in range(num_agents):
agent_down(["--agent-name", f"agent-{agent_id}"])
master_down([])
|
[
"docker.from_env",
"subprocess.run",
"tempfile.NamedTemporaryFile",
"tests.config.make_master_url",
"tests.experiment.wait_for_experiment_state",
"subprocess.check_output",
"pytest.fail",
"os.path.exists",
"random.choice",
"time.sleep",
"tests.config.fixtures_path",
"pathlib.Path",
"random.seed",
"pytest.mark.parametrize"
] |
[((7445, 7483), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""steps"""', '[10]'], {}), "('steps', [10])\n", (7468, 7483), False, 'import pytest\n'), ((7485, 7530), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_agents"""', '[3, 5]'], {}), "('num_agents', [3, 5])\n", (7508, 7530), False, 'import pytest\n'), ((7532, 7591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""should_disconnect"""', '[False, True]'], {}), "('should_disconnect', [False, True])\n", (7555, 7591), False, 'import pytest\n'), ((508, 531), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (522, 531), False, 'import subprocess\n'), ((2729, 2746), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (2744, 2746), False, 'import docker\n'), ((3472, 3505), 'os.path.exists', 'os.path.exists', (['"""/tmp/ckpt-test/"""'], {}), "('/tmp/ckpt-test/')\n", (3486, 3505), False, 'import os\n'), ((3949, 3966), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (3964, 3966), False, 'import docker\n'), ((6063, 6080), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (6078, 6080), False, 'import docker\n'), ((6601, 6618), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (6616, 6618), False, 'import docker\n'), ((7154, 7171), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (7169, 7171), False, 'import docker\n'), ((7722, 7737), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (7733, 7737), False, 'import random\n'), ((8046, 8059), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (8056, 8059), False, 'import time\n'), ((3353, 3405), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op/single-default-ckpt.yaml"""'], {}), "('no_op/single-default-ckpt.yaml')\n", (3371, 3405), True, 'from tests import config as conf\n'), ((3415, 3442), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op"""'], {}), "('no_op')\n", (3433, 3442), True, 'from tests import config as conf\n'), ((4471, 4518), 'pytest.fail', 'pytest.fail', (['"uh-oh, fluent didn\'t come online"'], {}), '("uh-oh, fluent didn\'t come online")\n', (4482, 4518), False, 'import pytest\n'), ((5456, 5510), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op/single-one-short-step.yaml"""'], {}), "('no_op/single-one-short-step.yaml')\n", (5474, 5510), True, 'from tests import config as conf\n'), ((5520, 5547), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op"""'], {}), "('no_op')\n", (5538, 5547), True, 'from tests import config as conf\n'), ((8984, 8998), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8994, 8998), False, 'import time\n'), ((1589, 1609), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (1607, 1609), False, 'from tempfile import NamedTemporaryFile\n'), ((8166, 8194), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (8179, 8194), False, 'import random\n'), ((9830, 9933), 'tests.experiment.wait_for_experiment_state', 'exp.wait_for_experiment_state', (['experiment_id', 'bindings.determinedexperimentv1State.STATE_COMPLETED'], {}), '(experiment_id, bindings.\n determinedexperimentv1State.STATE_COMPLETED)\n', (9859, 9933), True, 'from tests import experiment as exp\n'), ((4438, 4452), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4448, 4452), False, 'import time\n'), ((4791, 4846), 'subprocess.check_output', 'subprocess.check_output', (["['det', 'a', 'list', '--json']"], {}), "(['det', 'a', 'list', '--json'])\n", (4814, 4846), False, 'import subprocess\n'), ((8708, 8736), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (8721, 8736), False, 'import random\n'), ((9681, 9735), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op/single-one-short-step.yaml"""'], {}), "('no_op/single-one-short-step.yaml')\n", (9699, 9735), True, 'from tests import config as conf\n'), ((9753, 9780), 'tests.config.fixtures_path', 'conf.fixtures_path', (['"""no_op"""'], {}), "('no_op')\n", (9771, 9780), True, 'from tests import config as conf\n'), ((2163, 2185), 'tests.config.make_master_url', 'conf.make_master_url', ([], {}), '()\n', (2183, 2185), True, 'from tests import config as conf\n'), ((2346, 2368), 'tests.config.make_master_url', 'conf.make_master_url', ([], {}), '()\n', (2366, 2368), True, 'from tests import config as conf\n'), ((9120, 9179), 'subprocess.check_output', 'subprocess.check_output', (["['det', 'agent', 'list', '--json']"], {}), "(['det', 'agent', 'list', '--json'])\n", (9143, 9179), False, 'import subprocess\n'), ((3209, 3223), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3213, 3223), False, 'from pathlib import Path\n'), ((3792, 3806), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3796, 3806), False, 'from pathlib import Path\n')]
|
from models2 import Department, Employee, db
from app2 import app
db.drop_all()
db.create_all()
d1 = Department(dept_code="mktg", dept_name="Marketing", phone="897-9999")
d2 = Department(dept_code="acct", dept_name="Accounting", phone="111-5429")
river = Employee(name="<NAME>", state="NY", dept_code="mktg")
summer = Employee(name="<NAME>", state="OR", dept_code="mktg")
joaquin = Employee(name="<NAME>", dept_code="acct")
db.session.add(d1)
db.session.add(d2)
db.session.commit()
db.session.add(river)
db.session.add(joaquin)
db.session.add(summer)
db.session.commit()
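# Editor note: departments are inserted and committed before employees on
# purpose -- assuming Employee.dept_code is a foreign key to
# Department.dept_code in models2 (not shown here), the parent Department rows
# must exist before the Employee rows can be flushed without violating the
# constraint.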
|
[
"models2.Employee",
"models2.db.session.add",
"models2.db.drop_all",
"models2.db.create_all",
"models2.Department",
"models2.db.session.commit"
] |
[((67, 80), 'models2.db.drop_all', 'db.drop_all', ([], {}), '()\n', (78, 80), False, 'from models2 import Department, Employee, db\n'), ((81, 96), 'models2.db.create_all', 'db.create_all', ([], {}), '()\n', (94, 96), False, 'from models2 import Department, Employee, db\n'), ((103, 172), 'models2.Department', 'Department', ([], {'dept_code': '"""mktg"""', 'dept_name': '"""Marketing"""', 'phone': '"""897-9999"""'}), "(dept_code='mktg', dept_name='Marketing', phone='897-9999')\n", (113, 172), False, 'from models2 import Department, Employee, db\n'), ((177, 247), 'models2.Department', 'Department', ([], {'dept_code': '"""acct"""', 'dept_name': '"""Accounting"""', 'phone': '"""111-5429"""'}), "(dept_code='acct', dept_name='Accounting', phone='111-5429')\n", (187, 247), False, 'from models2 import Department, Employee, db\n'), ((255, 308), 'models2.Employee', 'Employee', ([], {'name': '"""<NAME>"""', 'state': '"""NY"""', 'dept_code': '"""mktg"""'}), "(name='<NAME>', state='NY', dept_code='mktg')\n", (263, 308), False, 'from models2 import Department, Employee, db\n'), ((318, 371), 'models2.Employee', 'Employee', ([], {'name': '"""<NAME>"""', 'state': '"""OR"""', 'dept_code': '"""mktg"""'}), "(name='<NAME>', state='OR', dept_code='mktg')\n", (326, 371), False, 'from models2 import Department, Employee, db\n'), ((382, 423), 'models2.Employee', 'Employee', ([], {'name': '"""<NAME>"""', 'dept_code': '"""acct"""'}), "(name='<NAME>', dept_code='acct')\n", (390, 423), False, 'from models2 import Department, Employee, db\n'), ((425, 443), 'models2.db.session.add', 'db.session.add', (['d1'], {}), '(d1)\n', (439, 443), False, 'from models2 import Department, Employee, db\n'), ((444, 462), 'models2.db.session.add', 'db.session.add', (['d2'], {}), '(d2)\n', (458, 462), False, 'from models2 import Department, Employee, db\n'), ((463, 482), 'models2.db.session.commit', 'db.session.commit', ([], {}), '()\n', (480, 482), False, 'from models2 import Department, Employee, db\n'), ((484, 505), 'models2.db.session.add', 'db.session.add', (['river'], {}), '(river)\n', (498, 505), False, 'from models2 import Department, Employee, db\n'), ((506, 529), 'models2.db.session.add', 'db.session.add', (['joaquin'], {}), '(joaquin)\n', (520, 529), False, 'from models2 import Department, Employee, db\n'), ((530, 552), 'models2.db.session.add', 'db.session.add', (['summer'], {}), '(summer)\n', (544, 552), False, 'from models2 import Department, Employee, db\n'), ((553, 572), 'models2.db.session.commit', 'db.session.commit', ([], {}), '()\n', (570, 572), False, 'from models2 import Department, Employee, db\n')]
|
# -*- coding: utf-8 -*-
"""
Flatten mesh using conformal mapping
=============================================
Map 3D mesh to a 2D (complex) plane with angle-preserving (conformal) mapping
Based on these course notes
https://www.cs.cmu.edu/~kmcrane/Projects/DDG/
section 7.4.
"""
import numpy as np
from bfieldtools.flatten_mesh import flatten_mesh
from bfieldtools.flatten_mesh import mesh2plane
from bfieldtools.flatten_mesh import plane2mesh
from bfieldtools.utils import load_example_mesh
#%% Determine 2D parameterization and plot coordinate function on the 3D mesh
from mayavi import mlab
from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces
mesh = load_example_mesh("meg_helmet", process=False)
u, v, mesh2d = flatten_mesh(mesh, _lambda=0.80)
plot_data_on_vertices(mesh, u, ncolors=15)
plot_data_on_vertices(mesh, v, ncolors=15)
#%% Determine lambda with smallest area distortion
# lls = np.linspace(0.01,1.0, 100)
# mm = []
# for ll in lls:
# u, v, mesh2d = flatten_mesh(mesh, _lambda=ll)
# d = mesh2d.area_faces / mesh.area_faces
# mm.append(np.std(d)/np.mean(d))
# print(np.std(d)/np.mean(d))
# plt.plot(lls, mm)
#%% Plot flattened mesh and area distortion on faces
plot_data_on_faces(mesh2d, mesh2d.area_faces / mesh.area_faces)
#%% Plot gradient of the two coordinate functions and the cosine of the angle between the gradients
from bfieldtools.mesh_calculus import gradient
gx = gradient(u, mesh)
gy = gradient(v, mesh)
cos = np.sum(gx * gy, axis=0) / (
np.linalg.norm(gx, axis=0) * np.linalg.norm(gy, axis=0)
)
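# Editor sketch (not in the original example): a conformal map preserves
# angles, so the gradients of u and v should be orthogonal on every face and
# the cosine above should sit near zero; its spread is a quick measure of how
# far the discrete map is from exact angle preservation, e.g.:
# print("max |cos|:", np.abs(cos).max())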
plot_data_on_faces(mesh, cos, vmin=-1, vmax=1)
mlab.quiver3d(*mesh.triangles_center.T, *gx, color=(1, 0, 0), mode="arrow")
q = mlab.quiver3d(*mesh.triangles_center.T, *gy, color=(0, 0, 1), mode="arrow")
q.scene.isometric_view()
#%% Map hexagonal grid from 2d to the 3D mesh
d = np.sqrt(3 / 4)
m = np.min((u.min(), v.min()))
mm = np.min((u.max(), v.max()))
xx = np.linspace(m * 1.05, mm * 1.05, 12)
yy = np.linspace(m * 1.05, mm * 1.05, 12) * d
p = np.array(np.meshgrid(xx, yy, 0, indexing="ij"))
p[0, :, ::2] += (xx[1] - xx[0]) * 0.5
p = p.reshape(3, -1).T
pp = plane2mesh(p, mesh, u, v)
plot_data_on_vertices(mesh, u, ncolors=15)
mlab.points3d(*pp.T, scale_factor=0.01)
|
[
"bfieldtools.mesh_calculus.gradient",
"numpy.meshgrid",
"numpy.sum",
"mayavi.mlab.quiver3d",
"bfieldtools.flatten_mesh.flatten_mesh",
"bfieldtools.viz.plot_data_on_faces",
"mayavi.mlab.points3d",
"bfieldtools.utils.load_example_mesh",
"bfieldtools.viz.plot_data_on_vertices",
"numpy.linspace",
"bfieldtools.flatten_mesh.plane2mesh",
"numpy.linalg.norm",
"numpy.sqrt"
] |
[((712, 758), 'bfieldtools.utils.load_example_mesh', 'load_example_mesh', (['"""meg_helmet"""'], {'process': '(False)'}), "('meg_helmet', process=False)\n", (729, 758), False, 'from bfieldtools.utils import load_example_mesh\n'), ((775, 806), 'bfieldtools.flatten_mesh.flatten_mesh', 'flatten_mesh', (['mesh'], {'_lambda': '(0.8)'}), '(mesh, _lambda=0.8)\n', (787, 806), False, 'from bfieldtools.flatten_mesh import flatten_mesh\n'), ((811, 853), 'bfieldtools.viz.plot_data_on_vertices', 'plot_data_on_vertices', (['mesh', 'u'], {'ncolors': '(15)'}), '(mesh, u, ncolors=15)\n', (832, 853), False, 'from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces\n'), ((855, 897), 'bfieldtools.viz.plot_data_on_vertices', 'plot_data_on_vertices', (['mesh', 'v'], {'ncolors': '(15)'}), '(mesh, v, ncolors=15)\n', (876, 897), False, 'from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces\n'), ((1269, 1332), 'bfieldtools.viz.plot_data_on_faces', 'plot_data_on_faces', (['mesh2d', '(mesh2d.area_faces / mesh.area_faces)'], {}), '(mesh2d, mesh2d.area_faces / mesh.area_faces)\n', (1287, 1332), False, 'from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces\n'), ((1492, 1509), 'bfieldtools.mesh_calculus.gradient', 'gradient', (['u', 'mesh'], {}), '(u, mesh)\n', (1500, 1509), False, 'from bfieldtools.mesh_calculus import gradient\n'), ((1516, 1533), 'bfieldtools.mesh_calculus.gradient', 'gradient', (['v', 'mesh'], {}), '(v, mesh)\n', (1524, 1533), False, 'from bfieldtools.mesh_calculus import gradient\n'), ((1634, 1680), 'bfieldtools.viz.plot_data_on_faces', 'plot_data_on_faces', (['mesh', 'cos'], {'vmin': '(-1)', 'vmax': '(1)'}), '(mesh, cos, vmin=-1, vmax=1)\n', (1652, 1680), False, 'from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces\n'), ((1682, 1757), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['*mesh.triangles_center.T', '*gx'], {'color': '(1, 0, 0)', 'mode': '"""arrow"""'}), "(*mesh.triangles_center.T, *gx, color=(1, 0, 0), mode='arrow')\n", (1695, 1757), False, 'from mayavi import mlab\n'), ((1763, 1838), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['*mesh.triangles_center.T', '*gy'], {'color': '(0, 0, 1)', 'mode': '"""arrow"""'}), "(*mesh.triangles_center.T, *gy, color=(0, 0, 1), mode='arrow')\n", (1776, 1838), False, 'from mayavi import mlab\n'), ((1919, 1933), 'numpy.sqrt', 'np.sqrt', (['(3 / 4)'], {}), '(3 / 4)\n', (1926, 1933), True, 'import numpy as np\n'), ((2005, 2041), 'numpy.linspace', 'np.linspace', (['(m * 1.05)', '(mm * 1.05)', '(12)'], {}), '(m * 1.05, mm * 1.05, 12)\n', (2016, 2041), True, 'import numpy as np\n'), ((2215, 2240), 'bfieldtools.flatten_mesh.plane2mesh', 'plane2mesh', (['p', 'mesh', 'u', 'v'], {}), '(p, mesh, u, v)\n', (2225, 2240), False, 'from bfieldtools.flatten_mesh import plane2mesh\n'), ((2244, 2286), 'bfieldtools.viz.plot_data_on_vertices', 'plot_data_on_vertices', (['mesh', 'u'], {'ncolors': '(15)'}), '(mesh, u, ncolors=15)\n', (2265, 2286), False, 'from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces\n'), ((2288, 2327), 'mayavi.mlab.points3d', 'mlab.points3d', (['*pp.T'], {'scale_factor': '(0.01)'}), '(*pp.T, scale_factor=0.01)\n', (2301, 2327), False, 'from mayavi import mlab\n'), ((1541, 1564), 'numpy.sum', 'np.sum', (['(gx * gy)'], {'axis': '(0)'}), '(gx * gy, axis=0)\n', (1547, 1564), True, 'import numpy as np\n'), ((2048, 2084), 'numpy.linspace', 'np.linspace', (['(m * 1.05)', '(mm * 1.05)', '(12)'], {}), '(m * 1.05, mm * 1.05, 12)\n', (2059, 2084), True, 'import numpy as np\n'), ((2103, 2140), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy', '(0)'], {'indexing': '"""ij"""'}), "(xx, yy, 0, indexing='ij')\n", (2114, 2140), True, 'import numpy as np\n'), ((1574, 1600), 'numpy.linalg.norm', 'np.linalg.norm', (['gx'], {'axis': '(0)'}), '(gx, axis=0)\n', (1588, 1600), True, 'import numpy as np\n'), ((1603, 1629), 'numpy.linalg.norm', 'np.linalg.norm', (['gy'], {'axis': '(0)'}), '(gy, axis=0)\n', (1617, 1629), True, 'import numpy as np\n')]
|
import re
from smartsearch.matcher import field_matcher, phrase_matcher, zip_matcher
from smartsearch.model import extractions, nlp
from smartsearch.referencer import extract_references
def static_args(**kwargs):
"""This decorator method is used to add static arguments to another method.
    The reason we are doing this is that we pass a regular expression as an argument. Since the regular
    expression is compiled once via the static args, it is not recompiled on every method call.
"""
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
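# Editor sketch (hypothetical example, not from this module): attributes added
# by static_args live on the function object itself, so a pattern is compiled
# once at decoration time instead of on every call:
#
#   @static_args(digits=re.compile(r"\d+"))
#   def find_digits(text):
#       return find_digits.digits.findall(text)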
def remove_commas(match):
"""Helper method to remove commas from a match group
Args:
match (match): This is a regular expression match object.
Returns:
(str): The text with commas removed.
"""
match = match.group()
return re.sub(',', '', match)
@static_args(squarefoot=re.compile(r"(?<!\w)(square|sq(\.)?)(\s)?(feet|foot|ft(\.)?)(?!\w)"),
numberRE=re.compile(r"\$?\d+,?\d+", re.I))
def preprocess(text):
"""This method is used to preprocess text before any nlp is done on it.
The method first turns any variation of squarefoot into "squarefoot" so we do not have to check for it later in
    the pipeline. It also removes commas from the numbers, as they are easier to pipe into the API as integers.
Note:
This method is always called before parse() is called.
Args:
text (str): This is the block of text that we wish to preprocess.
Returns:
(str): The text that has been preprocessed.
"""
sqftSub = preprocess.squarefoot.sub("squarefoot", text)
numberSub = preprocess.numberRE.sub(remove_commas, sqftSub)
return numberSub
def parse(text):
"""This method is what is called by the models.
Args:
text (str): This is a string of text that has been preprocessed.
Returns:
(dict): Dictionary of fields mapped to their respective values.
"""
extractions.clear()
doc = nlp(preprocess(text))
for span in [doc[head:tail] for (match_id, head, tail) in phrase_matcher(doc)]:
try:
span.merge()
except IndexError:
pass
for span in [doc[head:tail] for (match_id, head, tail) in field_matcher(doc)]:
try:
span.merge()
except IndexError:
pass
zip_matcher(doc)
extract_references(doc, extractions)
if is_negated(doc):
negate(extractions)
return extractions
def is_negated(doc):
"""This method checks if the sentence has been negated indicating the user wants the opposite of what he/she asked.
Ex. "Show me all the apartments that are NOT dog friendly". This works by starting at the head of the sentence and
then navigating through the parse tree looking for a negated target word.
Args:
doc (doc): This is a spacy doc object.
Returns:
(bool): True if text contains a negation, False otherwise.
"""
token = doc[0]
while token != token.head:
token = token.head
children = [i.text.lower() for i in token.children]
negations = ["no", "not", "n't", "nothing", "nowhere"]
if any(i in children for i in negations):
return True
# this could be improved using what's in references
topics = [
"home", "homes",
"house", "houses",
"apartment", "apartments",
"building", "buildings",
"place", "places",
"residence", "residences",
"anywhere",
"anyplace"]
for i in token.children:
if i.text.lower() in topics:
grandchildren = [j.text.lower() for j in i.children]
if any(j in grandchildren for j in negations):
return True
return False
def negate(extractions):
"""This is a helper method to negate the fields if a negation is found.
Args:
extractions (dict): This is a dictionary object which contains all of the extracted fields.
Returns:
        None: The extractions dictionary is modified in place (min/max bounds are swapped and the pet-friendly flags cleared).
"""
if extractions.get("max_price") and not extractions.get("min_price"):
extractions["min_price"] = extractions["max_price"]
extractions["max_price"] = None
elif extractions.get("min_price") and not extractions.get("max_price"):
extractions["max_price"] = extractions["min_price"]
extractions["min_price"] = None
if extractions.get("max_sqft") and not extractions.get("min_sqft"):
extractions["min_sqft"] = extractions["max_sqft"]
extractions["max_sqft"] = None
elif extractions.get("min_sqft") and not extractions.get("max_sqft"):
extractions["max_sqft"] = extractions["min_sqft"]
extractions["min_sqft"] = None
if extractions.get("max_bed") and not extractions.get("min_bed"):
extractions["min_bed"] = extractions["max_bed"]
extractions["max_bed"] = None
elif extractions.get("min_bed") and not extractions.get("max_bed"):
extractions["max_bed"] = extractions["min_bed"]
extractions["min_bed"] = None
    if extractions.get("dog_friendly"):
        extractions["dog_friendly"] = False
    if extractions.get("cat_friendly"):
        extractions["cat_friendly"] = False
|
[
"smartsearch.matcher.zip_matcher",
"re.compile",
"smartsearch.model.extractions.get",
"smartsearch.matcher.phrase_matcher",
"smartsearch.matcher.field_matcher",
"smartsearch.model.extractions.clear",
"re.sub",
"smartsearch.referencer.extract_references"
] |
[((922, 944), 're.sub', 're.sub', (['""","""', '""""""', 'match'], {}), "(',', '', match)\n", (928, 944), False, 'import re\n'), ((2047, 2066), 'smartsearch.model.extractions.clear', 'extractions.clear', ([], {}), '()\n', (2064, 2066), False, 'from smartsearch.model import extractions, nlp\n'), ((2437, 2453), 'smartsearch.matcher.zip_matcher', 'zip_matcher', (['doc'], {}), '(doc)\n', (2448, 2453), False, 'from smartsearch.matcher import field_matcher, phrase_matcher, zip_matcher\n'), ((2458, 2494), 'smartsearch.referencer.extract_references', 'extract_references', (['doc', 'extractions'], {}), '(doc, extractions)\n', (2476, 2494), False, 'from smartsearch.referencer import extract_references\n'), ((5186, 5217), 'smartsearch.model.extractions.get', 'extractions.get', (['"""dog_friendly"""'], {}), "('dog_friendly')\n", (5201, 5217), False, 'from smartsearch.model import extractions, nlp\n'), ((5274, 5305), 'smartsearch.model.extractions.get', 'extractions.get', (['"""cat_friendly"""'], {}), "('cat_friendly')\n", (5289, 5305), False, 'from smartsearch.model import extractions, nlp\n'), ((971, 1043), 're.compile', 're.compile', (['"""(?<!\\\\w)(square|sq(\\\\.)?)(\\\\s)?(feet|foot|ft(\\\\.)?)(?!\\\\w)"""'], {}), "('(?<!\\\\w)(square|sq(\\\\.)?)(\\\\s)?(feet|foot|ft(\\\\.)?)(?!\\\\w)')\n", (981, 1043), False, 'import re\n'), ((1063, 1097), 're.compile', 're.compile', (['"""\\\\$?\\\\d+,?\\\\d+"""', 're.I'], {}), "('\\\\$?\\\\d+,?\\\\d+', re.I)\n", (1073, 1097), False, 'import re\n'), ((4163, 4191), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_price"""'], {}), "('max_price')\n", (4178, 4191), False, 'from smartsearch.model import extractions, nlp\n'), ((4514, 4541), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_sqft"""'], {}), "('max_sqft')\n", (4529, 4541), False, 'from smartsearch.model import extractions, nlp\n'), ((4855, 4881), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_bed"""'], {}), "('max_bed')\n", (4870, 4881), False, 'from smartsearch.model import extractions, nlp\n'), ((2162, 2181), 'smartsearch.matcher.phrase_matcher', 'phrase_matcher', (['doc'], {}), '(doc)\n', (2176, 2181), False, 'from smartsearch.matcher import field_matcher, phrase_matcher, zip_matcher\n'), ((2329, 2347), 'smartsearch.matcher.field_matcher', 'field_matcher', (['doc'], {}), '(doc)\n', (2342, 2347), False, 'from smartsearch.matcher import field_matcher, phrase_matcher, zip_matcher\n'), ((4200, 4228), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_price"""'], {}), "('min_price')\n", (4215, 4228), False, 'from smartsearch.model import extractions, nlp\n'), ((4339, 4367), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_price"""'], {}), "('min_price')\n", (4354, 4367), False, 'from smartsearch.model import extractions, nlp\n'), ((4550, 4577), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_sqft"""'], {}), "('min_sqft')\n", (4565, 4577), False, 'from smartsearch.model import extractions, nlp\n'), ((4685, 4712), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_sqft"""'], {}), "('min_sqft')\n", (4700, 4712), False, 'from smartsearch.model import extractions, nlp\n'), ((4890, 4916), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_bed"""'], {}), "('min_bed')\n", (4905, 4916), False, 'from smartsearch.model import extractions, nlp\n'), ((5021, 5047), 'smartsearch.model.extractions.get', 'extractions.get', (['"""min_bed"""'], {}), "('min_bed')\n", (5036, 5047), False, 'from smartsearch.model import extractions, nlp\n'), ((4376, 4404), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_price"""'], {}), "('max_price')\n", (4391, 4404), False, 'from smartsearch.model import extractions, nlp\n'), ((4721, 4748), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_sqft"""'], {}), "('max_sqft')\n", (4736, 4748), False, 'from smartsearch.model import extractions, nlp\n'), ((5056, 5082), 'smartsearch.model.extractions.get', 'extractions.get', (['"""max_bed"""'], {}), "('max_bed')\n", (5071, 5082), False, 'from smartsearch.model import extractions, nlp\n')]
|
import os
import sqlite3
import logging
from esd_process import scrape_variables
class BaseBackend:
"""
Base class for backends, must be inherited to use
"""
def __init__(self):
self.output_folder = None
# these attributes are populated during scrape and saved to the backend (database)
self.downloaded_success_count = 0
self.downloaded_error_count = 0
self.ignored_count = 0
self.ship_name = ''
self.survey_name = ''
self.survey_url = ''
self.raw_data_path = ''
self.processed_data_path = ''
self.grid_path = ''
self._backend_logger = logging.getLogger(scrape_variables.logger_name + '_backend')
self._backend_logger.setLevel(scrape_variables.logger_level)
def _configure_backend(self):
raise NotImplementedError('_configure_backend must be implemented for this backend to operate')
def _create_backend(self):
raise NotImplementedError('_create_backend must be implemented for this backend to operate')
def _add_survey(self):
raise NotImplementedError('_add_survey must be implemented for this backend to operate')
def _check_for_survey(self, shipname: str, surveyname: str):
raise NotImplementedError('_check_for_survey must be implemented for this backend to operate')
def _check_for_grid(self, shipname: str, surveyname: str):
raise NotImplementedError('_check_for_grid must be implemented for this backend to operate')
def _remove_survey(self, shipname: str, surveyname: str):
raise NotImplementedError('_remove_survey must be implemented for this backend to operate')
def _close_backend(self):
raise NotImplementedError('_close_backend must be implemented for this backend to operate')
class SqlBackend(BaseBackend):
"""
    python sqlite3 backend, will store metadata about surveys in the 'surveys' table in the self.database_file sqlite3 file.
"""
def __init__(self):
super().__init__()
self.database_file = None
self._cur = None
self._conn = None
def _configure_backend(self):
"""
Creates the database_file if it does not exist. Will also run _create_backend to generate a blank table
if that table does not exist.
"""
self.database_file = os.path.join(self.output_folder, 'survey_database.sqlite3')
needs_create = False
if not os.path.exists(self.database_file):
needs_create = True
self._conn = sqlite3.connect(self.database_file)
self._cur = self._conn.cursor()
if needs_create:
self._create_backend()
def _create_backend(self):
"""
Generate a new sqlite3 database for the project
"""
self._backend_logger.log(logging.INFO, f'Generating new table "surveys" for scrape data...')
# create the single table that we need to store survey metadata
self._cur.execute('''CREATE TABLE surveys
(ship_name text, survey text, downloaded_success int, downloaded_error int,
ignored int, raw_data_path text, processed_data_path text, grid_path text)''')
self._conn.commit()
def _add_survey(self):
"""
Add a new entry for this survey to the database
"""
if self.ship_name and self.survey_name:
if not self._check_for_survey(self.ship_name, self.survey_name):
self._backend_logger.log(logging.INFO, f'Adding new data for {self.ship_name}/{self.survey_name} to sqlite database')
self._cur.execute(f'INSERT INTO surveys VALUES ("{self.ship_name.lower()}","{self.survey_name.lower()}",'
f'{self.downloaded_success_count},{self.downloaded_error_count},{self.ignored_count},'
f'"{self.raw_data_path}","{self.processed_data_path}","{self.grid_path}")')
self._conn.commit()
# reset data to defaults to get ready for next survey
self.ship_name = ''
self.survey_name = ''
self.downloaded_success_count = 0
self.downloaded_error_count = 0
self.ignored_count = 0
self.raw_data_path = ''
self.processed_data_path = ''
self.grid_path = ''
def _check_for_survey(self, shipname: str, surveyname: str):
"""
Check to see if this survey exists in the database
"""
data = self._cur.execute(f'SELECT * FROM surveys WHERE ship_name="{shipname.lower()}" and survey="{surveyname.lower()}"')
if len(data.fetchall()) > 0:
return True
else:
return False
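    # Editor sketch (not part of the original backend): the f-string queries in
    # this class are open to SQL injection if ship/survey names are untrusted.
    # sqlite3 supports parameterized queries, e.g.:
    #
    #   self._cur.execute('SELECT * FROM surveys WHERE ship_name=? AND survey=?',
    #                     (shipname.lower(), surveyname.lower()))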
def _check_for_grid(self, shipname: str, surveyname: str):
"""
Check to see if this survey has a grid path in the database (lets you know if you have successfully created a
grid with this survey)
"""
data = self._cur.execute(f'SELECT * FROM surveys WHERE ship_name="{shipname.lower()}" and survey="{surveyname.lower()}" and grid_path != ""')
if len(data.fetchall()) > 0:
return True
else:
return False
def _remove_survey(self, shipname: str, surveyname: str):
"""
Remove the entry for this survey from the database
"""
        self._cur.execute(f'DELETE FROM surveys WHERE ship_name="{shipname.lower()}" and survey="{surveyname.lower()}"')
self._conn.commit()
def _close_backend(self):
"""
Close the database connection
"""
self._conn.close()
|
[
"os.path.exists",
"sqlite3.connect",
"os.path.join",
"logging.getLogger"
] |
[((653, 713), 'logging.getLogger', 'logging.getLogger', (["(scrape_variables.logger_name + '_backend')"], {}), "(scrape_variables.logger_name + '_backend')\n", (670, 713), False, 'import logging\n'), ((2356, 2415), 'os.path.join', 'os.path.join', (['self.output_folder', '"""survey_database.sqlite3"""'], {}), "(self.output_folder, 'survey_database.sqlite3')\n", (2368, 2415), False, 'import os\n'), ((2549, 2584), 'sqlite3.connect', 'sqlite3.connect', (['self.database_file'], {}), '(self.database_file)\n', (2564, 2584), False, 'import sqlite3\n'), ((2460, 2494), 'os.path.exists', 'os.path.exists', (['self.database_file'], {}), '(self.database_file)\n', (2474, 2494), False, 'import os\n')]
|
import random
class Utils:
@classmethod
def _get_random_alphanumeric_string(cls):
return ''.join(random.choice('ABCDSFGEHIJK123456') for i in range(5))
@classmethod
def _get_random_numeric_string(cls):
return ''.join(random.choice('1234567890') for i in range(10))
@classmethod
def _get_random_five_number_string(cls):
return ''.join(random.choice('123456789') for i in range(5))
@classmethod
def _get_random_alphabetic_string(cls):
return ''.join(random.choice('ABCDSFGEHIJK') for i in range(5))
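# Editor note (illustrative): every helper is a classmethod, so it can be
# called without instantiating the class, e.g.
#   Utils._get_random_numeric_string()  # -> a 10-character string of digits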
|
[
"random.choice"
] |
[((116, 151), 'random.choice', 'random.choice', (['"""ABCDSFGEHIJK123456"""'], {}), "('ABCDSFGEHIJK123456')\n", (129, 151), False, 'import random\n'), ((253, 280), 'random.choice', 'random.choice', (['"""1234567890"""'], {}), "('1234567890')\n", (266, 280), False, 'import random\n'), ((387, 413), 'random.choice', 'random.choice', (['"""123456789"""'], {}), "('123456789')\n", (400, 413), False, 'import random\n'), ((518, 547), 'random.choice', 'random.choice', (['"""ABCDSFGEHIJK"""'], {}), "('ABCDSFGEHIJK')\n", (531, 547), False, 'import random\n')]
|
from __future__ import unicode_literals
import requests
import time
import mimetypes
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from .exceptions import (
PageError, DisambiguationError, RedirectError, HTTPTimeoutError,
WikiaException, ODD_ERROR_MESSAGE)
from .util import cache, stdout_encode, debug
# Generate all extensions from the OS
mimetypes.init()
API_URL = 'http://{lang}{sub_wikia}.wikia.com/api/v1/{action}'
# URL used when browsing the wikia proper
STANDARD_URL = 'http://{lang}{sub_wikia}.wikia.com/wiki/{page}'
LANG = ""
RATE_LIMIT = False
RATE_LIMIT_MIN_WAIT = None
RATE_LIMIT_LAST_CALL = None
USER_AGENT = 'wikia (https://github.com/Timidger/Wikia/)'
def set_lang(language):
'''
Sets the global language variable, which is sent in the params
'''
global LANG
LANG = language.lower() + '.' if language else ''
for cached_func in (search, summary):
cached_func.clear_cache()
def set_user_agent(user_agent_string):
'''
Set the User-Agent string to be used for all requests.
Arguments:
* user_agent_string - (string) a string specifying the User-Agent header
'''
global USER_AGENT
USER_AGENT = user_agent_string
def set_rate_limiting(rate_limit, min_wait=timedelta(milliseconds=50)):
'''
Enable or disable rate limiting on requests to the wikia servers.
If rate limiting is not enabled, under some circumstances (depending on
load on Wikia, the number of requests you and other `wikia` users
are making, and other factors), Wikia may return an HTTP timeout error.
Enabling rate limiting generally prevents that issue, but please note that
HTTPTimeoutError still might be raised.
Arguments:
* rate_limit - (Boolean) whether to enable rate limiting or not
Keyword arguments:
* min_wait - if rate limiting is enabled, `min_wait` is a timedelta describing the minimum time to wait before requests.
Defaults to timedelta(milliseconds=50)
'''
global RATE_LIMIT
global RATE_LIMIT_MIN_WAIT
global RATE_LIMIT_LAST_CALL
RATE_LIMIT = rate_limit
if not rate_limit:
RATE_LIMIT_MIN_WAIT = None
else:
RATE_LIMIT_MIN_WAIT = min_wait
RATE_LIMIT_LAST_CALL = None
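# Editor sketch (illustrative usage): throttle API calls to at most one every
# 100 ms.
#
#   set_rate_limiting(True, min_wait=timedelta(milliseconds=100))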
@cache
def search(sub_wikia, query, results=10):
'''
Do a Wikia search for `query`.
Keyword arguments:
  * sub_wikia - the sub wikia to search in (e.g. "runescape", "elderscrolls")
  * results - the maximum number of results returned
'''
global LANG
search_params = {
'action': 'Search/List?/',
'sub_wikia': sub_wikia,
'lang': LANG,
'limit': results,
'query': query
}
raw_results = _wiki_request(search_params)
try:
search_results = (d['title'] for d in raw_results['items'])
except KeyError as e:
raise WikiaError("Could not locate page \"{}\" in subwikia \"{}\"".format(query,
sub_wikia))
return list(search_results)
def random(pages=1):
'''
Get a list of random Wikia article titles.
  .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
  #http://en.wikia.org/w/api.php?action=query&list=random&rnlimit=5000&format=json
query_params = {
'lang': LANG
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles
@cache
def summary(sub_wikia, title, chars=500, redirect=True):
'''
Plain text summary of the page from the sub-wikia.
  .. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default
Keyword arguments:
* chars - if set, return only the first `chars` characters (limit is 500)
* auto_suggest - let Wikia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
'''
# use auto_suggest and redirect to get the correct article
# also, use page's error checking to raise DisambiguationError if necessary
page_info = page(sub_wikia, title, redirect=redirect)
title = page_info.title
pageid = page_info.pageid
query_params = {
'action': 'Articles/Details?/',
'sub_wikia': sub_wikia,
'titles': title,
'ids': pageid,
'abstract': chars,
'lang': LANG
}
request = _wiki_request(query_params)
summary = request['items'][str(pageid)]['abstract']
return summary
def page(sub_wikia, title=None, pageid=None, redirect=True, preload=False):
'''
Get a WikiaPage object for the page in the sub wikia with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
return WikiaPage(sub_wikia, title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikiaPage(sub_wikia, pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
class WikiaPage(object):
'''
Contains data from a Wikia page.
Uses property methods to filter data from the raw HTML.
'''
def __init__(self, sub_wikia, title=None, pageid=None, redirect=True, preload=False, original_title=''):
if title is not None:
self.title = title
self.original_title = original_title or title
elif pageid is not None:
self.pageid = pageid
else:
raise ValueError("Either a title or a pageid must be specified")
self.sub_wikia = sub_wikia
try:
self.__load(redirect=redirect, preload=preload)
except AttributeError as e:
raise WikiaError("Could not locate page \"{}\" in subwikia \"{}\"".format(title or pageid,
sub_wikia))
if preload:
for prop in ('content', 'summary', 'images', 'references', 'links', 'sections'):
getattr(self, prop)
def __repr__(self):
return stdout_encode(u'<WikiaPage \'{}\'>'.format(self.title))
def __eq__(self, other):
try:
return (
self.pageid == other.pageid
and self.title == other.title
and self.url == other.url
)
except:
return False
def __load(self, redirect=True, preload=False):
'''
Load basic information from Wikia.
Confirm that page exists and is not a disambiguation/redirect.
Does not need to be called manually, should be called automatically during __init__.
'''
query_params = {
'action': 'Articles/Details?/',
'sub_wikia': self.sub_wikia,
'lang': LANG,
}
if not getattr(self, 'pageid', None):
query_params['titles'] = self.title
else:
query_params['ids'] = self.pageid
try:
request = _wiki_request(query_params)
query = list(request['items'].values())[0]
except IndexError:
raise WikiaError("Could not find page \"{}\""
"of the sub-wikia {}".format(self.title or self.pageid,
self.sub_wikia))
self.pageid = query['id']
self.title = query['title']
lang = query_params['lang']
self.url = STANDARD_URL.format(lang=lang, sub_wikia=self.sub_wikia,
page=self.title)
def __continued_query(self, query_params):
'''
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
'''
query_params.update(self.__title_query_param)
last_continue = {}
prop = query_params.get('prop', None)
while True:
params = query_params.copy()
params.update(last_continue)
request = _wiki_request(params)
if 'query' not in request:
break
pages = request['query']['pages']
if 'generator' in query_params:
for datum in pages.values(): # in python 3.3+: "yield from pages.values()"
yield datum
else:
for datum in pages[self.pageid][prop]:
yield datum
if 'continue' not in request:
break
last_continue = request['continue']
@property
def __title_query_param(self):
if getattr(self, 'title', None) is not None:
return {'titles': self.title}
else:
return {'pageids': self.pageid}
def html(self):
'''
Get full page HTML.
.. warning:: This can get pretty slow on long pages.
'''
if not getattr(self, '_html', False):
request = requests.get(self.url)
self._html = request.text
return self._html
@property
def content(self):
'''
Plain text content of each section of the page, excluding images, tables,
and other data.
'''
if not getattr(self, '_content', False):
query_params = {
'action': "Articles/AsSimpleJson?/",
'id': self.pageid,
'sub_wikia': self.sub_wikia,
'lang': LANG
}
request = _wiki_request(query_params)
self._content = "\n".join(segment['text'] for section in request['sections']
for segment in section['content']
if segment['type'] == "paragraph")
return self._content
@property
def revision_id(self):
'''
Revision ID of the page.
The revision ID is a number that uniquely identifies the current
version of the page. It can be used to create the permalink or for
other direct API calls. See `Help:Page history
<http://en.wikia.org/wiki/Wikia:Revision>`_ for more
information.
'''
        if not getattr(self, '_revision_id', False):
query_params = {
'action': "Articles/Details?/",
'ids': self.pageid,
'sub_wikia': self.sub_wikia,
'lang': LANG
}
request = _wiki_request(query_params)
self._revision_id = request['items'][str(self.pageid)]['revision']['id']
return self._revision_id
@property
def summary(self):
'''
Plain text summary of the page.
'''
if not getattr(self, '_summary', False):
self._summary = summary(self.sub_wikia, self.title)
return self._summary
@property
def images(self):
'''
List of URLs of images on the page.
'''
if not getattr(self, '_images', False):
# Get the first round of images
query_params = {
'action': "Articles/AsSimpleJson?/",
'id': str(self.pageid),
'sub_wikia': self.sub_wikia,
'lang': LANG,
}
request = _wiki_request(query_params)
images = [section['images'][0]['src'] for section in request["sections"]
if section['images']]
# Get the second round of images
# This time, have to use a different API call
query_params['action'] = "Articles/Details?/"
query_params['titles'] = self.title # This stops redirects
request = _wiki_request(query_params)
image_thumbnail = request["items"][str(self.pageid)]["thumbnail"]
# Only if there are more pictures to grab
if image_thumbnail:
images.append(image_thumbnail)
# A little URL manipulation is required to get the full sized version
for index, image in enumerate(images):
# Remove the /revision/ fluff after the image url
image = image.partition("/revision/")[0]
image_type = mimetypes.guess_type(image)[0]
image_type = "." + image_type.split("/")[-1]
# JPEG has a special case, where sometimes it is written as "jpg"
if image_type == ".jpeg" and image_type not in image:
image_type = ".jpg"
# Remove the filler around the image url that reduces the size
image = "".join(image.partition(image_type)[:2])
images[index] = image.replace("/thumb/", "/")
self._images = images
return self._images
@property
def related_pages(self):
'''
Lists up to 10 of the wikia URLs of pages related to this page.
'''
if not getattr(self, "_related_pages", False):
query_params = {
'action': "RelatedPages/List?/",
'ids': self.pageid,
'limit': 10,
'sub_wikia': self.sub_wikia,
'lang': LANG,
}
request = _wiki_request(query_params)
self._related_pages = [request['basepath'] + url['url']
for url in request['items'][str(self.pageid)]]
return self._related_pages
@property
def sections(self):
'''
List of section titles from the table of contents on the page.
'''
if not getattr(self, '_sections', False):
query_params = {
'action': 'Articles/AsSimpleJson?/',
'id': self.pageid,
'sub_wikia': self.sub_wikia,
'lang': LANG,
}
request = _wiki_request(query_params)
self._sections = [section['title'] for section in request['sections']]
return self._sections
def section(self, section_title):
'''
Get the plain text content of a section from `self.sections`.
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty.
'''
if section_title not in self.sections:
return None
query_params = {
'action': "Articles/AsSimpleJson?/",
'id': self.pageid,
'sub_wikia': self.sub_wikia,
'lang': LANG
}
request = _wiki_request(query_params)
section = "\n".join(segment['text'] for section in request['sections']
if section['title'] == section_title
for segment in section['content']
if segment['type'] == "paragraph")
return section
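# Editor sketch (hypothetical usage; the sub-wikia and title below are made-up
# examples in the spirit of the docstrings above):
#
#   p = page("runescape", "Dragon")
#   print(p.summary)
#   print(p.sections)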
@cache
def languages():
'''
List all the currently supported language prefixes (usually ISO language code).
  Can be passed to `set_lang` to change the Wikia that `wikia` requests
results from.
Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes,
use `wikia.languages().keys()`.
'''
query_params = {
'action': "WAM/WAMLanguages?/",
'timestamp': time.time(), # Uses the UNIX timestamp to determine available LANGs
'sub_wikia': '',
'lang': LANG
}
request = _wiki_request(query_params)
  return request['languages']
def _wiki_request(params):
'''
Make a request to the Wikia API using the given search parameters.
Returns a parsed dict of the JSON response.
'''
global RATE_LIMIT_LAST_CALL
global USER_AGENT
api_url = API_URL.format(**params)
params['format'] = 'json'
headers = {
'User-Agent': USER_AGENT
}
if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():
# it hasn't been long enough since the last API call
# so wait until we're in the clear to make the request
wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
time.sleep(int(wait_time.total_seconds()))
r = requests.get(api_url, params=params, headers=headers)
if RATE_LIMIT:
RATE_LIMIT_LAST_CALL = datetime.now()
# If getting the json representation did not work, our data is mangled
try:
r = r.json()
except ValueError:
raise WikiaError("Your request to the url \"{url}\" with the paramaters"
"\"{params}\" returned data in a format other than JSON."
"Please check your input data.".format(url=api_url,
params=params))
# If we got a json response, then we know the format of the input was correct
if "exception" in r:
        details, message, error_code = r['exception'].values()
        if error_code == 408:
            raise HTTPTimeoutError(params)
raise WikiaError("{}. {} ({})".format(message, details, error_code))
return r
class WikiaError(Exception):
pass
|
[
"mimetypes.init",
"time.time",
"datetime.timedelta",
"requests.get",
"datetime.datetime.now",
"mimetypes.guess_type"
] |
[((401, 417), 'mimetypes.init', 'mimetypes.init', ([], {}), '()\n', (415, 417), False, 'import mimetypes\n'), ((1267, 1293), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(50)'}), '(milliseconds=50)\n', (1276, 1293), False, 'from datetime import datetime, timedelta\n'), ((15421, 15474), 'requests.get', 'requests.get', (['api_url'], {'params': 'params', 'headers': 'headers'}), '(api_url, params=params, headers=headers)\n', (15433, 15474), False, 'import requests\n'), ((14559, 14570), 'time.time', 'time.time', ([], {}), '()\n', (14568, 14570), False, 'import time\n'), ((15520, 15534), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15532, 15534), False, 'from datetime import datetime, timedelta\n'), ((8653, 8675), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (8665, 8675), False, 'import requests\n'), ((15155, 15169), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15167, 15169), False, 'from datetime import datetime, timedelta\n'), ((15352, 15366), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15364, 15366), False, 'from datetime import datetime, timedelta\n'), ((11523, 11550), 'mimetypes.guess_type', 'mimetypes.guess_type', (['image'], {}), '(image)\n', (11543, 11550), False, 'import mimetypes\n')]
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
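# Editor note (assumption about context): prepending the parent directory to
# sys.path lets the local rubik_solver package be imported without being
# installed, a common pattern in docs conf.py files and test helpers.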
import rubik_solver
from rubik_solver.defs import available_moves
|
[
"os.path.dirname"
] |
[((71, 96), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (86, 96), False, 'import os\n')]
|
"""
*********************************************************
* *
* Project Name: Recursive Fibonacci Sequence *
* Author: github.com/kirigaine *
* Description: A simple program to put in how many *
* numbers of the Fibonacci Sequence you would like. *
* Requirements: Python Standard Library (re) *
* *
*********************************************************
"""
import re
def main():
while True:
# Prompt user, exit program if "-1"
x = userPrompt()
if x == -1:
break
print("------------------------------------------------------------")
print(str(fibonacci(x)))
print("------------------------------------------------------------\n")
def userPrompt():
# Prompt user for nth number to print up to
temp_nth = ""
regex_passed = None
# Accept only integers and negative one
while not regex_passed:
temp_nth = input("How many digits of the Fibonacci Sequence would you like (-1 to quit): ")
        regex_passed = re.search("^(([0-9]+)|(-1))$", temp_nth)
if not regex_passed:
print("Invalid integer. Please try again entering a positive integer (or -1 to quit).\n")
return int(temp_nth)
def fibonacci(x):
# fibonacci(x) == fibonacci(x-1) + fibonacci(x-2)
# ...
# fibonacci(4) = fibonacci(3) + fibonacci(2)
# fibonacci(3) = fibonacci(2) + fibonacci(1)
# fibonacci(2) = 1
# fibonacci(1) = 0
if x is not None:
if x==0:
# If you request none, you get none!
return None
elif x==1:
return 0
elif x==2:
return 1
else:
# print(f"{x-1} {x-2}")
return(fibonacci(x-1) + fibonacci(x-2))
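# Editor sketch (not part of the original program): the naive recursion above
# recomputes subproblems, so its running time grows exponentially with x. A
# memoized variant mirrors the same base cases but runs in linear time:
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=None)
#   def fibonacci_cached(x):
#       if x == 1:
#           return 0
#       if x == 2:
#           return 1
#       return fibonacci_cached(x - 1) + fibonacci_cached(x - 2)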
main()
|
[
"re.search"
] |
[((1175, 1215), 're.search', 're.search', (['"""^(([0-9]*)|(-1))$"""', 'temp_nth'], {}), "('^(([0-9]*)|(-1))$', temp_nth)\n", (1184, 1215), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-06 07:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0004_auto_20170606_0638'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='balanced',
field=models.BooleanField(default=False, verbose_name='balanced'),
),
migrations.AddField(
model_name='transaction',
name='balanced_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='balanced at'),
),
migrations.AddField(
model_name='transaction',
name='datetime',
field=models.DateTimeField(blank=True, null=True, verbose_name='datetime'),
),
]
|
[
"django.db.models.DateTimeField",
"django.db.models.BooleanField"
] |
[((411, 470), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""balanced"""'}), "(default=False, verbose_name='balanced')\n", (430, 470), False, 'from django.db import migrations, models\n'), ((600, 671), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""balanced at"""'}), "(blank=True, null=True, verbose_name='balanced at')\n", (620, 671), False, 'from django.db import migrations, models\n'), ((798, 866), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""datetime"""'}), "(blank=True, null=True, verbose_name='datetime')\n", (818, 866), False, 'from django.db import migrations, models\n')]
|
import os, uuid, sys
from azure.mgmt.iotcentral import IotCentralClient
from azure.mgmt.iotcentral.models import App, AppSkuInfo, AppPatch
from msrestazure.azure_active_directory import MSIAuthentication
from azure.common.credentials import UserPassCredentials, get_azure_cli_credentials
# login with az login
creds = get_azure_cli_credentials()
subId = "FILL IN SUB ID"
appName = "iot-central-app-tocreate"
resourceGroup = "myResourceGroup"
print(creds[0])
print(creds[1])
client = IotCentralClient(creds[0], subId)
result = client.apps.check_name_availability(appName)
print(result)
app = App(location="unitedstates", sku=AppSkuInfo(name="ST2"))
app.subdomain = appName
app.display_name = appName
createResult = client.apps.create_or_update(resourceGroup, appName, app)
print(createResult)
getResult = client.apps.get(resourceGroup, appName)
print(getResult)
updateApp = AppPatch()
updateApp.display_name = appName + "-new-name"
updateResult = client.apps.update(resourceGroup, appName, updateApp)
print(updateResult)
appsInGroup = client.apps.list_by_resource_group(resourceGroup)
appsInGroup.next()
for item in appsInGroup.current_page:
print(item)
# deleteResult = client.apps.delete(resourceGroup, appName)
# print(deleteResult)
print("done")
|
[
"azure.mgmt.iotcentral.IotCentralClient",
"azure.common.credentials.get_azure_cli_credentials",
"azure.mgmt.iotcentral.models.AppPatch",
"azure.mgmt.iotcentral.models.AppSkuInfo"
] |
[((319, 346), 'azure.common.credentials.get_azure_cli_credentials', 'get_azure_cli_credentials', ([], {}), '()\n', (344, 346), False, 'from azure.common.credentials import UserPassCredentials, get_azure_cli_credentials\n'), ((486, 519), 'azure.mgmt.iotcentral.IotCentralClient', 'IotCentralClient', (['creds[0]', 'subId'], {}), '(creds[0], subId)\n', (502, 519), False, 'from azure.mgmt.iotcentral import IotCentralClient\n'), ((882, 892), 'azure.mgmt.iotcentral.models.AppPatch', 'AppPatch', ([], {}), '()\n', (890, 892), False, 'from azure.mgmt.iotcentral.models import App, AppSkuInfo, AppPatch\n'), ((629, 651), 'azure.mgmt.iotcentral.models.AppSkuInfo', 'AppSkuInfo', ([], {'name': '"""ST2"""'}), "(name='ST2')\n", (639, 651), False, 'from azure.mgmt.iotcentral.models import App, AppSkuInfo, AppPatch\n')]
|
import numpy as np
from tqdm import tqdm
from typing import Dict, Union
import torch
import gtimer as gt
import matplotlib
from matplotlib import pyplot as plt
import self_supervised.utils.typed_dicts as td
from self_supervised.base.data_collector.data_collector import \
PathCollectorSelfSupervised
from self_sup_comb_discrete_skills.data_collector.path_collector_discrete_skills import \
PathCollectorSelfSupervisedDiscreteSkills
from self_supervised.memory.self_sup_replay_buffer import \
SelfSupervisedEnvSequenceReplayBuffer
from self_supervised.env_wrapper.rlkit_wrapper import NormalizedBoxEnvWrapper
from self_supervised.base.algo.algo_base import BaseRLAlgorithmSelfSup
from self_supervised.utils.writer import MyWriterWithActivation
import self_sup_combined.utils.typed_dicts as tdssc
from self_sup_combined.base.writer.diagnostics_writer import DiagnosticsWriter
from self_sup_combined.algo.trainer_sac import SelfSupCombSACTrainer
from self_sup_combined.algo.trainer_mode import ModeTrainer
from self_sup_combined.algo.algorithm import SelfSupCombAlgo
from self_sup_comb_discrete_skills.algo.mode_trainer_discrete_skill import \
ModeTrainerWithDiagnosticsDiscrete
from self_sup_comb_discrete_skills.memory.replay_buffer_discrete_skills import \
SelfSupervisedEnvSequenceReplayBufferDiscreteSkills
import self_sup_comb_discrete_skills.utils.typed_dicts as tdsscds
import rlkit.torch.pytorch_util as ptu
from rlkit.core import logger, eval_util
from rlkit.core.rl_algorithm import _get_epoch_timings
matplotlib.use('Agg')
class SelfSupCombAlgoDiscrete(SelfSupCombAlgo):
def __init__(self,
sac_trainer: SelfSupCombSACTrainer,
mode_trainer: ModeTrainerWithDiagnosticsDiscrete,
exploration_env: NormalizedBoxEnvWrapper,
evaluation_env: NormalizedBoxEnvWrapper,
exploration_data_collector: PathCollectorSelfSupervisedDiscreteSkills,
evaluation_data_collector: PathCollectorSelfSupervisedDiscreteSkills,
replay_buffer: SelfSupervisedEnvSequenceReplayBufferDiscreteSkills,
                 diagnostic_writer: DiagnosticsWriter,
**kwargs
):
super().__init__(
sac_trainer=sac_trainer,
mode_trainer=mode_trainer,
exploration_env=exploration_env,
evaluation_env=evaluation_env,
exploration_data_collector=exploration_data_collector,
evaluation_data_collector=evaluation_data_collector,
replay_buffer=replay_buffer,
**kwargs
)
self.mode_dim = self.mode_trainer.model.mode_dim
self.num_skills = self.mode_trainer.num_skills
self.skill_idx_now = 0
assert type(self.mode_trainer) == ModeTrainerWithDiagnosticsDiscrete
self.discrete_skills = self.get_grid()
        self.diagnostic_writer = diagnostic_writer
def _train_mode(self,
train_data: td.TransitonModeMappingDiscreteSkills
):
self.mode_trainer.train(
data=tdsscds.ModeTrainerDataMappingDiscreteSkills(
skills_gt=ptu.from_numpy(train_data.mode),
obs_seq=ptu.from_numpy(train_data.obs),
skill_id=ptu.from_numpy(train_data.skill_id)
)
)
def set_next_skill(self,
path_collector: PathCollectorSelfSupervisedDiscreteSkills):
assert type(path_collector) is PathCollectorSelfSupervisedDiscreteSkills
        # Sample a skill uniformly. np.random.randint's upper bound is
        # exclusive, so passing num_skills (not num_skills - 1) covers all skills.
        skill_idx = np.random.randint(self.num_skills)
skill_vec = self.discrete_skills[skill_idx]
path_collector.set_discrete_skill(
skill_vec=skill_vec,
skill_id=skill_idx,
)
def get_grid(self):
assert type(self.mode_trainer) == ModeTrainerWithDiagnosticsDiscrete
assert self.mode_trainer.num_skills == 10
assert self.mode_trainer.model.mode_dim == 2
# Hard coded for testing
radius1 = 0.75
radius2 = 1.
radius3 = 1.38
grid = np.array([
[0., 0.],
[radius1, 0.],
[0., radius1],
[-radius1, 0.],
[0, -radius1],
[radius2, radius2],
[-radius2, radius2],
[radius2, -radius2],
[-radius2, -radius2],
[0, radius3]
        ], dtype=float)  # np.float was removed from NumPy; the builtin float is equivalent
grid = ptu.from_numpy(grid)
return grid
def _get_paths_mode_influence_test(self):
assert type(self.eval_data_collector) is PathCollectorSelfSupervisedDiscreteSkills
self.eval_data_collector.reset()
for skill_id, discrete_skill in enumerate(self.discrete_skills):
self.eval_data_collector.set_discrete_skill(
skill_vec=discrete_skill,
skill_id=skill_id
)
self.eval_data_collector.collect_new_paths(
seq_len=self.seq_len,
num_seqs=1,
)
mode_influence_eval_paths = self.eval_data_collector.get_epoch_paths()
return mode_influence_eval_paths
def write_mode_influence(self, epoch):
paths = self._get_paths_mode_influence_test()
obs_dim = self.policy.obs_dim
action_dim = self.policy.action_dim
for path in paths:
assert path.obs.shape == (obs_dim, self.seq_len)
assert path.action.shape == (action_dim, self.seq_len)
skill_id = path.skill_id.squeeze()[0]
self.diagnostic_writer.writer.plot_lines(
legend_str=['dim' + str(i) for i in range(obs_dim)],
tb_str="mode influence test: observations/mode {}".format(
skill_id),
#arrays_to_plot=[dim for dim in obs],
arrays_to_plot=path.obs,
step=epoch,
y_lim=[-3, 3]
)
self.diagnostic_writer.writer.plot_lines(
legend_str=["dim {}".format(dim) for dim in range(action_dim)],
tb_str="mode influence test: actions/mode {}".format(
skill_id),
arrays_to_plot=path.action,
step=epoch,
y_lim=[-1.2, 1.2]
)
seq_dim = -1
data_dim = 0
path = path.transpose(seq_dim, data_dim)
rewards = self.trainer.intrinsic_reward_calculator.calc_rewards(
obs_seq=ptu.from_numpy(path.obs).unsqueeze(dim=0),
action_seq=ptu.from_numpy(path.action).unsqueeze(dim=0),
skill_gt=ptu.from_numpy(path.mode).unsqueeze(dim=0)
)
assert rewards.shape == torch.Size((1, self.seq_len, 1))
rewards = rewards.squeeze()
assert rewards.shape == torch.Size((self.seq_len,))
self.diagnostic_writer.writer.plot_lines(
legend_str="skill_id {}".format(skill_id),
tb_str="mode influence test rewards/skill_id {}".format(skill_id),
arrays_to_plot=ptu.get_numpy(rewards),
step=epoch,
y_lim=[-7, 2]
)
def _log_stats(self, epoch):
logger.log("Epoch {} finished".format(epoch), with_timestamp=True)
gt.stamp('logging')
logger.record_dict(_get_epoch_timings())
logger.record_tabular('Epoch', epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
gt.stamp('log outputting')
def _end_epoch(self, epoch):
super()._end_epoch(epoch)
if self.diagnostic_writer.is_log(epoch):
self.write_mode_influence(epoch)
gt.stamp('saving')
self._log_stats(epoch)
|
[
"rlkit.torch.pytorch_util.get_numpy",
"rlkit.core.rl_algorithm._get_epoch_timings",
"rlkit.torch.pytorch_util.from_numpy",
"matplotlib.use",
"numpy.array",
"numpy.random.randint",
"torch.Size",
"gtimer.stamp",
"rlkit.core.logger.record_tabular",
"rlkit.core.logger.dump_tabular"
] |
[((1535, 1556), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1549, 1556), False, 'import matplotlib\n'), ((3585, 3623), 'numpy.random.randint', 'np.random.randint', (['(self.num_skills - 1)'], {}), '(self.num_skills - 1)\n', (3602, 3623), True, 'import numpy as np\n'), ((4117, 4326), 'numpy.array', 'np.array', (['[[0.0, 0.0], [radius1, 0.0], [0.0, radius1], [-radius1, 0.0], [0, -radius1],\n [radius2, radius2], [-radius2, radius2], [radius2, -radius2], [-radius2,\n -radius2], [0, radius3]]'], {'dtype': 'np.float'}), '([[0.0, 0.0], [radius1, 0.0], [0.0, radius1], [-radius1, 0.0], [0, \n -radius1], [radius2, radius2], [-radius2, radius2], [radius2, -radius2],\n [-radius2, -radius2], [0, radius3]], dtype=np.float)\n', (4125, 4326), True, 'import numpy as np\n'), ((4459, 4479), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['grid'], {}), '(grid)\n', (4473, 4479), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((7317, 7336), 'gtimer.stamp', 'gt.stamp', (['"""logging"""'], {}), "('logging')\n", (7325, 7336), True, 'import gtimer as gt\n'), ((7394, 7431), 'rlkit.core.logger.record_tabular', 'logger.record_tabular', (['"""Epoch"""', 'epoch'], {}), "('Epoch', epoch)\n", (7415, 7431), False, 'from rlkit.core import logger, eval_util\n'), ((7440, 7500), 'rlkit.core.logger.dump_tabular', 'logger.dump_tabular', ([], {'with_prefix': '(False)', 'with_timestamp': '(False)'}), '(with_prefix=False, with_timestamp=False)\n', (7459, 7500), False, 'from rlkit.core import logger, eval_util\n'), ((7509, 7535), 'gtimer.stamp', 'gt.stamp', (['"""log outputting"""'], {}), "('log outputting')\n", (7517, 7535), True, 'import gtimer as gt\n'), ((7708, 7726), 'gtimer.stamp', 'gt.stamp', (['"""saving"""'], {}), "('saving')\n", (7716, 7726), True, 'import gtimer as gt\n'), ((7364, 7384), 'rlkit.core.rl_algorithm._get_epoch_timings', '_get_epoch_timings', ([], {}), '()\n', (7382, 7384), False, 'from rlkit.core.rl_algorithm import _get_epoch_timings\n'), ((6738, 6770), 'torch.Size', 'torch.Size', (['(1, self.seq_len, 1)'], {}), '((1, self.seq_len, 1))\n', (6748, 6770), False, 'import torch\n'), ((6847, 6874), 'torch.Size', 'torch.Size', (['(self.seq_len,)'], {}), '((self.seq_len,))\n', (6857, 6874), False, 'import torch\n'), ((7103, 7125), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['rewards'], {}), '(rewards)\n', (7116, 7125), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3196, 3227), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['train_data.mode'], {}), '(train_data.mode)\n', (3210, 3227), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3253, 3283), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['train_data.obs'], {}), '(train_data.obs)\n', (3267, 3283), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3310, 3345), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['train_data.skill_id'], {}), '(train_data.skill_id)\n', (3324, 3345), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6504, 6528), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['path.obs'], {}), '(path.obs)\n', (6518, 6528), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6574, 6601), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['path.action'], {}), '(path.action)\n', (6588, 6601), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6645, 6670), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['path.mode'], {}), '(path.mode)\n', (6659, 6670), True, 'import rlkit.torch.pytorch_util as ptu\n')]
|
import cadquery as cq
import numpy as np
from OCP.Standard import Standard_ConstructionError
def linear_milling_vol(cut, start_point, end_point, mill_diameter):
"""creates the volume that gets milled from linear move
Keyword arguments:
start_point -- [x,y,z] toolcentrepoint mm
end_point -- [x,y,z] toolcentrepoint mm
mill_diameter -- tooldiameter mm
Output:
CADquery Object
"""
assert (start_point[2] == end_point[2] != 0)
alpha = np.arctan2(end_point[1] - start_point[1], end_point[0] - start_point[0])
points = [[start_point[0] + mill_diameter / 2 * np.cos(alpha + np.pi / 2),
start_point[1] + mill_diameter / 2 * np.sin(alpha + np.pi / 2)],
[start_point[0] + mill_diameter / 2 * np.cos(alpha + np.pi),
start_point[1] + mill_diameter / 2 * np.sin(alpha + np.pi)],
[start_point[0] + mill_diameter / 2 * np.cos(alpha - np.pi / 2),
start_point[1] + mill_diameter / 2 * np.sin(alpha - np.pi / 2)],
[end_point[0] + mill_diameter / 2 * np.cos(alpha - np.pi / 2),
end_point[1] + mill_diameter / 2 * np.sin(alpha - np.pi / 2)],
[end_point[0] + mill_diameter / 2 * np.cos(alpha), end_point[1] + mill_diameter / 2 * np.sin(alpha)],
[end_point[0] + mill_diameter / 2 * np.cos(alpha + np.pi / 2),
end_point[1] + mill_diameter / 2 * np.sin(alpha + np.pi / 2)]]
cut = cut.moveTo(points[0][0], points[0][1]).threePointArc(points[1], points[2]).lineTo(points[3][0], points[3][1]) \
.threePointArc(points[4], points[5]).close().extrude(end_point[2])
return cut
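# Usage sketch for the helper above (hypothetical coordinates; the z value must
# be non-zero because the volume is extruded to the tool-centre depth):
#   wp = cq.Workplane("front")
#   vol = linear_milling_vol(wp, [0, 0, -2], [10, 0, -2], mill_diameter=6)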
def circular_milling_vol(cut, start_point, end_point, mill_diameter, arc_centre):
"""creates the volume that gets milled from circular move
Keyword arguments:
start_point -- [x,y,z] toolcentrepoint mm
end_point -- [x,y,z] toolcentrepoint mm
mill_diameter -- tooldiameter mm
    arc_centre -- !!! not yet decided !!! either the radius or the arc centre point
Output:
CADquery Object
"""
    pass  # still empty for now; remove this once implemented
# ...
def draw_and_subtract(moves, workpiece, mill_diameter):
"""gets moves of one timestep
Keyword arguments:
moves -- moves of current timestep
workpiece -- current workpiece
mill_diameter -- Mill Diameter
Output:
    intersection -- virtual chip (the removed material, German: Span)
workpiece -- updated workpiece
"""
cut = cq.Workplane("front")
for move in moves:
if len(move) == 2:
cut = linear_milling_vol(cut, move[0], move[1], mill_diameter)
else:
cut = circular_milling_vol(cut, move[0], move[1], move[2], mill_diameter)
try:
intersection = workpiece.intersect(cut)
intersection.largestDimension()
except Standard_ConstructionError:
intersection = None
    if intersection is not None:
        workpiece = workpiece.cut(cut)
    return intersection, workpiece
def get_param_for_neural_net(moves, workpiece, mill_diameter):
"""appends cutting-simulation-parameters line at csv list
Keyword arguments:
moves -- moves of current timestep
workpiece -- current workpiece
mill_diameter -- Mill Diameter
Output:
    compounded_move -- whether the move is compounded (stitched together from segments)
    alpha -- direction angle of movement
    b_box -- bounding box of the virtual chip; corresponds to the wrap angle (Umschlingungswinkel)
    vol -- volume of virtual chip
    z_hight -- z-height information, still unclear
"""
# inter = intersection
inter, workpiece = draw_and_subtract(moves, workpiece, mill_diameter)
    compounded_move = len(moves) - 1  # catches the case where the move is stitched together from several pieces
    # wrap angle (Umschlingungswinkel) - rotate into the direction of travel: alpha
alpha = np.arctan2(moves[-1][1][1] - moves[0][0][1], moves[-1][1][0] - moves[0][0][0])
shape = inter.val().rotate((0, 0, 0), (0, 0, 1), alpha)
vol = shape.Volume()
    b_box = shape.BoundingBox()  # similar to the wrap angle -> used as the wrap angle
    z_hight = moves[0][0][2]  # still unclear
return [compounded_move, alpha, b_box, vol, z_hight]
|
[
"numpy.sin",
"numpy.arctan2",
"cadquery.Workplane",
"numpy.cos"
] |
[((478, 550), 'numpy.arctan2', 'np.arctan2', (['(end_point[1] - start_point[1])', '(end_point[0] - start_point[0])'], {}), '(end_point[1] - start_point[1], end_point[0] - start_point[0])\n', (488, 550), True, 'import numpy as np\n'), ((2469, 2490), 'cadquery.Workplane', 'cq.Workplane', (['"""front"""'], {}), "('front')\n", (2481, 2490), True, 'import cadquery as cq\n'), ((3768, 3846), 'numpy.arctan2', 'np.arctan2', (['(moves[-1][1][1] - moves[0][0][1])', '(moves[-1][1][0] - moves[0][0][0])'], {}), '(moves[-1][1][1] - moves[0][0][1], moves[-1][1][0] - moves[0][0][0])\n', (3778, 3846), True, 'import numpy as np\n'), ((604, 629), 'numpy.cos', 'np.cos', (['(alpha + np.pi / 2)'], {}), '(alpha + np.pi / 2)\n', (610, 629), True, 'import numpy as np\n'), ((683, 708), 'numpy.sin', 'np.sin', (['(alpha + np.pi / 2)'], {}), '(alpha + np.pi / 2)\n', (689, 708), True, 'import numpy as np\n'), ((763, 784), 'numpy.cos', 'np.cos', (['(alpha + np.pi)'], {}), '(alpha + np.pi)\n', (769, 784), True, 'import numpy as np\n'), ((838, 859), 'numpy.sin', 'np.sin', (['(alpha + np.pi)'], {}), '(alpha + np.pi)\n', (844, 859), True, 'import numpy as np\n'), ((914, 939), 'numpy.cos', 'np.cos', (['(alpha - np.pi / 2)'], {}), '(alpha - np.pi / 2)\n', (920, 939), True, 'import numpy as np\n'), ((993, 1018), 'numpy.sin', 'np.sin', (['(alpha - np.pi / 2)'], {}), '(alpha - np.pi / 2)\n', (999, 1018), True, 'import numpy as np\n'), ((1071, 1096), 'numpy.cos', 'np.cos', (['(alpha - np.pi / 2)'], {}), '(alpha - np.pi / 2)\n', (1077, 1096), True, 'import numpy as np\n'), ((1148, 1173), 'numpy.sin', 'np.sin', (['(alpha - np.pi / 2)'], {}), '(alpha - np.pi / 2)\n', (1154, 1173), True, 'import numpy as np\n'), ((1226, 1239), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1232, 1239), True, 'import numpy as np\n'), ((1276, 1289), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1282, 1289), True, 'import numpy as np\n'), ((1342, 1367), 'numpy.cos', 'np.cos', (['(alpha + np.pi / 2)'], {}), '(alpha + np.pi / 2)\n', (1348, 1367), True, 'import numpy as np\n'), ((1419, 1444), 'numpy.sin', 'np.sin', (['(alpha + np.pi / 2)'], {}), '(alpha + np.pi / 2)\n', (1425, 1444), True, 'import numpy as np\n')]
|
import sqlite3
with sqlite3.connect('new.db') as conn:
cursor = conn.cursor()
cursor.execute(
"""
SELECT population.city, population.population, regions.region
FROM population, regions
WHERE population.city = regions.city
"""
)
rows = cursor.fetchall()
for row in rows:
print("City:", row[0])
print("Population:", row[1])
print("Region:", row[2])
print("---------------------------")
|
[
"sqlite3.connect"
] |
[((21, 46), 'sqlite3.connect', 'sqlite3.connect', (['"""new.db"""'], {}), "('new.db')\n", (36, 46), False, 'import sqlite3\n')]
|
"""
2-input XOR example -- this is most likely the simplest possible example.
"""
from __future__ import print_function
import neat
import multiprocessing
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [ (0.0,), (1.0,), (1.0,), (0.0,)]
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = 4.0
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = net.activate(xi)
genome.fitness -= (output[0] - xo[0]) ** 2
def eval_genome(genome, config):
error = 4.0
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = net.activate(xi)
error -= (output[0] - xo[0]) ** 2
return error
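# eval_genome exists alongside eval_genomes because neat.ParallelEvaluator
# calls a per-genome function and assigns the *returned* value as fitness,
# while the serial fitness function passed to p.run mutates genome.fitness.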
def run():
# Load configuration.
    # Assumption: standard neat-python ships DefaultGenome; SharedGenome is
    # not part of the mainline library.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config-feedforward')
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(False))
pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
# Run until a solution is found.
winner = p.run(pe.evaluate, 100)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
# Show output of the most fit genome against training data.
print('\nOutput:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = winner_net.activate(xi)
print(" input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
if __name__ == '__main__':
run()
|
[
"neat.Config",
"neat.StdOutReporter",
"neat.nn.FeedForwardNetwork.create",
"neat.Population",
"multiprocessing.cpu_count"
] |
[((697, 746), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (730, 746), False, 'import neat\n'), ((940, 1071), 'neat.Config', 'neat.Config', (['neat.SharedGenome', 'neat.DefaultReproduction', 'neat.DefaultSpeciesSet', 'neat.DefaultStagnation', '"""config-feedforward"""'], {}), "(neat.SharedGenome, neat.DefaultReproduction, neat.\n DefaultSpeciesSet, neat.DefaultStagnation, 'config-feedforward')\n", (951, 1071), False, 'import neat\n'), ((1199, 1222), 'neat.Population', 'neat.Population', (['config'], {}), '(config)\n', (1214, 1222), False, 'import neat\n'), ((1671, 1720), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['winner', 'config'], {}), '(winner, config)\n', (1704, 1720), False, 'import neat\n'), ((442, 491), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (475, 491), False, 'import neat\n'), ((1305, 1331), 'neat.StdOutReporter', 'neat.StdOutReporter', (['(False)'], {}), '(False)\n', (1324, 1331), False, 'import neat\n'), ((1366, 1393), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1391, 1393), False, 'import multiprocessing\n')]
|
#!/usr/bin/env python
import requests
import argparse
from colorama import Fore
DEFAULT_URL = "https://automatetheboringstuff.com/files/rj.txt"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--url", default=DEFAULT_URL, type=str)
parser.add_argument("--verbose", default=False)
args = parser.parse_args()
res = requests.get(args.url)
res.raise_for_status()
# Sample data
print("Sample data")
print(Fore.CYAN + res.text[:250] + Fore.RESET)
print("Exited cleanly!")
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"requests.get"
] |
[((173, 198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (196, 198), False, 'import argparse\n'), ((358, 380), 'requests.get', 'requests.get', (['args.url'], {}), '(args.url)\n', (370, 380), False, 'import requests\n')]
|
# -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space which is also referred to as the
decision boundary. A hyperplane is judged 'best' because the distance between
it and the data it separates (the margin) is greatest at that plane.
This is the file where I use scikit-learn to run the algorithm.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
$ python regularSupportVectorMachine.py
Todo:
*
"""
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?', -99999, inplace=True) # make missing attribute values outliers
df.drop(['id'], axis=1, inplace=True)  # remove useless column
X = np.array(df.drop(['class'], axis=1))  # features
y = np.array(df['class']) # labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC()
clf.fit(X_train, y_train)
# Could have saved in a pickle, but not a very large data set.
accuracy = clf.score(X_test, y_test)
print(accuracy)
example1 = [4, 2, 1, 1, 1, 2, 3, 2, 1]
example2 = [4, 2, 1, 2, 2, 2, 3, 2, 1]
example_measures = np.array([example1, example2])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
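# Persisting the trained classifier with pickle, as the comment above suggests
# (a sketch; the filename is arbitrary):
import pickle
with open('svm_clf.pickle', 'wb') as f:
    pickle.dump(clf, f)
with open('svm_clf.pickle', 'rb') as f:
    clf_loaded = pickle.load(f)
print(clf_loaded.score(X_test, y_test))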
|
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.array",
"sklearn.svm.SVC"
] |
[((778, 821), 'pandas.read_csv', 'pd.read_csv', (['"""breast-cancer-wisconsin.data"""'], {}), "('breast-cancer-wisconsin.data')\n", (789, 821), True, 'import pandas as pd\n'), ((1013, 1034), 'numpy.array', 'np.array', (["df['class']"], {}), "(df['class'])\n", (1021, 1034), True, 'import numpy as np\n'), ((1081, 1118), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1097, 1118), False, 'from sklearn.model_selection import train_test_split\n'), ((1126, 1135), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (1133, 1135), False, 'from sklearn import svm\n'), ((1379, 1409), 'numpy.array', 'np.array', (['[example1, example2]'], {}), '([example1, example2])\n', (1387, 1409), True, 'import numpy as np\n')]
|
from app.functions.firestore import is_doc_exist
from app.models.firestore import AnnotationTypeEnum
from typing import Any, Dict, List
from pydantic import BaseModel, ValidationError, root_validator, validator
from app.models.firestore import annot_cls_dict
class RequestTaskUpload(BaseModel):
task_id: str
annotation_type: AnnotationTypeEnum
title: str
question: str
description: str
annotations_data: List
@validator("task_id")
def task_id_is_unique(cls, v) -> str:
if is_doc_exist("tasks", v):
raise ValueError(f"task_id: {v} は既に存在します.")
return v
@validator("task_id")
def task_id_not_contains_slash(cls, v) -> str:
if "/" in v:
            raise ValueError('task_id must not contain "/"')
return v
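# Validation sketch (hypothetical payload; assumes Firestore is reachable for
# the uniqueness check): instantiating the model runs both validators, so a
# task_id containing "/" raises a pydantic ValidationError.
#   RequestTaskUpload(task_id="a/b", annotation_type=..., title="t",
#                     question="q", description="d", annotations_data=[])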
class ResponseTaskUpload(BaseModel):
message: str
task_id: str
annotation_num: int
task_url: str
|
[
"pydantic.validator",
"app.functions.firestore.is_doc_exist"
] |
[((441, 461), 'pydantic.validator', 'validator', (['"""task_id"""'], {}), "('task_id')\n", (450, 461), False, 'from pydantic import BaseModel, ValidationError, root_validator, validator\n'), ((620, 640), 'pydantic.validator', 'validator', (['"""task_id"""'], {}), "('task_id')\n", (629, 640), False, 'from pydantic import BaseModel, ValidationError, root_validator, validator\n'), ((515, 539), 'app.functions.firestore.is_doc_exist', 'is_doc_exist', (['"""tasks"""', 'v'], {}), "('tasks', v)\n", (527, 539), False, 'from app.functions.firestore import is_doc_exist\n')]
|
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import dataLoader as dl
"""
A function which creates train, validation and test dataloaders that can be iterated over.
To do so, you need to structure your data as follows:
root_dir
|_train
|_class_1
|_xxx.png
.....
.....
|_class_n
|_xxx.png
|_validation
|_class_1
|_xxx.png
.....
.....
|_class_n
|_xxx.png
That means that each class has its own directory.
With this structure, the name of the class is taken from the name of the folder!
Parameters
----------
root_dir: (str) "Path to where the data is"
trainbatchsize: (int) batch size for training
validbatchsize: (int) batch size for validation
testbatchsize: (int) batch size for testing the model
"""
def pre_processor(root_dir, trainbatchsize, validbatchsize, testbatchsize):
train_data = datasets.ImageFolder(root_dir + '/New_train')
test_data = datasets.ImageFolder(root_dir + '/New_test')
siamese_train_dataset = dl.SNNTrain(imageFolderDataset = train_data,
transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
transforms.Resize((105,105)),
transforms.ToTensor()]),
# transforms.Normalize([0.4318, 0.4012, 0.3913], [0.2597, 0.2561, 0.2525])]),
should_invert = False)
siamese_test_dataset = dl.SNNTest(imageFolderDataset = test_data,
transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
transforms.Resize((105,105)),
transforms.ToTensor()]),
# transforms.Normalize([0.4318, 0.4012, 0.3913], [0.2597, 0.2561, 0.2525])]),
should_invert = False)
# Train_valid split
train_len = int(0.8*len(siamese_train_dataset)) # 80:20 split
valid_len = len(siamese_train_dataset) - train_len
train_set, val_set = torch.utils.data.random_split(siamese_train_dataset, [train_len, valid_len])
# create the dataloaders
train_loader = DataLoader(train_set, batch_size = trainbatchsize,
shuffle = True
)
valid_loader = DataLoader(val_set, batch_size = validbatchsize,
shuffle = False) # shuffle doesn't matter during validation and testing
test_loader = DataLoader(siamese_test_dataset, batch_size = testbatchsize,
shuffle = False)
return train_loader , valid_loader, test_loader
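# Usage sketch (hypothetical root directory; batch sizes are illustrative):
#   train_loader, valid_loader, test_loader = pre_processor(
#       "./data", trainbatchsize=32, validbatchsize=32, testbatchsize=1)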
|
[
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.Grayscale",
"torch.utils.data.random_split",
"torchvision.transforms.Resize"
] |
[((974, 1019), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (["(root_dir + '/New_train')"], {}), "(root_dir + '/New_train')\n", (994, 1019), False, 'from torchvision import datasets, transforms\n'), ((1036, 1080), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (["(root_dir + '/New_test')"], {}), "(root_dir + '/New_test')\n", (1056, 1080), False, 'from torchvision import datasets, transforms\n'), ((2629, 2705), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['siamese_train_dataset', '[train_len, valid_len]'], {}), '(siamese_train_dataset, [train_len, valid_len])\n', (2658, 2705), False, 'import torch\n'), ((2755, 2817), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'trainbatchsize', 'shuffle': '(True)'}), '(train_set, batch_size=trainbatchsize, shuffle=True)\n', (2765, 2817), False, 'from torch.utils.data import DataLoader\n'), ((2931, 2992), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'validbatchsize', 'shuffle': '(False)'}), '(val_set, batch_size=validbatchsize, shuffle=False)\n', (2941, 2992), False, 'from torch.utils.data import DataLoader\n'), ((3115, 3188), 'torch.utils.data.DataLoader', 'DataLoader', (['siamese_test_dataset'], {'batch_size': 'testbatchsize', 'shuffle': '(False)'}), '(siamese_test_dataset, batch_size=testbatchsize, shuffle=False)\n', (3125, 3188), False, 'from torch.utils.data import DataLoader\n'), ((1231, 1274), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (1251, 1274), False, 'from torchvision import datasets, transforms\n'), ((1352, 1381), 'torchvision.transforms.Resize', 'transforms.Resize', (['(105, 105)'], {}), '((105, 105))\n', (1369, 1381), False, 'from torchvision import datasets, transforms\n'), ((1458, 1479), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1477, 1479), False, 'from torchvision import datasets, transforms\n'), ((1882, 1925), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (1902, 1925), False, 'from torchvision import datasets, transforms\n'), ((2003, 2032), 'torchvision.transforms.Resize', 'transforms.Resize', (['(105, 105)'], {}), '((105, 105))\n', (2020, 2032), False, 'from torchvision import datasets, transforms\n'), ((2109, 2130), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2128, 2130), False, 'from torchvision import datasets, transforms\n')]
|
from rest_framework import viewsets
from socialpy.server.rest.serializers import CategorySerializer, PostSerializer, PostSerializerUrl
from socialpy.server.data.models import Category, Post
class CategoryViewSet(viewsets.ModelViewSet):
"""
    Returns a list of all categories in the db.
"""
queryset = Category.objects.all()
serializer_class = CategorySerializer
class PostViewSet(viewsets.ModelViewSet):
"""
The view set of the post model.
"""
queryset = Post.objects.all()
serializer_class = PostSerializer
def get_serializer_class(self):
if self.request and self.request.accepted_renderer.format == 'api':
return PostSerializerUrl
return PostSerializer
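    # Note: 'api' is the format of DRF's browsable-API renderer, so the
    # URL-based serializer is only served to browser clients.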
|
[
"socialpy.server.data.models.Post.objects.all",
"socialpy.server.data.models.Category.objects.all"
] |
[((315, 337), 'socialpy.server.data.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (335, 337), False, 'from socialpy.server.data.models import Category, Post\n'), ((490, 508), 'socialpy.server.data.models.Post.objects.all', 'Post.objects.all', ([], {}), '()\n', (506, 508), False, 'from socialpy.server.data.models import Category, Post\n')]
|
# Generated by Django 2.1.7 on 2019-04-05 22:00
import uuid
import django.contrib.auth.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import athena.authentication.models
class Migration(migrations.Migration):
initial = True
dependencies = [("edu", "0001_initial")]
operations = [
migrations.CreateModel(
name="User",
fields=[
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"id",
models.UUIDField(
default=uuid.uuid4, primary_key=True, serialize=False
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
),
),
("first_name", models.CharField(blank=True, max_length=30)),
("second_name", models.CharField(blank=True, max_length=30)),
("last_name", models.CharField(blank=True, max_length=30)),
("is_staff", models.BooleanField(default=False)),
("is_superuser", models.BooleanField(default=False)),
("is_active", models.BooleanField(default=True)),
],
options={"abstract": False},
managers=[("objects", athena.authentication.models.UserManager())],
),
migrations.CreateModel(
name="Role",
fields=[
(
"name",
models.CharField(max_length=32, primary_key=True, serialize=False),
)
],
),
migrations.CreateModel(
name="Student",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, primary_key=True, serialize=False
),
),
("cipher", models.CharField(max_length=15, unique=True)),
(
"student_group",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="students",
to="edu.StudentGroup",
),
),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="student",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Teacher",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, primary_key=True, serialize=False
),
),
(
"subjects",
models.ManyToManyField(related_name="teachers", to="edu.Subject"),
),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="teacher",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Tutor",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, primary_key=True, serialize=False
),
),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="tutor",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="user",
name="roles",
field=models.ManyToManyField(
related_name="users", to="authentication.Role"
),
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.db.models.UUIDField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DateTimeField"
] |
[((4909, 4979), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""users"""', 'to': '"""authentication.Role"""'}), "(related_name='users', to='authentication.Role')\n", (4931, 4979), False, 'from django.db import migrations, models\n'), ((478, 535), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""password"""'}), "(max_length=128, verbose_name='password')\n", (494, 535), False, 'from django.db import migrations, models\n'), ((610, 680), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""last login"""'}), "(blank=True, null=True, verbose_name='last login')\n", (630, 680), False, 'from django.db import migrations, models\n'), ((811, 882), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, primary_key=True, serialize=False)\n', (827, 882), False, 'from django.db import migrations, models\n'), ((1487, 1530), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)'}), '(blank=True, max_length=30)\n', (1503, 1530), False, 'from django.db import migrations, models\n'), ((1565, 1608), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)'}), '(blank=True, max_length=30)\n', (1581, 1608), False, 'from django.db import migrations, models\n'), ((1641, 1684), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)'}), '(blank=True, max_length=30)\n', (1657, 1684), False, 'from django.db import migrations, models\n'), ((1716, 1750), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1735, 1750), False, 'from django.db import migrations, models\n'), ((1786, 1820), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1805, 1820), False, 'from django.db import migrations, models\n'), ((1853, 1886), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1872, 1886), False, 'from django.db import migrations, models\n'), ((2180, 2246), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=32, primary_key=True, serialize=False)\n', (2196, 2246), False, 'from django.db import migrations, models\n'), ((2437, 2508), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, primary_key=True, serialize=False)\n', (2453, 2508), False, 'from django.db import migrations, models\n'), ((2602, 2646), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'unique': '(True)'}), '(max_length=15, unique=True)\n', (2618, 2646), False, 'from django.db import migrations, models\n'), ((2724, 2850), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""students"""', 'to': '"""edu.StudentGroup"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='students', to='edu.StudentGroup')\n", (2741, 2850), False, 'from django.db import migrations, models\n'), ((3052, 3174), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""student"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='student', to=settings.AUTH_USER_MODEL)\n", (3072, 3174), False, 'from django.db import migrations, models\n'), ((3498, 3569), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, primary_key=True, serialize=False)\n', (3514, 3569), False, 'from django.db import migrations, models\n'), ((3706, 3771), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""teachers"""', 'to': '"""edu.Subject"""'}), "(related_name='teachers', to='edu.Subject')\n", (3728, 3771), False, 'from django.db import migrations, models\n'), ((3858, 3980), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""teacher"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='teacher', to=settings.AUTH_USER_MODEL)\n", (3878, 3980), False, 'from django.db import migrations, models\n'), ((4302, 4373), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, primary_key=True, serialize=False)\n', (4318, 4373), False, 'from django.db import migrations, models\n'), ((4506, 4626), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""tutor"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='tutor', to=settings.AUTH_USER_MODEL)\n", (4526, 4626), False, 'from django.db import migrations, models\n')]
|
from src.mnist import load_mnist_data
from src.Model import Model
import matplotlib.pyplot as plt
def test_prediction(index, data, model:Model):
current_image = data["inputs"][index]
y_predict = model.predict(current_image)[0]
prediction = (y_predict == y_predict.max()).astype(int)
guess = list(prediction).index(1)
label = data["outputs"][index]
ground_truth = list(label).index(1)
print("Label: ", label)
print("Prediction: ", prediction)
    # Option to skip requiring the correct label, for when we use images made in Paint
if len(label) < 10:
label = "made on paint"
ground_truth = " paint"
print("Label: ", label)
print("Prediction: ", prediction)
plt.gray()
plt.title("Model thinks it is: " + str(guess) + "\nGround truth: " + str(ground_truth))
plt.imshow( current_image.reshape((28, 28)) * 255, interpolation='nearest')
plt.xticks([])
    plt.yticks([])  # hide y ticks to match the x axis
plt.show()
def __main__():
all_data = load_mnist_data()
print("Quantidade de exemplos:", len(all_data["inputs"]))
print("Dimensão da imagem: ", len(all_data["inputs"][0]))
print("Quantidade de digitos: ", len(all_data["outputs"][0]))
# Treinamos com 42 mil exemplos
train_data = {
"inputs" : all_data["inputs" ][:42000],
"outputs": all_data["outputs"][:42000]
}
    # Test with the remaining 28 thousand examples
test_data = {
"inputs" : all_data["inputs" ][42000:],
"outputs": all_data["outputs"][42000:]
}
learning_rate = 0.035
    # 3 epochs is quite little, but it already reaches 95% accuracy, and can get to 98.9% with more training
epochs = 3
model_filename = "model_128x128"
model = Model((784,128,128,10), activation="sigmoid", verbose=0, wr=(-0.5,0.5))
#model = Model.load("./models/" + model_filename)
print("\n> Model Started Training...\n")
model.train(
train_data["inputs"],
train_data["outputs"],
lr = learning_rate, epochs=epochs,
shuffle=True,
autosave=False)
print("> Done.")
model.print()
model.save("./models/" + model_filename)
print("> model saved in: ",model_filename)
    while True:
        index = input("> Choose an image in [0, 28'000): ")
        if not index.isnumeric():
            break
        try:
            test_prediction(int(index), test_data, model)
        except Exception:
            print("> Image index must be in [0, 28'000)\n")
            continue
__main__()
|
[
"matplotlib.pyplot.gray",
"matplotlib.pyplot.show",
"src.mnist.load_mnist_data",
"src.Model.Model",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks"
] |
[((698, 708), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (706, 708), True, 'import matplotlib.pyplot as plt\n'), ((879, 893), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (889, 893), True, 'import matplotlib.pyplot as plt\n'), ((896, 911), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (906, 911), True, 'import matplotlib.pyplot as plt\n'), ((914, 924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (922, 924), True, 'import matplotlib.pyplot as plt\n'), ((964, 981), 'src.mnist.load_mnist_data', 'load_mnist_data', ([], {}), '()\n', (979, 981), False, 'from src.mnist import load_mnist_data\n'), ((1648, 1723), 'src.Model.Model', 'Model', (['(784, 128, 128, 10)'], {'activation': '"""sigmoid"""', 'verbose': '(0)', 'wr': '(-0.5, 0.5)'}), "((784, 128, 128, 10), activation='sigmoid', verbose=0, wr=(-0.5, 0.5))\n", (1653, 1723), False, 'from src.Model import Model\n')]
|
from django.db import models
class SampleKeyword(models.Model):
"""An ontology term associated with a sample in our database"""
name = models.ForeignKey("OntologyTerm", on_delete=models.CASCADE, related_name="+")
sample = models.ForeignKey("Sample", on_delete=models.CASCADE, related_name="keywords")
source = models.ForeignKey("Contribution", on_delete=models.CASCADE)
|
[
"django.db.models.ForeignKey"
] |
[((146, 223), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""OntologyTerm"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('OntologyTerm', on_delete=models.CASCADE, related_name='+')\n", (163, 223), False, 'from django.db import models\n'), ((237, 315), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Sample"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""keywords"""'}), "('Sample', on_delete=models.CASCADE, related_name='keywords')\n", (254, 315), False, 'from django.db import models\n'), ((329, 388), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Contribution"""'], {'on_delete': 'models.CASCADE'}), "('Contribution', on_delete=models.CASCADE)\n", (346, 388), False, 'from django.db import models\n')]
|
import numpy as np
import nudged
from scipy.linalg import eig, sqrtm, norm
from .utils import adjust
def find_linear_projections(X, d, objective, iters=20):
n = X.shape[1]
objective.X = X
XBXT = adjust(objective.XBXT)
sqrtXBXT = np.real(sqrtm(XBXT))
projections = []
selected = []
C = np.zeros((X.shape[0], X.shape[0]))
for i in range(iters):
if i == 0:
XLXT = objective.XLXT
else:
XLXT = objective.XLXT + objective.alpha * C
XLXT = 0.5 * (XLXT + XLXT.T)
XLXT = adjust(XLXT)
ev, eV, *_ = eig(XLXT, XBXT)
ev = np.real(ev)
eV = np.dot(sqrtXBXT, np.real(eV))
if objective.alpha < 0:
ev = -ev
idx = np.argsort(ev)
V = eV[:, idx[0:d]]
for j in range(d):
V[:, j] /= norm(V[:, j])
projections.append(V)
C += V.dot(V.T)
if i == 0 or dissimilar(V, selected, X, objective.threshold):
selected.append(V)
return selected
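# Usage sketch (hypothetical inputs; `objective` must expose X, XBXT, XLXT,
# alpha and threshold as used above):
#   V = find_linear_projections(X, d=2, objective=obj)[0]
#   Y = X.T.dot(V)  # samples projected onto the first selected 2-D subspace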
def dissimilar(V, projections, X, min_threshold, err_threshold=0.8):
VT = V.T
m = 2 - min(map(lambda p: norm(VT.dot(p)), projections))
if m < min_threshold:
return False
Y = X.T.dot(V).tolist()
for p in projections:
Y2 = X.T.dot(p)
affine = nudged.estimate(Y, Y2.tolist())
err = norm(Y2 - np.array(affine.transform(Y))) / norm(Y2)
if err < err_threshold:
return False
return True
|
[
"numpy.zeros",
"scipy.linalg.eig",
"numpy.argsort",
"scipy.linalg.sqrtm",
"scipy.linalg.norm",
"numpy.real"
] |
[((318, 352), 'numpy.zeros', 'np.zeros', (['(X.shape[0], X.shape[0])'], {}), '((X.shape[0], X.shape[0]))\n', (326, 352), True, 'import numpy as np\n'), ((257, 268), 'scipy.linalg.sqrtm', 'sqrtm', (['XBXT'], {}), '(XBXT)\n', (262, 268), False, 'from scipy.linalg import eig, sqrtm, norm\n'), ((594, 609), 'scipy.linalg.eig', 'eig', (['XLXT', 'XBXT'], {}), '(XLXT, XBXT)\n', (597, 609), False, 'from scipy.linalg import eig, sqrtm, norm\n'), ((624, 635), 'numpy.real', 'np.real', (['ev'], {}), '(ev)\n', (631, 635), True, 'import numpy as np\n'), ((747, 761), 'numpy.argsort', 'np.argsort', (['ev'], {}), '(ev)\n', (757, 761), True, 'import numpy as np\n'), ((666, 677), 'numpy.real', 'np.real', (['eV'], {}), '(eV)\n', (673, 677), True, 'import numpy as np\n'), ((841, 854), 'scipy.linalg.norm', 'norm', (['V[:, j]'], {}), '(V[:, j])\n', (845, 854), False, 'from scipy.linalg import eig, sqrtm, norm\n'), ((1409, 1417), 'scipy.linalg.norm', 'norm', (['Y2'], {}), '(Y2)\n', (1413, 1417), False, 'from scipy.linalg import eig, sqrtm, norm\n')]
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_no_special(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Ma'arif"])
def test_id_tokenizer_splits_no_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["(Ma'arif"])
def test_id_tokenizer_splits_prefix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["Ma'arif)"])
def test_id_tokenizer_splits_suffix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_even_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(Ma'arif?)"])
def test_tokenizer_splits_uneven_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize("text,length", [("S.Kom.", 1), ("SKom.", 2), ("(S.Kom.", 2)])
def test_id_tokenizer_splits_prefix_interact(id_tokenizer, text, length):
tokens = id_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["S.Kom.)"])
def test_id_tokenizer_splits_suffix_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(S.Kom.)"])
def test_id_tokenizer_splits_even_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(S.Kom.?)"])
def test_id_tokenizer_splits_uneven_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize(
"text,length", [("gara-gara", 1), ("Jokowi-Ahok", 3), ("Sukarno-Hatta", 3)]
)
def test_id_tokenizer_splits_hyphens(id_tokenizer, text, length):
tokens = id_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_id_tokenizer_splits_numeric_range(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["ini.Budi", "Halo.Bandung"])
def test_id_tokenizer_splits_period_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Halo,Bandung", "satu,dua"])
def test_id_tokenizer_splits_comma_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize("text", ["halo...Bandung", "dia...pergi"])
def test_id_tokenizer_splits_ellipsis_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
def test_id_tokenizer_splits_double_hyphen_infix(id_tokenizer):
tokens = id_tokenizer("<NAME>--<NAME>--melakukan konferensi pers.")
assert len(tokens) == 10
assert tokens[0].text == "Arsene"
assert tokens[1].text == "Wenger"
assert tokens[2].text == "--"
assert tokens[3].text == "manajer"
assert tokens[4].text == "Arsenal"
assert tokens[5].text == "--"
assert tokens[6].text == "melakukan"
assert tokens[7].text == "konferensi"
assert tokens[8].text == "pers"
assert tokens[9].text == "."
|
[
"pytest.mark.parametrize"
] |
[((74, 120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["(Ma\'arif)"]'], {}), '(\'text\', ["(Ma\'arif)"])\n', (97, 120), False, 'import pytest\n'), ((245, 289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["Ma\'arif"]'], {}), '(\'text\', ["Ma\'arif"])\n', (268, 289), False, 'import pytest\n'), ((412, 457), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["(Ma\'arif"]'], {}), '(\'text\', ["(Ma\'arif"])\n', (435, 457), False, 'import pytest\n'), ((584, 629), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["Ma\'arif)"]'], {}), '(\'text\', ["Ma\'arif)"])\n', (607, 629), False, 'import pytest\n'), ((756, 802), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["(Ma\'arif)"]'], {}), '(\'text\', ["(Ma\'arif)"])\n', (779, 802), False, 'import pytest\n'), ((926, 973), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', '["(Ma\'arif?)"]'], {}), '(\'text\', ["(Ma\'arif?)"])\n', (949, 973), False, 'import pytest\n'), ((1096, 1186), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text,length"""', "[('S.Kom.', 1), ('SKom.', 2), ('(S.Kom.', 2)]"], {}), "('text,length', [('S.Kom.', 1), ('SKom.', 2), (\n '(S.Kom.', 2)])\n", (1119, 1186), False, 'import pytest\n'), ((1324, 1368), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['S.Kom.)']"], {}), "('text', ['S.Kom.)'])\n", (1347, 1368), False, 'import pytest\n'), ((1498, 1543), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['(S.Kom.)']"], {}), "('text', ['(S.Kom.)'])\n", (1521, 1543), False, 'import pytest\n'), ((1676, 1722), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['(S.Kom.?)']"], {}), "('text', ['(S.Kom.?)'])\n", (1699, 1722), False, 'import pytest\n'), ((1857, 1962), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text,length"""', "[('gara-gara', 1), ('Jokowi-Ahok', 3), ('Sukarno-Hatta', 3)]"], {}), "('text,length', [('gara-gara', 1), ('Jokowi-Ahok', 3\n ), ('Sukarno-Hatta', 3)])\n", (1880, 1962), False, 'import pytest\n'), ((2098, 2168), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['0.1-13.5', '0.0-0.1', '103.27-300']"], {}), "('text', ['0.1-13.5', '0.0-0.1', '103.27-300'])\n", (2121, 2168), False, 'import pytest\n'), ((2296, 2357), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['ini.Budi', 'Halo.Bandung']"], {}), "('text', ['ini.Budi', 'Halo.Bandung'])\n", (2319, 2357), False, 'import pytest\n'), ((2484, 2545), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['Halo,Bandung', 'satu,dua']"], {}), "('text', ['Halo,Bandung', 'satu,dua'])\n", (2507, 2545), False, 'import pytest\n'), ((2800, 2866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['halo...Bandung', 'dia...pergi']"], {}), "('text', ['halo...Bandung', 'dia...pergi'])\n", (2823, 2866), False, 'import pytest\n')]
|
"""Implementation of the Gated PixelCNN [1].
Gated PixelCNN extends the original PixelCNN [2] by incorporating ideas
motivated by the more effective PixelRNNs. The first extension is to use
GatedActivations (instead of ReLUs) to mimic the gated functions in RNN. The
second extension is to use a two-stream architecture to mitigate the blind spot
introduced by autoregressively masking convolution filters.
We follow the implementation in [3] but use a casually masked GatedPixelCNNLayer
for the input instead of a causally masked Conv2d layer. For efficiency, the
masked Nx1 and 1xN convolutions are implemented via unmasked (N//2+1)x1 and
1x(N//2+1) convolutions with padding and cropping, as suggested in [1].
NOTE: Our implementation does *not* use autoregressive channel masking. This
means that each output depends on whole pixels not sub-pixels. For outputs with
multiple channels, other methods can be used, e.g. [4].
References (used throughout the code):
[1]: https://arxiv.org/abs/1606.05328
[2]: https://arxiv.org/abs/1601.06759
[3]: http://www.scottreed.info/files/iclr2017.pdf
[4]: https://arxiv.org/abs/1701.05517
"""
import torch
from torch import distributions
from torch import nn
from pytorch_generative import nn as pg_nn
from pytorch_generative.models import base
class GatedPixelCNNLayer(nn.Module):
"""A Gated PixelCNN layer.
The layer takes as input 'vstack' and 'hstack' from previous
'GatedPixelCNNLayers' and returns 'vstack', 'hstack', 'skip' where 'skip' is
the skip connection to the pre-logits layer.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, is_causal=False):
"""Initializes a new GatedPixelCNNLayer instance.
Args:
in_channels: The number of channels in the input.
out_channels: The number of output channels.
kernel_size: The size of the (masked) convolutional kernel to use.
is_causal: Whether the 'GatedPixelCNNLayer' is causal. If 'True', the
current pixel is masked out so the computation only depends on pixels to
the left and above. The residual connection in the horizontal stack is
also removed.
"""
super().__init__()
assert kernel_size % 2 == 1, "kernel_size cannot be even"
self._in_channels = in_channels
self._out_channels = out_channels
self._activation = pg_nn.GatedActivation()
self._kernel_size = kernel_size
self._padding = (kernel_size - 1) // 2 # (kernel_size - stride) / 2
self._is_causal = is_causal
# Vertical stack convolutions.
self._vstack_1xN = nn.Conv2d(
in_channels=self._in_channels,
out_channels=self._out_channels,
kernel_size=(1, self._kernel_size),
padding=(0, self._padding),
)
        # TODO(eugenhotaj): Is it better to shift down the vstack_Nx1 output
# instead of adding extra padding to the convolution? When we add extra
# padding, the cropped output rows will no longer line up with the rows of
# the vstack_1x1 output.
self._vstack_Nx1 = nn.Conv2d(
in_channels=self._out_channels,
out_channels=2 * self._out_channels,
kernel_size=(self._kernel_size // 2 + 1, 1),
padding=(self._padding + 1, 0),
)
self._vstack_1x1 = nn.Conv2d(
in_channels=in_channels, out_channels=2 * out_channels, kernel_size=1
)
self._link = nn.Conv2d(
in_channels=2 * out_channels, out_channels=2 * out_channels, kernel_size=1
)
# Horizontal stack convolutions.
self._hstack_1xN = nn.Conv2d(
in_channels=self._in_channels,
out_channels=2 * self._out_channels,
kernel_size=(1, self._kernel_size // 2 + 1),
padding=(0, self._padding + int(self._is_causal)),
)
self._hstack_residual = nn.Conv2d(
in_channels=out_channels, out_channels=out_channels, kernel_size=1
)
self._hstack_skip = nn.Conv2d(
in_channels=out_channels, out_channels=out_channels, kernel_size=1
)
def forward(self, vstack_input, hstack_input):
"""Computes the forward pass.
Args:
vstack_input: The input to the vertical stack.
hstack_input: The input to the horizontal stack.
Returns:
(vstack, hstack, skip) where vstack and hstack are the vertical stack and
horizontal stack outputs respectively and skip is the skip connection
output.
"""
_, _, h, w = vstack_input.shape # Assuming NCHW.
# Compute vertical stack.
vstack = self._vstack_Nx1(self._vstack_1xN(vstack_input))[:, :, :h, :]
link = self._link(vstack)
vstack += self._vstack_1x1(vstack_input)
vstack = self._activation(vstack)
# Compute horizontal stack.
hstack = link + self._hstack_1xN(hstack_input)[:, :, :, :w]
hstack = self._activation(hstack)
skip = self._hstack_skip(hstack)
hstack = self._hstack_residual(hstack)
# NOTE(eugenhotaj): We cannot use a residual connection for causal layers
# otherwise we'll have access to future pixels.
if not self._is_causal:
hstack += hstack_input
return vstack, hstack, skip
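# A minimal shape sanity check for the layer above (a sketch, not part of the
# original module): padding plus cropping keeps both stacks at the input's
# spatial size, and GatedActivation halves 2 * out_channels back down.
def _smoke_test_gated_pixelcnn_layer():
    layer = GatedPixelCNNLayer(in_channels=1, out_channels=4, is_causal=True)
    x = torch.rand(2, 1, 8, 8)
    vstack, hstack, skip = layer(x, x)
    assert vstack.shape == hstack.shape == skip.shape == (2, 4, 8, 8)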
class GatedPixelCNN(base.AutoregressiveModel):
"""The Gated PixelCNN model."""
def __init__(
self,
in_channels=1,
out_channels=1,
n_gated=10,
gated_channels=128,
head_channels=32,
sample_fn=None,
):
"""Initializes a new GatedPixelCNN instance.
Args:
in_channels: The number of input channels.
out_channels: The number of output channels.
n_gated: The number of gated layers (not including the input layers).
gated_channels: The number of channels to use in the gated layers.
head_channels: The number of channels to use in the 1x1 convolution blocks
in the head after all the gated channels.
sample_fn: See the base class.
"""
super().__init__(sample_fn)
self._input = GatedPixelCNNLayer(
in_channels=in_channels,
out_channels=gated_channels,
kernel_size=7,
is_causal=True,
)
self._gated_layers = nn.ModuleList(
[
GatedPixelCNNLayer(
in_channels=gated_channels,
out_channels=gated_channels,
kernel_size=3,
is_causal=False,
)
for _ in range(n_gated)
]
)
self._head = nn.Sequential(
nn.ReLU(),
nn.Conv2d(
in_channels=gated_channels, out_channels=head_channels, kernel_size=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=head_channels, out_channels=out_channels, kernel_size=1
),
)
def forward(self, x):
vstack, hstack, skip_connections = self._input(x, x)
for gated_layer in self._gated_layers:
vstack, hstack, skip = gated_layer(vstack, hstack)
skip_connections += skip
return self._head(skip_connections)
def reproduce(
n_epochs=427, batch_size=128, log_dir="/tmp/run", device="cuda", debug_loader=None
):
"""Training script with defaults to reproduce results.
    The code inside this function is self-contained and can be used as a top-level
training script, e.g. by copy/pasting it into a Jupyter notebook.
Args:
n_epochs: Number of epochs to train for.
batch_size: Batch size to use for training and evaluation.
log_dir: Directory where to log trainer state and TensorBoard summaries.
device: Device to train on (either 'cuda' or 'cpu').
debug_loader: Debug DataLoader which replaces the default training and
evaluation loaders if not 'None'. Do not use unless you're writing unit
tests.
"""
from torch import optim
from torch import distributions
from torch.nn import functional as F
from torch.optim import lr_scheduler
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from pytorch_generative import trainer
from pytorch_generative import models
transform = transforms.Compose(
[transforms.ToTensor(), lambda x: distributions.Bernoulli(probs=x).sample()]
)
train_loader = debug_loader or data.DataLoader(
datasets.MNIST("/tmp/data", train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=8,
)
test_loader = debug_loader or data.DataLoader(
datasets.MNIST("/tmp/data", train=False, download=True, transform=transform),
batch_size=batch_size,
num_workers=8,
)
model = models.GatedPixelCNN(
in_channels=1, out_channels=1, n_gated=10, gated_channels=128, head_channels=32
)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambda _: 0.9999)
def loss_fn(x, _, preds):
batch_size = x.shape[0]
x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
return loss.sum(dim=1).mean()
model_trainer = trainer.Trainer(
model=model,
loss_fn=loss_fn,
optimizer=optimizer,
train_loader=train_loader,
eval_loader=test_loader,
lr_scheduler=scheduler,
log_dir=log_dir,
device=device,
)
model_trainer.interleaved_train_and_eval(n_epochs)
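# Hedged smoke test (editor addition, not part of the original module): a
# tiny CPU-only check that the model maps an image batch to logits of the
# same shape. The 1x28x28 shape mirrors the MNIST setup in `reproduce`;
# the small layer sizes are illustrative.
if __name__ == "__main__":
    import torch

    model = GatedPixelCNN(
        in_channels=1, out_channels=1, n_gated=2, gated_channels=16, head_channels=8
    )
    logits = model(torch.rand(4, 1, 28, 28))  # NCHW
    assert logits.shape == (4, 1, 28, 28)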
|
[
"torch.optim.lr_scheduler.MultiplicativeLR",
"torch.distributions.Bernoulli",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.binary_cross_entropy_with_logits",
"pytorch_generative.trainer.Trainer",
"pytorch_generative.models.GatedPixelCNN",
"pytorch_generative.nn.GatedActivation",
"torchvision.datasets.MNIST",
"torchvision.transforms.ToTensor"
] |
[((9106, 9211), 'pytorch_generative.models.GatedPixelCNN', 'models.GatedPixelCNN', ([], {'in_channels': '(1)', 'out_channels': '(1)', 'n_gated': '(10)', 'gated_channels': '(128)', 'head_channels': '(32)'}), '(in_channels=1, out_channels=1, n_gated=10,\n gated_channels=128, head_channels=32)\n', (9126, 9211), False, 'from pytorch_generative import models\n'), ((9294, 9362), 'torch.optim.lr_scheduler.MultiplicativeLR', 'lr_scheduler.MultiplicativeLR', (['optimizer'], {'lr_lambda': '(lambda _: 0.9999)'}), '(optimizer, lr_lambda=lambda _: 0.9999)\n', (9323, 9362), False, 'from torch.optim import lr_scheduler\n'), ((9637, 9820), 'pytorch_generative.trainer.Trainer', 'trainer.Trainer', ([], {'model': 'model', 'loss_fn': 'loss_fn', 'optimizer': 'optimizer', 'train_loader': 'train_loader', 'eval_loader': 'test_loader', 'lr_scheduler': 'scheduler', 'log_dir': 'log_dir', 'device': 'device'}), '(model=model, loss_fn=loss_fn, optimizer=optimizer,\n train_loader=train_loader, eval_loader=test_loader, lr_scheduler=\n scheduler, log_dir=log_dir, device=device)\n', (9652, 9820), False, 'from pytorch_generative import trainer\n'), ((2436, 2459), 'pytorch_generative.nn.GatedActivation', 'pg_nn.GatedActivation', ([], {}), '()\n', (2457, 2459), True, 'from pytorch_generative import nn as pg_nn\n'), ((2680, 2821), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self._in_channels', 'out_channels': 'self._out_channels', 'kernel_size': '(1, self._kernel_size)', 'padding': '(0, self._padding)'}), '(in_channels=self._in_channels, out_channels=self._out_channels,\n kernel_size=(1, self._kernel_size), padding=(0, self._padding))\n', (2689, 2821), False, 'from torch import nn\n'), ((3181, 3346), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self._out_channels', 'out_channels': '(2 * self._out_channels)', 'kernel_size': '(self._kernel_size // 2 + 1, 1)', 'padding': '(self._padding + 1, 0)'}), '(in_channels=self._out_channels, out_channels=2 * self.\n _out_channels, kernel_size=(self._kernel_size // 2 + 1, 1), padding=(\n self._padding + 1, 0))\n', (3190, 3346), False, 'from torch import nn\n'), ((3423, 3508), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': '(2 * out_channels)', 'kernel_size': '(1)'}), '(in_channels=in_channels, out_channels=2 * out_channels, kernel_size=1\n )\n', (3432, 3508), False, 'from torch import nn\n'), ((3548, 3637), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(2 * out_channels)', 'out_channels': '(2 * out_channels)', 'kernel_size': '(1)'}), '(in_channels=2 * out_channels, out_channels=2 * out_channels,\n kernel_size=1)\n', (3557, 3637), False, 'from torch import nn\n'), ((3990, 4067), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=1)\n', (3999, 4067), False, 'from torch import nn\n'), ((4118, 4195), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=1)\n', (4127, 4195), False, 'from torch import nn\n'), ((9515, 9577), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['preds', 'x'], {'reduction': '"""none"""'}), "(preds, x, reduction='none')\n", (9549, 9577), True, 'from torch.nn import functional as F\n'), ((6860, 6869), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6867, 6869), False, 'from torch import nn\n'), ((6883, 6968), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'gated_channels', 'out_channels': 'head_channels', 'kernel_size': '(1)'}), '(in_channels=gated_channels, out_channels=head_channels, kernel_size=1\n )\n', (6892, 6968), False, 'from torch import nn\n'), ((7007, 7016), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7014, 7016), False, 'from torch import nn\n'), ((7030, 7108), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'head_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)'}), '(in_channels=head_channels, out_channels=out_channels, kernel_size=1)\n', (7039, 7108), False, 'from torch import nn\n'), ((8595, 8616), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8614, 8616), False, 'from torchvision import transforms\n'), ((8737, 8812), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""/tmp/data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "('/tmp/data', train=True, download=True, transform=transform)\n", (8751, 8812), False, 'from torchvision import datasets\n'), ((8955, 9031), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""/tmp/data"""'], {'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "('/tmp/data', train=False, download=True, transform=transform)\n", (8969, 9031), False, 'from torchvision import datasets\n'), ((8628, 8660), 'torch.distributions.Bernoulli', 'distributions.Bernoulli', ([], {'probs': 'x'}), '(probs=x)\n', (8651, 8660), False, 'from torch import distributions\n')]
|
"""
Module to work with objects, specifically dealing with ca_extension functions
"""
import logging
from ctypes import byref, cast, c_ubyte
from _ctypes import POINTER
from pycryptoki.attributes import to_byte_array
from pycryptoki.ca_extensions.session import ca_get_session_info_ex
from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects
from pycryptoki.defines import CKR_OK
from pycryptoki.exceptions import make_error_handle_function
from pycryptoki.common_utils import AutoCArray
LOG = logging.getLogger(__name__)
def ca_get_object_handle(slot, session, objectouid):
"""
Calls CA_GetObjectHandle to get the object handle from OUID
:param slot: partition slot number
:param session: session id that was opened to run the function
    :param objectouid: OUID, a hex string that maps to the object handle
:return: a tuple containing the return code and the object handle mapping the given OUID
"""
objecttype = CK_ULONG()
objecthandle = CK_ULONG()
    # ulContainerNumber is required; it is of type CK_ULONG
container_number = ca_get_session_info_ex(session)["containerNumber"]
ouid, size_ouid = to_byte_array(int(objectouid, 16))
c_ouid = cast(ouid, POINTER(c_ubyte))
ret = CA_GetObjectHandle(
CK_SLOT_ID(slot), container_number, c_ouid, byref(objecttype), byref(objecthandle)
)
if ret != CKR_OK:
return ret, None
return ret, objecthandle.value
ca_get_object_handle_ex = make_error_handle_function(ca_get_object_handle)
def ca_destroy_multiple_objects(h_session, objects):
"""Delete multiple objects corresponding to given object handles
:param int h_session: Session handle
:param list objects: The handles of the objects to delete
:returns: Return code
"""
handles_count = len(objects)
handles = AutoCArray(data=objects, ctype=CK_ULONG)
ret = CA_DestroyMultipleObjects(h_session, handles_count, handles.array, byref(CK_ULONG()))
return ret
ca_destroy_multiple_objects_ex = make_error_handle_function(ca_destroy_multiple_objects)
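# Hedged usage sketch (editor addition): both helpers require a live HSM
# session, so the calls stay commented out. `h_session`, the slot number,
# and the OUID hex string below are hypothetical placeholders.
#   ret, handle = ca_get_object_handle(0, h_session, "31323334")
#   handle = ca_get_object_handle_ex(0, h_session, "31323334")  # raises on a non-CKR_OK return
#   ret = ca_destroy_multiple_objects(h_session, [handle])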
|
[
"pycryptoki.exceptions.make_error_handle_function",
"pycryptoki.common_utils.AutoCArray",
"pycryptoki.ca_extensions.session.ca_get_session_info_ex",
"pycryptoki.cryptoki.CK_SLOT_ID",
"ctypes.byref",
"_ctypes.POINTER",
"pycryptoki.cryptoki.CK_ULONG",
"logging.getLogger"
] |
[((541, 568), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (558, 568), False, 'import logging\n'), ((1518, 1566), 'pycryptoki.exceptions.make_error_handle_function', 'make_error_handle_function', (['ca_get_object_handle'], {}), '(ca_get_object_handle)\n', (1544, 1566), False, 'from pycryptoki.exceptions import make_error_handle_function\n'), ((2063, 2118), 'pycryptoki.exceptions.make_error_handle_function', 'make_error_handle_function', (['ca_destroy_multiple_objects'], {}), '(ca_destroy_multiple_objects)\n', (2089, 2118), False, 'from pycryptoki.exceptions import make_error_handle_function\n'), ((1003, 1013), 'pycryptoki.cryptoki.CK_ULONG', 'CK_ULONG', ([], {}), '()\n', (1011, 1013), False, 'from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects\n'), ((1033, 1043), 'pycryptoki.cryptoki.CK_ULONG', 'CK_ULONG', ([], {}), '()\n', (1041, 1043), False, 'from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects\n'), ((1876, 1916), 'pycryptoki.common_utils.AutoCArray', 'AutoCArray', ([], {'data': 'objects', 'ctype': 'CK_ULONG'}), '(data=objects, ctype=CK_ULONG)\n', (1886, 1916), False, 'from pycryptoki.common_utils import AutoCArray\n'), ((1129, 1160), 'pycryptoki.ca_extensions.session.ca_get_session_info_ex', 'ca_get_session_info_ex', (['session'], {}), '(session)\n', (1151, 1160), False, 'from pycryptoki.ca_extensions.session import ca_get_session_info_ex\n'), ((1261, 1277), '_ctypes.POINTER', 'POINTER', (['c_ubyte'], {}), '(c_ubyte)\n', (1268, 1277), False, 'from _ctypes import POINTER\n'), ((1318, 1334), 'pycryptoki.cryptoki.CK_SLOT_ID', 'CK_SLOT_ID', (['slot'], {}), '(slot)\n', (1328, 1334), False, 'from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects\n'), ((1362, 1379), 'ctypes.byref', 'byref', (['objecttype'], {}), '(objecttype)\n', (1367, 1379), False, 'from ctypes import byref, cast, c_ubyte\n'), ((1381, 1400), 'ctypes.byref', 'byref', (['objecthandle'], {}), '(objecthandle)\n', (1386, 1400), False, 'from ctypes import byref, cast, c_ubyte\n'), ((2000, 2010), 'pycryptoki.cryptoki.CK_ULONG', 'CK_ULONG', ([], {}), '()\n', (2008, 2010), False, 'from pycryptoki.cryptoki import CK_ULONG, CK_SLOT_ID, CA_GetObjectHandle, CA_DestroyMultipleObjects\n')]
|
"""
Export pickled dictionary
"""
import pickle
from source.database.database_entry import Entry
def export():
dictionary = set()
with open('final_dictionary.txt', 'r') as source, open('dictionary', 'wb') as destination:
for line in source:
word, frequency = line.split()
frequency = int(frequency)
dictionary.update([Entry(word, frequency)])
pickle.dump(dictionary, destination)
if __name__ == '__main__':
export()
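# Hedged round-trip sketch (editor addition): the pickled set written by
# export() can be restored with a standard pickle.load on the 'dictionary'
# file created above.
#   with open('dictionary', 'rb') as f:
#       entries = pickle.load(f)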
|
[
"pickle.dump",
"source.database.database_entry.Entry"
] |
[((408, 444), 'pickle.dump', 'pickle.dump', (['dictionary', 'destination'], {}), '(dictionary, destination)\n', (419, 444), False, 'import pickle\n'), ((374, 396), 'source.database.database_entry.Entry', 'Entry', (['word', 'frequency'], {}), '(word, frequency)\n', (379, 396), False, 'from source.database.database_entry import Entry\n')]
|
#!/usr/bin/env python2
import common_pl
if __name__ == '__main__':
common_pl.main()
|
[
"common_pl.main"
] |
[((74, 90), 'common_pl.main', 'common_pl.main', ([], {}), '()\n', (88, 90), False, 'import common_pl\n')]
|
#!/usr/bin/python
# -*- coding:utf8 -*-
"""
A function registered with @pytest.fixture becomes a fixture
that provides a fixture object to test cases.
"""
import pytest
import make_warning
class TestWarns:
def test_make_warn(self):
with pytest.warns(DeprecationWarning):
make_warning.make_warn()
def test_not_warn(self):
with pytest.warns(SyntaxWarning):
make_warning.not_warn()
def test_user_warn(self):
with pytest.warns(UserWarning):
make_warning.make_warn()
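    # Hedged extra check (editor addition): pytest.warns also accepts a
    # `match` regex tested against the warning message. The pattern below is
    # hypothetical, since make_warning's source is not shown here.
    #   def test_warn_message(self):
    #       with pytest.warns(UserWarning, match="deprecated"):
    #           make_warning.make_warn()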
|
[
"pytest.warns",
"make_warning.not_warn",
"make_warning.make_warn"
] |
[((197, 229), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (209, 229), False, 'import pytest\n'), ((243, 267), 'make_warning.make_warn', 'make_warning.make_warn', ([], {}), '()\n', (265, 267), False, 'import make_warning\n'), ((311, 338), 'pytest.warns', 'pytest.warns', (['SyntaxWarning'], {}), '(SyntaxWarning)\n', (323, 338), False, 'import pytest\n'), ((352, 375), 'make_warning.not_warn', 'make_warning.not_warn', ([], {}), '()\n', (373, 375), False, 'import make_warning\n'), ((420, 445), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (432, 445), False, 'import pytest\n'), ((459, 483), 'make_warning.make_warn', 'make_warning.make_warn', ([], {}), '()\n', (481, 483), False, 'import make_warning\n')]
|