text stringlengths 957 885k |
|---|
import datetime
from flask import render_template, flash, redirect
from flask_login import login_required, login_user, logout_user, current_user
from app import app, db, lm, bcrypt
from .forms import SpeakerForm, LoginForm, ChangePwdForm
from .models import Submission, User, Vote
from config import appConfiguration, logger
from sqlalchemy import func
@app.route('/admin')
@login_required
def admin():
    """Render the admin landing page."""
    context = {"settings": appConfiguration}
    return render_template("adminindex.html", **context)
@app.route('/admin/submissions')
@login_required
def adminsubmissions():
    """List every submission on the admin submissions page."""
    all_submissions = Submission.query.all()
    return render_template(
        "adminsubmissions.html",
        settings=appConfiguration,
        submissions=all_submissions,
    )
@app.route('/admin/config')
@login_required
def adminconfig():
    """Show the current application configuration to the admin."""
    context = {"settings": appConfiguration}
    return render_template("adminconfig.html", **context)
@app.route('/admin/changepwd', methods=["GET", "POST"])
@login_required
def changepwd():
    """Let the logged-in admin change their password.

    Verifies the current password, checks the two new-password fields
    match, stores a bcrypt hash of the new password and re-renders the
    admin index.  On any failure a status message is flashed and the
    change-password form is shown again.
    """
    form = ChangePwdForm()
    if form.validate_on_submit():
        user = current_user
        if user:
            if bcrypt.check_password_hash(user.password, form.currentpassword.data):
                if form.newpassword1.data == form.newpassword2.data:
                    # BUG FIX: a "<PASSWORD>" placeholder had replaced the
                    # bcrypt object here, so the new password was never
                    # hashed or stored (the line did not even parse).
                    user.password = bcrypt.generate_password_hash(form.newpassword1.data)
                    db.session.add(user)
                    db.session.commit()
                    return render_template('adminindex.html', settings=appConfiguration)
                else:
                    statusmessage = 'Passwords do not match'
            else:
                statusmessage = 'Invalid password'
        else:
            statusmessage = 'No user'
        flash(statusmessage)
    return render_template('changepwd.html',
                           settings=appConfiguration,
                           form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.get(form.email.data)
if user:
if bcrypt.check_password_hash(user.password, form.password.data):
user.authenticated = True
db.session.add(user)
db.session.commit()
login_user(user, remember=True)
return redirect("admin")
return render_template("login.html", form=form)
@app.route("/logout", methods=["GET"])
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
return redirect("/")
@lm.user_loader
def user_loader(user_id):
    """Flask-Login callback: load a User by primary key to restore a session."""
    return User.query.get(user_id)
@app.route('/admin/speakers', methods=['GET'])
@login_required
def get_speakers():
    """Show each speaker (email) with their submission count, plus totals."""
    items = (db.session.query(Submission.email, func.count(Submission.email))
             .group_by(Submission.email)
             .order_by(Submission.email)
             .all())
    # Each row is (email, count); derive the totals directly instead of
    # accumulating them in a manual counting loop.
    totalSpeakers = len(items)
    totalSubmissions = sum(count for _email, count in items)
    # lazy %-style args so formatting is skipped when INFO is disabled
    logger.info('Found %s speakers with %s submissions.',
                totalSpeakers, totalSubmissions)
    return render_template("adminspeakers.html",
                           items=items,
                           totalSpeakers=totalSpeakers,
                           totalSubmissions=totalSubmissions)
@app.route('/admin/votetotals', methods=['GET'])
@login_required
def get_votetotals():
    """Aggregate vote statistics (count and averages) per submission.

    Joins submissions to their votes and groups by the submission's
    identifying columns, ordered by title.
    """
    # BUG FIX: the original line ended in a stray '|' artifact that broke
    # the syntax; also the repeated .group_by() calls are consolidated.
    items = (db.session.query(
                 Submission.title,
                 Submission.firstName,
                 Submission.lastName,
                 Submission.time,
                 func.count(Vote.id).label("voteCount"),
                 func.avg(Vote.fitsTechfest).label("avgFitsTechfest"),
                 func.avg(Vote.fitsTrack).label("avgIWouldGo"),
                 func.avg(Vote.expectedAttendance).label("avgExpectedAttendance"))
             .join(Vote)
             .group_by(Submission.title,
                       Submission.firstName,
                       Submission.lastName,
                       Submission.time)
             .order_by(Submission.title)
             .all())
    return render_template("adminvotetotals.html", items=items)
# Classes are defined with the "class" keyword.
# "self" refers to the instance being operated on.
# "width" and "height" are instance attributes of Rectangle.
class Rectangle:
    def __init__(self, width, height):
        self.width = width
        self.height = height

# An object is initialised with its attribute values.
r1 = Rectangle(10, 20)
# Attributes of an object can be read...
print(r1.width)
# ...and reassigned.
r1.width = 100
print(r1.width)
# Method can be implemented in a class.
class Rectangle:
    """A rectangle defined by its width and height."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Return the area (width * height)."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, 2 * (width + height)."""
        # BUG FIX: the original computed 2 * (width + width).
        return 2 * (self.width + self.height)

# Methods of a class are easily called upon.
r1 = Rectangle(10, 20)
print(r1.area())
print(r1.perimeter())
# An object can be printed. "__main__" is the current module.
print(str(r1))
# Printed value is just Python's default definition of that object. Printing
# an object along with its properties can be done with a new method ("to_string").
class Rectangle:
    """Rectangle with a textual to_string() representation."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Return the area (width * height)."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, 2 * (width + height)."""
        # BUG FIX: the original computed 2 * (width + width).
        return 2 * (self.width + self.height)

    def to_string(self):
        """Return a human-readable description of this rectangle."""
        return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)

r1 = Rectangle(10, 20)
print(r1.to_string())
# Behaviour of an already defined method can be changed through the use of "magic methods".
# "Magic methods" are special methods that are redefined to add "magic" to your classes.
# They're always surrounded by double underscores (e.g. __init__ or __lt__).
# The behaviour of "str" is now altered to print the object alongside its properties.
class Rectangle:
    """Rectangle with a custom __str__ magic method."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Return the area (width * height)."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, 2 * (width + height)."""
        # BUG FIX: the original computed 2 * (width + width).
        return 2 * (self.width + self.height)

    def __str__(self):
        return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)

r1 = Rectangle(10, 20)
print(str(r1))
# repr() is still the default object representation here.
print(repr(r1))
# This can be changed by altering the "repr" method.
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def area(self):
return self.width * self.height
def perimeter(self):
return 2 * (self.width + self.width)
def __str__(self):
return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)
def __repr__(self):
return "Rectangle({0}, {1}).".format(self.width, self.height)
r1 = Rectangle(10, 20)
print(repr(r1))
# There is no limit on the number of different objects a class can initialize.
r2 = Rectangle(10, 20)
# Even though "r1" and "r2" have the same properties, they are not the same objects.
print(r1 is not r2)
# This has returned "True" since the memory locations of respective objects are different.
# However, checking for equality will return "False", as well.
print(r1 == r2)
# This can be changed by altering the "eq" method.
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def area(self):
return self.width * self.height
def perimeter(self):
return 2 * (self.width + self.width)
def __str__(self):
return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)
def __repr__(self):
return "Rectangle({0}, {1}).".format(self.width, self.height)
def __eq__(self, other):
# "other" needs to be an instance of "Rectangle", same as "self".
if isinstance(other, Rectangle):
return self.width == other.width and self.height == other.height
# Comparing something that is not "Rectangle" with "Rectangle" must return "False".
else:
return False
r1 = Rectangle(10, 20)
r2 = Rectangle(10, 20)
print(r1 == r2)
print(r1 == 100)
# Changing the "lt" method allows for "less than" comparison between "Rectangle" objects.
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def area(self):
return self.width * self.height
def perimeter(self):
return 2 * (self.width + self.width)
def __str__(self):
return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)
def __repr__(self):
return "Rectangle({0}, {1}).".format(self.width, self.height)
def __eq__(self, other):
if isinstance(other, Rectangle):
return self.width == other.width and self.height == other.height
else:
return False
def __lt__(self, other):
if isinstance(other, Rectangle):
return self.area() < other.area()
else:
# Return an error without raising an exception.
return NotImplemented
r1 = Rectangle(10, 20)
r2 = Rectangle(100, 200)
print(r1 < r2)
# This works as well. Even though "gt" method was not implemented, Python tries to flip the values inside comparison and executes "r1 < r2" with "lt".
print(r2 > r1)
# However, calling r1 <= r2 will not work, since methods that perform "<=" or ">=" operations have not been changed.
# It is good practice to define "getters" and "setters" so that properties of
# "Rectangle" cannot be set to invalid (e.g. negative) values.
class Rectangle:
    # A leading "_" marks an attribute as private-by-convention.
    # "_width" and "_height" are pseudo-private properties.
    def __init__(self, width, height):
        self._width = width
        self._height = height

    def get_width(self):
        """Return the (pseudo-private) width."""
        return self._width

    def set_width(self, width):
        """Set the width, rejecting non-positive values."""
        if width <= 0:
            raise ValueError("Width must be positive.")
        else:
            self._width = width

    def get_height(self):
        """Return the (pseudo-private) height."""
        return self._height

    def set_height(self, height):
        """Set the height, rejecting non-positive values."""
        if height <= 0:
            raise ValueError("Height must be positive.")
        else:
            self._height = height

    def __str__(self):
        return "Rectangle with width = {0} and height = {1}.".format(self._width, self._height)

    def __repr__(self):
        return "Rectangle({0}, {1}).".format(self._width, self._height)

    def __eq__(self, other):
        return (isinstance(other, Rectangle)
                and self._width == other._width
                and self._height == other._height)

r1 = Rectangle(10, 20)
# There is no "width" attribute, only "_width"; accessing "_width" directly
# works but is discouraged -- the getter should be used instead.
print(r1.get_width())
print(r1._width)
# "r1.width = -100" would still work -- that is monkey patching, covered later.
# "set_width" validates: setting a non-positive width raises ValueError.
r1.set_width(100)
print(r1)
# "Pythonic" way of writing "getters" and "setters" is done like this.
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
# By not putting "_" in front of "width" and "height", the class will raise an error when trying to instance "r1 = Rectangle(-100, 20)", as well.
# This is due to the fact the "setter" function will be called upon instancing. Writing code in this way enables backward compatibility.
# Function that comes after "@property" is a property.
@property
def width(self):
return self._width
# Different property is in front of the function, so it is possible for them to have the same name.
@width.setter
def width(self, width):
if width <= 0:
raise ValueError("Width must be positive.")
else:
self._width = width
@property
def height(self):
return self._height
@height.setter
def height(self, height):
if height <= 0:
raise ValueError("Height must be positive.")
else:
self._height = height
def __str__(self):
return "Rectangle with width = {0} and height = {1}.".format(self.width, self.height)
def __repr__(self):
return "Rectangle({0}, {1}).".format(self.width, self.height)
def __eq__(self, other):
if isinstance(other, Rectangle):
return self.width == other.width and self.height == other.height
else:
return False
r1 = Rectangle(10, 20)
# Getting the "width" value is possible with its properties defined in the last "Rectangle" class.
print(r1.width)
# Assigning a value will call the function with "@height.setter" property.
r1.width = 100
print(r1)
|
import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from sklearn.model_selection import PredefinedSplit
UNLABELLED_CLASS = -1
def merge_train_dev(train, dev):
    """
    Merge the train and dev `skorch.Dataset` and return the associated
    `sklearn.model_selection.PredefinedSplit`.
    """
    merged_X = np.concatenate((train.X, dev.X))
    merged_y = np.concatenate((train.y, dev.y))
    # -1 marks samples always kept in training; fold 0 is the dev split.
    test_fold = [-1] * len(train) + [0] * len(dev)
    return merged_X, merged_y, PredefinedSplit(test_fold)
def make_ssl_dataset_(supervised, n_labels,
                      unlabeled_class=UNLABELLED_CLASS, seed=123, is_stratify=True):
    """Take a supervised dataset and turn it into an unsupervised one (inplace),
    by giving a special unlabeled class as target."""
    stratify = supervised.targets if is_stratify else None
    idcs_unlabel, idcs_label = train_test_split(list(range(len(supervised))),
                                                stratify=stratify,
                                                test_size=n_labels,
                                                random_state=seed)
    supervised.n_labels = len(idcs_label)
    # `_DatasetSubset` does not support item assignment through `.targets`,
    # so mutate a reference and rebind the whole array via the setter.
    new_targets = supervised.targets
    new_targets[idcs_unlabel] = unlabeled_class
    supervised.targets = new_targets
class _DatasetSubset(Dataset):
    """Helper to split train dataset into train and dev dataset.

    Parameters
    ----------
    to_split: Dataset
        Dataset to subset.
    idx_mapping: array-like
        Indices of the subset.

    Notes
    -----
    - Modified from: https://gist.github.com/Fuchai/12f2321e6c8fa53058f5eb23aeddb6ab
    - Does NOT support modifying length or targets through indexing, i.e.
      `d.targets[1] = -1` doesn't work, but you can do `d.targets = targets`.
    """

    def __init__(self, to_split, idx_mapping):
        # NOTE: __getattr__ below delegates to self.to_split, so attribute
        # assignment order during construction matters.
        self.idx_mapping = idx_mapping
        self.length = len(idx_mapping)
        self.to_split = to_split

    def __getitem__(self, index):
        # Map the subset index back into the wrapped dataset.
        return self.to_split[self.idx_mapping[index]]

    def __len__(self):
        return self.length

    @property
    def targets(self):
        # Wrapped dataset's targets restricted to this subset (fancy indexing).
        return self.to_split.targets[self.idx_mapping]

    @targets.setter
    def targets(self, values):
        # Writes through to the wrapped dataset's target array.
        self.to_split.targets[self.idx_mapping] = values

    @property
    def data(self):
        return self.to_split.data[self.idx_mapping]

    def __getattr__(self, attr):
        # Any attribute not defined here is looked up on the wrapped dataset.
        return getattr(self.to_split, attr)
def train_dev_split(to_split, dev_size=0.1, seed=123, is_stratify=True):
    """Split a training dataset into a training and validation one.

    Parameters
    ----------
    dev_size: float or int
        If float, should be between 0.0 and 1.0 and represent the proportion of
        the dataset to include in the dev split. If int, represents the absolute
        number of dev samples.
    seed: int
        Random seed.
    is_stratify: bool
        Whether to stratify splits based on class label.
    """
    all_indices = list(range(len(to_split)))
    labels = to_split.targets if is_stratify else None
    train_idcs, dev_idcs = train_test_split(all_indices,
                                            stratify=labels,
                                            test_size=dev_size,
                                            random_state=seed)
    return (_DatasetSubset(to_split, train_idcs),
            _DatasetSubset(to_split, dev_idcs))
|
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
# args: harisekhon
#
# Author: <NAME>
# Date: 2016-05-27 13:15:30 +0100 (Fri, 27 May 2016)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Tool to search DockerHub repos and return a configurable number of results
Mimics 'docker search' results format but more flexible
Older Docker CLI didn't support configuring the returned number of search results and always returned 25:
https://github.com/docker/docker/issues/23055
Verbose mode will also show a summary for number of results displayed and total number of results available
Caveat: maxes out at 100 results, to iterate for more than that see dockerhub_search.sh
See also:
dockerhub_search.sh
in the DevOps Bash tools repo - https://github.com/harisekhon/devops-python-tools
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import logging
import os
import sys
import traceback
import urllib
try:
import requests
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, prog, isJson, jsonpp, isInt, validate_int, support_msg_api
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.6.2'
class DockerHubSearch(CLI):
    """CLI tool that queries the DockerHub v1 search API and prints results
    in a 'docker search'-like format.

    NOTE(review): Python 2 only as written — uses `urllib.quote_plus` and
    `str.encode` in prints; py3 would need `urllib.parse.quote_plus`.
    """

    def __init__(self):
        # Python 2.x
        super(DockerHubSearch, self).__init__()
        # Python 3.x
        # super().__init__()
        # name-mangled access to the base class's private option parser
        self._CLI__parser.usage = '{0} [options] TERM'.format(prog)
        self.timeout_default = 30
        self.quiet = False

    def add_options(self):
        """Register the --limit and --quiet command line options."""
        self.add_opt('-l', '--limit', default=50, type=int,
                     help='Number of results to return (default: 50)')
        self.add_opt('-q', '--quiet', action='store_true',
                     help='Output only the image names, one per line (useful for shell scripting)')

    def run(self):
        """Validate the single search-term argument and print the results."""
        if not self.args:
            self.usage('no search term given as args')
        if len(self.args) > 1:
            self.usage('only single search term argument may be given')
        self.quiet = self.get_opt('quiet')
        term = self.args[0]
        log.info('term: %s', term)
        limit = self.get_opt('limit')
        # limit is validated to 1..1000, though the API caps at 100 (see module docstring)
        validate_int(limit, 'limit', 1, 1000)
        self.print_results(self.args[0], limit)

    def print_results(self, term, limit=None):
        """Fetch results for `term` and print them ordered by star count."""
        data = self.search(term, limit)
        results = {}
        longest_name = 8  # minimum NAME column width
        try:
            # collect in dict to order by stars like normal docker search command
            for item in data['results']:
                star = item['star_count']
                name = item['name']
                if len(name) > longest_name:
                    longest_name = len(name)
                if not isInt(star):
                    die("star count '{0}' for repo '{1}' is not an integer! {2}".format(star, name, support_msg_api()))
                # nested dict keyed star -> name -> fields; NOTE(review): the
                # .get() on the next-but-one line is redundant since `result`
                # is assigned over it below anyway.
                results[star] = results.get(star, {})
                results[star][name] = results[star].get(name, {})
                result = {}
                result['description'] = item['description']
                result['official'] = '[OK]' if item['is_official'] else ''
                # docker search doesn't output this so neither will I
                #result['trusted'] = result['is_trusted']
                result['automated'] = '[OK]' if item['is_automated'] else ''
                results[star][name] = result
            # mimicking out spacing from 'docker search' command
            if not self.quiet:
                print('{0:{5}s}   {1:45s}   {2:7s}   {3:8s}   {4:10s}'.
                      format('NAME', 'DESCRIPTION', 'STARS', 'OFFICIAL', 'AUTOMATED', longest_name))
        except KeyError as _:
            die('failed to parse results fields from data returned by DockerHub ' +
                '(format may have changed?): {0}'.format(_))
        except IOError as _:
            # tolerate downstream consumers (e.g. `head`) closing the pipe
            if str(_) == '[Errno 32] Broken pipe':
                pass
            else:
                raise

        def truncate(mystr, length):
            # shorten to `length` chars, ellipsizing the tail
            if len(mystr) > length:
                mystr = mystr[0:length-3] + '...'
            return mystr

        # highest star count first, names alphabetical within a star count
        for star in reversed(sorted(results)):
            for name in sorted(results[star]):
                if self.quiet:
                    print(name.encode('utf-8'))
                else:
                    desc = truncate(results[star][name]['description'], 45)
                    print('{0:{5}s}   {1:45s}   {2:<7d}   {3:8s}   {4:10s}'.
                          format(name.encode('utf-8'), desc.encode('utf-8'), star,
                                 results[star][name]['official'],
                                 results[star][name]['automated'],
                                 longest_name))
        if self.verbose and not self.quiet:
            try:
                print('\nResults Shown: {0}\nTotal Results: {1}'.format(len(data['results']), data['num_results']))
            except KeyError as _:
                die('failed to parse get total results count from data returned by DockerHub ' +
                    '(format may have changed?): {0}'.format(_))

    @staticmethod
    def search(term, limit=25):
        """GET the DockerHub v1 search endpoint and return the parsed JSON."""
        url = 'https://index.docker.io/v1/search?q={0}&n={1}'.format(urllib.quote_plus(term), limit)
        log.debug('GET %s' % url)
        try:
            verify = True
            # workaround for Travis CI and older pythons - we're not exchanging secret data so this is ok
            #if os.getenv('TRAVIS'):
            #    verify = False
            req = requests.get(url, verify=verify)
        except requests.exceptions.RequestException as _:
            die(_)
        log.debug("response: %s %s", req.status_code, req.reason)
        log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
        if req.status_code != 200:
            die("%s %s" % (req.status_code, req.reason))
        if not isJson(req.content):
            die('invalid non-JSON response from DockerHub!')
        if log.isEnabledFor(logging.DEBUG):
            print(jsonpp(req.content), file=sys.stderr)
            print('='*80, file=sys.stderr)
        try:
            data = json.loads(req.content)
        except KeyError as _:
            die('failed to parse output from DockerHub (format may have changed?): {0}'.format(_))
        return data
# Script entry point: delegate to the CLI framework's main().
if __name__ == '__main__':
    DockerHubSearch().main()
|
<gh_stars>0
#!/usr/bin/env python
#===========================================================================
#
# Plot Ray details for KDP analysis
#
#===========================================================================
import os
import sys
import subprocess
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
import math
def main():
    """Parse command-line options, load the initial data file, and show the
    interactive plots.  (Python 2 script — print-statement syntax.)"""

    global options
    global dirPath
    global fileIndex
    global fileList
    global colHeaders
    global colData
    global rayTime
    global elevation
    global azimuth
    global dbzBias
    global accumCorrelation
    global thisScriptName

    thisScriptName = os.path.basename(__file__)

    # parse the command line
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option('--debug',
                      dest='debug', default=False,
                      action="store_true",
                      help='Set debugging on')
    parser.add_option('--verbose',
                      dest='verbose', default=False,
                      action="store_true",
                      help='Set verbose debugging on')
    parser.add_option('--file',
                      dest='initialFilePath',
                      default='/tmp/selfcon_run_files/cal/selfcon_run_20150716-040521.081_srun-361_erun-561_el-000.5_az-027.8_.txt',
                      help='Initial file path')
    parser.add_option('--title',
                      dest='title',
                      default='SELF-CONSISTENCY RUN PLOT',
                      help='Title for plot')
    parser.add_option('--width',
                      dest='figWidthMm',
                      default=600,
                      help='Width of figure in mm')
    parser.add_option('--height',
                      dest='figHeightMm',
                      default=250,
                      help='Height of figure in mm')
    (options, args) = parser.parse_args()

    if (options.verbose == True):
        options.debug = True

    if (options.debug == True):
        print >>sys.stderr, "Running ", thisScriptName
        print >>sys.stderr, " initialFilePath: ", options.initialFilePath

    # read in file list
    readFileList()

    # read in data for self_con results
    if (readFileData() != 0):
        sys.exit(-1)

    # create the plots
    # from there it is interactive for the user
    createPlots()

    # done
    sys.exit(0)
########################################################################
# Read in list of available files in the directory
def readFileList():
    """Populate dirPath/fileList and locate the initial file's index."""

    global dirPath
    global fileIndex
    global fileList

    dirPath = os.path.dirname(options.initialFilePath)
    fileList = os.listdir(dirPath)
    fileList.sort()
    fileIndex = 0
    # find the list entry that matches the initial file path
    for index, file in enumerate(fileList):
        if (options.initialFilePath.find(file) > 0):
            fileIndex = index
            break

    if (options.debug == True):
        print >>sys.stderr, "====>> File list"
        print >>sys.stderr, " Dir path: ", dirPath
        print >>sys.stderr, " Initial file path: ", options.initialFilePath
        print >>sys.stderr, " File index : ", fileIndex
        print >>sys.stderr, " n files : ", len(fileList)
        print >>sys.stderr, " Computed File path: ", getFilePath()

    if (options.verbose == True):
        print >>sys.stderr, " Files: "
        for index, file in enumerate(fileList):
            print >>sys.stderr, " ", index, ": ", file
########################################################################
# Get the path to the current data file
def getFilePath():
    """Return the full path of the file currently selected by fileIndex."""
    return os.path.join(dirPath, fileList[fileIndex])
########################################################################
# Get the name of the current data file
def getFileName():
    """Return just the name of the file currently selected by fileIndex."""
    return fileList[fileIndex]
########################################################################
# Read column-based header and data
def readFileData():
    """Read the current file into the module-level colData dict.

    The first line must be a '#'-prefixed header giving column names;
    subsequent '#' lines carry ray metadata (time, elev, az, dbzBias,
    accumCorrelation); the remaining lines are whitespace-separated
    column data.  Returns 0 on success, -1 on error.
    """

    global colHeaders
    global colData
    global rayTime
    global elevation
    global azimuth
    global dbzBias
    global accumCorrelation

    colHeaders = []
    colData = {}

    fp = open(getFilePath(), 'r')
    lines = fp.readlines()
    fp.close()
    if (len(lines) < 2):
        print >>sys.stderr, "ERROR - no data, file: ", getFilePath()
        return -1

    commentIndex = lines[0].find("#")
    if (commentIndex == 0):
        # header
        colHeaders = lines[0].lstrip("# ").rstrip("\n").split()
        if (options.debug == True):
            print >>sys.stderr, "colHeaders: ", colHeaders
    else:
        print >>sys.stderr, "ERROR - readFileData"
        print >>sys.stderr, " First line does not start with #"
        return -1

    # create data variables (dictionary)
    for index, var in enumerate(colHeaders, start=0):
        colData[var] = []

    # decode a line at a time, set colData
    for line in lines:
        commentIndex = line.find("#")
        if (commentIndex >= 0):
            # metadata comment line: '# time:', '# elev:', '# az:', etc.
            toks = line.strip().split()
            if (len(toks) >= 4 and toks[1] == 'time:'):
                rayTime = toks[2] + '-' + toks[3]
            elif (len(toks) >= 3 and toks[1] == 'elev:'):
                elevation = toks[2]
            elif (len(toks) >= 3 and toks[1] == 'az:'):
                azimuth = toks[2]
            elif (len(toks) >= 3 and toks[1] == 'dbzBias:'):
                dbzBias = toks[2]
            elif (len(toks) >= 3 and toks[1] == 'accumCorrelation:'):
                accumCorrelation = toks[2]
            continue
        # data line: one value per header column; gateNum is int, rest float
        data = line.strip().split()
        for index, var in enumerate(colHeaders, start=0):
            if (var == 'gateNum'):
                colData[var].append(int(data[index]))
            else:
                colData[var].append(float(data[index]))

    return 0
########################################################################
# Key-press event
def press(event):
    """Matplotlib key handler: left/right arrows step through the file list
    and reload/redraw the plots."""

    global fileIndex

    if (options.debug == True):
        print >>sys.stderr, "press: ", event.key

    if (event.key == 'left'):
        if (fileIndex > 0):
            fileIndex = fileIndex - 1
            reloadAndDraw()

    if (event.key == 'right'):
        if (fileIndex < len(fileList) - 1):
            fileIndex = fileIndex + 1
            reloadAndDraw()

    if (options.debug == True):
        print >>sys.stderr, " File index : ", fileIndex
        print >>sys.stderr, " File path : ", getFilePath()
########################################################################
# Create the plots - original instance
def createPlots():
    """Create the figure with 5 side-by-side axes, hook the key handler,
    draw the initial data and show the window."""

    global fig1
    global ax1
    global ax2
    global ax3
    global ax4
    global ax5

    # figure size is given in mm on the command line; matplotlib wants inches
    widthIn = float(options.figWidthMm) / 25.4
    htIn = float(options.figHeightMm) / 25.4

    fig1 = plt.figure(1, (widthIn, htIn))
    fig1.canvas.mpl_connect('key_press_event', press)

    ax1 = fig1.add_subplot(1,5,1,xmargin=0.0)
    ax2 = fig1.add_subplot(1,5,2,xmargin=0.0)
    ax3 = fig1.add_subplot(1,5,3,xmargin=0.0)
    ax4 = fig1.add_subplot(1,5,4,xmargin=0.0)
    ax5 = fig1.add_subplot(1,5,5,xmargin=0.0)

    doPlot()

    fig1.suptitle("SELF-CONSISTENCY RUN ANALYSIS - file " + getFileName())
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    plt.show()
########################################################################
# Reload and redraw - after getting new file
def reloadAndDraw():
    """Re-read the currently selected file and redraw all axes."""

    # read in data for self_con results
    if (readFileData() != 0):
        sys.exit(-1)

    # plot XY
    doPlot()
    plt.draw()
########################################################################
# Plot data on axes
def doPlot():
    """Redraw all five axes from colData for the currently selected file:
    (1) SNR/DBZ, (2) PHIDP, (3) KDP/PSOB, (4) PID, (5) Z vs ZDR scatter."""

    ax1.clear()
    ax2.clear()
    ax3.clear()
    ax4.clear()
    ax5.clear()

    # metadata encoded in the underscore-separated file name tokens
    fileName = fileList[fileIndex]
    nameParts = fileName.split("_")
    timeStr = "Time " + rayTime
    runStartStr = nameParts[3]
    runEndStr = nameParts[4]
    azStr = nameParts[5]
    elStr = nameParts[6]
    gateNum = colData['gateNum']

    # plot 1 - SNR and DBZ
    ax1.set_title(timeStr, fontsize=12)
    ax1.plot(gateNum, colData['snr'], label='snr', color='red', linewidth=1)
    ax1.plot(gateNum, colData['dbzCorr'], label='dbzCorr', color='blue', linewidth=1)
    ax1.plot(gateNum, colData['dbzObs'], label='dbzObs', color='cyan', linewidth=1)
    legend1 = ax1.legend(loc='lower center', ncol=1, columnspacing=0, labelspacing=0)
    for label in legend1.get_texts():
        label.set_fontsize('small')
    ax1.set_xlabel("gateNum")
    ax1.set_ylabel("SNR, DBZ")

    # plot 2 - PHIDP
    phidpEst = np.array(colData['phidpEst']).astype(np.double)
    phidpFilt = np.array(colData['phidpFilt']).astype(np.double)
    phidpFiltMin = np.min(phidpFilt)
    phidpFiltExcess = phidpFilt - phidpFiltMin
    phidpDiff = (phidpEst - phidpFilt) + phidpFiltMin
    # normalised difference; values outside [-2, 2] are blanked to NaN
    phidpDiffNorm = (phidpEst - phidpFilt) / (phidpFilt - phidpFiltMin)
    phidpDiffNorm[phidpDiffNorm < -2.0] = float('NaN')
    phidpDiffNorm[phidpDiffNorm > 2.0] = float('NaN')
    ax2.set_title('dbzBias: ' + dbzBias + ' corr: ' + accumCorrelation, fontsize=12)
    #ax2.plot(gateNum, colData['phidpObs'], label='phidpObs', color='cyan', linewidth=1)
    ax2.plot(gateNum, colData['phidpEst'], label='phidpEst', color='red', linewidth=2)
    ax2.plot(gateNum, colData['phidpFilt'], label='phidpFilt', color='green', linewidth=2)
    ax2.plot(gateNum, colData['phidpCondFilt'], label='phidpCondFilt', color='orange', linewidth=1)
    ax2.plot(gateNum, phidpDiff, label='phidpDiff', color='blue', linewidth=2)
    ax2.plot([gateNum[0], gateNum[-1]], [phidpFiltMin, phidpFiltMin],
             label='phidpFiltMin', color='lightblue', linewidth=1)
    # extend the lower y-limit to leave room for the legend
    minPhidp = min(colData['phidpFilt'])
    maxPhidp = max(colData['phidpFilt'])
    rangePhidp = maxPhidp - minPhidp
    plotMin2 = minPhidp - rangePhidp * 0.3
    ax2.set_ylim(bottom = plotMin2)
    legend2 = ax2.legend(loc='lower right', ncol=2, columnspacing=0, labelspacing=0)
    for label in legend2.get_texts():
        label.set_fontsize('small')
    ax2.set_xlabel("gateNum")
    ax2.set_ylabel("PHIDP (deg)")

    # plot 3 - KDP, PSOB
    ax3.set_title('Elevation = ' + elevation, fontsize=12)
    ax3.plot(gateNum, colData['kdp'], label='kdp', color='red', linewidth=2)
    ax3.plot(gateNum, colData['psob'], label='psob', color='orange', linewidth=1)
    ax3.plot(gateNum, colData['rhohv'], label='rhohv', color='black', linewidth=1)
    ax3.plot(gateNum, colData['zdrObs'], label='ZdrObs', color='cyan', linewidth=1)
    ax3.plot(gateNum, colData['zdrCorr'], label='ZdrCorr', color='blue', linewidth=1)
    ax3.plot(gateNum, colData['zdrTerm'], label='ZdrTerm', color='green', linewidth=2)
    # ax3.plot(gateNum, phidpDiffNorm, label='diffNorm', color='magenta', linewidth=2)
    legend3 = ax3.legend(loc='lower center', ncol=2, columnspacing=0, labelspacing=0)
    for label in legend3.get_texts():
        label.set_fontsize('small')
    ax3.set_xlabel("gateNum")
    ax3.set_ylabel("KDP, PSOB")
    ax3.set_ylim(bottom = -1)

    # plot 4 - PID
    ax4.set_title('Azimuth = ' + azimuth, fontsize=12)
    ax4.plot(gateNum, colData['pid'], label='PID', color='red', linewidth=2)
    legend4 = ax4.legend(loc='lower center', ncol=2, columnspacing=0, labelspacing=0)
    for label in legend4.get_texts():
        label.set_fontsize('small')
    ax4.set_xlabel("gateNum")
    ax4.set_ylabel("PID")
    ax4.set_ylim([0, 7])

    # plot 5 - Z vs ZDR
    ax5.set_title('Z-ZDR')
    ax5.plot(colData['dbzCorr'], colData['zdrCorr'], 'o', label='Z-ZDR', color='red')
    legend5 = ax5.legend(loc='upper left', ncol=2, columnspacing=0, labelspacing=0)
    for label in legend5.get_texts():
        label.set_fontsize('small')
    ax5.set_xlabel("DBZ corrected")
    ax5.set_ylabel("ZDR corrected")
    ax5.set_xlim([10, 55])
    ax5.set_ylim([-0.5, 3.5])

    # title
    fig1.suptitle("SELF-CONSISTENCY RUN ANALYSIS - file " + getFileName())

    return
########################################################################
# Run a command in a shell, wait for it to complete
def runCommand(cmd):
    """Run cmd via the shell and report its exit status.

    NOTE(review): Python 2 only — uses the `except OSError, e` syntax.
    """

    if (options.debug == True):
        print >>sys.stderr, "running cmd:",cmd

    try:
        retcode = subprocess.call(cmd, shell=True)
        if retcode < 0:
            # negative return code means the child was killed by a signal
            print >>sys.stderr, "Child was terminated by signal: ", -retcode
        else:
            if (options.debug == True):
                print >>sys.stderr, "Child returned code: ", retcode
    except OSError, e:
        print >>sys.stderr, "Execution failed:", e
########################################################################
# Run - entry point
if __name__ == "__main__":
    main()
|
from device import *
from usb2iic import *
from logger import *
from bme680_defs import *
import time
from ctypes import *
import platform
import globalvar
from save_data import *
# Initialise the shared global-variable store used across modules.
globalvar._init()

# Serialise access to the BME680 native library across threads.
# (Lock is presumably provided by one of the star imports above — TODO confirm.)
BME680_lock = Lock()

# Automatically load the shared library matching the current OS/architecture.
# If your system is not detected correctly, edit the paths below.
# (Translated from the original Chinese comment.)
if(platform.system()=="Windows"):
    if "64bit" in platform.architecture():
        BME680 = windll.LoadLibrary( "./bme680/libs/win_x64/BME680.dll" )
    else:
        BME680 = windll.LoadLibrary( "./bme680/libs/win_x86/BME680.dll" )
elif(platform.system()=="Darwin"):
    BME680 = cdll.LoadLibrary( "./bme680/libs/Macos/libBME680.dylib" )
elif(platform.system()=="Linux"):
    if 'arm' in platform.machine():
        # no prebuilt ARM library is shipped
        print("unsupported system")
    else:
        if "64bit" in platform.architecture():
            BME680 = cdll.LoadLibrary( "./bme680/libs/Linux_x64/libBME680.so" )
        else:
            BME680 = cdll.LoadLibrary( "./bme680/libs/Linux_x86/libBME680.so" )
else:
    print("unsupported system")
    exit()

# I2C bus index and the BME680's 7-bit slave address.
IICIndex_Sensor = 0
IICAddr_Sensor = 0x77
def Sensor_Init():
    """Configure the USB-I2C bridge and verify the BME680 answers on the bus.

    Terminates the process when the bridge cannot be initialized, the bus
    scan fails, or no device responds at IICAddr_Sensor.
    """
    logger.info('Sensor init')
    logger.info('Initialize i2c')
    # 400 kHz master mode, 7-bit addressing, internal pull-ups enabled.
    IICConfig = IIC_CONFIG()
    IICConfig.ClockSpeed = 400000
    IICConfig.Master = 1
    IICConfig.AddrBits = 7
    IICConfig.EnablePu = 1
    ret = IIC_Init(DevHandles[DevIndex], IICIndex_Sensor, byref(IICConfig))
    if ret != IIC_SUCCESS:
        logger.critical("Initialize iic failed!")
        exit()
    logger.info("Initialize iic success!")
    # Scan the bus and make sure the sensor's address is among the responders.
    SlaveAddr = (c_ushort * 128)()
    SlaveAddrNum = IIC_GetSlaveAddr(DevHandles[DevIndex], IICIndex_Sensor, byref(SlaveAddr))
    if SlaveAddrNum <= 0:
        logger.critical("Get iic address failed!")
        exit()
    logger.info("Get iic address success!")
    SensorConnected = False
    for i in range(0, SlaveAddrNum):
        if SlaveAddr[i] == IICAddr_Sensor:
            SensorConnected = True
            logger.info('Sensor 0x%02X Connected', SlaveAddr[i])
    if not SensorConnected:
        logger.critical("Sensor not connected!")
        exit()
def analyze_sensor_data(data, n_meas):
    """Self-test a burst of BME680 readings.

    The first sample is checked against plausibility windows, every sample
    must carry the gas-measurement-valid flag, and (with at least six
    samples) the centred gas-resistance ratio is verified.  Returns
    BME680_OK when everything passes, otherwise -6.
    """
    # Plausibility windows, in the sensor's raw integer units.
    temp_lo, temp_hi = 0, 6000            # 0 .. 60 degrees Celsius
    press_lo, press_hi = 90000, 110000    # 900 .. 1100 hecto Pascals
    hum_lo, hum_hi = 20000, 80000         # 20 .. 80 % relative humidity
    failures = 0
    first = data[0]
    if not (temp_lo <= first.temperature <= temp_hi):
        failures += 1
    if not (press_lo <= first.pressure <= press_hi):
        failures += 1
    if not (hum_lo <= first.humidity <= hum_hi):
        failures += 1
    # Every sample must have the gas-measurement-valid bit set.
    failures += sum(1 for k in range(n_meas)
                    if not (data[k].status & BME680_GASM_VALID_MSK))
    # Centred resistance ratio around sample 4; only computable with >= 6
    # samples.  With fewer, cent_res stays 0 so the check below counts a
    # failure — this matches the original behaviour.
    cent_res = 0
    if n_meas >= 6:
        cent_res = (data[3].gas_resistance + data[5].gas_resistance) / (2 * data[4].gas_resistance)
    if (cent_res * 5) < 6:
        failures += 1
    if failures > 0:
        logger.error("bme680 data analyze invalid!!!")
        return -6
    return BME680_OK
def Sensor_Read(dev_id, reg_addr, pdata, len):
    """I2C read callback handed to the BME680 C library.

    Writes the register address, reads `len` bytes back in one transaction,
    and copies them into the caller's raw buffer at `pdata`.  Returns the
    bridge driver's status code (0 on success).

    NOTE(review): parameter `len` shadows the builtin; the positional
    signature is fixed by the bme680_com_fptr_t callback type.
    """
    buf = (c_uint8 * 1)()
    buf[0] = reg_addr
    databuf = (c_uint8 * len)()
    # Serialize access to the USB-I2C bridge across threads.
    dev_lock.acquire()
    ret = IIC_WriteReadBytes(DevHandles[DevIndex],IICIndex_Sensor, dev_id, buf, 1, byref(databuf), len, 1000)
    dev_lock.release()
    # Copy the bytes we read into the C library's destination buffer.
    memmove(c_char_p(pdata), byref(databuf), len)
    return ret
def Sensor_Write(dev_id, reg_addr, pdata, len):
    """I2C write callback handed to the BME680 C library.

    Copies `len` bytes from the C-side buffer at `pdata` and sends them,
    prefixed with the register address, in a single bus transaction.
    Returns the bridge driver's status code (0 on success).

    NOTE(review): parameter `len` shadows the builtin; the positional
    signature is fixed by the bme680_com_fptr_t callback type.
    """
    databuf = (c_uint8 * len)()
    memmove(byref(databuf), c_char_p(pdata), len)
    # First byte on the wire is the target register, then the payload.
    buf = (c_uint8 * (len + 1))()
    buf[0] = reg_addr
    for i in range(0, len):
        buf[i+1] = databuf[i]
    dev_lock.acquire()
    ret = IIC_WriteBytes(DevHandles[DevIndex],IICIndex_Sensor, dev_id, buf, (len+1), 1000)
    dev_lock.release()
    return ret
def Sensor_Delay_ms(period):
    """Block for *period* milliseconds (delay callback for the BME680 C library)."""
    time.sleep(period / 1000)
def bme680_thread():
    """Poll the BME680 forever.

    Initializes and configures the sensor through the C library, then loops:
    read temperature/humidity/pressure/gas, derive an indoor-air-quality
    index after a burn-in period, publish the readings via globalvar under
    BME680_lock, and persist a sample roughly every five minutes.
    Intended to run on a daemon thread (see bme680_thread_start).
    """
    # Wire the ctypes device struct to the Python I2C/delay callbacks above.
    dev = bme680_dev()
    dev.dev_id = IICAddr_Sensor
    dev.amb_temp = 25
    dev.read = bme680_com_fptr_t(Sensor_Read)
    dev.write = bme680_com_fptr_t(Sensor_Write)
    dev.intf = BME680_I2C_INTF
    dev.delay_ms = bme680_delay_fptr_t(Sensor_Delay_ms)
    rslt = BME680.bme680_init(byref(dev))
    rslt &= 0xFF  # keep only the low byte of the C library's int8 result
    if rslt != BME680_OK:
        logger.critical("BME680 init faild!\nError Code: 0x%X", rslt)
        exit()
    logger.info("BME680 init succeed :)")
    logger.info("BME680 config")
    # Set the temperature, pressure and humidity oversampling & filter settings
    dev.tph_sett.os_hum = BME680_OS_2X
    dev.tph_sett.os_pres = BME680_OS_4X
    dev.tph_sett.os_temp = BME680_OS_8X
    dev.tph_sett.filter = BME680_FILTER_SIZE_3
    # Select the power mode
    # Must be set before writing the sensor configuration
    dev.power_mode = BME680_FORCED_MODE
    # Set the remaining gas sensor settings and link the heating profile
    dev.gas_sett.run_gas = BME680_ENABLE_GAS_MEAS
    dev.gas_sett.heatr_temp = 320
    dev.gas_sett.heatr_dur = 150
    settings_sel = BME680_OST_SEL | BME680_OSP_SEL | BME680_OSH_SEL | BME680_GAS_SENSOR_SEL | BME680_FILTER_SEL
    # Set the desired sensor configuration
    rslt = BME680.bme680_set_sensor_settings(settings_sel, byref(dev))
    if rslt != BME680_OK:
        logger.error("Set the desired sensor configuration failed!!!")
    # Set the power mode
    rslt = BME680.bme680_set_sensor_mode(byref(dev))
    if rslt != BME680_OK:
        logger.error("Set the power mode failed!!!")
    # Get the total measurement duration so as to sleep or wait till the measurement is complete
    profile_dur = c_uint16()
    BME680.bme680_get_profile_dur(byref(profile_dur), byref(dev))
    # Collect gas resistance burn-in values, then use the average
    # of the last 50 values to set the upper limit for calculating
    # gas_baseline.
    burn_in_time = 300
    burn_in_data = []
    # Set the humidity baseline to 40%, an optimal indoor humidity.
    hum_baseline = 40.0
    # This sets the balance between humidity and gas reading in the
    # calculation of air_quality_score (25:75, humidity:gas)
    hum_weighting = 0.25
    elapsed = 0
    air_quality_index = 0
    savedata_time = 0
    while True:
        start = time.perf_counter()
        # Wait (with margin) for the configured measurement profile to finish.
        Sensor_Delay_ms(profile_dur.value * 2)
        data = (bme680_field_data)()
        rslt = BME680.bme680_get_sensor_data(byref(data), byref(dev))
        if rslt == BME680_OK and ((data.status & BME680_HEAT_STAB_MSK) > 0):
            if burn_in_time > 0:
                gas = data.gas_resistance
                burn_in_data.append(gas)
                if burn_in_time <= elapsed:
                    # Burn-in over: baseline = mean of the last 50 samples.
                    gas_baseline = sum(burn_in_data[-50:]) / 50.0
                    logger.info('Gas baseline: {0} Ohms, humidity baseline: {1:.2f} %RH\n'.format(gas_baseline, hum_baseline))
                burn_in_time -= elapsed
                logger.info('Burn in remain {:.0f}s'.format(burn_in_time))
            temp = data.temperature / 100.0  # degrees Celsius
            hum = data.humidity / 1000.0  # percent relative humidity
            press = pressure = data.pressure / 1000.0  # kPa (raw value is Pa)
            gas = gas_resistance = data.gas_resistance  # Ohms
            if burn_in_time <= 0:
                hum_offset = hum - hum_baseline
                gas_offset = gas_baseline - gas
                # Calculate hum_score as the distance from the hum_baseline.
                if hum_offset > 0:
                    hum_score = (100 - hum_baseline - hum_offset)
                    hum_score /= (100 - hum_baseline)
                    hum_score *= (hum_weighting * 100)
                else:
                    hum_score = (hum_baseline + hum_offset)
                    hum_score /= hum_baseline
                    hum_score *= (hum_weighting * 100)
                # Calculate gas_score as the distance from the gas_baseline.
                if gas_offset > 0:
                    gas_score = (gas / gas_baseline)
                    gas_score *= (100 - (hum_weighting * 100))
                else:
                    gas_score = 100 - (hum_weighting * 100)
                # Calculate air_quality_score.
                air_quality_score = hum_score + gas_score
                # Calculate air_quality_index. From 0 (Good) to 500 (Bad).
                air_quality_index = 500 * (1- (air_quality_score/100))
            seaLevel = 1013.25 # hPa
            atmospheric = pressure * 10.0  # kPa -> hPa
            altitude = 44330.0 * (1.0 - pow(atmospheric / seaLevel, 0.1903))  # meters (barometric formula)
            # Publish the latest readings under the module lock.
            BME680_lock.acquire()
            globalvar.set_value('BME680_Temperature', temp)
            globalvar.set_value('BME680_Humidity', hum)
            globalvar.set_value('BME680_Pressure', press)
            globalvar.set_value('BME680_AQI', air_quality_index)
            BME680_lock.release()
            # Persist a sample roughly every 5 minutes.
            # NOTE(review): savedata_time is set to "now + 300s" after each
            # save, so this looks like a next-save deadline — confirm.
            if start > savedata_time:
                savedata_time = time.perf_counter()
                savedata_time += 5*60
                save_data(temp, hum, press, gas_resistance, air_quality_index)
            # (verbose per-sample logging left disabled)
        elif (data.status & BME680_HEAT_STAB_MSK) == 0:
            logger.warn('Sensor data still not stable!')
        else:
            logger.error("Get the sensor data failed!!!")
        elapsed = (time.perf_counter() - start)
        # Trigger the next measurement if you would like to read data out continuously
        if (dev.power_mode == BME680_FORCED_MODE):
            rslt = BME680.bme680_set_sensor_mode(byref(dev))
def bme680_thread_start():
    """Launch the BME680 polling loop on a background daemon thread.

    daemon=True (instead of the deprecated Thread.setDaemon()) so the
    sampling loop cannot keep the interpreter alive on shutdown.
    """
    worker = Thread(target=bme680_thread, daemon=True)
    worker.start()
|
<reponame>juliusf/Neurogenesis<gh_stars>1-10
import re
import sys
import os
from neurogenesis.util import Logger
def extract(scalars_file, simulations):
    """Read the wanted scalar names from *scalars_file* and harvest their
    values from each simulation's .sca result file into simulation.results.

    Exits the process when a simulation has no result file.  Returns the
    (mutated) *simulations* mapping.
    """
    # Text mode: the scalar names must compare equal to the (str) lines of
    # the result files below; the old "rb" mode yielded bytes on Python 3.
    with open(scalars_file, "r") as handle:
        scalars = [line.rstrip() for line in handle]
    for simulation in simulations.values():
        # Track which requested scalars were actually found for this run.
        scalar_encountered = dict(zip(scalars, [False] * len(scalars)))
        try:
            path = None
            resultdir = os.path.join(simulation.path, "results")
            for entry in os.listdir(resultdir):
                if entry.endswith('.sca'):
                    path = os.path.join(resultdir, entry)
                    break
            if path is None:
                raise IOError('could not find result file!')
            with open(path) as result_file:
                for line in result_file:
                    if line.startswith("scalar"):
                        for scalar in scalars:
                            if scalar in line:
                                simulation.results[scalar] = extract_scalar_value(line)
                                scalar_encountered[scalar] = True
                    elif line.startswith("param"):
                        for scalar in scalars:
                            if scalar in line:
                                simulation.results[scalar] = extract_parameter_value(line)
                                scalar_encountered[scalar] = True
            for name, found in scalar_encountered.items():
                if not found:
                    Logger.warning("Couldn't find scalar %s in result file %s!" % (name, path))
        except IOError as e:
            Logger.error("Results file for simulation: %s not found! error: %s" % (simulation.path, e.strerror))
            sys.exit(-1)
    Logger.info("Extracted scalars of %s simulations." % (len(simulations.values())))
    return simulations
def extract_scalar_value(line):
    """Return the last whitespace-separated token of a 'scalar' line as a float."""
    return float(line.split()[-1])
def extract_parameter_value(line):
    """Parse the value field of a 'param' result line.

    Quoted values take the text between the last pair of double quotes;
    unquoted values take the last space-separated token.  Everything except
    digits, 'e', '-' and '.' is then stripped; if the float conversion still
    fails, the stripped string itself is returned.
    """
    stripped = line.strip()
    if stripped.endswith('"'):
        raw = line.split('"')[-2]
    else:
        raw = stripped.split(" ")[-1]
    cleaned = re.sub("[^0-9e\-\.]", "", raw)  # drop all non-numeric characters
    try:
        return float(cleaned)
    except ValueError:
        return cleaned
def extract_vectors(vector_file, simulations):
    """Harvest the vectors matching the filters in *vector_file* from each
    simulation's .vec result file into simulation.result_vectors.

    Exits the process when a simulation has no result file.  Returns the
    (mutated) *simulations* mapping.
    """
    # Text mode: the filter patterns must compare equal to str lines below;
    # the old "rb" mode yielded bytes on Python 3.
    with open(vector_file, "r") as handle:
        vectors = [vector.rstrip() for vector in handle]
    encountered_filters = {}
    for simulation in simulations.values():
        try:
            path = None
            resultdir = os.path.join(simulation.path, "results")
            for entry in os.listdir(resultdir):
                if entry.endswith('.vec'):
                    path = os.path.join(resultdir, entry)
                    break
            if path is None:
                raise IOError('could not find result file!')
            with open(path) as result_vector:  # valid till omnet5.0
                values = {}
                values_names = {}
                for line in result_vector:
                    if line.startswith("vector"):
                        # Header: "vector <nr> <module> <name...>"
                        nr, module = line.split(" ")[1:3]
                        name = " ".join(line.split(" ")[3:])  # allow vector names with spaces
                        name = name.split(":vector")[0]
                        name = name.rstrip("ETV\n")
                        name = name.strip()
                        if check_match(vectors, module, name, encountered_filters):
                            values[nr] = []
                            values_names[nr] = (module, name)
                    elif len(line.strip()) > 0 and line.split()[0] in values:
                        # Data row for a tracked vector: keep up to 3 floats.
                        v = [float(x) for x in line.split()[1:4]]
                        values[line.split()[0]].append(v)
                for k in values_names:
                    simulation.result_vectors[values_names[k]] = values[k]
        except IOError as e:
            Logger.error("Results file for simulation: %s not found! error: %s" % (simulation.path, e.strerror))
            sys.exit(-1)
    # items() replaces the Python-2-only iteritems(), which crashes on Python 3.
    for k, v in encountered_filters.items():
        if not v:
            Logger.warning("Extractor couldn't find match for filter: %s" % (k))
        else:
            Logger.info("Extractor found match for filter %s" % (k))
    return simulations
def check_match(patterns, module, name, encountered_filters):
    """Return True for the first pattern containing both *module* and *name*.

    Side effect: every pattern inspected is registered in
    *encountered_filters*; a matching pattern is flagged True.  Falls
    through (returning None) when nothing matches.
    """
    for candidate in patterns:
        encountered_filters.setdefault(candidate, False)
        if module in candidate and name in candidate:
            encountered_filters[candidate] = True
            return True
|
<reponame>abollu779/brain_language_nlp
import numpy as np
from scipy import stats
import torch
import torch.nn as nn
from torch.optim import Optimizer
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MLPEncodingModel(nn.Module):
    """Feed-forward ReLU encoder with exactly one or two hidden layers.

    The `is_mlp_allvoxels` flag is accepted for API compatibility but is
    not used by the network itself.
    """
    def __init__(self, input_size, hidden_sizes, output_size, is_mlp_allvoxels = False):
        super(MLPEncodingModel, self).__init__()
        # Only one- or two-hidden-layer configurations are supported.
        if len(hidden_sizes) != 2:
            assert len(hidden_sizes) == 1
        layers = []
        fan_in = input_size
        for width in hidden_sizes:
            layers += [nn.Linear(fan_in, width), nn.ReLU()]
            fan_in = width
        layers.append(nn.Linear(fan_in, output_size))
        self.model = nn.Sequential(*layers)

    def forward(self, features):
        """Apply the MLP to *features* and return the raw linear outputs."""
        return self.model(features)
########################################
# CUSTOM OPTIMIZER #
# Copied SGD optimizer definition from
# pytorch docs and adjusted to use
# optimal lambdas and lrs per voxel
########################################
"""
IMPORTANT:
This criterion should only be used with mlp_allvoxels architecture (where output layer is of size num_voxels).
Using it on any other model would probably result in unpredictable results as it was designed with this use in mind.
"""
class SGD_by_voxel(Optimizer):
    """SGD with per-voxel learning rates / weight decays on the output layer.

    Intended only for the mlp_allvoxels architecture, where the output layer
    has one unit per voxel: output-side parameters are updated with the
    per-voxel `lrs` / `weight_decays` arrays, while all other parameters use
    the modal (most common) values.

    NOTE(review): parameters are told apart by ``p.shape[0] == 640``, i.e.
    the hidden width is assumed to be 640 — confirm against the model config.
    """

    @staticmethod
    def _modal_value(arr):
        """Most common value of a 1-D numpy array (ties -> smallest value).

        Replaces scipy.stats.mode, whose return shape changed across scipy
        versions; np.unique sorts ascending and argmax keeps the first
        maximum, matching scipy's smallest-most-common tie-breaking.
        """
        vals, counts = np.unique(arr, return_counts=True)
        return vals[np.argmax(counts)]

    def __init__(self, params, lrs=None, momentum=0, dampening=0,
                 weight_decays=None, nesterov=False):
        if lrs is None or type(lrs).__module__ != np.__name__:
            raise ValueError("Need to enter a valid np array of learning rates: {}".format(lrs))
        if (lrs < 0.0).sum() > 0:
            raise ValueError("Invalid learning rate detected (< 0): {}".format(lrs))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decays is not None and type(weight_decays).__module__ != np.__name__:
            raise ValueError("Weight decays must be provided as a np array: {}".format(weight_decays))
        if weight_decays is not None and (weight_decays < 0.0).sum() > 0:
            raise ValueError("Invalid weight_decay value detected: {}".format(weight_decays))
        run_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        lr_mode = self._modal_value(lrs)
        lrs = torch.from_numpy(lrs).to(run_device)
        weight_decay_mode = None
        if weight_decays is not None:
            weight_decay_mode = self._modal_value(weight_decays)
            weight_decays = torch.from_numpy(weight_decays).to(run_device)
        defaults = dict(lrs=lrs, momentum=momentum, dampening=dampening,
                        weight_decays=weight_decays, nesterov=nesterov,
                        lr_mode=lr_mode, weight_decay_mode=weight_decay_mode)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_by_voxel, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD_by_voxel, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            lrs = group['lrs']
            weight_decays = group['weight_decays']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                if weight_decays is not None:
                    if p.shape[0] == 640:  # Input -> Hidden Weights: modal decay
                        d_p = d_p.add(p, alpha=2 * group['weight_decay_mode'])
                    else:  # Hidden -> Output Weights: per-voxel decay
                        d_p = d_p + (torch.mul(p.T, 2 * weight_decays)).T
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                # BUG FIX: the update must mutate the parameter in place.
                # The original `p = p.add(...)` / `p = p + ...` only rebound
                # the loop variable, so parameters were never updated.
                if p.shape[0] == 640:  # Input -> Hidden Weights
                    p.add_(d_p, alpha=-group['lr_mode'])
                else:  # Hidden -> Output Weights: one lr per output row
                    p.copy_(p + (torch.mul(d_p.T, -lrs)).T)
        return loss
"""
Title: Neural style transfer
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/11
Last modified: 2020/05/02
Description: Transferring the style of a reference image to target image using gradient descent.
"""
"""
## Introduction
Style transfer consists in generating an image
with the same "content" as a base image, but with the
"style" of a different picture (typically artistic).
This is achieved through the optimization of a loss function
that has 3 components: "style loss", "content loss",
and "total variation loss":
- The total variation loss imposes local spatial continuity between
the pixels of the combination image, giving it visual coherence.
- The style loss is where the deep learning keeps in --that one is defined
using a deep convolutional neural network. Precisely, it consists in a sum of
L2 distances between the Gram matrices of the representations of
the base image and the style reference image, extracted from
different layers of a convnet (trained on ImageNet). The general idea
is to capture color/texture information at different spatial
scales (fairly large scales --defined by the depth of the layer considered).
- The content loss is a L2 distance between the features of the base
image (extracted from a deep layer) and the features of the combination image,
keeping the generated image close enough to the original one.
**Reference:** [A Neural Algorithm of Artistic Style](
http://arxiv.org/abs/1508.06576)
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg19
# Download the content and style images once into the Keras cache.
base_image_path = keras.utils.get_file("paris.jpg", "https://i.imgur.com/F28w3Ac.jpg")
style_reference_image_path = keras.utils.get_file(
    "starry_night.jpg", "https://i.imgur.com/9ooB60I.jpg"
)
result_prefix = "paris_generated"
# Weights of the different loss components
total_variation_weight = 1e-6
style_weight = 1e-6
content_weight = 2.5e-8
# Dimensions of the generated picture.
width, height = keras.preprocessing.image.load_img(base_image_path).size
img_nrows = 400
img_ncols = int(width * img_nrows / height)  # preserve the content image's aspect ratio
"""
## Let's take a look at our base (content) image and our style reference image
"""
# Preview the two input images inline (notebook/IPython context).
from IPython.display import Image, display
display(Image(base_image_path))
display(Image(style_reference_image_path))
"""
## Image preprocessing / deprocessing utilities
"""
def preprocess_image(image_path):
    """Open *image_path*, resize it to (img_nrows, img_ncols) and return a
    VGG19-ready tensor with a leading batch axis of size 1."""
    loaded = keras.preprocessing.image.load_img(
        image_path, target_size=(img_nrows, img_ncols)
    )
    # Add the batch dimension, then apply VGG19's mean/channel preprocessing.
    batch = keras.preprocessing.image.img_to_array(loaded)[None, ...]
    return tf.convert_to_tensor(vgg19.preprocess_input(batch))
def deprocess_image(x):
    """Convert a flat VGG19-preprocessed array back into a uint8 RGB image.

    Works on a copy: reshape() can return a view of the caller's array, and
    the in-place += below would otherwise write through into it.
    """
    x = x.reshape((img_nrows, img_ncols, 3)).copy()
    # Remove zero-center by mean pixel (ImageNet BGR channel means).
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype("uint8")
    return x
"""
## Compute the style transfer loss
First, we need to define 4 utility functions:
- `gram_matrix` (used to compute the style loss)
- The `style_loss` function, which keeps the generated image close to the local textures
of the style reference image
- The `content_loss` function, which keeps the high-level representation of the
generated image close to that of the base image
- The `total_variation_loss` function, a regularization loss which keeps the generated
image locally-coherent
"""
# The gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    """Channel-by-channel correlation matrix of an (H, W, C) feature map."""
    channels_first = tf.transpose(x, (2, 0, 1))
    flat = tf.reshape(channels_first, (tf.shape(channels_first)[0], -1))
    return tf.matmul(flat, tf.transpose(flat))
# The "style loss" is designed to maintain the style of the reference image
# in the generated image, via the gram matrices (which capture style) of
# feature maps from the style reference image and the generated image.
def style_loss(style, combination):
    """L2 distance between the Gram matrices of the two feature maps,
    normalized by channel count and spatial size."""
    channels = 3
    size = img_nrows * img_ncols
    diff = gram_matrix(style) - gram_matrix(combination)
    return tf.reduce_sum(tf.square(diff)) / (4.0 * (channels ** 2) * (size ** 2))
# An auxiliary loss function designed to maintain the "content" of the
# base image in the generated image.
def content_loss(base, combination):
    """Sum of squared differences between the two feature tensors."""
    return tf.reduce_sum(tf.square(combination - base))
# The 3rd loss function, total variation loss, keeps the generated image
# locally coherent (penalizes high-frequency pixel-to-pixel jumps).
def total_variation_loss(x):
    """Sum of (squared vertical + horizontal differences) ** 1.25."""
    core = x[:, : img_nrows - 1, : img_ncols - 1, :]
    dy = tf.square(core - x[:, 1:, : img_ncols - 1, :])
    dx = tf.square(core - x[:, : img_nrows - 1, 1:, :])
    return tf.reduce_sum(tf.pow(dy + dx, 1.25))
"""
Next, let's create a feature extraction model that retrieves the intermediate activations
of VGG19 (as a dict, by name).
"""
# Build a VGG19 model loaded with pre-trained ImageNet weights
model = vgg19.VGG19(weights="imagenet", include_top=False)
# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# Set up a model that returns the activation values for every layer in
# VGG19 (as a dict).
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
"""
Finally, here's the code that computes the style transfer loss.
"""
# List of layers to use for the style loss.
# Early conv layers capture fine textures; deeper ones capture larger-scale style.
style_layer_names = [
    "block1_conv1",
    "block2_conv1",
    "block3_conv1",
    "block4_conv1",
    "block5_conv1",
]
# The layer to use for the content loss.
content_layer_name = "block5_conv2"
def compute_loss(combination_image, base_image, style_reference_image):
    """Total style-transfer loss (content + style + total variation) for the
    current combination image."""
    stacked = tf.concat(
        [base_image, style_reference_image, combination_image], axis=0
    )
    features = feature_extractor(stacked)
    # Batch indices after the concat above: 0 = base, 1 = style, 2 = combination.
    loss = tf.zeros(shape=())
    # Content term.
    content_features = features[content_layer_name]
    loss = loss + content_weight * content_loss(
        content_features[0, :, :, :], content_features[2, :, :, :]
    )
    # Style term, averaged over the selected layers.
    style_scale = style_weight / len(style_layer_names)
    for layer_name in style_layer_names:
        layer_features = features[layer_name]
        loss += style_scale * style_loss(
            layer_features[1, :, :, :], layer_features[2, :, :, :]
        )
    # Regularization term.
    loss += total_variation_weight * total_variation_loss(combination_image)
    return loss
"""
## Add a tf.function decorator to loss & gradient computation
To compile it, and thus make it fast.
"""
@tf.function
def compute_loss_and_grads(combination_image, base_image, style_reference_image):
    """Return (loss, d(loss)/d(combination_image)); graph-compiled by tf.function."""
    with tf.GradientTape() as tape:
        loss = compute_loss(combination_image, base_image, style_reference_image)
    grads = tape.gradient(loss, combination_image)
    return loss, grads
"""
## The training loop
Repeatedly run vanilla gradient descent steps to minimize the loss, and save the
resulting image every 100 iterations.
We decay the learning rate by 0.96 every 100 steps.
"""
# Plain SGD with a learning rate decayed by 0.96 every 100 steps.
optimizer = keras.optimizers.SGD(
    keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96
    )
)
base_image = preprocess_image(base_image_path)
style_reference_image = preprocess_image(style_reference_image_path)
# Start the optimization from the content image itself.
combination_image = tf.Variable(preprocess_image(base_image_path))
iterations = 4000
for i in range(1, iterations + 1):
    loss, grads = compute_loss_and_grads(
        combination_image, base_image, style_reference_image
    )
    optimizer.apply_gradients([(grads, combination_image)])
    # Save a snapshot of the generated image every 100 iterations.
    if i % 100 == 0:
        print("Iteration %d: loss=%.2f" % (i, loss))
        img = deprocess_image(combination_image.numpy())
        fname = result_prefix + "_at_iteration_%d.png" % i
        keras.preprocessing.image.save_img(fname, img)
"""
After 4000 iterations, you get the following result:
"""
display(Image(result_prefix + "_at_iteration_4000.png"))
|
<reponame>martimunicoy/offpele-benchmarks
class MoleculeMinimized:
    """
    It contains all the tools to minimize a molecule with the OpenForceField toolkit for PELE.

    NOTE(review): several paths below are hard-coded to specific user home
    directories (PELE_SRC, VACUUM_CF, OBC_CF) — these need to be made
    configurable before use outside that environment.
    """
    def __init__(self, input_file, PELE_version):
        """
        It initializes a MoleculeMinimized object.

        Parameters:
        ----------
        input_file: str
            PDB file with the parameters of the molecule.
        PELE_version: str
            path of an executable version of PELE

        Examples:
        ----------
        Load a molecule from a PDB file and minimize it in the vacuum and OBC solvent.

        >>> import MoleculeMinimized as MM
        >>> new_molecule = MM.MoleculeMinimized('ligand.pdb', '/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6')
        >>> new_molecule.minimize(input_file = 'ligand.pdb', PELE_version = '/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6')
        """
        self.input_file = input_file
        self.PELE_version = PELE_version
    def _output_folder(self,input_file):
        """
        It creates an 'output' folder with a copy of the ligand's pdb (as
        ligand.pdb) where all the results will be saved.
        """
        from pathlib import Path
        import shutil
        import os
        # Handle the path of the input file.
        p = Path(input_file)
        file, folder = p.name, p.parents[0]
        # It makes the output directory
        os.makedirs('output',exist_ok = True)
        shutil.copy(p, os.path.join(os.getcwd(),'output', 'ligand.pdb'))
    def _generate_parameters(self):
        """
        It generates the parameters of the molecule (from the input_file) as DataLocal in the output folder.
        """
        import peleffy
        from peleffy.topology import Molecule
        from peleffy.template import Impact
        from peleffy.solvent import OBC2
        from peleffy.main import handle_output_paths
        import os
        # Forcefield and charges method
        forcefield = 'openff_unconstrained-1.2.0.offxml'
        charges_method = 'am1bcc'
        # Create representation of a particular molecule
        PATH_molecule = os.path.join(os.getcwd(),'output', 'ligand.pdb')
        molecule = Molecule(PATH_molecule)
        # Saving paths
        rotamer_library_output_path, impact_output_path, solvent_output_path = \
            handle_output_paths(molecule = molecule, output =os.path.join(os.getcwd(),'output'), as_datalocal = True )
        # Generate its rotamer library
        rotamer_library = peleffy.topology.RotamerLibrary(molecule)
        rotamer_library.to_file(rotamer_library_output_path)
        # Generate its parameters and template file
        molecule.parameterize(forcefield, charges_method=charges_method)
        impact = Impact(molecule)
        impact.write(impact_output_path)
        # Generate its solvent parameters
        solvent = OBC2(molecule)
        solvent.to_json_file(solvent_output_path)
    def _link_folders(self):
        """
        It links the necessary PELE Data/Documents folders into the output folder.
        """
        import os
        PELE_SRC = '/home/municoy/repos/PELE-repo/'
        os.symlink('{}Data'.format(PELE_SRC), os.path.join(os.getcwd(),'output','Data'))
        os.symlink('{}Documents'.format(PELE_SRC), os.path.join(os.getcwd(),'output', 'Documents'))
    def minimize(self,input_file, PELE_version):
        """
        It minimizes the molecule with the Open Force Field toolkit for PELE.

        Runs a vacuum and an OBC-solvent minimization, then renames the
        'output' folder after the molecule.

        Parameters:
        ----------
        input_file: str
            PDB file with the parameters of the molecule.
        PELE_version: str
            path of an executable version of PELE
        """
        import os
        import requests
        from pathlib import Path
        VACUUM_CF = '/home/lauramalo/repos/peleffy-benchmarks/benchmarks/solvent/Conf/VACUUM_minimization.conf'
        OBC_CF = '/home/lauramalo/repos/peleffy-benchmarks/benchmarks/solvent/Conf/OBC_minimization.conf'
        self._output_folder(input_file)
        self._link_folders()
        self._generate_parameters()
        # Minimization
        os.chdir("./output/")
        os.system(" %s %s > VACUUM_minimization.out" % (PELE_version, VACUUM_CF))
        os.system(" %s %s > OBC_minimization.out" % (PELE_version, OBC_CF))
        # Rename the output folder to the molecule name
        os.chdir("..")
        p = Path(input_file)
        file, folder = p.name, p.parents[0]
        new_folder = os.path.splitext(file)[0]
        os.rename('output', new_folder)
|
# -*- coding: utf-8 -*-
import torch
import time
import numpy as np
from collections import namedtuple
from duelling_network import DuellingDQN
N_Step_Transition = namedtuple('N_Step_Transition', ['S_t', 'A_t', 'R_ttpB', 'Gamma_ttpB', 'qS_t', 'S_tpn', 'qS_tpn', 'key'])
class Learner(object):
    """Ape-X style learner: trains a duelling double-DQN from a shared
    prioritized replay memory and publishes its weights via shared_state."""

    def __init__(self, env_conf, learner_params, shared_state, shared_replay_memory):
        self.state_shape = env_conf['state_shape']
        action_dim = env_conf['action_dim']
        self.params = learner_params
        self.shared_state = shared_state
        self.Q = DuellingDQN(self.state_shape, action_dim)
        self.Q_double = DuellingDQN(self.state_shape, action_dim)  # Target Q network which is slow moving replica of self.Q
        if self.params['load_saved_state']:
            try:
                saved_state = torch.load(self.params['load_saved_state'])
                self.Q.load_state_dict(saved_state['Q_state'])
            except FileNotFoundError:
                print("WARNING: No trained model found. Training from scratch")
        self.shared_state["Q_state_dict"] = self.Q.state_dict()
        self.replay_memory = shared_replay_memory
        # NOTE(review): weight_decay=0.95 is unusually large for RMSprop —
        # 0.95 is normally the squared-gradient momentum (alpha); verify.
        self.optimizer = torch.optim.RMSprop(self.Q.parameters(), lr=0.00025 / 4, weight_decay=0.95, eps=1.5e-7)
        self.num_q_updates = 0

    def compute_loss_and_priorities(self, xp_batch):
        """
        Computes the double-Q learning loss and the proportional experience priorities.
        :param xp_batch: list of experiences of type N_Step_Transition
        :return: double-Q learning loss and the proportional experience priorities
        """
        n_step_transitions = N_Step_Transition(*zip(*xp_batch))
        # Convert tuple to numpy array; Convert observations(S_t and S_tpn) to c x w x h torch Tensors (aka Variable)
        S_t = torch.from_numpy(np.array(n_step_transitions.S_t)).float().requires_grad_(True)
        S_tpn = torch.from_numpy(np.array(n_step_transitions.S_tpn)).float().requires_grad_(True)
        rew_t_to_tpB = np.array(n_step_transitions.R_ttpB)
        gamma_t_to_tpB = np.array(n_step_transitions.Gamma_ttpB)
        A_t = np.array(n_step_transitions.A_t)
        with torch.no_grad():
            # Double-Q target: online net selects the action, target net evaluates it.
            G_t = rew_t_to_tpB + gamma_t_to_tpB * \
                  self.Q_double(S_tpn)[2].gather(1, torch.argmax(self.Q(S_tpn)[2], 1).view(-1, 1)).squeeze()
        Q_S_A = self.Q(S_t)[2].gather(1, torch.from_numpy(A_t).reshape(-1, 1)).squeeze()
        batch_td_error = G_t.float() - Q_S_A
        loss = 1 / 2 * (batch_td_error) ** 2
        # BUG FIX: pair each experience key with its OWN |TD error|.  The
        # previous nested comprehension assigned the last error to every key.
        abs_errors = np.abs(batch_td_error.detach().data.numpy())
        priorities = {k: err for k, err in zip(n_step_transitions.key, abs_errors)}
        return loss.mean(), priorities

    def update_Q(self, loss):
        """Backprop *loss* through the online network and periodically sync
        the target network."""
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.num_q_updates += 1
        # BUG FIX: sync every q_target_sync_freq updates.  The original
        # truthiness test (`if n % freq:`) fired on every step EXCEPT the
        # intended multiples.
        if self.num_q_updates % self.params['q_target_sync_freq'] == 0:
            self.Q_double.load_state_dict(self.Q.state_dict())

    def learn(self, T):
        """Run *T* learning iterations once the replay memory is warm."""
        while self.replay_memory.size() <= self.params["min_replay_mem_size"]:
            time.sleep(1)
        for t in range(T):
            # 4. Sample a prioritized batch of transitions
            prioritized_xp_batch = self.replay_memory.sample(int(self.params['replay_sample_size']))
            # 5. & 7. Apply double-Q learning rule, compute loss and experience priorities
            loss, priorities = self.compute_loss_and_priorities(prioritized_xp_batch)
            # 6. Update parameters of the Q network(s)
            self.update_Q(loss)
            self.shared_state['Q_state_dict'] = self.Q.state_dict()
            # 8. Update priorities
            self.replay_memory.set_priorities(priorities)
            # 9. Periodically remove old experience from replay memory
            if t % self.params['remove_old_xp_freq'] == 0:
                self.replay_memory.remove_to_fit()
__author__ = 'frieder'
from PyQt4 import QtCore, QtGui, Qwt5
from guiqwt.plot import CurvePlot, PlotManager, CurveDialog
from guiqwt.tools import SelectPointTool
from guiqwt.builder import make
import numpy as np
import logic.DataFlowControl as DataController
class Ui_PlotWidget_Feature_Set(QtGui.QWidget):
    """guiqwt curve-plot widget for visualizing a sensor feature set:
    loads sample data, reduces it with PAA, clusters with k-means and
    annotates the cluster segments on the plot.  (Python 2 / PyQt4.)"""
    def __init__(self, parent=None):
        """Constructor for Ui_PlotWidget_Feature_Set."""
        QtGui.QWidget.__init__(self, parent)
        self.setupUi()
    def setupUi(self):
        """Build the curve dialog, axes, legend and layout, then show the
        widget and create the data controller."""
        #self.plot = CurvePlot(self)
        self.dialog = CurveDialog(edit=False, toolbar=True, parent=self)
        self.plot = self.dialog.get_plot()
        self.plot.set_antialiasing(True)
        #x = np.linspace(-10,10,200)
        #dy = x/100.
        #y = np.sin(np.sin(np.sin(x)))
        #self.plot.add_item(make.curve(x,y))
        self.loadButton = QtGui.QPushButton("Load")
        self.trainButton = QtGui.QPushButton("Train Model")
        ly = QtGui.QVBoxLayout()
        ly.addWidget(self.plot)
        #ly.addWidget(self.loadButton)
        #ly.addWidget(self.trainButton)
        self.plot.setAxisTitle(Qwt5.Qwt.QwtPlot.xBottom, 'Time')
        self.plot.setAxisTitle(Qwt5.Qwt.QwtPlot.yLeft, 'Value')
        self.manager = PlotManager(self)
        self.manager.add_plot(self.plot)
        #self.manager.
        legend = make.legend('TL')
        self.plot.add_item(legend)
        self.setLayout(ly)
        self.move(300, 200)
        self.show()
        self.dataController = DataController.DataController()
    def loadData(self):
        """Load sample data, PAA-reduce the PIR channel, k-means cluster it
        (k=2) and draw the curve plus one annotated rectangle per cluster
        segment."""
        self.trainingData = self.dataController.loadSampleData()
        import logic.dimreduce.paa as PAA
        p = PAA.paa()
        # Piecewise Aggregate Approximation down to 100 segments.
        data = p.process(self.trainingData["PIR"][:], 100)
        r = np.array(range(len(data))).reshape(len(data), 1)
        s = np.array(data).reshape(len(data), 1)
        # NOTE(review): stacks `s` with itself (not with `r`) to build the
        # 2-D feature matrix for k-means — confirm this is intended.
        rs = np.hstack((s, s))
        print rs
        import logic.featurex.kmeans as km
        k = km.kmeans()
        labels = k.process(rs, 2)
        print labels
        self.plot.add_item(make.curve(range(0, len(data)), data))
        from guiqwt.styles import AnnotationParam
        # Walk the label sequence and draw a rectangle over each run of
        # identical cluster labels.
        i=0
        i_beg = 0
        i_end = 0
        while i < len(labels):
            cur = labels[i_end]
            if i < len(labels)-1:
                if labels[i_end + 1] != cur:
                    i_end=i
                    from guiqwt.annotations import AnnotatedRectangle
                    param = AnnotationParam()
                    param.title = str(labels[int(i_beg)])
                    param.show_computations = False
                    anno = AnnotatedRectangle(r[int(i_beg)],0.5,r[int(i_end)],0.2, param) #TODO: y axis scaling
                    self.plot.add_item(anno)
                    i_beg=i_end
                    print "s1"
                else:
                    i_end =i
                    print "s2"
                print "s3"
            print "s4",i_end,len(labels)
            i+=1
        #param = AnnotationParam()
        #param.title = "alright"
        #param.show_computations = False
        ##anno = AnnotatedRectangle(0., 1., 1.5, 0.5, param)
        #anno.set_style("plot", "shape/drag")
        #anno.set_style("shape/drag/fill/color", "white")
        #self.plot.add_item(anno)
        #self.rangeSelection = make.range(-2, 2)
        #disp0 = make.range_info_label(self.rangeSelection, 'BR', u"x = %.1f +- %.1f cm",
        #                              title="Range infos")
        #self.plot.add_item(self.rangeSelection)
        #self.plot.add_item(disp0)
        self.plot.replot()
    def trainData(self, string_length, vocab, window_length, clusterN):
        """Forward the selected x-range and the (int-coerced) training
        hyper-parameters to the data controller.

        NOTE(review): self.rangeSelection is only created in the
        commented-out code above — calling this as-is raises AttributeError.
        """
        a, b = self.rangeSelection.get_range()
        if a > b:
            a, b = b, a
        print a, b
        print "in widget", int(string_length), int(vocab), int(window_length), int(clusterN)
        self.dataController.trainData(a, b, int(string_length), int(vocab), int(window_length), int(clusterN))
|
<reponame>shanks2999/SearchEngine
import settings
import preprocess
import urllib.request
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import httplib2
h = httplib2.Http()  # shared HTTP client; only referenced by the commented-out HEAD check in is_valid_url
myList = []  # crawl frontier: discovered URLs awaiting fetch (shared module state)
def tag_visible(element):
    """Return True when a bs4 text node would be visible on a rendered page.

    Text nested under style/script/head/title/meta/[document] containers,
    and HTML comment nodes, are treated as invisible.
    """
    hidden_parents = ['style', 'script', 'head', 'title', 'meta', '[document]']
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def crawl_and_fetch(threshold):
    """Crawl pages from the global ``myList`` frontier until ``threshold``
    pages have been processed successfully or the frontier is exhausted.

    For each page: fetch it, keep only visible text, normalise it
    (punctuation/digits/stopwords/stemming), harvest outgoing links into
    the frontier via getAllLinks, record the URL in ./data/all_url and
    export the tokens sequentially.
    """
    index = 0   # position in the frontier (myList)
    count = 1   # number of successfully processed pages so far
    # NOTE(review): the original condition was
    # `count <= threshold or index >= len(myList)`, which never terminates
    # once the frontier is exhausted (the IndexError from myList[index] is
    # swallowed below and `index` keeps growing forever). Stop when the
    # quota is met or the frontier runs out.
    while count <= threshold and index < len(myList):
        try:
            page = requests.get(myList[index])
            if int(page.status_code) != 200:
                index += 1
                continue
            soup = BeautifulSoup(page.text, 'html.parser')
        except Exception:  # best-effort crawl: skip pages that fail to fetch/parse
            index += 1
            continue
        # Keep only human-visible text nodes.
        text = filter(tag_visible, soup.findAll(text=True))
        visible_text_B = u" ".join(t.strip() for t in text)
        data = preprocess.removePunctuation(visible_text_B)
        data = preprocess.removeDigits(data)
        tokens = preprocess.tokenizeData(data)
        tokens = preprocess.removeStopWords(tokens)
        tokens = preprocess.performStemming(tokens)
        # Stemming can reintroduce stop words, hence the second pass.
        tokens = preprocess.removeStopWords(tokens)
        getAllLinks(myList[index], soup)
        with open("./data/all_url", 'a', encoding="utf-8") as fobj:
            fobj.write(myList[index])
            fobj.write("\n")
        preprocess.export_spider_sequential(myList[index], tokens[:], count)
        index += 1
        count += 1
# def fetch_link_graph(threshold):
# for index in range(threshold):
# page = requests.get(myList[index])
# soup = BeautifulSoup(page.text, 'html.parser')
# getAllLinks(myList[index], soup)
# if len(myList) > threshold:
# break
def getAllLinks(link, soup):
    """Collect crawlable uic.edu links from ``soup`` into the global frontier.

    Each anchor href is normalised (fragment stripped, trailing slash
    removed); document/image targets, late-colon URLs (ports, odd schemes)
    and mailto-style addresses are skipped; relative URLs are resolved
    against ``link``; only previously unseen uic.edu URLs are appended to
    ``myList`` and recorded in ``settings.link_set``.
    """
    for anchor in soup.find_all('a', href=True):
        href = anchor['href']
        # Drop any fragment identifier.
        hash_pos = href.find("#")
        if hash_pos != -1:
            href = href[:hash_pos]
        # Normalise away a trailing slash.
        if href.endswith('/'):
            href = href[:-1]
        # Skip non-HTML targets, URLs with a colon past the scheme position,
        # and anything that looks like an e-mail address.
        skip_suffixes = ('.pdf', '.doc', '.docx', '.xls', '.xlsx', '.jpg', '.png')
        if href.endswith(skip_suffixes) or href.rfind(":") > 6 or "@" in href:
            continue
        if len(href) <= 1:
            continue
        # Resolve: root-relative and bare-relative URLs join onto `link`.
        if href.startswith('/'):
            resolved = link + href
        elif href.startswith('http'):
            resolved = href
        else:
            resolved = link + "/" + href
        # Extract the host portion for the domain check.
        domain = resolved[resolved.find('//') + 2:]
        slash_pos = domain.find('/')
        if slash_pos > -1:
            domain = domain[:slash_pos]
        if not domain.endswith('uic.edu'):
            continue
        # The seen-set stores URLs without their scheme.
        schemeless = resolved[resolved.find("//") + 2:]
        if schemeless not in settings.link_set:
            settings.link_set.add(schemeless)
            myList.append(resolved)
def is_valid_url(url):
    """Return True when ``url`` can be opened, False on any failure.

    A successful ``urlopen`` (no exception raised) counts as valid; any
    error — bad scheme, DNS failure, HTTP error — counts as invalid.
    NOTE(review): narrowed the original bare ``except:`` so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        urlopen(url)
        return True
    except Exception:
        return False
def main():
    """Entry point: initialise shared state and start the crawl.

    Seeds the frontier with the CS department home page, truncates the
    ./data/all_url output file, and crawls up to 3000 pages.
    """
    settings.init()
    link = "https://www.cs.uic.edu"
    # Opening in 'w' mode and doing nothing truncates any previous output.
    with open("./data/all_url", 'w', encoding="utf-8") as fobj: pass
    # The seen-set stores URLs without their scheme (text after '//').
    settings.link_set.add(link[link.find("//")+2:])
    myList.append(link)
    crawl_and_fetch(3000)
if __name__ == '__main__':
    import sys  # NOTE(review): imported but never used here
    main()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_classification.core.ipynb (unless otherwise specified).
# Public API of this nbdev-generated module. Note that `convert_image`,
# `load_and_apply_image_transforms` and `apply_transforms` are not listed.
__all__ = ['pil_loader', 'cv2_loader', 'denormalize', 'show_image_batch', 'DatasetDict', 'ClassificationMapper',
           'ClassificationDataset', 'FolderParser', 'PandasParser', 'CSVParser']
# Cell
import logging
import os
from collections import namedtuple
from typing import *
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as T
from fastcore.all import *
from PIL import Image
from timm.data.constants import *
from timm.data.parsers.parser import Parser
from timm.data.parsers.parser_image_folder import ParserImageFolder
from ..utils.display import show_image, show_images
_logging = logging.getLogger(__name__)
# Cell
def pil_loader(path: str) -> Image.Image:
    """Open the image file at ``path`` with PIL and return it as RGB."""
    return Image.open(path).convert("RGB")
# Cell
def cv2_loader(path: str) -> np.ndarray:
    """Read the image at ``path`` with OpenCV and return it in RGB order."""
    bgr = cv2.imread(path)
    # OpenCV reads BGR; convert to the RGB layout the rest of the code expects.
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
# Cell
@typedispatch
def convert_image(image: Image.Image):
    """Convert a PIL image to a numpy array (fastcore typedispatch overload)."""
    return np.array(image)
# Cell
@typedispatch
def convert_image(image: np.ndarray):
    """Convert a numpy array to a PIL image (fastcore typedispatch overload)."""
    return Image.fromarray(image)
# Cell
@typedispatch
def load_and_apply_image_transforms(path: str, transforms: A.Compose):
    """Load the image at ``path`` with cv2 and apply an albumentations
    pipeline (typedispatch selects this overload for A.Compose).
    """
    image = cv2_loader(path)
    # albumentations takes/returns a dict keyed by "image".
    aug_image = transforms(image=image)
    return aug_image["image"]
# Cell
@typedispatch
def load_and_apply_image_transforms(path: str, transforms: T.Compose):
    """Load the image at ``path`` with PIL and apply a torchvision pipeline
    (typedispatch selects this overload for T.Compose).
    """
    image = pil_loader(path)
    aug_image = transforms(image)
    return aug_image
# Cell
@typedispatch
def apply_transforms(im: Image.Image, transform: T.Compose):
    """Apply a torchvision transform pipeline to an already-loaded PIL image."""
    return transform(im)
# Cell
@typedispatch
def apply_transforms(im: Image.Image, transform: A.Compose):
    """Apply an albumentations pipeline to a PIL image (converted to ndarray)."""
    image = np.array(im)
    return transform(image=image)["image"]
# Cell
def denormalize(x: torch.Tensor, mean: torch.FloatTensor, std: torch.FloatTensor):
    """Undo per-channel normalization: return ``x * std + mean``.

    ``mean``/``std`` are per-channel vectors, broadcast over the trailing
    spatial dimensions; the input is moved to CPU and cast to float first.
    """
    scaled = x.cpu().float() * std[..., None, None]
    return scaled + mean[..., None, None]
# Cell
# fastcore helper: merges these display kwargs into the function signature.
@use_kwargs_dict(
    keep=True,
    figsize=None,
    imsize=3,
    suptitle=None,
)
def show_image_batch(
    batch: Tuple[torch.Tensor],
    n: int = 8,
    mean=IMAGENET_DEFAULT_MEAN,
    std=IMAGENET_DEFAULT_STD,
    nrows=2,
    ncols=4,
    **kwargs
):
    """Display the first ``n`` images of a (images, labels) batch after
    applying `denormalize` with ``mean``/``std``; extra kwargs are passed
    through to `show_images`.
    """
    images, labels = batch
    images, labels = images[:n], labels[:n]
    # The stats may arrive as plain sequences; coerce them to float tensors.
    if not isinstance(mean, torch.Tensor):
        mean = torch.Tensor(mean).float()
    if not isinstance(std, torch.Tensor):
        std = torch.Tensor(std).float()
    images = denormalize(images, mean, std)
    images = images.clip(0, 1)  # clamp rounding artefacts into display range
    labels = [x.cpu().numpy().item() for x in labels]
    show_images(ims=images, titles=labels, nrows=nrows, ncols=ncols, **kwargs)
# Cell
class DatasetDict(namedtuple("dataset_dict", ["file_name", "target"])):
    """
    Lightweight record pairing the path to an image (``file_name``) with
    its integer class label (``target``).
    """
    def __new__(cls, file_name: str, target: int):
        return super(DatasetDict, cls).__new__(cls, file_name, target)
# Cell
class ClassificationMapper(DisplayedTransform):
    decodes = noop
    # NOTE(review): because `decodes = noop` precedes it, the string below is
    # a bare expression statement, not the class docstring.
    """
    A callable which takes in a dataset and map it into a format used by the model.
    This mapper takes in a Dict/str as input . The key "file_name" must contain the
    path to the Image to be loaded and key "target" must contain the integer target.
    The callable currently does the following:
    1. Reads in the image from `file_name`.
    2. Applies transformations to the Images
    3. Converts dataset to return `torch.Tensor` Images & `torch.long` targets
    You can also optionally pass in `xtras` these which must be a callable functions. This function
    is applied after converting the images to to tensors. Helpfull for applying trasnformations like
    RandomErasing which requires the inputs to be tensors.
    """
    def __init__(
        self,
        augmentations: Optional[Union[T.Compose, A.Compose]] = None,
        mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
        std: Sequence[float] = IMAGENET_DEFAULT_STD,
        xtras: Optional[Callable] = noop,
    ):
        """
        Arguments:
            1. `augmentations`: a list of augmentations or deterministic transforms to apply
            2. `mean`, `std`: list or tuple with #channels element, representing the per-channel mean and
                std to be used to normalize the input image. Note: These should be normalized values.
            3. `xtras`: A callable function applied after images are normalized and converted to tensors.
        """
        super().__init__()
        store_attr()  # fastcore: stores every ctor argument as an attribute
        # fmt: off
        self.normalize = T.Compose([
            T.ToTensor(),
            T.Normalize(torch.tensor(self.mean), torch.tensor(self.std)),
        ])
        # fmt: on
    def encodes(self, dataset_dict: DatasetDict):
        """
        For normal use-cases
        """
        # fastcore's Transform dispatches `encodes` on the annotated argument
        # type, so this overload and the Tuple one below coexist rather than
        # the second silently shadowing the first.
        # fmt: off
        image = load_and_apply_image_transforms(dataset_dict.file_name, self.augmentations)
        # fmt: on
        image = self.normalize(image)
        image = self.xtras(image)
        assert isinstance(image, torch.Tensor)
        target = dataset_dict.target
        target = torch.tensor(target, dtype=torch.long)
        return image, target
    def encodes(self, torchvision_instance: Tuple):
        """
        For torchvision instances
        """
        image, target = torchvision_instance
        image = apply_transforms(image, self.augmentations)
        image = self.normalize(image)
        image = self.xtras(image)
        assert isinstance(image, torch.Tensor)
        target = torch.tensor(target, dtype=torch.long)
        return image, target
# Cell
class ClassificationDataset(torch.utils.data.Dataset):
    """
    Map a function over the elements returned by a parser
    Arguments :
    1. `mapper`: a callable which maps the element in dataset, typically `ClassificationMapper`.
    2. `parser`: a `Parser` to load in the Images and their corresponding targets.
    """
    def __init__(self, mapper: DisplayedTransform, parser: Parser):
        store_attr("parser, mapper")  # fastcore: sets self.parser / self.mapper
    def __len__(self):
        # Length is delegated to the parser's sample list.
        return len(self.parser)
    def __getitem__(self, index):
        dataset_dict = self.parser[index]
        # preprocess and load the data
        return self.mapper.encodes(dataset_dict)
# Cell
class FolderParser(ParserImageFolder):
    """
    Parser over a class-per-directory image tree (timm's ParserImageFolder
    layout):
    ```
    root/class_x/xxx.ext
    root/class_x/xxy.ext
    root/class_x/[...]/xxz.ext
    root/class_y/123.ext
    root/class_y/nsdf3.ext
    root/class_y/[...]/asd932_.ext
    ```
    Samples are returned as `DatasetDict` records rather than bare tuples.
    """
    def __getitem__(self, index):
        sample_path, sample_target = self.samples[index]
        return DatasetDict(file_name=sample_path, target=sample_target)
# Cell
class PandasParser(Parser):
    """
    Parser backed by an in-memory pandas dataframe.
    Arguments:
    1. `df`: a pandas dataframe
    2. `path_column`: name of the column where the image paths are stored.
    3. `label_column`: name of the column where the image targets are stored.
    """
    def __init__(self, df: pd.DataFrame, path_column: str, label_column: str):
        self.df = df
        # Pair each path with its label, preserving dataframe row order.
        self.samples = list(zip(self.df[path_column], self.df[label_column]))
    def __getitem__(self, index):
        file_path, label = self.samples[index]
        return DatasetDict(file_name=str(file_path), target=label)
    def __len__(self):
        return len(self.samples)
    def _filename(self, index):
        file_path, _ = self.samples[index]
        return file_path
    def filename(self, index):
        return self._filename(index)
    def filenames(self):
        return [self._filename(i) for i in range(len(self))]
# Cell
class CSVParser(PandasParser):
    """
    Parser for csv files. Loads the csv into a pandas dataframe; all other
    functionality is inherited from `PandasParser`.
    """
    @delegates(pd.read_csv)
    def __init__(self, path: str, path_column: str, label_column: str, **kwargs):
        """
        Arguments:
            1. `path`: path to a csv file
            2. `path_column`: name of the column where the image paths are stored.
            3. `label_column`: name of the column where the image targets are stored.
        Extra `**kwargs` are forwarded to `pd.read_csv`.
        """
        # Reuse PandasParser's initialisation instead of duplicating the
        # df/samples construction logic here.
        super().__init__(pd.read_csv(path, **kwargs), path_column, label_column)
#=======================================================================================================================
#
# ALLSorts v2 - Find Centroids
# Not all subtypes are destined to be classified by a small set of genes, they are defined by group membership.
#
# Author: <NAME>
# License: MIT
#
#=======================================================================================================================
''' --------------------------------------------------------------------------------------------------------------------
Imports
---------------------------------------------------------------------------------------------------------------------'''
''' Internal '''
from ALLSorts.common import message, _flat_hierarchy, _pseudo_counts
''' External '''
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import KernelPCA
# Methods
import pandas as pd
''' --------------------------------------------------------------------------------------------------------------------
Classes
---------------------------------------------------------------------------------------------------------------------'''
class CentroidCreate(BaseEstimator, TransformerMixin):
    """
    A class that represents the custom feature that represents the distance to each subtype centroid.
    ...
    Attributes
    __________
    BaseEstimator : inherit
        Inherit from Scikit-learn BaseEstimator class
    TransformerMixin : inherit
        Inherit from Scikit-learn TransformerMixin class
    Methods
    -------
    fit(counts, y)
        Get all centroid parameters relative to the training set.
    transform(counts)
        Transform input counts by parameters determined by training set.
    """
    def __init__(self, hierarchy=False, distance_function=False, only=False):
        """
        Initialise the class
        Parameters
        __________
        hierarchy : dict
            Dictionary representing the hierarchy of subtypes
        distance_function : object
            The distance function to calculate between centroids and samples. i.e. cosine_similarity.
        only : bool
            Discard all other features, keep only the distances to centroids.
        """
        self.hierarchy = hierarchy
        self.cv_genes = {}  # per-subtype list of the 20 most informative genes
        self.distance_function = distance_function
        self.centroids = {}  # per-subtype centroid in kernel-PCA space
        self.kpca = {}  # per-subtype fitted KernelPCA transform
        self.only = only
        self.scaler = {}  # replaced by a fitted StandardScaler on first transform
        self.training = True  # True until the first transform() pass completes
    def _check_input(self, X):
        # Accept either a {"counts", "genes"} dict or a bare counts frame.
        # Prepare
        if isinstance(X, dict):
            counts = X["counts"]
            self.genes = X["genes"]
        else:
            counts = X.copy()
            self.genes = False
        return counts
    def _get_flat_hierarchy(self):
        # Flatten the nested subtype hierarchy into a single-level mapping.
        return _flat_hierarchy(self.hierarchy, flat_hierarchy={})
    def _get_pseudo_counts(self, X, y, parents):
        # Relabel samples so children are grouped under their parent subtypes.
        return _pseudo_counts(X, y, parents, self.f_hierarchy)
    def _recurse_subtypes(self, sub_hier, X, y, name="root"):
        # Walk the hierarchy, fitting gene lists, KPCA and centroids per subtype.
        if sub_hier == False: # Recursion stop condition
            return False
        parents = list(sub_hier.keys())
        # Create pseudo-counts based on hierarchy
        X_p, y_p = self._get_pseudo_counts(X, y, parents)
        subtypes = list(y_p.unique())
        # Centroid features are only built when more than two groups compete.
        if len(subtypes) > 2:
            for subtype in subtypes:
                # One-vs-rest labelling for this subtype.
                labels = y_p.copy()
                labels[~labels.isin([subtype])] = "Others"
                rf = RandomForestClassifier(n_estimators=100, class_weight="balanced").fit(X_p, labels)
                coefs = pd.DataFrame(rf.feature_importances_, index=X_p.columns)[0]
                # Keep the 20 highest-importance genes for this subtype.
                self.cv_genes[subtype] = list(coefs.abs().sort_values(ascending=False).iloc[0:20].index)
                self.kpca[subtype] = KernelPCA(2, kernel="rbf").fit(X_p[self.cv_genes[subtype]])
                X_p_pca = pd.DataFrame(self.kpca[subtype].transform(X_p[self.cv_genes[subtype]]), index=X_p.index)
                # Now calculate centroids
                centroid = X_p_pca.loc[labels == subtype].median()
                self.centroids[subtype] = pd.DataFrame(centroid).transpose()
        # Recurse through the hierarchy
        for parent in parents:
            self._recurse_subtypes(sub_hier[parent], X, y, name=parent)
    def _centroid_similarity(self, counts, y):
        # Append one "distance_<subtype>" feature per centroid to `counts`.
        # NOTE(review): `y` is unused here — presumably kept for signature
        # symmetry; verify before removing.
        distance_features = pd.DataFrame(index=counts.index)
        for subtype in self.centroids.keys():
            genes = self.cv_genes[subtype]
            counts_pca = pd.DataFrame(self.kpca[subtype].transform(counts[genes]), index=counts.index)
            distance = self.distance_function(counts_pca, self.centroids[subtype])
            # Flatten the (n_samples, 1) result into a plain list.
            distance_ = []
            for d in distance:
                distance_.append(d[0])
            distance_features["distance_"+subtype] = distance_
        # The scaler is fitted only on the first (training) pass and reused
        # unchanged for later transform() calls.
        if self.training:
            self.scaler = StandardScaler().fit(distance_features)
        distance_features = pd.DataFrame(self.scaler.transform(distance_features),
                                         columns = distance_features.columns,
                                         index = distance_features.index)
        counts = pd.concat([counts, distance_features], axis=1, join="inner")
        if self.training:
            # Register the new feature names per subtype: replacing the gene
            # lists when `only` is set, extending them otherwise.
            for subtype in self.centroids.keys():
                if self.only:
                    self.genes[subtype] = list(distance_features.columns)
                else:
                    self.genes[subtype] += list(distance_features.columns)
            self.training = False
        return counts
    def fit(self, X, y):
        # Learn centroid parameters from the training counts and labels.
        counts = self._check_input(X)
        self.f_hierarchy = self._get_flat_hierarchy()
        self._recurse_subtypes(self.hierarchy, counts, y)
        return self
    def transform(self, X, y=False):
        # Add the centroid-distance features using the fitted parameters.
        counts = self._check_input(X)
        counts = self._centroid_similarity(counts, y)
        return {"genes": self.genes, "counts": counts}
|
import sys, os, re, time, math, random, struct, zipfile, operator, csv, hashlib, uuid, pdb, types
import settings, logging
from collections import defaultdict
sys.path.insert(0, 'libs')
from bs4 import BeautifulSoup
from utils.deferred import deferred
logging.basicConfig(filename=settings.LOG_FILENAME, level=logging.DEBUG)
from utils.levenshtein import levenshtein
from lsh_matrix import Matrix, MatrixRow
from utils.procache import Cache
symbols = re.compile('\W+')  # runs of non-word characters, collapsed to spaces in parse()
class PeerbeltLine(object):
    """Parses one line of a Peerbelt text dump into (doc_id, cleaned_text).

    Each line is a record of the form {"id":"<doc_id>:html","text":"<html>"}
    (Python 2 byte strings).
    """
    text_file_pattern = re.compile('^{"id":"([^"]*):html","text":"(.*)}', flags=re.DOTALL)
    @staticmethod
    def parse(line):
        """Extract the doc id and the normalised visible text from `line`."""
        found_pattern = PeerbeltLine.text_file_pattern.search(line)
        doc_id = found_pattern.group(1)
        text = found_pattern.group(2)
        # Python 2: decode bytes to unicode, then drop anything non-ASCII.
        udata = text.decode("utf-8")
        text = udata.encode("ascii","ignore")
        text = text.replace('\\n',' ').replace('\\t',' ').replace("'", "''")
        # Strip scripts/styles and pull the human-visible text out of the HTML.
        soup = BeautifulSoup(text.replace('\\n',' '))
        [s.extract() for s in soup(['script', 'style'])]
        text = soup.get_text(separator=' ', strip=True)
        # Collapse symbols and runs of whitespace into single spaces.
        text = symbols.sub(' ', text.lower())
        text = ' '.join(text.split())
        return doc_id, text
shingle_cache = Cache(max_size = 1)
def lsh_text(LineFormat, zip_reader, filename, matrix_key, text_filename):
    """Minhash one text member of a zip archive into the matrix ``matrix_key``.

    Each line of the member is parsed with ``LineFormat.parse`` into
    (doc_id, text) and stored via ``dataset.create_doc``.
    """
    # Lazy %-style args keep formatting off the hot path; the original
    # template had lost its {filename} placeholder (it read "(unknown)").
    logging.info('<TextWorker filename=%s text_filename=%s>', filename, text_filename)
    infolist = zip_reader.infolist()
    Matrix._initialize()
    MatrixRow._initialize()
    dataset = Matrix.find(matrix_key)
    # Locate the requested member; fail loudly when it is missing instead of
    # silently processing whatever member the loop last left in `info`.
    info = next((i for i in infolist if i.filename == text_filename), None)
    if info is None:
        raise KeyError('%s not found in zip archive' % text_filename)
    with zip_reader.open(info) as text_reader:
        logging.debug('Reading file %s', info.filename)
        stats = {}
        for line in text_reader:
            doc_id, text = LineFormat.parse(line)
            dataset.create_doc(doc_id, text, stats)
            stats = {}
    logging.info('</TextWorker filename=%s text_filename=%s>', filename, text_filename)
def lsh_zipfile(LineFormat, zip_reader, source, filename, file_key = ''):
    """Create the matrix tables for ``filename`` and fan out one deferred
    ``lsh_text`` task per member of the zip archive.

    NOTE(review): removed the unused locals of the original
    (start/all_stats/new_docs_count/docs_cache); MatrixRow.create() is kept
    for its table-creation side effect only.
    """
    infolist = zip_reader.infolist()
    MatrixRow.create() # force the creation of the table
    dataset = Matrix.create(source, filename, file_key) # force the creation of the table and filling it with a row
    dataset = Matrix.find(dataset.ds_key)
    for info in infolist:
        with zip_reader.open(info) as text_reader:
            logging.debug('Reading file %s', info.filename)
            # Actual parsing happens asynchronously, one task per member.
            deferred.defer(lsh_text, LineFormat, zip_reader, filename, matrix_key = dataset.ds_key, text_filename = info.filename)
    return
def main():
    """
    Read input zip file, minhash the documents in it and put them in buckets
    The zip file should have been created with data_prep/prepare_blobstore_zips
    """
    try:
        filename = os.path.abspath(sys.argv[1])
    except IndexError:
        # No command-line argument was supplied.
        print 'filename not provided'
        exit(1)
    try:
        zip_reader = zipfile.ZipFile(filename)
    except IOError:
        print 'unable to read file {file}'.format(file = filename)
        exit(1)
    except zipfile.BadZipfile:
        print 'file {file} is not a zip file'.format(file = filename)
        exit(1)
    # 'bash' is the source tag recorded in the matrix for this import.
    lsh_zipfile(PeerbeltLine, zip_reader, 'bash', filename)
if __name__ == "__main__":
    main()
|
<reponame>WxBDM/nwsapy<gh_stars>1-10
# General file structure:
# Request error object
# Individual components of the module
# Base endpoint
# All endpoints associated with /path/to/endpoint
import shapely
from shapely.geometry import Point
from datetime import datetime
from collections import OrderedDict
from warnings import warn
import pandas as pd
import pytz
import numpy as np
from nwsapy.core.inheritance.base_endpoint import BaseEndpoint
class IndividualAlert:
    """A single NWS alert and its properties.

    Attributes are set dynamically from the alert's ``properties`` payload.
    Notable ones:
        affectedZones (list[str]): affected zone IDs (URL prefix stripped).
        areaDesc (list[str]): covered-area descriptions (split on ';').
        points / polygon: shapely geometry of the alert area, or None.
        sent/effective/onset/expires/ends: local datetimes, each with a
            matching ``*_utc`` counterpart (None when absent).
        event, headline, severity, status, urgency, ...: passed through
            from the API response unchanged.
    """
    def __init__(self, alert_list):
        """Build an alert from one GeoJSON feature of an /alerts response.

        :param alert_list: a single alert feature containing ``properties``
            and ``geometry`` keys.
        :type alert_list: dict
        """
        alert_d = alert_list['properties']  # prep to set all attributes
        # Geometry first: adds the 'points' and 'polygon' keys.
        geom_d = self._format_geometry(alert_list['geometry'])
        alert_d.update(geom_d)
        # Convert the ISO-8601 time strings into datetimes (+ UTC copies).
        times = {'sent': alert_d['sent'], 'effective': alert_d['effective'],
                 'onset': alert_d['onset'], 'expires': alert_d['expires'],
                 'ends': alert_d['ends']}
        time_d = self._set_times(times)
        alert_d.update(time_d)
        # Keep only the zone ID portion of each affected-zone URL.
        alert_d['affectedZones'] = [zone.split("/")[-1] for zone in alert_d['affectedZones']]
        alert_d['areaDesc'] = alert_d['areaDesc'].split(";")
        # Mirror every key onto the instance as an attribute.
        for k, v in alert_d.items():
            setattr(self, k, v)
        # Kept for to_dict(); __dict__ would not include class-level values.
        self._d = alert_d
        self._series = pd.Series(data = self._d)
    def _format_geometry(self, geometries):
        """Translate a GeoJSON geometry dict into shapely points/polygons."""
        # if there's any kind of geometry
        if not isinstance(geometries, type(None)):
            geometry_type = geometries['type']
            # A MultiPolygon yields one point-list and one polygon per part.
            if geometry_type == "MultiPolygon":
                points = []
                polygons = []
                for polygon in geometries['coordinates']:
                    # NOTE(review): fixed Point(x[0], x[0]) -> Point(x[0], x[1]);
                    # the y coordinate was being discarded.
                    polygon_points = [Point(x[0], x[1]) for x in polygon[0]]
                    points.append(polygon_points)
                    polygons.append(shapely.geometry.Polygon(polygon_points))
                return dict({"points" : points, "polygon" : polygons})
            # Single geometry: build shapely Points from the coordinates.
            points = [Point(x[0], x[1]) for x in geometries['coordinates'][0]]
            polygon_d = dict({'points': points})
            # Polygons additionally get a shapely Polygon; bare points don't.
            if geometry_type == 'Polygon':
                polygon_d['polygon'] = shapely.geometry.Polygon(points)
            else: # only if it's a point (just in case, this needs to be tested)
                polygon_d['polygon'] = None
        else: # there's no geometry tag, so make them none.
            polygon_d = dict({"points": None, 'polygon': None}) # set to none
        return polygon_d
    def _set_times(self, times):
        """Parse ISO time strings into local datetimes plus ``_utc`` copies."""
        utc = pytz.timezone("UTC")
        time_d = {}
        for time in times:
            if not isinstance(times[time], type(None)):
                time_d[time] = datetime.fromisoformat(times[time])
                time_d[time + "_utc"] = time_d[time].astimezone(utc)
            else:
                # e.g. 'ends' is often absent; keep explicit Nones.
                time_d[time] = None
                time_d[time + "_utc"] = None
        return time_d
    def to_dict(self):
        r"""Converts all of the attributes to a dictionary.
        :return: A dictionary containing all of the attributes of the object.
        :rtype: dict
        """
        return self._d
    # NOTE(review): the comparison helpers below previously used inverted
    # operators (e.g. sent_before returned `self.sent_utc > other.sent_utc`),
    # contradicting their own docstrings. All comparisons are made in UTC.
    def sent_before(self, other):
        """True if this alert was sent before ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.sent_utc < other.sent_utc
    def sent_after(self, other):
        """True if this alert was sent after ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.sent_utc > other.sent_utc
    def effective_before(self, other):
        """True if this alert became effective before ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.effective_utc < other.effective_utc
    def effective_after(self, other):
        """True if this alert became effective after ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.effective_utc > other.effective_utc
    def onset_before(self, other):
        """True if this alert's onset is before ``other``'s.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.onset_utc < other.onset_utc
    def onset_after(self, other):
        """True if this alert's onset is after ``other``'s.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.onset_utc > other.onset_utc
    def expires_before(self, other):
        """True if this alert expires before ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.expires_utc < other.expires_utc
    def expires_after(self, other):
        """True if this alert expires after ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.expires_utc > other.expires_utc
    def ends_before(self, other):
        """True if this alert ends before ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.ends_utc < other.ends_utc
    def ends_after(self, other):
        """True if this alert ends after ``other``.
        :type other: alerts.IndividualAlert
        :rtype: bool
        """
        return self.ends_utc > other.ends_utc
class BaseAlert(BaseEndpoint):
    """Shared container behaviour for alert endpoint responses."""
    def __init__(self):
        # NOTE(review): was `super(BaseEndpoint, self).__init__()`, which
        # starts the MRO lookup *after* BaseEndpoint and therefore skipped
        # BaseEndpoint's own initialiser entirely.
        super().__init__()
    def to_dict(self):
        """Returns the alerts in a dictionary format, where the keys are
        1-based indices mapping to an individual alert.
        :return: Dictionary containing the values of the alerts.
        :rtype: dict
        """
        # in case it's an error (i.e. correlationid is in it)
        if isinstance(self.values, dict):
            return self.values
        # otherwise, reformat into an index -> alert-dict mapping.
        return {index + 1: alert.to_dict() for index, alert in enumerate(self.values)}
    def to_df(self):
        """Returns the values of the alerts in a pandas dataframe structure.
        :return: Dataframe of the values of the alerts.
        :rtype: pandas.DataFrame
        """
        # if it's an error
        if isinstance(self.values, dict):
            return pd.DataFrame(data = self.values)
        # Build a dict first and construct the frame once — appending row
        # by row to a DataFrame is a known performance trap. See:
        # https://stackoverflow.com/questions/27929472/improve-row-append-performance-on-pandas-dataframes
        d = OrderedDict()
        # self.values index is arbitrary.
        for index, individual_alert in enumerate(self.values):
            d[index] = individual_alert._d
        df = pd.DataFrame.from_dict(d).transpose()
        df = df.reindex(sorted(df.columns), axis = 1)  # alphabetize columns
        df = df.fillna(value = np.nan)
        return df
class ActiveAlerts(BaseAlert):
    """Response container for the /alerts/active endpoint."""
    def __init__(self):
        # NOTE(review): these subclasses previously called
        # super(<ParentClass>, self).__init__(), which starts the MRO lookup
        # *after* the parent and so skipped the parent's own initialiser;
        # zero-argument super() runs the full chain.
        super().__init__()
class Alerts(BaseAlert):
    """Response container for the /alerts endpoint."""
    def __init__(self):
        super().__init__()
class AlertById(BaseAlert):
    """Response container for alerts fetched by alert ID."""
    def __init__(self):
        super().__init__()
class AlertByArea(BaseAlert):
    """Response container for alerts fetched by land area."""
    def __init__(self):
        super().__init__()
class AlertByZone(BaseAlert):
    """Response container for alerts fetched by zone."""
    def __init__(self):
        super().__init__()
class AlertByMarineRegion(BaseAlert):
    """Response container for alerts fetched by marine region."""
    def __init__(self):
        super().__init__()
class AlertByType(BaseEndpoint):
    """Response container for the /alerts/types endpoint."""
    def __init__(self):
        super().__init__()
class AlertCount(BaseEndpoint):
    """Response container for the /alerts/active/count endpoint."""
    def __init__(self):
        # NOTE(review): was super(BaseEndpoint, self).__init__(), which
        # skipped BaseEndpoint's own initialiser in the MRO.
        super().__init__()
    def to_dict(self):
        """Return the raw alert-count values dictionary."""
        return self.values
    # The dataframe method could get pretty interesting. There's a few
    # different ways that it could be implemented, but for the sake of
    # v1.0.0, this method is left out.
    def filter_zones(self, zone):
        """Not implemented yet; warns and returns an empty dict."""
        warn("This method has not been implemented yet.")
        return {}
    def filter_land_areas(self, area):
        """Not implemented yet; warns and returns an empty dict."""
        warn("This method has not been implemented yet.")
        return {}
    def filter_marine_regions(self, region):
        """Not implemented yet; warns and returns an empty dict."""
        warn("This method has not been implemented yet.")
        return {}
<reponame>rimmartin/cctbx_project<gh_stars>0
from __future__ import division
from mmtbx.disorder import backbone
import iotbx.pdb.hierarchy
from cStringIO import StringIO
pdb_raw = """
CRYST1 21.937 6.000 23.477 90.00 107.08 90.00 P 1 21 1 2
ATOM 1 N GLY A 1 -9.009 4.612 6.102 1.00 16.77 N
ATOM 2 CA GLY A 1 -9.052 4.207 4.651 1.00 16.57 C
ATOM 3 C GLY A 1 -8.015 3.140 4.419 1.00 16.16 C
ATOM 4 O GLY A 1 -7.523 2.521 5.381 1.00 16.78 O
ATOM 5 N ASN A 2 -7.656 2.923 3.155 1.00 15.02 N
ATOM 6 CA ASN A 2 -6.522 2.038 2.831 1.00 14.10 C
ATOM 7 C ASN A 2 -5.241 2.537 3.427 1.00 13.13 C
ATOM 8 O ASN A 2 -4.978 3.742 3.426 1.00 11.91 O
ATOM 9 CB ASN A 2 -6.346 1.881 1.341 1.00 15.38 C
ATOM 10 CG ASN A 2 -7.584 1.342 0.692 1.00 14.08 C
ATOM 11 OD1 ASN A 2 -8.025 0.227 1.016 1.00 17.46 O
ATOM 12 ND2 ASN A 2 -8.204 2.155 -0.169 1.00 11.72 N
ATOM 13 N AASN A 3 -4.438 1.590 3.905 0.50 12.26 N
ATOM 14 CA AASN A 3 -3.193 1.904 4.589 0.50 11.74 C
ATOM 15 C AASN A 3 -1.955 1.332 3.895 0.50 11.10 C
ATOM 16 O AASN A 3 -1.872 0.119 3.648 0.50 10.42 O
ATOM 17 CB AASN A 3 -3.259 1.378 6.042 0.50 12.15 C
ATOM 18 CG AASN A 3 -2.006 1.739 6.861 0.50 12.82 C
ATOM 19 OD1AASN A 3 -1.702 2.925 7.072 0.50 15.05 O
ATOM 20 ND2AASN A 3 -1.271 0.715 7.306 0.50 13.48 N
ATOM 13 N BASN A 3 -4.438 1.590 3.905 0.50 12.26 N
ATOM 14 CA BASN A 3 -3.193 1.904 4.589 0.50 11.74 C
ATOM 15 C BASN A 3 -1.993 1.011 4.266 0.50 11.10 C
ATOM 16 O BASN A 3 -2.000 -0.192 4.568 0.50 10.42 O
ATOM 17 CB BASN A 3 -3.259 1.378 6.042 0.50 12.15 C
ATOM 18 CG BASN A 3 -2.006 1.739 6.861 0.50 12.82 C
ATOM 19 OD1BASN A 3 -1.702 2.925 7.072 0.50 15.05 O
ATOM 20 ND2BASN A 3 -1.271 0.715 7.306 0.50 13.48 N
ATOM 21 N AGLN A 4 -1.005 2.228 3.598 0.50 10.29 N
ATOM 22 CA AGLN A 4 0.384 1.888 3.199 0.50 10.53 C
ATOM 23 C AGLN A 4 1.435 2.606 4.088 0.50 10.24 C
ATOM 24 O AGLN A 4 1.547 3.843 4.115 0.50 8.86 O
ATOM 25 CB AGLN A 4 0.656 2.148 1.711 0.50 9.80 C
ATOM 26 CG AGLN A 4 1.944 1.458 1.213 0.50 10.25 C
ATOM 27 CD AGLN A 4 2.504 2.044 -0.089 0.50 12.43 C
ATOM 28 OE1AGLN A 4 2.744 3.268 -0.190 0.50 14.62 O
ATOM 29 NE2AGLN A 4 2.750 1.161 -1.091 0.50 9.05 N
ATOM 21 N BGLN A 4 -0.972 1.629 3.658 0.50 10.29 N
ATOM 22 CA BGLN A 4 0.392 1.063 3.508 0.50 10.53 C
ATOM 23 C BGLN A 4 1.482 2.027 4.050 0.50 10.24 C
ATOM 24 O BGLN A 4 1.689 3.145 3.548 0.50 8.86 O
ATOM 25 CB BGLN A 4 0.705 0.638 2.067 0.50 9.80 C
ATOM 26 CG BGLN A 4 1.943 -0.280 1.974 0.50 10.25 C
ATOM 27 CD BGLN A 4 2.565 -0.350 0.574 0.50 12.43 C
ATOM 28 OE1BGLN A 4 2.900 0.692 -0.032 0.50 14.62 O
ATOM 29 NE2BGLN A 4 2.757 -1.593 0.061 0.50 9.05 N
ATOM 30 N AGLN A 5 2.154 1.821 4.871 0.50 10.38 N
ATOM 31 CA AGLN A 5 3.270 2.361 5.640 0.50 11.39 C
ATOM 32 C AGLN A 5 4.594 1.768 5.172 0.50 11.52 C
ATOM 33 O AGLN A 5 4.768 0.546 5.054 0.50 12.05 O
ATOM 34 CB AGLN A 5 3.056 2.183 7.147 0.50 11.96 C
ATOM 35 CG AGLN A 5 1.829 2.950 7.647 0.50 10.81 C
ATOM 36 CD AGLN A 5 1.344 2.414 8.954 0.50 13.10 C
ATOM 37 OE1AGLN A 5 0.774 1.325 9.002 0.50 10.65 O
ATOM 38 NE2AGLN A 5 1.549 3.187 10.039 0.50 12.30 N
ATOM 30 N BGLN A 5 2.127 1.614 5.127 0.50 10.38 N
ATOM 31 CA BGLN A 5 3.270 2.361 5.640 0.50 11.39 C
ATOM 32 C BGLN A 5 4.594 1.768 5.172 0.50 11.52 C
ATOM 33 O BGLN A 5 4.768 0.546 5.054 0.50 12.05 O
ATOM 34 CB BGLN A 5 3.056 2.183 7.147 0.50 11.96 C
ATOM 35 CG BGLN A 5 1.829 2.950 7.647 0.50 10.81 C
ATOM 36 CD BGLN A 5 1.344 2.414 8.954 0.50 13.10 C
ATOM 37 OE1BGLN A 5 0.774 1.325 9.002 0.50 10.65 O
ATOM 38 NE2BGLN A 5 1.549 3.187 10.039 0.50 12.30 N
ATOM 39 N ASN A 6 5.514 2.664 4.856 1.00 11.99 N
ATOM 40 CA ASN A 6 6.831 2.310 4.318 1.00 12.30 C
ATOM 41 C ASN A 6 7.854 2.761 5.324 1.00 13.40 C
ATOM 42 O ASN A 6 8.219 3.943 5.374 1.00 13.92 O
ATOM 43 CB ASN A 6 7.065 3.016 2.993 1.00 12.13 C
ATOM 44 CG ASN A 6 5.961 2.735 2.003 1.00 12.77 C
ATOM 45 OD1 ASN A 6 5.798 1.604 1.551 1.00 14.27 O
ATOM 46 ND2 ASN A 6 5.195 3.747 1.679 1.00 10.07 N
ATOM 47 N TYR A 7 8.292 1.817 6.147 1.00 14.70 N
ATOM 48 CA TYR A 7 9.159 2.144 7.299 1.00 15.18 C
ATOM 49 C TYR A 7 10.603 2.331 6.885 1.00 15.91 C
ATOM 50 O TYR A 7 11.041 1.811 5.855 1.00 15.76 O
ATOM 51 CB TYR A 7 9.061 1.065 8.369 1.00 15.35 C
ATOM 52 CG TYR A 7 7.665 0.929 8.902 1.00 14.45 C
ATOM 53 CD1 TYR A 7 6.771 0.021 8.327 1.00 15.68 C
ATOM 54 CD2 TYR A 7 7.210 1.756 9.920 1.00 14.80 C
ATOM 55 CE1 TYR A 7 5.480 -0.094 8.796 1.00 13.46 C
ATOM 56 CE2 TYR A 7 5.904 1.649 10.416 1.00 14.33 C
ATOM 57 CZ TYR A 7 5.047 0.729 9.831 1.00 15.09 C
ATOM 58 OH TYR A 7 3.766 0.589 10.291 1.00 14.39 O
ATOM 59 OXT TYR A 7 11.358 2.999 7.612 1.00 17.49 O
TER 60 TYR A 7
HETATM 61 O HOH A 8 -6.471 5.227 7.124 1.00 22.62 O
HETATM 62 O HOH A 9 10.431 1.858 3.216 1.00 19.71 O
HETATM 63 O HOH A 10 -11.286 1.756 -1.468 1.00 17.08 O
HETATM 64 O HOH A 11 11.808 4.179 9.970 1.00 23.99 O
HETATM 65 O HOH A 12 13.605 1.327 9.198 1.00 26.17 O
HETATM 66 O HOH A 13 -2.749 3.429 10.024 1.00 39.15 O
HETATM 67 O HOH A 14 -1.500 0.682 10.967 1.00 43.49 O
END
"""
def exercise():
  """Regression test: exactly one backrub motion is detected in the
  two-conformer (A/B) model above, and its report string matches.
  """
  pdb_in = iotbx.pdb.hierarchy.input(pdb_string=pdb_raw)
  # Write the model out for inspection on failure.  Bug fix: the original
  # open(...).write(...) one-liner leaked the file handle; a context
  # manager closes (and flushes) it deterministically.
  with open("tmp.pdb", "w") as f:
    f.write(pdb_in.hierarchy.as_pdb_string())
  backrubs = backbone.find_backrubs(
    pdb_hierarchy=pdb_in.hierarchy)
  assert (len(backrubs) == 1)
  out = StringIO()
  backrubs[0].show(out=out)
  assert (out.getvalue() == """backrub A 4 (A,B): angle=-26.0\n"""), \
    out.getvalue()
if (__name__ == "__main__"):
  # Run the regression test directly; "OK" marks success.
  # NOTE: Python 2 print statement — this file targets Python 2 (cStringIO above).
  exercise()
  print "OK"
|
# <gh_stars>0 (dataset metadata artifact, commented out so the file parses)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.85.2),
on August 14, 2018, at 13:42
If you publish work using this script please cite the PsychoPy publications:
<NAME> (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
# Ensure that relative paths start from the same directory as this script.
# (Python 2: __file__ is bytes; decode it with the filesystem encoding.)
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)

# Store info about the experiment session
expName = 'Experiment1'  # from the Builder filename that created this script
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if not dlg.OK:  # idiom fix: was "dlg.OK == False"
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=u'C:\\Users\\Jojo\\Downloads\\Psychopy\\WTP\\CardTaskFinal1.psyexp',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename + '.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp

# Start Code - component code to be run before the window creation

# Setup the Window
win = visual.Window(
    size=(1280, 720), fullscr=True, screen=0,
    allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb',
    blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:  # idiom fix: was "!= None"
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# ---------------------------------------------------------------------------
# Stimulus construction: one Clock plus the visual components for every
# routine (Instructions, firstITI, code_2, Reward_Trial, RewardOutcome,
# Punishment_trial, PunishmentOutcome, EndText).  All parameter values are
# Builder-generated.  NOTE(review): pos/height appear to use the window's
# default (norm-like) units — confirm against the monitor settings.
# ---------------------------------------------------------------------------

# Initialize components for Routine "Instructions"
InstructionsClock = core.Clock()
Welcome = visual.TextStim(win=win, name='Welcome',
    text='Welcome to the experiment!\n\nIn this first part of the task, you will be playing a simple game \nin which you will have the chance to earn some money.\n\nPress the spacebar to continue.',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0);

# Initialize components for Routine "firstITI" (fixation cross shown once
# before the first trial)
firstITIClock = core.Clock()
firstTrialITI = visual.TextStim(win=win, name='firstTrialITI',
    text='+',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0);

# Initialize components for Routine "code_2" (no visual components; this
# routine only selects which block loop runs via the nReps counters below)
code_2Clock = core.Clock()
nRepsblock1=0  # repeats for the 'Reward' loop; set per trial in code_2
nRepsblock2=0  # repeats for the 'Loss' loop; set per trial in code_2

# Initialize components for Routine "Reward_Trial"
Reward_TrialClock = core.Clock()
polygon = visual.Rect(
    win=win, name='polygon',
    width=(0.5, 1.0)[0], height=(0.5, 1.0)[1],
    ori=0, pos=(0, 0),
    lineWidth=5, lineColor=[1,1,1], lineColorSpace='rgb',
    fillColor=[0,0,0], fillColorSpace='rgb',
    opacity=1, depth=0.0, interpolate=True)
text = visual.TextStim(win=win, name='text',
    text='?',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-1.0);
ISI = visual.TextStim(win=win, name='ISI',
    text='+',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-4.0);

# Initialize components for Routine "RewardOutcome"
RewardOutcomeClock = core.Clock()
polygon2_2 = visual.Rect(
    win=win, name='polygon2_2',
    width=(0.5, 1.0)[0], height=(0.5, 1.0)[1],
    ori=0, pos=(0, 0),
    lineWidth=5, lineColor=[1,1,1], lineColorSpace='rgb',
    fillColor=[0,0,0], fillColorSpace='rgb',
    opacity=1, depth=0.0, interpolate=True)
feedback1_2 = visual.TextStim(win=win, name='feedback1_2',
    text='default text',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-1.0);
arrow1_2 = visual.TextStim(win=win, name='arrow1_2',
    text='default text',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color=1.0, colorSpace='rgb', opacity=1,
    depth=-2.0);
ITI_2 = visual.TextStim(win=win, name='ITI_2',
    text='+',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-3.0);

# Initialize components for Routine "Punishment_trial"
Punishment_trialClock = core.Clock()
rectangle = visual.Rect(
    win=win, name='rectangle',
    width=(0.5, 1)[0], height=(0.5, 1)[1],
    ori=0, pos=(0, 0),
    lineWidth=5, lineColor=[1,1,1], lineColorSpace='rgb',
    fillColor=[0,0,0], fillColorSpace='rgb',
    opacity=1, depth=0.0, interpolate=True)
text1 = visual.TextStim(win=win, name='text1',
    text='?',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-1.0);
ISI2 = visual.TextStim(win=win, name='ISI2',
    text='+',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-4.0);

# Initialize components for Routine "PunishmentOutcome"
PunishmentOutcomeClock = core.Clock()
rectangle2_2 = visual.Rect(
    win=win, name='rectangle2_2',
    width=(0.5, 1)[0], height=(0.5, 1)[1],
    ori=0, pos=(0, 0),
    lineWidth=5, lineColor=[1,1,1], lineColorSpace='rgb',
    fillColor=[0,0,0], fillColorSpace='rgb',
    opacity=1, depth=0.0, interpolate=True)
feedback2_2 = visual.TextStim(win=win, name='feedback2_2',
    text='default text',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color=[1.000,1.000,1.000], colorSpace='rgb', opacity=1,
    depth=-1.0);
arrow2_2 = visual.TextStim(win=win, name='arrow2_2',
    text='default text',
    font='Arial',
    units='norm', pos=(0, 0), height=0.1, wrapWidth=0.2, ori=0,
    color=1.0, colorSpace='rgb', opacity=1,
    depth=-2.0);
ITI2_2 = visual.TextStim(win=win, name='ITI2_2',
    text='+',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=-3.0);

# Initialize components for Routine "EndText"
EndTextClock = core.Clock()
Finished = visual.TextStim(win=win, name='Finished',
    text='Thanks for playing!\n\nYou earned $6.00!\n\nPlease let the experimenter know you have finished the first part of the task',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0);

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "Instructions"-------
# Shows the Welcome text until the spacebar is pressed; the response key/RT
# are saved to the ExperimentHandler at the end of the routine.
t = 0
InstructionsClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
response1 = event.BuilderKeyResponse()
# keep track of which components have finished
InstructionsComponents = [Welcome, response1]
for thisComponent in InstructionsComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "Instructions"-------
# One iteration per screen refresh; the routine has no time limit and ends
# only on a keypress (or escape).
while continueRoutine:
    # get current time
    t = InstructionsClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame

    # *Welcome* updates
    if t >= 0.0 and Welcome.status == NOT_STARTED:
        # keep track of start time/frame for later
        Welcome.tStart = t
        Welcome.frameNStart = frameN  # exact frame index
        Welcome.setAutoDraw(True)

    # *response1* updates
    if t >= 0.0 and response1.status == NOT_STARTED:
        # keep track of start time/frame for later
        response1.tStart = t
        response1.frameNStart = frameN  # exact frame index
        response1.status = STARTED
        # keyboard checking is just starting
        win.callOnFlip(response1.clock.reset)  # t=0 on next screen flip
        event.clearEvents(eventType='keyboard')
    if response1.status == STARTED:
        theseKeys = event.getKeys(keyList=['space'])

        # check for quit:
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0:  # at least one key was pressed
            response1.keys = theseKeys[-1]  # just the last key pressed
            response1.rt = response1.clock.getTime()
            # a response ends the routine
            continueRoutine = False

    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in InstructionsComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished

    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()

    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "Instructions"-------
for thisComponent in InstructionsComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if response1.keys in ['', [], None]:  # No response was made
    response1.keys=None
thisExp.addData('response1.keys',response1.keys)
if response1.keys != None:  # we had a response
    thisExp.addData('response1.rt', response1.rt)
thisExp.nextEntry()
# the Routine "Instructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "firstITI"-------
# Fixed-duration (6 s) fixation cross before the first trial; non-slip
# timing via routineTimer.
t = 0
firstITIClock.reset()  # clock
frameN = -1
continueRoutine = True
routineTimer.add(6.000000)
# update component parameters for each repeat
# keep track of which components have finished
firstITIComponents = [firstTrialITI]
for thisComponent in firstITIComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "firstITI"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = firstITIClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame

    # *firstTrialITI* updates
    if t >= 0.0 and firstTrialITI.status == NOT_STARTED:
        # keep track of start time/frame for later
        firstTrialITI.tStart = t
        firstTrialITI.frameNStart = frameN  # exact frame index
        firstTrialITI.setAutoDraw(True)
    # stop slightly before the nominal 6 s so the final flip isn't missed
    frameRemains = 0.0 + 6.0- win.monitorFramePeriod * 0.75  # most of one frame period left
    if firstTrialITI.status == STARTED and t >= frameRemains:
        firstTrialITI.setAutoDraw(False)

    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in firstITIComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished

    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()

    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "firstITI"-------
for thisComponent in firstITIComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# set up handler to look after randomisation of conditions etc
# Conditions are read from an Excel sheet one directory up; 'fullRandom'
# shuffles trials across the whole condition list rather than per repeat.
alltrials = data.TrialHandler(nReps=1, method='fullRandom',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('..\\spreadsheet5.xlsx'),
    seed=None, name='alltrials')
thisExp.addLoop(alltrials)  # add the loop to the experiment
thisAlltrial = alltrials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisAlltrial.rgb)
# NOTE(review): this Builder idiom exec()s each spreadsheet column name into
# a module-level variable — a column named like an existing global (e.g.
# 'text', 'color') would shadow it; verify the spreadsheet headers.
if thisAlltrial != None:
    for paramName in thisAlltrial.keys():
        exec(paramName + '= thisAlltrial.' + paramName)
for thisAlltrial in alltrials:
currentLoop = alltrials
# abbreviate parameter names if possible (e.g. rgb = thisAlltrial.rgb)
if thisAlltrial != None:
for paramName in thisAlltrial.keys():
exec(paramName + '= thisAlltrial.' + paramName)
# ------Prepare to start Routine "code_2"-------
t = 0
code_2Clock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
if selectBlock==1:
nRepsblock1=1
nRepsblock2=0
elif selectBlock==2:
nRepsblock1=0
nRepsblock2=1
# keep track of which components have finished
code_2Components = []
for thisComponent in code_2Components:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "code_2"-------
while continueRoutine:
# get current time
t = code_2Clock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in code_2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "code_2"-------
for thisComponent in code_2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "code_2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
Reward = data.TrialHandler(nReps=nRepsblock1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=[None],
seed=None, name='Reward')
thisExp.addLoop(Reward) # add the loop to the experiment
thisReward = Reward.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisReward.rgb)
if thisReward != None:
for paramName in thisReward.keys():
exec(paramName + '= thisReward.' + paramName)
for thisReward in Reward:
currentLoop = Reward
# abbreviate parameter names if possible (e.g. rgb = thisReward.rgb)
if thisReward != None:
for paramName in thisReward.keys():
exec(paramName + '= thisReward.' + paramName)
# ------Prepare to start Routine "Reward_Trial"-------
t = 0
Reward_TrialClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
response2 = event.BuilderKeyResponse()
# keep track of which components have finished
Reward_TrialComponents = [polygon, text, response2, ISI]
for thisComponent in Reward_TrialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "Reward_Trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = Reward_TrialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *polygon* updates
if t >= 0.0 and polygon.status == NOT_STARTED:
# keep track of start time/frame for later
polygon.tStart = t
polygon.frameNStart = frameN # exact frame index
polygon.setAutoDraw(True)
frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if polygon.status == STARTED and t >= frameRemains:
polygon.setAutoDraw(False)
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text.status == STARTED and t >= frameRemains:
text.setAutoDraw(False)
# *response2* updates
if t >= 0.0 and response2.status == NOT_STARTED:
# keep track of start time/frame for later
response2.tStart = t
response2.frameNStart = frameN # exact frame index
response2.status = STARTED
# keyboard checking is just starting
win.callOnFlip(response2.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if response2.status == STARTED and t >= frameRemains:
response2.status = STOPPED
if response2.status == STARTED:
theseKeys = event.getKeys(keyList=['1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
response2.keys = theseKeys[-1] # just the last key pressed
response2.rt = response2.clock.getTime()
# *ISI* updates
if t >= 2.0 and ISI.status == NOT_STARTED:
# keep track of start time/frame for later
ISI.tStart = t
ISI.frameNStart = frameN # exact frame index
ISI.setAutoDraw(True)
frameRemains = 2.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ISI.status == STARTED and t >= frameRemains:
ISI.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Reward_TrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Reward_Trial"-------
for thisComponent in Reward_TrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if response2.keys in ['', [], None]: # No response was made
response2.keys=None
Reward.addData('response2.keys',response2.keys)
if response2.keys != None: # we had a response
Reward.addData('response2.rt', response2.rt)
outcome = []
if response2.keys == '1':
outcome = np.random.randint(1,4);
elif response2.keys == '2':
outcome = np.random.randint(6,9);
else:
outcome = "No Response";
money = []
color = []
if response2.keys == '1':
money = '+$0.50';
color = [-1.0,1.0,-1.0];
elif response2.keys == '2':
money = '+$0.50';
color = [-1.0,1.0,-1.0];
else:
money = '#';
color = [1.0,1.0,1.0];
# ------Prepare to start Routine "RewardOutcome"-------
t = 0
RewardOutcomeClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
arrow1_2.setColor(color, colorSpace='rgb')
arrow1_2.setText(money)
# keep track of which components have finished
RewardOutcomeComponents = [polygon2_2, feedback1_2, arrow1_2, ITI_2]
for thisComponent in RewardOutcomeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "RewardOutcome"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = RewardOutcomeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *polygon2_2* updates
if t >= 0 and polygon2_2.status == NOT_STARTED:
# keep track of start time/frame for later
polygon2_2.tStart = t
polygon2_2.frameNStart = frameN # exact frame index
polygon2_2.setAutoDraw(True)
frameRemains = 0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if polygon2_2.status == STARTED and t >= frameRemains:
polygon2_2.setAutoDraw(False)
# *feedback1_2* updates
if t >= 0 and feedback1_2.status == NOT_STARTED:
# keep track of start time/frame for later
feedback1_2.tStart = t
feedback1_2.frameNStart = frameN # exact frame index
feedback1_2.setAutoDraw(True)
frameRemains = 0 + 1- win.monitorFramePeriod * 0.75 # most of one frame period left
if feedback1_2.status == STARTED and t >= frameRemains:
feedback1_2.setAutoDraw(False)
if feedback1_2.status == STARTED: # only update if drawing
feedback1_2.setText(outcome, log=False)
# *arrow1_2* updates
if t >= 1 and arrow1_2.status == NOT_STARTED:
# keep track of start time/frame for later
arrow1_2.tStart = t
arrow1_2.frameNStart = frameN # exact frame index
arrow1_2.setAutoDraw(True)
frameRemains = 1 + 1.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if arrow1_2.status == STARTED and t >= frameRemains:
arrow1_2.setAutoDraw(False)
# *ITI_2* updates
if t >= 2.0 and ITI_2.status == NOT_STARTED:
# keep track of start time/frame for later
ITI_2.tStart = t
ITI_2.frameNStart = frameN # exact frame index
ITI_2.setAutoDraw(True)
frameRemains = 2.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ITI_2.status == STARTED and t >= frameRemains:
ITI_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in RewardOutcomeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "RewardOutcome"-------
for thisComponent in RewardOutcomeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.nextEntry()
# completed nRepsblock1 repeats of 'Reward'
# set up handler to look after randomisation of conditions etc
Loss = data.TrialHandler(nReps=nRepsblock2, method='random',
extraInfo=expInfo, originPath=-1,
trialList=[None],
seed=None, name='Loss')
thisExp.addLoop(Loss) # add the loop to the experiment
thisLos = Loss.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisLos.rgb)
if thisLos != None:
for paramName in thisLos.keys():
exec(paramName + '= thisLos.' + paramName)
for thisLos in Loss:
currentLoop = Loss
# abbreviate parameter names if possible (e.g. rgb = thisLos.rgb)
if thisLos != None:
for paramName in thisLos.keys():
exec(paramName + '= thisLos.' + paramName)
# ------Prepare to start Routine "Punishment_trial"-------
t = 0
Punishment_trialClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
response3 = event.BuilderKeyResponse()
# keep track of which components have finished
Punishment_trialComponents = [rectangle, text1, response3, ISI2]
for thisComponent in Punishment_trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "Punishment_trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = Punishment_trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *rectangle* updates
if t >= 0.0 and rectangle.status == NOT_STARTED:
# keep track of start time/frame for later
rectangle.tStart = t
rectangle.frameNStart = frameN # exact frame index
rectangle.setAutoDraw(True)
frameRemains = 0.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if rectangle.status == STARTED and t >= frameRemains:
rectangle.setAutoDraw(False)
# *text1* updates
if t >= 0.0 and text1.status == NOT_STARTED:
# keep track of start time/frame for later
text1.tStart = t
text1.frameNStart = frameN # exact frame index
text1.setAutoDraw(True)
frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text1.status == STARTED and t >= frameRemains:
text1.setAutoDraw(False)
# *response3* updates
if t >= 0.0 and response3.status == NOT_STARTED:
# keep track of start time/frame for later
response3.tStart = t
response3.frameNStart = frameN # exact frame index
response3.status = STARTED
# keyboard checking is just starting
win.callOnFlip(response3.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 0.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if response3.status == STARTED and t >= frameRemains:
response3.status = STOPPED
if response3.status == STARTED:
theseKeys = event.getKeys(keyList=['1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
response3.keys = theseKeys[-1] # just the last key pressed
response3.rt = response3.clock.getTime()
# *ISI2* updates
if t >= 2.0 and ISI2.status == NOT_STARTED:
# keep track of start time/frame for later
ISI2.tStart = t
ISI2.frameNStart = frameN # exact frame index
ISI2.setAutoDraw(True)
frameRemains = 2.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if ISI2.status == STARTED and t >= frameRemains:
ISI2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Punishment_trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Punishment_trial"-------
for thisComponent in Punishment_trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if response3.keys in ['', [], None]: # No response was made
response3.keys=None
Loss.addData('response3.keys',response3.keys)
if response3.keys != None: # we had a response
Loss.addData('response3.rt', response3.rt)
outcome = []
if response3.keys == '1':
outcome = np.random.randint(6,9);
elif response3.keys == '2':
outcome = np.random.randint(1,4);
else:
outcome = "No Response";
money = []
color = []
if response3.keys == '1':
money = '-$0.25';
color = [1.0,-1.0,-1.0];
elif response3.keys == '2':
money = '-$0.25';
color = [1.0,-1.0,-1.0];
else:
money = '#';
color = [1.0,1.0,1.0];
# ------Prepare to start Routine "PunishmentOutcome"-------
t = 0
PunishmentOutcomeClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
feedback2_2.setText(outcome)
arrow2_2.setColor(color, colorSpace='rgb')
arrow2_2.setText(money)
# keep track of which components have finished
PunishmentOutcomeComponents = [rectangle2_2, feedback2_2, arrow2_2, ITI2_2]
for thisComponent in PunishmentOutcomeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "PunishmentOutcome"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = PunishmentOutcomeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *rectangle2_2* updates
if t >= 0 and rectangle2_2.status == NOT_STARTED:
# keep track of start time/frame for later
rectangle2_2.tStart = t
rectangle2_2.frameNStart = frameN # exact frame index
rectangle2_2.setAutoDraw(True)
frameRemains = 0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if rectangle2_2.status == STARTED and t >= frameRemains:
rectangle2_2.setAutoDraw(False)
# *feedback2_2* updates
if t >= 0 and feedback2_2.status == NOT_STARTED:
# keep track of start time/frame for later
feedback2_2.tStart = t
feedback2_2.frameNStart = frameN # exact frame index
feedback2_2.setAutoDraw(True)
frameRemains = 0 + 1.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if feedback2_2.status == STARTED and t >= frameRemains:
feedback2_2.setAutoDraw(False)
# *arrow2_2* updates
if t >= 1 and arrow2_2.status == NOT_STARTED:
# keep track of start time/frame for later
arrow2_2.tStart = t
arrow2_2.frameNStart = frameN # exact frame index
arrow2_2.setAutoDraw(True)
frameRemains = 1 + 1.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if arrow2_2.status == STARTED and t >= frameRemains:
arrow2_2.setAutoDraw(False)
# *ITI2_2* updates
if t >= 2 and ITI2_2.status == NOT_STARTED:
# keep track of start time/frame for later
ITI2_2.tStart = t
ITI2_2.frameNStart = frameN # exact frame index
ITI2_2.setAutoDraw(True)
frameRemains = 2 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ITI2_2.status == STARTED and t >= frameRemains:
ITI2_2.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in PunishmentOutcomeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "PunishmentOutcome"-------
for thisComponent in PunishmentOutcomeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.nextEntry()
# completed nRepsblock2 repeats of 'Loss'
thisExp.nextEntry()
# completed 1 repeats of 'alltrials'
# ------Prepare to start Routine "EndText"-------
# Final screen: shows the "Finished" text and waits for a spacebar press,
# then saves data and shuts the experiment down.
t = 0
EndTextClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
key_resp_2 = event.BuilderKeyResponse()
# keep track of which components have finished
EndTextComponents = [Finished, key_resp_2]
for thisComponent in EndTextComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "EndText"-------
while continueRoutine:
    # get current time
    t = EndTextClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *Finished* updates
    if t >= 0.0 and Finished.status == NOT_STARTED:
        # keep track of start time/frame for later
        Finished.tStart = t
        Finished.frameNStart = frameN  # exact frame index
        Finished.setAutoDraw(True)
    # *key_resp_2* updates
    if t >= 0.0 and key_resp_2.status == NOT_STARTED:
        # keep track of start time/frame for later
        key_resp_2.tStart = t
        key_resp_2.frameNStart = frameN  # exact frame index
        key_resp_2.status = STARTED
        # keyboard checking is just starting
        win.callOnFlip(key_resp_2.clock.reset)  # t=0 on next screen flip
        event.clearEvents(eventType='keyboard')
    if key_resp_2.status == STARTED:
        theseKeys = event.getKeys(keyList=['space'])
        # check for quit:
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0:  # at least one key was pressed
            key_resp_2.keys = theseKeys[-1]  # just the last key pressed
            key_resp_2.rt = key_resp_2.clock.getTime()
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in EndTextComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "EndText"-------
for thisComponent in EndTextComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]:  # No response was made
    key_resp_2.keys=None
thisExp.addData('key_resp_2.keys',key_resp_2.keys)
if key_resp_2.keys != None:  # we had a response
    thisExp.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# the Routine "EndText" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort()  # or data files will save again on exit
win.close()
core.quit()
|
# coding: utf-8
"""
Kintone REST API
Kintone REST API # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class InlineResponse200(object):
    """Response model carrying ``rights`` and ``revision``.

    NOTE: originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech); keep the public interface stable.
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'rights': 'list[Right]',
        'revision': 'int'
    }

    # attribute name -> JSON key on the wire
    attribute_map = {
        'rights': 'rights',
        'revision': 'revision'
    }

    def __init__(self, rights=None, revision=None, local_vars_configuration=None):  # noqa: E501
        """InlineResponse200 - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )
        self._rights = None
        self._revision = None
        self.discriminator = None
        # Only run the setters for values the caller actually supplied.
        if rights is not None:
            self.rights = rights
        if revision is not None:
            self.revision = revision

    @property
    def rights(self):
        """The rights of this InlineResponse200.

        :rtype: list[Right]
        """
        return self._rights

    @rights.setter
    def rights(self, value):
        """Set the rights of this InlineResponse200.

        :type: list[Right]
        """
        self._rights = value

    @property
    def revision(self):
        """The revision of this InlineResponse200.

        :rtype: int
        """
        return self._revision

    @revision.setter
    def revision(self, value):
        """Set the revision of this InlineResponse200.

        :type: int
        """
        self._revision = value

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(val):
            # Recurse into nested models/collections exactly like the
            # generated code: list first, then to_dict, then dict.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are equal models."""
        if not isinstance(other, InlineResponse200):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self.__eq__(other)
|
<filename>PicACG.py
#!/usr/bin/env python3
import hmac
import time
import json
import uuid
import urllib.parse
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# 关闭安全请求警告
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class Pica(object):
    """Signed HTTP client for the PicACG (picacomic) REST API."""

    def __init__(self, api_key, api_secret, token='', channel=1, quality='original', proxies=None, debug=False):
        """
        :param api_key: str API key
        :param api_secret: str API secret used to sign every request
        :param token: str user token; may be left empty until sign_in
        :param channel: int traffic channel, one of 1, 2, 3
        :param quality: str image quality: original, low, medium or high
        :param proxies: str proxies; default is no proxy
        :param debug: bool enable debug output; off by default
        """
        self.apiKey = api_key
        self.apiSecret = api_secret
        self.token = token
        self.proxies = proxies
        self.debug = debug
        self.url = 'https://picaapi.picacomic.com/'
        # Header template; time/nonce/signature/authorization are
        # refreshed in place before every request.
        self.header = {
            'Host': 'picaapi.picacomic.com',
            'authorization': token,
            'api-key': api_key,
            'accept': 'application/vnd.picacomic.com.v1+json',
            'app-channel': str(channel),
            'time': '',
            'nonce': '',
            'signature': '',
            'app-version': '2.2.1.3.3.4',
            'app-uuid': 'defaultUuid',
            'image-quality': quality,
            'app-platform': 'android',
            'app-build-version': '45',
            'accept-encoding': 'gzip',
            'user-agent': 'okhttp/3.8.1'
        }

    def get_signature(self, path, method, timestamp, nonce):
        """Compute the HMAC-SHA256 signature of one request.

        :param path: str request path, e.g. users/profile
        :param method: str HTTP method, POST or GET
        :param timestamp: str unix timestamp in seconds
        :param nonce: str 32 random hex characters (uuid4 without dashes)
        """
        payload = ''.join((path, str(timestamp), nonce, method, self.apiKey)).lower()
        mac = hmac.new(self.apiSecret.encode(), payload.encode(), 'sha256')
        return mac.hexdigest()

    def _refresh_headers(self, url, method):
        """Stamp self.header in place with fresh time/nonce/signature."""
        ts = str(int(time.time()))
        nonce = uuid.uuid4().hex
        self.header['time'] = ts
        self.header['nonce'] = nonce
        self.header['authorization'] = self.token
        self.header['signature'] = self.get_signature(url, method, ts, nonce)

    def post(self, url, data=None):
        """Signed POST request to *url* (relative to the API root)."""
        self._refresh_headers(url, 'POST')
        headers = self.header.copy()
        # POST endpoints are sent without the authorization header.
        headers.pop('authorization')
        headers['content-type'] = 'application/json; charset=UTF-8'
        if self.debug:
            print(url)
            print(headers)
        return requests.post(url=self.url + url, data=data, headers=headers, verify=False, proxies=self.proxies)

    def get(self, url):
        """Signed GET request to *url* (relative to the API root)."""
        self._refresh_headers(url, 'GET')
        if self.debug:
            print(url)
            print(self.header)
        return requests.get(url=self.url + url, headers=self.header, verify=False, proxies=self.proxies)

    def sign_in(self, email, password):
        """Log in; on success remember the token, return the server message."""
        body = json.dumps({'email': email, 'password': password})
        try:
            reply = self.post('auth/sign-in', body).json()
        except ValueError:
            return ''
        if reply['code'] == 200:
            self.token = reply['data']['token']
        return reply['message']

    # --- account ---
    def user_info(self):
        # Profile of the signed-in user.
        return self.get('users/profile').json()

    def favourite(self, page='1', sort='dd'):
        # Favourites of the signed-in user.
        return self.get('users/favourite?s={0}&page={1}'.format(sort, page)).json()

    def my_comments(self, page='1'):
        # Comments posted by the signed-in user.
        return self.get('users/my-comments?page={0}'.format(page)).json()

    # --- promotions and recommendations ---
    def collections(self):
        # Front-page recommendations.
        return self.get('collections').json()

    def announcements(self, page='1'):
        # Site announcements.
        return self.get('announcements?page={0}'.format(page)).json()

    def init_app(self):
        # App bootstrap info: some categories, server addresses, app version.
        return self.get('init?platform=android').json()

    def banners(self):
        # Front-page banner ads.
        return self.get('banners').json()

    # --- search and categories ---
    def keywords(self):
        # Suggested search keywords.
        return self.get('keywords').json()

    def categories(self):
        # Comic categories.
        return self.get('categories').json()

    def advanced_search(self, keyword, page='1', sort='dd'):
        # Advanced keyword search.
        body = json.dumps({'keyword': keyword, 'sort': sort})
        return self.post('comics/advanced-search?page={0}'.format(page), body).json()

    # --- games ---
    def games_list(self, page='1'):
        # Paginated game list.
        return self.get('games?page={0}'.format(page)).json()

    def games(self, game_id):
        # Details for one game.
        return self.get('games/{0}'.format(game_id)).json()

    def games_comments(self, game_id, page='1'):
        # Comments for one game.
        return self.get('games/{0}/comments?page={1}'.format(game_id, page)).json()

    # --- comics ---
    def comics_list(self, page='1', c=None, t=None, a=None, f=None, s=None, ct=None, ca=None):
        """Comic category listing.

        Params: page: page number, c: category, t: tag, a: author,
        f: unknown, s: sort, ct: translation group, ca: unknown.
        Sort values: dd newest, da oldest, ld most liked, vd most viewed.
        """
        url = 'comics?page=' + page
        for param, value in (('c', c), ('t', t), ('a', a), ('f', f),
                             ('s', s), ('ct', ct), ('ca', ca)):
            if value:
                url += '&' + param + '=' + urllib.parse.quote(value, safe='()')
        return self.get(url).json()

    def comics_info(self, comic_id):
        # Details for one comic.
        return self.get('comics/{0}'.format(comic_id)).json()

    def comics_comments(self, comic_id, page='1'):
        # Comments for one comic.
        return self.get('comics/{0}/comments?page={1}'.format(comic_id, page)).json()

    def comments_children(self, comic_id, page='1'):
        # Replies to a comment (comics and games alike).
        return self.get('comments/{0}/childrens?page={1}'.format(comic_id, page)).json()

    def comics_recommendation(self, comic_id):
        # Related comics ("readers of this also read").
        return self.get('comics/{0}/recommendation'.format(comic_id)).json()

    def comics_eps(self, comic_id, page='1'):
        # Episodes of one comic.
        return self.get('comics/{0}/eps?page={1}'.format(comic_id, page)).json()

    def comic(self, comic_id, order, page='1'):
        # Page content (image URLs) of one episode.
        return self.get('comics/{0}/order/{1}/pages?page={2}'.format(comic_id, order, page)).json()

    def comics_random(self):
        # Random comics.
        return self.get('comics/random').json()

    # --- leaderboards ---
    def leaderboard(self, tt='H24', ct='VC'):
        # Comic leaderboard.
        # tt values: H24 last 24 hours, D7 last 7 days, D30 last 30 days.
        return self.get('comics/leaderboard?tt={0}&ct={1}'.format(tt, ct)).json()

    def knight_leader_board(self):
        # Knight (uploader count) leaderboard.
        return self.get('comics/knight-leaderboard').json()
|
<reponame>vahidrnaderi/django-shop
from django.utils import timezone
from rest_framework import serializers
from shop.conf import app_settings
from shop.shopmodels.cart import CartModel
# from shop.shopmodels.defaults.cart import Cart
from shop.shopmodels.order import OrderModel
# from shop.shopmodels.defaults.order import Order
from shop.shopmodifiers.pool import cart_modifiers_pool
from shop.rest.money import MoneyField
class OrderListSerializer(serializers.ModelSerializer):
    """Serializer for the list view of Orders.

    ``number``, ``url`` and ``status`` are read-only values sourced from
    attributes/methods of the order model; monetary amounts are rendered
    through the shop's ``MoneyField``.
    """
    number = serializers.CharField(
        source='get_number',
        read_only=True,
    )
    url = serializers.URLField(
        source='get_absolute_url',
        read_only=True,
    )
    status = serializers.CharField(
        source='status_name',
        read_only=True,
    )
    subtotal = MoneyField()
    total = MoneyField()

    class Meta:
        model = OrderModel
        # model = Order
        fields = ['number', 'url', 'created_at', 'updated_at', 'subtotal', 'total', 'status',
                  'shipping_address_text', 'billing_address_text'] # TODO: these fields are not part of the base model
        read_only_fields = ['shipping_address_text', 'billing_address_text']
class OrderDetailSerializer(OrderListSerializer):
    """Serializer for the detail view of an Order.

    Extends the list serializer with the order items, payment bookkeeping
    fields and three write-only inputs (``annotation``, ``reorder``,
    ``cancel``) that are consumed by :meth:`update`.
    """
    items = app_settings.ORDER_ITEM_SERIALIZER(
        many=True,
        read_only=True,
    )
    extra = serializers.DictField(read_only=True)
    amount_paid = MoneyField(read_only=True)
    outstanding_amount = MoneyField(read_only=True)
    cancelable = serializers.BooleanField(read_only=True)
    is_partially_paid = serializers.SerializerMethodField(
        method_name='get_partially_paid',
        help_text="Returns true, if order has been partially paid",
    )
    # Write-only action inputs handled by update() below.
    annotation = serializers.CharField(
        write_only=True,
        required=False,
    )
    reorder = serializers.BooleanField(
        write_only=True,
        default=False,
    )
    cancel = serializers.BooleanField(
        write_only=True,
        default=False,
    )
    active_payment_method = serializers.SerializerMethodField()
    active_shipping_method = serializers.SerializerMethodField()

    class Meta:
        model = OrderModel
        # model = Order
        exclude = ['id', 'customer', 'stored_request', '_subtotal', '_total']
        read_only_fields = ['shipping_address_text', 'billing_address_text'] # TODO: not part of OrderBase

    def get_partially_paid(self, order):
        """Return True when any amount has been paid towards the order."""
        return order.amount_paid > 0

    def get_active_payment_method(self, order):
        """Resolve the order's payment modifier into a {'value', 'label'} dict."""
        modifier = cart_modifiers_pool.get_active_payment_modifier(order.extra.get('payment_modifier'))
        value, label = modifier.get_choice() if modifier else (None, "")
        return {'value': value, 'label': label}

    def get_active_shipping_method(self, order):
        """Resolve the order's shipping modifier into a {'value', 'label'} dict."""
        modifier = cart_modifiers_pool.get_active_shipping_modifier(order.extra.get('shipping_modifier'))
        value, label = modifier.get_choice() if modifier else (None, "")
        return {'value': value, 'label': label}

    def update(self, order, validated_data):
        """Apply the write-only inputs to *order*.

        Appends a timestamped ``annotation`` to ``extra['addendum']``,
        calls ``readd_to_cart`` on the request cart when ``reorder`` is
        set (presumably re-adding the order's items — confirm in the
        order model), and cancels the order when ``cancel`` is set and
        the order is cancelable.
        """
        order.extra.setdefault('addendum', [])
        if validated_data.get('annotation'):
            timestamp = timezone.now().isoformat()
            order.extra['addendum'].append((timestamp, validated_data['annotation']))
            order.save()
        if validated_data['reorder'] is True:
            cart = CartModel.objects.get_from_request(self.context['request'])
            # cart = Cart.objects.get_from_request(self.context['request'])
            order.readd_to_cart(cart)
        if validated_data['cancel'] is True and order.cancelable():
            order.cancel_order()
            order.save(with_notification=True)
        return order
|
<filename>countess/tests/utilities.py
"""
Enrich2 tests utils module
==========================
Module consists of assorted utility functions.
"""
import os
import json
import pandas as pd
from ..base.config_constants import SCORER, SCORER_OPTIONS, SCORER_PATH
from ..base.config_constants import FORCE_RECALCULATE, COMPONENT_OUTLIERS
from ..base.config_constants import TSV_REQUESTED, OUTPUT_DIR_OVERRIDE
from ..base.utils import multi_index_tsv_to_dataframe
# Directory containing this module; used to build absolute test-data paths.
TOP_LEVEL = os.path.dirname(__file__)

# Default StoreManager keyword parameters shared by the test cases.
DEFAULT_STORE_PARAMS = {
    FORCE_RECALCULATE: False,
    COMPONENT_OUTLIERS: False,
    TSV_REQUESTED: False,
    OUTPUT_DIR_OVERRIDE: False,
}

# Public API of this utility module.
__all__ = [
    "TOP_LEVEL",
    "DEFAULT_STORE_PARAMS",
    "create_file_path",
    "load_config_data",
    "load_df_from_pkl",
    "load_df_from_txt",
    "dispatch_loader",
    "update_cfg_file",
    "save_result_to_pkl",
    "save_result_to_txt",
    "print_test_comparison",
    "SCORING_ATTRS",
    "SCORING_PATHS",
]
def create_file_path(fname, direc="data/result/"):
    """
    Build an absolute path to a data file inside the tests directory.

    Parameters
    ----------
    fname : `str`
        The name of the file.
    direc : `str`
        The directory of the file, relative to the tests directory.

    Returns
    -------
    `str`
        Absolute file path.
    """
    return os.path.join(TOP_LEVEL, direc, fname)
def load_config_data(fname, direc="data/config/"):
    """
    Utility function to load a JSON configuration file.

    Parameters
    ----------
    fname : `str`
        Name of file in the directory `direc`.
    direc : `str`, optional
        Directory where the file is relative to :py:mod:`~enrich2.tests`.

    Returns
    -------
    `dict`
        Dictionary containing the loaded key-value pairs.

    Raises
    ------
    IOError
        If the file cannot be opened or does not contain valid JSON.
    """
    path = create_file_path(fname, direc)
    try:
        with open(path, "rt") as fp:
            return json.load(fp)
    except (IOError, ValueError) as err:
        # Fixed: message was missing the closing quote; chain the cause.
        raise IOError("Failed to open '{}'".format(path)) from err
def load_df_from_txt(fname, direc="data/result/", sep="\t"):
    """
    Utility function to load a table stored as txt with an arbitrary separator.

    Parameters
    ----------
    fname : `str`
        Name of file in the directory ``direc``.
    direc : `str`
        Directory where the file is relative to :py:mod:`~enrich2.tests`
    sep : `str`
        Delimiter to use between columns.

    Returns
    -------
    :py:class:`~pandas.DataFrame`
        A Pandas DataFrame object parsed from the file.

    Raises
    ------
    IOError
        If the file could not be opened.
    """
    path = create_file_path(fname, direc)
    try:
        return multi_index_tsv_to_dataframe(path, sep, header_rows=None)
    except IOError as err:
        # Fixed: message was missing the closing quote; chain the cause.
        raise IOError("Failed to open '{}'".format(path)) from err
def load_df_from_pkl(fname, direc="data/result/"):
    """
    Utility function to load a table stored in :py:mod:`pickle` format.

    Parameters
    ----------
    fname : `str`
        Name of file in the directory ``direc``.
    direc : `str`
        Directory where the file is relative to :py:mod:`~enrich2.tests`.

    Returns
    -------
    :py:class:`~pandas.DataFrame`
        A Pandas DataFrame object parsed from the file.

    Raises
    ------
    IOError
        If the file could not be opened.
    """
    path = create_file_path(fname, direc)
    try:
        return pd.read_pickle(path)
    except IOError as err:
        # Fixed: message was missing the closing quote; chain the cause.
        raise IOError("Failed to open '{}'".format(path)) from err
def save_result_to_txt(test_obj, direc, sep="\t"):
    """
    Dump a :py:class:`~pd.HDFStore` as a set of delimited text files,
    one file per :py:class:`~pd.DataFrame` held in the store.

    Parameters
    ----------
    test_obj : :py:class:`~pd.HDFStore`
        HDFStore object to save to delimited text files.
    direc : `str`
        Directory to save the files to.
    sep : `str`
        Delimiter to use between columns.

    Returns
    -------
    None
        This function does not return anything.
    """
    for store_key in test_obj.store:
        # Store keys look like '/a/b'; drop the leading '/' and flatten.
        out_name = "{}/{}.tsv".format(direc, store_key[1:].replace("/", "_"))
        out_path = create_file_path(out_name, direc="")
        print("saving {} to {}".format(store_key, out_path))
        frame = test_obj.store[store_key]
        frame.to_csv(out_path, sep=sep, index=True, na_rep="NaN")
    return
def save_result_to_pkl(test_obj, direc):
    """
    Dump a :py:class:`~pd.HDFStore` as a set of pickle files, one '.pkl'
    file per :py:class:`~pd.DataFrame` held in the store.

    Parameters
    ----------
    test_obj : :py:class:`~pandas.DataFrame`
        HDFStore object to save to pickle files.
    direc : `str`
        Directory to save the files to.

    Returns
    -------
    None
        This function does not return anything.
    """
    for store_key in test_obj.store:
        # Store keys look like '/a/b'; drop the leading '/' and flatten.
        out_name = "{}/{}.pkl".format(direc, store_key[1:].replace("/", "_"))
        out_path = create_file_path(out_name, direc="")
        print("saving {} to {}".format(store_key, out_path))
        test_obj.store[store_key].to_pickle(out_path)
    return
def dispatch_loader(fname, direc, sep="\t"):
    """
    Utility function to load a file based on its extension.

    Parameters
    ----------
    fname : `str` {'pkl', 'tsv', 'txt'}
        Filename with extension
    direc : `str`
        Directory containing the file.
    sep : `str`
        Delimiter to use between columns.

    Returns
    -------
    :py:class:`~pandas.DataFrame`
        DataFrame parsed from the file.

    Raises
    ------
    IOError
        If the extension is not one of 'tsv', 'txt' or 'pkl'.
    """
    ext = fname.split(".")[-1]
    # Fixed: was ``ext in ("tsv" or "txt")`` which evaluates to a substring
    # test against the string "tsv" — so "txt" files never matched and
    # spurious extensions like "ts" or "sv" did.
    if ext in ("tsv", "txt"):
        return load_df_from_txt(fname, direc, sep)
    elif ext == "pkl":
        return load_df_from_pkl(fname, direc)
    else:
        raise IOError("Unexpected file extension {}.".format(ext))
def print_test_comparison(test_name, expected, result):
    """
    Nicely format a test comparison as a string.

    Parameters
    ----------
    test_name : `str`
        Name of the test.
    expected : :py:class:`~pandas.DataFrame`
        Expected test result that can be represented as text
    result : :py:class:`~pandas.DataFrame`
        Observed test result that can be represented as text

    Returns
    -------
    `str`
        String object representing the comparison.
    """
    sections = [
        "",
        "-" * 60,
        "{}".format(test_name),
        "-" * 60,
        "-" * 26 + "EXPECTED" + "-" * 26,
        "{}".format(expected),
        "-" * 28 + "END" + "-" * 29,
        "-" * 27 + "RESULT" + "-" * 27,
        "{}".format(result),
        "-" * 28 + "END" + "-" * 29,
        "",
    ]
    # Each section ends with a newline; the trailing empty entry plus the
    # final "\n" reproduces the blank line at the end of the report.
    return "\n".join(sections) + "\n"
def update_cfg_file(cfg, scoring, logr):
    """
    Update the scorer fields of a configuration dictionary in place.

    Parameters
    ----------
    cfg : `dict`
        Dictionary that can initialize a
        :py:class:`~enrich2.base.store.StoreManager` object.
    scoring : {'WLS', 'OLS', 'counts', 'ratios', 'simple'}
        Choice of scoring option
    logr : {'complete', 'full', 'wt'}
        Choice of scoring normalization method

    Returns
    -------
    `dict`
        The modified dictionary (modified in-place).
    """
    scorer_cfg = cfg[SCORER]
    scorer_cfg[SCORER_PATH] = SCORING_PATHS.get(scoring)
    scorer_cfg[SCORER_OPTIONS] = SCORING_ATTRS.get(scoring).get(logr)
    return cfg
# Path to the scorer plugin script implementing each scoring method.
SCORING_PATHS = {
    "counts": create_file_path("counts_scorer.py", "data/plugins"),
    "ratios": create_file_path("ratios_scorer.py", "data/plugins"),
    "simple": create_file_path("simple_scorer.py", "data/plugins"),
    "WLS": create_file_path("regression_scorer.py", "data/plugins"),
    "OLS": create_file_path("regression_scorer.py", "data/plugins"),
}

# Scorer options keyed first by scoring method, then by log-ratio
# normalization method ('full', 'complete', 'wt').
SCORING_ATTRS = {
    "WLS": {
        "full": {"logr_method": "full", "weighted": True},
        "complete": {"logr_method": "complete", "weighted": True},
        "wt": {"logr_method": "wt", "weighted": True},
    },
    "OLS": {
        "full": {"logr_method": "full", "weighted": False},
        "complete": {"logr_method": "complete", "weighted": False},
        "wt": {"logr_method": "wt", "weighted": False},
    },
    "ratios": {
        "full": {"logr_method": "full"},
        "complete": {"logr_method": "complete"},
        "wt": {"logr_method": "wt"},
    },
    "counts": {"full": {}, "complete": {}, "wt": {}},
    "simple": {"full": {}, "complete": {}, "wt": {}},
}
|
# Standard Library
import io
import json
import logging
import sys
# external
import cybox.utils.caches
from sdv import codes, errors, scripts
import stix2
# internal
from stix2slider.convert_stix import convert_bundle
from stix2slider.options import (
get_option_value, get_validator_options, setup_logger
)
# Module-level logger
log = logging.getLogger(__name__)
def slide_file(fn, encoding="utf-8"):
    """Convert the STIX 2.x JSON file *fn* to STIX 1.x XML.

    Returns the XML string, or None when the bundle could not be
    converted to a STIX package.
    """
    cybox.utils.caches.cache_clear()
    setup_logger(fn)
    validator_options = get_validator_options()
    # Fixed: initialise xml so a falsy stix_package returns None instead
    # of raising UnboundLocalError at the final return.
    xml = None
    with io.open(fn, "r", encoding=encoding) as json_data:
        json_content = json.load(json_data)
    obj = stix2.parse(json_content, allow_custom=True, version=get_option_value("version_of_stix2x"))
    stix_package = convert_bundle(obj)
    if stix_package:
        xml = stix_package.to_xml(encoding=None)
        validator_options.in_files = io.StringIO(xml)
        try:
            scripts.set_output_level(validator_options)
            validation_results = scripts.validate_file(
                validator_options.in_files,
                validator_options
            )
            results = {stix_package.id_: validation_results}
            # Print stix-validator results
            scripts.print_results(results, validator_options)
        except (errors.ValidationError, IOError) as ex:
            scripts.error(
                "Validation error occurred: '%s'" % str(ex),
                codes.EXIT_VALIDATION_ERROR
            )
        except Exception:
            log.exception("Fatal error occurred", extra={'ecode': 0})
            sys.exit(codes.EXIT_FAILURE)
    return xml
def slide_string(string):
    """Convert a STIX 2.x JSON string to STIX 1.x XML.

    Returns the XML string, or None when the parsed bundle could not be
    converted to a STIX package.
    """
    cybox.utils.caches.cache_clear()
    obj = stix2.parse(string)
    setup_logger(obj["id"])
    validator_options = get_validator_options()
    stix_package = convert_bundle(obj)
    # Fixed: initialise xml so a falsy stix_package returns None instead
    # of raising UnboundLocalError at the final return.
    xml = None
    if stix_package:
        xml = stix_package.to_xml(encoding=None)
        validator_options.in_files = io.StringIO(xml)
        try:
            scripts.set_output_level(validator_options)
            validation_results = scripts.validate_file(
                validator_options.in_files,
                validator_options
            )
            results = {stix_package.id_: validation_results}
            # Print stix-validator results
            scripts.print_results(results, validator_options)
        except (errors.ValidationError, IOError) as ex:
            scripts.error(
                "Validation error occurred: '%s'" % str(ex),
                codes.EXIT_VALIDATION_ERROR
            )
        except Exception:
            log.exception("Fatal error occurred", extra={'ecode': 0})
            sys.exit(codes.EXIT_FAILURE)
    return xml
def slide_bundle(bundle):
    """Convert an already-parsed STIX 2.x bundle to STIX 1.x XML.

    Returns the XML string, or None when the bundle could not be
    converted to a STIX package.
    """
    cybox.utils.caches.cache_clear()
    setup_logger(bundle["id"])
    stix_package = convert_bundle(bundle)
    validator_options = get_validator_options()
    # Fixed: initialise xml so a falsy stix_package returns None instead
    # of raising UnboundLocalError at the final return.
    xml = None
    if stix_package:
        xml = stix_package.to_xml(encoding=None)
        validator_options.in_files = io.StringIO(xml)
        try:
            scripts.set_output_level(validator_options)
            validation_results = scripts.validate_file(
                validator_options.in_files,
                validator_options
            )
            results = {stix_package.id_: validation_results}
            # Print stix-validator results
            scripts.print_results(results, validator_options)
        except (errors.ValidationError, IOError) as ex:
            scripts.error(
                "Validation error occurred: '%s'" % str(ex),
                codes.EXIT_VALIDATION_ERROR
            )
        except Exception:
            log.exception("Fatal error occurred", extra={'ecode': 0})
            sys.exit(codes.EXIT_FAILURE)
    return xml
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import argparse
import codecs
import logging
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
from jinja2 import Template
from PIL import Image
from . import plugins
logging.basicConfig(level=logging.WARNING)
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# Jinja2 template for the bundle's Info.plist.
# Fixed: the environment loop used ``environment.iteritems()``, which does
# not exist on Python 3 dicts and made rendering fail with an undefined
# attribute whenever environment variables were supplied; ``items()`` works
# on both Python 2 and 3.
INFO_PLIST_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    {% if environment -%}
    <key>LSEnvironment</key>
    <dict>
        {% for key, value in environment.items() -%}
        <key>{{ key }}</key>
        <string>{{ value }}</string>
        {% endfor -%}
    </dict>
    {% endif -%}
    <key>CFBundleDevelopmentRegion</key>
    <string>English</string>
    <key>CFBundleExecutable</key>
    <string>{{ executable }}</string>
    {% if icon_file -%}
    <key>CFBundleIconFile</key>
    <string>{{ icon_file }}</string>
    {% endif -%}
    <key>CFBundleIdentifier</key>
    <string>{{ group }}.{{ name }}</string>
    <key>CFBundleInfoDictionaryVersion</key>
    <string>6.0</string>
    <key>CFBundleName</key>
    <string>{{ name }}</string>
    <key>CFBundleDisplayName</key>
    <string>{{ name }}</string>
    <key>CFBundleShortVersionString</key>
    <string>{{ short_version }}</string>
    <key>CFBundleVersion</key>
    <string>{{ version }}</string>
    <key>CFBundlePackageType</key>
    <string>APPL</string>
    <key>CFBundleSignature</key>
    <string>????</string>
    {% if hidden -%}
    <key>LSUIElement</key>
    <string>1</string>
    {% endif -%}
    <key>NSSupportsAutomaticGraphicsSwitching</key>
    <true/>
</dict>
</plist>
'''.strip()

# Contents of the bundle's PkgInfo file: type APPL, unknown signature.
PKG_INFO_CONTENT = 'APPL????'
class TemporaryDirectory(object):
    """Context manager around a ``tempfile.mkdtemp`` directory.

    The directory is removed recursively on :meth:`close` or when the
    ``with`` block exits; afterwards :attr:`path` is ``None``.
    """

    def __init__(self):
        self._path = tempfile.mkdtemp()

    def __enter__(self):
        return self._path

    def __exit__(self, typ, value, traceback):
        self.close()

    def close(self):
        """Delete the directory tree and forget its path."""
        shutil.rmtree(self._path)
        self._path = None

    @property
    def path(self):
        """The directory path, or ``None`` once closed."""
        return self._path
class Arguments(object):
    """Read-only attribute bag built from keyword arguments.

    Values are reachable as attributes and by subscription; ``keys()``
    plus ``__getitem__`` make instances usable with ``**`` unpacking.
    Assigning to any attribute raises ``NotImplementedError``.
    """

    def __init__(self, **kwargs):
        # Bypass the overridden __setattr__ below to store the backing dict.
        object.__setattr__(self, '_members', dict(kwargs))

    def __getattr__(self, attr):
        return self._members[attr]

    def __setattr__(self, key, value):
        raise NotImplementedError

    def __getitem__(self, item):
        return getattr(self, item)

    def keys(self):
        return self._members.keys()
class MissingIconError(Exception):
    """Error raised when a required icon image is missing."""
    pass
class AppAlreadyExistingError(Exception):
    """Error raised when the target .app path already exists."""
    pass
class DmgAlreadyExistingError(Exception):
    """Error raised when the target .dmg path already exists."""
    pass
class InvalidAppPath(Exception):
    """Error raised for an invalid app bundle path."""
    pass
def parse_args():
    """Parse the command line and normalize everything into an Arguments bag.

    Combines the argparse results with plugin-contributed arguments and fills
    in defaults: app path derived from the executable basename, group
    'undefined', version '0.0.0'.
    """
    def parse_commandline():
        # Build the argparse parser; prints help and exits when invoked
        # without any arguments at all.
        parser = argparse.ArgumentParser(
            description='''
            Creates a runnable application for Mac OS X with references to
            system libraries. The result is a NON-self-contained app bundle.'''
        )
        parser.add_argument(
            '-d',
            '--executable-directory',
            dest='executable_root_path',
            action='store',
            type=os.path.abspath,
            help='Defines the executable root directory that will be included in the app.'
        )
        parser.add_argument(
            '-e',
            '--environment',
            dest='environment_vars',
            action='store',
            nargs='+',
            help='Specifies which environment variables -- set on the current interpreter startup -- '
                 ' shall be included in the app bundle.'
        )
        parser.add_argument(
            '-i',
            '--icon',
            dest='icon_path',
            action='store',
            type=os.path.abspath,
            help='Image file that is used for app icon creation. It must be quadratic with a '
                 'resolution of 1024x1024 pixels or more.'
        )
        parser.add_argument(
            '-g',
            '--group',
            dest='group',
            action='store',
            help='Developer group name that is saved to the internal app plist.'
        )
        parser.add_argument(
            '-n', '--hidden', dest='hidden', action='store_true', help='Hides the app icon in the dock when given.'
        )
        parser.add_argument(
            '-o',
            '--output',
            dest='app_path',
            action='store',
            type=os.path.abspath,
            help='Sets the path the app will be saved to.'
        )
        parser.add_argument(
            '-v',
            '--version',
            dest='version_string',
            action='store',
            help='Specifies the version string of the program.'
        )
        parser.add_argument(
            'executable_path',
            action='store',
            type=os.path.abspath,
            help='Sets the executable that is started when the app is opened.'
        )
        # Plugins may register additional options before parsing.
        plugins.add_plugin_command_line_arguments(parser)
        if len(sys.argv) < 2:
            parser.print_help()
            sys.exit(1)
        args = parser.parse_args()
        return args

    def map_environment_arguments_to_dict(enviroment_argument_list):
        # Entries are 'KEY=VALUE'; a bare 'KEY' inherits its value from the
        # current interpreter environment (KeyError when unset).
        # NOTE(review): a value containing '=' splits into 3+ parts and makes
        # dict() raise -- consider item.split('=', 1).
        if enviroment_argument_list is not None:
            keys_and_values = [item.split('=') for item in enviroment_argument_list]
            for item in keys_and_values:
                if len(item) < 2:
                    item.append(os.environ[item[0]])
            result = dict(keys_and_values)
        else:
            result = None
        return result

    args = parse_commandline()
    checked_args = {}
    checked_args['executable_root_path'] = args.executable_root_path
    checked_args['icon_path'] = args.icon_path
    checked_args['group'] = args.group if args.group else 'undefined'
    checked_args['hidden'] = args.hidden
    checked_args['environment_vars'] = map_environment_arguments_to_dict(args.environment_vars)
    if args.app_path is not None:
        checked_args['app_path'] = args.app_path
    else:
        # Default output: '<executable basename>.app' in the current directory.
        basename_without_ext = os.path.splitext(os.path.basename(os.path.abspath(args.executable_path)))[0]
        checked_args['app_path'] = '{basename_without_ext}.app'.format(basename_without_ext=basename_without_ext)
    if args.version_string is not None:
        checked_args['version_string'] = args.version_string
    else:
        checked_args['version_string'] = '0.0.0'
    checked_args['executable_path'] = args.executable_path
    # Let the plugin selected by the executable's extension contribute args.
    plugin_args = plugins.parse_command_line_arguments(os.path.splitext(checked_args['executable_path'])[1], args)
    args = checked_args.copy()
    args.update(plugin_args)
    return Arguments(**args)
def create_info_plist_content(
    app_name,
    version,
    group,
    executable_path,
    executable_root_path=None,
    icon_path=None,
    hidden=False,
    environment_vars=None
):
    """Render the Info.plist XML for the app bundle from INFO_PLIST_TEMPLATE.

    The executable is referenced relative to *executable_root_path* when it
    lives below it, absolutely otherwise. *environment_vars* is a list of
    names resolved from the current process environment (KeyError when a
    name is unset).
    """
    def get_short_version(version):
        """Derive a dotted MAJOR.MINOR.PATCH CFBundleShortVersionString."""
        match_obj = re.search(r'\d+(\.\d+){0,2}', version)
        if match_obj is None:
            return '0.0.0'
        short_version = match_obj.group()
        # Pad with '.0' components until three components are present.
        while not re.match(r'\d+\.\d+\.\d+', short_version):
            short_version += '.0'
        return short_version

    root = executable_root_path if executable_root_path is not None else os.path.dirname(executable_path)
    if os.path.abspath(executable_path).startswith(os.path.abspath(root)):
        executable = os.path.relpath(executable_path, root)
    else:
        executable = executable_path
    variables = {
        'executable': executable,
        'icon_file': os.path.basename(icon_path) if icon_path is not None else None,
        'name': app_name,
        'group': group,
        'hidden': hidden,
        'short_version': get_short_version(version),
        'version': version
    }
    if environment_vars is not None:
        # Snapshot each requested variable from the current environment.
        variables['environment'] = {key: os.environ[key] for key in environment_vars}
    return Template(INFO_PLIST_TEMPLATE).render(**variables)
def create_icon_set(icon_path, iconset_out_path):
    """Build a macOS .icns file from a single (large, square) source image.

    Renders the standard icon sizes (16..512 px at 1x and 2x) into a
    temporary .iconset directory, then converts it with the system
    ``iconutil`` tool into *iconset_out_path*.
    """
    with TemporaryDirectory() as tmp_dir:
        tmp_icns_dir = os.path.join(tmp_dir, 'icon.iconset')
        os.mkdir(tmp_icns_dir)
        original_icon = Image.open(icon_path)
        # Generate 'icon_<size>x<size>.png' and the matching '@2x' variant
        # (pixel size doubled) for every base size iconutil expects.
        for name, size in (
            ('icon_{size}x{size}{suffix}.png'.format(size=size, suffix=suffix), factor * size)
            for size in (16, 32, 128, 256, 512) for factor, suffix in ((1, ''), (2, '@2x'))
        ):
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
            # Pillow needs Image.LANCZOS -- confirm the pinned version.
            resized_icon = original_icon.resize((size, size), Image.ANTIALIAS)
            resized_icon.save(os.path.join(tmp_icns_dir, name))
        subprocess.check_call(('iconutil', '--convert', 'icns', tmp_icns_dir, '--output', iconset_out_path))
def create_dmg(app_name, app_path, dmg_path):
    """Package the .app at *app_path* into a drag-and-drop .dmg at *dmg_path*.

    Clones the external ``create-dmg`` helper script into a temporary
    directory and runs it against the directory containing the app bundle.
    """
    create_dmg_url = 'https://github.com/andreyvit/create-dmg.git'
    bundle_name = os.path.basename(app_path)
    bundle_dir = os.path.dirname(app_path)
    background_image = os.path.join(os.path.dirname(__file__), 'dmg_background.png')
    with TemporaryDirectory() as workdir:
        # A shallow clone is enough -- only the script itself is needed.
        subprocess.check_call(('git', 'clone', '--depth=1', create_dmg_url), cwd=workdir)
        command = (
            './create-dmg',
            '--volname', app_name,
            '--window-size', '800', '400',
            '--background', background_image,
            '--icon', bundle_name, '200', '200',
            '--hide-extension', bundle_name,
            '--app-drop-link', '600', '200',
            dmg_path,
            bundle_dir
        )
        subprocess.check_call(command, cwd=os.path.join(workdir, 'create-dmg'))
def create_app(
    app_path,
    version_string,
    group,
    executable_path,
    executable_root_path=None,
    icon_path=None,
    hidden=False,
    environment_vars=None,
    **kwargs
):
    """Assemble the .app bundle (or, for a '.dmg' target path, a dmg) on disk.

    Creates the Contents/MacOS/Resources skeleton, copies the executable (or
    the whole *executable_root_path* tree), builds the icon set, runs the
    plugin startup hook, writes Info.plist and PkgInfo, and finally packs a
    dmg when requested. Raises AppAlreadyExistingError /
    DmgAlreadyExistingError / InvalidAppPath / MissingIconError.
    """
    def abs_path(relative_bundle_path, base=None):
        # Resolve a bundle-relative path against *base* (default: app root).
        return os.path.abspath(os.path.join(base or app_path, relative_bundle_path))

    def error_checks():
        # Fail fast before touching the filesystem.
        if os.path.exists(abs_path('.')):
            raise AppAlreadyExistingError('The app path {app_path} already exists.'.format(app_path=app_path))
        if dmg_requested and os.path.exists(dmg_path):
            raise DmgAlreadyExistingError('The dmg path {dmg_path} already exists.'.format(dmg_path=dmg_path))
        if executable_root_path is not None and abs_path('.').startswith(os.path.abspath(executable_root_path) + '/'):
            raise InvalidAppPath('The specified app path is a subpath of the source root directory.')

    def write_info_plist():
        info_plist_content = create_info_plist_content(
            app_name, version_string, group, app_executable_path, executable_root_path, bundle_icon_path, hidden,
            environment_vars
        )
        with codecs.open(abs_path('Info.plist', contents_path), 'w', 'utf-8') as f:
            f.write(info_plist_content)

    def write_pkg_info():
        with codecs.open(abs_path('PkgInfo', contents_path), 'w', 'utf-8') as f:
            f.write(PKG_INFO_CONTENT)

    def copy_source():
        if executable_root_path is None:
            shutil.copy(executable_path, macos_path)
        else:
            # copytree needs a non-existing destination, so drop the
            # pre-created (empty) MacOS directory first.
            os.rmdir(macos_path)
            shutil.copytree(executable_root_path, macos_path)

    def set_file_permissions():
        # r-x for everyone: the bundle executable must be runnable.
        os.chmod(abs_path(app_executable_path, macos_path), 0o555)

    directory_structure = ('Contents', 'Contents/MacOS', 'Contents/Resources')
    app_name = os.path.splitext(os.path.basename(app_path))[0]
    dmg_requested = (os.path.splitext(app_path)[1] == '.dmg')
    tmp_dir_wrapper = None
    dmg_path = None
    if dmg_requested:
        # Build the .app in a temporary directory first, then pack it.
        tmp_dir_wrapper = TemporaryDirectory()
        dmg_path = app_path
        app_path = os.path.join(tmp_dir_wrapper.path, app_name + '.app')
    contents_path, macos_path, resources_path = (abs_path(dir) for dir in directory_structure)
    bundle_icon_path = abs_path('Icon.icns', resources_path) if icon_path is not None else None
    if executable_root_path is not None:
        app_executable_path = os.path.relpath(executable_path, executable_root_path)
    else:
        app_executable_path = os.path.basename(executable_path)
    error_checks()
    for current_path in (abs_path(dir) for dir in directory_structure):
        os.makedirs(current_path)
    copy_source()
    if icon_path is not None:
        try:
            create_icon_set(icon_path, bundle_icon_path)
        except IOError as e:
            raise MissingIconError(e)
    # Plugins may relocate/wrap the startup executable and return its new
    # bundle-relative path; NotImplemented means "no change".
    setup_result = plugins.setup_startup(
        os.path.splitext(executable_path)[1], app_path, executable_path, app_executable_path, executable_root_path,
        macos_path, resources_path
    )
    if setup_result is not NotImplemented:
        app_executable_path = setup_result
    write_info_plist()
    write_pkg_info()
    set_file_permissions()
    if dmg_requested:
        create_dmg(app_name, app_path, dmg_path)
        tmp_dir_wrapper.close()
def main():
    """Command line entry point: parse args, run plugin hooks, build the app."""
    args = parse_args()
    # Plugin hooks are dispatched on the executable's file extension.
    plugins.pre_create_app(os.path.splitext(args.executable_path)[1], **args)
    create_app(**args)
    plugins.post_create_app(os.path.splitext(args.executable_path)[1], **args)


if __name__ == '__main__':
    main()
|
<reponame>JamesG3/Checkers_AI
class Piece(object):
    """A single checker owned by one of the two players ("black"/"white")."""

    def __init__(self, player):
        # Name of the owning player.
        self.player = player
class Grid(object):
    """One board square: a color ('W' white / 'B' black) plus an optional piece.

    ``piece`` points to the Piece occupying the square, or None when empty.
    """

    def __init__(self, color, piece=None):
        self.color = color
        self.piece = piece
class Board(object):
    """6x6 checkers board: a matrix of Grid squares plus per-player counters.

    White pieces start in columns j < 2 and advance toward larger j (the
    'Down*' directions); black pieces start in columns 3 < j < 6 and advance
    toward smaller j (the 'Up*' directions). Pieces occupy only "B" squares.
    """

    def __init__(self):
        # 6x6 matrix; _create() replaces the 0 placeholders with Grid objects.
        self.checkerBoard = [[0 for _ in xrange(6)] for _ in xrange(6)]
        self._create()
        # OR (conf.BOARDSIZE/2) * (conf.BOARDSIZE/2 - 1)
        self.white_piece_Num = 6
        self.black_piece_Num = 6

    def _create(self):
        '''
        initialize a checker board
        assign grid color and pieces
        '''
        for i in xrange(6):
            for j in xrange(6):
                # Square color alternates with coordinate parity.
                if not i%2 and not j%2: # both even
                    self.checkerBoard[i][j] = Grid("W")
                elif i%2 and not j%2: # odd, even
                    self.checkerBoard[i][j] = Grid("B")
                elif not i%2 and j%2: # even, odd
                    self.checkerBoard[i][j] = Grid("B")
                else: # odd, odd
                    self.checkerBoard[i][j] = Grid("W")
                # Populate the dark squares: white in the first two columns,
                # black in the last two.
                if self.checkerBoard[i][j].color == "B":
                    if j<2:
                        self.checkerBoard[i][j].piece = Piece("white")
                    elif 3<j<6:
                        self.checkerBoard[i][j].piece = Piece("black")
        return

    def _direction(self, i, j, moveto):
        '''
        calculate coordinates after a move on selected direction
        return type: tuple
        '''
        # 'Up' decreases j, 'Down' increases j; 'Left' decreases i,
        # 'Right' increases i. Unknown keys raise TypeError (None()).
        return {'UpLeft': lambda: (i-1, j-1),
                'UpRight': lambda: (i+1, j-1),
                'DownLeft': lambda: (i-1, j+1),
                'DownRight': lambda: (i+1, j+1),
                }.get(moveto)()

    def _valid_position(self, i, j):
        '''
        check whether given position is valid in checkerBoard
        return type: bool
        '''
        return (-1 < i < 6) and (-1 < j < 6)

    def move(self, start, end):
        '''
        move piece from start to end (coordinate)
        '''
        # No validation here: callers are expected to pass legal coordinates.
        s_i, s_j = start[0], start[1]
        e_i, e_j = end[0], end[1]
        self.checkerBoard[e_i][e_j].piece = self.checkerBoard[s_i][s_j].piece
        self.checkerBoard[s_i][s_j].piece = None

    def remove(self, piece):
        '''
        remove piece from board
        '''
        i, j = piece[0], piece[1]
        # Keep the per-player counters in sync with the board state.
        if self.checkerBoard[i][j].piece.player == "white":
            self.white_piece_Num -= 1
        else:
            self.black_piece_Num -= 1
        self.checkerBoard[i][j].piece = None

    def check_jump(self, player):
        '''
        return all capture moves for given player
        return type: list[list, list, ...]
        '''
        # A capture is possible when the adjacent diagonal square holds an
        # adversary piece and the square behind it is empty. Entries are the
        # [i, j] positions of pieces that CAN capture (a position appears
        # twice when both diagonals allow a jump).
        jump_list = []
        for i in xrange(6):
            for j in xrange(6):
                if self.checkerBoard[i][j].piece\
                and self.checkerBoard[i][j].piece.player == player:
                    if player == "white":
                        adversary = "black"
                        L_move1 = self._direction(i, j, 'DownLeft')
                        R_move1 = self._direction(i, j, 'DownRight')
                        L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft')
                        R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight')
                        L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                        L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
                    else:
                        adversary = "white"
                        L_move1 = self._direction(i, j, 'UpLeft')
                        R_move1 = self._direction(i, j, 'UpRight')
                        L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft')
                        R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight')
                        L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                        L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
                    # Landing square in bounds implies the jumped-over square
                    # is too, so indexing with L1/R1 below is safe.
                    if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j):
                        if self._valid_position(L2_i, L2_j)\
                        and self.checkerBoard[L1_i][L1_j].piece\
                        and self.checkerBoard[L1_i][L1_j].piece.player == adversary\
                        and self.checkerBoard[L2_i][L2_j].piece is None:
                            jump_list.append([i, j])
                        if self._valid_position(R2_i, R2_j)\
                        and self.checkerBoard[R1_i][R1_j].piece\
                        and self.checkerBoard[R1_i][R1_j].piece.player == adversary\
                        and self.checkerBoard[R2_i][R2_j].piece is None:
                            jump_list.append([i, j])
        return jump_list

    def valid_moves(self, piece, jump = 0):
        '''
        return all valid moves for selected piece
        return type: list[list, list, ...]
        '''
        i, j = piece
        cur_grid = self.checkerBoard[i][j]
        if cur_grid.piece == None: # if no piece in that grid
            return []
        valid_moves = []
        if jump: # if current piece is from another position after one capture move,
                 # then check whether there are other capture moves
            # robot move
            if cur_grid.piece.player == "white":
                adversary = "black"
                L_move1 = self._direction(i, j, 'DownLeft')
                R_move1 = self._direction(i, j, 'DownRight')
                L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft')
                R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight')
                L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
            # human move
            else:
                adversary = "white"
                L_move1 = self._direction(i, j, 'UpLeft')
                R_move1 = self._direction(i, j, 'UpRight')
                L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft')
                R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight')
                L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
            # Mid-capture: only further capture moves are legal.
            # check left
            if (self._valid_position(L2_i, L2_j))\
            and self.checkerBoard[L1_i][L1_j].piece\
            and self.checkerBoard[L1_i][L1_j].piece.player == adversary\
            and self.checkerBoard[L2_i][L2_j].piece is None: # empty
                valid_moves.append([L2_i, L2_j])
            # check right
            if self._valid_position(R2_i, R2_j)\
            and self.checkerBoard[R1_i][R1_j].piece\
            and self.checkerBoard[R1_i][R1_j].piece.player == adversary\
            and self.checkerBoard[R2_i][R2_j].piece is None: # empty
                valid_moves.append([R2_i, R2_j])
        # if not after a capture move
        else:
            # computer move
            jump_exist = 0 # capture move flag
            player = cur_grid.piece.player
            if cur_grid.piece.player == "white":
                adversary = "black"
                L_move1 = self._direction(i, j, 'DownLeft')
                R_move1 = self._direction(i, j, 'DownRight')
                L_move2 = self._direction(L_move1[0], L_move1[1], 'DownLeft')
                R_move2 = self._direction(R_move1[0], R_move1[1], 'DownRight')
                L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
            else:
                adversary = "white"
                L_move1 = self._direction(i, j, 'UpLeft')
                R_move1 = self._direction(i, j, 'UpRight')
                L_move2 = self._direction(L_move1[0], L_move1[1], 'UpLeft')
                R_move2 = self._direction(R_move1[0], R_move1[1], 'UpRight')
                L1_i, L1_j, R1_i, R1_j = L_move1[0], L_move1[1], R_move1[0], R_move1[1]
                L2_i, L2_j, R2_i, R2_j = L_move2[0], L_move2[1], R_move2[0], R_move2[1]
            # if capture moves exist, return all capture moves
            if self._valid_position(L2_i, L2_j) or self._valid_position(R2_i, R2_j):
                if self._valid_position(L2_i, L2_j)\
                and self.checkerBoard[L1_i][L1_j].piece\
                and self.checkerBoard[L1_i][L1_j].piece.player == adversary\
                and self.checkerBoard[L2_i][L2_j].piece is None:
                    jump_exist = 1
                    valid_moves.append([L2_i, L2_j])
                if self._valid_position(R2_i, R2_j)\
                and self.checkerBoard[R1_i][R1_j].piece\
                and self.checkerBoard[R1_i][R1_j].piece.player == adversary\
                and self.checkerBoard[R2_i][R2_j].piece is None:
                    jump_exist = 1
                    valid_moves.append([R2_i, R2_j])
            if jump_exist == 0: # if there is no capture move
                # Plain one-step diagonal moves into empty squares.
                if self._valid_position(L1_i, L1_j)\
                and self.checkerBoard[L1_i][L1_j].piece == None:
                    valid_moves.append([L1_i, L1_j])
                if self._valid_position(R1_i, R1_j)\
                and self.checkerBoard[R1_i][R1_j].piece == None:
                    valid_moves.append([R1_i, R1_j])
        return valid_moves
|
<gh_stars>0
import datetime
from google.cloud import datacatalog_v1
class Client:
    """Convenience wrapper around the Data Catalog API for one project/region.

    Bundles a DataCatalogClient with a project id and region so callers can
    work with short resource ids instead of full resource names.
    """

    def __init__(self, project_id: str, region: str) -> None:
        self.client = datacatalog_v1.DataCatalogClient()
        self.project_id = project_id
        self.region = region

    def get_entry_group(self, entry_group_id: str) -> datacatalog_v1.EntryGroup:
        """Fetch the entry group with the given short id in this project/region."""
        return self.client.get_entry_group(
            name=self.client.entry_group_path(
                self.project_id, self.region, entry_group_id
            )
        )

    def get_tag_template(self, tag_template_id: str) -> datacatalog_v1.TagTemplate:
        """Fetch the tag template with the given short id in this project/region."""
        return self.client.get_tag_template(
            name=self.client.tag_template_path(
                self.project_id, self.region, tag_template_id
            )
        )

    def get_entry(
        self, entry_group: datacatalog_v1.EntryGroup, entry_id: str
    ) -> datacatalog_v1.Entry:
        """Return the entry with id *entry_id* inside *entry_group*, or None.

        Scans the group's entry listing for an exact resource-name match.
        """
        entries = self.client.list_entries(parent=entry_group.name)
        name = f"{entry_group.name}/entries/{entry_id}"
        entry = None
        for e in entries:
            if e.name == name:
                entry = e
                break
        return entry

    def get_tag(self, entry: datacatalog_v1.Entry) -> datacatalog_v1.Tag:
        """Return the first tag attached to *entry*, or None when untagged."""
        tags = self.client.list_tags(parent=entry.name)
        tag = None
        for t in tags:
            tag = t
            break
        return tag

    def create_entry(
        self,
        entry_group: datacatalog_v1.EntryGroup,
        entry_id: str,
        entry: datacatalog_v1.Entry,
    ) -> datacatalog_v1.Entry:
        """Create *entry* under *entry_group* with the given short id."""
        return self.client.create_entry(
            parent=entry_group.name, entry_id=entry_id, entry=entry
        )

    def create_tag(
        self, entry: datacatalog_v1.Entry, tag: datacatalog_v1.Tag
    ) -> datacatalog_v1.Tag:
        """Attach *tag* to *entry*."""
        return self.client.create_tag(parent=entry.name, tag=tag)

    def set_status_running(self, tag: datacatalog_v1.Tag) -> datacatalog_v1.Tag:
        """Set latest_job_status to 'running', stamp the start time, persist.

        Fields are re-looked-up from the map after each assignment --
        presumably because assigning into a proto map copies the message;
        verify against the proto-plus version in use before refactoring.
        """
        tag.fields["latest_job_status"] = datacatalog_v1.types.TagField()
        tag.fields[
            "latest_job_status"
        ].enum_value = datacatalog_v1.types.TagField.EnumValue()
        tag.fields["latest_job_status"].enum_value.display_name = "running"
        tag.fields["latest_job_start_datetime"] = datacatalog_v1.types.TagField()
        # NOTE(review): datetime.now() is naive (no tz); the API may expect
        # UTC-aware timestamps -- confirm.
        tag.fields[
            "latest_job_start_datetime"
        ].timestamp_value = datetime.datetime.now()
        return self.client.update_tag(tag=tag)

    def set_status_completed(self, tag: datacatalog_v1.Tag) -> datacatalog_v1.Tag:
        """Set latest_job_status to 'completed', stamp end time and run time.

        Requires latest_job_start_datetime to already be present on *tag*
        (normally written by set_status_running).
        """
        tag.fields["latest_job_status"] = datacatalog_v1.types.TagField()
        tag.fields[
            "latest_job_status"
        ].enum_value = datacatalog_v1.types.TagField.EnumValue()
        tag.fields["latest_job_status"].enum_value.display_name = "completed"
        tag.fields["latest_job_end_datetime"] = datacatalog_v1.types.TagField()
        tag.fields["latest_job_end_datetime"].timestamp_value = datetime.datetime.now()
        tag.fields["latest_job_run_time"] = datacatalog_v1.types.TagField()
        # Run time is stored as the string form of the end-start timedelta.
        tag.fields["latest_job_run_time"].string_value = str(
            tag.fields["latest_job_end_datetime"].timestamp_value
            - tag.fields["latest_job_start_datetime"].timestamp_value
        )
        return self.client.update_tag(tag=tag)
|
<gh_stars>1-10
#coding=utf-8
'''
Created on 2015-10-10
@author: Devuser
'''
class CITemplatePath(object):
    """Base template-path holder shared by all CI page path classes."""

    # Left navigation partial shared across CI pages.
    left_nav_template_path = "ci/ci_left_nav.html"
class CIDashBoardPath(CITemplatePath):
    """Template paths for the CI dashboard page and its web parts."""

    dashboard_index_path = "dashboard/ci_dashboard_index.html"
    task_queue_webpart = "dashboard/ci_dashboard_task_queue.html"
    task_queue_list_controll = "dashboard/ci_dashboard_task_queue_list_controll.html"
    task_build_status_controll = "dashboard/ci_dashboard_build_status_controll.html"
    task_build_status_page = "dashboard/ci_dashboard_build_status_page.html"
class CITaskPath(CITemplatePath):
    """Template paths for CI task pages: task CRUD, history, parameters,
    change logs and confirmation dialogs."""

    # Task pages and dialogs.
    task_index_path = "task/ci_task_index.html"
    sub_nav_template_path = "task/ci_task_leftsub_nav.html"
    task_create_dialog = "task/ci_task_create_dialog.html"
    task_list_webpart = "task/ci_task_list_page.html"
    task_list_controll = "task/ci_task_list_controll.html"
    task_config_webpart = "task/ci_task_config_page.html"
    task_config_basic = "task/ci_task_config_basic.html"
    task_create_form = "task/ci_task_create_form.html"
    task_config_pop_menu = "task/ci_task_plugin_menu_control.html"
    # Task history.
    task_history_list = "task_history/ci_task_history_list_controll.html"
    task_history_page = "task_history/ci_task_history_list_page.html"
    task_download_package = "task_history/ci_task_package_download__control.html"
    history_build_log = "task_history/ci_task_build_log.html"
    task_property_nav = "task/ci_task_property_nav.html"
    testing_property_nav = "testing_task/testing_task_property_nav.html"
    # Task parameters.
    task_parameter_list = "task_parameter/task_parameter_list_controll.html"
    task_parameter_page = "task_parameter/task_parameter_list_page.html"
    task_parameter_edit = "task_parameter/task_parameter_edit_page.html"
    task_parameter_menu = "task_parameter/task_parameter_menu_control.html"
    task_build_confirm_page = "task/ci_task_build_confirm_page.html"
    task_parameter_confirm = "task_parameter/task_release_parameter_group__confirm_dialog.html"
    task_parameter_group_type_menu = "task_parameter/task_parameter_group_type_menu.html"
    task_parameter_plugin = "task_parameter/task_plugin.html"
    # Change logs and misc.
    task_changelog_list = "task_change_log/task_change_log_list.html"
    task_changelog_page = "task_change_log/task_change_log_page.html"
    task_changelog_detail = "task_change_log/task_changelog_detail_page.html"
    history_clean_page = "task_history/ci_task_history_clean_page.html"
    task_confirm_dialog = "task/ci_task_confirm_dialog.html"
class TestingTaskPath(CITemplatePath):
    """Template paths for testing-task history and analytics pages."""

    # NOTE(review): the 'teting_' names look like typos of 'testing_', but
    # renaming them would break existing references.
    testing_history_list = "testing_task/testing_history_list_controll.html"
    teting_history_page = "testing_task/testing_history_list_page.html"
    teting_analytics_webpart = "testing_task/testing_history_analytics.html"
    teting_caseresult_list = "testing_task/testing_case_result_list_controll.html"
class CIServicePath(CITemplatePath):
    """Template paths for CI service listing and configuration pages."""

    service_index_path = "service/ci_service_index.html"
    sub_nav_template_path = "service/ci_service_leftsub_nav.html"
    service_list_webpart = "service/ci_service_list_page.html"
    service_list_controll = "service/ci_service_list_controll.html"
    service_config_page = "service/ci_service_config_page.html"
class CISettingsPath(CITemplatePath):
    """Template paths for the CI settings area: agents, servers, credentials
    and tags."""

    settings_index_path = "settings/ci_settings_index.html"
    sub_nav_template_path = "settings/ci_settings_leftsub_nav.html"
    settings_global_config_page_path = "settings/ci_settings_global_variable_page.html"
    # Agent management.
    settings_agent_controll = "settings/ci_settings_agent_list_controll.html"
    settings_agent_create_form = "settings/ci_agent_create_form.html"
    settings_agent_create_dialog = "settings/ci_settings_agent_create_dialog.html"
    settings_agent_webpart = "settings/ci_settings_agent_page.html"
    # Servers and credentials.
    settings_server_form = "settings/ci_settings_server_form.html"
    settings_server_page = "settings/ci_settings_server_page.html"
    settings_credential_form = "settings/ci_settings_credential_form.html"
    settings_credential_page = "settings/ci_settings_credential_page.html"
    # Tags.
    settings_tag_list = "settings/ci_settings_tag_list_controll.html"
    settings_tag_webpart = "settings/ci_settings_tag_listpage.html"
class CIPluginPath(object):
    """Template paths for the configuration UIs of the CI build/test plugins."""

    # Source control.
    svn_plugin = "plugins/ci_svn_plugin.html"
    git_plugin = "plugins/ci_git_plugin.html"
    # Build steps.
    shell_command = "plugins/ci_shell_command_plugin.html"
    shell_build = "plugins/ci_shell_build_plugin.html"
    gradle_build = "plugins/ci_gradle_build_plugin.html"
    ant_build = "plugins/ci_ant_build_plugin.html"
    ios_build = "plugins/ci_ios_build_plugin.html"
    ios_command_build = "plugins/ci_ios_command_build_plugin.html"
    # Deployment.
    service_replace_file = "plugins/ci_service_replace_plugin.html"
    service_deploy = "plugins/ci_deploy_service_plugin.html"
    copy2_server = "plugins/ci_copy_2server_plugin.html"
    # Testing and checks.
    auto_apitesting = "plugins/auto_apitesting_plugin.html"
    auto_webuitesting = "plugins/auto_webui_testing_plugin.html"
    xcode_settings_check = "plugins/ci_xcode_settings_check_plugin.html"
    xctest_plugin = "plugins/ci_xctest_plugin.html"
class CICommonControllPath(object):
    """Template paths for shared controls (drop-downs, menus, build-log UI)."""

    # Drop-down lists.
    service_dropdown_controll = "common/deploy_service_dropdown_list_controll.html"
    agent_condations_controll = "common/agent_condations_dropdown_list_controll.html"
    agent_controll = "common/agent_dropdown_list_controll.html"
    credential_type_dropdownlist_controll = "common/credential_type_dropdown_list_controll.html"
    credential_dropdown_controll = "common/server_credential_dropdown_list_controll.html"
    # Build log widgets.
    ci_build_log_js = "common/ci_build_log.js"
    ci_build_log_dialog = "common/ci_build_log_dialog.html"
    # Checkout strategies and build tools.
    svn_checkout_strategy = "common/svn_checkout_strategy_dropdown_list_controll.html"
    git_checkout_strategy = "common/git_checkout_strategy_dropdown_list_controll.html"
    build_tool_sdk = "common/buildtools_jdk_dropdown_list_controll.html"
    build_tool_testenv = "common/buildtools_testenv_dropdown_list_controll.html"
    build_tool_gradle = "common/buildtools_gradle_dropdown_list_controll.html"
    build_tool_xcode = "common/buildtools_xcode_dropdown_list_controll.html"
    build_tool_pods = "common/buildtools_pods_dropdown_list_controll.html"
    # Deployment / task selectors and filter menus.
    ci_deploy_server_dropdownlist = "common/deploy_server_dropdown_list_controll.html"
    ci_task_dropdownlist = "common/ci_task_dropdown_list_controll.html"
    task_parameter_dropdownlist = "common/build_parameter_dropdown_list_controll.html"
    case_tag_dropdownlist = "common/casetag_dropdown_list_controll.html"
    task_tag_filter_menu = "common/task_tag_filter_menu_control.html"
    project_filter_menu = "common/task_project_filter_menu.html"
|
<reponame>dmitrii/eucaconsole
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Ent. Services Development Corporation LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Pyramid views for Eucalyptus and AWS CloudFormation stacks
"""
import base64
import simplejson as json
import hashlib
import logging
import os
import fnmatch
import time
import urllib2
from urllib2 import HTTPError, URLError
from boto.exception import BotoServerError
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.view import view_config
from ..i18n import _
from ..forms import ChoicesManager, CFSampleTemplateManager
from ..forms.stacks import StacksDeleteForm, StacksFiltersForm, StacksCreateForm
from ..models import Notification
from ..models.auth import User
from ..views import LandingPageView, BaseView, TaggedItemView, JSONResponse, JSONError
from . import boto_error_handler
from .. import utils
# Upper bound (bytes) for CloudFormation template bodies: 450 KiB.
TEMPLATE_BODY_LIMIT = 460800
class StackMixin(object):
    """Shared helpers for stack views: stack lookup and template storage."""

    def get_stack(self):
        """Return the boto Stack named in the request, or None.

        The stack name comes from the route matchdict ('name') or, failing
        that, the 'stack-name' request parameter. Lookup errors from the
        CloudFormation API are treated as "no such stack".
        """
        if self.cloudformation_conn:
            try:
                stack_param = self.request.matchdict.get('name')
                if not(stack_param):
                    stack_param = self.request.params.get('stack-name')
                if not(stack_param):
                    return None
                stacks = self.cloudformation_conn.describe_stacks(stack_name_or_id=stack_param)
                return stacks[0] if stacks else None
            except BotoServerError:
                pass
        return None

    def get_create_template_bucket(self, create=False):
        """Find (or with create=True, create) the per-account S3 bucket that
        stores CloudFormation templates.

        The bucket name embeds a base64 hash of the account id; the suffix
        loop retries with alternate hashes on a namespace collision.
        Raises JSONError when no usable bucket name can be obtained.
        """
        s3_conn = self.get_connection(conn_type="s3")
        account_id = User.get_account_id(ec2_conn=self.get_connection(), request=self.request)
        region = self.request.session.get('region')
        for suffix in ['', 'a', 'b', 'c']:
            d = hashlib.md5()
            d.update(account_id)
            d.update(suffix)
            md5 = d.digest()
            # Bucket-name-safe base64 ('--' as altchars), padding stripped.
            acct_hash = base64.b64encode(md5, '--')
            acct_hash = acct_hash[:acct_hash.find('=')]
            try:
                bucket_name = "cf-template-{acct_hash}-{region}".format(
                    acct_hash=acct_hash.lower(),
                    region=region
                )
                if create:
                    bucket = s3_conn.create_bucket(bucket_name)
                else:
                    bucket = s3_conn.get_bucket(bucket_name)
                return bucket
            except BotoServerError as err:
                # On a name collision try the next suffix; any other error
                # while creating is surfaced to the user immediately.
                if err.code != 'BucketAlreadyExists' and create:
                    BaseView.handle_error(err=err, request=self.request)
        raise JSONError(status=500, message=_(
            u'Cannot create S3 bucket to store your CloudFormation template due to namespace collision. '
            u'Please contact your cloud administrator.'
        ))

    def get_template_location(self, stack_id, default_name=None):
        """Describe where the stack's template is stored.

        Returns a dict always containing 'template_name' (falling back to
        *default_name*) and, when the template bucket and key exist,
        'template_bucket' and 'template_key' as well.
        """
        bucket = None
        try:
            bucket = self.get_create_template_bucket(create=False)
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; a missing bucket is fine, fall back to defaults.
        except Exception:
            pass
        # Keys are prefixed with a hash of the short (UUID) part of the id.
        stack_id = stack_id[stack_id.rfind('/') + 1:]
        d = hashlib.md5()
        d.update(stack_id)
        md5 = d.digest()
        stack_hash = base64.b64encode(md5, '--').replace('=', '')
        ret = {'template_name': default_name}
        if bucket is not None:
            keys = list(bucket.list(prefix=stack_hash))
            if len(keys) > 0:
                key = keys[0].key
                # The template's display name follows the last '-' in the key.
                name = key[key.rfind('-') + 1:]
                ret['template_bucket'] = bucket.name
                ret['template_key'] = key
                ret['template_name'] = name
        return ret
class StacksView(LandingPageView):
    """Landing page view for the CloudFormation stacks list."""

    def __init__(self, request):
        super(StacksView, self).__init__(request)
        self.title_parts = [_(u'Stacks')]
        self.cloudformation_conn = self.get_connection(conn_type="cloudformation")
        self.initial_sort_key = 'name'
        self.prefix = '/stacks'
        self.filter_keys = ['name', 'create-time']
        self.sort_keys = self.get_sort_keys()
        # Stack rows are loaded asynchronously from the JSON endpoint.
        self.json_items_endpoint = self.get_json_endpoint('stacks_json')
        self.delete_form = StacksDeleteForm(request, formdata=request.params or None)
        self.filters_form = StacksFiltersForm(
            request, cloud_type=self.cloud_type, formdata=request.params or None)
        search_facets = self.filters_form.facets
        self.render_dict = dict(
            filter_keys=self.filter_keys,
            search_facets=BaseView.escape_json(json.dumps(search_facets)),
            sort_keys=self.sort_keys,
            prefix=self.prefix,
            initial_sort_key=self.initial_sort_key,
            json_items_endpoint=self.json_items_endpoint,
            delete_form=self.delete_form,
            delete_stack_url=request.route_path('stacks_delete'),
            update_stack_url=request.route_path('stack_update', name='_name_'),
            ufshost_error=utils.is_ufshost_error(self.cloudformation_conn, self.cloud_type)
        )

    @view_config(route_name='stacks', renderer='../templates/stacks/stacks.pt')
    def stacks_landing(self):
        """Render the stacks landing page."""
        # sort_keys are passed to sorting drop-down
        return self.render_dict

    @view_config(route_name='stacks_delete', request_method='POST', xhr=True)
    def stacks_delete(self):
        """XHR endpoint: request deletion of the stack named in the params.

        Returns a 200 JSONResponse on success, 400 on form validation failure.
        """
        if self.delete_form.validate():
            name = self.request.params.get('name')
            prefix = _(u'Unable to delete stack')
            template = u'{0} {1} - {2}'.format(prefix, name, '{0}')
            with boto_error_handler(self.request, None, template):
                self.cloudformation_conn.delete_stack(name)
                prefix = _(u'Successfully sent delete stack request. It may take a moment to delete ')
                msg = u'{0} {1}'.format(prefix, name)
                return JSONResponse(status=200, message=msg)
        form_errors = ', '.join(self.delete_form.get_errors_list())
        return JSONResponse(status=400, message=form_errors)  # Validation failure = bad request

    @staticmethod
    def get_sort_keys():
        """Sort options offered by the landing page drop-down."""
        return [
            dict(key='name', name=_(u'Name: A to Z')),
            dict(key='-name', name=_(u'Name: Z to A')),
            dict(key='creation_time', name=_(u'Creation time: Oldest to Newest')),
            dict(key='-creation_time', name=_(u'Creation time: Newest to Oldest')),
        ]
class StacksJsonView(LandingPageView):
    """JSON response view for Stack landing page"""

    def __init__(self, request):
        super(StacksJsonView, self).__init__(request)
        self.cloudformation_conn = self.get_connection(conn_type="cloudformation")
        with boto_error_handler(request):
            self.items = self.get_items()

    @view_config(route_name='stacks_json', renderer='json', request_method='POST')
    def stacks_json(self):
        """Return the filtered stack list as JSON rows for the landing page."""
        if not(self.is_csrf_valid()):
            return JSONResponse(status=400, message="missing CSRF token")
        # Stacks in these states are marked 'transitional' so the frontend
        # keeps polling for updates.
        # NOTE(review): AWS documents the cleanup state as
        # 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS' (no 'D'); the
        # 'COMPLETED' spelling below would never match -- verify.
        transitional_states = [
            'CREATE_IN_PROGRESS',
            'ROLLBACK_IN_PROGRESS',
            'DELETE_IN_PROGRESS',
            'CREATE_FAILED',
            'UPDATE_IN_PROGRESS',
            'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
            'UPDATE_ROLLBACK_IN_PROGRESS',
            'UPDATE_ROLLBACK_FAILED',
            'UPDATE_ROLLBACK_COMPLETED_CLEANUP_IN_PROGRESS',
            'UPDATE_FAILED'
        ]
        with boto_error_handler(self.request):
            stacks_array = []
            for stack in self.filter_items(self.items):
                is_transitional = stack.stack_status in transitional_states
                name = stack.stack_name
                status = stack.stack_status
                # Fully deleted stacks are omitted from the listing.
                if status == 'DELETE_COMPLETE':
                    continue
                stacks_array.append(dict(
                    creation_time=self.dt_isoformat(stack.creation_time),
                    # e.g. 'CREATE_COMPLETE' -> 'Create-complete' for display.
                    status=status.lower().capitalize().replace('_', '-'),
                    description=stack.description,
                    name=name,
                    transitional=is_transitional,
                ))
            return dict(results=stacks_array)

    def get_items(self):
        """Fetch all stacks, or [] when no CloudFormation connection exists."""
        return self.cloudformation_conn.describe_stacks() if self.cloudformation_conn else []
class StackView(BaseView, StackMixin):
    """Views for single stack"""
    TEMPLATE = '../templates/stacks/stack_view.pt'

    def __init__(self, request):
        super(StackView, self).__init__(request)
        self.title_parts = [_(u'Stack'), request.matchdict.get('name')]
        self.cloudformation_conn = self.get_connection(conn_type='cloudformation')
        with boto_error_handler(request):
            self.stack = self.get_stack()
            self.delete_form = StacksDeleteForm(self.request, formdata=self.request.params or None)
        search_facets = [
            # FIX: corrected "Progresss" -> "Progress" typos in the facet labels
            {'name': 'status', 'label': _(u"Status"), 'options': [
                {'key': 'create-complete', 'label': _("Create Complete")},
                {'key': 'create-in-progress', 'label': _("Create In Progress")},
                {'key': 'create-failed', 'label': _("Create Failed")},
                {'key': 'delete-in-progress', 'label': _("Delete In Progress")},
                {'key': 'delete-failed', 'label': _("Delete Failed")},
                {'key': 'rollback-complete', 'label': _("Rollback Complete")},
                {'key': 'rollback-in-progress', 'label': _("Rollback In Progress")},
                {'key': 'rollback-failed', 'label': _("Rollback Failed")}
            ]}
        ]
        self.render_dict = dict(
            stack=self.stack,
            stack_name=self.stack.stack_name if self.stack else '',
            stack_description=self.stack.description if self.stack else '',
            stack_id=self.stack.stack_id if self.stack else '',
            stack_creation_time=self.dt_isoformat(self.stack.creation_time) if self.stack else None,
            status=self.stack.stack_status.lower().capitalize().replace('_', '-') if self.stack else None,
            delete_form=self.delete_form,
            in_use=False,
            search_facets=BaseView.escape_json(json.dumps(search_facets)),
            filter_keys=[],
            controller_options_json=self.get_controller_options_json(),
        )

    @view_config(route_name='stack_view', request_method='GET', renderer=TEMPLATE)
    def stack_view(self):
        """Render the stack detail page (404 for unknown stacks)."""
        if self.stack is None and self.request.matchdict.get('name') != 'new':
            raise HTTPNotFound
        if self.stack is None:
            # FIX: name == 'new' with no stack previously fell through and
            # dereferenced self.stack.stack_id (AttributeError on None).
            return dict(self.render_dict)
        template_info = self.get_template_location(self.stack.stack_id)
        template_info.update(self.render_dict)
        return template_info

    @view_config(route_name='stack_delete', request_method='POST', renderer=TEMPLATE)
    def stack_delete(self):
        """Handle the delete-stack form POST; redirect to the stacks list."""
        if self.delete_form.validate():
            name = self.request.params.get('name')
            location = self.request.route_path('stacks')
            prefix = _(u'Unable to delete stack')
            template = u'{0} {1} - {2}'.format(prefix, self.stack.stack_name, '{0}')
            with boto_error_handler(self.request, location, template):
                msg = _(u"Deleting stack")
                self.log_request(u"{0} {1}".format(msg, name))
                self.cloudformation_conn.delete_stack(name)
                prefix = _(u'Successfully sent delete stack request. It may take a moment to delete ')
                msg = u'{0} {1}'.format(prefix, name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            time.sleep(1)  # delay to allow server to update state before moving user on
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.delete_form.get_errors_list()
            return self.render_dict

    def get_controller_options_json(self):
        """Escaped JSON options consumed by the stack page's JS controller."""
        if self.stack is None:
            return '{}'
        else:
            return BaseView.escape_json(json.dumps({
                'stack_name': self.stack.stack_name,
                'stack_status_json_url': self.request.route_path('stack_state_json', name=self.stack.stack_name),
                'stack_template_url': self.request.route_path('stack_template', name=self.stack.stack_name),
                'stack_events_url': self.request.route_path('stack_events', name=self.stack.stack_name),
                'stack_status': self.stack.stack_status.lower().capitalize().replace('_', '-'),
            }))
class StackStateView(BaseView):
    """AJAX views returning stack state, template, and events as JSON."""

    def __init__(self, request):
        super(StackStateView, self).__init__(request)
        self.request = request
        self.cloudformation_conn = self.get_connection(conn_type='cloudformation')
        self.stack_name = self.request.matchdict.get('name')

    @view_config(route_name='stack_state_json', renderer='json', request_method='GET')
    def stack_state_json(self):
        """Return current stack status"""
        with boto_error_handler(self.request):
            stacks = self.cloudformation_conn.describe_stacks(self.stack_name)
            stack = stacks[0] if stacks else None
            stack_resources = self.cloudformation_conn.list_stack_resources(self.stack_name)
            stack_status = stack.stack_status if stack else 'delete_complete'
            # FIX: default to [] rather than None; iterating None raised
            # TypeError for deleted stacks.
            stack_outputs = stack.outputs if stack else []
            outputs = []
            for output in stack_outputs:
                outputs.append({'key': output.key, 'description': output.description, 'value': output.value})
            resources = []
            for resource in stack_resources:
                resources.append({
                    'type': resource.resource_type,
                    'logical_id': resource.logical_resource_id,
                    'physical_id': resource.physical_resource_id,
                    'status': resource.resource_status.lower().capitalize().replace('_', '-'),
                    'url': StackStateView.get_url_for_resource(
                        self.request,
                        resource.resource_type,
                        resource.physical_resource_id
                    ),
                    'updated_timestamp': resource.LastUpdatedTimestamp})
            return dict(
                results=dict(
                    stack_status=stack_status.lower().capitalize().replace('_', '-'),
                    outputs=outputs,
                    resources=resources
                )
            )

    @view_config(route_name='stack_template', renderer='json', request_method='GET')
    def stack_template(self):
        """Return stack template"""
        with boto_error_handler(self.request):
            response = self.cloudformation_conn.get_template(self.stack_name)
            template = response['GetTemplateResponse']['GetTemplateResult']['TemplateBody']
            return dict(
                results=template
            )

    @view_config(route_name='stack_events', renderer='json', request_method='GET')
    def stack_events(self):
        """Return stack events, optionally filtered by ?status=... values."""
        status = self.request.params.getall('status')
        with boto_error_handler(self.request):
            stack_events = self.cloudformation_conn.describe_stack_events(self.stack_name)
            events = []
            for event in stack_events:
                stack_status = event.resource_status.lower().replace('_', '-')
                if len(status) == 0 or stack_status in status:
                    events.append({
                        'timestamp': event.timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
                        'status': stack_status,
                        'status_reason': event.resource_status_reason,
                        'type': event.resource_type,
                        'logical_id': event.logical_resource_id,
                        'physical_id': event.physical_resource_id,
                        'url': StackStateView.get_url_for_resource(
                            self.request,
                            event.resource_type,
                            event.physical_resource_id
                        )
                    })
            return dict(
                results=dict(events=events)
            )

    @staticmethod
    def get_url_for_resource(request, res_type, resource_id):
        """Map a CloudFormation resource type/id to its console detail URL.

        Returns None when the console has no page for the resource type.
        """
        url = None
        if res_type == "AWS::ElasticLoadBalancing::LoadBalancer":
            url = request.route_path('elb_view', id=resource_id)
        elif "AWS::EC2::" in res_type:
            if "SecurityGroup" in res_type:
                url = request.route_path('securitygroup_view', id=resource_id)
            elif res_type[10:] == "EIP":
                url = request.route_path('ipaddress_view', public_ip=resource_id)
            elif "Instance" in res_type:
                url = request.route_path('instance_view', id=resource_id)
            elif "Volume" in res_type:
                url = request.route_path('volume_view', id=resource_id)
        elif "AWS::AutoScaling::" in res_type:
            if "LaunchConfiguration" in res_type:
                url = request.route_path('launchconfig_view', id=resource_id)
            if "ScalingGroup" in res_type:
                url = request.route_path('scalinggroup_view', id=resource_id)
        elif "AWS::IAM::" in res_type:
            if "Group" in res_type:
                url = request.route_path('group_view', name=resource_id)
            elif "Role" in res_type:
                url = request.route_path('role_view', name=resource_id)
            elif "User" in res_type:
                url = request.route_path('user_view', name=resource_id)
        elif "AWS::S3::" in res_type:
            if "Bucket" in res_type:
                url = request.route_path('bucket_contents', name=resource_id, subpath='')
        elif res_type == "AWS::CloudWatch::Alarm":
            url = request.route_path('cloudwatch_alarm_view', alarm_id=base64.b64encode(bytes(resource_id), '--'))
        return url
class StackWizardView(BaseView, StackMixin):
    """View for Create Stack wizard"""
    TEMPLATE = '../templates/stacks/stack_wizard.pt'
    TEMPLATE_UPDATE = '../templates/stacks/stack_update.pt'

    def __init__(self, request):
        super(StackWizardView, self).__init__(request)
        self.cloudformation_conn = self.get_connection(conn_type='cloudformation')
        self.title_parts = [_(u'Stack'), _(u'Create')]
        self.create_form = None
        location = self.request.route_path('stacks')
        with boto_error_handler(self.request, location):
            s3_bucket = self.get_template_samples_bucket()
            self.create_form = StacksCreateForm(request, s3_bucket)
            self.stack = self.get_stack()
        self.render_dict = dict(
            create_form=self.create_form,
            controller_options_json=self.get_controller_options_json(),
        )

    def get_template_samples_bucket(self):
        """Return the configured sample-templates S3 bucket, or None."""
        sample_bucket = self.request.registry.settings.get('cloudformation.samples.bucket')
        if sample_bucket is None:
            return None
        s3_conn = self.get_connection(conn_type="s3")
        try:
            return s3_conn.get_bucket(sample_bucket)
        except BotoServerError:
            # FIX: logging.warn is a deprecated alias for logging.warning
            logging.warning(_(u'Configuration error: cloudformation.samples.bucket is referencing bucket that is not visible to this user.'))
            return None

    def get_controller_options_json(self):
        """Escaped JSON options consumed by the wizard's JS controller."""
        return BaseView.escape_json(json.dumps({
            'stack_template_url': self.request.route_path('stack_template_parse'),
            'convert_template_url': self.request.route_path('stack_template_convert'),
            'stack_template_read_url':
                self.request.route_path('stack_template', name=self.stack.stack_name) if self.stack else '',
            'sample_templates': self.create_form.sample_template.choices
        }))

    @view_config(route_name='stack_new', renderer=TEMPLATE, request_method='GET')
    def stack_new(self):
        """Displays the Stack wizard"""
        return self.render_dict

    @view_config(route_name='stack_update', renderer=TEMPLATE_UPDATE, request_method='GET')
    def stack_update_view(self):
        """Displays the Stack update wizard"""
        stack_name = self.request.matchdict.get('name')
        self.title_parts = [_(u'Stack'), stack_name, _(u'Update')]
        ret = dict(
            stack_name=stack_name,
        )
        template_info = self.get_template_location(self.stack.stack_id, default_name=_(u'Edit template'))
        ret.update(template_info)
        ret.update(self.render_dict)
        return ret

    @view_config(route_name='stack_update', renderer=TEMPLATE_UPDATE, request_method='POST')
    def stack_update(self):
        """Handle the update-stack POST: store template in S3, call update_stack."""
        if not(self.is_csrf_valid()):
            return JSONResponse(status=400, message="missing CSRF token")
        stack_name = self.request.matchdict.get('name')
        location = self.request.route_path('stack_update', name=stack_name)
        (template_url, template_name, parsed) = self.parse_store_template()
        capabilities = ['CAPABILITY_IAM']
        params = []
        if 'Parameters' in parsed.keys():
            for name in parsed['Parameters']:
                val = self.request.params.get(name)
                if val:
                    params.append((name, val))
        with boto_error_handler(self.request, location):
            self.log_request(u"Updating stack:{0}".format(stack_name))
            result = self.cloudformation_conn.update_stack(
                stack_name, template_url=template_url, capabilities=capabilities,
                parameters=params
            )
            # Re-key the stored template under a hash of the new stack id.
            stack_id = result[result.rfind('/') + 1:]
            d = hashlib.md5()
            d.update(stack_id)
            md5 = d.digest()
            stack_hash = base64.b64encode(md5, '--').replace('=', '')
            bucket = self.get_create_template_bucket(create=True)
            bucket.copy_key(
                new_key_name="{0}-{1}".format(stack_hash, template_name),
                src_key_name=template_name,
                src_bucket_name=bucket.name
            )
            bucket.delete_key(template_name)
            msg = _(u'Successfully sent update stack request. '
                    u'It may take a moment to update the stack.')
            queue = Notification.SUCCESS
            self.request.session.flash(msg, queue=queue)
            location = self.request.route_path('stack_view', name=stack_name)
            return HTTPFound(location=location)

    @view_config(route_name='stack_cancel_update', request_method='POST', xhr=True)
    def stack_cancel_update(self):
        """Cancel an in-progress stack update (AJAX)."""
        if not(self.is_csrf_valid()):
            return JSONResponse(status=400, message="missing CSRF token")
        stack_name = self.request.matchdict.get('name')
        with boto_error_handler(self.request):
            self.log_request(u"Cancelling update of stack:{0}".format(stack_name))
            self.cloudformation_conn.cancel_update_stack(stack_name)
            msg = _(u'Successfully sent cancel update request. '
                    u'It may take a moment to cancel the stack update.')
            return JSONResponse(status=200, message=msg)

    @view_config(route_name='stack_template_parse', renderer='json', request_method='POST')
    def stack_template_parse(self):
        """
        Fetches then parses template to return information needed by wizard,
        namely description and parameters.
        """
        with boto_error_handler(self.request):
            try:
                (template_url, template_name, parsed) = self.parse_store_template()
                if 'Resources' not in parsed:
                    raise JSONError(message=_(u'Invalid CloudFormation Template, Resources not found'), status=400)
                exception_list = []
                # Only check for AWS-only features when running against eucalyptus.
                if self.request.params.get('inputtype') != 'sample' and \
                        self.request.session.get('cloud_type', 'euca') == 'euca':
                    exception_list = StackWizardView.identify_aws_template(parsed)
                if len(exception_list) > 0:
                    # massage for the browser
                    service_list = []
                    resource_list = []
                    property_list = []
                    parameter_list = []
                    for resource in exception_list:
                        if resource['type'] == 'Parameter':
                            parameter_list.append(resource['name'])
                        else:
                            tmp = resource['type']
                            tmp = tmp[5:]  # strip leading 'AWS::'
                            if 'property' in resource.keys():
                                property_list.append('{0} ({1})'.format(tmp, resource['name']))
                            elif tmp.find('::') > -1:  # this means there's a resource there
                                resource_list.append(tmp)
                            else:
                                service_list.append(tmp)
                    # de-dupe each list
                    service_list = list(set(service_list))
                    resource_list = list(set(resource_list))
                    property_list = list(set(property_list))
                    return dict(
                        results=dict(
                            template_key=template_name,
                            description=parsed['Description'] if 'Description' in parsed else '',
                            service_list=service_list,
                            resource_list=resource_list,
                            property_list=property_list,
                            parameter_list=parameter_list
                        )
                    )
                params = []
                if 'Parameters' in parsed.keys():
                    params = self.generate_param_list(parsed)
                    if self.stack:
                        # populate defaults with actual values from stack
                        for param in params:
                            result = [p.value for p in self.stack.parameters if p.key == param['name']]
                            if result:
                                param['default'] = result[0]
                return dict(
                    results=dict(
                        template_key=template_name,
                        description=parsed['Description'] if 'Description' in parsed else '',
                        parameters=params,
                        template_bucket=self.get_create_template_bucket().name
                    )
                )
            except ValueError as json_err:
                raise JSONError(message=_(u'Invalid JSON File ({0})').format(json_err.message), status=400)
            except HTTPError as http_err:
                raise JSONError(message=_(u"""
                    Cannot read URL ({0}) If this URL is for an S3 object, be sure
                    that either the object has public read permissions or that the
                    URL is signed with authentication information.
                    """).format(http_err.reason), status=400)

    @view_config(route_name='stack_template_convert', renderer='json', request_method='POST')
    def stack_template_convert(self):
        """
        Fetches then parsed template to return information needed by wizard,
        namely description and parameters.
        """
        with boto_error_handler(self.request):
            (template_url, template_name, parsed) = self.parse_store_template()
            StackWizardView.identify_aws_template(parsed, modify=True)
            template_body = json.dumps(parsed, indent=2)
            # now, store it back in S3
            bucket = self.get_create_template_bucket(create=True)
            key = bucket.get_key(template_name)
            if key is None:
                key = bucket.new_key(template_name)
            key.set_contents_from_string(template_body)
            params = []
            if 'Parameters' in parsed.keys():
                params = self.generate_param_list(parsed)
            return dict(
                results=dict(
                    template_key=template_name,
                    parameters=params
                )
            )

    @staticmethod
    def get_s3_template_url(key):
        """Return an unsigned URL for the given S3 key (query string stripped)."""
        template_url = key.generate_url(1)
        return template_url[:template_url.find('?')]

    def generate_param_list(self, parsed):
        """
        Valid values are [
            String,
            Number,
            CommaDelimitedList,
            AWS::EC2::AvailabilityZone::Name,
            AWS::EC2::Image::Id,
            AWS::EC2::Instance::Id,
            AWS::EC2::KeyPair::KeyName,
            AWS::EC2::SecurityGroup::GroupName,
            AWS::EC2::SecurityGroup::Id,
            AWS::EC2::Subnet::Id,
            AWS::EC2::Volume::Id,
            AWS::EC2::VPC::Id,
            List<String>,
            List<Number>,
            List<AWS::EC2::AvailabilityZone::Name>,
            List<AWS::EC2::Image::Id>,
            List<AWS::EC2::Instance::Id>,
            List<AWS::EC2::KeyPair::KeyName>,
            List<AWS::EC2::SecurityGroup::GroupName>,
            List<AWS::EC2::SecurityGroup::Id>,
            List<AWS::EC2::Subnet::Id>,
            List<AWS::EC2::Volume::Id>,
            List<AWS::EC2::VPC::Id>
        ]
        """
        params = []
        for name in parsed['Parameters']:
            param = parsed['Parameters'][name]
            param_type = param['Type']
            param_vals = {
                'name': name,
                'description': param['Description'] if 'Description' in param else '',
                'type': param_type
            }
            if 'Default' in param:
                param_vals['default'] = param['Default']
            if 'MinLength' in param:
                param_vals['min'] = param['MinLength']
            if 'MaxLength' in param:
                param_vals['max'] = param['MaxLength']
            if 'AllowedPattern' in param:
                param_vals['regex'] = param['AllowedPattern']
            if 'ConstraintDescription' in param:
                param_vals['constraint'] = param['ConstraintDescription']
            if 'AllowedValues' in param:
                param_vals['options'] = [(val, val) for val in param['AllowedValues']]
            # guess at more options based on parameter name / declared type
            name_l = name.lower()
            if 'key' in name_l or param_type == 'AWS::EC2::KeyPair::KeyName':
                param_vals['options'] = self.get_key_options()  # fetch keypair names
            if 'security' in name_l and 'group' in name_l or param_type == 'AWS::EC2::SecurityGroup::GroupName':
                param_vals['options'] = self.get_group_options()  # fetch security group names
            if 'kernel' in name_l:
                param_vals['options'] = self.get_image_options(img_type='kernel')  # fetch kernel ids
            if 'ramdisk' in name_l:
                param_vals['options'] = self.get_image_options(img_type='ramdisk')  # fetch ramdisk ids
            if 'cert' in name_l:
                param_vals['options'] = self.get_cert_options()  # fetch server cert names
            if 'instance' in name_l and 'profile' in name_l:
                param_vals['options'] = self.get_instance_profile_options()
            if ('instance' in name_l and 'instancetype' not in name_l) or param_type == 'AWS::EC2::Instance::Id':
                param_vals['options'] = self.get_instance_options()  # fetch instances
            if 'volume' in name_l or param_type == 'AWS::EC2::Volume::Id':
                param_vals['options'] = self.get_volume_options()  # fetch volumes
            if ('vmtype' in name_l or 'instancetype' in name_l) and \
                    'options' not in param_vals.keys():
                param_vals['options'] = self.get_vmtype_options()
            if 'zone' in name_l or param_type == 'AWS::EC2::AvailabilityZone::Name':
                param_vals['options'] = self.get_availability_zone_options()
            # if no default, and options are a single value, set that as default
            if 'default' not in param_vals.keys() and \
                    'options' in param_vals.keys() and len(param_vals['options']) == 1:
                param_vals['default'] = param_vals['options'][0][0]
            # use a "chosen" (searchable) widget when the option list is long
            param_vals['chosen'] = True if \
                'options' in param_vals.keys() and len(param_vals['options']) > 9 \
                else False
            if 'image' in name_l or param_type == 'AWS::EC2::Image::Id':
                if self.request.session.get('cloud_type', 'euca') == 'aws':
                    # populate with amazon and user's images
                    param_vals['options'] = self.get_image_options(owner_alias='self')
                    param_vals['options'].extend(self.get_image_options(owner_alias='amazon'))
                else:
                    param_vals['options'] = self.get_image_options()  # fetch image ids
                # force image param to use chosen
                param_vals['chosen'] = True
            params.append(param_vals)
        return params

    def get_key_options(self):
        """Return (name, name) choices for the user's keypairs."""
        conn = self.get_connection()
        keys = conn.get_all_key_pairs()
        ret = []
        for key in keys:
            ret.append((key.name, key.name))
        return ret

    def get_group_options(self):
        """Return (name, name) choices for the user's security groups."""
        conn = self.get_connection()
        groups = conn.get_all_security_groups()
        ret = []
        for group in groups:
            ret.append((group.name, group.name))
        return ret

    def get_instance_options(self):
        """Return (id, display name) choices for the user's instances."""
        conn = self.get_connection()
        instances = conn.get_only_instances()
        ret = []
        for instance in instances:
            ret.append((instance.id, TaggedItemView.get_display_name(instance)))
        return ret

    def get_volume_options(self):
        """Return (id, display name) choices for the user's volumes."""
        conn = self.get_connection()
        volumes = conn.get_all_volumes()
        ret = []
        for volume in volumes:
            ret.append((volume.id, TaggedItemView.get_display_name(volume)))
        return ret

    def get_image_options(self, img_type='machine', owner_alias=None):
        """Return (id, "name (id)") choices for machine/kernel/ramdisk images."""
        conn = self.get_connection()
        region = self.request.session.get('region')
        owners = [owner_alias] if owner_alias else []
        images = []
        if img_type == 'machine':
            images = self.get_images(conn, owners, [], region)
        elif img_type == 'kernel':
            images = conn.get_all_kernels()
        elif img_type == 'ramdisk':
            images = conn.get_all_ramdisks()
        ret = []
        for image in images:
            ret.append((image.id, "{0} ({1})".format(image.name, image.id)))
        return ret

    def get_cert_options(self):
        """Return (arn, name) choices for server certificates (euca only)."""
        ret = []
        if self.cloud_type == 'euca':
            conn = self.get_connection(conn_type="iam")
            certs = conn.list_server_certs()
            certs = certs['list_server_certificates_response'][
                'list_server_certificates_result']['server_certificate_metadata_list']
            for cert in certs:
                ret.append((cert.arn, cert.server_certificate_name))
        return ret

    def get_instance_profile_options(self):
        """Return (arn, name) choices for instance profiles (euca only)."""
        ret = []
        if self.cloud_type == 'euca':
            conn = self.get_connection(conn_type="iam")
            profiles = conn.list_instance_profiles()
            profiles = profiles['list_instance_profiles_response'][
                'list_instance_profiles_result']['instance_profiles']
            for profile in profiles:
                ret.append((profile.arn, profile.instance_profile_name))
        return ret

    def get_vmtype_options(self):
        """Return instance-type choices for the current cloud."""
        conn = self.get_connection()
        vmtypes = ChoicesManager(conn).instance_types(self.cloud_type)
        return vmtypes

    def get_availability_zone_options(self):
        """Return availability-zone choices for the current cloud."""
        conn = self.get_connection()
        zones = ChoicesManager(conn).availability_zones(self.cloud_type, add_blank=False)
        return zones

    @view_config(route_name='stack_create', renderer=TEMPLATE, request_method='POST')
    def stack_create(self):
        """Handle the create-stack POST: store template in S3, call create_stack."""
        # NOTE(review): validation is bypassed here, so the else branch below
        # is currently unreachable; confirm whether this is intentional.
        if True:  # self.create_form.validate():
            stack_name = self.request.params.get('name')
            location = self.request.route_path('stacks')
            (template_url, template_name, parsed) = self.parse_store_template()
            capabilities = ['CAPABILITY_IAM']
            params = []
            if 'Parameters' in parsed.keys():
                for name in parsed['Parameters']:
                    val = self.request.params.get(name)
                    if val:
                        params.append((name, val))
            tags_json = self.request.params.get('tags')
            tags = None
            if tags_json:
                tags = json.loads(tags_json)
            with boto_error_handler(self.request, location):
                self.log_request(u"Creating stack:{0}".format(stack_name))
                result = self.cloudformation_conn.create_stack(
                    stack_name, template_url=template_url, capabilities=capabilities,
                    parameters=params, tags=tags
                )
                # Re-key the stored template under a hash of the new stack id.
                stack_id = result[result.rfind('/') + 1:]
                d = hashlib.md5()
                d.update(stack_id)
                md5 = d.digest()
                stack_hash = base64.b64encode(md5, '--').replace('=', '')
                bucket = self.get_create_template_bucket(create=True)
                bucket.copy_key(
                    new_key_name="{0}-{1}".format(stack_hash, template_name),
                    src_key_name=template_name,
                    src_bucket_name=bucket.name
                )
                bucket.delete_key(template_name)
                msg = _(u'Successfully sent create stack request. '
                        u'It may take a moment to create the stack.')
                queue = Notification.SUCCESS
                self.request.session.flash(msg, queue=queue)
                location = self.request.route_path('stack_view', name=stack_name)
                return HTTPFound(location=location)
        else:
            self.request.error_messages = self.create_form.get_errors_list()
            return self.render_dict

    def parse_store_template(self):
        """Locate the template (S3 key, upload, URL, body, stack, or sample),
        store it in the create-template bucket, and return
        (template_url, template_name, parsed_json).
        """
        s3_template_key = self.request.params.get('s3-template-key')
        if s3_template_key:
            # pull previously uploaded...
            bucket = self.get_create_template_bucket(create=True)
            key = bucket.get_key(s3_template_key)
            template_name = s3_template_key
            template_body = key.get_contents_as_string()
            template_url = self.get_s3_template_url(key)
        else:
            template_name = self.request.params.get('sample-template')
            template_url = self.request.params.get('template-url')
            files = self.request.POST.getall('template-file')
            template_body = self.request.params.get('template-body')
            if len(files) > 0 and len(str(files[0])) > 0:  # read from file
                files[0].file.seek(0, 2)  # seek to end
                if files[0].file.tell() > TEMPLATE_BODY_LIMIT:
                    raise JSONError(status=400, message=_(u'File too large: ') + files[0].filename)
                files[0].file.seek(0, 0)  # seek to start
                template_body = files[0].file.read()
                template_name = files[0].filename
            elif template_url:  # read from url
                whitelist = self.request.registry.settings.get('cloudformation.url.whitelist', 'http://*, https://*')
                match = False
                for pattern in whitelist.split(','):
                    matches = fnmatch.fnmatch(template_url, pattern.strip())
                    if matches:
                        match = True
                if not match:
                    msg = _(u'The URL is invalid. Valid URLs can only include ')
                    last_comma_idx = whitelist.rfind(',')
                    if last_comma_idx != -1:
                        whitelist = whitelist[:last_comma_idx] + _(u' or') + whitelist[last_comma_idx + 1:]
                    msg = msg + whitelist + _(u' Please change your URL.')
                    raise JSONError(
                        status=400,
                        message=msg
                    )
                try:
                    template_body = urllib2.urlopen(template_url).read(TEMPLATE_BODY_LIMIT)
                except URLError:
                    raise JSONError(status=400, message=_(u'Cannot read from url provided.'))
                template_name = template_url[template_url.rindex('/') + 1:]
                if len(template_body) > TEMPLATE_BODY_LIMIT:
                    raise JSONError(status=400, message=_(u'Template too large: ') + template_name)
            elif template_body:
                # just proceed if body provided with request
                template_name = 'current'
            elif template_name is None and self.stack:
                # loading template from existing stack
                template_name = 'current'
                response = self.cloudformation_conn.get_template(self.stack.stack_name)
                template_body = response['GetTemplateResponse']['GetTemplateResult']['TemplateBody']
            else:
                # sample template, from S3 bucket or bundled files
                s3_bucket = self.get_template_samples_bucket()
                mgr = CFSampleTemplateManager(s3_bucket)
                templates = mgr.get_template_list()
                for directory, files in templates:
                    if template_name in [f for (name, f) in files]:
                        if directory == 's3':
                            s3_key = s3_bucket.get_key(template_name)
                            template_body = s3_key.get_contents_as_string()
                        else:
                            fd = open(os.path.join(directory, template_name), 'r')
                            template_body = fd.read()
            # now that we have it, store in S3
            bucket = self.get_create_template_bucket(create=True)
            key = bucket.get_key(template_name)
            if key is None:
                key = bucket.new_key(template_name)
            key.set_contents_from_string(template_body)
            template_url = self.get_s3_template_url(key)
        parsed = json.loads(template_body)
        return template_url, template_name, parsed

    @staticmethod
    def identify_aws_template(parsed, modify=False):
        """
        drawn from here:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
        and https://www.eucalyptus.com/docs/eucalyptus/4.1.1/index.html#cloudformation/cf_overview.html
        """
        aws_resource_prefixes = [
            'AWS::AutoScaling::LifecycleHook',
            'AWS::AutoScaling::ScheduledAction',
            'AWS::CloudFront',
            'AWS::CloudTrail',
            'AWS::DynamoDB',
            'AWS::EC2::VPCEndpoint',
            'AWS::EC2::VPCPeeringConnection',
            'AWS::EC2::VPNConnection',
            'AWS::EC2::VPNConnectionRoute',
            'AWS::EC2::VPNGateway',
            'AWS::EC2::VPNGatewayRoutePropagation',
            'AWS::ElastiCache',
            'AWS::ElasticBeanstalk',
            'AWS::Kinesis',
            'AWS::Logs',
            # FIX: was 'AWS::OpsWOrks' (typo) and could never match a real resource type
            'AWS::OpsWorks',
            'AWS::Redshift',
            'AWS::RDS',
            'AWS::Route53',
            'AWS::S3::BucketPolicy',
            'AWS::SDB',
            'AWS::SNS',
            'AWS::SQS'
        ]
        unsupported_properties = [
            {'resource': 'AWS::AutoScaling::AutoScalingGroup', 'properties': [
                'HealthCheckType', 'Tags', 'VpcZoneIdentifier'
            ]},
            # FIX: was 'LaunchConiguration' (typo) and could never match a real resource type
            {'resource': 'AWS::AutoScaling::LaunchConfiguration', 'properties': [
                'AssociatePublicIpAddress'
            ]},
            {'resource': 'AWS::EC2::EIP', 'properties': [
                'Domain'
            ]},
            # NOTE(review): 'HealthCheckType' on EC2::Volume looks copy-pasted from
            # the AutoScalingGroup entry above — confirm against euca docs.
            {'resource': 'AWS::EC2::Volume', 'properties': [
                'HealthCheckType', 'Tags'
            ]},
            {'resource': 'AWS::ElasticLoadBalancing::LoadBalancer', 'properties': [
                'AccessLoggingPolicy', 'ConnectionDrainingPolicy',
                'Policies.InstancePorts', 'Policies.LoadBalancerPorts'
            ]},
            {'resource': 'AWS::IAM::AccessKey', 'properties': [
                'Serial'
            ]}
        ]
        ret = []
        # first pass, find non-euca resources
        for name in parsed['Resources']:
            resource = parsed['Resources'][name]
            for prefix in aws_resource_prefixes:
                if resource['Type'].find(prefix) == 0:
                    ret.append({'name': name, 'type': prefix})
        # second pass, find non-euca properties
        for name in parsed['Resources']:
            resource = parsed['Resources'][name]
            for props in unsupported_properties:
                if resource['Type'].find(props['resource']) == 0:
                    for prop in props['properties']:
                        if 'Properties' in resource and prop in resource['Properties'].keys():
                            ret.append({
                                'name': prop,
                                'type': props['resource'],
                                'property': True
                            })

        # third pass, find refs to cloud-specific resources
        def find_image_ref(_name, item):
            if _name == 'Parameters':
                return  # ignore refs already in params
            if type(item) is dict and 'ImageId' in item.keys():
                img_item = item['ImageId']
                if isinstance(img_item, dict) and 'Ref' not in img_item.keys():
                    # check for emi lookup in map
                    if 'Fn::FindInMap' in img_item.keys():
                        map_name = img_item['Fn::FindInMap'][0]
                        if parsed['Mappings'] and parsed['Mappings'][map_name]:
                            img_map = parsed['Mappings'][map_name]
                            if json.dumps(img_map).find('emi-') > -1:
                                return
                    ret.append({
                        'name': 'ImageId',
                        'type': 'Parameter',
                        'item': item})
        StackWizardView.traverse(parsed, find_image_ref)
        if modify:
            for res in ret:
                # remove resources found in pass 1
                for name in parsed['Resources'].keys():
                    if res['name'] == name and 'property' not in res.keys():
                        del parsed['Resources'][name]
                # modify resource refs into params
                if res['name'] == 'ImageId':
                    res['item']['ImageId'] = {'Ref': 'ImageId'}
                    parsed['Parameters']['ImageId'] = dict(
                        Description='Image required to run this template',
                        Type='String'
                    )
            # and, because we provide instance types, remove 'AllowedValues' for InstanceType
            if 'Parameters' in parsed.keys():
                for name in parsed['Parameters']:
                    if name == 'InstanceType' and 'AllowedValues' in parsed['Parameters'][name]:
                        del parsed['Parameters'][name]['AllowedValues']
        return ret

    @staticmethod
    def traverse(graph, func, depth=0):
        """Depth-limited walk over a nested dict/list, calling func(key, item)."""
        if depth > 5:  # safety valve
            return
        if type(graph) is list:
            for item in graph:
                func(None, item)
                StackWizardView.traverse(item, func, depth + 1)
        if type(graph) is dict:
            for key in graph:
                item = graph[key]
                func(key, item)
                StackWizardView.traverse(item, func, depth + 1)
|
# tf_keras/prunned/keras_finetune_prune.py
# Module preamble: imports, reproducibility seeding, and kerassurgeon path setup.
import sys
import os.path
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import math
import numpy as np
import re
import glob
import argparse
import warnings
import time,datetime
from random import shuffle,seed
from sklearn.metrics import recall_score,accuracy_score
from sklearn.metrics.pairwise import pairwise_distances
import numpy as np  # NOTE(review): duplicate of the earlier numpy import
RANDOM_SEED=123
np.random.seed(RANDOM_SEED) # for reproducibility
seed(RANDOM_SEED)
# Only jpg/jpeg files are treated as dataset images.
VALID_IMAGE_FORMATS = frozenset(['jpg', 'jpeg'])
# All images are resized to IMG_SIZE x IMG_SIZE before preprocessing.
IMG_SIZE=224
import sys  # NOTE(review): duplicate of the earlier sys import
# Make the vendored keras-surgeon checkout importable.
new_sys_dir = os.path.join('..','..','keras-surgeon','src')
if not new_sys_dir in sys.path:
    sys.path.append(new_sys_dir)
from kerassurgeon import identify,utils
from kerassurgeon.operations import delete_channels
from kerassurgeon import Surgeon
import cv2
def get_images(image_dir, image_lists, preprocess_fct):
    """Load up to five training images per label as a preprocessed batch.

    Returns (images, labels): a float32 array of RGB images resized to
    IMG_SIZE x IMG_SIZE and run through preprocess_fct, and an int array of
    class indices (one per image, in label iteration order).
    """
    batch = []
    targets = []
    for class_ind, label in enumerate(image_lists):
        filenames = sorted(image_lists[label]['training'])
        shuffle(filenames)
        for fname in filenames[:5]:
            path = os.path.join(image_dir, image_lists[label]['dir'], fname)
            img = cv2.imread(path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            batch.append(preprocess_fct(img.astype(np.float32)))
            targets.append(class_ind)
    images = np.array(batch)
    y = np.array(targets)
    print(images.shape, y.shape)
    return images, y
def get_model_apoz(model, image_dir, image_lists, preprocess_fct):
    """Compute per-channel APoZ values for every conv layer in the model.

    Returns a pandas DataFrame with columns 'index' and 'apoz', indexed by
    layer name. A fresh (re-shuffled) image sample is drawn per layer via
    get_images.
    """
    # FIX: pandas is used below but never imported at module level in this
    # file; import locally so the function works.
    import pandas as pd
    # Get APoZ
    apoz = []
    for layer in model.layers:
        if is_conv(layer):
            print(layer.name)
            images, _ = get_images(image_dir, image_lists, preprocess_fct)
            print(images.shape)
            apoz.extend([(layer.name, i, value) for (i, value)
                         in enumerate(get_apoz(model, layer, images))])
    layer_name, index, apoz_value = zip(*apoz)
    apoz_df = pd.DataFrame({'layer': layer_name, 'index': index,
                            'apoz': apoz_value})
    apoz_df = apoz_df.set_index('layer')
    return apoz_df
def get_channels_apoz_importance(model, layer, x_val, node_indices=None):
    """Return the mean fraction of zero activations (APoZ) per channel of `layer`.

    Runs a forward pass over `x_val`, collects the activations of the layer
    (via its associated activation layer) for each selected inbound node, and
    averages the per-channel zero rate. Higher values indicate channels that
    are more often inactive and thus candidates for pruning.

    Raises ValueError if `layer` is not in `model` or `node_indices` is invalid.
    """
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    # Check that layer is in the model
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    # If no nodes are specified, all of the layer's inbound nodes which are
    # in model are selected.
    if not node_indices:
        node_indices = layer_node_indices
    # Check for duplicate node indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    # Check that all of the selected nodes are in the layer
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    # Perform the forward pass and get the activations of the layer.
    mean_calculator = utils.MeanCalculator(sum_axis=0)
    print('layer:',layer,layer_node_indices,node_indices)
    for node_index in node_indices:
        act_layer, act_index = utils.find_activation_layer(layer, node_index)
        print('act layer',act_layer, act_index)
        # Get activations
        if hasattr(x_val, "__iter__"):
            # Array-like input: build a sub-model ending at the activation
            # output and predict in one call.
            temp_model = Model(model.inputs,
                               act_layer.get_output_at(act_index))
            print('before: act output',act_layer.get_output_at(act_index))
            a = temp_model.predict(x_val)
            #a=temp_model.predict_generator(x_val, x_val.n // x_val.batch_size)
            print('after:',layer,a.shape)
        else:
            # Fallback: evaluate via a backend function (inference phase = 0).
            get_activations = K.function(
                [single_element(model.inputs), K.learning_phase()],
                [act_layer.get_output_at(act_index)])
            a = get_activations([x_val, 0])[0]
        # Ensure that the channels axis is last
        if data_format == 'channels_first':
            a = np.swapaxes(a, 1, -1)
        # Flatten all except channels axis
        activations = np.reshape(a, [-1, a.shape[-1]])
        zeros = (activations == 0).astype(int)
        mean_calculator.add(zeros)
    return mean_calculator.calculate()
def get_channels_importance(model, layer, x_val, y , node_indices=None):
    """Score each output channel of `layer` by a class-separation statistic
    computed on pairwise distances between per-sample activation vectors.

    The active criterion (the final `else` branch below) reshapes the
    pairwise-distance matrix into class blocks, takes per-block medians as
    reference distances, and scores a channel by the (negated) chi-square-like
    deviation of its actual distances from those references. The `if False` /
    `elif False` branches are alternative criteria kept for experimentation.

    NOTE(review): the block reshape `pair_dist.reshape(num_classes, delta, ...)`
    assumes `x_val`/`y` are ordered by class with exactly `delta` samples per
    class — TODO confirm against the caller.

    Args:
        model: Keras model containing `layer`.
        layer: layer instance or layer name.
        x_val: validation inputs.
        y: integer class labels aligned with `x_val`.
        node_indices: optional inbound-node indices (duplicates/foreign nodes
            rejected with ValueError).

    Returns:
        1-D numpy array with one importance score per channel (larger means
        more important under the active criterion).
    """
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    # Check that layer is in the model
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    print('layer:',layer,layer_node_indices)
    # If no nodes are specified, all of the layer's inbound nodes which are
    # in model are selected.
    if not node_indices:
        node_indices = layer_node_indices
    # Check for duplicate node indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    # Check that all of the selected nodes are in the layer
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    # Perform the forward pass and get the activations of the layer.
    importances=[]
    print('layer:',layer,layer_node_indices,node_indices)
    if len(node_indices)>1:
        # Multi-node layers are not handled by this criterion.
        print('ERROR!!!!!!!!!!!!!!!!!!!!!!!!')
    # Get activations
    if hasattr(x_val, "__iter__"):
        temp_model = Model(model.inputs,layer.output)
        print('before: act output',layer.output)
        a = temp_model.predict(x_val)
        #a=temp_model.predict_generator(x_val, x_val.n // x_val.batch_size)
        if data_format == 'channels_first':
            a = np.swapaxes(a, 1, -1)
        print('after:',layer,a.shape,data_format)
        # Flatten all except channels axis
        for channel in range(a.shape[-1]):
            # One (num_samples, spatial) matrix per channel.
            activations = a[...,channel]
            activations = np.reshape(activations, [activations.shape[0],-1])
            #print('after:',layer,activations.shape)
            #pair_dist=activations
            pair_dist=pairwise_distances(activations)
            #pair_dist/=pair_dist.mean()
            if False:
                # Criterion 1 (disabled): total pairwise distance mass.
                importance=(abs(pair_dist)).sum()
            elif False:
                # Criterion 2 (disabled): nearest-other-class distance.
                indices=np.argsort(pair_dist,axis=1)
                same_class=(y[indices[:,1:]]==y.reshape(y.shape[0],1))
                other_indices=indices[:,1:]
                first_same_class=np.argmax(same_class,axis=1)
                first_other_class=np.argmin(same_class,axis=1)
                #importance=sum([pair_dist[i,other_indices[i,first_same_class[i]]]/pair_dist[i,other_indices[i,first_other_class[i]]] for i in range(y.shape[0])])/y.shape[0]
                #importance=sum([pair_dist[i,other_indices[i,first_same_class[i]]] for i in range(y.shape[0])])/y.shape[0]
                importance=sum([pair_dist[i,other_indices[i,first_other_class[i]]] for i in range(y.shape[0])])/y.shape[0]
            else:
                # Criterion 3 (active): deviation from per-class-block medians.
                classes=np.unique(y)
                num_classes=classes.shape[0]
                delta=len(y)//num_classes
                #class_dists=np.array([[np.median(pair_dist[y==classes[i]][:,y==classes[j]]) for j in range(num_classes)] for i in range(num_classes)])
                #class_dists=np.array([[pair_dist[y==classes[i]][:,y==classes[j]][np.where(pair_dist[y==classes[i]][:,y==classes[j]]!=0)].mean() for j in range(num_classes)] for i in range(num_classes)])
                #class_dists=np.array([[np.mean(pair_dist[i:i+delta,j:j+delta]) for j in range(0,len(y),delta)] for i in range(0,len(y),delta)])
                pdr=pair_dist.reshape(num_classes,delta,num_classes,delta)
                class_dists=np.median(pdr,axis=(1,3))
                #class_dists=np.sum(pdr,axis=(1,3))/np.sum(pdr>0,axis=(1,3))
                #instance_dists=np.array([[pair_dist[y==y[i]][:,y==y[j]][np.where(pair_dist[y==y[i]][:,y==y[j]]!=0)].mean() for j in range(y.shape[0])] for i in range(y.shape[0])])
                #instance_dists=np.array([[np.median(pair_dist[y==y[i]][:,y==y[j]]) for j in range(y.shape[0])] for i in range(y.shape[0])])
                #instance_dists=np.array([[np.median(pair_dist[y==y[i]][:,y==y[j]]) for j in range(y.shape[0])] for i in range(y.shape[0])])
                #instance_dists=np.array([[class_dists[y[i]][y[j]] for j in range(y.shape[0])] for i in range(y.shape[0])])
                # Broadcast the class-block medians back to per-instance shape.
                instance_dists=np.repeat(np.repeat(class_dists,delta,axis=0),delta,axis=1)
                importance=-(((pair_dist-instance_dists)**2)/instance_dists).sum() #+np.log(instance_dists)
                #if abs(importance)<0.01:
                #    print(channel,pair_dist,instance_dists)
            importances.append(importance)
            #print(indices,y[indices])
            #print(first_same_class,first_other_class)
            #print(pair_dist)
            #sys.exit(0)
    importances=np.array(importances)
    return importances
def get_channels_loss(model, layer, x_val, y , node_indices=None):
    """Score each output channel of `layer` by the validation loss observed
    when that channel's activations are zeroed out (an ablation criterion).

    For every channel, the layer's activations are recomputed with the channel
    set to 0, the remaining network is re-run from the layer's output via a
    backend function, and the resulting cross-entropy-style loss is recorded.
    A lower score means the model barely suffers without the channel.

    NOTE(review): `func` feeds both `layer.output` and `model.input`; this
    relies on the fed layer output overriding the recomputed one in the
    backend graph — confirm against the Keras/TF version in use.

    Args:
        model: Keras model containing `layer`.
        layer: layer instance or layer name.
        x_val: validation inputs.
        y: integer class labels aligned with `x_val`.
        node_indices: optional inbound-node indices (validated as elsewhere).

    Returns:
        1-D numpy array with one ablation-loss score per channel.
    """
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    # Check that layer is in the model
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    print('layer:',layer,layer_node_indices)
    # If no nodes are specified, all of the layer's inbound nodes which are
    # in model are selected.
    if not node_indices:
        node_indices = layer_node_indices
    # Check for duplicate node indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    # Check that all of the selected nodes are in the layer
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    # Perform the forward pass and get the activations of the layer.
    importances=[]
    print('layer:',layer,layer_node_indices,node_indices)
    if len(node_indices)>1:
        # Multi-node layers are not handled by this criterion.
        print('ERROR!!!!!!!!!!!!!!!!!!!!!!!!')
    # Get activations
    if hasattr(x_val, "__iter__"):
        temp_model = Model(model.inputs,layer.output)
        # Backend function that maps (layer activations, model input) to the
        # model's final output.
        func=K.function([layer.output,model.input], [model.output])
        print('before: act output',layer.output)
        a = temp_model.predict(x_val)
        print(a.shape,layer.output.shape)
        #a=temp_model.predict_generator(x_val, x_val.n // x_val.batch_size)
        print('after:',layer,a.shape,data_format)
        # Flatten all except channels axis
        for channel in range(a.shape[-1]):
            activations = a[...,channel]
            #print('after:',layer,activations.shape)
            if True:
                # Ablation mode: zero out only this channel.
                a_new=a.copy()
                if data_format == 'channels_first':
                    mean_activation=abs(a_new[:,channel]).mean()
                    a_new[:,channel]=0
                else:
                    mean_activation=abs(a_new[...,channel]).mean()
                    a_new[...,channel]=0
            else:
                # Isolation mode (disabled): keep only this channel.
                a_new=np.zeros(a.shape)
                if data_format == 'channels_first':
                    mean_activation=abs(a[:,channel]).mean()
                    a_new[:,channel]=a[:,channel]
                else:
                    mean_activation=abs(a[...,channel]).mean()
                    a_new[...,channel]=a[...,channel]
            y_pred=[]
            acc=0
            loss=0
            delta=128
            # Re-run the tail of the network in batches of `delta`.
            for i in range(0,a_new.shape[0],delta):
                x=a_new[i:i+delta]
                b=func([x,x_val[i:i+delta]])[0]
                #print(x.shape,b.shape,b[y[i]])
                #y_pred.extend(b)
                acc+=(np.argmax(b,axis=1)==y[i:i+delta]).sum()
                # Column-index grid used to pick each sample's true-class
                # probability for the negative-log-likelihood sum.
                ind=np.meshgrid(np.arange(b.shape[1]),np.arange(b.shape[0]))[0]
                loss-=np.log(b[ind==y[i:i+delta].reshape(x.shape[0],1)]).sum()
                #for j in range(b.shape[0]):
                #    loss-=math.log(b[j][y[i+j]])
                #loss-=np.log().sum()
                #if np.argmax(b)==y[i]:
                #    acc+=1
                #loss-=math.log(b[y[i]])
            y_pred=np.array(y_pred)
            acc/=a_new.shape[0]
            loss/=a_new.shape[0]
            print(channel, 'y_pred:',y_pred.shape,acc,loss,mean_activation)
            importances.append(loss)
            #print(indices,y[indices])
            #print(pair_dist)
            #sys.exit(0)
        #sys.exit(0)
    importances=np.array(importances)
    return importances
def get_channels_gradients(model, layer, x_val, y , node_indices=None):
    """Score each output channel of `layer` by a Taylor-style criterion:
    the accumulated |activation * d(loss)/d(activation)| mass per channel.

    Builds a backend function around `K.gradients(model.total_loss,
    layer.output)` and sums the per-channel absolute activation-gradient
    products over `x_val` in mini-batches of 32.

    NOTE(review): uses `model.total_loss` / `model.sample_weights` /
    `model.targets`, which require the model to be compiled first — confirm
    callers compile before calling this.

    Args:
        model: compiled Keras model containing `layer`.
        layer: layer instance or layer name.
        x_val: validation inputs.
        y: labels fed as `model.targets[0]`.
        node_indices: optional inbound-node indices (validated as elsewhere).

    Returns:
        1-D numpy array with one accumulated score per channel.
    """
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    # Check that layer is in the model
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    print('layer:',layer,layer_node_indices)
    # If no nodes are specified, all of the layer's inbound nodes which are
    # in model are selected.
    if not node_indices:
        node_indices = layer_node_indices
    # Check for duplicate node indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    # Check that all of the selected nodes are in the layer
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    # Perform the forward pass and get the activations of the layer.
    importances=[]
    print('layer:',layer,layer_node_indices,node_indices)
    if len(node_indices)>1:
        # Multi-node layers are not handled by this criterion.
        print('ERROR!!!!!!!!!!!!!!!!!!!!!!!!')
    # Get activations
    if hasattr(x_val, "__iter__"):
        grads = K.gradients(model.total_loss, layer.output)[0]
        input_tensors = [model.inputs[0], # input data
                         model.sample_weights[0], # how much to weight each sample by
                         model.targets[0], # labels
                         K.learning_phase(), # train or test mode
                         ]
        if False:
            # Disabled variant: product of batch-mean activation and
            # batch-mean gradient (means taken before multiplying).
            mul_a_grads_tensor=K.mean(layer.output,axis=0)*K.mean(grads,axis=0)
            if K.image_data_format() != 'channels_first':
                x = K.permute_dimensions(mul_a_grads_tensor, (2, 0, 1))
            x_shape=K.int_shape(x)
            #print(x_shape)
            x=K.reshape(x,(x_shape[0],x_shape[1]*x_shape[2]))
            x=K.sum(x,axis=1)
            x=K.abs(x)
        else:
            # Active variant: per-sample activation*gradient, channel axis
            # moved first, spatial positions summed, |.| per sample, then
            # summed over the batch.
            mul_a_grads_tensor=layer.output*grads
            if K.image_data_format() == 'channels_first':
                x = K.permute_dimensions(mul_a_grads_tensor, (1, 0, 2, 3))
            else:
                x = K.permute_dimensions(mul_a_grads_tensor, (3, 0, 1, 2))
            x_shape=K.int_shape(x)
            #print(x_shape)
            x=K.reshape(x,(x_shape[0],-1,x_shape[2]*x_shape[3]))
            x=K.sum(x,axis=2)
            x=K.abs(x)
            x=K.sum(x,axis=1)
        func=K.function(input_tensors, [x])
        print('before: act output',layer.output)
        delta=32
        importances=None
        # Accumulate the per-channel score over mini-batches; sample weights
        # are all ones and learning phase 0 (test mode).
        for i in range(0,x_val.shape[0],delta):
            x=x_val[i:i+delta]
            q_part= func([x,np.ones(x.shape[0]),y[i:i+delta],0])[0]
            if importances is None:
                importances=q_part.copy()
            else:
                importances+=q_part
        print('after:',importances.shape,layer.output.shape,data_format)
    return importances
def get_channels_importance_with_gradient(model, layer, x_val, y, node_indices=None):
    """Score each channel by pairwise activation distances weighted with
    the channel's loss gradients — a hybrid of the distance-based and
    gradient-based criteria above.

    Per mini-batch, fetches both the layer's activations (channel axis first,
    flattened) and the per-channel summed |gradients|, builds a pairwise
    distance matrix per channel, multiplies it by the gradient weights, and
    accumulates the total weighted distance mass per channel.

    NOTE(review): requires a compiled model (`model.total_loss`,
    `model.sample_weights`, `model.targets`) — confirm callers compile first.

    Args:
        model: compiled Keras model containing `layer`.
        layer: layer instance or layer name.
        x_val: validation inputs.
        y: labels fed as `model.targets[0]`.
        node_indices: optional inbound-node indices (validated as elsewhere).

    Returns:
        1-D numpy array with one accumulated score per channel.
    """
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    # Check that layer is in the model
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    print('layer:',layer,layer_node_indices)
    # If no nodes are specified, all of the layer's inbound nodes which are
    # in model are selected.
    if not node_indices:
        node_indices = layer_node_indices
    # Check for duplicate node indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    # Check that all of the selected nodes are in the layer
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    # Perform the forward pass and get the activations of the layer.
    importances=[]
    print('layer:',layer,layer_node_indices,node_indices)
    if len(node_indices)>1:
        # Multi-node layers are not handled by this criterion.
        print('ERROR!!!!!!!!!!!!!!!!!!!!!!!!')
    # Get activations
    if hasattr(x_val, "__iter__"):
        temp_model = Model(model.inputs,layer.output)
        print('before: act output',layer.output)
        a = temp_model.predict(x_val)
        grads = K.gradients(model.total_loss, layer.output)[0]
        input_tensors = [model.inputs[0], # input data
                         model.sample_weights[0], # how much to weight each sample by
                         model.targets[0], # labels
                         K.learning_phase(), # train or test mode
                         ]
        # Move the channel axis first for both activations and gradients so
        # indexing by channel below is uniform across data formats.
        if K.image_data_format() == 'channels_first':
            dimensions=(1, 0, 2, 3)
        else:
            dimensions=(3, 0, 1, 2)
        acts = K.permute_dimensions(layer.output, dimensions)
        grads = K.permute_dimensions(grads, dimensions)
        grads_shape=K.int_shape(grads)
        #print(x_shape)
        grads=K.reshape(grads,(grads_shape[0],-1,grads_shape[2]*grads_shape[3]))
        grads=K.sum(K.abs(grads),axis=2)
        acts=K.reshape(acts,(grads_shape[0],-1,grads_shape[2]*grads_shape[3]))
        func=K.function(input_tensors, [acts,grads])
        print('before: act output',layer.output)
        delta=32
        importances=None
        for i in range(0,x_val.shape[0],delta):
            x_part=x_val[i:i+delta]
            y_part=y[i:i+delta]
            # Sample weights all ones, learning phase 0 (test mode).
            a,g= func([x_part,np.ones(x_part.shape[0]),y_part,0])
            #print('after:',a.shape,g.shape,data_format)
            num_channels=a.shape[0]
            if importances is None:
                importances=np.zeros(num_channels)
            for channel in range(num_channels):
                activations = a[channel]
                activations = np.reshape(activations, [activations.shape[0],-1])
                #print('after:',layer,activations.shape)
                #pair_dist=activations
                pair_dist=pairwise_distances(activations)
                # Weight each row of distances by the paired sample's
                # per-channel gradient magnitude.
                weighted_pair_dist=pair_dist*np.transpose(g[channel])
                if True:
                    # Active variant: total weighted distance mass.
                    importance=(abs(weighted_pair_dist)).sum()
                else:
                    # Disabled variant: nearest-other-class weighted distance.
                    indices=np.argsort(weighted_pair_dist,axis=1)
                    same_class=(y_part[indices[:,1:]]==y_part.reshape(y_part.shape[0],1))
                    other_indices=indices[:,1:]
                    first_same_class=np.argmax(same_class,axis=1)
                    first_other_class=np.argmin(same_class,axis=1)
                    #importance=sum([pair_dist[i,other_indices[i,first_same_class[i]]]/pair_dist[i,other_indices[i,first_other_class[i]]] for i in range(y.shape[0])])/y.shape[0]
                    #importance=sum([pair_dist[i,other_indices[i,first_same_class[i]]] for i in range(y_part.shape[0])])/y_part.shape[0]
                    importance=sum([weighted_pair_dist[i,other_indices[i,first_other_class[i]]] for i in range(y_part.shape[0])])/y_part.shape[0]
                importances[channel]+=importance
        print('after:',importances.shape,layer.output.shape,data_format)
        #sys.exit(0)
    return importances
def get_channels_l1_norm(model, layer, node_indices=None):
    """Rank the output channels of `layer` by the L1 norm of their kernel
    weights (data-free pruning criterion: smaller norm = less important).

    Args:
        model: Keras model containing `layer`.
        layer: layer instance, or layer name looked up in `model`.
        node_indices: optional inbound-node indices; validated but otherwise
            unused by this weight-based criterion.

    Returns:
        1-D array with one L1 weight norm per output channel.

    Raises:
        ValueError: if `layer` is not in `model`, or `node_indices` contains
            duplicates or nodes not belonging to the layer.
    """
    # Accept a layer name as well as a layer instance.
    if isinstance(layer, str):
        layer = model.get_layer(name=layer)
    if layer not in model.layers:
        raise ValueError('layer is not a valid Layer in model.')
    layer_node_indices = utils.find_nodes_in_model(model, layer)
    print('layer:',layer,layer_node_indices)
    if not node_indices:
        # Default to every inbound node of the layer that lives in `model`.
        node_indices = layer_node_indices
    elif len(node_indices) != len(set(node_indices)):
        raise ValueError('`node_indices` contains duplicate values.')
    elif not set(node_indices).issubset(layer_node_indices):
        raise ValueError('One or more nodes specified by `layer` and '
                         '`node_indices` are not in `model`.')
    data_format = getattr(layer, 'data_format', 'channels_last')
    kernel = layer.get_weights()[0]
    if data_format == 'channels_first':
        # Put the output-channel axis last so the reduction below is uniform.
        kernel = np.swapaxes(kernel, 1, -1)
    # Sum |w| over every axis except the last (output channels).
    importances = np.abs(kernel).sum(axis=(0, 1, 2))
    print(kernel.shape,importances.shape)
    return importances
def prune_model_by_layer(model, percent_channels_delete, image_dir, image_lists, preprocess_fct):
    """Prune the lowest-importance channels of every conv layer and return
    the pruned copy of `model`.

    Importance is the data-free L1 weight norm (get_channels_l1_norm).
    `image_dir`, `image_lists` and `preprocess_fct` are kept in the signature
    for callers that switch to a data-driven criterion (see the
    get_channels_* alternatives above); they are unused by the L1 criterion.

    Args:
        model: Keras model to prune (pruned on a copy; `model` is untouched).
        percent_channels_delete: percentage of channels to remove per layer.

    Returns:
        The pruned model produced by Surgeon.operate().
    """
    # Queue one 'delete_channels' job per conv layer, then apply all at once.
    surgeon = Surgeon(model, copy=True)
    for layer in model.layers:
        if identify.is_conv(layer):
            print(layer.name)
            num_total_channels = layer.output_shape[-1]
            # Round the removal count down to a multiple of 16 (keeps the
            # remaining channel count hardware/SIMD friendly).
            num_removed_channels = int(num_total_channels * percent_channels_delete / 100) // 16 * 16
            if num_removed_channels > 0:
                # NOTE: a disabled branch previously selected among the
                # data-driven criteria here; it never assigned
                # channels_importance (all assignments were commented out)
                # and would have raised NameError if enabled. Removed.
                channels_importance = get_channels_l1_norm(model, layer)
                # Ascending argsort: take the least-important channels.
                channels = np.argsort(channels_importance)[:num_removed_channels]
                print('before add_job:',layer.name,channels,channels_importance[channels],len(channels),num_total_channels)
                surgeon.add_job('delete_channels', layer,channels=channels)
    # Delete channels
    return surgeon.operate()
def prune_model_random(model, percent_channels_delete):
    """Prune a random subset of channels from every conv layer and return
    the pruned copy of `model` (baseline for importance-based pruning).

    Args:
        model: Keras model to prune (pruned on a copy; `model` is untouched).
        percent_channels_delete: percentage of channels to remove per layer,
            rounded down to a multiple of 16.

    Returns:
        The pruned model produced by Surgeon.operate().
    """
    # Queue one 'delete_channels' job per conv layer, then apply all at once.
    surgeon = Surgeon(model, copy=True)
    for layer in model.layers:
        if not identify.is_conv(layer):
            continue
        print(layer.name)
        n_channels = layer.output_shape[-1]
        # Shuffle all channel indices, then keep a 16-aligned prefix.
        candidates = list(range(n_channels))
        shuffle(candidates)
        n_drop = int(n_channels * percent_channels_delete / 100) // 16 * 16
        if n_drop > 0:
            chosen = candidates[:n_drop]
            print('before add_job:',layer.name,chosen,len(chosen),n_channels)
            surgeon.add_job('delete_channels', layer,channels=chosen)
    # Apply every queued deletion in one pass.
    return surgeon.operate()
BATCH_SIZE=32 #32 #8 #16
import tensorflow as tf
from tensorflow.python.platform import gfile
#from keras.applications.inception_v1 import InceptionV1
from keras.applications import vgg19, inception_v3, resnet50,inception_resnet_v2, resnet_v2, mobilenetv2, mobilenet
from keras.layers import Flatten, Dense, Dropout,GlobalAveragePooling2D,AveragePooling2D, Activation, Conv2D, Lambda, Input, Reshape
from keras.models import Model,load_model,model_from_json
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras import regularizers
from keras.preprocessing.image import (ImageDataGenerator, Iterator,
array_to_img, img_to_array, load_img)
from keras import backend as K
def as_bytes(bytes_or_text, encoding='utf-8'):
    """Converts bytes or unicode to `bytes`, using utf-8 encoding for text.
    # Arguments
        bytes_or_text: A `bytes` or `str` object.
        encoding: A string indicating the charset for encoding unicode.
    # Returns
        A `bytes` object.
    # Raises
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    # Python 3 only: `str` replaces the old `six.text_type`, removing the
    # dependency on six (which is not imported here).
    if isinstance(bytes_or_text, str):
        return bytes_or_text.encode(encoding)
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text
    raise TypeError('Expected binary or unicode string, got %r' %
                    (bytes_or_text,))
class CustomImageDataGenerator(ImageDataGenerator):
    """ImageDataGenerator extended to draw batches from pre-split image lists
    (the structure produced by create_image_lists) instead of a directory tree.
    """

    def flow_from_image_lists(self, image_lists,
                              category, image_dir,
                              target_size=(256, 256), color_mode='rgb',
                              class_mode='categorical',
                              batch_size=32, shuffle=True, seed=None,
                              save_to_dir=None,
                              save_prefix='',
                              save_format='jpeg'):
        # Delegate straight to ImageListIterator, passing this generator as
        # the source of random transforms / normalization.
        iterator_kwargs = dict(
            target_size=target_size,
            color_mode=color_mode,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
        return ImageListIterator(image_lists, self, category, image_dir,
                                 **iterator_kwargs)
class ImageListIterator(Iterator):
    """Iterator capable of reading images from a directory on disk.
    # Arguments
        image_lists: Dictionary of training images for each label.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        target_size: tuple of integers, dimensions to resize input images to.
        color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of sudirectories
            containing images from each class (e.g. `["dogs", "cats"]`).
            It will be computed automatically if not set.
        class_mode: Mode for yielding the targets:
            `"binary"`: binary targets (if there are only two classes),
            `"categorical"`: categorical targets,
            `"sparse"`: integer targets,
            `None`: no targets get yielded (only input images are yielded).
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
    """
    def __init__(self, image_lists, image_data_generator,
                 category, image_dir,
                 target_size=(256, 256), color_mode='rgb',
                 class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='jpeg'):
        if data_format is None:
            data_format = K.image_data_format()
        # Labels are the keys of the image-lists structure.
        classes = list(image_lists.keys())
        self.category = category
        self.num_class = len(classes)
        self.image_lists = image_lists
        self.image_dir = image_dir
        # Total sample count over all labels for this category
        # ('training' or 'validation').
        how_many_files = 0
        for label_name in classes:
            for _ in self.image_lists[label_name][category]:
                how_many_files += 1
        self.samples = how_many_files
        # Bidirectional label-name <-> integer-id maps.
        self.class2id = dict(zip(classes, range(len(classes))))
        self.id2class = dict((v, k) for k, v in self.class2id.items())
        self.classes = np.zeros((self.samples,), dtype='int32')
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.data_format = data_format
        # Derive the per-sample array shape from color mode and data format.
        if self.color_mode == 'rgb':
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        if class_mode not in {'categorical', 'binary', 'sparse', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of "categorical", '
                             '"binary", "sparse", or None.')
        self.class_mode = class_mode
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        # Resolve every (label, index) pair to a concrete file path and
        # record its integer class id, in label order.
        i = 0
        self.filenames = []
        for label_name in classes:
            for j, _ in enumerate(self.image_lists[label_name][category]):
                self.classes[i] = self.class2id[label_name]
                img_path = get_image_path(self.image_lists,
                                          label_name,
                                          j,
                                          self.image_dir,
                                          self.category)
                self.filenames.append(img_path)
                i += 1
        print("Found {} {} files".format(len(self.filenames), category))
        # Base Iterator handles batching, shuffling and epoch bookkeeping.
        super(ImageListIterator, self).__init__(self.samples, batch_size, shuffle,
                                                seed)
    def next(self):
        """For python 2.x.
        # Returns
            The next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
    def _get_batches_of_transformed_samples(self, index_array):
        """Load, transform and standardize the images selected by
        `index_array`, returning (batch_x, batch_y) per `class_mode`
        (or just batch_x when class_mode is None).
        """
        current_batch_size=len(index_array)
        batch_x = np.zeros((current_batch_size,) + self.image_shape,
                           dtype=K.floatx())
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            img = load_img(self.filenames[j],
                           grayscale=grayscale,
                           target_size=self.target_size)
            x = img_to_array(img, data_format=self.data_format)
            # Random augmentation followed by generator-defined normalization.
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(10000),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(K.floatx())
        elif self.class_mode == 'categorical':
            # One-hot encode the integer class ids.
            batch_y = np.zeros((len(batch_x), self.num_class),
                               dtype=K.floatx())
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            return batch_x
        return batch_x, batch_y
def create_image_lists(image_dir, train_count):
    """Scan the class subfolders of `image_dir` and split each class's
    images into shuffled training/validation lists.

    Args:
        image_dir: root directory; each immediate subfolder is one class.
        train_count: number of (shuffled) files per class assigned to
            'training'; the rest go to 'validation'.

    Returns:
        (image_lists, num_classes) where image_lists maps a normalized label
        name to {'dir', 'training', 'validation'} and num_classes counts the
        subfolders that actually contained images.

    Raises:
        ValueError: if `image_dir` does not exist.
    """
    if not os.path.isdir(image_dir):
        raise ValueError("Image directory {} not found.".format(image_dir))
    image_lists = {}
    num_classes = 0
    # os.walk yields the root first; every later entry is a candidate class dir.
    class_dirs = [entry[0] for entry in os.walk(image_dir)][1:]
    for class_dir in class_dirs:
        dir_name = os.path.basename(class_dir)
        if dir_name == image_dir:
            continue
        #print("Looking for images in '{}'".format(dir_name))
        matches = []
        for extension in VALID_IMAGE_FORMATS:
            pattern = os.path.join(image_dir, dir_name, '*.' + extension)
            matches.extend(glob.glob(pattern))
        if not matches:
            continue
        num_classes += 1
        # Normalize the folder name into a label: lowercase, with runs of
        # non-alphanumerics collapsed to single spaces.
        label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
        shuffle(matches)
        # First `train_count` shuffled files train; the remainder validate.
        training_images, validation_images = [], []
        for position, path in enumerate(matches):
            bucket = training_images if position < train_count else validation_images
            bucket.append(os.path.basename(path))
        image_lists[label_name] = {
            'dir': dir_name,
            'training': training_images,
            'validation': validation_images,
        }
    return image_lists, num_classes
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Return the filesystem path of one image for `label_name`/`category`.

    # Arguments
        image_lists: Dictionary of training images for each label.
        label_name: Label string we want to get an image for.
        index: Int offset of the image; wrapped modulo the number of images
            available for the label, so it can be arbitrarily large.
        image_dir: Root folder of the per-class subfolders.
        category: Which split to pull from (e.g. 'training', 'validation').
    # Returns
        File system path string to an image that meets the requested
        parameters.
    # Raises
        ValueError: for an unknown label, unknown category, or a category
            with no images for that label.
    """
    if label_name not in image_lists:
        raise ValueError('Label does not exist ', label_name)
    label_lists = image_lists[label_name]
    if category not in label_lists:
        raise ValueError('Category does not exist ', category)
    category_list = label_lists[category]
    if not category_list:
        raise ValueError('Label %s has no images in the category %s.',
                         label_name, category)
    # Wrap the offset so any index maps onto a real file.
    base_name = category_list[index % len(category_list)]
    return os.path.join(image_dir, label_lists['dir'], base_name)
def get_generators(image_lists, image_dir, preprocess_fct):
    """Build the (training, validation) batch generators over `image_lists`.

    Training batches get random augmentation (rotation, shifts, shear, zoom,
    horizontal flip); validation batches only get `preprocess_fct`.

    Returns:
        (train_generator, validation_generator) tuple.
    """
    augmenting_datagen = CustomImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
        #rescale=1. / 255)
        preprocessing_function=preprocess_fct)
    plain_datagen = CustomImageDataGenerator(preprocessing_function=preprocess_fct)
    # Both flows share everything except the augmentation and the split.
    shared_flow_args = dict(
        image_lists=image_lists,
        image_dir=image_dir,
        target_size=(IMG_SIZE, IMG_SIZE),
        batch_size=BATCH_SIZE,
        class_mode='categorical',
        seed=RANDOM_SEED)
    train_generator = augmenting_datagen.flow_from_image_lists(
        category='training', **shared_flow_args)
    validation_generator = plain_datagen.flow_from_image_lists(
        category='validation', **shared_flow_args)
    return train_generator, validation_generator
def get_model(classes_num):
    """Build a transfer-learning classifier on one of several ImageNet
    backbones (selected by the hard-coded branch flags below).

    Args:
        classes_num: number of output classes for the softmax head.

    Returns:
        (f_model, net_model, preprocess_input): the full classifier, the
        backbone it wraps, and the backbone module's preprocessing function.
    """
    # Initialize up front: only some branches construct the backbone inline
    # (those needing non-default args such as `alpha`). Previously, selecting
    # a branch without an inline constructor (e.g. VGG19) raised NameError at
    # the `net_model is None` check below.
    net_model = None
    if False:
        basemodel = vgg19
        basemodel_class = basemodel.VGG19
    elif False:
        basemodel = inception_v3
        basemodel_class = basemodel.InceptionV3
    elif False:
        basemodel = mobilenet
        basemodel_class = basemodel.MobileNet
        net_model = basemodel_class(weights='imagenet',include_top=False,input_shape=(IMG_SIZE, IMG_SIZE, 3),pooling='avg')
    elif True:
        basemodel = mobilenetv2
        basemodel_class = basemodel.MobileNetV2
        net_model = basemodel_class(alpha=1.0,weights='imagenet',include_top=False,input_shape=(IMG_SIZE, IMG_SIZE, 3),pooling='avg')
    elif True:
        basemodel = inception_resnet_v2
        basemodel_class = basemodel.InceptionResNetV2
    elif True:
        basemodel = resnet_v2
        basemodel_class = basemodel.ResNet152V2
    else:
        basemodel = resnet50
        basemodel_class = basemodel.ResNet50
    if net_model is None:
        # Default construction for branches that did not build the backbone.
        net_model = basemodel_class(weights='imagenet',include_top=False,input_shape=(IMG_SIZE, IMG_SIZE, 3),pooling='avg')
    last_model_layer = net_model.output
    #last_model_layer = GlobalAveragePooling2D()(last_model_layer)
    # New softmax head on top of the backbone's pooled features.
    preds = Dense(classes_num, activation='softmax')(last_model_layer)
    f_model = Model(net_model.input, preds)
    return f_model, net_model, basemodel.preprocess_input
def save_model(model, filename):
    """Persist `model` to `filename` by round-tripping through a JSON
    architecture dump plus a separate weights file.

    Rebuilding from JSON before saving strips state not captured by the
    architecture+weights pair, producing a smaller 'short' model file.

    Side effects: writes intermediate 'weights.h5' and 'model.json' files
    into the current working directory (left in place, as before).

    Args:
        model: Keras model to persist.
        filename: destination path for the rebuilt model.
    """
    model.save_weights('weights.h5')
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # Read the architecture back with a context manager so the handle is
    # closed even on error (the original left the file open on exception).
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights('weights.h5')
    loaded_model.save(filename)
def train_model(f_model, base_model, generators, train_new_layers=True):
    """Fine-tune `f_model` in the classic two-phase transfer-learning scheme.

    Phase 1 (only when `train_new_layers`): freeze `base_model`, train just
    the new head for 2 epochs at lr=0.01. Phase 2: unfreeze everything and
    continue at lr=0.001 with checkpointing on val_acc and early stopping.

    NOTE(review): the checkpoint/output path prefix `pretitle` is hard-coded
    per experiment — edit it (or the commented alternatives) before running
    a different configuration.

    Args:
        f_model: full classifier (backbone + head) to train.
        base_model: the backbone whose layers are frozen in phase 1.
        generators: (train_generator, validation_generator) pair.
        train_new_layers: run the head-only warm-up phase first.

    Returns:
        The trained `f_model` (also saved via save_model as a side effect).
    """
    train_generator, val_generator = generators
    nb_train_samples=train_generator.samples
    nb_validation_samples=val_generator.samples
    # Experiment-specific output prefix for checkpoints and the final file.
    pretitle='caltech101/mobilenet_v2_1.0-l1_25-pruned'
    #pretitle='dogs/mobilenet_v1-l1_25-pruned'
    #pretitle='dogs/mobilenet_v1_l1reg-my25-pruned'
    mc = ModelCheckpoint(pretitle+'-{epoch:02d}-{val_acc:.2f}.h5', monitor='val_acc', verbose=1, save_best_only=True)
    if train_new_layers:
        # Phase 1: freeze the backbone; train only the new head.
        base_model.trainable=False
        for l in base_model.layers:
            l.trainable=False
        f_model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy'])
        #f_model.summary()
        FIRST_EPOCHS=2
        #mc = ModelCheckpoint('dogs_mobilenet_multi_heads-{epoch:02d}-{val_acc:.2f}.h5', monitor='val_acc', verbose=1, save_best_only=True)
        hist1=f_model.fit_generator(train_generator, steps_per_epoch=nb_train_samples//BATCH_SIZE, epochs=FIRST_EPOCHS, verbose=1, initial_epoch=0, callbacks=[mc], validation_data=val_generator, validation_steps=nb_validation_samples // BATCH_SIZE)
        # Phase 2 setup: unfreeze the backbone for full fine-tuning.
        base_model.trainable=True
        for l in base_model.layers:
            l.trainable=True
        SECOND_EPOCHS=FIRST_EPOCHS+18
        # Resume epoch numbering from however many epochs actually ran.
        initial_epoch=len(hist1.history['loss'])
    else:
        # Resuming a previous run: epoch counters chosen manually.
        initial_epoch=20 #0 #8
        SECOND_EPOCHS=initial_epoch+20 #10 #2
    # Phase 2: fine-tune the whole network at a lower learning rate.
    f_model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
    f_model.summary()
    es=EarlyStopping(monitor='val_acc',patience=2)
    hist1=f_model.fit_generator(train_generator, steps_per_epoch=nb_train_samples//BATCH_SIZE, epochs=SECOND_EPOCHS, verbose=1,
                                initial_epoch=initial_epoch, callbacks=[mc,es], validation_data=val_generator, validation_steps=nb_validation_samples // BATCH_SIZE)
    #f_model.save(pretitle+'.h5')
    save_model(f_model,pretitle+'_short.h5')
    return f_model
import tempfile
def add_l1l2_regularizer(model, l1=0.0, l2=0.0, reg_attributes=None):
    """Attach a weight regularizer to every layer of `model` and return the
    rebuilt model.

    NOTE(review): despite the name and the `l2` parameter, only
    `regularizers.l1(l1)` is currently applied (see the commented
    `_l2(l1=..., l2=...)` alternative) — `l2` is ignored; confirm intent.

    Args:
        model: Keras model to regularize.
        l1: L1 regularization factor.
        l2: accepted for API symmetry; currently unused.
        reg_attributes: regularizer attribute name(s) to set; defaults to
            ['kernel_regularizer', 'bias_regularizer'].

    Returns:
        A new model instance with the regularizers active.
    """
    # Add L1L2 regularization to the whole model.
    # NOTE: This will save and reload the model. Do not call this function inplace but with
    # model = add_l1l2_regularizer(model, ...)
    #for l in model.layers:
    #    print(l,l.losses)
    if not reg_attributes:
        reg_attributes = ['kernel_regularizer', 'bias_regularizer']
    if isinstance(reg_attributes, str):
        reg_attributes = [reg_attributes]
    regularizer = regularizers.l1(l1) #_l2(l1=l1, l2=l2)
    for layer in model.layers:
        for attr in reg_attributes:
            if hasattr(layer, attr):
                setattr(layer, attr, regularizer)
    # So far, the regularizers only exist in the model config. We need to
    # reload the model so that Keras adds them to each layer's losses.
    model_json = model.to_json()
    # Save the weights before reloading the model.
    tmp_weights_path = os.path.join(tempfile.gettempdir(), 'tmp_weights.h5')
    model.save_weights(tmp_weights_path)
    # Reload the model
    model = model_from_json(model_json)
    model.load_weights(tmp_weights_path, by_name=True)
    #print('new')
    #for l in model.layers:
    #    print(l,l.losses)
    #sys.exit(0)
    return model
if __name__ == '__main__':
    # Entry point.  Depending on the hard-coded `if False:`/`if True:`
    # switches this either (a) re-saves a model without optimizer state,
    # (b) benchmarks/evaluates a list of previously trained models, or
    # (c) loads weights, prunes the model and (re-)trains it.
    # NOTE(review): the original indentation of this script was lost; the
    # nesting of the if/elif branches below is reconstructed from the
    # statement order and should be confirmed against the upstream source.
    #model_file='dogs/mobilenet_v1-20-0.67.h5'
    #model_file='dogs/mobilenet_v1_l1reg-40-0.72.h5'
    #model_file='caltech101/caltech101_mobilenet_v2_1.0-20-0.92.h5'
    model_file=''
    if False:
        # One-off utility: re-save a model without its optimizer state.
        file_name='caltech101_mobilenet_v2_1.4'
        model=load_model(file_name+'.h5')
        save_model(model,file_name+'_short.h5')
        sys.exit(0)
    # Dataset selection: Caltech-101 with 30 training images per class
    # (the Stanford Dogs alternative is commented out).
    image_dir='D:/datasets/caltech/101_ObjectCategories'
    train_count=30
    #image_dir='D:/datasets/StanfordDogs'
    #train_count=70
    image_lists,classes_num = create_image_lists(image_dir, train_count)
    print("Number of classes found: {}".format(classes_num))
    f_model,base_model,preprocess_fct = get_model(classes_num)
    generators = get_generators(image_lists, image_dir,preprocess_fct)
    train_new_layers=True
    if False:
        # Benchmark/evaluation branch over a list of saved (pruned) models.
        #model_files=['caltech101_mobilenet_v1_short','caltech101_mobilenet_v1-rnd25-pruned_short','caltech101_mobilenet_v2_1.0_short','caltech101_mobilenet_v2_1.0-rnd25-pruned_short','caltech101_mobilenet_v2_1.4_short','caltech101_mobilenet_v2_1.0-rnd25-pruned_short']
        model_files=['mobilenet_v1_l1reg_short','mobilenet_v1_l1reg-my25-pruned_short','mobilenet_v2_1.0_l1reg_base_short','mobilenet_v2_1.0_l1reg_my25-pruned_short','mobilenet_v2_1.4_short','mobilenet_v2_1.4_l1reg-my25-pruned_short']
        #basedir='caltech101_'
        base_dir='dogs/'
        #K.set_session(tf.Session(config=tf.ConfigProto(device_count = {'GPU' : 0})))
        for model_file in model_files:
            f_model=load_model(base_dir+model_file+'.h5') #'caltech101_mobilenet.h5') #-pruned
            f_model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
            train_new_layers=False
            if True:
                # Timing benchmark: average single-image prediction latency
                # over TESTS runs, after one warm-up prediction.
                x=np.random.uniform(-1,1,(1,IMG_SIZE,IMG_SIZE,3))
                preds=f_model.predict(x)[0]
                TESTS=100
                start_time=time.time()
                for _ in range(TESTS):
                    preds=f_model.predict(x)[0]
                elapsed=time.time()-start_time
                print(model_file,' elapsed (ms):',1000*elapsed/TESTS,' size (Mb):',os.path.getsize(base_dir+model_file+'.h5')/(1024*1024.0))
            elif True:
                # Accuracy evaluation on the validation generator.
                val_generator=generators[1]
                score = f_model.evaluate_generator(val_generator, val_generator.samples//BATCH_SIZE)
                print(model_file, ' evaluation scores:',score)
        sys.exit(0)
    elif os.path.exists(model_file):
        # Resume from previously saved weights instead of training new layers.
        train_new_layers=False
        f_model.load_weights(model_file)
        #save_model(f_model,model_file+'_short.h5')
        #sys.exit(0)
        if True:
            # Prune branch: re-compile, then drop 25% of units per layer.
            f_model.compile(loss='sparse_categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
            #f_model=prune_model_random(f_model, 25)
            f_model=prune_model_by_layer(f_model, 25,image_dir,image_lists, preprocess_fct)
        else:
            # Alternative: add L1 regularization before (re-)training.
            f_model=add_l1l2_regularizer(f_model, l1=0.0001, l2=0.0)
    f_model = train_model(f_model, base_model,generators=generators, train_new_layers=train_new_layers)
|
<gh_stars>1-10
from sympy.core.logic import (fuzzy_not, Logic, And, Or, Not, fuzzy_and,
fuzzy_or, _fuzzy_group)
from sympy.utilities.pytest import raises
# Shorthand constants for three-valued (fuzzy) logic used throughout the
# tests below: True, False, and Unknown (represented by None).
T = True
F = False
U = None
def test_fuzzy_group():
    """Exhaustively check _fuzzy_group over all length-3 T/F/U combinations."""
    from sympy.utilities.iterables import cartes
    values = [T, F, U]
    for combo in cartes(values, values, values):
        # Without quick_exit: None dominates, otherwise plain conjunction.
        if None in combo:
            expected = None
        else:
            expected = all(combo)
        assert _fuzzy_group(combo) is expected
        # With quick_exit: more than one False short-circuits to None.
        if combo.count(False) > 1:
            quick_expected = None
        else:
            quick_expected = expected
        assert _fuzzy_group(combo, quick_exit=True) is quick_expected
def test_fuzzy_not():
    """fuzzy_not swaps T and F and leaves Unknown unchanged."""
    for given, expected in [(T, F), (F, T), (U, U)]:
        assert fuzzy_not(given) == expected
def test_fuzzy_and():
    """Three-valued conjunction: pairs, singletons and edge cases."""
    pair_cases = {
        (T, T): T, (T, F): F, (T, U): U,
        (F, F): F, (F, U): F, (U, U): U,
    }
    for args, expected in pair_cases.items():
        assert fuzzy_and(list(args)) == expected
    # A single operand passes through unchanged.
    for single in (U, T, F):
        assert fuzzy_and([single]) == single
    # Any False dominates, even in the presence of Unknown.
    assert fuzzy_and([T, F, U]) == F
    # Empty conjunction is vacuously true; a no-argument call is an error.
    assert fuzzy_and([]) == T
    raises(TypeError, lambda: fuzzy_and())
def test_fuzzy_or():
    """Three-valued disjunction: pairs, singletons and edge cases."""
    pair_cases = {
        (T, T): T, (T, F): T, (T, U): T,
        (F, F): F, (F, U): U, (U, U): U,
    }
    for args, expected in pair_cases.items():
        assert fuzzy_or(list(args)) == expected
    # A single operand passes through unchanged.
    for single in (U, T, F):
        assert fuzzy_or([single]) == single
    # Any True dominates, even in the presence of Unknown.
    assert fuzzy_or([T, F, U]) == T
    # Empty disjunction is vacuously false; a no-argument call is an error.
    assert fuzzy_or([]) == F
    raises(TypeError, lambda: fuzzy_or())
def test_logic_cmp():
    """Logic nodes hash and compare structurally, ignoring argument order."""
    expr_a = And('a', Not('b'))
    expr_b = And('a', Not('b'))
    assert hash(expr_a) == hash(expr_b)
    assert (expr_a == expr_b) == T
    assert (expr_a != expr_b) == F
    # And is commutative: every permutation of the arguments compares equal.
    reference = And('a', 'b', 'c')
    for perm in (('b', 'a', 'c'), ('c', 'b', 'a'), ('c', 'a', 'b')):
        assert reference == And(*perm)
def test_logic_onearg():
    """And/Or with zero or one argument collapse to their identities."""
    # Empty And is the identity True; empty Or is the identity False.
    assert And() is True
    assert Or() is False
    # A single argument is returned unwrapped by both connectives.
    for connective in (And, Or):
        assert connective(T) == T
        assert connective(F) == F
        assert connective('a') == 'a'
def test_logic_xnotx():
    """A symbol with its negation: contradiction under And, tautology under Or."""
    sym = 'a'
    negated = Not(sym)
    assert And(sym, negated) == F
    assert Or(sym, negated) == T
def test_logic_eval_TF():
    """Truth tables for And/Or plus simplification against constants."""
    for lhs in (F, T):
        for rhs in (F, T):
            assert And(lhs, rhs) == (lhs and rhs)
            assert Or(lhs, rhs) == (lhs or rhs)
    # A constant operand either disappears (identity) or absorbs the result.
    assert And('a', T) == 'a'
    assert And('a', F) == F
    assert Or('a', T) == T
    assert Or('a', F) == 'a'
def test_logic_combine_args():
    """Duplicate arguments collapse and nested same-type nodes flatten."""
    assert And('a', 'b', 'a') == And('a', 'b')
    assert Or('a', 'b', 'a') == Or('a', 'b')
    assert And(And('a', 'b'), And('c', 'd')) == And('a', 'b', 'c', 'd')
    assert Or(Or('a', 'b'), Or('c', 'd')) == Or('a', 'b', 'c', 'd')
    # Flattening and de-duplication combine across repeated sub-expressions.
    combined = Or('t', And('n', 'p', 'r'), And('n', 'r'),
                  And('n', 'p', 'r'), 't', And('n', 'r'))
    assert combined == Or('t', And('n', 'p', 'r'), And('n', 'r'))
def test_logic_expand():
    """expand() distributes And over Or."""
    expr = And(Or('a', 'b'), 'c')
    assert expr.expand() == Or(And('a', 'c'), And('b', 'c'))
    # Expansion also simplifies b & (a | !b) down to a & b.
    expr = And(Or('a', Not('b')), 'b')
    assert expr.expand() == And('a', 'b')
    expr = And(Or('a', 'b'), Or('c', 'd'))
    expected = Or(And('a', 'c'), And('a', 'd'),
                  And('b', 'c'), And('b', 'd'))
    assert expr.expand() == expected
def test_logic_fromstring():
    """Parsing of the simple '&'/'|'/'!' string syntax."""
    S = Logic.fromstring
    assert S('a') == 'a'
    assert S('!a') == Not('a')
    assert S('a & b') == And('a', 'b')
    assert S('a | b') == Or('a', 'b')
    # '|' binds the left operand of a following '&' as a whole.
    assert S('a | b & c') == And(Or('a', 'b'), 'c')
    assert S('a & b | c') == Or(And('a', 'b'), 'c')
    assert S('a & b & c') == And('a', 'b', 'c')
    assert S('a | b | c') == Or('a', 'b', 'c')
    # Malformed strings are rejected.  The default argument binds `bad`
    # eagerly so each lambda checks its own string.
    for bad in ('| a', '& a', 'a | | b', 'a | & b', 'a & & b',
                'a |', 'a|b', '!', '! a'):
        raises(ValueError, lambda bad=bad: S(bad))
def test_logic_not():
    """Not wraps plain strings (no '!' munging) and applies De Morgan."""
    assert Not('a') != '!a'
    assert Not('!a') != 'a'
    # NOTE: we may want to change default Not behaviour and put this
    # functionality into some method.
    assert Not(And('a', 'b')) == Or(Not('a'), Not('b'))
    assert Not(Or('a', 'b')) == And(Not('a'), Not('b'))
def test_formatting():
    """Whitespace rules: binary operators need spaces, '!' must not have one."""
    S = Logic.fromstring
    for bad in ('a&b', 'a|b', '! a'):
        raises(ValueError, lambda bad=bad: S(bad))
|
<filename>mir_eval/segment.py
# CREATED:2013-08-13 12:02:42 by <NAME> <<EMAIL>>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, C, - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further require lists of reference and estimated segment
labels which must have a length which is equal to the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
'''
import collections
import warnings
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
from . import util
def validate_boundary(reference_intervals, estimated_intervals, trim):
    """Sanity-check the inputs of a segment boundary estimation metric
    (i.e. one that only takes in segment intervals), warning on empty
    annotations and raising helpful errors on malformed ones.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    trim : bool
        will the start and end events be trimmed?
    """
    # Trimming discards the first and last boundary, so at least two
    # intervals are needed for anything to remain; otherwise one suffices.
    min_size = 2 if trim else 1
    if len(reference_intervals) < min_size:
        warnings.warn("Reference intervals are empty.")
    if len(estimated_intervals) < min_size:
        warnings.warn("Estimated intervals are empty.")
    # Delegate per-array validation (shape, ordering) to the util helper.
    util.validate_intervals(reference_intervals)
    util.validate_intervals(estimated_intervals)
def validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels):
    """Sanity-check the inputs of a structure estimation metric (i.e. one
    that takes in both segment boundaries and their labels), warning on
    empty annotations and raising helpful errors on malformed ones.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    """
    for intervals, labels in ((reference_intervals, reference_labels),
                              (estimated_intervals, estimated_labels)):
        util.validate_intervals(intervals)
        if intervals.shape[0] != len(labels):
            raise ValueError('Number of intervals does not match number '
                             'of labels')
        # Only check the start time when at least one interval exists.
        if intervals.size > 0 and not np.allclose(intervals.min(), 0.0):
            raise ValueError('Segment intervals do not start at 0')
    if reference_intervals.size == 0:
        warnings.warn("Reference intervals are empty.")
    if estimated_intervals.size == 0:
        warnings.warn("Estimated intervals are empty.")
    # When both annotations are non-empty they must span the same duration.
    if reference_intervals.size > 0 and estimated_intervals.size > 0:
        if not np.allclose(reference_intervals.max(),
                           estimated_intervals.max()):
            raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
              window=0.5, beta=1.0, trim=False):
    """Boundary detection hit-rate.

    A hit is counted whenever a reference boundary lies within ``window``
    of an estimated boundary.  Each boundary is matched at most once: the
    score is the size of a maximal matching between reference and estimated
    boundary points, subject to the window constraint.

    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
    >>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
    ...                                            est_intervals,
    ...                                            window=0.5)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the same format.
    window : float > 0
        size of the window of 'correctness' around ground-truth beats
        (in seconds)
        (Default value = 0.5)
    beta : float > 0
        weighting constant for F-measure.
        (Default value = 1.0)
    trim : boolean
        if ``True``, the first and last boundary times are ignored.
        Typically, these denote start (0) and end-markers.
        (Default value = False)

    Returns
    -------
    precision : float
        precision of estimated predictions
    recall : float
        recall of reference boundaries
    f_measure : float
        F-measure (weighted harmonic mean of ``precision`` and ``recall``)
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    # Collapse intervals down to their boundary times.
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        # Drop the track-start and track-end markers.
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    # With no boundaries on either side there is nothing to score.
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return 0.0, 0.0, 0.0
    # Maximal one-to-one matching within the tolerance window.
    hits = util.match_events(ref_bounds, est_bounds, window)
    precision = float(len(hits)) / len(est_bounds)
    recall = float(len(hits)) / len(ref_bounds)
    return precision, recall, util.f_measure(precision, recall, beta=beta)
def deviation(reference_intervals, estimated_intervals, trim=False):
    """Compute the median deviations between reference and estimated
    boundary times.

    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
    >>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
    ...                                              est_intervals)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the same format.
    trim : boolean
        if ``True``, the first and last intervals are ignored.
        Typically, these denote start (0.0) and end-of-track markers.
        (Default value = False)

    Returns
    -------
    reference_to_estimated : float
        median time from each reference boundary to the
        closest estimated boundary
    estimated_to_reference : float
        median time from each estimated boundary to the
        closest reference boundary
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    # Collapse intervals down to their boundary times.
    ref_bounds = util.intervals_to_boundaries(reference_intervals)
    est_bounds = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        # Drop the track-start and track-end markers.
        ref_bounds = ref_bounds[1:-1]
        est_bounds = est_bounds[1:-1]
    # Without boundaries on both sides there is no deviation to measure.
    if len(ref_bounds) == 0 or len(est_bounds) == 0:
        return np.nan, np.nan
    # dist[i, j] = |ref_bounds[i] - est_bounds[j]|
    dist = np.abs(np.subtract.outer(ref_bounds, est_bounds))
    estimated_to_reference = np.median(dist.min(axis=0))
    reference_to_estimated = np.median(dist.min(axis=1))
    return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
             estimated_intervals, estimated_labels,
             frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation evaluation by pair-wise agreement.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
    ...                                                    ref_labels,
    ...                                                    est_intervals,
    ...                                                    est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the same format.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta value for F-measure
        (Default value = 1.0)

    Returns
    -------
    precision : float > 0
        Precision of detecting whether frames belong in the same cluster
    recall : float > 0
        Recall of detecting whether frames belong in the same cluster
    f : float > 0
        F-measure of detecting whether frames belong in the same cluster
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Empty annotations score zero; labels need not be checked because
    # validate_structure guarantees they match the intervals in size.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    def _frame_labels(intervals, labels):
        # Sample an annotation on the frame grid and map labels to ints.
        sampled = util.intervals_to_samples(intervals, labels,
                                            sample_size=frame_size)[-1]
        return util.index_labels(sampled)[0]

    y_ref = _frame_labels(reference_intervals, reference_labels)
    y_est = _frame_labels(estimated_intervals, estimated_labels)
    # agree[i, j] is True when frames i and j carry the same label.
    agree_ref = np.equal.outer(y_ref, y_ref)
    agree_est = np.equal.outer(y_est, y_est)
    # Unique unordered pairs: discount the diagonal, halve the symmetry.
    n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
    n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
    n_both = (np.logical_and(agree_ref, agree_est).sum() - len(y_ref)) / 2.0
    precision = n_both / n_agree_est
    recall = n_both / n_agree_ref
    return precision, recall, util.f_measure(precision, recall, beta=beta)
def rand_index(reference_intervals, reference_labels,
               estimated_intervals, estimated_labels,
               frame_size=0.1, beta=1.0):
    """(Non-adjusted) Rand index.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> rand_index = mir_eval.structure.rand_index(ref_intervals,
    ...                                            ref_labels,
    ...                                            est_intervals,
    ...                                            est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the same format.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta value for F-measure (unused; kept for API compatibility)
        (Default value = 1.0)

    Returns
    -------
    rand_index : float > 0
        Rand index
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Check for empty annotations. Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals.
    # Bug fix: this previously returned a 3-tuple (copy-pasted from
    # ``pairwise``) even though the function is documented to return a
    # single score.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
    # Generate the cluster labels
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # agree[i, j] is True when frames i and j carry the same label.
    agree_ref = np.equal.outer(y_ref, y_ref)
    agree_est = np.equal.outer(y_est, y_est)
    # Pairs on which both annotations agree (same label in both) ...
    matches_pos = np.logical_and(agree_ref, agree_est)
    # ... and pairs on which both annotations disagree.
    matches_neg = np.logical_and(~agree_ref, ~agree_est)
    # Number of unique unordered frame pairs.
    n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
    # Discount the diagonal (self-pairs) and the symmetric double count.
    n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
    n_matches_neg = matches_neg.sum() / 2.0
    rand = (n_matches_pos + n_matches_neg) / n_pairs
    return rand
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ari : float
        Adjusted Rand index

    .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # Compute the ARI using the contingency data.
    # Bug fix: ``scipy.misc.comb`` was removed in SciPy 1.3;
    # ``scipy.special.comb`` is the supported equivalent (and
    # ``scipy.special`` is already imported by this module).
    comb = scipy.special.comb
    sum_comb_c = sum(comb(n_c, 2, exact=True)
                     for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb(n_k, 2, exact=True)
                     for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb(n_ij, 2, exact=True)
                   for n_ij in contingency.flatten())
    prod_comb = (sum_comb_c * sum_comb_k)/float(comb(n_samples, 2))
    mean_comb = (sum_comb_k + sum_comb_c)/2.
    return ((sum_comb - prod_comb)/(mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
        estimated_intervals, estimated_labels,
        frame_size=0.1):
    """Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
    ...                                    est_intervals, est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the same format.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the same format.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    Returns
    -------
    ari_score : float > 0
        Adjusted Rand index between segmentations.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Check for empty annotations. Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals.
    # Bug fix: this previously returned a 3-tuple (copy-pasted from
    # ``pairwise``) even though the function is documented to return a
    # single score.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
    # Generate per-frame cluster labels for both annotations.
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
    """Compute the mutual information between two sequence labelings.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices
    contingency : np.ndarray
        Pre-computed contingency matrix. If None, one will be computed.
        (Default value = None)

    Returns
    -------
    mi : float
        Mutual information

    .. note:: Based on sklearn.metrics.cluster.mutual_info_score
    """
    if contingency is None:
        contingency = _contingency_matrix(reference_indices,
                                          estimated_indices).astype(float)
    contingency_sum = np.sum(contingency)
    # Marginal counts for each reference (pi) and estimated (pj) class.
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)
    outer = np.outer(pi, pj)
    # Only non-zero cells contribute (the 0*log(0) terms vanish).
    nnz = contingency != 0.0
    # normalized contingency
    contingency_nm = contingency[nnz]
    # NOTE: the log of the raw counts is taken BEFORE the in-place
    # normalization below; the normalized log is recovered further down as
    # log(count) - log(total).  Do not reorder these two statements.
    log_contingency_nm = np.log(contingency_nm)
    contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
    """Compute the mutual information between two sequence labelings, adjusted for
    chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ami : float <= 1.0
        Mutual information

    .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
        and sklearn.metrics.cluster.expected_mutual_info_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    # Calculate the MI for the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # The following code is based on
    # sklearn.metrics.cluster.expected_mutual_information
    R, C = contingency.shape
    N = float(n_samples)
    # a, b: marginal class counts for the reference and estimated labelings.
    a = np.sum(contingency, axis=1).astype(np.int32)
    b = np.sum(contingency, axis=0).astype(np.int32)
    # There are three major terms to the EMI equation, which are multiplied to
    # and then summed over varying nij values.
    # While nijs[0] will never be used, having it simplifies the indexing.
    nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide by zero warnings. As its not used, no issue.
    nijs[0] = 1
    # term1 is nij / N
    term1 = nijs / N
    # term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
    # term2 uses the outer product
    log_ab_outer = np.log(np.outer(a, b))
    # term2 uses N * nij
    log_Nnij = np.log(N * nijs)
    # term3 is large, and involved many factorials. Calculate these in log
    # space to stop overflows.
    gln_a = scipy.special.gammaln(a + 1)
    gln_b = scipy.special.gammaln(b + 1)
    gln_Na = scipy.special.gammaln(N - a + 1)
    gln_Nb = scipy.special.gammaln(N - b + 1)
    gln_N = scipy.special.gammaln(N + 1)
    gln_nij = scipy.special.gammaln(nijs + 1)
    # start and end values for nij terms for each summation
    # (the hypergeometric support: max(1, a_i + b_j - N) .. min(a_i, b_j)).
    start = np.array([[v - N + w for w in b] for v in a], dtype='int')
    start = np.maximum(start, 1)
    end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
    # emi itself is a summation over the various values.
    emi = 0
    for i in range(R):
        for j in range(C):
            for nij in range(start[i, j], end[i, j]):
                term2 = log_Nnij[nij] - log_ab_outer[i, j]
                # Numerators are positive, denominators are negative.
                gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
                       gln_N - gln_nij[nij] -
                       scipy.special.gammaln(a[i] - nij + 1) -
                       scipy.special.gammaln(b[j] - nij + 1) -
                       scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
                term3 = np.exp(gln)
                emi += (term1[nij] * term2 * term3)
    # Calculate entropy for each labeling
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
    """Compute the normalized mutual information between two sequence
    labelings.

    (Docstring fix: this function normalizes the MI by the entropies of the
    two labelings; it does not adjust for chance — see
    ``_adjusted_mutual_info_score`` for that.)

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    nmi : float <= 1.0
        Normalized mutual information

    .. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
    """
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    # astype(float) already yields a float array; the redundant
    # ``np.array(contingency, dtype='float')`` copy was removed.
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    # Calculate the MI for the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # Calculate entropy for each labeling
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    # Normalize by the geometric mean of the entropies, guarding against
    # division by zero.
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def mutual_information(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels,
                       frame_size=0.1):
    """Frame-clustering segmentation: mutual information metrics.
    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,
    ...                                                      ref_labels,
    ...                                                      est_intervals,
    ...                                                      est_labels)
    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    Returns
    -------
    MI : float > 0
        Mutual information between segmentations
    AMI : float
        Adjusted mutual information between segmentations.
    NMI : float > 0
        Normalized mutual information between segmentations
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Empty annotations score zero on all three metrics.  Labels need no
    # separate check because validate_structure enforces matching sizes.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    def _frame_labels(intervals, labels):
        # Sample the labeled intervals on a regular frame grid, then map
        # the sampled labels into index space.
        sampled = util.intervals_to_samples(intervals, labels,
                                            sample_size=frame_size)[-1]
        return util.index_labels(sampled)[0]

    y_ref = _frame_labels(reference_intervals, reference_labels)
    y_est = _frame_labels(estimated_intervals, estimated_labels)

    # Plain, chance-adjusted, and normalized mutual information.
    return (_mutual_info_score(y_ref, y_est),
            _adjusted_mutual_info_score(y_ref, y_est),
            _normalized_mutual_info_score(y_ref, y_est))
def nce(reference_intervals, reference_labels, estimated_intervals,
        estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: normalized conditional entropy
    Computes cross-entropy of cluster assignment, normalized by the
    max-entropy.
    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,
    ...                                               ref_labels,
    ...                                               est_intervals,
    ...                                               est_labels)
    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta for F-measure
        (Default value = 1.0)
    Returns
    -------
    S_over
        Over-clustering score:
        ``1 - H(y_est | y_ref) / log(|y_est|)``
        If `|y_est|==1`, then `S_over` will be 0.
    S_under
        Under-clustering score:
        ``1 - H(y_ref | y_est) / log(|y_ref|)``
        If `|y_ref|==1`, then `S_under` will be 0.
    S_F
        F-measure for (S_over, S_under)
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Empty annotations score zero; label sizes are already verified by
    # validate_structure.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    def _frame_labels(intervals, labels):
        # Sample intervals on a regular frame grid and convert labels to
        # index space.
        sampled = util.intervals_to_samples(intervals, labels,
                                            sample_size=frame_size)[-1]
        return util.index_labels(sampled)[0]

    y_ref = _frame_labels(reference_intervals, reference_labels)
    y_est = _frame_labels(estimated_intervals, estimated_labels)

    # Joint distribution over (reference, estimate) frame labels,
    # normalized by the total number of frames.
    contingency = _contingency_matrix(y_ref, y_est).astype(float)
    contingency = contingency / len(y_ref)
    # Marginal distributions.
    p_est = contingency.sum(axis=0)
    p_ref = contingency.sum(axis=1)
    # H(true | prediction) = sum_j P[estimated = j] *
    #   sum_i P[true = i | estimated = j] log P[true = i | estimated = j]
    # scipy.stats.entropy operates column-wise (natural log); dividing by
    # log(2) converts to bits.  This mirrors
    # scipy.stats.entropy(..., base=2), whose `base` kwarg only appeared in
    # scipy 0.14.0.
    true_given_est = p_est.dot(scipy.stats.entropy(contingency) / np.log(2))
    pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T) / np.log(2))

    n_ref, n_est = contingency.shape
    # Scores default to 0 when one labeling has a single class (log(1)=0).
    score_under = 0.0
    if n_ref > 1:
        score_under = 1. - true_given_est / np.log2(n_ref)
    score_over = 0.0
    if n_est > 1:
        score_over = 1. - pred_given_ref / np.log2(n_est)
    return score_over, score_under, util.f_measure(score_over, score_under,
                                                   beta=beta)
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.
    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
    ...                                    est_intervals, est_labels)
    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    ref_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.
    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Adjust timespan of estimations relative to ground truth
    ref_intervals, ref_labels = \
        util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
    est_intervals, est_labels = \
        util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
                              t_max=ref_intervals.max())

    scores = collections.OrderedDict()

    # Boundary detection at two fixed tolerance windows.  The window value
    # is forced, overriding any user-supplied 'window' keyword.
    for window in [.5, 3.0]:
        kwargs['window'] = window
        (scores['Precision@{}'.format(window)],
         scores['Recall@{}'.format(window)],
         scores['F-measure@{}'.format(window)]) = util.filter_kwargs(
            detection, ref_intervals, est_intervals, **kwargs)

    # Boundary deviation
    (scores['Ref-to-est deviation'],
     scores['Est-to-ref deviation']) = util.filter_kwargs(
        deviation, ref_intervals, est_intervals, **kwargs)

    # Pairwise clustering
    (scores['Pairwise Precision'],
     scores['Pairwise Recall'],
     scores['Pairwise F-measure']) = util.filter_kwargs(
        pairwise, ref_intervals, ref_labels, est_intervals, est_labels,
        **kwargs)

    # Rand index
    scores['Rand Index'] = util.filter_kwargs(
        rand_index, ref_intervals, ref_labels, est_intervals, est_labels,
        **kwargs)
    # Adjusted rand index
    scores['Adjusted Rand Index'] = util.filter_kwargs(
        ari, ref_intervals, ref_labels, est_intervals, est_labels, **kwargs)

    # Mutual information metrics
    (scores['Mutual Information'],
     scores['Adjusted Mutual Information'],
     scores['Normalized Mutual Information']) = util.filter_kwargs(
        mutual_information, ref_intervals, ref_labels, est_intervals,
        est_labels, **kwargs)

    # Conditional entropy metrics
    (scores['NCE Over'],
     scores['NCE Under'],
     scores['NCE F-measure']) = util.filter_kwargs(
        nce, ref_intervals, ref_labels, est_intervals, est_labels, **kwargs)

    return scores
|
"""
Coordinates System Transformations
==================================
Defines the objects to apply transformations on coordinates systems.
The following transformations are available:
- :func:`colour.algebra.cartesian_to_spherical`: Cartesian to spherical
transformation.
- :func:`colour.algebra.spherical_to_cartesian`: Spherical to cartesian
transformation.
- :func:`colour.algebra.cartesian_to_polar`: Cartesian to polar
transformation.
- :func:`colour.algebra.polar_to_cartesian`: Polar to cartesian
transformation.
- :func:`colour.algebra.cartesian_to_cylindrical`: Cartesian to cylindrical
transformation.
- :func:`colour.algebra.cylindrical_to_cartesian`: Cylindrical to cartesian
transformation.
References
----------
- :cite:`Wikipedia2005a` : Wikipedia. (2005). ISO 31-11. Retrieved July 31,
2016, from https://en.wikipedia.org/wiki/ISO_31-11
- :cite:`Wikipedia2006` : Wikipedia. (2006). List of common coordinate
transformations. Retrieved July 18, 2014, from
http://en.wikipedia.org/wiki/List_of_common_coordinate_transformations
"""
from __future__ import annotations
import numpy as np
from colour.algebra import sdiv, sdiv_mode
from colour.hints import ArrayLike, NDArray
from colour.utilities import as_float_array, tsplit, tstack
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"cartesian_to_spherical",
"spherical_to_cartesian",
"cartesian_to_polar",
"polar_to_cartesian",
"cartesian_to_cylindrical",
"cylindrical_to_cartesian",
]
def cartesian_to_spherical(a: ArrayLike) -> NDArray:
    """
    Convert a cartesian coordinates array :math:`xyz` into spherical
    coordinates :math:`\\rho\\theta\\phi` (radial distance, inclination or
    elevation, and azimuth).
    Parameters
    ----------
    a
        Cartesian coordinates array :math:`xyz` to transform.
    Returns
    -------
    :class:`numpy.ndarray`
        Spherical coordinates array :math:`\\rho\\theta\\phi`, :math:`\\rho` is
        in range [0, +inf], :math:`\\theta` is in range [0, pi] radians, i.e.
        [0, 180] degrees, and :math:`\\phi` is in range [-pi, pi] radians, i.e.
        [-180, 180] degrees.
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([3, 1, 6])
    >>> cartesian_to_spherical(a)  # doctest: +ELLIPSIS
    array([ 6.7823299...,  0.4850497...,  0.3217505...])
    """
    x, y, z = tsplit(a)

    # Radial distance is the Euclidean norm along the last axis.
    radial = np.linalg.norm(a, axis=-1)
    # Azimuth in the xy-plane.
    azimuth = np.arctan2(y, x)
    # Inclination; the guarded division handles the degenerate radial == 0
    # case according to the active sdiv mode.
    with sdiv_mode():
        inclination = np.arccos(sdiv(z, radial))

    return tstack([radial, inclination, azimuth])
def spherical_to_cartesian(a: ArrayLike) -> NDArray:
    """
    Convert a spherical coordinates array :math:`\\rho\\theta\\phi` (radial
    distance, inclination or elevation, and azimuth) into cartesian
    coordinates :math:`xyz`.
    Parameters
    ----------
    a
        Spherical coordinates array :math:`\\rho\\theta\\phi` to transform,
        :math:`\\rho` is in range [0, +inf], :math:`\\theta` is in range
        [0, pi] radians, i.e. [0, 180] degrees, and :math:`\\phi` is in range
        [-pi, pi] radians, i.e. [-180, 180] degrees.
    Returns
    -------
    :class:`numpy.ndarray`
        Cartesian coordinates array :math:`xyz`.
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([6.78232998, 0.48504979, 0.32175055])
    >>> spherical_to_cartesian(a)  # doctest: +ELLIPSIS
    array([ 3.0000000...,  0.9999999...,  5.9999999...])
    """
    radial, inclination, azimuth = tsplit(a)

    # Hoist the shared sin(theta) factor used by both x and y.
    sin_inclination = np.sin(inclination)
    x = radial * sin_inclination * np.cos(azimuth)
    y = radial * sin_inclination * np.sin(azimuth)
    z = radial * np.cos(inclination)

    return tstack([x, y, z])
def cartesian_to_polar(a: ArrayLike) -> NDArray:
    """
    Convert a cartesian coordinates array :math:`xy` into polar coordinates
    :math:`\\rho\\phi` (radial coordinate, angular coordinate).
    Parameters
    ----------
    a
        Cartesian coordinates array :math:`xy` to transform.
    Returns
    -------
    :class:`numpy.ndarray`
        Polar coordinates array :math:`\\rho\\phi`, :math:`\\rho` is
        in range [0, +inf], :math:`\\phi` is in range [-pi, pi] radians, i.e.
        [-180, 180] degrees.
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([3, 1])
    >>> cartesian_to_polar(a)  # doctest: +ELLIPSIS
    array([ 3.1622776...,  0.3217505...])
    """
    x, y = tsplit(a)

    # np.hypot gives the radial coordinate; arctan2 the angular one.
    return tstack([np.hypot(x, y), np.arctan2(y, x)])
def polar_to_cartesian(a: ArrayLike) -> NDArray:
    """
    Convert a polar coordinates array :math:`\\rho\\phi` (radial coordinate,
    angular coordinate) into cartesian coordinates :math:`xy`.
    Parameters
    ----------
    a
        Polar coordinates array :math:`\\rho\\phi` to transform, :math:`\\rho`
        is in range [0, +inf], :math:`\\phi` is in range [-pi, pi] radians
        i.e. [-180, 180] degrees.
    Returns
    -------
    :class:`numpy.ndarray`
        Cartesian coordinates array :math:`xy`.
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([3.16227766, 0.32175055])
    >>> polar_to_cartesian(a)  # doctest: +ELLIPSIS
    array([ 3.        ,  0.9999999...])
    """
    radial, angular = tsplit(a)

    return tstack([radial * np.cos(angular), radial * np.sin(angular)])
def cartesian_to_cylindrical(a: ArrayLike) -> NDArray:
    """
    Convert a cartesian coordinates array :math:`xyz` into cylindrical
    coordinates :math:`\\rho\\phi z` (radial distance, azimuth and height).
    Parameters
    ----------
    a
        Cartesian coordinates array :math:`xyz` to transform.
    Returns
    -------
    :class:`numpy.ndarray`
        Cylindrical coordinates array :math:`\\rho\\phi z`, :math:`\\rho` is in
        range [0, +inf], :math:`\\phi` is in range [-pi, pi] radians i.e.
        [-180, 180] degrees, :math:`z` is in range [0, +inf].
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([3, 1, 6])
    >>> cartesian_to_cylindrical(a)  # doctest: +ELLIPSIS
    array([ 3.1622776...,  0.3217505...,  6.        ])
    """
    a = as_float_array(a)

    # The xy-plane is handled by the polar transform; the z component is
    # carried through untouched.
    polar = cartesian_to_polar(a[..., 0:2])
    radial, azimuth = tsplit(polar)

    return tstack([radial, azimuth, a[..., -1]])
def cylindrical_to_cartesian(a: ArrayLike) -> NDArray:
    """
    Convert a cylindrical coordinates array :math:`\\rho\\phi z` (radial
    distance, azimuth and height) into cartesian coordinates :math:`xyz`.
    Parameters
    ----------
    a
        Cylindrical coordinates array :math:`\\rho\\phi z` to transform,
        :math:`\\rho` is in range [0, +inf], :math:`\\phi` is in range
        [-pi, pi] radians i.e. [-180, 180] degrees, :math:`z` is in range
        [0, +inf].
    Returns
    -------
    :class:`numpy.ndarray`
        Cartesian coordinates array :math:`xyz`.
    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`
    Examples
    --------
    >>> a = np.array([3.16227766, 0.32175055, 6.00000000])
    >>> cylindrical_to_cartesian(a)  # doctest: +ELLIPSIS
    array([ 3.        ,  0.9999999...,  6.        ])
    """
    a = as_float_array(a)

    # The rho/phi pair is handled by the polar transform; the z component
    # is carried through untouched.
    xy = polar_to_cartesian(a[..., 0:2])
    x, y = tsplit(xy)

    return tstack([x, y, a[..., -1]])
|
# Repository: jmaces/aapm-ct-challenge
import os
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from operators import FanbeamRadon, l2_error
# ----- ----- Abstract Base Network ----- -----
class InvNet(torch.nn.Module, metaclass=ABCMeta):
    """ Abstract base class for networks solving linear inverse problems.
    The network is intended for the denoising of a direct inversion of a 2D
    signal from (noisy) linear measurements. The measurement model
    y = Ax + noise
    can be used to obtain an approximate reconstruction x_ from y using, e.g.,
    the pseudo-inverse of A. The task of the network is either to directly
    obtain x from y or denoise and improve this first inversion x_ towards x.
    """
    def __init__(self):
        super(InvNet, self).__init__()
    @abstractmethod
    def forward(self, z):
        """
        Applies the network to a batch of inputs z, either y or x_ or both.
        """
        pass
    def freeze(self):
        """ Freeze all model weights, i.e. prohibit further updates. """
        for param in self.parameters():
            param.requires_grad = False
    def unfreeze(self):
        """ Unfreeze all model weights, i.e. allow further updates. """
        for param in self.parameters():
            param.requires_grad = True
    @property
    def device(self):
        # Device of the first registered parameter; assumes all parameters
        # live on the same device -- TODO confirm for multi-device setups.
        return next(self.parameters()).device
    def _train_step(
        self,
        batch_idx,
        batch,
        loss_func,
        optimizer,
        scaler,
        batch_size,
        acc_steps,
    ):
        """Run one optimization step on a single batch.

        Forward pass and loss run under autocast (controlled by
        ``self.mixed_prec``, which is set by ``train_on``); the backward
        pass goes through the GradScaler.  The optimizer only steps every
        ``acc_steps`` calls (gradient accumulation).  Returns the
        un-scaled loss together with inputs, targets and predictions.
        """
        with torch.cuda.amp.autocast(enabled=self.mixed_prec):
            # Batches are either (input, target) or (input, aux, target);
            # in the latter case the network receives the (input, aux) pair.
            if len(batch) == 2:
                inp, tar = batch
                inp = inp.to(self.device)
                tar = tar.to(self.device)
                pred = self.forward(inp)
            else:
                inp, aux, tar = batch
                inp = inp.to(self.device)
                aux = aux.to(self.device)
                tar = tar.to(self.device)
                pred = self.forward((inp, aux))
            # Divide by acc_steps so accumulated gradients average out.
            loss = loss_func(pred, tar) / acc_steps
        scaler.scale(loss).backward()
        # NOTE(review): `batch_idx // batch_size` looks suspicious -- the
        # caller passes a batch index, not a sample index, so for
        # acc_steps > 1 this may not step every acc_steps batches as
        # intended.  Harmless for the default acc_steps=1; verify before
        # relying on accumulation.
        if (batch_idx // batch_size + 1) % acc_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
        return loss * acc_steps, inp, tar, pred
    def _val_step(self, batch_idx, batch, loss_func):
        """Evaluate one validation batch; no gradients or optimizer here
        (the caller wraps this in ``torch.no_grad()``)."""
        if len(batch) == 2:
            inp, tar = batch
            inp = inp.to(self.device)
            tar = tar.to(self.device)
            pred = self.forward(inp)
        else:
            inp, aux, tar = batch
            inp = inp.to(self.device)
            aux = aux.to(self.device)
            tar = tar.to(self.device)
            pred = self.forward((inp, aux))
        loss = loss_func(pred, tar)
        return loss, inp, tar, pred
    def _on_epoch_end(
        self,
        epoch,
        save_epochs,
        save_path,
        logging,
        loss,
        inp,
        tar,
        pred,
        v_loss,
        v_inp,
        v_tar,
        v_pred,
        val_data,
        rel_err_val,
        chall_err_val,
    ):
        """Append epoch metrics to the logging DataFrame and periodically
        (every ``save_epochs`` epochs) save weights, losses, and a plot.

        Returns the extended DataFrame.
        """
        self._print_info()
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0; migrating to pd.concat is needed for newer pandas.
        logging = logging.append(
            {
                "loss": loss.item(),
                "val_loss": v_loss.item(),
                "rel_l2_error": l2_error(
                    pred, tar, relative=True, squared=False
                )[0].item(),
                "val_rel_l2_error": rel_err_val,
                # Challenge metric: absolute l2 error normalized by the
                # square root of the number of pixels.
                "chall_err": l2_error(
                    pred, tar, relative=False, squared=False
                )[0].item()
                / np.sqrt(pred.shape[-1] * pred.shape[-2]),
                "val_chall_err": chall_err_val,
            },
            ignore_index=True,
            sort=False,
        )
        print(logging.tail(1))
        if (epoch + 1) % save_epochs == 0:
            fig = self._create_figure(
                logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
            )
            os.makedirs(save_path, exist_ok=True)
            torch.save(
                self.state_dict(),
                os.path.join(
                    save_path, "model_weights_epoch{}.pt".format(epoch + 1)
                ),
            )
            logging.to_pickle(
                os.path.join(
                    save_path, "losses_epoch{}.pkl".format(epoch + 1)
                ),
            )
            fig.savefig(
                os.path.join(save_path, "plot_epoch{}.png".format(epoch + 1)),
                bbox_inches="tight",
            )
        return logging
    def _create_figure(
        self, logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
    ):
        """Build a 2x3 matplotlib summary figure: loss curves, challenge
        metrics, validation input/output/difference, and training output."""
        def _implot(sub, im):
            # Two channels in dim -3 are treated as a complex image and
            # displayed as the magnitude; otherwise channel 0 is shown.
            if im.shape[-3] == 2:  # complex image
                p = sub.imshow(
                    torch.sqrt(im.pow(2).sum(-3))[0, :, :].detach().cpu()
                )
            else:  # real image
                p = sub.imshow(im[0, 0, :, :].detach().cpu())
            return p
        fig, subs = plt.subplots(2, 3, clear=True, num=1, figsize=(15, 10))
        # training and validation loss
        subs[0, 0].set_title("losses")
        subs[0, 0].semilogy(logging["loss"], label="train")
        subs[0, 0].semilogy(logging["val_loss"], label="val")
        subs[0, 0].legend()
        # training and validation challenge-loss
        subs[0, 1].set_title("challenge metrics")
        subs[0, 1].semilogy(logging["chall_err"], label="train")
        subs[0, 1].semilogy(logging["val_chall_err"], label="val")
        subs[0, 1].legend()
        # validation input
        p10 = _implot(subs[1, 0], v_inp)
        subs[1, 0].set_title("val input")
        plt.colorbar(p10, ax=subs[1, 0])
        # validation output
        p11 = _implot(subs[1, 1], v_pred)
        subs[1, 1].set_title(
            "val:\n ||x0-xr||_2 / ||x0||_2 = \n "
            "{:1.2e}".format(logging["val_rel_l2_error"].iloc[-1])
        )
        plt.colorbar(p11, ax=subs[1, 1])
        # validation difference
        p12 = _implot(subs[1, 2], v_pred - v_tar)
        subs[1, 2].set_title(
            "val diff: x0 - x_pred \n val_chall="
            "{:1.2e}".format(logging["val_chall_err"].iloc[-1])
        )
        plt.colorbar(p12, ax=subs[1, 2])
        # training output
        p02 = _implot(subs[0, 2], pred)
        subs[0, 2].set_title(
            "train:\n ||x0-xr||_2 / ||x0||_2 = \n "
            "{:1.2e}".format(logging["rel_l2_error"].iloc[-1])
        )
        plt.colorbar(p02, ax=subs[0, 2])
        return fig
    def _add_to_progress_bar(self, dict):
        """ Can be overwritten by child classes to add to progress bar. """
        return dict
    def _on_train_end(self, save_path, logging):
        # Save final weights and the complete loss log.
        os.makedirs(save_path, exist_ok=True)
        torch.save(
            self.state_dict(), os.path.join(save_path, "model_weights.pt")
        )
        logging.to_pickle(os.path.join(save_path, "losses.pkl"),)
    def _print_info(self):
        """ Can be overwritten by child classes to print at epoch end. """
        pass
    def train_on(
        self,
        train_data,
        val_data,
        num_epochs,
        batch_size,
        loss_func,
        save_path,
        save_epochs=50,
        optimizer=torch.optim.Adam,
        optimizer_params={"lr": 2e-4, "eps": 1e-3},
        scheduler=torch.optim.lr_scheduler.StepLR,
        scheduler_params={"step_size": 1, "gamma": 1.0},
        acc_steps=1,
        train_transform=None,
        val_transform=None,
        train_loader_params={"shuffle": True},
        val_loader_params={"shuffle": False},
        mixed_prec=False,
    ):
        """Full training loop: per-epoch training, validation, logging,
        and periodic checkpointing.

        Returns the pandas DataFrame of logged metrics.  The mutable
        default dicts are safe here because they are copied (loader
        params) or only read (**-expanded optimizer/scheduler params).
        """
        self.mixed_prec = mixed_prec
        # GradScaler is a no-op when mixed precision is disabled.
        scaler = torch.cuda.amp.GradScaler(enabled=mixed_prec)
        optimizer = optimizer(self.parameters(), **optimizer_params)
        scheduler = scheduler(optimizer, **scheduler_params)
        # Install the transforms on the dataset(s); ConcatDataset needs
        # each constituent dataset handled individually.
        if isinstance(train_data, torch.utils.data.ConcatDataset):
            for ds in train_data.datasets:
                ds.transform = train_transform
        else:
            train_data.transform = train_transform
        if isinstance(val_data, torch.utils.data.ConcatDataset):
            for ds in val_data.datasets:
                ds.transform = val_transform
        else:
            val_data.transform = val_transform
        # Copy before mutating to avoid altering the caller's dicts (and
        # the shared defaults).
        train_loader_params = dict(train_loader_params)
        val_loader_params = dict(val_loader_params)
        # A "sampler" entry is expected to be a factory taking the dataset.
        if "sampler" in train_loader_params:
            train_loader_params["sampler"] = train_loader_params["sampler"](
                train_data
            )
        if "sampler" in val_loader_params:
            val_loader_params["sampler"] = val_loader_params["sampler"](
                val_data
            )
        data_load_train = torch.utils.data.DataLoader(
            train_data, batch_size, **train_loader_params
        )
        data_load_val = torch.utils.data.DataLoader(
            val_data, batch_size, **val_loader_params
        )
        logging = pd.DataFrame(
            columns=["loss", "val_loss", "rel_l2_error", "val_rel_l2_error"]
        )
        for epoch in range(num_epochs):
            self.train()  # make sure we are in train mode
            t = tqdm(
                enumerate(data_load_train),
                desc="epoch {} / {}".format(epoch, num_epochs),
                # Ceiling division for the number of batches.
                total=-(-len(train_data) // batch_size),
                # Silence the progress bar on SGE cluster jobs.
                disable="SGE_TASK_ID" in os.environ,
            )
            optimizer.zero_grad()
            loss = 0.0
            for i, batch in t:
                loss_b, inp, tar, pred = self._train_step(
                    i,
                    batch,
                    loss_func,
                    optimizer,
                    scaler,
                    batch_size,
                    acc_steps,
                )
                t.set_postfix(
                    **self._add_to_progress_bar({"loss": loss_b.item()})
                )
                loss += loss_b
            # Average over the number of batches (i is the last index).
            loss /= i + 1
            with torch.no_grad():
                self.eval()  # make sure we are in eval mode
                scheduler.step()
                v_loss = 0.0
                rel_err_val = 0.0
                chall_err_val = 0.0
                for i, v_batch in enumerate(data_load_val):
                    v_loss_b, v_inp, v_tar, v_pred = self._val_step(
                        i, v_batch, loss_func
                    )
                    rel_err_val += l2_error(
                        v_pred, v_tar, relative=True, squared=False
                    )[0].item()
                    chall_err_val += l2_error(
                        v_pred, v_tar, relative=False, squared=False
                    )[0].item() / np.sqrt(v_pred.shape[-1] * v_pred.shape[-2])
                    v_loss += v_loss_b
                v_loss /= i + 1
                rel_err_val /= i + 1
                chall_err_val /= i + 1
                logging = self._on_epoch_end(
                    epoch,
                    save_epochs,
                    save_path,
                    logging,
                    loss,
                    inp,
                    tar,
                    pred,
                    v_loss,
                    v_inp,
                    v_tar,
                    v_pred,
                    val_data,
                    rel_err_val,
                    chall_err_val,
                )
        self._on_train_end(save_path, logging)
        return logging
# ----- ----- Trainable Radon Op ----- -----
class RadonNet(InvNet):
    """Trainable fan-beam Radon operator wrapped as an InvNet.

    Depending on ``mode`` the network applies the forward projection
    ("fwd"), the inversion ("bwd"), both on a pair of inputs ("both"), or
    the forward projection followed by inversion ("chain").
    """
    def __init__(
        self,
        n,
        angles,
        scale,
        d_source,
        n_detect,
        s_detect,
        mode="fwd",
        **kwargs,
    ):
        super(RadonNet, self).__init__()
        self.mode = mode
        # The fan-beam Radon operator holds the (trainable) geometry.
        self.OpR = FanbeamRadon(
            n, angles, scale, d_source, n_detect, s_detect, **kwargs,
        )
    @classmethod
    def new_from_state_dict(cls, state, **kwargs):
        """Alternate constructor: rebuild a RadonNet from a saved state
        dict, then load the weights into it.

        The ``k[4:]`` slice strips the leading "OpR." prefix from the
        state-dict keys; derived entries ("m", "inv_scale", "fwd_offset")
        are not constructor arguments and are removed before calling
        ``cls``.
        """
        state_init = {k[4:]: v for (k, v) in state.items()}
        del state_init["m"]
        del state_init["inv_scale"]
        del state_init["fwd_offset"]
        state_init.update(kwargs)
        net = cls(**state_init)
        net.load_state_dict(state)
        return net
    def forward(self, inp):
        """Apply the operator according to ``self.mode``.

        NOTE(review): an unrecognized mode would leave ``out`` unbound and
        raise UnboundLocalError -- presumably modes are validated upstream;
        confirm.
        """
        if self.mode == "fwd":
            out = self.OpR.dot(inp)
        elif self.mode == "bwd":
            out = self.OpR.inv(inp)
        elif self.mode == "both":
            inp1, inp2 = inp
            out = self.OpR.dot(inp1), self.OpR.inv(inp2)
        elif self.mode == "chain":
            out1 = self.OpR.dot(inp)
            out2 = self.OpR.inv(out1)
            out = (out1, out2)
        return out
    def _print_info(self):
        # Print all current operator parameters at the end of each epoch.
        print("Current parameters(s):")
        print(list(self.parameters()))
    def _train_step(
        self,
        batch_idx,
        batch,
        loss_func,
        optimizer,
        scaler,
        batch_size,
        acc_steps,
    ):
        """Mode-aware training step.

        Autocast is explicitly disabled (enabled=False): the Radon
        operator is trained in full precision regardless of
        ``self.mixed_prec``.  Batch layout depends on ``self.mode``:
        (inp, tar) for "fwd"/"bwd", (inp1, inp2, tar1, tar2) for "both",
        and (inp, tar1, tar2) for "chain".
        """
        with torch.cuda.amp.autocast(enabled=False):
            if self.mode == "fwd" or self.mode == "bwd":
                inp, tar = batch
                inp = inp.to(self.device)
                tar = tar.to(self.device)
            elif self.mode == "both":
                inp1, inp2, tar1, tar2 = batch
                inp1 = inp1.to(self.device)
                inp2 = inp2.to(self.device)
                tar1 = tar1.to(self.device)
                tar2 = tar2.to(self.device)
                inp = (inp1, inp2)
                tar = (tar1, tar2)
            elif self.mode == "chain":
                inp, tar1, tar2 = batch
                inp = inp.to(self.device)
                tar1 = tar1.to(self.device)
                tar2 = tar2.to(self.device)
                tar = (tar1, tar2)
            pred = self.forward(inp)
            # Divide by acc_steps so accumulated gradients average out.
            loss = loss_func(pred, tar) / acc_steps
        scaler.scale(loss).backward()
        # NOTE(review): same suspicious `batch_idx // batch_size` as in
        # InvNet._train_step; harmless for acc_steps=1.
        if (batch_idx // batch_size + 1) % acc_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
        return loss * acc_steps, inp, tar, pred
    def _val_step(self, batch_idx, batch, loss_func):
        """Mode-aware validation step (same batch layouts as
        ``_train_step``; no gradient handling)."""
        if self.mode == "fwd" or self.mode == "bwd":
            inp, tar = batch
            inp = inp.to(self.device)
            tar = tar.to(self.device)
        elif self.mode == "both":
            inp1, inp2, tar1, tar2 = batch
            inp1 = inp1.to(self.device)
            inp2 = inp2.to(self.device)
            tar1 = tar1.to(self.device)
            tar2 = tar2.to(self.device)
            inp = (inp1, inp2)
            tar = (tar1, tar2)
        elif self.mode == "chain":
            inp, tar1, tar2 = batch
            inp = inp.to(self.device)
            tar1 = tar1.to(self.device)
            tar2 = tar2.to(self.device)
            tar = (tar1, tar2)
        pred = self.forward(inp)
        loss = loss_func(pred, tar)
        return loss, inp, tar, pred
    def _on_epoch_end(
        self,
        epoch,
        save_epochs,
        save_path,
        logging,
        loss,
        inp,
        tar,
        pred,
        v_loss,
        v_inp,
        v_tar,
        v_pred,
        val_data,
        rel_err_val,
        chall_err_val,
    ):
        """Log mode-dependent relative errors and periodically save
        weights, losses and a summary figure.

        For "fwd"/"bwd" a single error pair is logged; for "both"/"chain"
        both outputs get their own (val_)rel_l2_error columns.
        """
        self._print_info()
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0; migrating to pd.concat is needed for newer pandas.
        if self.mode == "fwd" or self.mode == "bwd":
            logging = logging.append(
                {
                    "loss": loss.item(),
                    "val_loss": v_loss.item(),
                    "rel_l2_error1": l2_error(
                        pred, tar, relative=True, squared=False
                    )[0].item(),
                    "val_rel_l2_error1": l2_error(
                        v_pred, v_tar, relative=True, squared=False
                    )[0].item(),
                },
                ignore_index=True,
                sort=False,
            )
        elif self.mode == "both" or self.mode == "chain":
            logging = logging.append(
                {
                    "loss": loss.item(),
                    "val_loss": v_loss.item(),
                    "rel_l2_error1": l2_error(
                        pred[0], tar[0], relative=True, squared=False
                    )[0].item(),
                    "val_rel_l2_error1": l2_error(
                        v_pred[0], v_tar[0], relative=True, squared=False
                    )[0].item(),
                    "rel_l2_error2": l2_error(
                        pred[1], tar[1], relative=True, squared=False
                    )[0].item(),
                    "val_rel_l2_error2": l2_error(
                        v_pred[1], v_tar[1], relative=True, squared=False
                    )[0].item(),
                },
                ignore_index=True,
                sort=False,
            )
        print(logging.tail(1))
        if (epoch + 1) % save_epochs == 0:
            fig = self._create_figure(
                logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
            )
            os.makedirs(save_path, exist_ok=True)
            torch.save(
                self.state_dict(),
                os.path.join(
                    save_path, "model_weights_epoch{}.pt".format(epoch + 1)
                ),
            )
            logging.to_pickle(
                os.path.join(
                    save_path, "losses_epoch{}.pkl".format(epoch + 1)
                ),
            )
            fig.savefig(
                os.path.join(save_path, "plot_epoch{}.png".format(epoch + 1)),
                bbox_inches="tight",
            )
        return logging
    def _create_figure(
        self, logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
    ):
        """Build a mode-dependent summary figure: 2x3 panels for
        "fwd"/"bwd", 2x5 panels (two input/target/prediction groups) for
        "both"/"chain"."""
        def _implot(sub, im):
            # Two channels in dim -3 are treated as a complex image and
            # displayed as the magnitude; otherwise channel 0 is shown.
            if im.shape[-3] == 2:  # complex image
                p = sub.imshow(
                    torch.sqrt(im.pow(2).sum(-3))[0, :, :].detach().cpu()
                )
            else:  # real image
                p = sub.imshow(im[0, 0, :, :].detach().cpu())
            return p
        if self.mode == "fwd" or self.mode == "bwd":
            fig, subs = plt.subplots(2, 3, clear=True, num=1, figsize=(20, 15))
            v_inp1, v_tar1, v_pred1 = v_inp, v_tar, v_pred
        elif self.mode == "both":
            fig, subs = plt.subplots(2, 5, clear=True, num=1, figsize=(20, 15))
            v_inp1, v_inp2 = v_inp
            v_tar1, v_tar2 = v_tar
            v_pred1, v_pred2 = v_pred
        elif self.mode == "chain":
            fig, subs = plt.subplots(2, 5, clear=True, num=1, figsize=(20, 15))
            v_inp1 = v_inp
            v_tar1, v_tar2 = v_tar
            v_pred1, v_pred2 = v_pred
            # "chain" has a single input feeding both stages.
            v_inp2 = v_inp1
        # training and validation loss
        subs[0, 0].set_title("losses")
        subs[0, 0].semilogy(logging["loss"], label="train")
        subs[0, 0].semilogy(logging["val_loss"], label="val")
        subs[0, 0].legend()
        # validation input
        p01 = _implot(subs[0, 1], v_inp1)
        subs[0, 1].set_title("val inp")
        plt.colorbar(p01, ax=subs[0, 1])
        # validation target
        p11 = _implot(subs[1, 1], v_tar1)
        subs[1, 1].set_title("val tar")
        plt.colorbar(p11, ax=subs[1, 1])
        # validation prediction
        p12 = _implot(subs[1, 2], v_pred1)
        subs[1, 2].set_title(
            "val pred:\n rel. err. = \n "
            "{:1.2e}".format(logging["val_rel_l2_error1"].iloc[-1])
        )
        plt.colorbar(p12, ax=subs[1, 2])
        # validation difference
        p02 = _implot(subs[0, 2], v_pred1 - v_tar1)
        subs[0, 2].set_title("val diff")
        plt.colorbar(p02, ax=subs[0, 2])
        if self.mode == "both" or self.mode == "chain":
            # validation input
            p03 = _implot(subs[0, 3], v_inp2)
            subs[0, 3].set_title("val inp")
            plt.colorbar(p03, ax=subs[0, 3])
            # validation target
            p13 = _implot(subs[1, 3], v_tar2)
            subs[1, 3].set_title("val tar")
            plt.colorbar(p13, ax=subs[1, 3])
            # validation prediction
            p14 = _implot(subs[1, 4], v_pred2)
            subs[1, 4].set_title(
                "val pred:\n rel. err. = \n "
                "{:1.2e}".format(logging["val_rel_l2_error2"].iloc[-1])
            )
            plt.colorbar(p14, ax=subs[1, 4])
            # validation difference
            p04 = _implot(subs[0, 4], v_pred2 - v_tar2)
            subs[0, 4].set_title("val diff")
            plt.colorbar(p04, ax=subs[0, 4])
        return fig
# ----- ----- Iterative Networks ----- -----
class IterativeNet(InvNet):
    """Unrolled iterative reconstruction network.

    Repeatedly applies a subnetwork (optionally a distinct one per
    iteration) with residual updates and an optional data-consistency
    correction weighted by per-iteration parameters ``lam``.
    """
    def __init__(
        self,
        subnet,
        num_iter,
        lam,
        lam_learnable=True,
        final_dc=True,
        resnet_factor=1.0,
        inverter=None,
        dc_operator=None,
        use_memory=False,
    ):
        super(IterativeNet, self).__init__()
        # A list of subnets means one network per unrolled iteration.
        if isinstance(subnet, list):
            self.subnet = torch.nn.ModuleList(subnet)
        else:
            self.subnet = subnet
        self.num_iter = num_iter
        self.final_dc = final_dc
        self.resnet_factor = resnet_factor
        self.inverter = inverter
        self.dc_operator = dc_operator
        # False disables the memory channels; an int gives their count.
        self.use_memory = use_memory
        # Broadcast scalar lam / lam_learnable to one value per iteration.
        if not isinstance(lam, (list, tuple)):
            lam = [lam] * num_iter
        if not isinstance(lam_learnable, (list, tuple)):
            lam_learnable = [lam_learnable] * len(lam)
        self.lam = torch.nn.ParameterList(
            [
                torch.nn.Parameter(
                    torch.tensor(lam[it]), requires_grad=lam_learnable[it]
                )
                for it in range(len(lam))
            ]
        )
    def forward(self, inp):
        """Run the unrolled iteration.

        ``inp`` is a pair; per the inline comment, (sinogram, fbp) --
        note x is used as the initial reconstruction when no inverter is
        given, while y feeds the inverter and the data-consistency
        operator.
        """
        x, y = inp  # get sinogram and fbp
        # Initial estimate: either invert y or take x as-is.
        if self.inverter is not None:
            xinv = self.inverter(y)
        else:
            xinv = x
        if self.use_memory is not False:
            # Allocate `use_memory` extra channels carried across
            # iterations as hidden state.
            x_shape = xinv.shape
            s = torch.zeros(
                x_shape[0],
                self.use_memory,
                x_shape[2],
                x_shape[3],
                device=xinv.device,
            )
        for it in range(self.num_iter):
            if self.use_memory is not False:
                # Subnet sees the current estimate plus memory channels;
                # channel 0 of the output is the residual update, the
                # remaining channels become the new memory.
                if isinstance(self.subnet, torch.nn.ModuleList):
                    out = self.subnet[it](torch.cat([xinv, s], dim=1))
                else:
                    out = self.subnet(torch.cat([xinv, s], dim=1))
                xinv = self.resnet_factor * xinv + out[:, 0:1, ...]
                s = out[:, 1:, ...]
            else:
                if isinstance(self.subnet, torch.nn.ModuleList):
                    xinv = self.resnet_factor * xinv + self.subnet[it](xinv)
                else:
                    xinv = self.resnet_factor * xinv + self.subnet(xinv)
            # Data-consistency step; skipped on the last iteration when
            # final_dc is False.
            if (self.final_dc) or (
                (not self.final_dc) and it < self.num_iter - 1
            ):
                if self.dc_operator is not None:
                    xinv = xinv - self.lam[it] * self.dc_operator((y, xinv))
        return xinv
    def set_learnable_iteration(self, index):
        """Enable training only for the iterations listed in ``index``;
        freeze all others (assumes per-iteration subnets)."""
        for i in list(range(self.get_num_iter_max())):
            if i in index:
                self.lam[i].requires_grad = True
                self.subnet[i].unfreeze()
            else:
                self.lam[i].requires_grad = False
                self.subnet[i].freeze()
    def get_num_iter_max(self):
        """Total number of unrolled iterations (one lam per iteration)."""
        return len(self.lam)
    def _print_info(self):
        # Report scalar lam values and which ones are trainable.
        print("Current lambda(s):")
        print(
            [
                self.lam[it].item()
                for it in range(len(self.lam))
                if self.lam[it].numel() == 1
            ]
        )
        print([self.lam[it].requires_grad for it in range(len(self.lam))])
        print("Epoch done", flush=True)
# ----- ----- Data Consistency Layer ----- -----
class DCLsqFPB(torch.nn.Module):
    """Least-squares data-consistency layer (forward-project-back).

    Given measurements ``y`` and a current estimate ``x``, computes
    ``operator.inv(operator(x) - y)``, i.e. the inversion of the
    residual of ``x`` with respect to ``y``.
    """

    def __init__(self, operator):
        super().__init__()
        # forward operator; must be callable and expose an `inv` method
        self.operator = operator

    def forward(self, inp):
        y, x = inp
        residual = self.operator(x) - y
        return self.operator.inv(residual)

    def freeze(self):
        """ Freeze all model weights, i.e. prohibit further updates. """
        for p in self.parameters():
            p.requires_grad = False
# ----- ----- U-Net ----- -----
class GroupUNet(InvNet):
    """ U-Net implementation.
    Four-level encoder/decoder with GroupNorm after each convolution.
    The number of feature channels doubles at every encoder level,
    starting from `base_features`.
    Based on https://github.com/mateuszbuda/brain-segmentation-pytorch/
    and modified in agreement with their licence:
    -----
    MIT License
    Copyright (c) 2019 mateuszbuda
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        base_features=32,
        drop_factor=0.0,
        do_center_crop=False,
        num_groups=32,
    ):
        # set properties of UNet
        super(GroupUNet, self).__init__()
        self.do_center_crop = do_center_crop
        # transposed-conv kernel: 3 with center-cropping (output slightly
        # too large, cropped back), 2 without (shapes align exactly)
        kernel_size = 3 if do_center_crop else 2
        # --- encoder: 4 conv blocks, each followed by 2x max-pooling ---
        self.encoder1 = self._conv_block(
            in_channels,
            base_features,
            num_groups,
            drop_factor=drop_factor,
            block_name="encoding_1",
        )
        self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder2 = self._conv_block(
            base_features,
            base_features * 2,
            num_groups,
            drop_factor=drop_factor,
            block_name="encoding_2",
        )
        self.pool2 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder3 = self._conv_block(
            base_features * 2,
            base_features * 4,
            num_groups,
            drop_factor=drop_factor,
            block_name="encoding_3",
        )
        self.pool3 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder4 = self._conv_block(
            base_features * 4,
            base_features * 8,
            num_groups,
            drop_factor=drop_factor,
            block_name="encoding_4",
        )
        self.pool4 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        # --- bottleneck at 1/16 spatial resolution ---
        self.bottleneck = self._conv_block(
            base_features * 8,
            base_features * 16,
            num_groups,
            drop_factor=drop_factor,
            block_name="bottleneck",
        )
        # --- decoder: transposed convs + conv blocks; each conv block
        # input doubles because of the skip-connection concatenation ---
        self.upconv4 = torch.nn.ConvTranspose2d(
            base_features * 16,
            base_features * 8,
            kernel_size=kernel_size,
            stride=2,
        )
        self.decoder4 = self._conv_block(
            base_features * 16,
            base_features * 8,
            num_groups,
            drop_factor=drop_factor,
            block_name="decoding_4",
        )
        self.upconv3 = torch.nn.ConvTranspose2d(
            base_features * 8,
            base_features * 4,
            kernel_size=kernel_size,
            stride=2,
        )
        self.decoder3 = self._conv_block(
            base_features * 8,
            base_features * 4,
            num_groups,
            drop_factor=drop_factor,
            block_name="decoding_3",
        )
        self.upconv2 = torch.nn.ConvTranspose2d(
            base_features * 4,
            base_features * 2,
            kernel_size=kernel_size,
            stride=2,
        )
        self.decoder2 = self._conv_block(
            base_features * 4,
            base_features * 2,
            num_groups,
            drop_factor=drop_factor,
            block_name="decoding_2",
        )
        self.upconv1 = torch.nn.ConvTranspose2d(
            base_features * 2, base_features, kernel_size=kernel_size, stride=2
        )
        self.decoder1 = self._conv_block(
            base_features * 2,
            base_features,
            num_groups,
            drop_factor=drop_factor,
            block_name="decoding_1",
        )
        # 1x1 conv mapping to the requested number of output channels
        self.outconv = torch.nn.Conv2d(
            in_channels=base_features,
            out_channels=out_channels,
            kernel_size=1,
        )

    def forward(self, x):
        """Encode, pass through bottleneck, then decode with skip links."""
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(self.pool1(enc1))
        enc3 = self.encoder3(self.pool2(enc2))
        enc4 = self.encoder4(self.pool3(enc3))

        bottleneck = self.bottleneck(self.pool4(enc4))

        # each decoder level: upsample, crop to the encoder feature size
        # (no-op if do_center_crop is False), concatenate skip, convolve
        dec4 = self.upconv4(bottleneck)
        dec4 = self._center_crop(dec4, enc4.shape[-2], enc4.shape[-1])
        dec4 = torch.cat((dec4, enc4), dim=1)
        dec4 = self.decoder4(dec4)

        dec3 = self.upconv3(dec4)
        dec3 = self._center_crop(dec3, enc3.shape[-2], enc3.shape[-1])
        dec3 = torch.cat((dec3, enc3), dim=1)
        dec3 = self.decoder3(dec3)

        dec2 = self.upconv2(dec3)
        dec2 = self._center_crop(dec2, enc2.shape[-2], enc2.shape[-1])
        dec2 = torch.cat((dec2, enc2), dim=1)
        dec2 = self.decoder2(dec2)

        dec1 = self.upconv1(dec2)
        dec1 = self._center_crop(dec1, enc1.shape[-2], enc1.shape[-1])
        dec1 = torch.cat((dec1, enc1), dim=1)
        dec1 = self.decoder1(dec1)

        return self.outconv(dec1)

    def _conv_block(
        self, in_channels, out_channels, num_groups, drop_factor, block_name
    ):
        """Return conv->GroupNorm->ReLU->Dropout applied twice, with
        layer names prefixed by `block_name`."""
        return torch.nn.Sequential(
            OrderedDict(
                [
                    (
                        block_name + "conv1",
                        torch.nn.Conv2d(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=3,
                            padding=1,
                            bias=True,
                        ),
                    ),
                    (
                        block_name + "bn_1",
                        torch.nn.GroupNorm(num_groups, out_channels),
                    ),
                    (block_name + "relu1", torch.nn.ReLU(True)),
                    (block_name + "dr1", torch.nn.Dropout(p=drop_factor)),
                    (
                        block_name + "conv2",
                        torch.nn.Conv2d(
                            in_channels=out_channels,
                            out_channels=out_channels,
                            kernel_size=3,
                            padding=1,
                            bias=True,
                        ),
                    ),
                    (
                        block_name + "bn_2",
                        torch.nn.GroupNorm(num_groups, out_channels),
                    ),
                    (block_name + "relu2", torch.nn.ReLU(True)),
                    (block_name + "dr2", torch.nn.Dropout(p=drop_factor)),
                ]
            )
        )

    def _center_crop(self, layer, max_height, max_width):
        """Crop `layer` centrally to (max_height, max_width); identity if
        `do_center_crop` is disabled."""
        if self.do_center_crop:
            _, _, h, w = layer.size()
            xy1 = (w - max_width) // 2
            xy2 = (h - max_height) // 2
            return layer[
                :, :, xy2 : (xy2 + max_height), xy1 : (xy1 + max_width)
            ]
        else:
            return layer
# ----- ----- Tiramisu Network ----- -----
class Tiramisu(InvNet):
    """ Tiramisu network implementation.
    Fully-convolutional DenseNet ("Tiramisu"): dense blocks on the down
    and up paths connected by transition-down/-up layers, with skip
    connections between matching resolutions.
    Based on https://github.com/bfortuner/pytorch_tiramisu
    and modified in agreement with their licence:
    -----
    MIT License
    Copyright (c) 2018 <NAME>
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
    """

    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        drop_factor=0.0,
        down_blocks=(5, 5, 5, 5, 5),
        up_blocks=(5, 5, 5, 5, 5),
        pool_factors=(2, 2, 2, 2, 2),
        bottleneck_layers=5,
        growth_rate=8,
        out_chans_first_conv=16,
        use_instance_norm=False,
    ):
        super(Tiramisu, self).__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.use_instance_norm = use_instance_norm

        # init counts of channels; each dense layer adds `growth_rate`
        # channels, tracked in cur_channels_count
        cur_channels_count = 0
        skip_connection_channel_counts = []

        # First Convolution (followed by norm in forward())
        self.bn_layer = (
            torch.nn.InstanceNorm2d(out_chans_first_conv)
            if self.use_instance_norm
            else torch.nn.BatchNorm2d(out_chans_first_conv)
        )
        self.add_module(
            "firstconv",
            torch.nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_chans_first_conv,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=True,
            ),
        )
        cur_channels_count = out_chans_first_conv

        # Downsampling path: dense block (channels grow), record the skip
        # channel count (deepest level first), then transition down
        self.denseBlocksDown = torch.nn.ModuleList([])
        self.transDownBlocks = torch.nn.ModuleList([])
        for i in range(len(self.down_blocks)):
            self.denseBlocksDown.append(
                Tiramisu._DenseBlock(
                    cur_channels_count,
                    growth_rate,
                    self.down_blocks[i],
                    drop_factor,
                    use_instance_norm=self.use_instance_norm,
                )
            )
            cur_channels_count += growth_rate * self.down_blocks[i]
            skip_connection_channel_counts.insert(0, cur_channels_count)
            self.transDownBlocks.append(
                Tiramisu._TransitionDown(
                    cur_channels_count,
                    drop_factor,
                    pool_factors[i],
                    use_instance_norm=self.use_instance_norm,
                )
            )

        # Bottleneck (dense block with upsample=True: emits only the newly
        # grown feature channels)
        self.add_module(
            "bottleneck",
            Tiramisu._Bottleneck(
                cur_channels_count,
                growth_rate,
                bottleneck_layers,
                drop_factor,
                use_instance_norm=self.use_instance_norm,
            ),
        )
        prev_block_channels = growth_rate * bottleneck_layers
        cur_channels_count += prev_block_channels

        # Upsampling path: transition up, concatenate the matching skip,
        # then a dense block
        self.transUpBlocks = torch.nn.ModuleList([])
        self.denseBlocksUp = torch.nn.ModuleList([])
        for i in range(len(up_blocks) - 1):
            self.transUpBlocks.append(
                Tiramisu._TransitionUp(
                    prev_block_channels,
                    prev_block_channels,
                    pool_factors[-i - 1],
                )
            )
            cur_channels_count = (
                prev_block_channels + skip_connection_channel_counts[i]
            )
            self.denseBlocksUp.append(
                Tiramisu._DenseBlock(
                    cur_channels_count,
                    growth_rate,
                    up_blocks[i],
                    drop_factor,
                    upsample=True,
                    use_instance_norm=self.use_instance_norm,
                )
            )
            prev_block_channels = growth_rate * self.up_blocks[i]
            cur_channels_count += prev_block_channels

        # Final DenseBlock (upsample=False: keeps all input channels too)
        self.transUpBlocks.append(
            Tiramisu._TransitionUp(
                prev_block_channels, prev_block_channels, pool_factors[0]
            )
        )
        cur_channels_count = (
            prev_block_channels + skip_connection_channel_counts[-1]
        )
        self.denseBlocksUp.append(
            Tiramisu._DenseBlock(
                cur_channels_count,
                growth_rate,
                self.up_blocks[-1],
                drop_factor,
                upsample=False,
                use_instance_norm=self.use_instance_norm,
            )
        )
        cur_channels_count += growth_rate * self.up_blocks[-1]

        # Final Conv layer (1x1, maps to out_channels)
        self.finalConv = torch.nn.Conv2d(
            in_channels=cur_channels_count,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )

    def forward(self, x):
        """Run the full down/bottleneck/up pass on image batch `x`."""
        out = self.bn_layer(self.firstconv((x)))

        skip_connections = []
        for i in range(len(self.down_blocks)):
            out = self.denseBlocksDown[i](out)
            skip_connections.append(out)
            out = self.transDownBlocks[i](out)

        out = self.bottleneck(out)

        for i in range(len(self.up_blocks)):
            # skips are popped in reverse order (deepest first)
            skip = skip_connections.pop()
            out = self.transUpBlocks[i](out, skip)
            out = self.denseBlocksUp[i](out)

        out = self.finalConv(out)
        return out

    # ----- Blocks for Tiramisu -----

    class _DenseLayer(torch.nn.Sequential):
        """norm -> ReLU -> 3x3 conv -> dropout, emitting `growth_rate`
        channels."""

        def __init__(
            self, in_channels, growth_rate, p, use_instance_norm=False
        ):
            super().__init__()
            self.add_module(
                "bn",
                torch.nn.InstanceNorm2d(in_channels)
                if use_instance_norm
                else torch.nn.BatchNorm2d(in_channels),
            )
            self.add_module("relu", torch.nn.ReLU(True))
            self.add_module(
                "conv",
                torch.nn.Conv2d(
                    in_channels,
                    growth_rate,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=True,
                ),
            )
            self.add_module("drop", torch.nn.Dropout2d(p=p))

        def forward(self, x):
            return super().forward(x)

    class _DenseBlock(torch.nn.Module):
        """Stack of dense layers; with upsample=True only the newly grown
        features are returned, otherwise the input is kept as well."""

        def __init__(
            self,
            in_channels,
            growth_rate,
            n_layers,
            p,
            upsample=False,
            use_instance_norm=False,
        ):
            super().__init__()
            self.upsample = upsample
            self.layers = torch.nn.ModuleList(
                [
                    Tiramisu._DenseLayer(
                        in_channels + i * growth_rate,
                        growth_rate,
                        p,
                        use_instance_norm=use_instance_norm,
                    )
                    for i in range(n_layers)
                ]
            )

        def forward(self, x):
            if self.upsample:
                new_features = []
                # we pass all previous activations to each dense layer normally
                # but we only store each layer's output in the new_features
                for layer in self.layers:
                    out = layer(x)
                    x = torch.cat([x, out], dim=1)
                    new_features.append(out)
                return torch.cat(new_features, dim=1)
            else:
                for layer in self.layers:
                    out = layer(x)
                    x = torch.cat([x, out], dim=1)  # 1 = channel axis
                return x

    class _TransitionDown(torch.nn.Sequential):
        """norm -> ReLU -> 1x1 conv -> dropout -> max-pool, reducing the
        spatial resolution by `pool_factor`."""

        def __init__(
            self, in_channels, p, pool_factor, use_instance_norm=False
        ):
            super().__init__()
            self.add_module(
                "bn",
                torch.nn.InstanceNorm2d(in_channels)
                if use_instance_norm
                else torch.nn.BatchNorm2d(in_channels),
            )
            self.add_module("relu", torch.nn.ReLU(inplace=True))
            self.add_module(
                "conv",
                torch.nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    bias=True,
                ),
            )
            self.add_module("drop", torch.nn.Dropout2d(p))
            self.add_module(
                "maxpool",
                torch.nn.MaxPool2d(
                    kernel_size=pool_factor, stride=pool_factor
                ),
            )

        def forward(self, x):
            return super().forward(x)

    class _TransitionUp(torch.nn.Module):
        """Transposed conv upsampling; output is center-cropped to the
        skip's spatial size and concatenated with it."""

        def __init__(self, in_channels, out_channels, pool_factor):
            super().__init__()
            self.convTrans = torch.nn.ConvTranspose2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=pool_factor,
                padding=0,
                bias=True,
            )

        def forward(self, x, skip):
            out = self.convTrans(x)
            out = Tiramisu._center_crop(out, skip.size(2), skip.size(3))
            out = torch.cat([out, skip], dim=1)
            return out

    class _Bottleneck(torch.nn.Sequential):
        """Dense block at the lowest resolution (emits only new features)."""

        def __init__(
            self,
            in_channels,
            growth_rate,
            n_layers,
            p,
            use_instance_norm=False,
        ):
            super().__init__()
            self.add_module(
                "bottleneck",
                Tiramisu._DenseBlock(
                    in_channels,
                    growth_rate,
                    n_layers,
                    p,
                    upsample=True,
                    use_instance_norm=use_instance_norm,
                ),
            )

        def forward(self, x):
            return super().forward(x)

    # NOTE: deliberately defined without `self`/@staticmethod; always
    # called as a plain function via `Tiramisu._center_crop(...)`.
    def _center_crop(layer, max_height, max_width):
        """Crop `layer` (N, C, H, W) centrally to the given spatial size."""
        _, _, h, w = layer.size()
        xy1 = (w - max_width) // 2
        xy2 = (h - max_height) // 2
        return layer[:, :, xy2 : (xy2 + max_height), xy1 : (xy1 + max_width)]
# ----- ----- Dual Domain Networks ----- -----
class DDNet(InvNet):
    """ Learned Primal Dual network implementation.
    Inspired by https://github.com/adler-j/learned_primal_dual.
    Parameters
    ----------
    p_subnet : torch.nn.Module
        Subnetwork operating in the primal (signal) domain. Can be a single
        network (weight sharing between iterations) or a list of networks of
        length `num_iter` (no weight sharing). Set `None` to use default
        conv nets for each iteration.
    d_subnet : torch.nn.Module
        Subnetwork operating in the dual (measurement) domain. Can be a single
        network (weight sharing between iterations) or a list of networks of
        length `num_iter` (no weight sharing). Set `None` to use default
        conv nets for each iteration.
    num_iter : int
        Number of primal dual iterations.
    num_mem : int
        Number of additional (memory / hidden state) channels. The respective
        subnetworks need to able to process the extra channels.
    op : LinearOperator
        The forward operator.
    use_inv : bool
        Use pseudo-inverse of the operator instead of the adjoint.
        (Default False)
    use_fbp : bool
        Use the inversion (pseudo-inverse or adjoint) as extra channel in the
        primal domain. (Default False)
    use_bn : bool
        Use a version of batch-normalization (group-norm) in the conv nets that
        are the default subnetworks (has no effect if seperate p_subnet and
        d_subnet are provided).
    """

    def __init__(
        self,
        p_subnet,
        d_subnet,
        num_iter,
        num_mem,
        op,
        use_inv=False,
        use_fbp=False,
        use_bn=False,
    ):
        super(DDNet, self).__init__()
        self.op = op
        self.num_iter = num_iter
        self.num_mem = num_mem
        self.use_inv = use_inv
        self.use_fbp = use_fbp
        self.use_bn = use_bn
        # primal subnet: list = no weight sharing; None = default conv
        # blocks; otherwise a single shared module
        if isinstance(p_subnet, list):
            self.p_subnet = torch.nn.ModuleList(p_subnet)
        elif p_subnet is None:
            # default nets see the back-projected dual channel plus,
            # optionally, the provided inversion (fbp) channel
            extra_channel = 2 if self.use_fbp else 1
            self.p_subnet = torch.nn.ModuleList(
                [
                    DDNet._conv_block(
                        self.num_mem + extra_channel,
                        32,
                        self.num_mem,
                        "p_it_{}".format(it),
                        4,
                        self.use_bn,
                    )
                    for it in range(self.num_iter)
                ]
            )
        else:
            self.p_subnet = p_subnet
        # dual subnet: same convention as the primal subnet
        if isinstance(d_subnet, list):
            self.d_subnet = torch.nn.ModuleList(d_subnet)
        elif d_subnet is None:
            # default nets see the projected primal channel and the data y
            self.d_subnet = torch.nn.ModuleList(
                [
                    DDNet._conv_block(
                        self.num_mem + 2,
                        32,
                        self.num_mem,
                        "d_it_{}".format(it),
                        4,
                        self.use_bn,
                    )
                    for it in range(self.num_iter)
                ]
            )
        else:
            self.d_subnet = d_subnet

    def forward(self, inp):
        """Run `num_iter` learned primal-dual iterations on (x, y).

        `x` is an image-domain input (e.g. an FBP), `y` the measurements.
        Returns the first primal channel as the reconstruction.
        """
        # get sinogram and fbp
        x, y = inp
        # init primal and dual variables (num_mem channels each, zeros)
        primal = torch.cat([torch.zeros_like(x)] * self.num_mem, dim=1)
        dual = torch.cat([torch.zeros_like(y)] * self.num_mem, dim=1)
        adj_or_inv = self.op.inv if self.use_inv else self.op.adj
        fac = 1.0 if self.use_inv else 0.01  # handle bad scaling of adj
        for it in range(self.num_iter):
            # dual variable update (sinogram domain)
            # NOTE: uses primal channel 1 as the "current estimate" fed to
            # the operator -- assumes num_mem >= 2 (TODO confirm)
            dual_cat = torch.cat(
                [dual, self.op(primal[:, 1:2, ...]), y], dim=1
            )
            if isinstance(self.d_subnet, torch.nn.ModuleList):
                dual_update = self.d_subnet[it](
                    dual_cat
                )  # without weight sharing
            else:
                dual_update = self.d_subnet(dual_cat)  # with weight sharing
            dual = dual + dual_update
            # primal variable update (image domain)
            if self.use_fbp:
                primal_cat = torch.cat(
                    [primal, fac * adj_or_inv(dual[:, 0:1, ...]), x], dim=1
                )
            else:
                primal_cat = torch.cat(
                    [primal, fac * adj_or_inv(dual[:, 0:1, ...])], dim=1
                )
            if isinstance(self.p_subnet, torch.nn.ModuleList):
                primal_update = self.p_subnet[it](
                    primal_cat
                )  # without weight sharing
            else:
                primal_update = self.p_subnet(
                    primal_cat
                )  # with weight sharing
            primal = primal + primal_update
        return primal[:, 0:1, ...]

    @staticmethod
    def _conv_block(
        in_channels,
        inter_channels,
        out_channels,
        block_name,
        num_groups,
        use_bn=False,
    ):
        """Default three-conv subnet with PReLU activations; GroupNorm
        layers are included only when `use_bn` is True (Identity otherwise,
        so the layer count stays fixed)."""
        return torch.nn.Sequential(
            OrderedDict(
                [
                    (
                        block_name + "conv1",
                        torch.nn.Conv2d(
                            in_channels=in_channels,
                            out_channels=inter_channels,
                            kernel_size=3,
                            padding=1,
                            bias=True,
                        ),
                    ),
                    (
                        block_name + "bn_1",
                        torch.nn.GroupNorm(num_groups, inter_channels),
                    )
                    if use_bn
                    else (block_name + "no_bn_1", torch.nn.Identity()),
                    (block_name + "relu1", torch.nn.PReLU()),
                    (
                        block_name + "conv2",
                        torch.nn.Conv2d(
                            in_channels=inter_channels,
                            out_channels=inter_channels,
                            kernel_size=3,
                            padding=1,
                            bias=True,
                        ),
                    ),
                    (
                        block_name + "bn_2",
                        torch.nn.GroupNorm(num_groups, inter_channels),
                    )
                    if use_bn
                    else (block_name + "no_bn_2", torch.nn.Identity()),
                    (block_name + "relu2", torch.nn.PReLU()),
                    (
                        block_name + "conv3",
                        torch.nn.Conv2d(
                            in_channels=inter_channels,
                            out_channels=out_channels,
                            kernel_size=3,
                            padding=1,
                            bias=True,
                        ),
                    ),
                ]
            )
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: the shebang must be the very first line of the file to be honored
# by the OS loader; the coding cookie may appear on line 1 or 2 (PEP 263).
"""
@Author: <NAME>
@Date: 06-Apr-2017
@Email: <EMAIL>
# @Last modified by: <NAME>
# @Last modified time: 08-Apr-2017
@License: Apache License Version 2.0
@Description: packaging script for the sanic-peewee extension.
"""
# NOTE(review): distutils is deprecated (removed in Python 3.12);
# consider migrating to setuptools.
from distutils.core import setup

# Runtime dependencies with minimum tested versions.
required = ['aiofiles>=0.3.1',
            'aiomysql>=0.0.9',
            'aiopg>=0.13.0',
            'httptools>=0.0.9',
            'peewee>=2.9.1',
            'peewee-async>=0.5.7',
            'psycopg2>=2.7.1',
            'PyMySQL>=0.7.10',
            'sanic>=0.4.1',
            'ujson>=1.35']

# Markdown description shown on the package index page.
long_description = '''# sanic-peewee
sanic-peewee is a async_peewee orm extension for sanic,
I hope users can deal with the database simply and effectively when using sanic.
## Features
+ a peewee API similar to peewee's standard, blocking API.
+ support for async/await (PEP 492) constructs
+ use database url (peewee's playhouse)
+ support pool and pg's ext (peewee-async)
+ sync api for creating and deleting tables, async api for CRUD data.
## Requirements
1. aiomysql>=0.0.9
+ aiopg>=0.13.0
+ peewee>=2.9.1
+ peewee-async>=0.5.7
+ psycopg2>=2.7.1
+ PyMySQL>=0.7.10
+ sanic>=0.4.1
## Installation
pip install sanic-peewee
## Example
```python
from sanic import Sanic
from sanic.response import text,json
from sanic_peewee import Peewee,select
from peewee import CharField, TextField
app = Sanic(__name__)
dburl = "mysql://{user}:{password}@{host}:{port}/{database}".format(
    database='test1',
    port=3306,
    host='127.0.0.1',
    user='root',
    password='<PASSWORD>'
)
peewee = Peewee(dburl)
db = peewee(app)
class KeyValue(db.AsyncModel):
    key = CharField(max_length=40, unique=True)
    text = TextField(default='')
db.create_tables([KeyValue])
@app.route('/post/<key>/<value>')
async def post(request, key, value):
    """
    Save get parameters to database
    """
    obj = await KeyValue.aio.create(key=key, text=value)# use the model's async object to manage the query
    return json({'object_id': obj.id})
@app.route('/get')
async def get(request):
    """
    Load all objects from database
    """
    # use the sanic_peewee object's async api
    all_objects = await db.aio.select(db.SelectQuery(KeyValue))
    serialized_obj = []
    for obj in all_objects:
        serialized_obj.append({
            'id': obj.id,
            'key': obj.key,
            'value': obj.text}
        )
    return json({'objects': serialized_obj})
@app.route("/")
async def test(request):
    return text('Hello world!')
app.run(host="0.0.0.0", port=8000, debug=True)
```
'''

setup(
    name='sanic-peewee',
    version='1.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['sanic_peewee'],
    license='Apache License Version 2.0',
    description='a simple sanic extension for using async-peewee',
    long_description=long_description,
    install_requires=required,
    url="https://github.com/Sanic-Extensions/sanic-peewee"
)
|
import asyncio
import json
import os
import random
import aiohttp
import discord.errors
import requests
from discord.ext import commands
reactions_random = ['👋', '♥', '⚡']
class Errors(commands.Cog):
    """Cog handling bot mentions, command errors, and guild-removal cleanup."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        """React to mentions of the bot; reply with the prefix when the
        message is nothing but a mention."""
        mention_forms = (f"<@{self.bot.user.id}>", f"<@!{self.bot.user.id}>")
        content = str(message.content)
        if any(m in content for m in mention_forms):
            reaction = random.choice(reactions_random).strip()
            await message.add_reaction(reaction)
            # only answer with the prefix if the message is a bare mention
            if message.content not in list(mention_forms):
                return
            with open('./storage/prefixes.json', 'r') as f:
                prefixes = json.load(f)
            # BUGFIX: message.guild is None in DMs -- the original
            # `message.guild.id` raised AttributeError there; fall back to
            # the default prefix instead.
            prefix_server = None
            if message.guild is not None:
                prefix_server = prefixes.get(str(message.guild.id))
            if prefix_server is None:
                prefix_server = "bm-"
            pre = prefix_server
            await message.channel.send(f'Hello! I am {self.bot.user.name},\n'
                                       f'The prefix for this server is : `{pre}`, '
                                       f'and my help command can be accessed using `{pre}help`.')

    @commands.Cog.listener()  # error handling Cog, thanks @YuiiiPTChan
    async def on_command_error(self, ctx, error):
        """Translate common command errors into user-facing messages;
        unknown errors are re-raised so they still reach the logs."""
        # unwrap the original exception raised inside the command body
        if isinstance(error, commands.CommandInvokeError):
            error = error.original
        if isinstance(error, commands.CommandNotFound):
            pass
        elif isinstance(error, discord.errors.Forbidden):
            await ctx.send("I do not have enough permissions to perform this action.")
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.message.add_reaction("‼️".strip())
            await ctx.send("An argument is missing or invalid. Check the help command for the correct usage..")
        elif isinstance(error, commands.BadArgument):
            await ctx.message.add_reaction("‼️".strip())
            await ctx.send("A bad argument has been passed, please check the context and the needed arguments.")
        elif isinstance(error, commands.NoPrivateMessage):
            await ctx.message.add_reaction("‼️".strip())
            await ctx.send("This command cannot be used in private messages. Please use this command in a server.")
        elif isinstance(error, commands.CheckFailure):
            await ctx.message.add_reaction("‼️".strip())
            await ctx.send("You lack the necessary permissions to use this command.")
        elif isinstance(error, aiohttp.ServerDisconnectedError):
            await ctx.send("The API I use was disconnected. Please try again.")
        elif isinstance(error, aiohttp.ServerTimeoutError):
            await ctx.send("Timed out. Please try again later.")
        elif isinstance(error, aiohttp.ClientConnectionError):
            await ctx.send("Could not get response! The API I use may be down.")
        elif isinstance(error, requests.ReadTimeout):
            await ctx.send("Timed out. Please try again.")
        elif isinstance(error, asyncio.TimeoutError):
            pass
        else:
            raise error

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        """Notify the guild owner and delete per-guild config, mute files,
        and the custom prefix when the bot leaves a guild."""
        owner = guild.owner
        # guild.owner may be None (e.g. uncached); skip the DM in that case
        if owner is not None:
            try:
                await owner.send(f"Hello, it seems I have been removed from {guild.name}.\n"
                                 f"Your server's config files will be deleted, "
                                 f"along with the mute files, and the custom prefix.\n"
                                 f"Thank you for having me in your server for this long.\n"
                                 f"Until next time!")
            except discord.errors.HTTPException:
                # BUGFIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; DM failure (closed DMs etc.)
                # is best-effort, so cleanup continues regardless
                pass
        if os.path.exists(f'configs/guild{guild.id}.json'):
            os.remove(f'./configs/guild{guild.id}.json')
        if os.path.exists(f'./storage/mute_files/guild{guild.id}.json'):
            os.remove(f'./storage/mute_files/guild{guild.id}.json')
        with open('./storage/prefixes.json', 'r') as prefixFile:
            data = json.load(prefixFile)
        if str(guild.id) in data.keys():
            data.pop(str(guild.id))
        with open('./storage/prefixes.json', 'w') as prefixFile:
            json.dump(data, prefixFile)
def setup(bot):
    """Extension entry point used by discord.py's load_extension."""
    cog = Errors(bot)
    bot.add_cog(cog)
|
<gh_stars>1-10
"""
References:
[1] <NAME> - Wind Turbine Aerodynamics and Vorticity Based Method, Springer, 2017
[2] <NAME>, <NAME> - Cylindrical vortex wake model: skewed cylinder, application to yawed or tilted rotors - Wind Energy, 2015
Coordinate systems
c coordinate system used in see [2], rotor in plane z_c=0
w wind coordinate system where z_w is the wind direction
theta_yaw : yaw angle, positive around y, pointing upward
x_c = x_w cost + z_w sint
y_c = y_w
z_c = -x_w sint + z_w cost
"""
#--- Legacy python 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# --- General
import matplotlib.pyplot as plt
import numpy as np
import os
# --- Local
from wiz.VortexCylinderSkewed import svc_tang_u
from wiz.VortexCylinder import vc_tang_u
from wiz.VortexRing import rings_u
from tictoc import Timer
try:
from pybra.colors import darkrainbow as cmap
except:
cmap = plt.cm.get_cmap("jet")
# --- Parameters
bWindCoord = True    # express results in the wind coordinate system
bRootVortex = True   # include a counter-rotating root vortex cylinder
R = 1                # rotor radius [m]
r_hub = 0.1*R        # hub (root vortex) radius
CLIM = [0.4,1.1]     # color limits for the velocity plots
LIM = [-2,2] #       # plot extent in x and y, in rotor radii
nx = 30 # Number of points for velocity evaluation
CT = 0.6             # thrust coefficient
theta_yaw = 30*np.pi/180 # rad
U0 = 1               # free-stream wind speed
# Number of cylinders per radius
n_radial = 1 # 1: tip vortex, 2: tip&root, n: "continuous"
# --- Derived params
# wake skew angle; empirical correction of yaw angle as function of CT
chi= theta_yaw*(1+0.3*(1-np.sqrt(1-CT))) # rad
# tangential vortex sheet intensity; tabulated per CT value
if CT==0.4:
    gamma_t = -0.21341 # CT=0.4
elif CT==0.6:
    gamma_t = -0.40 #
else:
    gamma_t = -0.60414 # CT=0.95
ny = nx
m = np.tan(chi)  # wake axis slope in the rotor coordinate system
print('gamma_t ',gamma_t)
print('gamma_t/2',gamma_t/2)
def Tw2c(x_w, y_w, z_w):
    """Rotate wind-frame coordinates into the rotor ("c") frame.

    Yaw rotation by `theta_yaw` about the y axis; identity when
    `bWindCoord` is disabled.
    """
    if not bWindCoord:
        return x_w, y_w, z_w
    ct = np.cos(theta_yaw)
    st = np.sin(theta_yaw)
    return x_w * ct + z_w * st, y_w, -x_w * st + z_w * ct
def Tc2w(x_c, y_c, z_c):
    """Rotate rotor-frame ("c") coordinates back into the wind frame.

    Inverse of Tw2c; identity when `bWindCoord` is disabled.
    """
    if not bWindCoord:
        return x_c, y_c, z_c
    ct = np.cos(theta_yaw)
    st = np.sin(theta_yaw)
    return x_c * ct - z_c * st, y_c, x_c * st + z_c * ct
# --- Loop on diameters
# for nD in [0,4]:
for nD in [0,4]:
    z0_w = nD*2*R #Plane
    # --- Flow field and speed: evaluation grid in the wind frame at z=z0_w
    x_w = np.linspace(LIM[0],LIM[1],nx)
    y_w = np.linspace(LIM[0],LIM[1],ny)
    [X_w,Y_w]=np.meshgrid(x_w,y_w)
    Z_w=X_w*0+z0_w
    X_c,Y_c,Z_c = Tw2c(X_w,Y_w,Z_w)
    # --- Induced velocity from the (skewed) vortex cylinder model
    with Timer('Computing for D={} - cylinder'.format(nD)):
        ux_c,uy_c,uz_c =svc_tang_u(X_c,Y_c,Z_c,gamma_t,R,m)
        ux_c0,uy_c0,uz_c0=svc_tang_u(0,0,0 ,gamma_t,R,m)
        print('uz0',uz_c0)
        if bRootVortex:
            # root vortex cylinder with opposite intensity at the hub radius
            ux_c_root,uy_c_root,uz_c_root=svc_tang_u(X_c,Y_c,Z_c,-gamma_t,r_hub,m)
            ux_c += ux_c_root
            uy_c += uy_c_root
            uz_c += uz_c_root
    uz_c=uz_c+U0*np.cos(theta_yaw) # Adding free wind
    ux_c=ux_c+U0*np.sin(theta_yaw)
    ux,uy,uz = Tc2w(ux_c,uy_c,uz_c)
    # --- Flow field from many rings (discretized cylinder, cross-check)
    with Timer('Computing for D={} - rings'.format(nD)):
        NRings=5000
        ZRings=20*2*R
        Zr = np.linspace(0,ZRings,NRings)
        dz = Zr[1]-Zr[0]
        dzeta = dz/np.cos(chi) # distance along the wake axis
        # each ring carries the sheet intensity integrated over dzeta
        Gamma_Rings = gamma_t*dzeta
        vGamma_r=Zr*0 + Gamma_Rings
        vR_r    =Zr*0 + R
        # ring centers follow the skewed wake axis x = m*z
        Xr = m*Zr
        Yr = 0*Zr
        ux_c0,uy_c0,uz_c0      =rings_u(0,0,0      ,vGamma_r,vR_r,Xr,Yr,Zr,polar_out=False)
        ux_r_c, uy_r_c, uz_r_c =rings_u(X_c,Y_c,Z_c,vGamma_r,vR_r,Xr,Yr,Zr,polar_out=False)
        print('uz0',uz_c0)
        if bRootVortex:
            vR_hub = vR_r*0 +r_hub
            ux_r_c_root, uy_r_c_root, uz_r_c_root =rings_u(X_c,Y_c,Z_c,-vGamma_r,vR_hub,Xr,Yr,Zr,polar_out=False)
            ux_r_c += ux_r_c_root
            uy_r_c += uy_r_c_root
            uz_r_c += uz_r_c_root
    uz_r_c=uz_r_c+U0*np.cos(theta_yaw) # Adding free wind
    ux_r_c=ux_r_c+U0*np.sin(theta_yaw)
    ux_r,uy_r,uz_r = Tc2w(ux_r_c,uy_r_c,uz_r_c)
    # --- Removing singularity
    # TODO
    #      bTip  = abs(sqrt((X-m*z0).^2+Y.^2)-R)<epsilon;
    #      bRoot = sqrt((X-m*z0).^2+Y.^2)<epsilon ;
    #      b=bTip | bRoot;
    #      % b=bTip;

    def plot(ux,uy,uz,label='',clim=None):
        # Filled contour of axial speed with streamlines; saves a PNG.
        # Uses loop/module globals (X_w, Y_w, z0_w, theta_yaw, CT, ...).
        Speed=np.sqrt(uz**2)
        # Temporary HACK until singularity is removed: clip to clim
        print('Min Max: ',np.min(Speed.ravel()),np.max(Speed.ravel()))
        if clim is not None:
            Speed[Speed>clim[1]] = clim[1]
            Speed[Speed<clim[0]] = clim[0]
        print('Min Max: ',np.min(Speed.ravel()),np.max(Speed.ravel()))
        # rotor projection (ellipse: circle seen under the yaw angle)
        vpsi=np.linspace(0,2*np.pi,50)
        xc_w=R*np.cos(vpsi)*np.cos(theta_yaw)
        yc_w=R*np.sin(vpsi)
        dpi=300
        fig=plt.figure()
        ax=fig.add_subplot(111)
        if clim is not None:
            lev=np.linspace(clim[0],clim[1],30)
        else:
            lev=30
        im=ax.contourf(X_w,Y_w,Speed,levels=lev,cmap=cmap)
        ax.plot(xc_w,yc_w,'k--')
        cb=fig.colorbar(im)
        if clim is not None:
            cb.set_clim(clim)
        sp=ax.streamplot(x_w,y_w,ux,uy,color='k',linewidth=0.7,density=2)
        ax.set_xlim(LIM)
        ax.set_ylim(LIM)
        ax.set_xlabel('x/R [-]')
        ax.set_ylabel('y/R [-]')
        ax.set_title('z = {}D{}'.format(int(z0_w/(2*R)),label))
        fig.savefig("VC_yaw{:02d}_CT{:03d}_{:d}D{}.png".format(int(np.round(theta_yaw*180/np.pi)),int(CT*100),int(z0_w/(2*R)),label),dpi=dpi)

    plot(ux  ,uy  ,uz  ,' cylinder',clim=CLIM)
    plot(ux_r,uy_r,uz_r,' rings'   ,clim=CLIM)
plt.show()
|
#!/usr/bin/env python
from __future__ import division, print_function
from collections import defaultdict
import itertools
import numpy as np
from scipy import interp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, auc
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, KFold, StratifiedKFold
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
import composition as comp
import composition.analysis.plotting as plotting
# Plot colors per composition class (matplotlib default-cycle color names).
color_dict = {'light': 'C0', 'heavy': 'C1', 'total': 'C2'}
# Simulation train/test split plus the label encoder and per-event energies.
# comp.preprocess_sim is project code -- exact feature columns not visible here.
X_train_sim, X_test_sim, y_train_sim, y_test_sim, le, energy_train_sim, energy_test_sim = comp.preprocess_sim(return_energy=True)
# Experimental (unlabelled) data features and energies.
X_test_data, energy_test_data = comp.preprocess_data(return_energy=True)
# pipeline = comp.get_pipeline('xgboost')
# clf_name = pipeline.named_steps['classifier'].__class__.__name__
# print('=' * 30)
# print(clf_name)
# scores = cross_val_score(
# estimator=pipeline, X=X_train_sim, y=y_train_sim, cv=3, n_jobs=15)
# print('CV score: {:.2%} (+/- {:.2%})'.format(scores.mean(), scores.std()))
# print('=' * 30)
# Define energy binning for this analysis
# Energy binning used throughout this analysis (project helper; presumably
# returns bin edges/midpoints -- confirm in comp.analysis).
energybins = comp.analysis.get_energybins()
# # Calculate RF generalization error via 10-fold CV
# comp_list = ['light', 'heavy']
# # Split training data into CV training and testing folds
# kf = KFold(n_splits=10)
# frac_correct_folds = defaultdict(list)
# fold_num = 0
# print('Fold ', end='')
# for train_index, test_index in kf.split(X_train_sim):
# fold_num += 1
# print('{}...'.format(fold_num), end='')
# X_train_fold, X_test_fold = X_train_sim[train_index], X_train_sim[test_index]
# y_train_fold, y_test_fold = y_train_sim[train_index], y_train_sim[test_index]
#
# energy_test_fold = energy_train_sim[test_index]
#
# reco_frac, reco_frac_err = get_frac_correct(X_train_fold, X_test_fold,
# y_train_fold, y_test_fold,
# energy_test_fold, comp_list)
# for composition in comp_list:
# frac_correct_folds[composition].append(reco_frac[composition])
# frac_correct_folds['total'].append(reco_frac['total'])
# frac_correct_gen_err = {key: np.std(frac_correct_folds[key], axis=0) for key in frac_correct_folds}
# Simulation dataframe for the IC79 detector configuration; used by the
# (currently commented-out) effective-area / energy-resolution studies below.
df_sim = comp.load_dataframe(datatype='sim', config='IC79')
# reco_frac, reco_frac_stat_err = get_frac_correct(X_train_sim, X_test_sim,
# y_train_sim, y_test_sim,
# energy_test_sim, comp_list)
# step_x = log_energy_midpoints
# step_x = np.append(step_x[0]-log_energy_bin_width/2, step_x)
# step_x = np.append(step_x, step_x[-1]+log_energy_bin_width/2)
# # Plot fraction of events correctlt classified vs energy
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# err = np.sqrt(frac_correct_gen_err[composition]**2 + reco_frac_stat_err[composition]**2)
# plotting.plot_steps(log_energy_midpoints, reco_frac[composition], err, ax, color_dict[composition], composition)
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('Fraction correctly identified')
# ax.set_ylim([0.0, 1.0])
# ax.set_xlim([6.3, 8.0])
# ax.grid(linestyle=':')
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # place a text box in upper left in axes coords
# textstr = '$\mathrm{\underline{Training \ features}}$: \n'
# # for i, label in enumerate(feature_labels):
# # for i, idx in enumerate(sfs.k_feature_idx_):
# # # if i>1:
# # # break
# # print(feature_labels[idx])
# # # textstr += '{}) '.format(i+1) + feature_labels[idx] + '\n'
# # if (i == len(feature_labels)-1):
# # textstr += '{}) '.format(i+1) + feature_labels[idx]
# # else:
# # textstr += '{}) '.format(i+1) + feature_labels[idx] + '\n'
# props = dict(facecolor='white', linewidth=0)
# # ax.text(1.025, 0.855, textstr, transform=ax.transAxes, fontsize=8,
# # verticalalignment='top', bbox=props)
# cv_str = 'Accuracy: {:0.2f}\% (+/- {:.1}\%)'.format(scores.mean()*100, scores.std()*100)
# # print(cvstr)
# # props = dict(facecolor='white', linewidth=0)
# # ax.text(1.025, 0.9825, cvstr, transform=ax.transAxes, fontsize=8,
# # verticalalignment='top', bbox=props)
# ax.text(7.4, 0.2, cv_str,
# ha="center", va="center", size=8,
# bbox=dict(boxstyle='round', fc="white", ec="gray", lw=0.8))
# plt.show()
#
#
# # ## Spectrum
# # [ [back to top](#top) ]
#
# # In[11]:
#
# def get_num_comp_reco(X_train, y_train, X_test, log_energy_test, comp_list):
#
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# # Get number of correctly identified comp in each reco energy bin
# num_reco_energy, num_reco_energy_err = {}, {}
# for composition in comp_list:
# num_reco_energy[composition] = np.histogram(
# log_energy_test[le.inverse_transform(test_predictions) == composition],
# bins=log_energy_bins)[0]
# num_reco_energy_err[composition] = np.sqrt(num_reco_energy[composition])
#
# num_reco_energy['total'] = np.histogram(log_energy_test, bins=log_energy_bins)[0]
# num_reco_energy_err['total'] = np.sqrt(num_reco_energy['total'])
#
# return num_reco_energy, num_reco_energy_err
#
#
# # In[ ]:
#
# df_sim = comp.load_dataframe(datatype='sim', config='IC79')
#
#
# # In[14]:
#
# comp_list = ['light', 'heavy']
# # Get number of events per energy bin
# num_reco_energy, num_reco_energy_err = get_num_comp_reco(X_train_sim, y_train_sim,
# X_test_data, energy_test_data,
# comp_list)
# import pprint
# pprint.pprint(num_reco_energy)
# print(np.sum(num_reco_energy['light']+num_reco_energy['heavy']))
# print(np.sum(num_reco_energy['total']))
# # Solid angle
# solid_angle = 2*np.pi*(1-np.cos(np.arccos(0.8)))
#
#
# # In[15]:
#
# # Live-time information
# goodrunlist = pd.read_table('/data/ana/CosmicRay/IceTop_GRL/IC79_2010_GoodRunInfo_4IceTop.txt', skiprows=[0, 3])
# goodrunlist.head()
#
#
# # In[16]:
#
# livetimes = goodrunlist['LiveTime(s)']
# livetime = np.sum(livetimes[goodrunlist['Good_it_L2'] == 1])
# print('livetime (seconds) = {}'.format(livetime))
# print('livetime (days) = {}'.format(livetime/(24*60*60)))
#
#
# # In[17]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy[composition]
# y_err = num_reco_energy_err[composition]
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err,
# # color=color_dict[composition], label=composition,
# # marker='.', linestyle='None')
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('Rate [s$^{-1}$]')
# ax.set_xlim([6.2, 8.0])
# # ax.set_ylim([10**2, 10**5])
# ax.grid(linestyle=':')
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# plt.show()
#
#
# # In[18]:
#
# eff_area, eff_area_error, energy_midpoints = comp.analysis.get_effective_area(df_sim, energy_bins)
#
#
# # In[19]:
#
# # Plot fraction of events vs energy
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy[composition]/energy_bin_widths
# y_err = num_reco_energy_err[composition]/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # ## Unfolding
# # [ [back to top](#top) ]
#
# # In[20]:
#
# reco_frac['light']
#
#
# # In[21]:
#
# reco_frac['heavy']
#
#
# # In[22]:
#
# num_reco_energy['light']
#
#
# # In[23]:
#
# num_reco_energy['heavy']
#
#
# # In[24]:
#
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# print(true_comp)
# print(pred_comp)
#
#
# # In[25]:
#
# bin_idxs = np.digitize(energy_test_sim, log_energy_bins) - 1
# energy_bin_idx = np.unique(bin_idxs)
# energy_bin_idx = energy_bin_idx[1:]
# print(energy_bin_idx)
# num_reco_energy_unfolded = defaultdict(list)
# for bin_idx in energy_bin_idx:
# energy_bin_mask = bin_idxs == bin_idx
# confmat = confusion_matrix(true_comp[energy_bin_mask], pred_comp[energy_bin_mask], labels=comp_list)
# confmat = np.divide(confmat.T, confmat.sum(axis=1, dtype=float)).T
# inv_confmat = np.linalg.inv(confmat)
# counts = np.array([num_reco_energy[composition][bin_idx] for composition in comp_list])
# unfolded_counts = np.dot(inv_confmat, counts)
# # unfolded_counts[unfolded_counts < 0] = 0
# num_reco_energy_unfolded['light'].append(unfolded_counts[0])
# num_reco_energy_unfolded['heavy'].append(unfolded_counts[1])
# num_reco_energy_unfolded['total'].append(unfolded_counts.sum())
# print(num_reco_energy_unfolded)
#
#
# # In[26]:
#
# unfolded_counts.sum()
#
#
# # In[27]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = num_reco_energy_unfolded[composition]/energy_bin_widths
# y_err = np.sqrt(y)/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # ### Iterative method
#
# # Get confusion matrix for each energy bin
#
# # In[99]:
#
# bin_idxs = np.digitize(energy_test_sim, log_energy_bins) - 1
# energy_bin_idx = np.unique(bin_idxs)
# energy_bin_idx = energy_bin_idx[1:]
# print(energy_bin_idx)
# num_reco_energy_unfolded = defaultdict(list)
# response_mat = []
# for bin_idx in energy_bin_idx:
# energy_bin_mask = bin_idxs == bin_idx
# confmat = confusion_matrix(true_comp[energy_bin_mask], pred_comp[energy_bin_mask], labels=comp_list)
# confmat = np.divide(confmat.T, confmat.sum(axis=1, dtype=float)).T
# response_mat.append(confmat)
#
#
# # In[100]:
#
# response_mat
#
#
# # In[134]:
#
# r = np.dstack((np.copy(num_reco_energy['light']), np.copy(num_reco_energy['heavy'])))[0]
# for unfold_iter in range(50):
# print('Unfolding iteration {}...'.format(unfold_iter))
# if unfold_iter == 0:
# u = r
# fs = []
# for bin_idx in energy_bin_idx:
# # print(u)
# f = np.dot(response_mat[bin_idx], u[bin_idx])
# f[f < 0] = 0
# fs.append(f)
# # print(f)
# u = u + (r - fs)
# # u[u < 0] = 0
# # print(u)
# unfolded_counts_iter = {}
# unfolded_counts_iter['light'] = u[:,0]
# unfolded_counts_iter['heavy'] = u[:,1]
# unfolded_counts_iter['total'] = u.sum(axis=1)
# print(unfolded_counts_iter)
#
#
# # In[135]:
#
# fig, ax = plt.subplots()
# for composition in comp_list + ['total']:
# # Calculate dN/dE
# y = unfolded_counts_iter[composition]/energy_bin_widths
# y_err = np.sqrt(y)/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / livetime
# y_err = y / livetime
# # Add energy scaling
# # energy_err = get_energy_res(df_sim, energy_bins)
# # energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = energy_midpoints**2.7 * y
# y_err = energy_midpoints**2.7 * y_err
# print(y)
# print(y_err)
# # ax.errorbar(log_energy_midpoints, y, yerr=y_err, label=composition, color=color_dict[composition],
# # marker='.', markersize=8)
# plotting.plot_steps(log_energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# # ax.set_xscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.3, 8])
# ax.set_ylim([10**3, 10**5])
# ax.grid(linestyle='dotted', which="both")
# leg = plt.legend(loc='upper center', frameon=False,
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# # plt.savefig('/home/jbourbeau/public_html/figures/spectrum.png')
# plt.show()
#
#
# # In[106]:
#
# print(num_reco_energy)
#
#
# # In[107]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# # correctly_identified_mask = (test_predictions == y_test)
# # confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)/len(y_pred)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# confmat = confusion_matrix(true_comp, pred_comp, labels=comp_list)
#
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Greens):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
#
# print(cm)
#
# plt.imshow(cm, interpolation='None', cmap=cmap,
# vmin=0, vmax=1.0)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
#
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, '{:0.3f}'.format(cm[i, j]),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
#
# plt.tight_layout()
# plt.ylabel('True composition')
# plt.xlabel('Predicted composition')
#
# fig, ax = plt.subplots()
# plot_confusion_matrix(confmat, classes=['light', 'heavy'], normalize=True,
# title='Confusion matrix, without normalization')
#
# # # Plot normalized confusion matrix
# # plt.figure()
# # plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
# # title='Normalized confusion matrix')
#
# plt.show()
#
#
# # In[63]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# test_predictions = pipeline.predict(X_test_sim)
# # correctly_identified_mask = (test_predictions == y_test)
# # confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)/len(y_pred)
# true_comp = le.inverse_transform(y_test_sim)
# pred_comp = le.inverse_transform(test_predictions)
# confmat = confusion_matrix(true_comp, pred_comp, labels=comp_list)
#
# inverse = np.linalg.inv(confmat)
# inverse
#
#
# # In[64]:
#
# confmat
#
#
# # In[66]:
#
# comp_list = ['light', 'heavy']
# # Get number of events per energy bin
# num_reco_energy, num_reco_energy_err = get_num_comp_reco(X_train_sim, y_train_sim, X_test_data, comp_list)
# # Energy-related variables
# energy_bin_width = 0.1
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2
# energy_bin_widths = 10**energy_bins[1:] - 10**energy_bins[:-1]
# def get_energy_res(df_sim, energy_bins):
# reco_log_energy = df_sim['lap_log_energy'].values
# MC_log_energy = df_sim['MC_log_energy'].values
# energy_res = reco_log_energy - MC_log_energy
# bin_centers, bin_medians, energy_err = comp.analysis.data_functions.get_medians(reco_log_energy,
# energy_res,
# energy_bins)
# return np.abs(bin_medians)
# # Solid angle
# solid_angle = 2*np.pi*(1-np.cos(np.arccos(0.85)))
# # solid_angle = 2*np.pi*(1-np.cos(40*(np.pi/180)))
# print(solid_angle)
# print(2*np.pi*(1-np.cos(40*(np.pi/180))))
# # Live-time information
# start_time = np.amin(df_data['start_time_mjd'].values)
# end_time = np.amax(df_data['end_time_mjd'].values)
# day_to_sec = 24 * 60 * 60.
# dt = day_to_sec * (end_time - start_time)
# print(dt)
# # Plot fraction of events vs energy
# fig, ax = plt.subplots()
# for i, composition in enumerate(comp_list):
# num_reco_bin = np.array([[i, j] for i, j in zip(num_reco_energy['light'], num_reco_energy['heavy'])])
# # print(num_reco_bin)
# num_reco = np.array([np.dot(inverse, i) for i in num_reco_bin])
# print(num_reco)
# num_reco_2 = {'light': num_reco[:, 0], 'heavy': num_reco[:, 1]}
# # Calculate dN/dE
# y = num_reco_2[composition]/energy_bin_widths
# y_err = num_reco_energy_err[composition]/energy_bin_widths
# # Add effective area
# y, y_err = comp.analysis.ratio_error(y, y_err, eff_area, eff_area_error)
# # Add solid angle
# y = y / solid_angle
# y_err = y_err / solid_angle
# # Add time duration
# y = y / dt
# y_err = y / dt
# # Add energy scaling
# energy_err = get_energy_res(df_sim, energy_bins)
# energy_err = np.array(energy_err)
# # print(10**energy_err)
# y = (10**energy_midpoints)**2.7 * y
# y_err = (10**energy_midpoints)**2.7 * y_err
# plotting.plot_steps(energy_midpoints, y, y_err, ax, color_dict[composition], composition)
# ax.set_yscale("log", nonposy='clip')
# plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_ylabel('$\mathrm{E}^{2.7} \\frac{\mathrm{dN}}{\mathrm{dE dA d\Omega dt}} \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
# ax.set_xlim([6.2, 8.0])
# # ax.set_ylim([10**2, 10**5])
# ax.grid()
# leg = plt.legend(loc='upper center',
# bbox_to_anchor=(0.5, # horizontal
# 1.1),# vertical
# ncol=len(comp_list)+1, fancybox=False)
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(3.0)
#
# plt.show()
#
#
# # In[44]:
#
# pipeline.get_params()['classifier__max_depth']
#
#
# # In[47]:
#
# energy_bin_width = 0.1
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# fig, axarr = plt.subplots(1, 2)
# for composition, ax in zip(comp_list, axarr.flatten()):
# MC_comp_mask = (df_sim['MC_comp_class'] == composition)
# MC_log_energy = df_sim['MC_log_energy'][MC_comp_mask].values
# reco_log_energy = df_sim['lap_log_energy'][MC_comp_mask].values
# plotting.histogram_2D(MC_log_energy, reco_log_energy, energy_bins, log_counts=True, ax=ax)
# ax.plot([0,10], [0,10], marker='None', linestyle='-.')
# ax.set_xlim([6.2, 8])
# ax.set_ylim([6.2, 8])
# ax.set_xlabel('$\log_{10}(E_{\mathrm{MC}}/\mathrm{GeV})$')
# ax.set_ylabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
# ax.set_title('{} response matrix'.format(composition))
# plt.tight_layout()
# plt.show()
#
#
# # In[10]:
#
# energy_bins = np.arange(6.2, 8.1, energy_bin_width)
# 10**energy_bins[1:] - 10**energy_bins[:-1]
#
#
# # In[ ]:
#
# probs = pipeline.named_steps['classifier'].predict_proba(X_test)
# prob_1 = probs[:, 0][MC_iron_mask]
# prob_2 = probs[:, 1][MC_iron_mask]
# # print(min(prob_1-prob_2))
# # print(max(prob_1-prob_2))
# # plt.hist(prob_1-prob_2, bins=30, log=True)
# plt.hist(prob_1, bins=np.linspace(0, 1, 50), log=True)
# plt.hist(prob_2, bins=np.linspace(0, 1, 50), log=True)
#
#
# # In[ ]:
#
# probs = pipeline.named_steps['classifier'].predict_proba(X_test)
# dp1 = (probs[:, 0]-probs[:, 1])[MC_proton_mask]
# print(min(dp1))
# print(max(dp1))
# dp2 = (probs[:, 0]-probs[:, 1])[MC_iron_mask]
# print(min(dp2))
# print(max(dp2))
# fig, ax = plt.subplots()
# # plt.hist(prob_1-prob_2, bins=30, log=True)
# counts, edges, pathes = plt.hist(dp1, bins=np.linspace(-1, 1, 100), log=True, label='Proton', alpha=0.75)
# counts, edges, pathes = plt.hist(dp2, bins=np.linspace(-1, 1, 100), log=True, label='Iron', alpha=0.75)
# plt.legend(loc=2)
# plt.show()
# pipeline.named_steps['classifier'].classes_
#
#
# # In[ ]:
#
# print(pipeline.named_steps['classifier'].classes_)
# le.inverse_transform(pipeline.named_steps['classifier'].classes_)
#
#
# # In[ ]:
#
# pipeline.named_steps['classifier'].decision_path(X_test)
#
#
# # In[48]:
#
# comp_list = ['light', 'heavy']
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train_sim, y_train_sim)
# # test_probs = defaultdict(list)
# fig, ax = plt.subplots()
# test_predictions = pipeline.predict(X_test_data)
# test_probs = pipeline.predict_proba(X_test_data)
# for class_ in pipeline.classes_:
# test_predictions == le.inverse_transform(class_)
# plt.hist(test_probs[:, class_], bins=np.linspace(0, 1, 50),
# histtype='step', label=composition,
# color=color_dict[composition], alpha=0.8, log=True)
# plt.ylabel('Counts')
# plt.xlabel('Testing set class probabilities')
# plt.legend()
# plt.grid()
# plt.show()
#
#
# # In[5]:
#
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# comp_list = ['P', 'He', 'O', 'Fe']
# fig, ax = plt.subplots()
# test_probs = pipeline.predict_proba(X_test)
# fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
# for composition, ax in zip(comp_list, axarr.flatten()):
# comp_mask = (le.inverse_transform(y_test) == composition)
# probs = np.copy(test_probs[comp_mask])
# print('probs = {}'.format(probs.shape))
# weighted_mass = np.zeros(len(probs))
# for class_ in pipeline.classes_:
# c = le.inverse_transform(class_)
# weighted_mass += comp.simfunctions.comp2mass(c) * probs[:, class_]
# print('min = {}'.format(min(weighted_mass)))
# print('max = {}'.format(max(weighted_mass)))
# ax.hist(weighted_mass, bins=np.linspace(0, 5, 100),
# histtype='step', label=None, color='darkgray',
# alpha=1.0, log=False)
# for c in comp_list:
# ax.axvline(comp.simfunctions.comp2mass(c), color=color_dict[c],
# marker='None', linestyle='-')
# ax.set_ylabel('Counts')
# ax.set_xlabel('Weighted atomic number')
# ax.set_title('MC {}'.format(composition))
# ax.grid()
# plt.tight_layout()
# plt.show()
#
#
# # In[15]:
#
# pipeline = comp.get_pipeline('RF')
# pipeline.fit(X_train, y_train)
# test_predictions = pipeline.predict(X_test)
#
# comp_list = ['P', 'He', 'O', 'Fe']
# fig, ax = plt.subplots()
# test_probs = pipeline.predict_proba(X_test)
# fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
# for composition, ax in zip(comp_list, axarr.flatten()):
# comp_mask = (le.inverse_transform(y_test) == composition)
# probs = np.copy(test_probs[comp_mask])
# weighted_mass = np.zeros(len(probs))
# for class_ in pipeline.classes_:
# c = le.inverse_transform(class_)
# ax.hist(probs[:, class_], bins=np.linspace(0, 1, 50),
# histtype='step', label=c, color=color_dict[c],
# alpha=1.0, log=True)
# ax.legend(title='Reco comp', framealpha=0.5)
# ax.set_ylabel('Counts')
# ax.set_xlabel('Testing set class probabilities')
# ax.set_title('MC {}'.format(composition))
# ax.grid()
# plt.tight_layout()
# plt.show()
#
#
# # In[25]:
#
# comp_list = ['light', 'heavy']
# test_probs = defaultdict(list)
# fig, ax = plt.subplots()
# # test_probs = pipeline.predict_proba(X_test)
# for event in pipeline.predict_proba(X_test_data):
# composition = le.inverse_transform(np.argmax(event))
# test_probs[composition].append(np.amax(event))
# for composition in comp_list:
# plt.hist(test_probs[composition], bins=np.linspace(0, 1, 100),
# histtype='step', label=composition,
# color=color_dict[composition], alpha=0.8, log=False)
# plt.ylabel('Counts')
# plt.xlabel('Testing set class probabilities')
# plt.legend(title='Reco comp')
# plt.grid()
# plt.show()
#
#
# # In[ ]:
#
#
#
#
# # In[ ]:
#
#
#
|
# client.py
import socket
import time
import os
import configparser
import DBC.config as cf
import sqlite3 as lite
class pushdb():
    """Pushes local SQLite databases and status pictures to the GW server.

    Connection parameters (host, user id, port) are read once at class
    creation time from DBC.config; call ``readconfclient`` to re-read them
    on an instance.  Wire protocol: send "<user>\n", then "<action>\n",
    then the raw bytes of the file matching the action code.
    """
    confclient = cf.config()
    # confclient.readconfig()
    print(confclient.gethost())
    print(confclient.getuser())
    host = confclient.gethost()
    user = confclient.getuser()
    user += "\n"  # newline terminates the user id on the wire
    port = confclient.getport()
    # print(user)
    def __init__(self, s=None):
        # Accept an externally created socket (useful for testing);
        # otherwise create a fresh TCP socket.
        if s is None:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.s = s
    def connect(self):
        """Connect the instance socket to the configured server."""
        self.s.connect((self.host, self.port))
    def readconfclient(self):
        """Re-read the config file and refresh host/user/port on this instance."""
        self.confclient.readconfig()
        self.host = self.confclient.gethost()
        self.user = self.confclient.getuser()
        self.user += "\n"
        self.port = self.confclient.getport()
    def _send_file(self, s, path, message):
        """Stream the file at `path` over socket `s` in 1 KiB chunks.

        `message` is printed once the file is open (preserves the original
        log output).  The file is always closed, even if a send fails.
        """
        with open(path, 'rb') as f:
            print(message)
            chunk = f.read(1024)
            while chunk:
                s.send(chunk)
                chunk = f.read(1024)
    def send_action_data(self, action):
        """Connect, identify with the user id, then push the file for `action`.

        Action codes: 3 -> cleaned picture, 4 -> serviced picture,
        5 -> cared picture, 12 -> sleep database (removed and recreated
        empty afterwards), anything else -> ActionData.db.
        """
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((self.host, self.port))
            s.send(self.user.encode('utf-8'))
        except ConnectionRefusedError:
            print("Server is down")
        except ConnectionError:
            print("connection error")
        except BrokenPipeError:
            print("No connection")
        except Exception as e:
            print(str(e))
        connected = True
        while connected:
            try:
                print(action)
                backtion = str(action) + "\n"
                s.send(backtion.encode('utf-8'))
                if action == 3:
                    self._send_file(s, '/home/pi/project/Database/PICS/cleaned.png',
                                    'sending cleaned picture')
                    print('Cleaned picture send')
                elif action == 4:
                    self._send_file(s, '/home/pi/project/Database/PICS/serviced.png',
                                    'sending serviced picture')
                    print('Serviced picture send')
                elif action == 5:
                    self._send_file(s, '/home/pi/project/Database/PICS/cared.png',
                                    'sending cared picture')
                    print('Cared picture send')
                elif action == 12:
                    self._send_file(s, '/home/pi/project/Database/sleep.db',
                                    'sending sleep db')
                    # Reset the sleep database: delete it and recreate the
                    # empty Sleep table.
                    os.remove('/home/pi/project/Database/sleep.db')
                    con = lite.connect('/home/pi/project/Database/sleep.db', check_same_thread=False)
                    cur = con.cursor()
                    cur.execute("CREATE TABLE IF NOT EXISTS Sleep(Id INTEGER PRIMARY KEY, Time DATETIME, X NUMERIC, Y NUMERIC, Z NUMERIC)")
                    # BUG FIX: was `con.commit` (attribute access, never
                    # called), so the CREATE TABLE could be rolled back on close.
                    con.commit()
                    con.close()
                    print('Sleep db send')
                else:
                    self._send_file(s, '/home/pi/project/Database/ActionData.db',
                                    'sending action data')
                    print('Action data sent')
                connected = False
            except BrokenPipeError:
                print("No connection")
                connected = False
            except Exception as e:
                print(str(e))
                connected = False
            finally:
                print("closing pushdb")
                s.close()
|
<reponame>DAtek/symbol-detector<gh_stars>0
from asyncio import sleep, create_task, run, Task
from queue import Queue
from threading import Thread
from typing import Optional
import cv2.cv2 as cv2
import numpy as np
from symbol_detector.core import FilterProperty, filter_image, get_center, copy_drawing, draw_lines, \
resize_to_standard, ceil_blur, compare_cos
from symbol_detector.settings import config
class BaseWorker:
    """Start/stop lifecycle for a worker whose main loop is an async ``run``.

    Subclasses override ``run`` and call ``run_end`` when it exits.  The
    ``_running`` flag tells the loop to keep iterating; ``_finished`` tells
    ``loop_start`` whether the previous run has fully wound down.
    """
    def __init__(self):
        self._running = False
        self._finished = True
        self._task: Optional[Task] = None
    def loop_start(self, *args, **kwargs):
        """Schedule ``run`` unless the worker is active or still stopping."""
        if self._finished and not self._running:
            self._running = True
            self._finished = False
            coro = self.run(*args, **kwargs)
            try:
                # Inside a running event loop: schedule on it.
                self._task = create_task(coro)
            except RuntimeError:
                # No loop in this thread: drive the coroutine on its own thread.
                Thread(target=run, kwargs={'main': coro}).start()
    def loop_stop(self):
        """Ask the run loop to exit after its current iteration."""
        self._running = False
    def run_end(self):
        """Mark the run coroutine as completely finished (called by ``run``)."""
        self._finished = True
    @property
    def running(self):
        return self._running
    async def run(self, *args, **kwargs):
        """Main worker loop; must be implemented by subclasses."""
        raise NotImplementedError
class FrameProcessor(BaseWorker):
    """Camera worker: grabs frames, isolates the pointer blob by color
    filtering and forwards each completed stroke path to the detect callback.
    """
    # Shared camera handle and frame shape; populated by init_camera().
    _cam = None
    shape = None
    # NOTE(review): the string below was presumably meant to be the class
    # docstring but is a plain expression statement; kept byte-identical.
    """ Recording the pointer's x,y coordinates, forwarding the movement's path """
    def __init__(self, callback_detect, out_queue: Queue = None):
        """callback_detect: async callable fired with the collected point list.
        out_queue: optional queue receiving masked RGBA preview frames."""
        super().__init__()
        self._callback_detect = callback_detect
        self._out_queue = out_queue
        # Color-filter thresholds (YCbCr ranges + blur), taken from app config.
        self._filter_property = FilterProperty(
            y_min=config.y_min,
            y_max=config.y_max,
            cb_min=config.cb_min,
            cb_max=config.cb_max,
            cr_min=config.cr_min,
            cr_max=config.cr_max,
            blur=config.blur,
        )
        # Number of consecutive pointer-less frames that ends a stroke.
        self._NBREAK = 10
        self._points = list()  # points of the stroke currently being drawn
        self._n_break = 0  # consecutive frames without a pointer (stroke break)
        self._sleep_time = 0.025  # poll interval; raised to 0.2 s when idle
        self._i = 0  # consecutive no-pointer frames while at the fast rate
    @staticmethod
    def init_camera():
        """Open and configure the camera; record the frame shape on the class."""
        FrameProcessor._cam = cv2.VideoCapture(config.camera_driver)
        FrameProcessor._cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)
        FrameProcessor._cam.set(cv2.CAP_PROP_EXPOSURE, config.camera_exposure)
        FrameProcessor._cam.set(cv2.CAP_PROP_FRAME_WIDTH, config.camera_width)
        FrameProcessor._cam.set(cv2.CAP_PROP_FRAME_HEIGHT, config.camera_height)
        # Read one frame so the true capture shape is known.
        ret, frame = FrameProcessor._cam.read()
        FrameProcessor.shape = frame.shape
    @staticmethod
    def release_camera():
        """Release the camera if it is still open."""
        if FrameProcessor._cam.isOpened():
            FrameProcessor._cam.release()
    async def run(self):
        """Main loop: capture points until stopped, throttling when idle."""
        self._filter_property.load_from_settings(config)
        self.init_camera()
        while self.running and self._cam.isOpened():
            point = await self.capture_point()
            if not point[0] and self._sleep_time == 0.025:
                # Count pointer-less frames; after 200 drop to slow polling.
                self._i += 1
                if self._i == 200:
                    self._sleep_time = 0.2
            elif point[0]:
                # Pointer seen again: restore fast polling.
                self._i = 0
                self._sleep_time = 0.025
            await self.analyze_point(point)
            await sleep(self._sleep_time)
        self.release_camera()
        self.run_end()
    async def capture_point(self):
        """Grab one frame and return ``(found, center)``.

        ``center`` is the centroid of the last contour whose area lies within
        (config.area_min, config.area_max); it is 0 when ``found`` is False.
        """
        is_point = False
        point = 0
        ret, frame = self._cam.read()
        thresh = await filter_image(frame, self._filter_property)
        mask = thresh > 0
        if self._out_queue:
            # Publish a mirrored preview with non-matching pixels blacked out.
            # NOTE(review): the mask comes from the unflipped frame while the
            # preview is flipped -- verify this is intended.
            out = cv2.flip(frame, 1)
            out[:, :, 0] *= mask
            out[:, :, 1] *= mask
            out[:, :, 2] *= mask
            out = cv2.cvtColor(out, cv2.COLOR_BGR2RGBA)
            self._out_queue.put(out)
        cnts, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for i in range(len(cnts)):
            area = cv2.contourArea(cnts[i])
            # NOTE(review): if several contours qualify, the last one wins.
            if (area > config.area_min) & (area < config.area_max):
                point = await get_center(cnts[i])
                is_point = True
        return is_point, point
    async def analyze_point(self, point):
        """Collect stroke points; dispatch the path after _NBREAK misses."""
        if not point[0] and len(self._points) > 3:
            self._n_break += 1
            if self._n_break == self._NBREAK:
                # Stroke finished: hand the path to the detector and reset.
                _ = create_task(self._callback_detect(self._points))
                self._points = list()
                self._n_break = 0
        elif point[0]:
            self._points.append(point[1])
class ShapeDetector:
    """Matches a drawn stroke path against a set of reference symbols.

    ``process`` rasterizes the path, normalizes the drawing, compares it to
    the known symbols, and on a match reports the symbol type through the
    optional queues/callback supplied at construction time.
    """
    def __init__(
        self,
        symbols,
        image_size,
        im_shape,
        ref_queue: Queue = None,
        result_queue: Queue = None,
        callback=None,
    ):
        super().__init__()
        # Reference symbols and the (square) comparison resolution.
        self.symbols = symbols
        self.image_size = image_size
        # Worst-case pixel difference: every pixel off by the full 255.
        self.max_diff = (float(image_size) ** 2.0) * 255.0
        # Raw drawing canvas shape (rows, cols).
        self._im_shape = im_shape
        # Optional sinks for previews/results and an async match callback.
        self._ref_queue = ref_queue
        self._result_queue = result_queue
        self._callback = callback
        self.current_image = None
        self._running = False
    def save_actual(self, filename):
        """Write a copy of the most recently rasterized drawing to disk."""
        cv2.imwrite(filename, copy_drawing(self.current_image))
    async def process(self, points):
        """Rasterize ``points`` and compare the drawing to the symbol set."""
        if len(points) < 3:
            # Too short to be a deliberate stroke.
            return
        canvas = np.zeros([self._im_shape[0], self._im_shape[1]], np.uint8)
        await draw_lines(canvas, points, 1)
        self.current_image = canvas.copy()
        canvas = resize_to_standard(canvas, self.image_size)
        canvas = ceil_blur(canvas, 15, 3)
        typ, ref, diff = await compare_cos(canvas, self.symbols)
        recognized = diff < config.max_rel_error
        if self._result_queue:
            # Preview of the best reference: channel 1 on a match, channel 0
            # otherwise.
            preview = np.zeros([ref.shape[0], ref.shape[1], 3], np.uint8)
            preview[:, :, 1 if recognized else 0] = ref
            self._result_queue.put(preview)
        if self._ref_queue:
            self._ref_queue.put(cv2.cvtColor(canvas, cv2.COLOR_GRAY2RGBA))
        if recognized:
            print(typ)
            if self._callback:
                _ = create_task(self._callback(typ))
|
"""
Copyright (C) 2010-2022 Alibaba Group Holding Limited.
"""
import os
import numpy as np
import mmcv
import matplotlib.pyplot as plt
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.models import DETECTORS
from mmdet.models import SingleStageDetector
from ...core import draw_box_3d_pred, show_bev, deformable_point_vis
from ...utils.timer import default_timers
# Register named timers used to profile inference in EProPnPDet below.
default_timers.add_timer('backbone time')
default_timers.add_timer('full model time')
@DETECTORS.register_module()
class EProPnPDet(SingleStageDetector):
    """Single-stage EPro-PnP detector.

    Delegates feature extraction to the backbone and 2D/3D prediction to
    ``self.bbox_head``; ``show_result`` renders 2D boxes, 3D boxes, BEV,
    score maps, deformable points and orientation densities.
    """
    def __init__(self, *args, cls_orientation=None, **kwargs):
        # cls_orientation: optional per-class orientation values used only
        # for the BEV visualization in show_result().
        super(EProPnPDet, self).__init__(*args, **kwargs)
        self.CLS_ORIENTATION = cls_orientation
    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      depth=None,
                      **kwargs):
        """Training forward pass; returns the loss dict from the bbox head."""
        # Deliberately skips SingleStageDetector.forward_train and calls the
        # grandparent implementation instead (super(SingleStageDetector, ...)).
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, **kwargs)
        return losses
    def simple_test(self, img, img_metas, rescale=False, **kwargs):
        """Single-image inference; backbone time is measured separately."""
        with default_timers['backbone time']:
            x = self.extract_feat(img)
        return self.bbox_head.simple_test(x, img_metas, **kwargs)
    def aug_test(self, imgs, img_metas, rescale=False, **kwargs):
        """Test-time-augmented inference across multiple images."""
        with default_timers['backbone time']:
            x = self.extract_feats(imgs)
        return self.bbox_head.aug_test(x, img_metas, **kwargs)
    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        with default_timers['full model time']:
            for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
                if not isinstance(var, list):
                    raise TypeError(f'{name} must be a list, but got {type(var)}')
            num_augs = len(imgs)
            if num_augs != len(img_metas):
                raise ValueError(f'num of augmentations ({len(imgs)}) '
                                 f'!= num of image meta ({len(img_metas)})')
            # NOTE the batched image size information may be useful, e.g.
            # in DETR, this is needed for the construction of masks, which is
            # then used for the transformer_head.
            for img, img_meta in zip(imgs, img_metas):
                batch_size = len(img_meta)
                for img_id in range(batch_size):
                    img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
            if num_augs == 1:
                return self.simple_test(imgs[0], img_metas[0], **kwargs)
            else:
                return self.aug_test(imgs, img_metas, **kwargs)
    def show_result(self,
                    img,
                    result,
                    ori_img=None,
                    cam_intrinsic=None,
                    gt_bboxes_3d=None,
                    gt_labels=None,
                    score_thr=0.3,
                    cov_scale=5.0,
                    bev_scale=25,
                    thickness=2,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None,
                    views=['2d', '3d', 'bev']):
        """Render the requested ``views`` of ``result``.

        '3d' and 'bev' can be shown interactively (``show=True``) and/or
        written next to ``out_file``; '2d', 'score', 'pts' and 'orient'
        are written to files only.

        NOTE(review): ``views`` is a mutable default argument; it is not
        mutated here, but replacing it with ``None`` + in-body default
        would be safer.
        """
        img = mmcv.imread(img)
        ori_img = mmcv.imread(ori_img)
        img_show = []
        if '3d' in views:
            # Draw predicted 3D boxes projected onto the original image.
            img_pred_3d = ori_img.copy()
            draw_box_3d_pred(
                img_pred_3d,
                result['bbox_3d_results'],
                cam_intrinsic,
                score_thr=score_thr)
            img_show.append(img_pred_3d)
        if 'bev' in views:
            orientation = None
            gt_orientation = None
            if self.CLS_ORIENTATION is not None:
                # Expand the per-class orientation to one entry per box.
                orientation = []
                for i, bbox_3d_result in enumerate(result['bbox_3d_results']):
                    orientation.append(
                        np.array([self.CLS_ORIENTATION[i]] * len(bbox_3d_result)))
                if gt_labels is not None:
                    gt_orientation = [self.CLS_ORIENTATION[label] for label in gt_labels]
            viz_bev = show_bev(
                ori_img, result['bbox_results'], result['bbox_3d_results'],
                cam_intrinsic, width=ori_img.shape[1], height=img.shape[0], scale=bev_scale,
                pose_samples=result.get('pose_samples', None),
                pose_sample_weights=result.get('pose_sample_weights', None),
                orientation=orientation,
                gt_bboxes_3d=gt_bboxes_3d,
                gt_orientation=gt_orientation,
                score_thr=score_thr, thickness=2)
            img_show.append(viz_bev)
        if show:
            # Only one or two stacked views are supported interactively.
            if len(img_show) == 1:
                img_show = img_show[0]
            elif len(img_show) == 2:
                img_show = np.concatenate(img_show, axis=0)
            else:
                raise ValueError('no view to show')
            mmcv.imshow(img_show, win_name, wait_time)
        if out_file is not None:
            if '3d' in views:
                mmcv.imwrite(img_pred_3d, out_file[:-4] + '_3d.jpg')
            if 'bev' in views:
                mmcv.imwrite(viz_bev, out_file[:-4] + '_bev.png')
            if '2d' in views:
                assert 'bbox_results' in result
                # Flatten the per-class box lists into one array with labels.
                multi_cls_results = np.concatenate(result['bbox_results'], axis=0)
                labels = []
                for i, bbox_single in enumerate(result['bbox_results']):
                    labels += [i] * bbox_single.shape[0]
                labels = np.array(labels)
                imshow_det_bboxes(
                    ori_img,
                    multi_cls_results,
                    labels,
                    class_names=self.CLASSES,
                    score_thr=score_thr,
                    thickness=thickness,
                    show=False,
                    out_file=out_file[:-4] + '_2d.jpg')
            if 'score' in views:
                assert 'score' in result
                # Blend the summed score map over the input image.
                score = result['score'][:, :img.shape[0], :img.shape[1]].sum(axis=0)
                score = (score * 256).clip(min=0, max=255).astype(np.uint8)
                score = score[..., None] * 0.8 + img * 0.2
                mmcv.imwrite(score, out_file[:-4] + '_score.jpg')
            if 'pts' in views:
                assert 'x2d' in result and 'w2d' in result
                num_head = self.bbox_head.num_heads
                pts_obj, pts_head, pts_xy = deformable_point_vis(
                    ori_img, result, score_thr, num_head)
                mmcv.imwrite(pts_obj, out_file[:-4] + '_pts_obj.jpg')
                mmcv.imwrite(pts_head, out_file[:-4] + '_pts_head.jpg')
                mmcv.imwrite(pts_xy, out_file[:-4] + '_pts_xy.jpg')
            if 'orient' in views:
                assert 'orient_logprob' in result and 'bbox_results' in result
                dirname = out_file[:-4] + '/'
                os.makedirs(dirname, exist_ok=True)
                # One yaw-density plot + image patch per detected box.
                for cls_id in range(len(self.CLASSES)):
                    for i, (bbox_single, rot_logprob_single) in enumerate(zip(
                            result['bbox_results'][cls_id], result['orient_logprob'][cls_id])):
                        if bbox_single[4] < score_thr:
                            continue
                        filename = '{}_{:02d}'.format(self.CLASSES[cls_id], i)
                        x1, y1, x2, y2 = bbox_single[:4].round().astype(np.int64)
                        img_patch = ori_img[y1:y2, x1:x2]
                        mmcv.imwrite(img_patch, dirname + filename + '.jpg')
                        rot_bins = len(rot_logprob_single)
                        radian_div_pi = np.linspace(0, 2 * (rot_bins - 1) / rot_bins, num=rot_bins)
                        plt.figure(figsize=(4, 2))
                        plt.plot(radian_div_pi, np.exp(rot_logprob_single))
                        plt.xlim([0, 2])
                        plt.gca().set_ylim(bottom=0)
                        # NOTE(review): '\p' is an invalid escape sequence in
                        # modern Python; consider raw strings r'$\pi$/2' etc.
                        plt.xticks([0, 0.5, 1, 1.5, 2],
                                   ['0', '$\pi$/2', '$\pi$', '3$\pi$/2', '2$\pi$'])
                        plt.gca().xaxis.grid(True)
                        plt.xlabel('Yaw')
                        plt.ylabel('Density')
                        plt.tight_layout()
                        plt.savefig(dirname + filename + '.png')
                        plt.savefig(dirname + filename + '.eps')
                        plt.close()
        if not (show or out_file):
            return img_show
|
# filename: test/testp4svn_actions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Test cases of perforce to svn replication
'''
import os
import unittest
import tempfile
from testcommon import get_p4d_from_docker
from testcommon_p4svn import replicate_P4SvnReplicate, verify_replication
from lib.buildlogger import getLogger
from lib.buildcommon import generate_random_str
import testp4svn_samples
logger = getLogger(__name__)
class P4SvnActionRepTest(testp4svn_samples.P4SvnReplicationTest):
    """Replication tests for file-removal scenarios (p4 -> svn)."""
    def p4svn_action_remove_setup_env(self, depot_dir, action, **kwargs):
        """Populate a Perforce depot with files, then apply ``action``.

        Args:
            depot_dir: depot path to populate.
            action: one of 'remove_one_by_one', 'remove_all_in_one_change',
                'remove_all_add_one', 'remove_all_add_one_in_parent'.
            **kwargs:
                levels_of_dir (int): extra nested directory levels (default 0).
                place_holder_file (int): level index at which to add a
                    placeholder file that prevents dir deletion (default -1,
                    i.e. none).
        """
        levels_of_dir = kwargs.get('levels_of_dir', 0)
        place_holder_file = kwargs.get('place_holder_file', -1)
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        with get_p4d_from_docker(src_docker_cli, depot_dir) as p4:
            clientspec = p4.fetch_client(p4.client)
            ws_root = clientspec._root
            project_dir = 'a_dir'
            project_dir = tempfile.mkdtemp(prefix=project_dir, dir=ws_root)
            # add a file in project dir
            proj_file_path = os.path.join(project_dir, 'project_file')
            description = 'add a file with file name %s' % proj_file_path
            with open(proj_file_path, 'wt') as f:
                f.write('My name is %s!\n' % proj_file_path)
            p4.run_add('-f', proj_file_path)
            p4.run_submit('-d', description)
            # make a directory and add files in it
            file_names = ['a_file.txt', 'another_file.txt',
                          'yet_another_file.txt']
            test_dir = tempfile.mkdtemp(dir=project_dir)
            for i in range(levels_of_dir):
                test_dir = tempfile.mkdtemp(dir=test_dir, prefix='%s_' % i)
                if place_holder_file == i:
                    file_path = os.path.join(test_dir, 'place_holder')
                    with open(file_path, 'wt') as f:
                        f.write('prevent deletion of dir!\n')
                    p4.run_add('-f', file_path)
                    p4.run_submit('-d', 'add a file to prevent deletion of dir')
            # add the files
            for fn in file_names:
                file_path = os.path.join(test_dir, fn)
                description = 'add a file with file name %s' % fn
                with open(file_path, 'wt') as f:
                    f.write('My name is %s!\n' % fn)
                p4.run_add('-f', file_path)
                p4.run_submit('-d', description)
            if action == 'remove_one_by_one':
                # remove all files one by one
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description = 'remove %s' % fn
                    p4.run_delete(file_path)
                    p4.run_submit('-d', description)
            elif action == 'remove_all_in_one_change':
                # remove all files all together
                description = ''
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description += 'remove %s\n' % fn
                    p4.run_delete(file_path)
                p4.run_submit('-d', description)
            elif action in ['remove_all_add_one',
                            'remove_all_add_one_in_parent']:
                # 1) remove_all_add_one
                #    remove all files all together but add a new file in
                #    the same directory, no directory should be deleted
                # 2) remove_all_add_on_in_parent
                #    remove all files all together and add a new file in
                #    the parent directory, current directory should be deleted
                description = ''
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description += 'remove %s\n' % fn
                    p4.run_delete(file_path)
                file_path = os.path.join(test_dir, 'fantastic_additional')
                if action == 'remove_all_add_one_in_parent':
                    test_dir_parent = os.path.split(test_dir)[0]
                    file_path = os.path.join(test_dir_parent, 'fantastic_additional')
                # NOTE(review): this overwrites the accumulated removal
                # description and reuses the loop's last 'fn' rather than
                # the new file's name — the submitted change description is
                # misleading, though harmless for the replication check.
                description = 'add a file with file name %s' % fn
                with open(file_path, 'wt') as f:
                    f.write('My name is %s!\n' % fn)
                p4.run_add('-f', file_path)
                p4.run_submit('-d', description)
            else:
                logger.error('"%s" not yet implemented' % action)
            # edit the first file so there is a trailing change to replicate
            p4.run_edit(proj_file_path)
            with open(proj_file_path, 'a') as f:
                f.write('My name is %s!\n' % proj_file_path)
            p4.run_submit('-d', 'editing %s' % proj_file_path)
    def test_p4svn_action_remove_empty_dir_one_by_one(self):
        '''test that directory should be removed automatically if all files in
        it are removed one by one.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one'
        depot_dir = '/depot/%s' % test_case
        # NOTE(review): src_docker_cli/dst_docker_cli are unused in these
        # tests; replication is driven by replicate_sample_dir_withdocker.
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir, action='remove_one_by_one')
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_all_files_in_one_change(self):
        '''test that directory should be removed automatically if all files in
        it are removed in one change.
        '''
        test_case = 'p4svn_action_remove_empty_dir_all_files_in_one_change'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_in_one_change')
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_all_files_in_one_change_multi_levels(self):
        '''test that directories should be removed recursively if files in
        them are removed.
        '''
        test_case = 'p4svn_action_remove_empty_dir_all_files_in_one_change_multi_levels'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_in_one_change',
                                           levels_of_dir=2)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_one_by_one_multi_levels(self):
        '''test that directories should be removed recursively if files in
        them are removed.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one_multi_levels'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_one_by_one',
                                           levels_of_dir=2)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_one_by_one_multi_levels_place_holder(self):
        '''test that directory should not be removed automatically if some
        file in it is still there.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one_multi_levels_place_holder'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_one_by_one',
                                           levels_of_dir=4,
                                           place_holder_file=1)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_remove_all_add_one(self):
        '''test that directory should not be removed automatically if new
        file is added to it.
        '''
        test_case = 'p4svn_action_remove_empty_dir_remove_all_add_one'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_add_one',
                                           levels_of_dir=4)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4svn_action_remove_empty_dir_remove_all_add_one_in_parent(self):
        '''test that directory should not be removed automatically if new
        file is added to it.
        '''
        test_case = 'p4svn_action_remove_empty_dir_remove_all_add_one_in_parent'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_add_one_in_parent',
                                           levels_of_dir=4)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)
    def test_p4_action_rep_special_commitmsgs(self):
        # NOTE(review): placeholder test — commitmsgs is built but never
        # used; replication of non-ASCII commit messages is not implemented.
        commitmsgs = {'utf-8':u'I think, therefore I am.',
                      'cp1251':u'мыслю, следовательно существую.',
                      'gb2312':u'我思故我在.',
                      'latin1':u'La Santé',}
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests for testing utils (psutil.tests namespace).
"""
import collections
import contextlib
import errno
import os
import socket
import stat
import subprocess
from psutil import FREEBSD
from psutil import NETBSD
from psutil import POSIX
from psutil._common import open_binary
from psutil._common import open_text
from psutil._common import supports_ipv6
from psutil.tests import bind_socket
from psutil.tests import bind_unix_socket
from psutil.tests import call_until
from psutil.tests import chdir
from psutil.tests import CI_TESTING
from psutil.tests import create_sockets
from psutil.tests import get_free_port
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import is_namedtuple
from psutil.tests import mock
from psutil.tests import process_namespace
from psutil.tests import PsutilTestCase
from psutil.tests import PYTHON_EXE
from psutil.tests import reap_children
from psutil.tests import retry
from psutil.tests import retry_on_failure
from psutil.tests import safe_mkdir
from psutil.tests import safe_rmpath
from psutil.tests import serialrun
from psutil.tests import system_namespace
from psutil.tests import tcp_socketpair
from psutil.tests import terminate
from psutil.tests import TestMemoryLeak
from psutil.tests import unittest
from psutil.tests import unix_socketpair
from psutil.tests import wait_for_file
from psutil.tests import wait_for_pid
import psutil
import psutil.tests
# ===================================================================
# --- Unit tests for test utilities.
# ===================================================================
class TestRetryDecorator(PsutilTestCase):
    """Tests for the @retry decorator from psutil.tests."""
    @mock.patch('time.sleep')
    def test_retry_success(self, sleep):
        # Fail 3 times out of 5; make sure the decorated fun returns.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            while queue:
                queue.pop()
                1 / 0
            return 1
        queue = list(range(3))
        self.assertEqual(foo(), 1)
        self.assertEqual(sleep.call_count, 3)
    @mock.patch('time.sleep')
    def test_retry_failure(self, sleep):
        # Fail 6 times out of 5; the function is supposed to raise exc.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            while queue:
                queue.pop()
                1 / 0
            return 1
        queue = list(range(6))
        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 5)
    @mock.patch('time.sleep')
    def test_exception_arg(self, sleep):
        # Only the configured exception type is retried; others propagate.
        @retry(exception=ValueError, interval=1)
        def foo():
            raise TypeError
        self.assertRaises(TypeError, foo)
        self.assertEqual(sleep.call_count, 0)
    @mock.patch('time.sleep')
    def test_no_interval_arg(self, sleep):
        # if interval is not specified sleep is not supposed to be called
        @retry(retries=5, interval=None, logfun=None)
        def foo():
            1 / 0
        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 0)
    @mock.patch('time.sleep')
    def test_retries_arg(self, sleep):
        # retries=5 means 5 sleeps before giving up.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            1 / 0
        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 5)
    @mock.patch('time.sleep')
    def test_retries_and_timeout_args(self, sleep):
        # retries and timeout are mutually exclusive.
        self.assertRaises(ValueError, retry, retries=5, timeout=1)
class TestSyncTestUtils(PsutilTestCase):
    """Tests for the synchronization helpers (wait_for_*, call_until)."""
    def test_wait_for_pid(self):
        wait_for_pid(os.getpid())
        nopid = max(psutil.pids()) + 99999
        # Force the retry loop to give up immediately.
        with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
            self.assertRaises(psutil.NoSuchProcess, wait_for_pid, nopid)
    def test_wait_for_file(self):
        testfn = self.get_testfn()
        with open(testfn, 'w') as f:
            f.write('foo')
        wait_for_file(testfn)
        # wait_for_file deletes the file by default.
        assert not os.path.exists(testfn)
    def test_wait_for_file_empty(self):
        testfn = self.get_testfn()
        with open(testfn, 'w'):
            pass
        wait_for_file(testfn, empty=True)
        assert not os.path.exists(testfn)
    def test_wait_for_file_no_file(self):
        testfn = self.get_testfn()
        with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
            self.assertRaises(IOError, wait_for_file, testfn)
    def test_wait_for_file_no_delete(self):
        testfn = self.get_testfn()
        with open(testfn, 'w') as f:
            f.write('foo')
        wait_for_file(testfn, delete=False)
        assert os.path.exists(testfn)
    def test_call_until(self):
        ret = call_until(lambda: 1, "ret == 1")
        self.assertEqual(ret, 1)
class TestFSTestUtils(PsutilTestCase):
    """Tests for the filesystem helpers (open_*, safe_mkdir/rmpath, chdir)."""
    def test_open_text(self):
        with open_text(__file__) as f:
            self.assertEqual(f.mode, 'rt')
    def test_open_binary(self):
        with open_binary(__file__) as f:
            self.assertEqual(f.mode, 'rb')
    def test_safe_mkdir(self):
        testfn = self.get_testfn()
        safe_mkdir(testfn)
        assert os.path.isdir(testfn)
        # Calling it again on an existing dir must not raise.
        safe_mkdir(testfn)
        assert os.path.isdir(testfn)
    def test_safe_rmpath(self):
        # test file is removed
        testfn = self.get_testfn()
        open(testfn, 'w').close()
        safe_rmpath(testfn)
        assert not os.path.exists(testfn)
        # test no exception if path does not exist
        safe_rmpath(testfn)
        # test dir is removed
        os.mkdir(testfn)
        safe_rmpath(testfn)
        assert not os.path.exists(testfn)
        # test other exceptions are raised
        with mock.patch('psutil.tests.os.stat',
                        side_effect=OSError(errno.EINVAL, "")) as m:
            with self.assertRaises(OSError):
                safe_rmpath(testfn)
            assert m.called
    def test_chdir(self):
        testfn = self.get_testfn()
        base = os.getcwd()
        os.mkdir(testfn)
        with chdir(testfn):
            self.assertEqual(os.getcwd(), os.path.join(base, testfn))
        # cwd is restored when the context manager exits.
        self.assertEqual(os.getcwd(), base)
class TestProcessUtils(PsutilTestCase):
    """Tests for the process helpers (spawn_*, reap_children, terminate)."""
    def test_reap_children(self):
        subp = self.spawn_testproc()
        p = psutil.Process(subp.pid)
        assert p.is_running()
        reap_children()
        assert not p.is_running()
        # Internal bookkeeping lists must be cleared as well.
        assert not psutil.tests._pids_started
        assert not psutil.tests._subprocesses_started
    def test_spawn_children_pair(self):
        child, grandchild = self.spawn_children_pair()
        self.assertNotEqual(child.pid, grandchild.pid)
        assert child.is_running()
        assert grandchild.is_running()
        children = psutil.Process().children()
        self.assertEqual(children, [child])
        children = psutil.Process().children(recursive=True)
        self.assertEqual(len(children), 2)
        self.assertIn(child, children)
        self.assertIn(grandchild, children)
        self.assertEqual(child.ppid(), os.getpid())
        self.assertEqual(grandchild.ppid(), child.pid)
        # Killing the parent must leave the grandchild alive.
        terminate(child)
        assert not child.is_running()
        assert grandchild.is_running()
        terminate(grandchild)
        assert not grandchild.is_running()
    @unittest.skipIf(not POSIX, "POSIX only")
    def test_spawn_zombie(self):
        parent, zombie = self.spawn_zombie()
        self.assertEqual(zombie.status(), psutil.STATUS_ZOMBIE)
    def test_terminate(self):
        # terminate() accepts several input kinds and is idempotent.
        # by subprocess.Popen
        p = self.spawn_testproc()
        terminate(p)
        self.assertProcessGone(p)
        terminate(p)
        # by psutil.Process
        p = psutil.Process(self.spawn_testproc().pid)
        terminate(p)
        self.assertProcessGone(p)
        terminate(p)
        # by psutil.Popen
        cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
        p = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        terminate(p)
        self.assertProcessGone(p)
        terminate(p)
        # by PID
        pid = self.spawn_testproc().pid
        terminate(pid)
        self.assertProcessGone(p)
        terminate(pid)
        # zombie
        if POSIX:
            parent, zombie = self.spawn_zombie()
            terminate(parent)
            terminate(zombie)
            self.assertProcessGone(parent)
            self.assertProcessGone(zombie)
class TestNetUtils(PsutilTestCase):
    """Tests for the socket helpers (bind_*, *_socketpair, create_sockets)."""

    def test_bind_socket(self):
        # BUG FIX: this method was named `bind_socket` (no `test_` prefix),
        # so unittest discovery never ran it; renamed so it executes.
        port = get_free_port()
        with contextlib.closing(bind_socket(addr=('', port))) as s:
            self.assertEqual(s.getsockname()[1], port)

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_bind_unix_socket(self):
        name = self.get_testfn()
        sock = bind_unix_socket(name)
        with contextlib.closing(sock):
            self.assertEqual(sock.family, socket.AF_UNIX)
            self.assertEqual(sock.type, socket.SOCK_STREAM)
            self.assertEqual(sock.getsockname(), name)
            assert os.path.exists(name)
            assert stat.S_ISSOCK(os.stat(name).st_mode)
        # UDP
        name = self.get_testfn()
        sock = bind_unix_socket(name, type=socket.SOCK_DGRAM)
        with contextlib.closing(sock):
            self.assertEqual(sock.type, socket.SOCK_DGRAM)

    def test_tcp_socketpair(self):
        # BUG FIX: was `tcp_tcp_socketpair` (doubled prefix, no `test_`),
        # so it was silently skipped by discovery; renamed so it runs.
        addr = ("127.0.0.1", get_free_port())
        server, client = tcp_socketpair(socket.AF_INET, addr=addr)
        with contextlib.closing(server):
            with contextlib.closing(client):
                # Ensure they are connected and the positions are
                # correct.
                self.assertEqual(server.getsockname(), addr)
                self.assertEqual(client.getpeername(), addr)
                self.assertNotEqual(client.getsockname(), addr)

    @unittest.skipIf(not POSIX, "POSIX only")
    @unittest.skipIf(NETBSD or FREEBSD,
                     "/var/run/log UNIX socket opened by default")
    def test_unix_socketpair(self):
        p = psutil.Process()
        num_fds = p.num_fds()
        assert not p.connections(kind='unix')
        name = self.get_testfn()
        server, client = unix_socketpair(name)
        try:
            assert os.path.exists(name)
            assert stat.S_ISSOCK(os.stat(name).st_mode)
            self.assertEqual(p.num_fds() - num_fds, 2)
            self.assertEqual(len(p.connections(kind='unix')), 2)
            self.assertEqual(server.getsockname(), name)
            self.assertEqual(client.getpeername(), name)
        finally:
            client.close()
            server.close()

    def test_create_sockets(self):
        with create_sockets() as socks:
            fams = collections.defaultdict(int)
            types = collections.defaultdict(int)
            for s in socks:
                fams[s.family] += 1
                # work around http://bugs.python.org/issue30204
                types[s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)] += 1
            self.assertGreaterEqual(fams[socket.AF_INET], 2)
            if supports_ipv6():
                self.assertGreaterEqual(fams[socket.AF_INET6], 2)
            if POSIX and HAS_CONNECTIONS_UNIX:
                self.assertGreaterEqual(fams[socket.AF_UNIX], 2)
            self.assertGreaterEqual(types[socket.SOCK_STREAM], 2)
            self.assertGreaterEqual(types[socket.SOCK_DGRAM], 2)
@serialrun
class TestMemLeakClass(TestMemoryLeak):
    """Tests for the TestMemoryLeak.execute() machinery itself."""
    def test_times(self):
        def fun():
            cnt['cnt'] += 1
        cnt = {'cnt': 0}
        self.execute(fun, times=10, warmup_times=15)
        # 10 + 15 warmups + 1 extra call performed by execute().
        self.assertEqual(cnt['cnt'], 26)
    def test_param_err(self):
        self.assertRaises(ValueError, self.execute, lambda: 0, times=0)
        self.assertRaises(ValueError, self.execute, lambda: 0, times=-1)
        self.assertRaises(ValueError, self.execute, lambda: 0, warmup_times=-1)
        self.assertRaises(ValueError, self.execute, lambda: 0, tolerance=-1)
        self.assertRaises(ValueError, self.execute, lambda: 0, retries=-1)
    @retry_on_failure()
    @unittest.skipIf(CI_TESTING, "skipped on CI")
    def test_leak_mem(self):
        ls = []
        def fun(ls=ls):
            ls.append("x" * 24 * 1024)
        try:
            # will consume around 3M in total
            self.assertRaisesRegex(AssertionError, "extra-mem",
                                   self.execute, fun, times=50)
        finally:
            del ls
    def test_unclosed_files(self):
        def fun():
            f = open(__file__)
            self.addCleanup(f.close)
            box.append(f)
        box = []
        kind = "fd" if POSIX else "handle"
        self.assertRaisesRegex(AssertionError, "unclosed " + kind,
                               self.execute, fun)
    def test_tolerance(self):
        def fun():
            ls.append("x" * 24 * 1024)
        ls = []
        times = 100
        # Large tolerance means the deliberate growth is accepted.
        self.execute(fun, times=times, warmup_times=0,
                     tolerance=200 * 1024 * 1024)
        self.assertEqual(len(ls), times + 1)
    def test_execute_w_exc(self):
        def fun():
            1 / 0
        self.execute_w_exc(ZeroDivisionError, fun)
        with self.assertRaises(ZeroDivisionError):
            self.execute_w_exc(OSError, fun)
        def fun():
            pass
        with self.assertRaises(AssertionError):
            self.execute_w_exc(ZeroDivisionError, fun)
class TestTestingUtils(PsutilTestCase):
    """Tests for the process/system API namespaces used by the test suite."""
    def test_process_namespace(self):
        p = psutil.Process()
        ns = process_namespace(p)
        ns.test()
        # Calling a getter through the namespace matches the direct call.
        fun = [x for x in ns.iter(ns.getters) if x[1] == 'ppid'][0][0]
        self.assertEqual(fun(), p.ppid())
    def test_system_namespace(self):
        ns = system_namespace()
        fun = [x for x in ns.iter(ns.getters) if x[1] == 'net_if_addrs'][0][0]
        self.assertEqual(fun(), psutil.net_if_addrs())
class TestOtherUtils(PsutilTestCase):
    """Tests for miscellaneous helpers."""

    def test_is_namedtuple(self):
        # A real namedtuple instance is detected; a plain tuple is not.
        Point = collections.namedtuple('foo', 'a b c')
        assert is_namedtuple(Point(1, 2, 3))
        assert not is_namedtuple(())
if __name__ == '__main__':
    # Run this module's tests through psutil's custom test runner.
    from psutil.tests.runner import run_from_name
    run_from_name(__file__)
|
import pytest
from poetry.packages import Locker as BaseLocker
from poetry.utils._compat import Path
from poetry.utils.exporter import Exporter
class Locker(BaseLocker):
    """Minimal in-memory Locker stub for exercising Exporter.

    NOTE(review): deliberately does not call BaseLocker.__init__; lock data
    is injected via mock_lock_data() instead of being read from disk.
    """
    def __init__(self):
        self._locked = True
        self._content_hash = self._get_content_hash()
    def locked(self, is_locked=True):
        """Set the locked flag and return self for chaining."""
        self._locked = is_locked
        return self
    def mock_lock_data(self, data):
        """Inject the lock-file payload the Exporter will read."""
        self._lock_data = data
    def is_locked(self):
        return self._locked
    def is_fresh(self):
        """Always report the lock data as fresh."""
        return True
    def _get_content_hash(self):
        # Fixed hash; matches the "content-hash" used in the test payloads.
        return "123456789"
@pytest.fixture()
def locker():
    """Provide a fresh stub Locker for each test."""
    return Locker()
def test_exporter_can_export_requirements_txt_with_standard_packages(tmp_dir, locker):
    """Two plain packages export as alphabetically sorted name==version lines."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": [], "bar": []},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
bar==4.5.6
foo==1.2.3
"""
    assert expected == content
def test_exporter_can_export_requirements_txt_with_standard_packages_and_hashes(
    tmp_dir, locker
):
    """Locked hashes are emitted as --hash=sha256:... continuation lines."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": ["12345"], "bar": ["67890"]},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
bar==4.5.6 \\
    --hash=sha256:67890
foo==1.2.3 \\
    --hash=sha256:12345
"""
    assert expected == content
def test_exporter_can_export_requirements_txt_with_standard_packages_and_hashes_disabled(
    tmp_dir, locker
):
    """with_hashes=False suppresses the --hash lines even when hashes exist."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": ["12345"], "bar": ["67890"]},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir), with_hashes=False)
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
bar==4.5.6
foo==1.2.3
"""
    assert expected == content
def test_exporter_exports_requirements_txt_without_dev_packages_by_default(
    tmp_dir, locker
):
    """Packages in the "dev" category are excluded unless dev=True is passed."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "dev",
                    "optional": False,
                    "python-versions": "*",
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": ["12345"], "bar": ["67890"]},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
foo==1.2.3 \\
    --hash=sha256:12345
"""
    assert expected == content
def test_exporter_exports_requirements_txt_with_dev_packages_if_opted_in(
    tmp_dir, locker
):
    """dev=True includes "dev"-category packages in the export."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "dev",
                    "optional": False,
                    "python-versions": "*",
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": ["12345"], "bar": ["67890"]},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir), dev=True)
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
bar==4.5.6 \\
    --hash=sha256:67890
foo==1.2.3 \\
    --hash=sha256:12345
"""
    assert expected == content
def test_exporter_can_export_requirements_txt_with_git_packages(tmp_dir, locker):
    """Git-sourced packages export as editable VCS requirements with a pin."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                    "source": {
                        "type": "git",
                        "url": "https://github.com/foo/foo.git",
                        "reference": "123456",
                    },
                }
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": []},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
-e git+https://github.com/foo/foo.git@123456#egg=foo
"""
    assert expected == content
def test_exporter_can_export_requirements_txt_with_directory_packages(tmp_dir, locker):
    """Directory-sourced packages export as editable local path requirements."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                    "source": {"type": "directory", "url": "../foo", "reference": ""},
                }
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": []},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))
    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()
    expected = """\
-e ../foo
"""
    assert expected == content
def test_exporter_can_export_requirements_txt_with_file_packages(tmp_dir, locker):
    """A file-sourced package (local archive) is exported as an editable path."""
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                    "source": {"type": "file", "url": "../foo.tar.gz", "reference": ""},
                }
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": []},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir))

    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()

    expected = """\
-e ../foo.tar.gz
"""

    assert expected == content
def test_exporter_exports_requirements_txt_with_legacy_packages(tmp_dir, locker):
    """A legacy (custom index) package is exported with its ``--index-url``.

    The dev-category package comes from a non-PyPI simple index, so the
    exported requirement must carry the index URL alongside the hash.
    """
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "foo",
                    "version": "1.2.3",
                    "category": "main",
                    "optional": False,
                    "python-versions": "*",
                },
                {
                    "name": "bar",
                    "version": "4.5.6",
                    "category": "dev",
                    "optional": False,
                    "python-versions": "*",
                    "source": {
                        "type": "legacy",
                        "url": "https://example.com/simple/",
                        "reference": "",
                    },
                },
            ],
            "metadata": {
                "python-versions": "*",
                "content-hash": "123456789",
                "hashes": {"foo": ["12345"], "bar": ["67890"]},
            },
        }
    )
    exporter = Exporter(locker)
    exporter.export("requirements.txt", Path(tmp_dir), dev=True)

    with (Path(tmp_dir) / "requirements.txt").open(encoding="utf-8") as f:
        content = f.read()

    expected = """\
bar==4.5.6 \\
    --index-url https://example.com/simple/ \\
    --hash=sha256:67890
foo==1.2.3 \\
    --hash=sha256:12345
"""

    assert expected == content
|
<reponame>zju3dv/NIID-Net
# ////////////////////////////////////////////////////////////////////////////
# // This file is part of NIID-Net. For more information
# // see <https://github.com/zju3dv/NIID-Net>.
# // If you use this code, please cite the corresponding publications as
# // listed on the above website.
# //
# // Copyright (c) ZJU-SenseTime Joint Lab of 3D Vision. All Rights Reserved.
# //
# // Permission to use, copy, modify and distribute this software and its
# // documentation for educational, research and non-profit purposes only.
# //
# // The above copyright notice and this permission notice shall be included in all
# // copies or substantial portions of the Software.
# //
# // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# // SOFTWARE.
# ////////////////////////////////////////////////////////////////////////////
import os
import shutil
from itertools import chain
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from config import CriteriaTypes
from models.niid_net import NIIDNet
from loss import criteria_intrinsic
class TrainState(object):
    """Mutable bag of per-run training state for NIIDNetManager.

    A fresh instance is created on every ``reset_train_mode`` call so stale
    optimizers/schedulers never leak between runs.
    """
    isTraining = None  # current training state (True/False, None = unset)
    optim_NEM = None  # whether NEM (normal estimation) parameters are optimized
    optim_IID = None  # IID optimization mode: 'full', 'wo_R', 'R', or None
    optimizer = None  # torch optimizer over the selected parameter groups
    scheduler = None  # learning-rate scheduler attached to `optimizer`
    sd_type = None  # scheduler type tag: 'plateau', 'exp' or 'cosine'
class NIIDNetManager(object):
    """Wraps the NIID-Net model: construction, checkpointing, train/eval
    mode switching, optimization steps and inference.

    GPU use is controlled by ``opt.gpu_devices``; training requires at least
    one GPU (there is no CPU training path).
    """
    net_name = None  # human-readable model name
    model = None  # NIIDNet, possibly wrapped in torch.nn.DataParallel
    train_state = TrainState()  # replaced wholesale by reset_train_mode()
    # cnt = 0
    gpu_devices = None  # list of GPU ids, or None for CPU inference
    data_gpu = None  # primary GPU id data is moved to (None on CPU)

    def __init__(self, opt):
        """Build the model and criterion, place them on the configured
        device(s), then load pretrained weights / set up the optimizer.

        :param opt: options object (gpu_devices, isTrain, pretrained_file, ...)
        """
        # Network Name
        self.net_name = 'NIID-Net'
        # Define Model
        self.model = NIIDNet()
        # Criterion(metrics)
        self.IID_criterion = criteria_intrinsic.CGIntrinsics_Criterion()
        # GPU
        self.gpu_devices = opt.gpu_devices
        if self.gpu_devices is None:
            if opt.isTrain:
                raise Exception('Training code does not have CPU version.')
            self.data_gpu = None
            print('\nCPU version')
        else:
            print('\nGPU_devices: %s' % self.gpu_devices)
            # first listed GPU receives the input batches
            self.data_gpu = self.gpu_devices[0]
            self.model = torch.nn.DataParallel(self.model.cuda(self.data_gpu), device_ids=self.gpu_devices)
            self.IID_criterion.cuda(self.data_gpu)
        # Load pre-trained model and set optimizer
        self.reset_train_mode(opt)

    def reset_train_mode(self, opt):
        """ Load pre-trained model parameters and set optimizer and scheduler
        """
        # fresh state: drop any previous optimizer/scheduler
        self.train_state = TrainState()
        # Load Pretrained
        if opt.pretrained_file is not None:
            self._load_models(opt.pretrained_file,
                              load_NEM=opt.load_pretrained_NEM,
                              load_IID_Net=opt.load_pretrained_IID_Net)
        # Set optimizer and scheduler
        if opt.isTrain:
            self._set_optimizers_schedulers(opt)
            self.switch_to_train()
        else:
            self.switch_to_eval()

    def _get_framework_components(self):
        """Return (NEM_coarse, NEM_refine, IID) submodules, unwrapping
        DataParallel's ``.module`` indirection when running on GPU."""
        if self.gpu_devices is not None:
            return self.model.module.NEM_coarse_model, self.model.module.NEM_refine_model, self.model.module.IID_model
        else:
            return self.model.NEM_coarse_model, self.model.NEM_refine_model, self.model.IID_model

    def _load_models(self, file_path, load_NEM, load_IID_Net):
        """Load submodule weights from a checkpoint file.

        :param file_path: checkpoint path; a missing file only logs an error.
        :param load_NEM: load the coarse + refine normal-estimation modules.
        :param load_IID_Net: load the intrinsic decomposition module.
        """
        print('\nLoading models from: %s' % file_path)
        if os.path.isfile(file_path):
            checkpoint = torch.load(file_path, map_location=lambda storage, loc: storage)  # load in CPU memory
        else:
            print("=> no checkpoint found at '{}'. Loading failed!".format(file_path))
            return
        NEM_coarse_model, NEM_refine_model, IID_Net = self._get_framework_components()
        # Load NEM
        if load_NEM:
            # NEM coarse
            d = checkpoint.get('NEM_coarse_state_dict')
            if d is not None:
                # delete_keys = ['stats_mean', 'stats_std']
                # for k in delete_keys:
                #     d.pop(k, None)
                # missing = set(self.GM_coarse_model.state_dict().keys()) - set(d.keys()) - set(delete_keys)
                # if len(missing) > 0:
                #     raise KeyError('missing keys in state_dict: "{}"'.format(missing))
                NEM_coarse_model.load_state_dict(d)
                print(' load NEM_coarse_model')
            else:
                print(' => load NEM_coarse_model failed, no state_dict in checkpoint file!')
            # NEM refine
            d = checkpoint.get('NEM_refine_state_dict')
            if d is not None:
                NEM_refine_model.load_state_dict(d)
                print(' load NEM_refine_model')
            else:
                print(' => load NEM_refine_model failed, no state_dict in checkpoint file!')
        # Load IID-Net
        if load_IID_Net:
            d = checkpoint.get('IID_state_dict')
            if d is not None:
                IID_Net.load_state_dict(d)
                print(' load IID-Net_model')
            else:
                print(' => load IID-Net_model failed, no state_dict in checkpoint file!')

    def name(self):
        """Return the model's display name."""
        return self.net_name

    def switch_to_train(self, flag=True):  # return old training state
        """Put trainable submodules in train mode per the configured
        optimization flags; returns the previous state as ``{'flag': bool}``.
        """
        if not flag:
            return self.switch_to_eval()
        old_isTrain = self.train_state.isTraining
        if (old_isTrain is None) or (not old_isTrain):
            NEM_coarse_model, NEM_refine_model, IID_Net = self._get_framework_components()
            # train(bool): frozen modules stay in eval mode (BN/dropout fixed)
            NEM_coarse_model.train(self.train_state.optim_NEM)
            NEM_refine_model.train(self.train_state.optim_NEM)
            if self.train_state.optim_IID is not None:
                if self.train_state.optim_IID == 'full':
                    IID_Net.train(True)
                elif self.train_state.optim_IID == 'wo_R':
                    # train everything except the reflectance decoder
                    IID_Net.encoder.train(True)
                    IID_Net.decoder_L.train(True)
                    IID_Net.decoder_R.train(False)
                elif self.train_state.optim_IID == 'R':
                    # train only the reflectance decoder
                    IID_Net.encoder.train(False)
                    IID_Net.decoder_L.train(False)
                    IID_Net.decoder_R.train(True)
                else:
                    raise Exception('Undefined optim_IID type: %s' % self.train_state.optim_IID)
            else:
                IID_Net.train(False)
            self.train_state.isTraining = True
        return {'flag': old_isTrain}

    def switch_to_eval(self):  # return old training state
        """Put the whole model in eval mode; returns previous state."""
        old_isTrain = self.train_state.isTraining
        if (old_isTrain is None) or (old_isTrain):
            self.model.eval()
            self.train_state.isTraining = False
        return {'flag': old_isTrain}

    def save(self, path, label, epoch=None, nyu_val=None, iiw_val=None, saw_val=None,
             best_normal=False, best_IID=False):
        """Save a checkpoint named ``<label>_.pth.tar`` under ``path``.

        Also copies it to best_normal_model / best_IID_model when flagged.
        Returns the written file path.
        """
        NEM_coarse_model, NEM_refine_model, IID_Net = self._get_framework_components()
        checkpoint = {
            'epoch': epoch,
            'NEM_coarse_state_dict': NEM_coarse_model.state_dict(),
            'NEM_refine_state_dict': NEM_refine_model.state_dict(),
            'IID_state_dict': IID_Net.state_dict(),
            'nyu_val': nyu_val,
            'iiw_val': iiw_val,
            'saw_val': saw_val,
            # 'optimizer': self.optimizer.state_dict() if self.optimizer is not None else None,
        }
        if not os.path.exists(path):
            os.makedirs(path)
        filepath = os.path.join(path, label+'_.pth.tar')
        torch.save(checkpoint, filepath)
        print('Save checkpoint file: %s' % filepath)
        if best_normal:
            shutil.copyfile(filepath, os.path.join(path, 'best_normal_model.pth.tar'))
        if best_IID:
            shutil.copyfile(filepath, os.path.join(path, 'best_IID_model.pth.tar'))
        return filepath

    def _define_lr_scheduler(self, optimizer, opt):
        """Build the LR scheduler selected by ``opt.scheduler_type``
        ('plateau', 'exp' or 'cosine'); returns (scheduler, type_tag)."""
        if opt.scheduler_type == 'plateau':
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau \
                (optimizer, mode=opt.scheduler_mode, factor=opt.lr_decay, patience=opt.sd_patience, min_lr=opt.min_lr, verbose=True)
            print(' ReduceLROnPlateau: lr_decay:%.6f, sd_patience:%.6f, min_lr:%.6f' %
                  (opt.lr_decay, opt.sd_patience, opt.min_lr))
        elif opt.scheduler_type == 'exp':
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=opt.lr_gamma)
            print(' ExponentialLR: lr_gamma:%.6f' % (opt.lr_gamma,))
        elif opt.scheduler_type == 'cosine':
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.lr_t_max, eta_min=opt.min_lr)
            print(' CosineAnnealingLR: lr_t_max:%.6f, min_lr:%.6f' % (opt.lr_t_max, opt.min_lr))
        else:
            raise Exception('Error: not support lr_scheduler type: %s' % opt.scheduler_type)
        return scheduler, opt.scheduler_type

    def _set_optimizers_schedulers(self, opt):
        """Collect the parameter groups selected by ``opt.optim_NEM`` /
        ``opt.optim_IID``, freeze everything else via ``requires_grad``,
        and create the optimizer + scheduler on ``self.train_state``."""
        # Get parameters
        print('\nGetting parameters to be optimized:')
        NEM_coarse_model, NEM_refine_model, IID_Net = self._get_framework_components()
        learning_rate = opt.lr
        optim_params = []
        if opt.optim_NEM:
            optim_params.append({'params': chain(NEM_coarse_model.parameters(),
                                                 NEM_refine_model.parameters()),
                                 'lr': learning_rate,
                                 'name': 'NEM'})
            print(' parameters from NEM, lr %f' % learning_rate)
        # freeze/unfreeze NEM according to the flag
        for param in chain(NEM_coarse_model.parameters(), NEM_refine_model.parameters()):
            param.requires_grad = opt.optim_NEM
        for param in IID_Net.parameters():
            param.requires_grad = opt.optim_IID is not None
        if opt.optim_IID is not None:
            if opt.optim_IID == 'full':
                optim_params.append({'params': IID_Net.parameters(),
                                     'lr': learning_rate,
                                     'name': 'IID-Net'})
                print(' parameters from IID-Net, lr %f' % learning_rate)
            elif opt.optim_IID == 'wo_R':
                # optimize encoder + shading branch; reflectance decoder frozen
                optim_params.append({'params': chain(IID_Net.encoder.parameters(),
                                                     IID_Net.decoder_L.parameters()),
                                     'lr': learning_rate,
                                     'name': 'IID-Net_S_branch'})
                for param in IID_Net.decoder_R.parameters():
                    param.requires_grad = False
                print(' parameters from IID-Net_S_branch, lr %f' % learning_rate)
            elif opt.optim_IID == 'R':
                # optimize only the reflectance decoder
                optim_params.append({'params': IID_Net.decoder_R.parameters(),
                                     'lr': learning_rate,
                                     'name': 'IID-Net_R_decoder'})
                for param in IID_Net.encoder.parameters():
                    param.requires_grad = False
                for param in IID_Net.decoder_L.parameters():
                    param.requires_grad = False
                print(' parameters from IID-Net_R_decoder, lr %f' % learning_rate)
            else:
                raise Exception('Undefined optim_IID type: %s' % opt.optim_IID)
        # Optimizer and scheduler
        option = opt
        if option.use_SGD:
            optimizer = torch.optim.SGD(optim_params, lr=option.lr, momentum=0.9, weight_decay=option.weight_decay)
        else:
            optimizer = torch.optim.Adam(optim_params, lr=option.lr, betas=(0.9, 0.999),
                                         weight_decay=option.weight_decay)
        # print(' lr:%.6f, weight_decay:%.6f' % (option.lr, option.weight_decay))
        scheduler, sd_type = self._define_lr_scheduler(optimizer, option)
        self.train_state.optim_NEM = opt.optim_NEM
        self.train_state.optim_IID = opt.optim_IID
        self.train_state.optimizer = optimizer
        self.train_state.scheduler = scheduler
        self.train_state.sd_type = sd_type

    def _forward(self, input_srgb, pred_normal, pred_reflect, pred_shading):
        """Run the network; flags select which outputs to compute.
        Returns (normal, reflectance, lighting, shading, rendered image)."""
        out_N, out_R, out_L, out_S, rendered_img = self.model(input_srgb, pred_normal, pred_reflect, pred_shading)
        return out_N, out_R, out_L, out_S, rendered_img

    def optimize(self, inputs, targets, criteria_label, data_set_name):
        """One training step on a batch; returns the scalar loss value.

        :param inputs: dict with 'input_srgb' tensor batch.
        :param targets: dict of ground-truth tensors (non-tensors skipped).
        :param criteria_label: selects which sub-losses are active.
        :param data_set_name: unused here; kept for caller compatibility.
        """
        # switch to train mode
        self.switch_to_train()
        # Input Data
        # NOTE(review): Variable(...) and loss.data[0] are pre-0.4 PyTorch
        # idioms; on modern torch use tensors and loss.item() — confirm
        # the intended torch version before upgrading.
        input_srgb = Variable(inputs['input_srgb'].float().cuda(self.data_gpu), requires_grad=False)
        # Forward
        optimizer = self.train_state.optimizer
        optimizer.zero_grad()
        N, R, L, S, rendered_img = self._forward(input_srgb,
                                                 pred_normal=True,
                                                 pred_reflect=CriteriaTypes.train_reflectance(criteria_label),
                                                 pred_shading=CriteriaTypes.train_shading(criteria_label))
        # torch.save({
        #     'pred_N': N,
        #     'pred_L': L,
        #     'pred_S': S,
        #     'rendered_img': rendered_img,
        #     'targets': targets
        # }, 'test_batch.pth.tar')
        # if self.cnt % 10 == 0:
        #     Visualizer.vis.img_many({
        #         'input_srgb': input_srgb.data.cpu()[0, :, :, :],
        #         'rgb_img': targets['rgb_img'].float()[0, :, :, :],
        #         'R_pred': torch.clamp(R.data.cpu()[0, :, :, :], 0, 1),
        #         'rerendered_img': torch.clamp(rendered_img.data.cpu()[0, :, :, :], 0, 1),
        #     })
        #     self.cnt = 0
        # self.cnt += 1
        # Backward
        if not CriteriaTypes.is_valid(criteria_label):
            raise Exception('The criteria label [%s] is not supported' % criteria_label)
        if CriteriaTypes.train_surface_normal(criteria_label):
            # NOTE(review): this branch leaves `loss` unbound, so the
            # backward() below would raise NameError — presumably the
            # surface-normal loss was removed/lives elsewhere; verify.
            pass
        else:
            targets_var = {k: Variable(targets[k].float().cuda(self.data_gpu), requires_grad=False)
                           for k in targets if torch.is_tensor(targets[k])}
            loss = self.IID_criterion(input_srgb, N, R, L, S, targets_var,
                                      not CriteriaTypes.warm_up_shading(criteria_label),
                                      CriteriaTypes.train_reflectance(criteria_label),
                                      CriteriaTypes.train_shading(criteria_label))
        loss.backward()
        optimizer.step()
        return loss.data[0]

    def predict(self, inputs, normal=False, IID=False):
        """Inference on a batch; returns (N, R, L, S, rendered) tensors
        (entries are None when the corresponding flag was off).

        :param normal: predict surface normals.
        :param IID: predict reflectance and shading.
        """
        # switch to eval mode
        self.switch_to_eval()
        # Input Data
        input_srgb = inputs['input_srgb'].float()
        if self.gpu_devices is not None:
            input_srgb = input_srgb.cuda(self.data_gpu)
        # NOTE(review): volatile=True is the pre-0.4 no-grad mechanism;
        # modern torch would use torch.no_grad() — confirm torch version.
        input_srgb = Variable(input_srgb, volatile=True)
        # Forward
        N, R, L, S, rendered_img = self._forward(input_srgb,
                                                 pred_normal=normal,
                                                 pred_reflect=IID,
                                                 pred_shading=IID)
        if N is not None:
            # re-normalize predicted normals to unit length
            N = N / torch.norm(N, p=2, dim=1, keepdim=True).clamp(min=1e-6)
            N = N.data
        if R is not None:
            R = R.data
        if L is not None:
            L = L.data
        if S is not None:
            # broadcast single-channel shading to 3 channels
            S = (S.repeat(1, 3, 1, 1)).data
        if rendered_img is not None:
            rendered_img = rendered_img.data
        return N, R, L, S, rendered_img

    def predict_IID_np_for_saw_eval(self, saw_img):
        """Run IID prediction on one HWC numpy image and return HWC numpy
        arrays (normal, reflectance, lighting, shading, rendered)."""
        # Input Data: HWC -> CHW, add batch dim
        saw_img = np.transpose(saw_img, (2, 0, 1))
        input_ = torch.from_numpy(saw_img).unsqueeze(0).contiguous().float()
        p_N, p_R, p_L, p_S, rendered_img = self.predict({'input_srgb': input_}, IID=True)
        # CHW -> HWC for each output
        p_N_np = np.transpose(p_N[0, :, :, :].cpu().numpy(), (1, 2, 0))
        p_L_np = np.transpose(p_L[0, :, :, :].cpu().numpy(), (1, 2, 0))
        p_S_np = np.transpose(p_S[0, :, :, :].cpu().numpy(), (1, 2, 0))
        p_R_np = np.transpose(p_R[0, :, :, :].cpu().numpy(), (1, 2, 0))
        rendered_img_np = np.transpose(rendered_img[0, :, :, :].cpu().numpy(), (1, 2, 0))
        return p_N_np, p_R_np, p_L_np, p_S_np, rendered_img_np

    def set_evaluation_results(self, eval):
        """Advance the LR scheduler; 'plateau' consumes the validation
        metric, other scheduler types just step.

        NOTE: parameter name `eval` shadows the builtin (kept for callers).
        """
        scheduler = self.train_state.scheduler
        sd_type = self.train_state.sd_type
        if sd_type == 'plateau':
            scheduler.step(metrics=eval, epoch=None)
        else:
            scheduler.step(epoch=None)
def create_model(opt):
    """Factory: build and announce a NIIDNetManager for the given options."""
    manager = NIIDNetManager(opt)
    print("\nModel [%s] is created!\n" % (manager.name()))
    return manager
|
<reponame>tandriamil/copula-shirley
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from diffprivlib.mechanisms import GaussianAnalytic, GeometricTruncated, LaplaceTruncated
def SampleForVine(data, ratio, x):
    """Randomly split ``data`` into two complementary samples.

    Resamples until the designated sample (``x`` = 1 for the first, anything
    else for the second) has at least two distinct values in every column.

    Args:
        data (DataFrame): A DataFrame.
        ratio (float): The sampling ratio.
        x (int): 1 or 2, sample to force integrity on.

    Returns:
        DataFrame: First sample, with ratio*n_row number of rows.
        DataFrame: Second sample, with (1-ratio)*n_row number of rows.
    """
    while True:
        first = data.sample(frac=ratio)
        second = data.drop(first.index)
        checked = first if x == 1 else second
        smallest = min(len(np.unique(checked[col])) for col in checked.columns)
        if smallest > 1:
            return first, second
def GetECDFs(ecdf_samples, epsilon=0.0, mechanism='Laplace', GS=2, delta=0.001):
    """Build per-column (optionally differentially-private) ECDFs.

    Args:
        ecdf_samples (DataFrame): A DataFrame to estimate the ECDFs from.
        epsilon (float, optional): Privacy budget; 0.0 yields non-DP ECDFs.
        mechanism (str, optional): 'Laplace', 'Gaussian' or 'Geometric'.
        GS (int, optional): Global sensitivity (bounded DP). Defaults to 2.
        delta (float, optional): Delta for the Gaussian mechanism.

    Returns:
        dict: {column name: ECDF} for every column of the input DataFrame.
    """
    n_rows = ecdf_samples.shape[0]
    if not epsilon:
        # epsilon == 0.0 -> plain, noise-free ECDFs
        return {name: ECDF(ecdf_samples[name]) for name in ecdf_samples.columns}
    if mechanism == 'Laplace':
        noise = LaplaceTruncated(epsilon=epsilon, sensitivity=GS, lower=0, upper=n_rows)
    elif mechanism == 'Gaussian':
        noise = GaussianAnalytic(epsilon=epsilon, delta=delta, sensitivity=GS)
    elif mechanism == 'Geometric':
        noise = GeometricTruncated(epsilon=epsilon, sensitivity=GS, lower=0, upper=n_rows)
    return {name: ECDF(ecdf_samples[name], noise) for name in ecdf_samples.columns}
def TransformToPseudoObservations(model_samples, dp_ecdfs):
    """Apply the Probability Integral Transform column-by-column.

    Args:
        model_samples (DataFrame): Training samples for the vine-copula model.
        dp_ecdfs (dict): {column name: ECDF} as produced by GetECDFs.

    Returns:
        matrix: (Noisy) pseudo-observations, one column per input column.
    """
    per_column = [dp_ecdfs[name].fct(model_samples[name])
                  for name in model_samples.columns]
    return np.column_stack(per_column)
def TransformToNatualScale(pseudo_samples, dp_ecdfs):
    """Map pseudo-observations back to the data's natural scale (inverse PIT).

    Args:
        pseudo_samples (DataFrame): Pseudo-observations from the vine model.
        dp_ecdfs (dict): {column name: ECDF} as produced by GetECDFs.

    Returns:
        DataFrame: Samples on the natural scale given by the inverse ECDFs.
    """
    stacked = np.column_stack([dp_ecdfs[name].inv(pseudo_samples[name])
                               for name in pseudo_samples.columns])
    result = pd.DataFrame(stacked)
    result.columns = pseudo_samples.columns
    return result
class ECDF(object):
    """Estimate the CDF of the input. If a DP mechanism is given, estimate a noisy CDF from histogram.

    Args:
        x (1-D array): An array containing observations to estimate the ECDF from.
        dpmechanism (object, optional): A diffprivlib.mechanisms class object to sample DP noise from. Defaults to None.

    Attributes:
        xs (1-D array): The x-axis values of the ECDF.
        ys (1-D array): The y-axis values of the ECDF.
        fct (function): The step function interpolated from the ECDF.
        inv (function): The step function of the inverse ECDF.
    """
    def __init__(self, x, dpmechanism=None):
        def histogram(values):
            # Single-pass histogram: np.unique(..., return_counts=True)
            # replaces the original dict comprehension that recomputed
            # np.unique(values) for every bin (O(n*k) -> O(n log n)).
            keys, counts = np.unique(values, return_counts=True)
            return keys.astype(float), counts.astype(float)

        self.xs = np.array(x, copy=True)
        if dpmechanism is None:
            # Plain ECDF over the sorted, flattened observations.
            self.xs = np.reshape(self.xs, self.xs.size)
            self.xs = np.sort(self.xs)
            # NOTE(review): the "+ 1" shifts the top of the curve above 1.0
            # (clipped only by interp1d's fill) — looks deliberate in the
            # original; confirm against the copula-shirley reference.
            self.ys = (np.searchsorted(self.xs, self.xs, side='right') + 1)/self.xs.size
            self.ys = np.unique(self.ys)
            self.xs = np.unique(self.xs)
        else:
            # Noisy ECDF: randomise per-bin counts, then renormalise.
            self.xs, self.ys = histogram(x)
            # builtin int() replaces np.int, which was deprecated in
            # NumPy 1.20 and removed in 1.24 (crashed on modern NumPy).
            self.ys = np.fromiter((dpmechanism.randomise(int(c)) for c in self.ys),
                                  self.ys.dtype, count=self.ys.size)
            self.ys[self.ys < 0.0] = 0.0  # noise can push counts negative
            self.ys = np.cumsum(self.ys)/np.sum(self.ys)
            self.ys[self.ys > 1.0] = 1.0  # clamp cumulative mass to [0, 1]
        # Step interpolants: fct maps value -> probability, inv the reverse.
        self.fct = interp1d(self.xs, self.ys, kind='previous', fill_value=(0.0, 1.0), bounds_error=False, copy=True)
        self.inv = interp1d(self.ys, self.xs, kind='next', fill_value=(np.min(self.xs), np.max(self.xs)), bounds_error=False, copy=True)
# coding: utf-8
import datetime
import random
import pytest
from src.domain.exchange_rate import (
CurrencyExchangeAmountEntity, TimeWeightedRateEntity)
from src.interface.serializers.exchange_rate import (
CurrencySerializer, CurrencyExchangeRateConvertSerializer,
CurrencyExchangeRateAmountSerializer, CurrencyExchangeRateListSerializer,
CurrencyExchangeRateSerializer, TimeWeightedRateListSerializer,
TimeWeightedRateSerializer)
from tests.fixtures import currency, exchange_rate
@pytest.mark.unit
def test_currency_serializer(currency):
    """CurrencySerializer.dump mirrors the entity's code/name/symbol."""
    valid_data = CurrencySerializer().dump(currency)
    assert valid_data['code'] == currency.code
    assert valid_data['name'] == currency.name
    assert valid_data['symbol'] == currency.symbol
@pytest.mark.unit
def test_currency_exchange_rate_convert_serializer(exchange_rate):
    """Valid convert payloads load back with their fields unchanged."""
    data = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'amount': round(random.uniform(1, 100), 2)
    }
    valid_data = CurrencyExchangeRateConvertSerializer().load(data)
    assert valid_data['source_currency'] == data['source_currency']
    assert valid_data['exchanged_currency'] == data['exchanged_currency']
    assert valid_data['amount'] == data['amount']
@pytest.mark.unit
def test_currency_exchange_rate_convert_serializer_validation_error(exchange_rate):
    """Wrong-typed convert payloads produce an 'errors' entry per field."""
    data = {
        'source_currency': exchange_rate,  # entity, not a currency code
        'exchanged_currency': exchange_rate,
        'amount': 'amount'  # non-numeric
    }
    invalid_data = CurrencyExchangeRateConvertSerializer().load(data)
    assert 'errors' in invalid_data
    assert all([key in invalid_data['errors'].keys() for key in data.keys()])
@pytest.mark.unit
def test_currency_exchange_rate_amount_serializer(exchange_rate):
    """Amount serializer dumps the exchange-amount entity field-for-field."""
    data = CurrencyExchangeAmountEntity(
        exchanged_currency=exchange_rate.exchanged_currency,
        exchanged_amount=round(random.uniform(1, 100), 2),
        rate_value=exchange_rate.rate_value
    )
    valid_data = CurrencyExchangeRateAmountSerializer().dump(data)
    assert valid_data['exchanged_currency'] == data.exchanged_currency
    assert valid_data['exchanged_amount'] == data.exchanged_amount
    assert valid_data['rate_value'] == data.rate_value
@pytest.mark.unit
def test_currency_exchange_rate_list_serializer(exchange_rate):
    """List serializer parses ISO date strings back into date objects."""
    exchanged_currency = exchange_rate.exchanged_currency * random.randint(1, 5)
    date_from = datetime.date.today() + datetime.timedelta(days=-5)
    date_to = datetime.date.today()
    data = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchanged_currency,
        'date_from': date_from.strftime('%Y-%m-%d'),
        'date_to': date_to.strftime('%Y-%m-%d')
    }
    valid_data = CurrencyExchangeRateListSerializer().load(data)
    assert valid_data['source_currency'] == data['source_currency']
    assert valid_data['date_from'] == date_from
    assert valid_data['date_to'] == date_to
@pytest.mark.unit
def test_currency_exchange_rate_list_serializer_validation_error(exchange_rate):
    """Wrong-typed list payloads (entity + date objects instead of strings)
    produce an 'errors' entry per submitted field."""
    data = {
        'source_currency': exchange_rate,
        'date_from': datetime.date.today() + datetime.timedelta(days=-5),
        'date_to': datetime.date.today()
    }
    invalid_data = CurrencyExchangeRateListSerializer().load(data)
    assert 'errors' in invalid_data
    assert all([key in invalid_data['errors'].keys() for key in data.keys()])
@pytest.mark.unit
def test_currency_exchange_rate_serializer(exchange_rate):
    """Exchange-rate serializer dumps the rate entity field-for-field."""
    data = exchange_rate
    valid_data = CurrencyExchangeRateSerializer().dump(data)
    assert valid_data['exchanged_currency'] == data.exchanged_currency
    assert valid_data['valuation_date'] == data.valuation_date
    assert valid_data['rate_value'] == data.rate_value
@pytest.mark.unit
def test_time_weighted_rate_list_serializer(exchange_rate):
    """TWR list serializer parses ISO date strings back into date objects."""
    date_from = datetime.date.today() + datetime.timedelta(days=-5)
    date_to = datetime.date.today()
    data = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'date_from': date_from.strftime('%Y-%m-%d'),
        'date_to': date_to.strftime('%Y-%m-%d')
    }
    valid_data = TimeWeightedRateListSerializer().load(data)
    assert valid_data['source_currency'] == data['source_currency']
    assert valid_data['exchanged_currency'] == data['exchanged_currency']
    assert valid_data['date_from'] == date_from
    assert valid_data['date_to'] == date_to
@pytest.mark.unit
def test_time_weighted_rate_list_serializer_validation_error(exchange_rate):
    """Wrong-typed TWR list payloads produce an 'errors' entry per field.

    NOTE(review): 'exchanged_currence' looks like a typo for
    'exchanged_currency' — the final assert then relies on the serializer
    reporting the unknown key under that misspelled name; confirm intent.
    """
    data = {
        'source_currency': exchange_rate,
        'exchanged_currence': exchange_rate,
        'date_from': datetime.date.today(),
        'date_to': datetime.date.today()
    }
    invalid_data = TimeWeightedRateListSerializer().load(data)
    assert 'errors' in invalid_data
    assert all([key in invalid_data['errors'].keys() for key in data.keys()])
@pytest.mark.unit
def test_time_weighted_rate_serializer():
    """TWR serializer dumps the time-weighted-rate entity unchanged."""
    data = TimeWeightedRateEntity(
        time_weighted_rate=random.uniform(0.5, 1.5)
    )
    valid_data = TimeWeightedRateSerializer().dump(data)
    assert valid_data['time_weighted_rate'] == data.time_weighted_rate
|
<reponame>bowlofstew/client<gh_stars>10-100
from biicode.common.settings.arduinosettings import ArduinoSettings
from biicode.common.exception import BiiException
from biicode.client.setups.finders.arduino_sdk_finder import (valid_arduino_sdk_version,
find_arduino_sdks, print_sdks)
from biicode.client.dev.hardware.arduino.arduino_port_utils import refresh_port,\
get_boards_need_reset
# SDK identifiers: the Intel Galileo SDK and the 1.5.5 beta SDK version.
SDK_GALILEO = 'galileo'
SDK_BETA = '1.5.5'

# Board names accepted by the board-selection wizard (_get_board).
Arduino_boards = ['uno', 'leonardo', 'yun', 'zum', 'atmega328', 'diecimila', 'nano328',
                  'nano', 'mega2560', 'mega', 'esplora', 'micro', 'mini328',
                  'mini', 'ethernet', 'fio', 'bt328', 'bt', 'LilyPadUSB', 'lilypad328',
                  'lilypad', 'pro5v328', 'pro5v', 'pro328', 'pro', 'atmega168',
                  'atmega8', 'robotControl', 'robotMotor'
                  ]

# Boards aliased to another board's toolchain configuration.
board_mapping = {'zum': 'bt328'}
def arduino_settings_args(user_io, args, settings):
    ''' Method to select (without any wizard) from command line your SDK,
        port, board and automatic reset.
        Port and SDK arguments support "default" option to make automatically
        the setting selection

        :param user_io: UserIO object used for console output/prompts
        :param args: parsed command-line arguments (sdk, port, board, need_reset)
        :param settings: hive Settings; settings.arduino is created/updated in place
    '''
    if settings.arduino is None:
        settings.arduino = ArduinoSettings()
    # SDK: fall back to the auto-detected default when neither the CLI
    # argument nor the stored setting provides one.
    if not args.sdk and not settings.arduino.sdk:
        _, default_sdk = get_valid_sdks(user_io, settings.arduino)
        user_io.out.success("Your default SDK is: %s" % default_sdk)
        settings.arduino.sdk = default_sdk
    else:
        settings.arduino.sdk = args.sdk or settings.arduino.sdk
    try:
        valid_version = valid_arduino_sdk_version(settings.arduino.sdk)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; any failure to read the version means "invalid".
        valid_version = None
    if not valid_version:
        raise BiiException("No valid Arduino SDK version could be found."
                           " Check if /your_SDK_path/lib/version.txt file exists")
    settings.arduino.version = valid_version
    if args.need_reset:
        settings.arduino.automatic_reset = True if args.need_reset == 'true' else None
    # Port: scan when unset or explicitly requested with 'auto'.
    if (not args.port and not settings.arduino.port) or args.port == 'auto':
        settings.arduino.port = refresh_port(user_io,
                                             settings.arduino.port,
                                             reset=settings.arduino.automatic_reset,
                                             wizard=False)
    else:
        settings.arduino.port = args.port or settings.arduino.port
    settings.arduino.board = args.board or settings.arduino.board
def arduino_settings_wizard(user_io, settings):
    '''gets arduino settings from user. The port will always be scanned
    param user_io: UserIO object
    param settings: existing hive Settings
    '''
    if settings.arduino is None:
        settings.arduino = ArduinoSettings()
    # interactive SDK + board selection
    _arduino_sdk_wizard(user_io, settings.arduino)
    _get_board(user_io, settings.arduino)
    # enable automatic reset only for boards known to require it
    ports_need_reset = get_boards_need_reset(settings.arduino.sdk, settings.arduino.version)
    settings.arduino.automatic_reset = True if settings.arduino.board in ports_need_reset \
        else None
    settings.arduino.port = refresh_port(user_io,
                                         settings.arduino.port,
                                         reset=settings.arduino.automatic_reset,
                                         wizard=True)
def _arduino_sdk_wizard(user_io, arduino_settings):
    ''' User'll set his Arduino SDK path or will select the
        auto-detection of the Arduino SDK path located in
        biicode_env folder.
    '''
    sdks, default_sdk = get_valid_sdks(user_io, arduino_settings)
    sdk_path = user_io.request_string("Enter SDK number or type path", default_sdk)
    sdk_path = sdk_path or default_sdk or "None"
    try:
        # numeric answer selects an entry from the detected-SDK list
        number = int(sdk_path)
    except ValueError:
        # otherwise the answer is treated as a filesystem path
        selected_sdk = sdk_path
        selected_version = valid_arduino_sdk_version(sdk_path, user_io.out)
        if not selected_version:
            user_io.out.error("SDK not valid: %s" % sdk_path)
            selected_version = "None"
    else:
        try:
            selected_sdk, selected_version = sdks[number]
        except IndexError:
            raise BiiException("Bad Index %d, please select number or type path" % number)
    # normalize Windows backslashes for storage
    arduino_settings.sdk = selected_sdk.replace('\\', '/')
    arduino_settings.version = selected_version
def get_valid_sdks(user_io, arduino_settings):
    """Detect installed Arduino SDKs and pick a default.

    SDKs whose path contains a space are rejected (with a warning). The
    default is the already-configured SDK if any, otherwise the first
    detected SDK with version 1.0.6, otherwise None.

    Returns a tuple ``(usable_sdks, default_sdk)``.
    """
    detected = find_arduino_sdks()
    if not detected:
        user_io.out.warn("Biicode couldn't find a default Arduino SDK path")
    usable = []
    for sdk_path, version in detected:
        if " " not in sdk_path:
            usable.append((sdk_path, version))
            continue
        user_io.out.warn("Detected SDK(%s) in %s\nbut paths with spaces are not valid.\n"
                         "Please install it in another location" % (version, sdk_path))
    print_sdks(user_io.out, usable)
    default_sdk = arduino_settings.sdk or None
    if default_sdk is None:
        default_sdk = next((path for path, version in usable if version == "1.0.6"),
                           None)
    return usable, default_sdk
def _get_board(user_io, arduino_settings):
    """Prompt for a board name until one is entered; '/o' lists options.

    Unknown boards are accepted with a warning. The choice is stored on
    ``arduino_settings.board``.
    """
    # FIX: the original aliased the module-level Arduino_boards list and
    # called .sort() on it, mutating shared module state on every call.
    # sorted() works on a copy and leaves the constant untouched.
    boards = sorted(Arduino_boards)
    while True:
        selected_board = user_io.request_string("Enter board (/o list supported options)",
                                                arduino_settings.board)
        if selected_board and selected_board != '/o':
            if selected_board not in boards:
                user_io.out.warn("The board, %s, isn't in current supported Arduino boards "
                                 "list options. Make sure you've all the necessary SW installed "
                                 "in your Arduino SDK version" % selected_board)
            arduino_settings.board = selected_board
            break
        user_io.print_options(options=boards)
|
<filename>knockoff/factory/prototype.py<gh_stars>10-100
# Copyright 2021-present, Nike, Inc.
# All rights reserved.
#
# This source code is licensed under the Apache-2.0 license found in
# the LICENSE file in the root directory of this source tree.
import os
import six
import logging
import itertools
from operator import itemgetter
import pandas as pd
from knockoff.factory.component import ComponentFunctionFactory
from knockoff.factory.counterfeit import KNOCKOFF_ATTEMPT_LIMIT_ENV
from knockoff.utilities.functools import call_with_args_kwargs
logger = logging.getLogger(__name__)
def load_prototype_from_components(source, assembler, node_name):
    """Generate ``source.config['number']`` unique records by assembling
    component values, returning them as a DataFrame (one column per
    component).

    Each attempt samples one row per dependency node, fills remaining
    components from their configured strategy ('knockoff' sample,
    'function' call, or a generator node), and keeps the record only if it
    satisfies every configured uniqueness constraint. Gives up after
    KNOCKOFF_ATTEMPT_LIMIT_ENV attempts (default 1,000,000).

    :param source: config holder with 'components', 'number' and optional
        'unique' constraint lists.
    :param assembler: provides the blueprint graph and dependency parsing.
    :param node_name: name of the prototype node being generated.
    :raises Exception: on unknown strategy or when the attempt limit is hit.
    """
    limit = int(os.environ.get(KNOCKOFF_ATTEMPT_LIMIT_ENV, 1000000))
    # (component name, dependency string) pairs, flattened over components
    dependencies = [(component['name'],
                     dep) for component in source.config['components']
                    for dep in component['source'].get('dependencies',
                                                       [])]

    def sort(name_dep):
        # key = (node_type, node_name) of the dependency; groupby below
        # requires the list sorted by the same key
        return tuple(assembler.parse_dependency(name_dep[1])[:-1])

    dependencies.sort(key=sort)
    names = [component['name'] for component in source.config['components']]
    name_to_source = {component['name']: component['source']
                      for component in source.config['components']}
    i = 0
    records = []
    # create mapping of unique key indices to unique keys
    unique_constraints = {tuple(constraint): set()
                          for constraint in (source.config.get('unique',
                                                               []))}
    while len(records) < source.config['number'] and i < limit:
        i += 1
        record = {}
        # dependency values destined for 'function'-strategy components
        component_to_function_input = {}
        for (node_type, name), group in itertools.groupby(dependencies,
                                                          sort):
            node = assembler.blueprint.get_node(node_type, name)
            # one shared sample per dependency node, so all components
            # depending on it see a consistent row
            sample = node.data.sample(1)
            for component_name, dep in group:
                strategy = name_to_source[component_name]['strategy']
                if strategy == "knockoff":
                    (dep_type,
                     dep_name,
                     dep_sub_name) = assembler.parse_dependency(dep)
                    col = dep_sub_name or 0
                    record[component_name] = sample[col].values[0]
                elif strategy == "function":
                    (dep_type,
                     dep_name,
                     dep_sub_name) = assembler.parse_dependency(dep)
                    col = dep_sub_name or 0
                    (component_to_function_input
                     .setdefault(component_name,
                                 {}))[dep] = sample[col].values[0]
                else:
                    raise Exception("strategy not recognized: {}"
                                    .format(strategy))
        function_factory = ComponentFunctionFactory()
        # fill components not already provided by a dependency sample
        for component_name, source_config in six.iteritems(name_to_source):
            if component_name in record:
                continue
            strategy = source_config['strategy']
            if strategy == 'function':
                func = function_factory.get_resource(source_config['function'])
                record[component_name] = \
                    handle_function(func,
                                    input_args=(source_config
                                                .get('input_args')),
                                    input_kwargs=(source_config
                                                  .get('input_kwargs')),
                                    func_inputs_from_dependencies=
                                    component_to_function_input
                                    .get(component_name))
            else:
                node = assembler.blueprint.get_node("component", "{}.{}"
                                                    .format(node_name,
                                                            component_name))
                record[component_name] = next(node.generator)
        if not _satisfies_unique_constraints(record, unique_constraints):
            continue
        # record accepted: remember its keys for future uniqueness checks
        for constraint in six.iterkeys(unique_constraints):
            unique_constraints[constraint].add(itemgetter(*constraint)(record))
        records.append(itemgetter(*names)(record))
    # NOTE(review): if the final accepted record lands exactly on attempt
    # `limit`, this still raises even though enough records were produced —
    # confirm whether `i >= limit` should also check len(records).
    if i >= limit:
        logger.error("Could not generate prototype={}"
                     .format(node_name))
        raise Exception("Attempts to create unique set reached: {}"
                        .format(limit))
    return pd.DataFrame(records, columns=names)
def handle_function(func, input_args=None, input_kwargs=None,
                    func_inputs_from_dependencies=None):
    """Resolve configured inputs and invoke ``func``.

    Each input config is a dict with a 'type' ('constant' or 'dependency')
    and a 'value'; kwargs configs additionally carry a 'key'. Dependency
    values are looked up in ``func_inputs_from_dependencies``. Configs with
    an unrecognized 'type' are silently skipped (matching prior behavior).
    """
    positional = []
    keyword = {}
    for cfg in input_args or []:
        kind = cfg['type']
        if kind == "constant":
            positional.append(cfg['value'])
        elif kind == 'dependency':
            positional.append(func_inputs_from_dependencies[cfg['value']])
    for cfg in input_kwargs or []:
        kind = cfg['type']
        if kind == "constant":
            keyword[cfg['key']] = cfg['value']
        elif kind == 'dependency':
            keyword[cfg['key']] = func_inputs_from_dependencies[cfg['value']]
    return call_with_args_kwargs(func, tuple(positional), keyword)
def _satisfies_unique_constraints(record, constraints):
valid_record = True
for constraint in six.iterkeys(constraints):
if itemgetter(*constraint)(record) in constraints[constraint]:
valid_record = False
if not valid_record:
break
return valid_record
|
# Repository: mvdoc/mne-python
# Author: <NAME>, <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from ...utils import requires_sklearn_0_15
from ..search_light import _SearchLight, _GeneralizationLight
from .. import Vectorizer
def make_data():
    """Create a random (epochs, channels, times) dataset with binary labels.

    A fresh random channel pattern is added to class-0 epochs and subtracted
    from class-1 epochs at every time point, so the classes are separable at
    each time sample.
    """
    n_epochs, n_chan, n_time = 50, 32, 10
    X = np.random.rand(n_epochs, n_chan, n_time)
    y = np.arange(n_epochs) % 2
    mask0, mask1 = (y == 0), (y == 1)
    for t in range(n_time):
        offset = np.random.randn(n_chan)
        X[mask0, :, t] += offset
        X[mask1, :, t] -= offset
    return X, y
@requires_sklearn_0_15
def test_SearchLight():
    """Test _SearchLight: init validation, fit/predict shapes, scoring,
    parallelism, pipelining and n-dimensional feature spaces."""
    from sklearn.linear_model import Ridge, LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.metrics import roc_auc_score

    X, y = make_data()
    n_epochs, _, n_time = X.shape
    # init: a non-estimator argument must be rejected
    assert_raises(ValueError, _SearchLight, 'foo')
    sl = _SearchLight(Ridge())
    sl = _SearchLight(LogisticRegression())
    # fit: repr reflects fitted state (one estimator per time point)
    assert_equal(sl.__repr__()[:14], '<_SearchLight(')
    sl.fit(X, y)
    assert_equal(sl.__repr__()[-28:], ', fitted with 10 estimators>')
    # mismatched n_samples and missing time axis must both raise
    assert_raises(ValueError, sl.fit, X[1:], y)
    assert_raises(ValueError, sl.fit, X[:, :, 0], y)
    # transforms: predicting with a different number of time points must fail
    assert_raises(ValueError, sl.predict, X[:, :, :2])
    y_pred = sl.predict(X)
    assert_true(y_pred.dtype == int)
    assert_array_equal(y_pred.shape, [n_epochs, n_time])
    y_proba = sl.predict_proba(X)
    assert_true(y_proba.dtype == float)
    assert_array_equal(y_proba.shape, [n_epochs, n_time, 2])
    # score: one score per time point
    score = sl.score(X, y)
    assert_array_equal(score.shape, [n_time])
    assert_true(np.sum(np.abs(score)) != 0)
    assert_true(score.dtype == float)
    # change score method: callable scorer
    sl1 = _SearchLight(LogisticRegression(), scoring=roc_auc_score)
    sl1.fit(X, y)
    score1 = sl1.score(X, y)
    assert_array_equal(score1.shape, [n_time])
    assert_true(score1.dtype == float)
    # NOTE(review): compares the searchlight AUC at time 0 against an AUC
    # computed on the flattened (epochs, chan*time) data — confirm intended.
    X_2d = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
    lg_score = LogisticRegression().fit(X_2d, y).predict_proba(X_2d)[:, 1]
    assert_equal(score1[0], roc_auc_score(y, lg_score))
    # string scorer must give the same result as the callable
    sl2 = _SearchLight(LogisticRegression(), scoring='roc_auc')
    sl2.fit(X, y)
    assert_array_equal(score1, sl2.score(X, y))
    # unknown scorer name raises at score time, not at fit time
    sl = _SearchLight(LogisticRegression(), scoring='foo')
    sl.fit(X, y)
    assert_raises(ValueError, sl.score, X, y)
    sl = _SearchLight(LogisticRegression())
    assert_equal(sl.scoring, None)
    # n_jobs
    sl = _SearchLight(LogisticRegression(), n_jobs=2)
    sl.fit(X, y)
    sl.predict(X)
    sl.score(X, y)
    # n_jobs > n_estimators (single time point with 2 jobs)
    sl.fit(X[..., [0]], y)
    sl.predict(X[..., [0]])
    # pipeline

    class _LogRegTransformer(LogisticRegression):
        # XXX needs transformer in pipeline to get first proba only
        def transform(self, X):
            return super(_LogRegTransformer, self).predict_proba(X)[..., 1]

    pipe = make_pipeline(_SearchLight(_LogRegTransformer()),
                         LogisticRegression())
    pipe.fit(X, y)
    pipe.predict(X)
    # n-dimensional feature space: Vectorizer flattens (3, 4) features and
    # predictions must not depend on n_jobs
    X = np.random.rand(10, 3, 4, 2)
    y = np.arange(10) % 2
    y_preds = list()
    for n_jobs in [1, 2]:
        pipe = _SearchLight(make_pipeline(Vectorizer(), LogisticRegression()),
                            n_jobs=n_jobs)
        y_preds.append(pipe.fit(X, y).predict(X))
        features_shape = pipe.estimators_[0].steps[0][1].features_shape_
        assert_array_equal(features_shape, [3, 4])
    assert_array_equal(y_preds[0], y_preds[1])
@requires_sklearn_0_15
def test_GeneralizationLight():
    """Test _GeneralizationLight: fit/predict shapes across train/test time
    generalization, scoring, parallelism and n-dimensional features."""
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import LogisticRegression

    X, y = make_data()
    n_epochs, _, n_time = X.shape
    # fit: one estimator per training time point, reflected in the repr
    gl = _GeneralizationLight(LogisticRegression())
    assert_equal(gl.__repr__()[:22], '<_GeneralizationLight(')
    gl.fit(X, y)
    assert_equal(gl.__repr__()[-28:], ', fitted with 10 estimators>')
    # transforms: predictions are (epochs, train_times, test_times)
    y_pred = gl.predict(X)
    assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
    assert_true(y_pred.dtype == int)
    y_proba = gl.predict_proba(X)
    assert_true(y_proba.dtype == float)
    assert_array_equal(y_proba.shape, [n_epochs, n_time, n_time, 2])
    # transform to different datasize: test axis may differ from train axis
    y_pred = gl.predict(X[:, :, :2])
    assert_array_equal(y_pred.shape, [n_epochs, n_time, 2])
    # score: (train_times, test_times) matrix
    score = gl.score(X[:, :, :3], y)
    assert_array_equal(score.shape, [n_time, 3])
    assert_true(np.sum(np.abs(score)) != 0)
    assert_true(score.dtype == float)
    # n_jobs
    gl = _GeneralizationLight(LogisticRegression(), n_jobs=2)
    gl.fit(X, y)
    y_pred = gl.predict(X)
    assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
    score = gl.score(X, y)
    assert_array_equal(score.shape, [n_time, n_time])
    # n_jobs > n_estimators (single time point with 2 jobs)
    gl.fit(X[..., [0]], y)
    gl.predict(X[..., [0]])
    # n-dimensional feature space: Vectorizer flattens (3, 4) features and
    # predictions must not depend on n_jobs
    X = np.random.rand(10, 3, 4, 2)
    y = np.arange(10) % 2
    y_preds = list()
    for n_jobs in [1, 2]:
        pipe = _GeneralizationLight(
            make_pipeline(Vectorizer(), LogisticRegression()), n_jobs=n_jobs)
        y_preds.append(pipe.fit(X, y).predict(X))
        features_shape = pipe.estimators_[0].steps[0][1].features_shape_
        assert_array_equal(features_shape, [3, 4])
    assert_array_equal(y_preds[0], y_preds[1])
|
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the base classes for the skills."""
import importlib.util
import inspect
import logging
import os
import re
import sys
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
from typing import Optional, List, Dict, Any, cast
from aea.connections.base import ConnectionStatus
from aea.configurations.base import BehaviourConfig, HandlerConfig, TaskConfig, SharedClassConfig, SkillConfig, \
ProtocolId, DEFAULT_SKILL_CONFIG_FILE
from aea.configurations.loader import ConfigLoader
from aea.context.base import AgentContext
from aea.crypto.ledger_apis import LedgerApis
from aea.decision_maker.base import OwnershipState, Preferences, GoalPursuitReadiness
from aea.mail.base import OutBox
from aea.protocols.base import Message
logger = logging.getLogger(__name__)
class SkillContext:
    """This class implements the context of a skill.

    Thin read-only facade over the agent-level context: it exposes the
    subset of agent state a skill may access, plus the skill's own inbound
    message queue.
    """

    def __init__(self, agent_context: AgentContext):
        """
        Initialize a skill context.

        :param agent_context: the agent's context
        """
        self._agent_context = agent_context
        self._in_queue = Queue()  # type: Queue
        # Back-reference to the owning Skill; populated by Skill.from_dir().
        self._skill = None  # type: Optional[Skill]

    @property
    def agent_name(self) -> str:
        """Get agent name."""
        return self._agent_context.agent_name

    @property
    def agent_public_key(self) -> str:
        """Get public key."""
        return self._agent_context.public_key

    @property
    def agent_public_keys(self) -> Dict[str, str]:
        """Get public keys."""
        return self._agent_context.public_keys

    @property
    def agent_addresses(self) -> Dict[str, str]:
        """Get addresses."""
        return self._agent_context.addresses

    @property
    def agent_address(self) -> str:
        """Get address."""
        return self._agent_context.address

    @property
    def connection_status(self) -> ConnectionStatus:
        """Get connection status."""
        return self._agent_context.connection_status

    @property
    def outbox(self) -> OutBox:
        """Get outbox."""
        return self._agent_context.outbox

    @property
    def message_in_queue(self) -> Queue:
        """Get message in queue (messages addressed to this skill)."""
        return self._in_queue

    @property
    def decision_maker_message_queue(self) -> Queue:
        """Get message queue of decision maker."""
        return self._agent_context.decision_maker_message_queue

    @property
    def agent_ownership_state(self) -> OwnershipState:
        """Get ownership state."""
        return self._agent_context.ownership_state

    @property
    def agent_preferences(self) -> Preferences:
        """Get preferences."""
        return self._agent_context.preferences

    @property
    def agent_goal_pursuit_readiness(self) -> GoalPursuitReadiness:
        """Get the goal pursuit readiness."""
        return self._agent_context.goal_pursuit_readiness

    @property
    def ledger_apis(self) -> LedgerApis:
        """Get ledger APIs."""
        return self._agent_context.ledger_apis

    @property
    def handlers(self) -> Optional[List['Handler']]:
        """Get handlers of the skill."""
        assert self._skill is not None, "Skill not initialized."
        return self._skill.handlers

    @property
    def behaviours(self) -> Optional[List['Behaviour']]:
        """Get behaviours of the skill."""
        assert self._skill is not None, "Skill not initialized."
        return self._skill.behaviours

    @property
    def tasks(self) -> Optional[List['Task']]:
        """Get tasks of the skill."""
        assert self._skill is not None, "Skill not initialized."
        return self._skill.tasks

    def __getattr__(self, item) -> Any:
        """Get attribute.

        NOTE: __getattr__ only runs after normal lookup has already failed,
        so delegating back to __getattribute__ re-raises AttributeError;
        attributes set via setattr (e.g. shared-class instances) are found
        by normal lookup and never reach this method.
        """
        return super().__getattribute__(item)  # pragma: no cover
class Behaviour(ABC):
    """Abstract base class for a skill behaviour (the proactive component)."""

    def __init__(self, **kwargs):
        """
        Initialize a behaviour.

        :param skill_context: the skill context (required keyword argument)
        :param kwargs: any remaining keyword arguments, kept as the config
        """
        self._context = kwargs.pop('skill_context')  # type: SkillContext
        self._config = kwargs

    @property
    def context(self) -> SkillContext:
        """Get the context of the behaviour."""
        return self._context

    @property
    def config(self) -> Dict[Any, Any]:
        """Get the config of the behaviour."""
        return self._config

    @abstractmethod
    def setup(self) -> None:
        """
        Set up the behaviour.

        :return: None
        """

    @abstractmethod
    def act(self) -> None:
        """
        Perform the behaviour's action.

        :return: None
        """

    @abstractmethod
    def teardown(self) -> None:
        """
        Tear down the behaviour.

        :return: None
        """

    @classmethod
    def parse_module(cls, path: str, behaviours_configs: List[BehaviourConfig], skill_context: SkillContext) -> List['Behaviour']:
        """
        Load Behaviour classes from a Python module and instantiate them.

        :param path: path to the Python module containing the Behaviour classes.
        :param behaviours_configs: a list of behaviour configurations.
        :param skill_context: the skill context
        :return: a list of Behaviour.
        """
        spec = importlib.util.spec_from_file_location("behaviours", location=path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore
        # Candidate classes are those whose name matches *Behaviour.
        candidates = dict(
            member for member in inspect.getmembers(module, inspect.isclass)
            if re.match("\\w+Behaviour", member[0])
        )
        loaded = []
        for config in behaviours_configs:
            class_name = cast(str, config.class_name)
            logger.debug("Processing behaviour {}".format(class_name))
            behaviour_class = candidates.get(class_name, None)
            if behaviour_class is None:
                logger.warning("Behaviour '{}' cannot be found.".format(class_name))
                continue
            init_kwargs = config.args
            assert 'skill_context' not in init_kwargs.keys(), "'skill_context' is a reserved key. Please rename your arguments!"
            init_kwargs['skill_context'] = skill_context
            loaded.append(behaviour_class(**init_kwargs))
        return loaded
class Handler(ABC):
    """Abstract base class for a skill message handler (the reactive component)."""

    # Identifier of the protocol whose messages this handler accepts.
    SUPPORTED_PROTOCOL = None  # type: Optional[ProtocolId]

    def __init__(self, **kwargs):
        """
        Initialize a handler object.

        :param skill_context: the skill context (required keyword argument)
        :param kwargs: any remaining keyword arguments, kept as the config
        """
        self._context = kwargs.pop('skill_context')  # type: SkillContext
        self._config = kwargs

    @property
    def context(self) -> SkillContext:
        """Get the context of the handler."""
        return self._context

    @property
    def config(self) -> Dict[Any, Any]:
        """Get the config of the handler."""
        return self._config

    @abstractmethod
    def handle(self, message: Message, sender: str) -> None:
        """
        React to an incoming message.

        :param message: the message
        :param sender: the sender
        :return: None
        """

    @abstractmethod
    def setup(self) -> None:
        """
        Set up the handler.

        :return: None
        """

    @abstractmethod
    def teardown(self) -> None:
        """
        Tear down the handler.

        :return: None
        """

    @classmethod
    def parse_module(cls, path: str, handler_configs: List[HandlerConfig], skill_context: SkillContext) -> List['Handler']:
        """
        Load Handler classes from a Python module and instantiate them.

        :param path: path to the Python module containing the Handler class.
        :param handler_configs: the list of handler configurations.
        :param skill_context: the skill context
        :return: a list of Handler (configs naming unknown classes are skipped).
        """
        spec = importlib.util.spec_from_file_location("handlers", location=path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore
        # Candidate classes are those whose name matches *Handler.
        candidates = dict(
            member for member in inspect.getmembers(module, inspect.isclass)
            if re.match("\\w+Handler", member[0])
        )
        loaded = []
        for config in handler_configs:
            class_name = cast(str, config.class_name)
            logger.debug("Processing handler {}".format(class_name))
            handler_class = candidates.get(class_name, None)
            if handler_class is None:
                logger.warning("Handler '{}' cannot be found.".format(class_name))
                continue
            init_kwargs = config.args
            assert 'skill_context' not in init_kwargs.keys(), "'skill_context' is a reserved key. Please rename your arguments!"
            init_kwargs['skill_context'] = skill_context
            loaded.append(handler_class(**init_kwargs))
        return loaded
class Task(ABC):
    """Abstract base class for a skill task, executed on each main-loop iteration."""

    def __init__(self, *args, **kwargs):
        """
        Initialize a task.

        :param skill_context: the skill context (required keyword argument)
        :param kwargs: any remaining keyword arguments, kept as the config
        """
        self._context = kwargs.pop('skill_context')  # type: SkillContext
        self._config = kwargs

    @property
    def context(self) -> SkillContext:
        """Get the context of the task."""
        return self._context

    @property
    def config(self) -> Dict[Any, Any]:
        """Get the config of the task."""
        return self._config

    @abstractmethod
    def execute(self) -> None:
        """
        Run the task logic.

        :return: None
        """

    @abstractmethod
    def setup(self) -> None:
        """
        Set up the task.

        :return: None
        """

    @abstractmethod
    def teardown(self) -> None:
        """
        Tear down the task.

        :return: None
        """

    @classmethod
    def parse_module(cls, path: str, tasks_configs: List[TaskConfig], skill_context: SkillContext) -> List['Task']:
        """
        Load Task classes from a Python module and instantiate them.

        :param path: path to the Python module containing the Task classes.
        :param tasks_configs: a list of tasks configurations.
        :param skill_context: the skill context
        :return: a list of Tasks (configs naming unknown classes are skipped).
        """
        spec = importlib.util.spec_from_file_location("tasks", location=path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # type: ignore
        # Candidate classes are those whose name matches *Task.
        candidates = dict(
            member for member in inspect.getmembers(module, inspect.isclass)
            if re.match("\\w+Task", member[0])
        )
        loaded = []
        for config in tasks_configs:
            class_name = config.class_name
            logger.debug("Processing task {}".format(class_name))
            task_class = candidates.get(class_name, None)
            if task_class is None:
                logger.warning("Task '{}' cannot be found.".format(class_name))
                continue
            init_kwargs = config.args
            assert 'skill_context' not in init_kwargs.keys(), "'skill_context' is a reserved key. Please rename your arguments!"
            init_kwargs['skill_context'] = skill_context
            loaded.append(task_class(**init_kwargs))
        return loaded
class SharedClass(ABC):
    """This class implements an abstract shared class.

    Shared classes are instantiated once per skill and exposed on the
    skill context (see parse_module) so handlers, behaviours and tasks
    can all reach the same instance.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize a shared class instance.

        :param skill_context: the skill context
        :param kwargs: keyword arguments.
        """
        self._context = kwargs.pop('skill_context')  # type: SkillContext
        self._config = kwargs

    @property
    def context(self) -> SkillContext:
        """Get the context of the shared class."""
        return self._context

    @property
    def config(self) -> Dict[Any, Any]:
        """Get the config of the shared class."""
        return self._config

    @classmethod
    def parse_module(cls, path: str, shared_classes_configs: List[SharedClassConfig], skill_context: SkillContext) -> List['SharedClass']:
        """
        Scan a skill directory for SharedClass subclasses and instantiate them.

        :param path: path to the Python skill module.
        :param shared_classes_configs: a list of shared class configurations.
        :param skill_context: the skill context
        :return: a list of SharedClass.
        """
        instances = []
        shared_classes = []
        shared_classes_names = set(config.class_name for config in shared_classes_configs)
        # get all Python modules except the standard ones
        ignore_regex = "|".join(["handlers.py", "behaviours.py", "tasks.py", "__.*"])
        all_python_modules = Path(path).glob("*.py")
        module_paths = set(map(str, filter(lambda x: not re.match(ignore_regex, x.name), all_python_modules)))
        for module_path in module_paths:
            logger.debug("Trying to load module {}".format(module_path))
            # NOTE(review): this keeps the directory prefix and strips every
            # ".py" occurrence, so module_name is a path-like string rather
            # than a bare module name; spec_from_file_location accepts any
            # name, but confirm nothing depends on the resulting __name__.
            module_name = module_path.replace(".py", "")
            shared_class_spec = importlib.util.spec_from_file_location(module_name, location=module_path)
            shared_class_module = importlib.util.module_from_spec(shared_class_spec)
            shared_class_spec.loader.exec_module(shared_class_module)  # type: ignore
            classes = inspect.getmembers(shared_class_module, inspect.isclass)
            # Keep classes that derive from SharedClass and whose name
            # matches a configured name (re.match treats the configured name
            # as a regex anchored at the start of the class name).
            filtered_classes = list(
                filter(
                    lambda x:
                    any(re.match(shared, x[0]) for shared in shared_classes_names) and SharedClass in inspect.getmro(x[1]),
                    classes)
            )
            shared_classes.extend(filtered_classes)

        name_to_class = dict(shared_classes)
        for shared_class_config in shared_classes_configs:
            shared_class_name = shared_class_config.class_name
            logger.debug("Processing shared class {}".format(shared_class_name))
            shared_class = name_to_class.get(shared_class_name, None)
            if shared_class is None:
                logger.warning("Shared class '{}' cannot be found.".format(shared_class_name))
            else:
                args = shared_class_config.args
                assert 'skill_context' not in args.keys(), "'skill_context' is a reserved key. Please rename your arguments!"
                args['skill_context'] = skill_context
                shared_class_instance = shared_class(**args)
                instances.append(shared_class_instance)
                # Expose the instance on the skill context under the
                # lowercased class name so other skill components can use it.
                setattr(skill_context, shared_class_name.lower(), shared_class_instance)
        return instances
class Skill:
    """This class implements a skill."""

    def __init__(self, config: SkillConfig,
                 skill_context: SkillContext,
                 handlers: Optional[List[Handler]],
                 behaviours: Optional[List[Behaviour]],
                 tasks: Optional[List[Task]],
                 shared_classes: Optional[List[SharedClass]]):
        """
        Initialize a skill.

        :param config: the skill configuration.
        :param skill_context: the context shared by all skill components.
        :param handlers: the list of handlers to handle incoming envelopes.
        :param behaviours: the list of behaviours that defines the proactive component of the agent.
        :param tasks: the list of tasks executed at every iteration of the main loop.
        :param shared_classes: the list of classes shared across tasks, behaviours and handlers.
        """
        self.config = config
        self.skill_context = skill_context
        self.handlers = handlers
        self.behaviours = behaviours
        self.tasks = tasks
        self.shared_classes = shared_classes

    @classmethod
    def from_dir(cls, directory: str, agent_context: AgentContext) -> 'Skill':
        """
        Load a skill from a directory.

        :param directory: the skill directory.
        :param agent_context: the agent's context
        :return: the Skill object.
        :raises Exception: if the parsing failed.
        """
        # Load the skill configuration; use a context manager so the file
        # handle is always closed (the original leaked the handle returned
        # by open()).
        skill_loader = ConfigLoader("skill-config_schema.json", SkillConfig)
        with open(os.path.join(directory, DEFAULT_SKILL_CONFIG_FILE)) as fp:
            skill_config = skill_loader.load(fp)

        # Register the skill package as a module so its components can be
        # imported while the skill is loaded.
        skills_spec = importlib.util.spec_from_file_location(skill_config.name, os.path.join(directory, "__init__.py"))
        skill_module = importlib.util.module_from_spec(skills_spec)
        sys.modules[skill_config.name + "_skill"] = skill_module
        loader_contents = [path.name for path in Path(directory).iterdir()]
        skills_packages = list(filter(lambda x: not x.startswith("__"), loader_contents))  # type: ignore
        logger.debug("Processing the following skill package: {}".format(skills_packages))

        skill_context = SkillContext(agent_context)

        # Each component type is optional: parse its module only when the
        # configuration declares at least one entry.
        handlers_by_id = skill_config.handlers.read_all()
        if len(handlers_by_id) > 0:
            handlers_configurations = list(dict(handlers_by_id).values())
            handlers = Handler.parse_module(os.path.join(directory, "handlers.py"), handlers_configurations, skill_context)
        else:
            handlers = []

        behaviours_by_id = skill_config.behaviours.read_all()
        if len(behaviours_by_id) > 0:
            behaviours_configurations = list(dict(behaviours_by_id).values())
            behaviours = Behaviour.parse_module(os.path.join(directory, "behaviours.py"), behaviours_configurations, skill_context)
        else:
            behaviours = []

        tasks_by_id = skill_config.tasks.read_all()
        if len(tasks_by_id) > 0:
            tasks_configurations = list(dict(tasks_by_id).values())
            tasks = Task.parse_module(os.path.join(directory, "tasks.py"), tasks_configurations, skill_context)
        else:
            tasks = []

        shared_classes_by_id = skill_config.shared_classes.read_all()
        if len(shared_classes_by_id) > 0:
            shared_classes_configurations = list(dict(shared_classes_by_id).values())
            shared_classes_instances = SharedClass.parse_module(directory, shared_classes_configurations, skill_context)
        else:
            shared_classes_instances = []

        skill = Skill(skill_config, skill_context, handlers, behaviours, tasks, shared_classes_instances)
        # Wire the back-reference so context.handlers/behaviours/tasks work.
        skill_context._skill = skill
        return skill
|
import io
import os
import sys
import datetime
import multiprocessing
import subprocess
import traceback
import time
import signal
import jedi
# Python 2 compatibility: force UTF-8 as the default string encoding.
# reload() is a builtin only on Python 2; on Python 3 the default encoding
# is already 'utf-8', so this branch never executes there.
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8')


class TimeoutException(Exception):  # Custom exception class
    # Raised by the SIGALRM handler to abort an analysis that runs too long;
    # caught in main() around analyze_repo().
    pass


def timeout_handler(signum, frame):  # Custom signal handler
    # Convert the alarm signal into an exception the analysis loop can catch.
    raise TimeoutException


# Change the behavior of SIGALRM
signal.signal(signal.SIGALRM, timeout_handler)
class Repository(object):
    """A GitHub repository plus the local paths of its downloaded files."""

    def __init__(self, name):
        self.name = name
        self.files = []

    def add_file(self, file_url):
        """Record the local path for file_url; empty URLs are ignored."""
        if len(file_url) > 0:
            self.files.append(self.url_to_path(file_url))

    def url_to_path(self, file_url):
        """Map a file URL to its local path under the repo's subdirectory."""
        basename = file_url[(file_url.rindex("/") + 1):]
        return os.path.join(self.get_subdir(), basename)

    def get_subdir(self):
        """Return the repo's local directory (slashes become dashes)."""
        return os.path.join(CODE_DIR, self.name.replace('/', '-'))

    def get_folder_files(self):
        """List the code files present in the repo folder on disk."""
        subdir = self.get_subdir()
        # skip out some lib functions
        skip_markers = ("/--float--", "/--int--", "/--str--")
        code_files = []
        for entry in os.listdir(subdir):
            full_path = os.path.join(subdir, entry)
            if any(marker in full_path for marker in skip_markers):
                continue
            code_files.append(full_path)
        return code_files

    def print_details(self):
        print(self.name)
        print(str(self.files))
def run_command(command):
    """Execute command in a shell and block until it finishes.

    NOTE(review): shell=True means metacharacters in command are interpreted
    by the shell; callers build the string from repo/function names.
    """
    child = subprocess.Popen(command, shell=True)
    child.wait()
def read_wholefile(filename):
    """Read a source file, rewriting 'return'/'print' into assignments.

    The rewrite forces jedi to treat returned/printed expressions as
    assigned values so their lines get analysed.
    """
    with open(filename, 'r') as handle:
        source = handle.read()
    # modify a little bit to force jedi analyse this line
    return (source
            .replace("return", "_____return_value=")
            .replace("print", "_____return_value="))
def read_repos():
    """Parse LIST_FILE into a list of Repository objects.

    The file format is a '=== owner/name' header line followed by one file
    URL per line belonging to that repository.

    :return: list of Repository instances (empty for an empty listing)

    Fixes vs. original: the file is closed via a context manager even on
    error; file URLs appearing before any header are skipped instead of
    crashing with AttributeError on None; and an empty listing yields []
    instead of a list containing None.
    """
    repo_list = []
    repo = None
    with open(LIST_FILE, "r") as file_obj:
        for line in file_obj:
            line = line.strip()
            if line.startswith("=="):
                # New repository section: flush the previous one first.
                if repo:
                    repo_list.append(repo)
                repo = Repository(line.replace("=== ", ""))
            elif repo is not None:
                repo.add_file(line)
    if repo:
        repo_list.append(repo)
    return repo_list
def search_row_col(file_content, substring):
    """Locate substring occurrences as [line_number, column, line] triples.

    Line numbers are 1-based; lines whose stripped text starts with '#'
    (comments) are skipped.
    """
    hits = []
    for line_no, line in enumerate(file_content.split('\n'), start=1):
        if line.strip().startswith('#'):
            continue
        column = line.find(substring)
        if column >= 0:
            hits.append([line_no, column, line])
    return hits
def search_score_mag(file_content, use_score, use_magnitude):
    """Return the (use_score, use_magnitude) flags updated for file_content.

    A flag flips to True if the corresponding attribute access ('.score' or
    '.magnitude') appears anywhere in the text; flags never flip back.
    """
    use_score = use_score or ('.score' in file_content)
    use_magnitude = use_magnitude or ('.magnitude' in file_content)
    return use_score, use_magnitude
def is_appear(content, variable):
    """Return True if any fully-qualified name in content ends in '.variable'."""
    names = jedi.names(content, all_scopes=True, definitions=True,
                       references=True)
    suffix = "." + variable
    # Names without a usable full_name (None or empty) are skipped.
    return any(name.full_name.endswith(suffix)
               for name in names if name.full_name)
# it will quit early
def search_references(script, content, name_string):
    """Transitively collect jedi names whose definitions mention name_string.

    Starting from name_string, repeatedly searches definition descriptions
    for mentions of already-found left-hand values (a fixed-point loop).
    Also updates the module-level use_score / use_magnitude flags via
    search_score_mag and returns early as soon as both are set.

    NOTE(review): the 'script' parameter is unused in this function.
    """
    global use_score
    global use_magnitude
    result = []
    to_search = []
    # replace("return","_____return_value=" ) to force jedi analyse return and print command
    # also use += to replace append
    content = content.replace("return", "_____return_value=").replace("print", "_____print=").replace(".append", "+=")
    all_names = jedi.names(content,
                           all_scopes=True, definitions=True, references=False)
    # Seed pass: find definitions that mention name_string directly.
    for name in all_names:
        if not name.full_name:
            continue
        if len(name.full_name) <=0:
            continue
        if name_string in name.description:
            # reduce some false postives
            if is_appear(name.description, name_string):
                # Left-hand side of the (rewritten) assignment.
                left_value = name.description.strip().split('=')[0].strip()
                if left_value == name_string:
                    continue
                result.append(name)
                if not "_____return_value" == name.full_name:
                    to_search.append(left_value)
                use_score, use_magnitude = search_score_mag(name.description, use_score, use_magnitude)
                # if find two variables, then quit
                if use_score and use_magnitude:
                    return result
    # Fixed-point pass: keep searching for mentions of newly found
    # left-hand values until no new results are added.
    while True:
        len1 = len(result)
        to_search_new = []
        for search_item in to_search:
            for name in all_names:
                if not name.full_name:
                    continue
                if len(name.full_name) <=0:
                    continue
                if search_item in name.description:
                    if is_appear(name.description, search_item):
                        left_value = name.description.strip().split('=')[0].strip()
                        if left_value == search_item:
                            continue
                        result.append(name)
                        to_search_new.append(left_value)
                        use_score, use_magnitude = search_score_mag(name.description, use_score, use_magnitude)
                        if use_score and use_magnitude:
                            return result
        to_search = to_search_new
        if len1 == len(result):
            # No growth this round: fixed point reached.
            return result
    # NOTE(review): unreachable — the while-loop above always returns.
    return result
def find_left_value(content_line_by_line, script, line_num):
    """Find the assigned name at line_num, scanning upward on failure.

    Asks jedi (script.goto) for the definition at the first non-whitespace
    column of the line; on any failure it retries the previous line, and
    returns None once the top of the file is reached without success.

    :param content_line_by_line: the file content split into lines
    :param script: a jedi Script for the same content
    :param line_num: 1-based line number to start from
    :return: the last dotted component of the definition's full_name, or None

    Fixes vs. original: at line_num == 1 the original recursed with 0,
    which made content_line_by_line[-1] wrap around to the LAST line (and
    passed the invalid line 0 to jedi); the bare 'except' also swallowed
    KeyboardInterrupt/SystemExit. Recursion now stops at line 1 and only
    Exception subclasses trigger the retry.
    """
    line = content_line_by_line[line_num - 1]
    # 1-based column of the first non-whitespace character.
    col_num = 1 + len(line) - len(line.lstrip())
    try:
        definition = script.goto(line_num, col_num)[0]
        return definition.full_name.split('.')[-1]
    except Exception:
        if line_num > 1:
            return find_left_value(content_line_by_line, script, line_num - 1)
        return None
def get_reference_by_pos(script,pos):
    """Return the first jedi reference at pos, scanning upward on failure.

    :param script: a jedi Script
    :param pos: [line, column] list (1-based line)
    :return: the first reference found, or None if none up to the top

    NOTE(review): pos is mutated in place (pos[0] is decremented on every
    failed attempt), so after the call the caller's list points at the line
    where a reference was eventually found — confirm callers rely on this
    before changing it.
    """
    try:
        result = script.get_references(pos[0],pos[1])[0]
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt; the
        # intended failures here are presumably jedi errors / IndexError.
        if pos[0] >= 1:
            # NOTE(review): when pos[0] == 1 this retries line 0, which is
            # not a valid 1-based line; that retry fails and returns None.
            pos[0] = pos[0] - 1
            return get_reference_by_pos(script,pos)
        else:
            return None
    return result
# return 2 for good, 1/0 for bad, -1 for strange case
def analyze_repo(repo):
    """Check whether a repo's use of the sentiment API reads both fields.

    For every downloaded file, finds references to API_NAME, follows the
    dataflow of the assigned result via jedi, and tracks whether '.score'
    and '.magnitude' are both consumed. Triggers download of files defining
    functions the value flows through (via an external ruby script).

    :return: 2 if both .score and .magnitude are used, 1 if only one,
        0 if neither, -2 if any file is too large to process
    """
    global use_score
    global use_magnitude
    use_score = False
    use_magnitude = False
    # NOTE(review): error_flag is never read or written again — dead.
    error_flag = False
    # for each file
    for filename in repo.files:
        content = read_wholefile(filename)
        # too large to process
        content_line_by_line = content.split("\n")
        if len(content_line_by_line) > 1000:
            return -2
        script = jedi.Script(content)
        positions = search_row_col(content, API_NAME)
        # for each API reference
        for pos in positions:
            result = get_reference_by_pos(script,pos)
            # jedi.api.classes.Name: full_name, description, line,column defined_names(), is_definition()
            if not result:
                continue
            if not result.full_name:
                continue
            names = result.full_name.split('.') # full_name sample: __main__.detect_sentiment.analyze_sentiment
            # Second-to-last component is the enclosing function's name.
            appear_in_function = names[-2]
            # check dataflow -> going down
            left_value = find_left_value(content_line_by_line, script, pos[0])
            if left_value:
                result = search_references(script, content, left_value)
            else:
                # if jedi cannot find left value for return
                if content_line_by_line[pos[0]-1].strip().startswith("return"):
                    use_score, use_magnitude = search_score_mag(content_line_by_line[pos[0]-1], use_score, use_magnitude)
                result = []
            # if find two variables, then quit
            if use_score and use_magnitude:
                return 2
            # check function names
            function_names = []
            # script = jedi.Script(content)
            # Collect names of functions the tracked value is passed through
            # (pattern '=name(' in the whitespace-stripped definition line).
            for mention in result:
                line_content = mention.description
                all_names = jedi.names(line_content, all_scopes=True, definitions=True, references=True)
                for name in all_names:
                    name2 = name.full_name.split('.')[-1]
                    if ("="+name2+"(") in line_content.replace(" ",""):
                        if not (name2=="str" or name2=="float" or name2=="int"):
                            function_names.append(name2)
            # if it's not apeared in a function, then no need to search for its existance
            if '__main__' in appear_in_function:
                pass
            else:
                function_names.append(appear_in_function)
            # start downloading related files
            for function in function_names:
                # Already defined locally (and not the enclosing function
                # itself): nothing to download.
                if ("def "+function) in content and (appear_in_function != function):
                    continue
                if (function == "main"):
                    continue
                command = "ruby search_inside_repo.rb " + function + " " + repo.name + " " + CODE_DIR
                run_command(command)
                time.sleep(1)
    # Summary after scanning every file.
    if use_score and use_magnitude:
        return 2
    elif use_score or use_magnitude:
        return 1
    else:
        return 0
    # NOTE(review): unreachable — every branch above returns.
    return -1
def main():
    """Analyze every repository from LIST_FILE under a per-repo time limit.

    Arms a SIGALRM-based wall-clock timer around each analyze_repo call;
    timeout_handler converts the alarm into TimeoutException. Prints one
    tab-separated '<repo>\\t<result>' line per repository.

    Fix vs. original: the catch-all clause is narrowed from a bare
    'except:' to 'except Exception:' so KeyboardInterrupt/SystemExit can
    still abort the whole run.
    """
    repo_list = read_repos()
    for repo in repo_list:
        result = -1
        # Arm the per-repository wall-clock alarm.
        signal.setitimer(signal.ITIMER_REAL, TIME_LIMIT)
        try:
            result = analyze_repo(repo)
            signal.setitimer(signal.ITIMER_REAL, 0)
        except TimeoutException:
            print("TIME OUT")
            result = -1
        except Exception:
            signal.setitimer(signal.ITIMER_REAL, 0)
            # return 2 for good, 1/0 for bad, -1 for strange case
            result = -1
            traceback.print_exc()
        finally:
            # Reset the alarm
            signal.setitimer(signal.ITIMER_REAL, 0)
        print(repo.name+'\t'+str(result))
        time.sleep(1)
if __name__ == '__main__':
    # Module-level configuration consumed throughout this script.
    API_NAME = "analyze_sentiment"  # API call whose dataflow is traced
    CODE_DIR = "codes"              # directory holding downloaded repo files
    LIST_FILE = "python_apps.txt"   # '=== repo' header + file-URL listing
    # CODE_DIR = "codes_subset"
    # LIST_FILE = "test.txt"
    TIME_LIMIT = 120                # seconds allowed per repository
    main()
|
# File: malware/openvc/openvc-1.0.0/openvc/cvtypes_h.py
#!/usr/bin/env python
# PyOpenCV - A Python wrapper for OpenCV 2.x using Boost.Python and NumPy
# Copyright (c) 2009, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of pyopencv's copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# For further inquiries, please contact <NAME> at <EMAIL>.
# ----------------------------------------------------------------------------
import common as _c
import cvtypes_h_ext as _ext
from cvtypes_h_ext import *
#=============================================================================
# cvtypes.h
#=============================================================================
# Defines for Distance Transform
# Distance-metric codes used by cvDistTransform and related functions.
CV_DIST_USER = -1
CV_DIST_L1 = 1
CV_DIST_L2 = 2
CV_DIST_C = 3
CV_DIST_L12 = 4
CV_DIST_FAIR = 5
CV_DIST_WELSCH = 6
CV_DIST_HUBER = 7
# Haar-like Object Detection structures
# Magic value stored in CvHaarClassifierCascade.flags (see CV_IS_HAAR_CLASSIFIER).
CV_HAAR_MAGIC_VAL = 0x42500000
CV_TYPE_NAME_HAAR = "opencv-haar-classifier"
CV_HAAR_FEATURE_MAX = 3
def CV_IS_HAAR_CLASSIFIER(haar_cascade):
    """Return True if *haar_cascade* is a CvHaarClassifierCascade whose
    magic flag bits equal CV_HAAR_MAGIC_VAL (mirrors the C macro)."""
    if not isinstance(haar_cascade, CvHaarClassifierCascade):
        return False
    return (haar_cascade.flags & CV_MAGIC_MASK) == CV_HAAR_MAGIC_VAL
# Wrap Seq_CvConnectedComp.__init__ so the new sequence records the object it
# was built from in self.depends -- presumably to keep the source object (and
# its underlying storage) alive for the lifetime of the sequence.
Seq_CvConnectedComp.__old_init__ = Seq_CvConnectedComp.__init__
def _Seq_CvConnectedComp__init__(self, *args, **kwds):
    Seq_CvConnectedComp.__old_init__(self, *args, **kwds)
    if args:
        self.depends = [args[0]]
    elif kwds:
        # BUG FIX (Py3 compatibility): dict.values() returns a non-indexable
        # view on Python 3; next(iter(...)) works on both Python 2 and 3.
        self.depends = [next(iter(kwds.values()))]
    else:
        self.depends = []
# Preserve the original constructor's docstring on the wrapper.
_Seq_CvConnectedComp__init__.__doc__ = Seq_CvConnectedComp.__old_init__.__doc__
Seq_CvConnectedComp.__init__ = _Seq_CvConnectedComp__init__
Seq_CvConnectedComp.__iter__ = _c.__sd_iter__
def endFindContours(scanner):
    """Finish contour scanning via the native _cvEndFindContours and return
    its result; afterwards the Python wrapper no longer owns the scanner."""
    result = _ext._cvEndFindContours(scanner)
    scanner._ownershiplevel = 0  # not owning the structure anymore
    return result
# Expose the native function's documentation on the wrapper.
endFindContours.__doc__ = _ext._cvEndFindContours.__doc__
# Give CvContourScanner a finalizer: _ownershiplevel == 1 means the Python
# wrapper owns the native scanner and must end/release it on collection.
CvContourScanner._ownershiplevel = 0
def _CvContourScanner__del__(self):
    # Only finalize when this wrapper still owns the native structure
    # (endFindContours() transfers ownership by resetting the level to 0).
    if self._ownershiplevel==1:
        _ext._cvEndFindContours(self)
CvContourScanner.__del__ = _CvContourScanner__del__
#=============================================================================
# cv.h
#=============================================================================
#-----------------------------------------------------------------------------
# Image Processing
#-----------------------------------------------------------------------------
# Smoothing method codes (cvSmooth).
CV_BLUR_NO_SCALE = 0
CV_BLUR = 1
CV_GAUSSIAN = 2
CV_MEDIAN = 3
CV_BILATERAL = 4
# Special aperture-size value selecting the 3x3 Scharr kernel in cvSobel.
CV_SCHARR = -1
CV_MAX_SOBEL_KSIZE = 7
# Colour-space conversion codes (cvCvtColor).  RGB<->BGR variants alias the
# code with the channel order reversed.
CV_BGR2BGRA = 0
CV_RGB2RGBA = CV_BGR2BGRA
CV_BGRA2BGR = 1
CV_RGBA2RGB = CV_BGRA2BGR
CV_BGR2RGBA = 2
CV_RGB2BGRA = CV_BGR2RGBA
CV_RGBA2BGR = 3
CV_BGRA2RGB = CV_RGBA2BGR
CV_BGR2RGB = 4
CV_RGB2BGR = CV_BGR2RGB
CV_BGRA2RGBA = 5
CV_RGBA2BGRA = CV_BGRA2RGBA
CV_BGR2GRAY = 6
CV_RGB2GRAY = 7
CV_GRAY2BGR = 8
CV_GRAY2RGB = CV_GRAY2BGR
CV_GRAY2BGRA = 9
CV_GRAY2RGBA = CV_GRAY2BGRA
CV_BGRA2GRAY = 10
CV_RGBA2GRAY = 11
CV_BGR2BGR565 = 12
CV_RGB2BGR565 = 13
CV_BGR5652BGR = 14
CV_BGR5652RGB = 15
CV_BGRA2BGR565 = 16
CV_RGBA2BGR565 = 17
CV_BGR5652BGRA = 18
CV_BGR5652RGBA = 19
CV_GRAY2BGR565 = 20
CV_BGR5652GRAY = 21
CV_BGR2BGR555 = 22
CV_RGB2BGR555 = 23
CV_BGR5552BGR = 24
CV_BGR5552RGB = 25
CV_BGRA2BGR555 = 26
CV_RGBA2BGR555 = 27
CV_BGR5552BGRA = 28
CV_BGR5552RGBA = 29
CV_GRAY2BGR555 = 30
CV_BGR5552GRAY = 31
CV_BGR2XYZ = 32
CV_RGB2XYZ = 33
CV_XYZ2BGR = 34
CV_XYZ2RGB = 35
CV_BGR2YCrCb = 36
CV_RGB2YCrCb = 37
CV_YCrCb2BGR = 38
CV_YCrCb2RGB = 39
CV_BGR2HSV = 40
CV_RGB2HSV = 41
CV_BGR2Lab = 44
CV_RGB2Lab = 45
# Bayer-pattern demosaicing codes.
CV_BayerBG2BGR = 46
CV_BayerGB2BGR = 47
CV_BayerRG2BGR = 48
CV_BayerGR2BGR = 49
CV_BayerBG2RGB = CV_BayerRG2BGR
CV_BayerGB2RGB = CV_BayerGR2BGR
CV_BayerRG2RGB = CV_BayerBG2BGR
CV_BayerGR2RGB = CV_BayerGB2BGR
CV_BGR2Luv = 50
CV_RGB2Luv = 51
CV_BGR2HLS = 52
CV_RGB2HLS = 53
CV_HSV2BGR = 54
CV_HSV2RGB = 55
CV_Lab2BGR = 56
CV_Lab2RGB = 57
CV_Luv2BGR = 58
CV_Luv2RGB = 59
CV_HLS2BGR = 60
CV_HLS2RGB = 61
CV_COLORCVT_MAX = 100
# Geometric warp flags (cvWarpAffine / cvWarpPerspective / cvRemap).
CV_WARP_FILL_OUTLIERS = 8
CV_WARP_INVERSE_MAP = 16
# Structuring-element shapes (cvCreateStructuringElementEx).
CV_SHAPE_RECT = 0
CV_SHAPE_CROSS = 1
CV_SHAPE_ELLIPSE = 2
CV_SHAPE_CUSTOM = 100
# Morphological operation codes (cvMorphologyEx).
CV_MOP_ERODE = 0
CV_MOP_DILATE = 1
CV_MOP_OPEN = 2
CV_MOP_CLOSE = 3
CV_MOP_GRADIENT = 4
CV_MOP_TOPHAT = 5
CV_MOP_BLACKHAT = 6
# Template-matching comparison methods (cvMatchTemplate).
CV_TM_SQDIFF = 0
CV_TM_SQDIFF_NORMED = 1
CV_TM_CCORR = 2
CV_TM_CCORR_NORMED = 3
CV_TM_CCOEFF = 4
CV_TM_CCOEFF_NORMED = 5
# Extra documentation describing the Python-level 'distance_func' callback of
# cvCalcEMD2(); appended to (or used as) calcEMD2's docstring below.
_str = "\n 'distance_func' is a Python function declared as follows:\n def distance_func((int)a, (int)b, (object)userdata) -> (float)x\n where\n 'a' : the address of a C array of C floats representing the first vector\n 'b' : the address of a C array of C floats representing the second vector\n 'userdata' : the 'userdata' parameter of cvCalcEMD2()\n 'x' : the resultant distance"
if calcEMD2.__doc__ is None:
    calcEMD2.__doc__ = _str
else:
    calcEMD2.__doc__ += _str
#-----------------------------------------------------------------------------
# Contours Retrieving
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Motion Analysis
#-----------------------------------------------------------------------------
# Flag bits for the pyramidal Lucas-Kanade optical flow functions.
CV_LKFLOW_PYR_A_READY = 1
CV_LKFLOW_PYR_B_READY = 2
CV_LKFLOW_INITIAL_GUESSES = 4
CV_LKFLOW_GET_MIN_EIGENVALS = 8
#-----------------------------------------------------------------------------
# Planar Subdivisions
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Contour Processing and Shape Analysis
#-----------------------------------------------------------------------------
CV_POLY_APPROX_DP = 0
# Shape/contour matching method codes (cvMatchShapes and friends).
CV_CONTOURS_MATCH_I1 = 1
CV_CONTOURS_MATCH_I2 = 2
CV_CONTOURS_MATCH_I3 = 3
CV_CONTOUR_TREES_MATCH_I1 = 1
CV_CLOCKWISE = 1
CV_COUNTER_CLOCKWISE = 2
# Histogram comparison methods (cvCompareHist).
CV_COMP_CORREL = 0
CV_COMP_CHISQR = 1
CV_COMP_INTERSECT = 2
CV_COMP_BHATTACHARYYA= 3
CV_VALUE = 1
CV_ARRAY = 2
# Distance-transform mask sizes.
CV_DIST_MASK_3 = 3
CV_DIST_MASK_5 = 5
CV_DIST_MASK_PRECISE = 0
CV_CALIB_CB_FAST_CHECK = 8 # OpenCV 2.1: Equivalent C++ constant not yet available
#-----------------------------------------------------------------------------
# Feature detection
#-----------------------------------------------------------------------------
# Give CvFeatureTree a finalizer that releases the native tree when the
# Python wrapper owns it (_ownershiplevel == 1).
CvFeatureTree._ownershiplevel = 0
def _CvFeatureTree__del__(self):
    if self._ownershiplevel==1:
        _ext._cvReleaseFeatureTree(self)
CvFeatureTree.__del__ = _CvFeatureTree__del__
# Same ownership pattern for CvLSH: release the native LSH structure on
# garbage collection if this wrapper owns it.
CvLSH._ownershiplevel = 0
def _CvLSH__del__(self):
    if self._ownershiplevel==1:
        _ext._cvReleaseLSH(self)
CvLSH.__del__ = _CvLSH__del__
# Wrap Seq_CvSURFPoint.__init__ so the sequence records its source object in
# self.depends -- same dependency-tracking pattern as Seq_CvConnectedComp.
Seq_CvSURFPoint.__old_init__ = Seq_CvSURFPoint.__init__
def _Seq_CvSURFPoint__init__(self, *args, **kwds):
    Seq_CvSURFPoint.__old_init__(self, *args, **kwds)
    if args:
        self.depends = [args[0]]
    elif kwds:
        # BUG FIX (Py3 compatibility): dict.values() returns a non-indexable
        # view on Python 3; next(iter(...)) works on both Python 2 and 3.
        self.depends = [next(iter(kwds.values()))]
    else:
        self.depends = []
# Preserve the original constructor's docstring on the wrapper.
_Seq_CvSURFPoint__init__.__doc__ = Seq_CvSURFPoint.__old_init__.__doc__
Seq_CvSURFPoint.__init__ = _Seq_CvSURFPoint__init__
Seq_CvSURFPoint.__iter__ = _c.__sd_iter__
#-----------------------------------------------------------------------------
# POSIT (POSe from ITeration)
#-----------------------------------------------------------------------------
# Release the native POSIT object on garbage collection when owned.
CvPOSITObject._ownershiplevel = 0
def _CvPOSITObject__del__(self):
    if self._ownershiplevel==1:
        _ext._cvReleasePOSITObject(self)
CvPOSITObject.__del__ = _CvPOSITObject__del__
#-----------------------------------------------------------------------------
# Camera Calibration, Pose Estimation and Stereo
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Kolmogorov-Zabin stereo-correspondence algorithm (a.k.a. KZ1)
#-----------------------------------------------------------------------------
# Release the native graph-cut stereo state on garbage collection when owned.
CvStereoGCState._ownershiplevel = 0
def _CvStereoGCState__del__(self):
    if self._ownershiplevel==1:
        _ext._cvReleaseStereoGCState(self)
CvStereoGCState.__del__ = _CvStereoGCState__del__
#=============================================================================
# cvcompat.h
#=============================================================================
# Backward-compatibility aliases for older OpenCV names.
CvPoint2D64d = CvPoint2D64f
CvPoint3D64d = CvPoint3D64f
CV_MAT32F = CV_32FC1
CV_MAT3x1_32F = CV_32FC1
CV_MAT4x1_32F = CV_32FC1
CV_MAT3x3_32F = CV_32FC1
CV_MAT4x4_32F = CV_32FC1
CV_MAT64D = CV_64FC1
CV_MAT3x1_64D = CV_64FC1
CV_MAT4x1_64D = CV_64FC1
CV_MAT3x3_64D = CV_64FC1
CV_MAT4x4_64D = CV_64FC1
IPL_GAUSSIAN_5x5 = 7
# CvBox2D32f = CvBox2D
# TODO: fix these functions
# cvIntegralImage = cvIntegral
# cvMatchContours = cvMatchShapes
cvCvtPixToPlane = cvSplit
cvCvtPlaneToPix = cvMerge
cvPseudoInv = cvPseudoInverse
# Equip the wrapped vector_CvSURFPoint type with the generic helper methods
# implemented in the `common` module (_c): init, create, repr, list conversion.
vector_CvSURFPoint.__old_init__ = vector_CvSURFPoint.__init__
vector_CvSURFPoint.__init__ = _c.__vector__init__
vector_CvSURFPoint.create = _c.__vector_create
vector_CvSURFPoint.__repr__ = _c.__vector__repr__
vector_CvSURFPoint.tolist = _c.__vector_tolist
vector_CvSURFPoint.fromlist = classmethod(_c.__vector_fromlist)
# Discover the element class by resizing a throwaway vector to one element.
_z = vector_CvSURFPoint()
_z.resize(1)
vector_CvSURFPoint.elem_type = _z[0].__class__
del(_z)
# Same helper-method installation and element-type discovery for
# vector_CvConnectedComp (see vector_CvSURFPoint above).
vector_CvConnectedComp.__old_init__ = vector_CvConnectedComp.__init__
vector_CvConnectedComp.__init__ = _c.__vector__init__
vector_CvConnectedComp.create = _c.__vector_create
vector_CvConnectedComp.__repr__ = _c.__vector__repr__
vector_CvConnectedComp.tolist = _c.__vector_tolist
vector_CvConnectedComp.fromlist = classmethod(_c.__vector_fromlist)
_z = vector_CvConnectedComp()
_z.resize(1)
vector_CvConnectedComp.elem_type = _z[0].__class__
del(_z)
|
<gh_stars>100-1000
## Issue related to time resolution/smoothness
# http://bulletphysics.org/mediawiki-1.5.8/index.php/Stepping_The_World
from gibson.core.physics.scene_building import SinglePlayerBuildingScene
from gibson.core.physics.scene_stadium import SinglePlayerStadiumScene
import pybullet as p
import time
import random
import zmq
import math
import argparse
import os
import json
import numpy as np
from transforms3d import euler, quaternions
from gibson.core.physics.physics_object import PhysicsObject
from gibson.core.render.profiler import Profiler
import gym, gym.spaces, gym.utils, gym.utils.seeding
import sys
import yaml
class BaseEnv(gym.Env):
    """
    Base class for loading environments in a Scene.
    Handles scene loading, starting physical simulation
    These environments create single-player scenes and behave like normal Gym environments.
    Multiplayer is not yet supported
    """
    def __init__(self, config, scene_type, tracking_camera):
        """Connect to pybullet and record configuration.

        config: configuration dict (see parse_config); scene_type is
        "stadium" or "building"; tracking_camera is a dict with "distance",
        "yaw" and "pitch" keys used by render_physics().
        """
        ## Properties already instantiated from SensorEnv/CameraEnv
        # @self.robot
        self.gui = config["mode"] == "gui"
        self.model_id = config["model_id"]
        self.timestep = config["speed"]["timestep"]
        self.frame_skip = config["speed"]["frameskip"]
        self.resolution = config["resolution"]
        self.tracking_camera = tracking_camera
        self.robot = None
        # NOTE(review): these four locals are computed but never stored or
        # used here. Also `self.config` is read although this __init__ never
        # assigns it -- presumably a subclass sets self.config before calling
        # super().__init__; confirm, otherwise this raises AttributeError.
        target_orn, target_pos = config["target_orn"], self.config["target_pos"]
        initial_orn, initial_pos = config["initial_orn"], self.config["initial_pos"]
        if config["display_ui"]:
            #self.physicsClientId = p.connect(p.DIRECT)
            self.physicsClientId = p.connect(p.GUI, "--opengl2")
            p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        elif (self.gui):
            self.physicsClientId = p.connect(p.GUI, "--opengl2")
        else:
            # Headless simulation: no visualizer window.
            self.physicsClientId = p.connect(p.DIRECT)
        self.camera = Camera()
        self._seed()
        # Default debug-camera placement used by _render().
        self._cam_dist = 3
        self._cam_yaw = 0
        self._cam_pitch = -30
        self.scene_type = scene_type
        self.scene = None
    # NOTE(review): this definition is shadowed by the second _close() defined
    # further down in this class; as written it is dead code.
    def _close(self):
        p.disconnect()
    def parse_config(self, config):
        """Load a YAML configuration file and return it as a dict."""
        with open(config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input; consider yaml.safe_load.
            config_data = yaml.load(f)
        return config_data
    def create_scene(self):
        """Create the scene (once) per self.scene_type and attach it to the robot."""
        if self.scene is not None:
            return
        if self.scene_type == "stadium":
            self.scene = self.create_single_player_stadium_scene()
        elif self.scene_type == "building":
            self.scene = self.create_single_player_building_scene()
        else:
            raise AssertionError()
        self.robot.scene = self.scene
    def create_single_player_building_scene(self):
        # Single-player scene backed by the scanned building model self.model_id.
        return SinglePlayerBuildingScene(self.robot, model_id=self.model_id, gravity=9.8, timestep=self.timestep, frame_skip=self.frame_skip, env=self)
    def create_single_player_stadium_scene(self):
        # Single-player scene backed by the synthetic stadium model.
        return SinglePlayerStadiumScene(self.robot, gravity=9.8, timestep=self.timestep, frame_skip=self.frame_skip, env=self)
    def configure(self, args):
        # Forward command-line-style arguments to the robot.
        self.robot.args = args
    def _seed(self, seed=None):
        """Seed the gym RNG; returns the list of seeds used (old gym API)."""
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]
    def _reset(self):
        """Reset robot and scene; returns the robot's initial observation."""
        assert self.robot is not None, "Pleases introduce robot to environment before resetting."
        p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)
        p.configureDebugVisualizer(p.COV_ENABLE_KEYBOARD_SHORTCUTS, 0)
        p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
        # Per-episode bookkeeping.
        self.frame = 0
        self.done = 0
        self.reward = 0
        dump = 0  # NOTE(review): unused local
        state = self.robot.reset()
        self.scene.episode_restart()
        return state
    def _render(self, mode, close):
        """Render an RGB frame from the third-person debug camera.

        NOTE(review): assumes self._render_width/_render_height are set by a
        subclass; they are not defined in this base class -- confirm.
        """
        base_pos=[0,0,0]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos = self.robot.body_xyz
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL
            )
        rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
        if close: return None
        # Drop the alpha channel.
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
    def render_physics(self):
        """Render a frame from the camera tracking the robot (TINY renderer)."""
        # NOTE(review): self.robot_tracking_id is not assigned in this class;
        # presumably set by a subclass -- confirm.
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(
                width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                renderer=p.ER_TINY_RENDERER
                )
        rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
    def render_map(self):
        """Render a top-down (bird's-eye) view centered above the robot."""
        base_pos=[0, 0, -3]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos[0] = self.robot.body_xyz[0]
                base_pos[1] = self.robot.body_xyz[1]
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL
            )
        rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
    def get_action_dim(self):
        # Action dimensionality = number of controllable joints on the robot.
        return len(self.robot.ordered_joints)
    def get_observation_dim(self):
        # Placeholder; subclasses are expected to override.
        return 1
    def _close(self):
        """Disconnect from the pybullet server (idempotent)."""
        if (self.physicsClientId>=0):
            p.disconnect(self.physicsClientId)
            self.physicsClientId = -1
    def set_window(self, posX, posY, sizeX, sizeY):
        """Move/resize the Bullet GUI window and retitle it (needs wmctrl and xdotool)."""
        values = {
            'name': "Robot",
            'gravity': 0,
            'posX': int(posX),
            'posY': int(posY),
            'sizeX': int(sizeX),
            'sizeY': int(sizeY)
        }
        cmd = 'wmctrl -r \"Bullet Physics\" -e {gravity},{posX},{posY},{sizeX},{sizeY}'.format(**values)
        os.system(cmd)
        cmd = "xdotool search --name \"Bullet Physics\" set_window --name \"Robot's world\""
        os.system(cmd)
class Camera:
def __init__(self):
pass
def move_and_look_at(self,i,j,k,x,y,z):
lookat = [x,y,z]
distance = 10
yaw = 10
|
import datajoint as dj
import pathlib
import numpy as np
import pandas as pd
import re
from datetime import datetime
from ephys_loaders import neuropixels
def get_ephys_root_data_dir():
    """Return the configured ephys root data directory as a Path, or None
    when `custom.ephys_data_dir` is absent from the DataJoint config."""
    configured = dj.config.get('custom', {}).get('ephys_data_dir', None)
    if configured:
        return pathlib.Path(configured)
    return None
def get_ephys_probe_data_dir(probe_key):
    """Locate the raw-data directory for one probe insertion.

    probe_key must contain 'subject', 'insertion_number' and
    'session_datetime'. Returns the parent directory of the matching
    Neuropixels .ap.meta file, or None when no file matches or the recording
    start differs from the session start by more than two minutes.
    """
    root_dir = get_ephys_root_data_dir()
    subject = probe_key['subject']
    probe_number = probe_key['insertion_number']
    date_token = probe_key['session_datetime'].strftime('%m%d%y')
    dir_pattern = '*{}_{}*_imec{}'.format(subject, date_token, probe_number)
    meta_pattern = '{}_{}*imec{}.ap.meta'.format(subject, date_token, probe_number)
    try:
        npx_meta_fp = next(root_dir.rglob('/'.join((dir_pattern, meta_pattern))))
    except StopIteration:
        return None
    npx_meta = neuropixels.NeuropixelsMeta(npx_meta_fp)
    # Ensure the behavior-start/ephys-start difference is at most 2 minutes --
    # this disambiguates multiple sessions recorded on the same day.
    start_delta = abs((npx_meta.recording_time
                       - probe_key['session_datetime']).total_seconds())
    if start_delta <= 120:
        return npx_meta_fp.parent
    return None
# Kilosort output marker files, in priority order: prefer the QC'ed output
# (mean_waveforms.npy) over the original sorting (spike_times.npy).
ks2specs = ('mean_waveforms.npy', 'spike_times.npy')

def get_ks_data_dir(probe_key):
    """Return the directory holding the Kilosort output for this probe.

    Raises ValueError when more than one candidate output directory is found
    under the probe's data directory.
    """
    probe_dir = get_ephys_probe_data_dir(probe_key)
    ks2spec = ks2specs[0] if len(list(probe_dir.rglob(ks2specs[0]))) > 0 else ks2specs[1]
    ks2files = [f.parent for f in probe_dir.rglob(ks2spec)]
    if len(ks2files) > 1:
        # BUG FIX: Path.as_posix was misspelled 'as_poxis', which raised
        # AttributeError instead of the intended ValueError.
        raise ValueError('Multiple Kilosort outputs found at: {}'.format(
            [x.as_posix() for x in ks2files]))
    return ks2files[0]
def extract_clustering_info(cluster_output_dir):
    """Inspect a Kilosort/phy output directory for curation and QC evidence.

    Parameters
    ----------
    cluster_output_dir : pathlib.Path
        Directory containing the clustering output files.

    Returns
    -------
    (creation_time, is_curated, is_qc) : (datetime, bool, bool)
        creation_time -- best estimate of when the clustering was produced;
        is_curated -- whether phy.log records manual curation operations;
        is_qc -- whether a quality-control metrics.csv is present.
    """
    creation_time = None
    phy_curation_indicators = ['Merge clusters', 'Split cluster', 'Change metadata_group']
    # ---- Manual curation? ----
    phylog_fp = cluster_output_dir / 'phy.log'
    if phylog_fp.exists():
        phylog = pd.read_fwf(phylog_fp, colspecs=[(6, 40), (41, 250)])
        phylog.columns = ['meta', 'detail']
        curation_row = [bool(re.match('|'.join(phy_curation_indicators), str(s)))
                        for s in phylog.detail]
        is_curated = bool(np.any(curation_row))
        if creation_time is None and is_curated:
            # Timestamp of the last curation operation found in the log.
            row_meta = phylog.meta[np.where(curation_row)[0].max()]
            # BUG FIX: the pattern used \d{2} for the year while the strptime
            # format below expects a 4-digit %Y year, so a matched timestamp
            # was parsed with a bogus two-digit year. (Assumes phy.log carries
            # 4-digit years -- confirm against a real log file.)
            datetime_str = re.search(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}', row_meta)
            if datetime_str:
                creation_time = datetime.strptime(datetime_str.group(), '%Y-%m-%d %H:%M:%S')
            else:
                # Fall back to the log file's ctime, refined with any bare
                # time-of-day present on the curation line.
                creation_time = datetime.fromtimestamp(phylog_fp.stat().st_ctime)
                time_str = re.search(r'\d{2}:\d{2}:\d{2}', row_meta)
                if time_str:
                    creation_time = datetime.combine(
                        creation_time.date(),
                        datetime.strptime(time_str.group(), '%H:%M:%S').time())
    else:
        is_curated = False
    # ---- Quality control? ----
    metric_fp = cluster_output_dir / 'metrics.csv'
    if metric_fp.exists():
        is_qc = True
        if creation_time is None:
            creation_time = datetime.fromtimestamp(metric_fp.stat().st_ctime)
    else:
        is_qc = False
    if creation_time is None:
        # Last resort: the ctime of the spike_times.npy output itself.
        spk_fp = next(cluster_output_dir.glob('spike_times.npy'))
        creation_time = datetime.fromtimestamp(spk_fp.stat().st_ctime)
    return creation_time, is_curated, is_qc
|
<reponame>manvhah/sporco<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (C) 2019 by <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Interpolation and regression functions."""
from __future__ import absolute_import, division
from builtins import range
import warnings
import numpy as np
import scipy.optimize as sco
from scipy.interpolate import interp2d, griddata
from sporco._util import renamed_function
__author__ = """<NAME> <<EMAIL>>"""
def bilinear_demosaic(img):
    """Demosaicing by bilinear interpolation.

    The input is assumed to be an image formed with a `colour filter
    array <https://en.wikipedia.org/wiki/Color_filter_array>`__ with the
    pattern

    ::

      B G B G ...
      G R G R ...
      B G B G ...
      G R G R ...
      . . . . .
      . . . . .
      . . . . .

    Parameters
    ----------
    img : 2d ndarray
      A 2d array representing an image formed with a colour filter array

    Returns
    -------
    imgd : 3d ndarray
      Demosaiced 3d image
    """
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # SciPy >= 1.14; this function will need porting (e.g. to
    # RegularGridInterpolator) to run on current SciPy.
    # Interpolate red channel: red samples live at odd rows and odd columns.
    x = range(1, img.shape[1], 2)
    y = range(1, img.shape[0], 2)
    fi = interp2d(x, y, img[1::2, 1::2])
    sr = fi(range(0, img.shape[1]), range(0, img.shape[0]))
    # Interpolate green channel. We can't use `interp2d` here because
    # the green channel isn't arranged in a simple grid pattern. Since
    # the locations of the green samples can be represented as the union
    # of two grids, we use `griddata` with an array of coordinates
    # constructed by stacking the coordinates of these two grids
    x0, y0 = np.mgrid[0:img.shape[0]:2, 1:img.shape[1]:2]
    x1, y1 = np.mgrid[1:img.shape[0]:2, 0:img.shape[1]:2]
    xy01 = np.vstack((np.hstack((x0.ravel().T, x1.ravel().T)),
                      np.hstack((y0.ravel().T, y1.ravel().T)))).T
    z = np.hstack((img[0::2, 1::2].ravel(), img[1::2, 0::2].ravel()))
    x2, y2 = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    xy2 = np.vstack((x2.ravel(), y2.ravel())).T
    sg = griddata(xy01, z, xy2, method='linear').reshape(img.shape[0:2])
    # The four image corners lie outside the convex hull of the green samples,
    # so `griddata` leaves them NaN; fill each from its two nearest neighbours.
    if np.isnan(sg[0, 0]):
        sg[0, 0] = (sg[0, 1] + sg[1, 0]) / 2.0
    if np.isnan(sg[0, -1]):
        sg[0, -1] = (sg[0, -2] + sg[1, -1]) / 2.0
    if np.isnan(sg[-1, 0]):
        sg[-1, 0] = (sg[-2, 0] + sg[-1, 1]) / 2.0
    if np.isnan(sg[-1, -1]):
        sg[-1, -1] = (sg[-2, -1] + sg[-1, -2]) / 2.0
    # Interpolate blue channel: blue samples live at even rows and even columns.
    x = range(0, img.shape[1], 2)
    y = range(0, img.shape[0], 2)
    fi = interp2d(x, y, img[0::2, 0::2])
    sb = fi(range(0, img.shape[1]), range(0, img.shape[0]))
    # Stack the three interpolated channels into an RGB image.
    return np.dstack((sr, sg, sb))
@renamed_function(depname='lstabsdev', depmod='sporco.util')
def lstabsdev(A, b):
    r"""Least absolute deviations (LAD) linear regression.

    Solve the linear regression problem

    .. math::
      \mathrm{argmin}_\mathbf{x} \; \left\| A \mathbf{x} - \mathbf{b}
      \right\|_1 \;\;.

    The interface is similar to that of :func:`numpy.linalg.lstsq` in
    that `np.linalg.lstsq(A, b)` solves the same linear regression
    problem, but with a least squares rather than a least absolute
    deviations objective. Unlike :func:`numpy.linalg.lstsq`, `b` is
    required to be a 1-d array. The solution is obtained via `mapping to
    a linear program <https://stats.stackexchange.com/a/12564>`__.

    Parameters
    ----------
    A : (M, N) array_like
      Regression coefficient matrix
    b : (M,) array_like
      Regression ordinate / dependent variable

    Returns
    -------
    x : (N,) ndarray
      Least absolute deviations solution

    Raises
    ------
    ValueError
      If the underlying linear-program solve does not succeed.
    """
    M, N = A.shape
    # LP variables: M auxiliary bounds t on |Ax - b| followed by the N entries
    # of x; the objective minimises sum(t).
    c = np.zeros((M + N,))
    c[0:M] = 1.0
    I = np.identity(M)
    # Encode -t <= Ax - b <= t as A_ub @ [t; x] <= b_ub.
    A_ub = np.hstack((np.vstack((-I, -I)), np.vstack((-A, A))))
    b_ub = np.hstack((-b, b))
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=sco.OptimizeWarning)
        res = sco.linprog(c, A_ub, b_ub)
    # FIX: test the flag truthily rather than with the identity comparison
    # `is False`, which misbehaves if the solver returns a non-bool flag
    # (e.g. numpy.bool_).
    if not res.success:
        raise ValueError('scipy.optimize.linprog failed with status %d' %
                         res.status)
    # Discard the auxiliary t variables; the solution is the x part.
    return res.x[M:]
@renamed_function(depname='lstmaxdev', depmod='sporco.util')
def lstmaxdev(A, b):
    r"""Least maximum deviation (least maximum error) linear regression.

    Solve the linear regression problem

    .. math::
      \mathrm{argmin}_\mathbf{x} \; \left\| A \mathbf{x} - \mathbf{b}
      \right\|_{\infty} \;\;.

    The interface is similar to that of :func:`numpy.linalg.lstsq` in
    that `np.linalg.lstsq(A, b)` solves the same linear regression
    problem, but with a least squares rather than a least maximum
    error objective. Unlike :func:`numpy.linalg.lstsq`, `b` is required
    to be a 1-d array. The solution is obtained via `mapping to a linear
    program <https://stats.stackexchange.com/a/12564>`__.

    Parameters
    ----------
    A : (M, N) array_like
      Regression coefficient matrix
    b : (M,) array_like
      Regression ordinate / dependent variable

    Returns
    -------
    x : (N,) ndarray
      Least maximum deviation solution

    Raises
    ------
    ValueError
      If the underlying linear-program solve does not succeed.
    """
    M, N = A.shape
    # LP variables: a single bound t on max|Ax - b| followed by the N entries
    # of x; the objective minimises t.
    c = np.zeros((N + 1,))
    c[0] = 1.0
    one = np.ones((M, 1))
    # Encode -t <= Ax - b <= t (per row) as A_ub @ [t; x] <= b_ub.
    A_ub = np.hstack((np.vstack((-one, -one)), np.vstack((-A, A))))
    b_ub = np.hstack((-b, b))
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=sco.OptimizeWarning)
        res = sco.linprog(c, A_ub, b_ub)
    # FIX: test the flag truthily rather than with the identity comparison
    # `is False` (see lstabsdev).
    if not res.success:
        raise ValueError('scipy.optimize.linprog failed with status %d' %
                         res.status)
    # Discard the auxiliary t variable; the solution is the x part.
    return res.x[1:]
|
# Copyright 2018 The Bazel Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up the "infra" and "lambda" CloudFormation stacks.
These stacks are the foundations for the remote build system.
"""
import os
import time
import json
import random
import string
import boto3
from botocore.exceptions import ClientError
import rbs.common.runfiles as runfiles
import rbs.common.aws_util as aws_util
def template_body(filename):
    """Returns the body of a CloudFormation template bundled as runfiles data."""
    resource = "cfn/" + filename
    return runfiles.get_data(resource,
                             pkg="setup",
                             prefix="bazel_cloud_infra/rbs/local/")
class CfnStackDesc(object):
  """Represents a CloudFormation stack description.

  Thin wrapper around the raw description dict returned by
  `describe_stacks`, exposing convenient accessors.
  """

  def __init__(self, desc):
    self.desc = desc

  def outputs(self):
    """Returns a dictionary of output keys to output values."""
    return {item["OutputKey"]: item["OutputValue"]
            for item in self.desc["Outputs"]}

  def tags(self):
    """Returns a dictionary of tag keys to tag values."""
    return {item["Key"]: item["Value"] for item in self.desc["Tags"]}

  def complete(self):
    """Whether the stack is in "complete" mode (meaning that its state is stable)."""
    return self.desc["StackStatus"].endswith("_COMPLETE")

  def __getitem__(self, key):
    return self.desc[key]

  def __str__(self):
    return "%s (status: %s)" % (self.desc["StackName"],
                                self.desc["StackStatus"])
class CfnStack(object):
  """Represents a CloudFormation stack and the operations on it."""

  def __init__(self, cfn, name):
    self.cfn = cfn          # boto3 CloudFormation client
    self.stack_name = name  # name of the stack this object manages

  def describe(self):
    """Returns a `CfnStackDesc` object to describe the stack."""
    stacks = self.cfn.describe_stacks(StackName=self.stack_name)["Stacks"]
    if not stacks:
      raise Exception("stack '%s' could not be found" % self.stack_name)
    return CfnStackDesc(stacks[0])

  def update_or_create(self, **kwargs):
    """Updates or creates the stack via a change set.

    If the stack has already been created, the stack is updated.
    It is not an error if `update_or_create` is called but the stack
    is up-to-date (returns None in that case).
    """
    try:
      change_set_id = self.cfn.create_change_set(
          StackName=self.stack_name,
          ChangeSetType='UPDATE',
          ChangeSetName='%s-ChangeSet-%s' % (self.stack_name,
                                             aws_util.random_string()),
          **kwargs)["Id"]
    except ClientError as e:
      # Only fall back to CREATE when UPDATE failed because the stack does
      # not exist yet; re-raise any other client error.
      if "does not exist" not in e.response["Error"]["Message"]:
        raise e
      change_set_id = self.cfn.create_change_set(
          StackName=self.stack_name,
          ChangeSetType='CREATE',
          ChangeSetName='%s-ChangeSet-%s' % (self.stack_name,
                                             aws_util.random_string()),
          **kwargs)["Id"]
    # Poll the change set until it is ready, then execute it.
    while True:
      desc = self.cfn.describe_change_set(ChangeSetName=change_set_id)
      status = desc["Status"]
      status_reason = desc.get("StatusReason", "<empty info>")
      # BUG FIX (Py3 compatibility): Py2 print statements converted to the
      # single-argument function form, which behaves identically on Py2.
      print("%s: change set status: %s - %s" % (self.stack_name, status,
                                                status_reason))
      if status == "CREATE_COMPLETE":
        break
      if status == "FAILED":
        # A "no changes" failure means the stack is already up-to-date.
        if ("The submitted information didn't contain changes." in status_reason
            or "No updates are to be performed." in status_reason):
          print("%s: up-to-date" % self.stack_name)
          return None
        raise Exception("Change set in unexpected state for stack %s: %s - %s" %
                        (self.stack_name, status, status_reason))
      time.sleep(2)
    return self.cfn.execute_change_set(ChangeSetName=change_set_id)

  def wait_for_stack(self):
    """Waits for a stack to be "complete"/"stable" and returns its description."""
    while True:
      desc = self.describe()
      if desc.complete():
        return desc
      print(str(desc))
      time.sleep(2)
def setup_infra(lambda_config, cfn):
  """Sets up the "infra" CloudFormation stack and returns its outputs.

  The "infra" stack represents the underlying infrastructure and requires heightened
  privilege for setting up VPCs, security groups and roles.
  """
  infra_stack = CfnStack(cfn, name=lambda_config["stacks"]["infra"])
  # BUG FIX (Py3 compatibility): dict.keys() returns a view in Python 3,
  # which is not subscriptable; materialize it as a list first.
  vpc_keys = list(lambda_config["vpc"].keys())
  if len(vpc_keys) != 1:
    raise Exception("invalid vpc keys: %s" % vpc_keys)
  parameters = [
      {
          "ParameterKey": "LambdaFunctionName",
          "ParameterValue": lambda_config["lambda"]["function_name"],
      },
      {
          "ParameterKey": "ServerStack",
          "ParameterValue": lambda_config["stacks"]["server"],
      },
      {
          "ParameterKey": "WorkersStack",
          "ParameterValue": lambda_config["stacks"]["workers"],
      },
  ]
  # The "vpc" config entry either asks for a new VPC ("new") or reuses an
  # existing one ("existing"); each variant carries its own parameters.
  if vpc_keys[0] == "new":
    parameters += [
        {
            "ParameterKey": "VpcCIDR",
            "ParameterValue": lambda_config["vpc"]["new"]["vpc_cidr"],
        },
        {
            "ParameterKey":
                "PublicSubnet1CIDR",
            "ParameterValue":
                lambda_config["vpc"]["new"]["public_subnet1_cidr"],
        },
    ]
  elif vpc_keys[0] == "existing":
    parameters += [
        {
            "ParameterKey": "VpcID",
            "ParameterValue": lambda_config["vpc"]["existing"]["vpc_id"],
        },
        {
            "ParameterKey":
                "PublicSubnet1ID",
            "ParameterValue":
                lambda_config["vpc"]["existing"]["public_subnet1_id"],
        },
    ]
  else:
    raise Exception("invalid vpc type '%s'" % vpc_keys[0])
  # Optional simple-auth credentials object stored in S3.
  if lambda_config.get("auth") and lambda_config["auth"].get("simple"):
    parameters += [
        {
            "ParameterKey":
                "SimpleAuthS3ObjectArn",
            "ParameterValue":
                "arn:aws:s3:::%s/%s" % (
                    lambda_config["auth"]["simple"]["bucket"],
                    lambda_config["auth"]["simple"]["key"],
                ),
        },
    ]
  infra_stack.update_or_create(
      TemplateBody=template_body("infra.yaml"),
      Parameters=parameters,
      Capabilities=["CAPABILITY_IAM"])
  return infra_stack.wait_for_stack().outputs()
def get_lambda_zip():
  """Returns the content of the lambda zip bundled as runfiles data."""
  archive = runfiles.get_data("archive.zip",
                              pkg="setup",
                              prefix="bazel_cloud_infra/rbs/local/")
  return archive
def setup_lambda_code(lambda_config, s3):
  """Uploads the code for the lambda function; returns the S3 object version."""
  bucket = lambda_config["lambda"]["code_bucket"]
  key = lambda_config["lambda"]["code_key"]
  return aws_util.maybe_put_s3_object(
      s3,
      bucket=bucket,
      key=key,
      content=get_lambda_zip(),
      desc="Lambda code")
def main_setup_lambda(lambda_config, cfn, s3, lambda_role):
  """Sets up the "lambda" CloudFormation stack.

  This stack depends on the "infra" CloudFormation stack and sets up the backend
  for the remote build system API.
  """
  code_version = setup_lambda_code(lambda_config, s3)
  lambda_stack = CfnStack(cfn, name=lambda_config["stacks"]["lambda"])
  # (key, value) pairs expanded below into the CloudFormation parameter shape.
  parameter_pairs = [
      ("Role", lambda_role),
      ("CodeS3Bucket", lambda_config["lambda"]["code_bucket"]),
      ("CodeS3Key", lambda_config["lambda"]["code_key"]),
      ("CodeS3ObjectVersion", code_version),
      ("FunctionName", lambda_config["lambda"]["function_name"]),
      ("LambdaConfig", json.dumps(lambda_config, sort_keys=True)),
      ("Debug", os.getenv("INFRA_DEBUG", "false")),
  ]
  lambda_stack.update_or_create(
      TemplateBody=template_body("lambda.yaml"),
      Parameters=[{"ParameterKey": key, "ParameterValue": value}
                  for key, value in parameter_pairs])
  return lambda_stack.wait_for_stack().outputs()
def infra_endpoint(restapi_id, region, stage):
  """Returns the API Gateway endpoint URL for the remote build system."""
  template = ("https://{restapi_id}.execute-api.{region}.amazonaws.com"
              "/{stage}/ControlBuildInfra")
  return template.format(restapi_id=restapi_id, region=region, stage=stage)
def maybe_create_log_group(log_group, logs):
  """Creates the given log group if it does not already exist."""
  try:
    logs.create_log_group(logGroupName=log_group)
  except ClientError as e:
    # "Already exists" is expected and benign; re-raise anything else.
    if "The specified log group already exists" not in e.response["Error"][
        "Message"]:
      raise e
    # BUG FIX (Py3 compatibility): Py2 print statement converted to the
    # single-argument function form, which behaves identically on Py2.
    print("Log group %s: already exists" % log_group)
def setup(lambda_config):
    """Sets up the "infra" and "lambda" stacks.

    These stacks are the foundations for the remote build system.

    :param lambda_config: base configuration dict; must contain "region",
        "awslogs_group" and the keys consumed by setup_infra /
        main_setup_lambda.
    :return: a new configuration dict -- a copy of `lambda_config` augmented
        with the "cluster" name and the "infra_endpoint" URL. The input
        dict is not mutated.
    """
    cfn = boto3.client('cloudformation', region_name=lambda_config["region"])
    s3 = boto3.client('s3', region_name=lambda_config["region"])
    logs = boto3.client('logs', region_name=lambda_config["region"])
    maybe_create_log_group(lambda_config["awslogs_group"], logs)
    infra_stack_outputs = setup_infra(lambda_config, cfn)
    lambda_role = infra_stack_outputs["LambdaRole"]
    # Work on a copy so the caller's dict is left untouched.
    next_lambda_config = {}
    next_lambda_config.update(lambda_config)
    next_lambda_config.update({
        "cluster": infra_stack_outputs["ClusterName"],
    })
    lambda_stack_outputs = main_setup_lambda(next_lambda_config, cfn, s3,
                                             lambda_role)
    next_lambda_config.update({
        "infra_endpoint":
            infra_endpoint(
                restapi_id=lambda_stack_outputs["RestapiId"],
                region=lambda_config["region"],
                stage="Prod"),
    })
    return next_lambda_config
def teardown(lambda_config, cfn=None):
    """Tears down all the stacks associated with the remote build system.

    The remote configuration file is left intact.

    Stacks are deleted in reverse dependency order. A delete rejected
    because the stack is mid-transition is retried every 2 seconds; any
    other client error flags `err` and moves on to the next stack.

    :param lambda_config: configuration dict ("region" and "stacks" used).
    :param cfn: optional pre-built CloudFormation client (created on demand).
    :return: tuple (new config dict without "infra_endpoint"/"cluster",
        err flag -- True when at least one deletion could not be requested).
    """
    cfn = cfn or boto3.client(
        'cloudformation', region_name=lambda_config["region"])
    # TODO: parallel delete
    err = False
    for stack in ["workers", "server", "lambda", "infra"]:
        while True:
            try:
                cfn.delete_stack(StackName=lambda_config["stacks"][stack])
                break
            except ClientError as e:
                print e
                if "cannot be deleted while in status" not in e.response["Error"][
                        "Message"]:
                    err = True
                    break
                time.sleep(2)
    # Strip the keys that only make sense while the system is deployed.
    next_lambda_config = {}
    next_lambda_config.update(lambda_config)
    del next_lambda_config["infra_endpoint"]
    del next_lambda_config["cluster"]
    return (next_lambda_config, err)
|
<filename>mian/analysis/linear_regression.py<gh_stars>1-10
# ===========================================
#
# mian Analysis Data Mining/ML Library
# @author: tbj128
#
# ===========================================
#
# Imports
#
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.linear_model import ElasticNet, SGDRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from mian.model.otu_table import OTUTable
import numpy as np
import random
class LinearRegression(object):
    """ElasticNet linear regression of a numeric sample-metadata variable on
    the OTU abundance table, reporting MAE/MSE either via k-fold
    cross-validation or on a train/validation/test split."""

    def run(self, user_request):
        """Loads the filtered OTU table and the target metadata column for
        this project, then delegates to analyse().

        :param user_request: request object carrying user/project ids and the
            analysis parameters (via get_custom_attr).
        :return: dict of rounded MAE/MSE statistics (see analyse()).
        """
        table = OTUTable(user_request.user_id, user_request.pid)
        otu_table, headers, sample_labels = table.get_table_after_filtering_and_aggregation_and_low_count_exclusion(user_request)
        expvar = user_request.get_custom_attr("expvar")
        metadata_vals = table.get_sample_metadata().get_metadata_column_table_order(sample_labels, expvar)
        return self.analyse(user_request, otu_table, headers, metadata_vals)

    def analyse(self, user_request, otu_table, headers, metadata_vals):
        """Fits the ElasticNet model and reports error statistics.

        :param user_request: request providing crossValidate,
            crossValidateFolds, fixTraining, trainingProportion, seed,
            mixingRatio and maxIterations custom attributes.
        :param otu_table: abundance matrix (CSR sparse when level == -1).
        :param headers: column names for the matrix.
        :param metadata_vals: regression targets, one per sample.
        :return: dict of rounded MAE/MSE values -- cross-validated when
            crossValidate == "full", otherwise train/val/test errors plus
            split sizes and the seed used.
        """
        cross_validate_set = user_request.get_custom_attr("crossValidate")
        cross_validate_folds = int(user_request.get_custom_attr("crossValidateFolds"))
        fix_training = user_request.get_custom_attr("fixTraining")
        # Fix: cast to float -- custom attributes arrive as strings (the other
        # numeric attrs are cast) and sklearn's train_test_split requires a
        # numeric train_size.
        training_proportion = float(user_request.get_custom_attr("trainingProportion"))
        # Fix: the original used `is not ""` (identity comparison with a
        # string literal, implementation-dependent); compare by value.
        seed_attr = user_request.get_custom_attr("seed")
        seed = int(seed_attr) if seed_attr != "" else random.randint(0, 100000)
        mixing_ratio = float(user_request.get_custom_attr("mixingRatio"))
        max_iterations = int(user_request.get_custom_attr("maxIterations"))

        if int(user_request.level) == -1:
            # OTU tables are returned as a CSR matrix
            X = pd.DataFrame.sparse.from_spmatrix(otu_table, columns=headers, index=range(otu_table.shape[0]))
        else:
            X = pd.DataFrame(otu_table, columns=headers, index=range(otu_table.shape[0]))
        Y = np.array(metadata_vals)

        def performCrossValidationForAUC(X_cv, metadata_vals_cv, Y_cv):
            # k-fold CV: average MAE/MSE (and their std) across folds.
            cv = KFold(n_splits=cross_validate_folds, shuffle=True, random_state=seed)
            classifier = ElasticNet(l1_ratio=mixing_ratio, max_iter=max_iterations)

            test_maes = []
            test_mses = []
            for i, (train, test) in enumerate(cv.split(X_cv, metadata_vals_cv)):
                classifier.fit(X_cv[X_cv.index.isin(train)], Y_cv[train])
                preds = classifier.predict(X_cv[X_cv.index.isin(test)])
                test_maes.append(mean_absolute_error(Y_cv[test].astype(float), preds))
                test_mses.append(mean_squared_error(Y_cv[test].astype(float), preds))

            cv_obj = {
                "cv_mae": np.array(test_maes).mean(),
                "cv_mae_std": np.array(test_maes).std(),
                "cv_mse": np.array(test_mses).mean(),
                "cv_mse_std": np.array(test_mses).std()
            }
            return cv_obj

        if cross_validate_set == "full":
            cv_obj = performCrossValidationForAUC(X, metadata_vals, Y)
            return {
                "cv_mae": round(cv_obj["cv_mae"], 2),
                "cv_mae_std": round(cv_obj["cv_mae_std"], 2),
                "cv_mse": round(cv_obj["cv_mse"], 2),
                "cv_mse_std": round(cv_obj["cv_mse_std"], 2)
            }
        else:
            if fix_training == "yes":
                # Deterministic split via the user-supplied (or generated) seed.
                X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=training_proportion, random_state=seed)
                X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, train_size=0.5, random_state=seed)
            else:
                # Use a random seed each time (not recommended)
                X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=training_proportion)
                X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, train_size=0.5)

            # Impute missing abundances with the training-set column means.
            imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
            X_train = imp_mean.fit_transform(X_train)
            X_val = imp_mean.transform(X_val)
            X_test = imp_mean.transform(X_test)

            classifier = ElasticNet(l1_ratio=mixing_ratio, fit_intercept=True, max_iter=max_iterations)
            classifier.fit(X_train, y_train)

            preds_train = classifier.predict(X_train)
            train_mae = mean_absolute_error(y_train.astype(float), preds_train)
            train_mse = mean_squared_error(y_train.astype(float), preds_train)

            preds_val = classifier.predict(X_val)
            val_mae = mean_absolute_error(y_val.astype(float), preds_val)
            val_mse = mean_squared_error(y_val.astype(float), preds_val)

            preds_test = classifier.predict(X_test)
            test_mae = mean_absolute_error(y_test.astype(float), preds_test)
            test_mse = mean_squared_error(y_test.astype(float), preds_test)

            abundances_obj = {
                "train_mae": round(train_mae, 2),
                "train_mse": round(train_mse, 2),
                "val_mae": round(val_mae, 2),
                "val_mse": round(val_mse, 2),
                "test_mae": round(test_mae, 2),
                "test_mse": round(test_mse, 2),
                "train_size": X_train.shape,
                "val_size": X_val.shape,
                "test_size": X_test.shape,
                "seed": seed
            }
            return abundances_obj
|
#!/usr/bin/env python
# coding: utf-8
# # JupyterDash
# The `jupyter-dash` package makes it easy to develop Plotly Dash apps from the Jupyter Notebook and JupyterLab.
#
# Just replace the standard `dash.Dash` class with the `jupyter_dash.JupyterDash` subclass.
# In[21]:
port = 8050
# In[22]:
# in case we use Colab
#!pip install jupyter-dash
from jupyter_dash import JupyterDash
# In[23]:
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
# In[24]:
import plotly.graph_objs as go
from dash.dependencies import Input, Output
# In[25]:
import dash_bootstrap_components as dbc
# When running in JupyterHub or Binder, call the `infer_jupyter_config` function to detect the proxy configuration.
# In[26]:
#JupyterDash.infer_jupyter_proxy_config()
# Construct the app
# In[27]:
#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = JupyterDash(__name__, external_stylesheets=external_stylesheets)
# Create server variable with Flask server object for use with gunicorn
server = app.server

# In[28]:

# Shared color palette for the app's backgrounds and text.
colors = {
    'background': 'grey',
    'text': 'blue'
}

# Load and preprocess data

# In[29]:

df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')
#df
available_indicators = df[['Indicator Name']].copy()
available_indicators = available_indicators.drop_duplicates()
# Bare expression: a notebook cell echo, has no effect when run as a script.
available_indicators

# In[30]:

ii = [{'label': i, 'value': i} for i in available_indicators]
# NOTE(review): iterating a DataFrame yields its column names, so `ii` holds
# one entry per column -- it is not used below.
ii

# In[31]:

# Rebuild the indicator list as label/value records for the dropdown menus.
available_indicators = df[['Indicator Name']].copy()
available_indicators = available_indicators.drop_duplicates()
available_indicators['label'] = available_indicators['Indicator Name']
available_indicators['value'] = available_indicators['Indicator Name']
#value_default = 'Fertility rate, total (births per woman)'
#menu_id_var = 'crossfilter-xaxis-column'
#available_indicators = available_indicators.drop(['Indicator Name'], axis=1)
available_indicators_dict = available_indicators[['label', 'value']].to_dict('records')
available_indicators_dict

# In[32]:

# Options for the linear/log axis radio buttons.
linearorlog_dict = [{'label': i, 'value': i} for i in ['Linear', 'Log']]
linearorlog_dict

# Set up layout
# from layout.my_layout import app_layout

# In[33]:

# Year slider (bottom of the page); one mark per distinct year in the data.
slider_row = dbc.Row(dbc.Col(html.Div(dcc.Slider(
    id='crossfilter-year--slider',
    min=df['Year'].min(),
    max=df['Year'].max(),
    value=df['Year'].max(),
    marks={str(year): str(year) for year in df['Year'].unique()},
    step=None
)), style={'backgroundColor': colors['background']}, width=6))

#app.layout = slider_row
#app.run_server(mode="inline")

# In[34]:

# Indicator dropdowns (x/y) plus linear/log radio selectors.
top_menu_row = dbc.Container([
    dbc.Row([
        dbc.Col(html.Div(
            dcc.Dropdown(
                id='crossfilter-xaxis-column',
                options=available_indicators_dict,
                value='Fertility rate, total (births per woman)'
            )), width=6),
        dbc.Col(html.Div(
            dcc.Dropdown(
                id='crossfilter-yaxis-column',
                options=available_indicators_dict,
                value='Life expectancy at birth, total (years)'
            )), width=6)]),
    dbc.Row([
        dbc.Col(html.Div(dcc.RadioItems(
            id='crossfilter-xaxis-type',
            options=linearorlog_dict,
            value='Linear',
            labelStyle={'display': 'inline-block'}
        )), width=6),
        dbc.Col(html.Div(dcc.RadioItems(
            id='crossfilter-yaxis-type',
            options=linearorlog_dict,
            value='Linear',
            labelStyle={'display': 'inline-block'}
        )), width=6)])
], style={'backgroundColor': colors['background']}, fluid=True)

#app.layout = top_menu_row
#app.run_server(mode="inline")

# In[35]:

# Main scatter (left) and the two hover-driven time series (right).
graph_layout = dbc.Container([dbc.Row([
    dbc.Col([
        dcc.Graph(
            id='crossfilter-indicator-scatter',
            hoverData={'points': [{'customdata': 'Japan'}]}
        )
    ]),
    dbc.Col([
        dcc.Graph(id='x-time-series'),
        dcc.Graph(id='y-time-series'),
    ])])], style={'backgroundColor': colors['background']}, fluid=True)

#app.layout = graph_layout
#app.run_server(mode="inline")

# In[36]:

# Full page: menus on top, graphs in the middle, year slider at the bottom.
app_layout = dbc.Container([
    top_menu_row,
    graph_layout,
    slider_row
], style={'backgroundColor': colors['background']}, fluid=True)

# In[37]:

app.layout = app_layout
# In[38]:
def create_time_series(dff, axis_type, title):
    """Builds a small line-chart figure (plain dict form) for one indicator.

    :param dff: dataframe with 'Year' and 'Value' columns.
    :param axis_type: 'Linear' or 'Log' -- selects the y-axis scale.
    :param title: annotation text shown inside the plot area.
    :return: Plotly figure dict with a single lines+markers trace.
    """
    trace = dict(
        x=dff['Year'],
        y=dff['Value'],
        mode='lines+markers'
    )
    annotation = {
        'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
        'xref': 'paper', 'yref': 'paper', 'showarrow': False,
        'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
        'text': title
    }
    if axis_type == 'Linear':
        yaxis_scale = 'linear'
    else:
        yaxis_scale = 'log'
    layout = {
        'height': 225,
        'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
        'annotations': [annotation],
        'yaxis': {'type': yaxis_scale},
        'xaxis': {'showgrid': False}
    }
    return {'data': [trace], 'layout': layout}
# In[39]:
@app.callback(
    dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'),
    [dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-year--slider', 'value')])
def update_graph(xaxis_column_name, yaxis_column_name,
                 xaxis_type, yaxis_type,
                 year_value):
    """Redraws the main scatter plot when a dropdown, axis-type radio, or the
    year slider changes.

    Each marker is one country for the selected year: x/y are the values of
    the chosen indicators, and the country name is attached as hover text and
    as customdata (consumed by the time-series callbacks below).
    """
    # Restrict to the year chosen on the slider.
    dff = df[df['Year'] == year_value]
    return {
        'data': [dict(
            x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],
            y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],
            text=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            customdata=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            mode='markers',
            marker={
                'size': 25,
                'opacity': 0.7,
                'color': 'orange',
                'line': {'width': 2, 'color': 'purple'}
            }
        )],
        'layout': dict(
            xaxis={
                'title': xaxis_column_name,
                'color': 'red',
                'type': 'linear' if xaxis_type == 'Linear' else 'log'
            },
            yaxis={
                'title': yaxis_column_name,
                'color': 'pink',
                'type': 'linear' if yaxis_type == 'Linear' else 'log'
            },
            margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
            height=450,
            hovermode='closest',
            paper_bgcolor='blue',
            plot_bgcolor='green'
        )
    }
@app.callback(
    dash.dependencies.Output('x-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value')])
def update_y_timeseries(hoverData, xaxis_column_name, axis_type):
    """Redraws the x-indicator-over-time chart for the hovered country.

    NOTE(review): despite its name, this callback drives the 'x-time-series'
    graph (its sibling below drives 'y-time-series').
    """
    # Country carried in the scatter's customdata (set by update_graph).
    country_name = hoverData['points'][0]['customdata']
    dff = df[df['Country Name'] == country_name]
    dff = dff[dff['Indicator Name'] == xaxis_column_name]
    title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)
    return create_time_series(dff, axis_type, title)
@app.callback(
    dash.dependencies.Output('y-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value')])
def update_x_timeseries(hoverData, yaxis_column_name, axis_type):
    """Redraws the y-indicator-over-time chart for the hovered country.

    NOTE(review): name is swapped with update_y_timeseries above; this one
    drives the 'y-time-series' graph. Unlike its sibling it uses the plain
    indicator name as the title (no country prefix).
    """
    dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]
    dff = dff[dff['Indicator Name'] == yaxis_column_name]
    return create_time_series(dff, axis_type, yaxis_column_name)
# Serve the app using `run_server`. Unlike the standard `Dash.run_server` method, the `JupyterDash.run_server` method doesn't block execution of the notebook. It serves the app in a background thread, making it possible to run other notebook calculations while the app is running.
#
# This makes it possible to iteratively update the app without rerunning the potentially expensive data processing steps.

# In[40]:

# `mode='external'` prints a URL to open the app in a separate browser tab.
app.run_server(debug=True, port=port, mode='external')

# By default, `run_server` displays a URL that you can click on to open the app in a browser tab. The `mode` argument to `run_server` can be used to change this behavior. Setting `mode="inline"` will display the app directly in the notebook output cell.

# In[41]:

#app.run_server(mode="inline")

# When running in JupyterLab, with the `jupyterlab-dash` extension, setting `mode="jupyterlab"` will open the app in a tab in JupyterLab.
#
# ```python
# app.run_server(mode="jupyterlab")
# ```

# In[ ]:
|
<gh_stars>1-10
__author__ = 'jrx'
import numpy as np
from encoder.bit_density import pad_bit_array, convert_to_bit_density, convert_from_bit_density
from encoder.constants import BITS_PER_BYTE, BYTES_PER_UINT64
from encoder.utilities import add_length_info, strip_length_info
class XorEncoding:
    """Steganographic XOR block coding.

    Hides payload bits in the low-order bits of an image by flipping at most
    one carrier bit per block: each block of ``2 ** block_size`` image bits
    carries ``block_size`` payload bits, recovered as the XOR of the indices
    of all set bits in the block.
    """

    def __init__(self, block_size, intensity):
        """
        :param block_size: number of payload bits carried per block; the
            actual block length in image bits is 2 ** block_size.
        :param intensity: how many low-order bits of each color byte are
            used as carrier.
        """
        self.bits_per_block = block_size
        self.block_size = 2 ** self.bits_per_block
        self.intensity = intensity
        # Index template [0..block_size-1]; multiplying by a block's bit mask
        # and XOR-reducing yields the value currently stored in that block.
        self.block_template = np.arange(self.block_size, dtype=np.uint64)

    def data_capacity(self, img_size):
        """ Return data capacity for given algorithm and image -- in bytes """
        bits_in_image = img_size[0] * img_size[1] * img_size[2] * self.intensity
        number_of_blocks = bits_in_image // self.block_size
        # Reserve BYTES_PER_UINT64 for the length header added by _pack_data.
        return number_of_blocks * self.bits_per_block // BITS_PER_BYTE - BYTES_PER_UINT64

    def _pack_data(self, data):
        """
        :param data: 1-D uint8 raw data to pack
        :return: 1-D uint64 data packed
        """
        length_data = add_length_info(data)
        return convert_to_bit_density(length_data, self.bits_per_block)  # DATA TO ENCODE

    def _unpack_data(self, packed_data):
        """
        :param packed_data: 1-D uint64 packed data
        :return: 1-D uint8 array unpacked
        """
        payload_8bit = convert_from_bit_density(packed_data, self.bits_per_block)
        return strip_length_info(payload_8bit)

    def _encoding_algorithm(self, image_bits, packed_data):
        """
        Encode data in image bit data
        :param image_bits: flat 1-D uint8 bit array of bits in the image
        :param packed_data: flat 1-D uint64 array of packed data to encode
        :return: encoded 1-D uint8 bit array of bits in the image
        """
        padded_image_bits = pad_bit_array(image_bits, self.block_size)
        block_bits = padded_image_bits.reshape((padded_image_bits.shape[0] // self.block_size, self.block_size))
        # Value currently encoded by each block: XOR of the indices of its
        # set bits.
        current_data = np.bitwise_xor.reduce(block_bits * self.block_template, axis=1)
        original_payload_length = packed_data.shape[0]
        assert len(current_data) >= len(packed_data), "Image does not have enough capacity to save this data"
        # Bits to change
        diff = np.bitwise_xor(current_data[:original_payload_length], packed_data)
        # Flipping bit number `diff` in each block makes it encode the payload value.
        block_bits[np.arange(original_payload_length), diff] = 1 - block_bits[np.arange(original_payload_length), diff]
        return block_bits.flatten()[:image_bits.shape[0]]

    def _decoding_algorithm(self, image_bits):
        """
        Decode data from image bit data
        :param image_bits: A 1-d uint8 bit array of the image
        :return: decoded packed 1-D uint64 data from the image
        """
        padded_image_bits = pad_bit_array(image_bits, self.block_size)
        block_bits = padded_image_bits.reshape((padded_image_bits.shape[0] // self.block_size, self.block_size))
        return np.bitwise_xor.reduce(block_bits * self.block_template, axis=1)

    def encode(self, img_data, payload):
        """
        Encode image data
        :param img_data: w x h x 3 uint8 numpy array of image colors
        :param payload: 1-D uint8 raw payload data to encode
        :return: encoded w x h x 3 uint8 numpy array of image colors
        """
        # (A stray duplicate docstring literal was removed here.)
        packed_payload = self._pack_data(payload)

        # Prepare image data
        shape = img_data.shape
        initial_img_length = shape[0] * shape[1] * shape[2]
        raw_data = img_data.flatten()
        raw_bits = np.unpackbits(raw_data).reshape(initial_img_length, BITS_PER_BYTE)
        # Only the `intensity` low-order bits of each byte act as carrier.
        selected_raw_bits = raw_bits[:, -self.intensity:].flatten()

        # Actual encoding part
        output_bits = self._encoding_algorithm(selected_raw_bits, packed_payload)
        output_bits = output_bits.reshape(initial_img_length, self.intensity)
        raw_bits[:, -self.intensity:] = output_bits

        # Pack the result again
        return np.packbits(raw_bits).reshape(shape)

    def decode(self, img_data):
        """
        Decode image data
        :param img_data: w x h x 3 uint8 numpy array of image colors
        :return: 1-d uint8 data encoded in the image
        """
        # Prepare image data
        shape = img_data.shape
        initial_img_length = shape[0] * shape[1] * shape[2]
        raw_data = img_data.flatten()
        raw_bits = np.unpackbits(raw_data).reshape(initial_img_length, BITS_PER_BYTE)
        selected_raw_bits = raw_bits[:, -self.intensity:].flatten()

        packed_payload = self._decoding_algorithm(selected_raw_bits)
        return self._unpack_data(packed_payload)
|
<reponame>hvsuchitra/tv_tracker<gh_stars>0
import sys
import re
from pathlib import Path
from PyQt5.QtCore import QBasicTimer, QThread, pyqtSignal, QRegExp, Qt, QSize, QTimeLine
from PyQt5.QtWidgets import QApplication, QWidget, QToolTip, QPushButton, QGroupBox, QHBoxLayout, QGridLayout, \
QFileDialog, QMainWindow, QLineEdit, QLabel, QTableView, QTabWidget, QRadioButton, QFrame, QSystemTrayIcon, QStyle, \
QMenu, QSpacerItem, QSizePolicy, QMessageBox, QDialog, QScrollArea, QFrame, QVBoxLayout, QStackedWidget
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import QIcon, QFont, QRegExpValidator, QPalette, QPixmap, QImage, QBrush, QPainter
sys.path.append('../../')
from common.utils.api_utils import search_show, get_image, resource_base_url
from common.utils.utils import get_path, get_binary, make_dark, StackedWidget
class GetImageThread(QThread):
    """Worker thread that downloads a poster image and installs it into the
    given stacked widget / frame without blocking the UI thread.

    NOTE(review): the instantiation sites in Search.display are commented
    out, so this thread appears to be unused at the moment.
    """

    # Declared signal; run() currently never emits it.
    signal = pyqtSignal('PyQt_PyObject')

    def __init__(self, img_url, stack, show_frame, widget):
        """
        :param img_url: URL of the poster image to fetch.
        :param stack: StackedWidget that receives the poster QLabel.
        :param show_frame: frame whose background gets a darkened copy.
        :param widget: parent widget for the QLabel.
        """
        super().__init__()
        self.img_url = img_url
        self.stack = stack
        self.show_frame = show_frame
        self.widget = widget

    def run(self):
        print('running')
        img = get_image(self.img_url)
        pallete = QPalette()
        label = QLabel(self.widget)
        # Full-size poster, scaled to the card size (340 x 500).
        label.setPixmap(QPixmap.fromImage((QImage.fromData(img)).scaled(680 / 2, 1000 / 2, Qt.KeepAspectRatio)))
        self.stack.addWidget(label)
        self.widget.setAutoFillBackground(True)
        # A darkened copy of the poster becomes the frame's background.
        pallete.setBrush(QPalette.Background, QBrush(
            QPixmap.fromImage(make_dark(QImage.fromData(img), 160)).scaled(680 / 2, 1000 / 2, Qt.KeepAspectRatio)))
        self.show_frame.setPalette(pallete)
class Search(QMainWindow):
    """Search window: a query text field plus a scrollable grid of result
    cards, one card (StackedWidget) per TV show returned by the search API."""

    def __init__(self, main_window_pos, cur_user):
        """
        :param main_window_pos: position (QPoint-like) of the main window,
            used to place this window relative to it.
        :param cur_user: identifier of the currently logged-in user.
        """
        super().__init__()
        self.cur_user = cur_user
        # Result cards currently shown; cleared on every new search.
        self.stack_widgets = []
        self.main_window_pos = main_window_pos
        QToolTip.setFont(QFont('SansSerif', 10))
        self.setGeometry(self.main_window_pos.x(), (self.main_window_pos.y() + (self.main_window_pos.y() // 6)), 360,
                         480)
        self.setWindowTitle('Search')
        self.search_widget = QWidget()
        self.grid = QGridLayout()
        self.grid.setSpacing(10)
        self.grid.setAlignment(Qt.AlignTop)
        self.search_text_field = QLineEdit(self)
        self.search_text_field.setPlaceholderText('Enter the name of a TV show you want to track')
        # Pressing Enter runs the search.
        self.search_text_field.returnPressed.connect(self.display)
        self.grid.addWidget(self.search_text_field, 0, 0, 1, 3)
        self.search_widget.setLayout(self.grid)
        self.scroll_area = QScrollArea(self)
        self.scroll_area.setWidget(self.search_widget)
        self.setCentralWidget(self.scroll_area)
        self.scroll_area.setWidgetResizable(True)
        # Pre-filled query -- presumably left over from manual testing.
        self.search_text_field.setText('the mick')
        # self.showFullScreen()
        # self.show()

    def display(self):
        """Runs the search for the current query text and rebuilds the result
        grid, one poster card per show, three cards per row."""
        self.setGeometry(180, 25, 1068, 1088)
        # Drop the cards from any previous search.
        for show_frame in self.stack_widgets:
            self.grid.removeWidget(show_frame)
            show_frame.setParent(None)
        self.stack_widgets = []
        row = 0
        col = -1
        for pos_idx, show in enumerate(search_show(self.search_text_field.text().strip())):
            stack = StackedWidget(str(show['id']))
            show_frame = QFrame(self)
            show_frame.setFrameShape(QFrame.StyledPanel)
            show_frame.setFixedSize(680 // 2, 1000 // 2)
            self.stack_widgets.append(stack)
            stack.addWidget(show_frame)
            v_box = QVBoxLayout()
            ####
            scroll_area = QScrollArea()
            scroll_area.setWidget(show_frame)
            # scroll_area.setWidgetResizable(True)
            ###
            # get_image_thread=GetImageThread(show['image'],stack,show_frame,self)
            # get_image_thread.start()
            # show['image']=get_image_thread.img
            show['image'] = get_image(show['image'])  # this is an expensive line
            for key, val in show.items():
                if key == 'image':
                    # Poster: full image on one stack page, darkened copy as
                    # the card background.
                    pallete = QPalette()
                    label = QLabel(self)
                    label.setPixmap(QPixmap.fromImage(
                        (QImage.fromData(show['image'])).scaled(680 / 2, 1000 / 2, Qt.KeepAspectRatio)))
                    stack.addWidget(label)
                    self.setAutoFillBackground(True)
                    pallete.setBrush(QPalette.Background, QBrush(
                        QPixmap.fromImage(make_dark(QImage.fromData(val), 160)).scaled(680 / 2, 1000 / 2,
                                                                                      Qt.KeepAspectRatio)))
                    show_frame.setPalette(pallete)
                    # t.setPixmap(QPixmap.fromImage((QImage.fromData(val))).scaled(680/2,1000/2,Qt.KeepAspectRatio))
                # NOTE(review): this `if` is not chained (no elif) with the one
                # above, so the 'image' key also falls through to the `else`
                # branch below and adds an empty, unstyled label to the card.
                if key == 'id':
                    ...
                    # add to database?
                else:
                    # Remaining fields become styled text labels on the card.
                    val_label = QLabel(self)
                    if key == 'seriesName':
                        val_label.setStyleSheet('font-family:Apple Chancery;font-size:30px;color:#f07192')
                        val_label.setText(val)
                    elif key == 'status':
                        # Color-code the status: red=ended, green=continuing.
                        if val == 'Ended':
                            val_label.setStyleSheet('font-size:15px;color:#e63749')
                        elif val == 'Continuing':
                            val_label.setStyleSheet('font-size:15px;color:#6dfc93')
                        else:
                            val_label.setStyleSheet('font-size:15px;color:#48f0ad')
                        val_label.setText(f'Status : {val}')
                    elif key == 'overview':
                        # Truncate very long descriptions.
                        if val != 'Not Available' and len(val) > 500:
                            val = val[:500] + '...'
                        val_label.setStyleSheet('font-size:15px;color:white')
                        val_label.setText(val)
                    else:
                        if key == 'network':
                            val_label.setText(f'Network : {val}')
                        elif key == 'firstAired':
                            val_label.setText(f'First Aired : {val}')
                        val_label.setStyleSheet('font-size:15px;color:white')
                    val_label.setWordWrap(True)
                    val_label.setAlignment(Qt.AlignCenter)
                    v_box.addWidget(val_label)
            show_frame.setLayout(v_box)
            stack.addWidget(show_frame)
            # Advance the grid position: three cards per row.
            row, col = (row + 1, 0) if not pos_idx % 3 else (row, col + 1)
            # stack.installEventFilter(self)
            stack.clicked.connect(lambda: print(self.sender().objectName()))
            self.grid.addWidget(stack, row, col, 1, 1, Qt.AlignCenter)
|
import os
import sys
import grequests
import logfetch_base
import time
from termcolor import colored
import callbacks
# URI/path templates for the log-fetching API endpoints used below.
TASK_FORMAT = '/task/{0}'
S3LOGS_URI_FORMAT = '{0}/logs{1}'
REQUEST_FORMAT = '/request/{0}?excludeMetadata=true'
# Raw string so the regex escapes (\d, \.) are not treated as (invalid)
# string escapes; the pattern itself is unchanged.
FILE_REGEX = r"\d{13}-([^-]*)-\d{8,20}\.gz"

# Module-level download counters (mirrored in the callbacks module).
progress = 0
goal = 0
def download_s3_logs(args):
    """Finds, filters and downloads S3 log files for the requested task(s).

    :param args: parsed CLI namespace (dest, logtype, chunk_size, headers,
        num_parallel_fetches, silent, verbose, date-range fields, ...).
    :return: list of local paths ('dest/filename') for every matching log,
        whether it was just downloaded or already present on disk.
    """
    if not args.silent:
        sys.stderr.write(colored('Checking for S3 log files', 'cyan') + '\n')
    callbacks.progress = 0
    logs = logs_for_all_requests(args)
    async_requests = []
    all_logs = []
    for log_file in logs:
        # Local filename = last path component of the S3 key.
        filename = log_file['key'].rsplit("/", 1)[1]
        if log_file_in_date_range(args, log_file):
            if not args.logtype or log_matches(args, filename):
                logfetch_base.log(colored('Including log {0}'.format(filename), 'blue') + '\n', args, True)
                if not already_downloaded(args.dest, filename):
                    # Queue the download; the callback streams it to disk.
                    async_requests.append(
                        grequests.AsyncRequest('GET', log_file['getUrl'], callback=callbacks.generate_callback(log_file['getUrl'], args.dest, filename, args.chunk_size, args.verbose, args.silent), headers=args.headers)
                    )
                else:
                    logfetch_base.log(colored('Log already downloaded {0}'.format(filename), 'blue') + '\n', args, True)
                all_logs.append('{0}/{1}'.format(args.dest, filename))
            else:
                logfetch_base.log(colored('Excluding {0} log does not match logtype argument {1}'.format(filename, args.logtype), 'magenta') + '\n', args, True)
        else:
            logfetch_base.log(colored('Excluding {0}, not in date range'.format(filename), 'magenta') + '\n', args, True)
    if async_requests:
        logfetch_base.log(colored('Starting {0} S3 Downloads with {1} parallel fetches\n'.format(len(async_requests), args.num_parallel_fetches), 'cyan'), args, False)
        callbacks.goal = len(async_requests)
        grequests.map(async_requests, stream=True, size=args.num_parallel_fetches)
    else:
        logfetch_base.log(colored('No S3 logs to download\n', 'cyan'), args, False)
    logfetch_base.log(colored('All S3 logs up to date\n', 'cyan'), args, False)
    # Swap '.gz' entries for their already-unzipped counterparts on disk.
    all_logs = modify_download_list(all_logs)
    return all_logs
def log_file_in_date_range(args, log_file):
    """Checks whether a log file's time range overlaps the requested window.

    Timestamps arrive in epoch milliseconds (cf. the *1000 in
    logs_for_all_requests); the `[0:-3]` slice drops the millisecond digits
    to get seconds. Falls back to 'lastModified' when 'endTime' is missing.
    """
    if 'startTime' in log_file:
        if 'endTime' in log_file:
            return logfetch_base.date_range_overlaps(args, int(str(log_file['startTime'])[0:-3]), int(str(log_file['endTime'])[0:-3]))
        else:
            return logfetch_base.date_range_overlaps(args, int(str(log_file['startTime'])[0:-3]), int(str(log_file['lastModified'])[0:-3]))
    elif 'endTime' in log_file:
        return logfetch_base.is_in_date_range(args, int(str(log_file['endTime'])[0:-3]))
    else:
        return logfetch_base.is_in_date_range(args, int(str(log_file['lastModified'])[0:-3]))
def modify_download_list(all_logs):
    """Replaces '.gz' entries with their unzipped counterpart when the
    gzip file is gone but the extracted file exists on disk.

    Mutates `all_logs` in place and returns it.
    """
    for idx, path in enumerate(all_logs):
        if not path.endswith('.gz'):
            continue
        unzipped = path[:-3]
        if os.path.isfile(path) or not os.path.isfile(unzipped):
            continue
        all_logs[idx] = unzipped
    return all_logs
def already_downloaded(dest, filename):
    """Returns True when the log already exists under `dest` in any of its
    forms: renamed to '.log', with the '.gz' suffix stripped, or as-is."""
    candidates = (
        '{0}/{1}'.format(dest, filename.replace('.gz', '.log')),
        '{0}/{1}'.format(dest, filename[:-3]),
        '{0}/{1}'.format(dest, filename),
    )
    return any(os.path.isfile(c) for c in candidates)
def logs_for_all_requests(args):
    """Collects S3 log-file metadata for the selected task or request(s).

    :param args: parsed CLI namespace; when args.taskId is set only that
        task is listed, otherwise all tasks of the matching requests plus
        the request-level S3 history are searched.
    :return: list of log-file dicts (as returned by the API), de-duplicated
        by their S3 'key'.
    """
    # Listing window in epoch milliseconds.
    s3_params = {'start': int(time.mktime(args.start.timetuple()) * 1000), 'end': int(time.mktime(args.end.timetuple()) * 1000)}
    if args.taskId:
        return logfetch_base.get_json_response(s3_task_logs_uri(args, args.taskId), args, s3_params)
    else:
        tasks = logfetch_base.tasks_for_requests(args)
        logs = []
        tasks_progress = 0
        tasks_goal = len(tasks)
        for task in tasks:
            s3_logs = logfetch_base.get_json_response(s3_task_logs_uri(args, task), args)
            logs = logs + s3_logs if s3_logs else logs
            tasks_progress += 1
            logfetch_base.update_progress_bar(tasks_progress, tasks_goal, 'S3 Log Finder', args.silent or args.verbose)
        # De-duplicate by S3 key (the same file can appear for several tasks).
        found_logs = []
        keys = []
        for log in logs:
            if not log['key'] in keys:
                found_logs.append(log)
                keys.append(log['key'])
        logfetch_base.log(colored('\nAlso searching s3 history...\n', 'cyan'), args, False)
        for request in logfetch_base.all_requests(args):
            s3_logs = logfetch_base.get_json_response(s3_request_logs_uri(args, request), args, s3_params)
            logs = logs + s3_logs if s3_logs else logs
        # Second de-dup pass picks up anything the history search added.
        for log in logs:
            if not log['key'] in keys:
                found_logs.append(log)
                keys.append(log['key'])
        return found_logs
def s3_task_logs_uri(args, idString):
    """Builds the API URI listing S3 logs for a single task id."""
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), TASK_FORMAT.format(idString))
def s3_request_logs_uri(args, idString):
    """Builds the API URI listing S3 logs for a whole request id."""
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), REQUEST_FORMAT.format(idString))
def log_matches(args, filename):
    """Decides whether `filename` should be fetched for the --logtype filter.

    When the configured s3 file pattern does not contain the task filename,
    name-based matching is impossible; fall back to fetching everything
    unless --no-name-fetch-off is set.

    :return: True when the file matches (or matching is impossible and
        fetch-all is allowed), False otherwise.
    """
    if 'filename' in args.file_pattern:
        return logfetch_base.log_matches(filename, '*{0}*'.format(args.logtype.replace('logs/', '')))
    else:
        sys.stderr.write(colored('Cannot match on log file names for s3 logs when filename is not in s3 pattern', 'red'))
        if args.no_name_fetch_off:
            # Typo fix in the user-facing message: "beacuse" -> "because".
            sys.stderr.write(colored('Will not fetch any s3 logs because --no-name-fetch-off is set, remove this setting to fetch all for this case instead', 'red'))
            return False
        else:
            sys.stderr.write(colored('Will fetch all s3 logs, set --no-name-fetch-off to skip s3 logs instead for this case', 'red'))
            return True
|
<reponame>C-BAND/jina
import os
import copy
import asyncio
import argparse
from typing import Union
from ....enums import SocketType
from ...zmq import send_ctrl_message
from ....jaml.helper import complete_path
from ....importer import ImportExtensions
from ....enums import replace_enum_to_str
from ..zmq.asyncio import AsyncNewLoopRuntime
from ....excepts import (
DaemonConnectivityError,
DaemonPeaCreationFailed,
DaemonWorkspaceCreationFailed,
)
if False:
import multiprocessing
import threading
from ....logging.logger import JinaLogger
class JinadRuntime(AsyncNewLoopRuntime):
"""Runtime procedure for JinaD."""
    def __init__(
        self,
        args: 'argparse.Namespace',
        **kwargs,
    ):
        """Initialize the runtime from the parsed CLI args.

        :param args: namespace providing host, port_ctrl, port_expose and
            timeout_ctrl.
        :param kwargs: forwarded to AsyncNewLoopRuntime.
        """
        super().__init__(args, **kwargs)
        # Need the `proper` control address to send `activate` and `deactivate` signals, from the pea in the `main`
        # process.
        self.ctrl_addr = self.get_control_address(args.host, args.port_ctrl)
        self.timeout_ctrl = args.timeout_ctrl
        self.host = args.host
        self.port_expose = args.port_expose
async def async_setup(self):
"""Create Workspace, Pea on remote JinaD server"""
with ImportExtensions(required=True):
# rich & aiohttp are used in `JinaDClient`
import rich
import aiohttp
from daemon.clients import AsyncJinaDClient
assert rich
assert aiohttp
# NOTE: args.timeout_ready is always set to -1 for JinadRuntime so that wait_for_success doesn't fail in Pea,
# so it can't be used for Client timeout.
self.client = AsyncJinaDClient(
host=self.args.host, port=self.args.port_expose, logger=self.logger
)
if not await self.client.alive:
raise DaemonConnectivityError
# Create a remote workspace with upload_files
self.workspace_id = await self.client.workspaces.create(
paths=self.args.upload_files,
id=self.args.workspace_id,
complete=True,
)
if not self.workspace_id:
self.logger.critical(f'remote workspace creation failed')
raise DaemonWorkspaceCreationFailed
payload = replace_enum_to_str(vars(self._mask_args(self.args)))
# Create a remote Pea in the above workspace
success, self.pea_id = await self.client.peas.create(
workspace_id=self.workspace_id, payload=payload
)
if not success:
self.logger.critical(f'remote pea creation failed')
raise DaemonPeaCreationFailed
    async def _wait_for_cancel(self):
        """Polls the cancel event; once it is set, cancels this runtime and
        sends a TERMINATE control message to the control address."""
        while not self.is_cancel.is_set():
            await asyncio.sleep(0.1)

        await self.async_cancel()
        send_ctrl_message(self.ctrl_addr, 'TERMINATE', self.timeout_ctrl)
    async def async_run_forever(self):
        """
        Streams log messages using websocket from remote server

        When quiet_remote_logs is set, an idle sleep task is scheduled
        instead of the log stream.
        """
        self.logstream = asyncio.create_task(
            self._sleep_forever()
            if self.args.quiet_remote_logs
            else self.client.logs(id=self.pea_id)
        )
    async def async_cancel(self):
        """Cancels the logstream task, removes the remote Pea & Workspace"""
        self.logstream.cancel()
        await self.client.peas.delete(id=self.pea_id)
        # NOTE: don't fail if workspace deletion fails here
        await self.client.workspaces.delete(id=self.workspace_id)
    async def _sleep_forever(self):
        """Sleep forever, no prince will come.

        Stand-in for the log stream when remote logs are silenced; the task
        is cancelled in async_cancel().
        """
        await asyncio.sleep(1e10)
    def _mask_args(self, args: 'argparse.Namespace'):
        """Returns a deep copy of `args` rewritten for remote execution.

        The remote Pea must not run as a JinadRuntime again, so the runtime
        class is reset and path-like arguments are reduced to bare filenames
        (they live inside the remote workspace). Every altered field is
        logged at debug level. The caller's `args` is not mutated.
        """
        _args = copy.deepcopy(args)

        # reset the runtime to ZEDRuntime or ContainerRuntime
        if _args.runtime_cls == 'JinadRuntime':
            # TODO: add jinahub:// and jinahub+docker:// scheme here
            if _args.uses.startswith('docker://'):
                _args.runtime_cls = 'ContainerRuntime'
            else:
                _args.runtime_cls = 'ZEDRuntime'

        # TODO:/NOTE this prevents jumping from remote to another remote (Han: 2021.1.17)
        # _args.host = __default_host__
        # host resetting disables dynamic routing. Use `disable_remote` instead
        _args.disable_remote = True

        # NOTE: on remote relative filepaths should be converted to filename only
        def basename(field):
            # docker:// references are kept untouched; missing paths as well.
            if field and not field.startswith('docker://'):
                try:
                    return os.path.basename(complete_path(field))
                except FileNotFoundError:
                    pass
            return field

        for f in ('uses', 'uses_after', 'uses_before', 'py_modules'):
            attr = getattr(_args, f, None)
            if not attr:
                continue
            setattr(_args, f, [basename(m) for m in attr]) if isinstance(
                attr, list
            ) else setattr(_args, f, basename(attr))

        _args.log_config = ''  # do not use local log_config
        _args.upload_files = []  # reset upload files
        _args.noblock_on_start = False  # wait until start success

        # Collect and debug-log every field that differs from the original.
        changes = []
        for k, v in vars(_args).items():
            if v != getattr(args, k):
                changes.append(f'{k:>30s}: {str(getattr(args, k)):30s} -> {str(v):30s}')
        if changes:
            changes = [
                'note the following arguments have been masked or altered for remote purpose:'
            ] + changes
            self.logger.debug('\n'.join(changes))
        return _args
# Static methods used by the Pea to communicate with the `Runtime` in the separate process
@staticmethod
def cancel(
cancel_event: Union['multiprocessing.Event', 'threading.Event'], **kwargs
):
"""
Signal the runtime to terminate
:param cancel_event: the cancel event to set
:param kwargs: extra keyword arguments
"""
cancel_event.set()
@staticmethod
def activate(
control_address: str,
timeout_ctrl: int,
socket_in_type: 'SocketType',
logger: 'JinaLogger',
**kwargs,
):
"""
Check if the runtime has successfully started
:param control_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:param socket_in_type: the type of input socket, needed to know if is a dealer
:param logger: the JinaLogger to log messages
:param kwargs: extra keyword arguments
"""
def _retry_control_message(
ctrl_address: str,
timeout_ctrl: int,
command: str,
num_retry: int,
logger: 'JinaLogger',
):
from ...zmq import send_ctrl_message
for retry in range(1, num_retry + 1):
logger.debug(f'Sending {command} command for the {retry}th time')
try:
send_ctrl_message(
ctrl_address,
command,
timeout=timeout_ctrl,
raise_exception=True,
)
break
except Exception as ex:
logger.warning(f'{ex!r}')
if retry == num_retry:
raise ex
if socket_in_type == SocketType.DEALER_CONNECT:
_retry_control_message(
ctrl_address=control_address,
timeout_ctrl=timeout_ctrl,
command='ACTIVATE',
num_retry=3,
logger=logger,
)
@staticmethod
def get_control_address(host: str, port: str, **kwargs):
"""
Get the control address for a runtime with a given host and port
:param host: the host where the runtime works
:param port: the control port where the runtime listens
:param kwargs: extra keyword arguments
:return: The corresponding control address
"""
from ...zmq import Zmqlet
return Zmqlet.get_ctrl_address(host, port, False)[0]
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Fight or Flight")
def fight_or_flight(card, abilities):
def fight_or_flight():
return AbilityNotImplemented
return fight_or_flight,
@card("Kavu Aggressor")
def kavu_aggressor(card, abilities):
def kavu_aggressor():
return AbilityNotImplemented
def kavu_aggressor():
return AbilityNotImplemented
def kavu_aggressor():
return AbilityNotImplemented
return kavu_aggressor, kavu_aggressor, kavu_aggressor,
@card("Assault")
def assault(card, abilities):
def assault():
return AbilityNotImplemented
def assault():
return AbilityNotImplemented
return assault, assault,
@card("Benalish Lancer")
def benalish_lancer(card, abilities):
def benalish_lancer():
return AbilityNotImplemented
def benalish_lancer():
return AbilityNotImplemented
return benalish_lancer, benalish_lancer,
@card("Kavu Scout")
def kavu_scout(card, abilities):
def kavu_scout():
return AbilityNotImplemented
return kavu_scout,
@card("Nightscape Master")
def nightscape_master(card, abilities):
def nightscape_master():
return AbilityNotImplemented
def nightscape_master():
return AbilityNotImplemented
return nightscape_master, nightscape_master,
@card("Hunting Kavu")
def hunting_kavu(card, abilities):
def hunting_kavu():
return AbilityNotImplemented
return hunting_kavu,
@card("Urborg Phantom")
def urborg_phantom(card, abilities):
def urborg_phantom():
return AbilityNotImplemented
def urborg_phantom():
return AbilityNotImplemented
return urborg_phantom, urborg_phantom,
@card("Noble Panther")
def noble_panther(card, abilities):
def noble_panther():
return AbilityNotImplemented
return noble_panther,
@card("<NAME>")
def kavu_chameleon(card, abilities):
def kavu_chameleon():
return AbilityNotImplemented
def kavu_chameleon():
return AbilityNotImplemented
return kavu_chameleon, kavu_chameleon,
@card("<NAME>")
def goham_djinn(card, abilities):
def goham_djinn():
return AbilityNotImplemented
def goham_djinn():
return AbilityNotImplemented
return goham_djinn, goham_djinn,
@card("U<NAME>o")
def urborg_volcano(card, abilities):
def urborg_volcano():
return AbilityNotImplemented
def urborg_volcano():
return AbilityNotImplemented
return urborg_volcano, urborg_volcano,
@card("Backlash")
def backlash(card, abilities):
def backlash():
return AbilityNotImplemented
return backlash,
@card("Reviving Vapors")
def reviving_vapors(card, abilities):
def reviving_vapors():
return AbilityNotImplemented
return reviving_vapors,
@card("Harsh Judgment")
def harsh_judgment(card, abilities):
def harsh_judgment():
return AbilityNotImplemented
def harsh_judgment():
return AbilityNotImplemented
return harsh_judgment, harsh_judgment,
@card("Fires of Yavimaya")
def fires_of_yavimaya(card, abilities):
def fires_of_yavimaya():
return AbilityNotImplemented
def fires_of_yavimaya():
return AbilityNotImplemented
return fires_of_yavimaya, fires_of_yavimaya,
@card("Teferi's Moat")
def teferis_moat(card, abilities):
def teferis_moat():
return AbilityNotImplemented
def teferis_moat():
return AbilityNotImplemented
return teferis_moat, teferis_moat,
@card("Lightning Dart")
def lightning_dart(card, abilities):
def lightning_dart():
return AbilityNotImplemented
return lightning_dart,
@card("Wallop")
def wallop(card, abilities):
def wallop():
return AbilityNotImplemented
return wallop,
@card("Dismantling Blow")
def dismantling_blow(card, abilities):
def dismantling_blow():
return AbilityNotImplemented
def dismantling_blow():
return AbilityNotImplemented
def dismantling_blow():
return AbilityNotImplemented
return dismantling_blow, dismantling_blow, dismantling_blow,
@card("Vigorous Charge")
def vigorous_charge(card, abilities):
def vigorous_charge():
return AbilityNotImplemented
def vigorous_charge():
return AbilityNotImplemented
return vigorous_charge, vigorous_charge,
@card("Trench Wurm")
def trench_wurm(card, abilities):
def trench_wurm():
return AbilityNotImplemented
return trench_wurm,
@card("Hanna, Ship's Navigator")
def hanna_ships_navigator(card, abilities):
def hanna_ships_navigator():
return AbilityNotImplemented
return hanna_ships_navigator,
@card("Phyrexian Reaper")
def phyrexian_reaper(card, abilities):
def phyrexian_reaper():
return AbilityNotImplemented
return phyrexian_reaper,
@card("Opt")
def opt(card, abilities):
def opt():
return AbilityNotImplemented
def opt():
return AbilityNotImplemented
return opt, opt,
@card("Ancient Kavu")
def ancient_kavu(card, abilities):
def ancient_kavu():
return AbilityNotImplemented
return ancient_kavu,
@card("Deliver")
def deliver(card, abilities):
def deliver():
return AbilityNotImplemented
def deliver():
return AbilityNotImplemented
return deliver, deliver,
@card("Mages' Contest")
def mages_contest(card, abilities):
def mages_contest():
return AbilityNotImplemented
return mages_contest,
@card("Empress Galina")
def empress_galina(card, abilities):
def empress_galina():
return AbilityNotImplemented
return empress_galina,
@card("Darigaaz, the Igniter")
def darigaaz_the_igniter(card, abilities):
def darigaaz_the_igniter():
return AbilityNotImplemented
def darigaaz_the_igniter():
return AbilityNotImplemented
return darigaaz_the_igniter, darigaaz_the_igniter,
@card("Aura Shards")
def aura_shards(card, abilities):
def aura_shards():
return AbilityNotImplemented
return aura_shards,
@card("Blazing Specter")
def blazing_specter(card, abilities):
def blazing_specter():
return AbilityNotImplemented
def blazing_specter():
return AbilityNotImplemented
return blazing_specter, blazing_specter,
@card("Strength of Unity")
def strength_of_unity(card, abilities):
def strength_of_unity():
return AbilityNotImplemented
def strength_of_unity():
return AbilityNotImplemented
return strength_of_unity, strength_of_unity,
@card("Jade Leech")
def jade_leech(card, abilities):
def jade_leech():
return AbilityNotImplemented
return jade_leech,
@card("Coastal Tower")
def coastal_tower(card, abilities):
def coastal_tower():
return AbilityNotImplemented
def coastal_tower():
return AbilityNotImplemented
return coastal_tower, coastal_tower,
@card("Bloodstone Cameo")
def bloodstone_cameo(card, abilities):
def bloodstone_cameo():
return AbilityNotImplemented
return bloodstone_cameo,
@card("Callous Giant")
def callous_giant(card, abilities):
def callous_giant():
return AbilityNotImplemented
return callous_giant,
@card("Probe")
def probe(card, abilities):
def probe():
return AbilityNotImplemented
def probe():
return AbilityNotImplemented
def probe():
return AbilityNotImplemented
return probe, probe, probe,
@card("Do or Die")
def do_or_die(card, abilities):
def do_or_die():
return AbilityNotImplemented
return do_or_die,
@card("Rainbow Crow")
def rainbow_crow(card, abilities):
def rainbow_crow():
return AbilityNotImplemented
def rainbow_crow():
return AbilityNotImplemented
return rainbow_crow, rainbow_crow,
@card("Fact or Fiction")
def fact_or_fiction(card, abilities):
def fact_or_fiction():
return AbilityNotImplemented
return fact_or_fiction,
@card("Cauldron Dance")
def cauldron_dance(card, abilities):
def cauldron_dance():
return AbilityNotImplemented
def cauldron_dance():
return AbilityNotImplemented
def cauldron_dance():
return AbilityNotImplemented
return cauldron_dance, cauldron_dance, cauldron_dance,
@card("<NAME>")
def ruham_djinn(card, abilities):
def ruham_djinn():
return AbilityNotImplemented
def ruham_djinn():
return AbilityNotImplemented
return ruham_djinn, ruham_djinn,
@card("Crystal Spray")
def crystal_spray(card, abilities):
def crystal_spray():
return AbilityNotImplemented
def crystal_spray():
return AbilityNotImplemented
return crystal_spray, crystal_spray,
@card("Collective Restraint")
def collective_restraint(card, abilities):
def collective_restraint():
return AbilityNotImplemented
return collective_restraint,
@card("Phyrexian Battleflies")
def phyrexian_battleflies(card, abilities):
def phyrexian_battleflies():
return AbilityNotImplemented
def phyrexian_battleflies():
return AbilityNotImplemented
return phyrexian_battleflies, phyrexian_battleflies,
@card("Orim's Touch")
def orims_touch(card, abilities):
def orims_touch():
return AbilityNotImplemented
def orims_touch():
return AbilityNotImplemented
return orims_touch, orims_touch,
@card("Alabaster Leech")
def alabaster_leech(card, abilities):
def alabaster_leech():
return AbilityNotImplemented
return alabaster_leech,
@card("Yawgmoth's Agenda")
def yawgmoths_agenda(card, abilities):
def yawgmoths_agenda():
return AbilityNotImplemented
def yawgmoths_agenda():
return AbilityNotImplemented
def yawgmoths_agenda():
return AbilityNotImplemented
return yawgmoths_agenda, yawgmoths_agenda, yawgmoths_agenda,
@card("Psychic Battle")
def psychic_battle(card, abilities):
def psychic_battle():
return AbilityNotImplemented
return psychic_battle,
@card("Global Ruin")
def global_ruin(card, abilities):
def global_ruin():
return AbilityNotImplemented
return global_ruin,
@card("Shivan Harvest")
def shivan_harvest(card, abilities):
def shivan_harvest():
return AbilityNotImplemented
return shivan_harvest,
@card("Overload")
def overload(card, abilities):
def overload():
return AbilityNotImplemented
def overload():
return AbilityNotImplemented
return overload, overload,
@card("Elfhame Sanctuary")
def elfhame_sanctuary(card, abilities):
def elfhame_sanctuary():
return AbilityNotImplemented
return elfhame_sanctuary,
@card("Restrain")
def restrain(card, abilities):
def restrain():
return AbilityNotImplemented
def restrain():
return AbilityNotImplemented
return restrain, restrain,
@card("Death or Glory")
def death_or_glory(card, abilities):
def death_or_glory():
return AbilityNotImplemented
return death_or_glory,
@card("Cremate")
def cremate(card, abilities):
def cremate():
return AbilityNotImplemented
def cremate():
return AbilityNotImplemented
return cremate, cremate,
@card("<NAME>")
def alloy_golem(card, abilities):
def alloy_golem():
return AbilityNotImplemented
def alloy_golem():
return AbilityNotImplemented
return alloy_golem, alloy_golem,
@card("Pulse of Llanowar")
def pulse_of_llanowar(card, abilities):
def pulse_of_llanowar():
return AbilityNotImplemented
return pulse_of_llanowar,
@card("Crusading Knight")
def crusading_knight(card, abilities):
def crusading_knight():
return AbilityNotImplemented
def crusading_knight():
return AbilityNotImplemented
return crusading_knight, crusading_knight,
@card("Lotus Guardian")
def lotus_guardian(card, abilities):
def lotus_guardian():
return AbilityNotImplemented
def lotus_guardian():
return AbilityNotImplemented
return lotus_guardian, lotus_guardian,
@card("Sulfur Vent")
def sulfur_vent(card, abilities):
def sulfur_vent():
return AbilityNotImplemented
def sulfur_vent():
return AbilityNotImplemented
def sulfur_vent():
return AbilityNotImplemented
return sulfur_vent, sulfur_vent, sulfur_vent,
@card("Slimy Kavu")
def slimy_kavu(card, abilities):
def slimy_kavu():
return AbilityNotImplemented
return slimy_kavu,
@card("Raging Kavu")
def raging_kavu(card, abilities):
def raging_kavu():
return AbilityNotImplemented
def raging_kavu():
return AbilityNotImplemented
return raging_kavu, raging_kavu,
@card("Bind")
def bind(card, abilities):
def bind():
return AbilityNotImplemented
def bind():
return AbilityNotImplemented
return bind, bind,
@card("Barrin's Spite")
def barrins_spite(card, abilities):
def barrins_spite():
return AbilityNotImplemented
return barrins_spite,
@card("Aura Mutation")
def aura_mutation(card, abilities):
def aura_mutation():
return AbilityNotImplemented
return aura_mutation,
@card("Metathran Transport")
def metathran_transport(card, abilities):
def metathran_transport():
return AbilityNotImplemented
def metathran_transport():
return AbilityNotImplemented
def metathran_transport():
return AbilityNotImplemented
return metathran_transport, metathran_transport, metathran_transport,
@card("Llanowar Cavalry")
def llanowar_cavalry(card, abilities):
def llanowar_cavalry():
return AbilityNotImplemented
return llanowar_cavalry,
@card("Loafing Giant")
def loafing_giant(card, abilities):
def loafing_giant():
return AbilityNotImplemented
return loafing_giant,
@card("Plague Spores")
def plague_spores(card, abilities):
def plague_spores():
return AbilityNotImplemented
return plague_spores,
@card("Vodalian Zombie")
def vodalian_zombie(card, abilities):
def vodalian_zombie():
return AbilityNotImplemented
return vodalian_zombie,
@card("Thornscape Master")
def thornscape_master(card, abilities):
def thornscape_master():
return AbilityNotImplemented
def thornscape_master():
return AbilityNotImplemented
return thornscape_master, thornscape_master,
@card("Hooded Kavu")
def hooded_kavu(card, abilities):
def hooded_kavu():
return AbilityNotImplemented
return hooded_kavu,
@card("Undermine")
def undermine(card, abilities):
def undermine():
return AbilityNotImplemented
return undermine,
@card("Kavu Runner")
def kavu_runner(card, abilities):
def kavu_runner():
return AbilityNotImplemented
return kavu_runner,
@card("Stormscape Apprentice")
def stormscape_apprentice(card, abilities):
def stormscape_apprentice():
return AbilityNotImplemented
def stormscape_apprentice():
return AbilityNotImplemented
return stormscape_apprentice, stormscape_apprentice,
@card("Duskwalker")
def duskwalker(card, abilities):
def duskwalker():
return AbilityNotImplemented
def duskwalker():
return AbilityNotImplemented
return duskwalker, duskwalker,
@card("Scorching Lava")
def scorching_lava(card, abilities):
def scorching_lava():
return AbilityNotImplemented
def scorching_lava():
return AbilityNotImplemented
return scorching_lava, scorching_lava,
@card("Phyrexian Slayer")
def phyrexian_slayer(card, abilities):
def phyrexian_slayer():
return AbilityNotImplemented
def phyrexian_slayer():
return AbilityNotImplemented
return phyrexian_slayer, phyrexian_slayer,
@card("Reya Dawnbringer")
def reya_dawnbringer(card, abilities):
def reya_dawnbringer():
return AbilityNotImplemented
def reya_dawnbringer():
return AbilityNotImplemented
return reya_dawnbringer, reya_dawnbringer,
@card("Breaking Wave")
def breaking_wave(card, abilities):
def breaking_wave():
return AbilityNotImplemented
def breaking_wave():
return AbilityNotImplemented
return breaking_wave, breaking_wave,
@card("Capashen Unicorn")
def capashen_unicorn(card, abilities):
def capashen_unicorn():
return AbilityNotImplemented
return capashen_unicorn,
@card("Explosive Growth")
def explosive_growth(card, abilities):
def explosive_growth():
return AbilityNotImplemented
def explosive_growth():
return AbilityNotImplemented
return explosive_growth, explosive_growth,
@card("Defiling Tears")
def defiling_tears(card, abilities):
def defiling_tears():
return AbilityNotImplemented
return defiling_tears,
@card("Dromar, the Banisher")
def dromar_the_banisher(card, abilities):
def dromar_the_banisher():
return AbilityNotImplemented
def dromar_the_banisher():
return AbilityNotImplemented
return dromar_the_banisher, dromar_the_banisher,
@card("Saproling Infestation")
def saproling_infestation(card, abilities):
def saproling_infestation():
return AbilityNotImplemented
return saproling_infestation,
@card("Rith's Attendant")
def riths_attendant(card, abilities):
def riths_attendant():
return AbilityNotImplemented
return riths_attendant,
@card("Sky Weaver")
def sky_weaver(card, abilities):
def sky_weaver():
return AbilityNotImplemented
return sky_weaver,
@card("Pincer Spider")
def pincer_spider(card, abilities):
def pincer_spider():
return AbilityNotImplemented
def pincer_spider():
return AbilityNotImplemented
def pincer_spider():
return AbilityNotImplemented
return pincer_spider, pincer_spider, pincer_spider,
@card("Phyrexian Lens")
def phyrexian_lens(card, abilities):
def phyrexian_lens():
return AbilityNotImplemented
return phyrexian_lens,
@card("Rout")
def rout(card, abilities):
def rout():
return AbilityNotImplemented
def rout():
return AbilityNotImplemented
return rout, rout,
@card("Chaotic Strike")
def chaotic_strike(card, abilities):
def chaotic_strike():
return AbilityNotImplemented
def chaotic_strike():
return AbilityNotImplemented
def chaotic_strike():
return AbilityNotImplemented
return chaotic_strike, chaotic_strike, chaotic_strike,
@card("Agonizing Demise")
def agonizing_demise(card, abilities):
def agonizing_demise():
return AbilityNotImplemented
def agonizing_demise():
return AbilityNotImplemented
return agonizing_demise, agonizing_demise,
@card("Restock")
def restock(card, abilities):
def restock():
return AbilityNotImplemented
return restock,
@card("Ver<NAME>")
def verduran_emissary(card, abilities):
def verduran_emissary():
return AbilityNotImplemented
def verduran_emissary():
return AbilityNotImplemented
return verduran_emissary, verduran_emissary,
@card("Firebrand Ranger")
def firebrand_ranger(card, abilities):
def firebrand_ranger():
return AbilityNotImplemented
return firebrand_ranger,
@card("Meteor Storm")
def meteor_storm(card, abilities):
def meteor_storm():
return AbilityNotImplemented
return meteor_storm,
@card("Stalking Assassin")
def stalking_assassin(card, abilities):
def stalking_assassin():
return AbilityNotImplemented
def stalking_assassin():
return AbilityNotImplemented
return stalking_assassin, stalking_assassin,
@card("Pure Reflection")
def pure_reflection(card, abilities):
def pure_reflection():
return AbilityNotImplemented
return pure_reflection,
@card("Spirit Weaver")
def spirit_weaver(card, abilities):
def spirit_weaver():
return AbilityNotImplemented
return spirit_weaver,
@card("Scouting Trek")
def scouting_trek(card, abilities):
def scouting_trek():
return AbilityNotImplemented
return scouting_trek,
@card("Exotic Curse")
def exotic_curse(card, abilities):
def exotic_curse():
return AbilityNotImplemented
def exotic_curse():
return AbilityNotImplemented
return exotic_curse, exotic_curse,
@card("Salt Marsh")
def salt_marsh(card, abilities):
def salt_marsh():
return AbilityNotImplemented
def salt_marsh():
return AbilityNotImplemented
return salt_marsh, salt_marsh,
@card("Pain")
def pain(card, abilities):
def pain():
return AbilityNotImplemented
def pain():
return AbilityNotImplemented
return pain, pain,
@card("Nomadic Elf")
def nomadic_elf(card, abilities):
def nomadic_elf():
return AbilityNotImplemented
return nomadic_elf,
@card("Marauding Knight")
def marauding_knight(card, abilities):
def marauding_knight():
return AbilityNotImplemented
def marauding_knight():
return AbilityNotImplemented
return marauding_knight, marauding_knight,
@card("Sway of Illusion")
def sway_of_illusion(card, abilities):
def sway_of_illusion():
return AbilityNotImplemented
def sway_of_illusion():
return AbilityNotImplemented
return sway_of_illusion, sway_of_illusion,
@card("Urborg Emissary")
def urborg_emissary(card, abilities):
def urborg_emissary():
return AbilityNotImplemented
def urborg_emissary():
return AbilityNotImplemented
return urborg_emissary, urborg_emissary,
@card("Razorfoot Griffin")
def razorfoot_griffin(card, abilities):
def razorfoot_griffin():
return AbilityNotImplemented
def razorfoot_griffin():
return AbilityNotImplemented
return razorfoot_griffin, razorfoot_griffin,
@card("Skizzik")
def skizzik(card, abilities):
def skizzik():
return AbilityNotImplemented
def skizzik():
return AbilityNotImplemented
def skizzik():
return AbilityNotImplemented
return skizzik, skizzik, skizzik,
@card("Archaeological Dig")
def archaeological_dig(card, abilities):
def archaeological_dig():
return AbilityNotImplemented
def archaeological_dig():
return AbilityNotImplemented
return archaeological_dig, archaeological_dig,
@card("Samite Ministration")
def samite_ministration(card, abilities):
def samite_ministration():
return AbilityNotImplemented
return samite_ministration,
@card("Crosis, the Purger")
def crosis_the_purger(card, abilities):
def crosis_the_purger():
return AbilityNotImplemented
def crosis_the_purger():
return AbilityNotImplemented
return crosis_the_purger, crosis_the_purger,
@card("Armored Guardian")
def armored_guardian(card, abilities):
def armored_guardian():
return AbilityNotImplemented
def armored_guardian():
return AbilityNotImplemented
return armored_guardian, armored_guardian,
@card("Saproling Symbiosis")
def saproling_symbiosis(card, abilities):
def saproling_symbiosis():
return AbilityNotImplemented
def saproling_symbiosis():
return AbilityNotImplemented
return saproling_symbiosis, saproling_symbiosis,
@card("Treva, the Renewer")
def treva_the_renewer(card, abilities):
def treva_the_renewer():
return AbilityNotImplemented
def treva_the_renewer():
return AbilityNotImplemented
return treva_the_renewer, treva_the_renewer,
@card("Bend or Break")
def bend_or_break(card, abilities):
def bend_or_break():
return AbilityNotImplemented
return bend_or_break,
@card("Crosis's Attendant")
def crosiss_attendant(card, abilities):
def crosiss_attendant():
return AbilityNotImplemented
return crosiss_attendant,
@card("Devouring Strossus")
def devouring_strossus(card, abilities):
def devouring_strossus():
return AbilityNotImplemented
def devouring_strossus():
return AbilityNotImplemented
def devouring_strossus():
return AbilityNotImplemented
return devouring_strossus, devouring_strossus, devouring_strossus,
@card("Llanowar Vanguard")
def llanowar_vanguard(card, abilities):
def llanowar_vanguard():
return AbilityNotImplemented
return llanowar_vanguard,
@card("Chromatic Sphere")
def chromatic_sphere(card, abilities):
def chromatic_sphere():
return AbilityNotImplemented
return chromatic_sphere,
@card("Planar Portal")
def planar_portal(card, abilities):
def planar_portal():
return AbilityNotImplemented
return planar_portal,
@card("Smoldering Tar")
def smoldering_tar(card, abilities):
def smoldering_tar():
return AbilityNotImplemented
def smoldering_tar():
return AbilityNotImplemented
return smoldering_tar, smoldering_tar,
@card("Halam Djinn")
def halam_djinn(card, abilities):
def halam_djinn():
return AbilityNotImplemented
def halam_djinn():
return AbilityNotImplemented
return halam_djinn, halam_djinn,
@card("Bog Initiate")
def bog_initiate(card, abilities):
def bog_initiate():
return AbilityNotImplemented
return bog_initiate,
@card("Ghitu Fire")
def ghitu_fire(card, abilities):
def ghitu_fire():
return AbilityNotImplemented
def ghitu_fire():
return AbilityNotImplemented
return ghitu_fire, ghitu_fire,
@card("Blurred Mongoose")
def blurred_mongoose(card, abilities):
def blurred_mongoose():
return AbilityNotImplemented
def blurred_mongoose():
return AbilityNotImplemented
return blurred_mongoose, blurred_mongoose,
@card("Tribal Flames")
def tribal_flames(card, abilities):
def tribal_flames():
return AbilityNotImplemented
return tribal_flames,
@card("Urza's Filter")
def urzas_filter(card, abilities):
def urzas_filter():
return AbilityNotImplemented
return urzas_filter,
@card("Rampant Elephant")
def rampant_elephant(card, abilities):
def rampant_elephant():
return AbilityNotImplemented
return rampant_elephant,
@card("Firescreamer")
def firescreamer(card, abilities):
def firescreamer():
return AbilityNotImplemented
return firescreamer,
@card("Might Weaver")
def might_weaver(card, abilities):
def might_weaver():
return AbilityNotImplemented
return might_weaver,
@card("Tangle")
def tangle(card, abilities):
def tangle():
return AbilityNotImplemented
def tangle():
return AbilityNotImplemented
return tangle, tangle,
@card("AEther Rift")
def aether_rift(card, abilities):
def aether_rift():
return AbilityNotImplemented
return aether_rift,
@card("Irrigation Ditch")
def irrigation_ditch(card, abilities):
def irrigation_ditch():
return AbilityNotImplemented
def irrigation_ditch():
return AbilityNotImplemented
def irrigation_ditch():
return AbilityNotImplemented
return irrigation_ditch, irrigation_ditch, irrigation_ditch,
@card("Rage Weaver")
def rage_weaver(card, abilities):
def rage_weaver():
return AbilityNotImplemented
return rage_weaver,
@card("Dream Thrush")
def dream_thrush(card, abilities):
def dream_thrush():
return AbilityNotImplemented
def dream_thrush():
return AbilityNotImplemented
return dream_thrush, dream_thrush,
@card("Kangee, Aerie Keeper")
def kangee_aerie_keeper(card, abilities):
def kangee_aerie_keeper():
return AbilityNotImplemented
def kangee_aerie_keeper():
return AbilityNotImplemented
def kangee_aerie_keeper():
return AbilityNotImplemented
def kangee_aerie_keeper():
return AbilityNotImplemented
return kangee_aerie_keeper, kangee_aerie_keeper, kangee_aerie_keeper, kangee_aerie_keeper,
@card("Metathran Aerostat")
def metathran_aerostat(card, abilities):
def metathran_aerostat():
return AbilityNotImplemented
def metathran_aerostat():
return AbilityNotImplemented
return metathran_aerostat, metathran_aerostat,
@card("Crypt Angel")
def crypt_angel(card, abilities):
def crypt_angel():
return AbilityNotImplemented
def crypt_angel():
return AbilityNotImplemented
return crypt_angel, crypt_angel,
@card("<NAME>")
def shivan_emissary(card, abilities):
def shivan_emissary():
return AbilityNotImplemented
def shivan_emissary():
return AbilityNotImplemented
return shivan_emissary, shivan_emissary,
@card("Thornscape Apprentice")
def thornscape_apprentice(card, abilities):
def thornscape_apprentice():
return AbilityNotImplemented
def thornscape_apprentice():
return AbilityNotImplemented
return thornscape_apprentice, thornscape_apprentice,
@card("Angelic Shield")
def angelic_shield(card, abilities):
def angelic_shield():
return AbilityNotImplemented
def angelic_shield():
return AbilityNotImplemented
return angelic_shield, angelic_shield,
@card("Armadillo Cloak")
def armadillo_cloak(card, abilities):
def armadillo_cloak():
return AbilityNotImplemented
def armadillo_cloak():
return AbilityNotImplemented
def armadillo_cloak():
return AbilityNotImplemented
return armadillo_cloak, armadillo_cloak, armadillo_cloak,
@card("Tek")
def tek(card, abilities):
def tek():
return AbilityNotImplemented
return tek,
@card("Distorting Wake")
def distorting_wake(card, abilities):
def distorting_wake():
return AbilityNotImplemented
return distorting_wake,
@card("Charging Troll")
def charging_troll(card, abilities):
def charging_troll():
return AbilityNotImplemented
def charging_troll():
return AbilityNotImplemented
return charging_troll, charging_troll,
@card("Yavimaya Barbarian")
def yavimaya_barbarian(card, abilities):
def yavimaya_barbarian():
return AbilityNotImplemented
return yavimaya_barbarian,
@card("Breath of Darigaaz")
def breath_of_darigaaz(card, abilities):
def breath_of_darigaaz():
return AbilityNotImplemented
def breath_of_darigaaz():
return AbilityNotImplemented
return breath_of_darigaaz, breath_of_darigaaz,
@card("Quirion Sentinel")
def quirion_sentinel(card, abilities):
def quirion_sentinel():
return AbilityNotImplemented
return quirion_sentinel,
@card("Winnow")
def winnow(card, abilities):
def winnow():
return AbilityNotImplemented
def winnow():
return AbilityNotImplemented
return winnow, winnow,
@card("Galina's Knight")
def galinas_knight(card, abilities):
def galinas_knight():
return AbilityNotImplemented
return galinas_knight,
@card("Vodalian Serpent")
def vodalian_serpent(card, abilities):
def vodalian_serpent():
return AbilityNotImplemented
def vodalian_serpent():
return AbilityNotImplemented
def vodalian_serpent():
return AbilityNotImplemented
return vodalian_serpent, vodalian_serpent, vodalian_serpent,
@card("Stand or Fall")
def stand_or_fall(card, abilities):
def stand_or_fall():
return AbilityNotImplemented
return stand_or_fall,
@card("Temporal Distortion")
def temporal_distortion(card, abilities):
def temporal_distortion():
return AbilityNotImplemented
def temporal_distortion():
return AbilityNotImplemented
def temporal_distortion():
return AbilityNotImplemented
return temporal_distortion, temporal_distortion, temporal_distortion,
@card("Geothermal Crevice")
def geothermal_crevice(card, abilities):
def geothermal_crevice():
return AbilityNotImplemented
def geothermal_crevice():
return AbilityNotImplemented
def geothermal_crevice():
return AbilityNotImplemented
return geothermal_crevice, geothermal_crevice, geothermal_crevice,
@card("Collapsing Borders")
def collapsing_borders(card, abilities):
def collapsing_borders():
return AbilityNotImplemented
return collapsing_borders,
@card("Vodalian Hypnotist")
def vodalian_hypnotist(card, abilities):
def vodalian_hypnotist():
return AbilityNotImplemented
return vodalian_hypnotist,
@card("Utopia Tree")
def utopia_tree(card, abilities):
def utopia_tree():
return AbilityNotImplemented
return utopia_tree,
@card("Turf Wound")
def turf_wound(card, abilities):
def turf_wound():
return AbilityNotImplemented
def turf_wound():
return AbilityNotImplemented
return turf_wound, turf_wound,
@card("Artifact Mutation")
def artifact_mutation(card, abilities):
def artifact_mutation():
return AbilityNotImplemented
return artifact_mutation,
@card("Obliterate")
def obliterate(card, abilities):
def obliterate():
return AbilityNotImplemented
def obliterate():
return AbilityNotImplemented
return obliterate, obliterate,
@card("Llanowar Elite")
def llanowar_elite(card, abilities):
def llanowar_elite():
return AbilityNotImplemented
def llanowar_elite():
return AbilityNotImplemented
def llanowar_elite():
return AbilityNotImplemented
return llanowar_elite, llanowar_elite, llanowar_elite,
@card("Sabertooth Nishoba")
def sabertooth_nishoba(card, abilities):
def sabertooth_nishoba():
return AbilityNotImplemented
return sabertooth_nishoba,
@card("Twilight's Call")
def twilights_call(card, abilities):
def twilights_call():
return AbilityNotImplemented
def twilights_call():
return AbilityNotImplemented
return twilights_call, twilights_call,
@card("Savage Offensive")
def savage_offensive(card, abilities):
def savage_offensive():
return AbilityNotImplemented
def savage_offensive():
return AbilityNotImplemented
return savage_offensive, savage_offensive,
@card("Mourning")
def mourning(card, abilities):
def mourning():
return AbilityNotImplemented
def mourning():
return AbilityNotImplemented
def mourning():
return AbilityNotImplemented
return mourning, mourning, mourning,
@card("Obsidian Acolyte")
def obsidian_acolyte(card, abilities):
def obsidian_acolyte():
return AbilityNotImplemented
def obsidian_acolyte():
return AbilityNotImplemented
return obsidian_acolyte, obsidian_acolyte,
@card("Sleeper's Robe")
def sleepers_robe(card, abilities):
def sleepers_robe():
return AbilityNotImplemented
def sleepers_robe():
return AbilityNotImplemented
def sleepers_robe():
return AbilityNotImplemented
return sleepers_robe, sleepers_robe, sleepers_robe,
@card("Blind Seer")
def blind_seer(card, abilities):
def blind_seer():
return AbilityNotImplemented
return blind_seer,
@card("Stand")
def stand(card, abilities):
def stand():
return AbilityNotImplemented
def stand():
return AbilityNotImplemented
return stand, stand,
@card("Heroes' Reunion")
def heroes_reunion(card, abilities):
def heroes_reunion():
return AbilityNotImplemented
return heroes_reunion,
@card("<NAME>")
def zanam_djinn(card, abilities):
def zanam_djinn():
return AbilityNotImplemented
def zanam_djinn():
return AbilityNotImplemented
return zanam_djinn, zanam_djinn,
@card("Drake-Skull Cameo")
def drakeskull_cameo(card, abilities):
def drakeskull_cameo():
return AbilityNotImplemented
return drakeskull_cameo,
@card("Aggressive Urge")
def aggressive_urge(card, abilities):
def aggressive_urge():
return AbilityNotImplemented
def aggressive_urge():
return AbilityNotImplemented
return aggressive_urge, aggressive_urge,
@card("Scavenged Weaponry")
def scavenged_weaponry(card, abilities):
def scavenged_weaponry():
return AbilityNotImplemented
def scavenged_weaponry():
return AbilityNotImplemented
def scavenged_weaponry():
return AbilityNotImplemented
return scavenged_weaponry, scavenged_weaponry, scavenged_weaponry,
@card("Urza's Rage")
def urzas_rage(card, abilities):
def urzas_rage():
return AbilityNotImplemented
def urzas_rage():
return AbilityNotImplemented
def urzas_rage():
return AbilityNotImplemented
return urzas_rage, urzas_rage, urzas_rage,
@card("Seashell Cameo")
def seashell_cameo(card, abilities):
def seashell_cameo():
return AbilityNotImplemented
return seashell_cameo,
@card("Wings of Hope")
def wings_of_hope(card, abilities):
def wings_of_hope():
return AbilityNotImplemented
def wings_of_hope():
return AbilityNotImplemented
return wings_of_hope, wings_of_hope,
@card("Juntu Stakes")
def juntu_stakes(card, abilities):
def juntu_stakes():
return AbilityNotImplemented
return juntu_stakes,
@card("Molimo, Maro-Sorcerer")
def molimo_marosorcerer(card, abilities):
def molimo_marosorcerer():
return AbilityNotImplemented
def molimo_marosorcerer():
return AbilityNotImplemented
return molimo_marosorcerer, molimo_marosorcerer,
@card("Rewards of Diversity")
def rewards_of_diversity(card, abilities):
def rewards_of_diversity():
return AbilityNotImplemented
return rewards_of_diversity,
@card("Hate Weaver")
def hate_weaver(card, abilities):
def hate_weaver():
return AbilityNotImplemented
return hate_weaver,
@card("Riptide Crab")
def riptide_crab(card, abilities):
def riptide_crab():
return AbilityNotImplemented
def riptide_crab():
return AbilityNotImplemented
return riptide_crab, riptide_crab,
@card("Prison Barricade")
def prison_barricade(card, abilities):
def prison_barricade():
return AbilityNotImplemented
def prison_barricade():
return AbilityNotImplemented
def prison_barricade():
return AbilityNotImplemented
return prison_barricade, prison_barricade, prison_barricade,
@card("Searing Rays")
def searing_rays(card, abilities):
def searing_rays():
return AbilityNotImplemented
return searing_rays,
@card("Wandering Stream")
def wandering_stream(card, abilities):
def wandering_stream():
return AbilityNotImplemented
return wandering_stream,
@card("Vile Consumption")
def vile_consumption(card, abilities):
def vile_consumption():
return AbilityNotImplemented
return vile_consumption,
@card("Shivan Oasis")
def shivan_oasis(card, abilities):
def shivan_oasis():
return AbilityNotImplemented
def shivan_oasis():
return AbilityNotImplemented
return shivan_oasis, shivan_oasis,
@card("Horned Cheetah")
def horned_cheetah(card, abilities):
def horned_cheetah():
return AbilityNotImplemented
return horned_cheetah,
@card("Absorb")
def absorb(card, abilities):
def absorb():
return AbilityNotImplemented
return absorb,
@card("Tsabo's Decree")
def tsabos_decree(card, abilities):
def tsabos_decree():
return AbilityNotImplemented
return tsabos_decree,
@card("Urborg Shambler")
def urborg_shambler(card, abilities):
def urborg_shambler():
return AbilityNotImplemented
return urborg_shambler,
@card("Voracious Cobra")
def voracious_cobra(card, abilities):
def voracious_cobra():
return AbilityNotImplemented
def voracious_cobra():
return AbilityNotImplemented
return voracious_cobra, voracious_cobra,
@card("Yavimaya Kavu")
def yavimaya_kavu(card, abilities):
def yavimaya_kavu():
return AbilityNotImplemented
def yavimaya_kavu():
return AbilityNotImplemented
return yavimaya_kavu, yavimaya_kavu,
@card("Mana Maze")
def mana_maze(card, abilities):
def mana_maze():
return AbilityNotImplemented
return mana_maze,
@card("Cinder Shade")
def cinder_shade(card, abilities):
def cinder_shade():
return AbilityNotImplemented
def cinder_shade():
return AbilityNotImplemented
return cinder_shade, cinder_shade,
@card("Suffering")
def suffering(card, abilities):
def suffering():
return AbilityNotImplemented
def suffering():
return AbilityNotImplemented
return suffering, suffering,
@card("Well-Laid Plans")
def welllaid_plans(card, abilities):
def welllaid_plans():
return AbilityNotImplemented
return welllaid_plans,
@card("Darigaaz's Attendant")
def darigaazs_attendant(card, abilities):
def darigaazs_attendant():
return AbilityNotImplemented
return darigaazs_attendant,
@card("Samite Archer")
def samite_archer(card, abilities):
def samite_archer():
return AbilityNotImplemented
def samite_archer():
return AbilityNotImplemented
return samite_archer, samite_archer,
@card("Repulse")
def repulse(card, abilities):
def repulse():
return AbilityNotImplemented
def repulse():
return AbilityNotImplemented
return repulse, repulse,
@card("Sunscape Apprentice")
def sunscape_apprentice(card, abilities):
def sunscape_apprentice():
return AbilityNotImplemented
def sunscape_apprentice():
return AbilityNotImplemented
return sunscape_apprentice, sunscape_apprentice,
@card("Treefolk Healer")
def treefolk_healer(card, abilities):
def treefolk_healer():
return AbilityNotImplemented
return treefolk_healer,
@card("Crimson Acolyte")
def crimson_acolyte(card, abilities):
def crimson_acolyte():
return AbilityNotImplemented
def crimson_acolyte():
return AbilityNotImplemented
return crimson_acolyte, crimson_acolyte,
@card("Rooting Kavu")
def rooting_kavu(card, abilities):
def rooting_kavu():
return AbilityNotImplemented
return rooting_kavu,
@card("Goblin Spy")
def goblin_spy(card, abilities):
def goblin_spy():
return AbilityNotImplemented
return goblin_spy,
@card("Nightscape Apprentice")
def nightscape_apprentice(card, abilities):
def nightscape_apprentice():
return AbilityNotImplemented
def nightscape_apprentice():
return AbilityNotImplemented
return nightscape_apprentice, nightscape_apprentice,
@card("Addle")
def addle(card, abilities):
def addle():
return AbilityNotImplemented
return addle,
@card("Ancient Spring")
def ancient_spring(card, abilities):
def ancient_spring():
return AbilityNotImplemented
def ancient_spring():
return AbilityNotImplemented
def ancient_spring():
return AbilityNotImplemented
return ancient_spring, ancient_spring, ancient_spring,
@card("Spreading Plague")
def spreading_plague(card, abilities):
def spreading_plague():
return AbilityNotImplemented
return spreading_plague,
@card("Prohibit")
def prohibit(card, abilities):
def prohibit():
return AbilityNotImplemented
def prohibit():
return AbilityNotImplemented
return prohibit, prohibit,
@card("Void")
def void(card, abilities):
def void():
return AbilityNotImplemented
return void,
@card("Tsabo Tavoc")
def tsabo_tavoc(card, abilities):
def tsabo_tavoc():
return AbilityNotImplemented
def tsabo_tavoc():
return AbilityNotImplemented
return tsabo_tavoc, tsabo_tavoc,
@card("<NAME>")
def andradite_leech(card, abilities):
def andradite_leech():
return AbilityNotImplemented
def andradite_leech():
return AbilityNotImplemented
return andradite_leech, andradite_leech,
@card("Whip Silk")
def whip_silk(card, abilities):
def whip_silk():
return AbilityNotImplemented
def whip_silk():
return AbilityNotImplemented
def whip_silk():
return AbilityNotImplemented
return whip_silk, whip_silk, whip_silk,
@card("Essence Leak")
def essence_leak(card, abilities):
def essence_leak():
return AbilityNotImplemented
def essence_leak():
return AbilityNotImplemented
return essence_leak, essence_leak,
@card("Tinder Farm")
def tinder_farm(card, abilities):
def tinder_farm():
return AbilityNotImplemented
def tinder_farm():
return AbilityNotImplemented
def tinder_farm():
return AbilityNotImplemented
return tinder_farm, tinder_farm, tinder_farm,
@card("<NAME>")
def benalish_trapper(card, abilities):
def benalish_trapper():
return AbilityNotImplemented
return benalish_trapper,
@card("Stormscape Master")
def stormscape_master(card, abilities):
def stormscape_master():
return AbilityNotImplemented
def stormscape_master():
return AbilityNotImplemented
return stormscape_master, stormscape_master,
@card("Dredge")
def dredge(card, abilities):
def dredge():
return AbilityNotImplemented
def dredge():
return AbilityNotImplemented
return dredge, dredge,
@card("Pyre Zombie")
def pyre_zombie(card, abilities):
def pyre_zombie():
return AbilityNotImplemented
def pyre_zombie():
return AbilityNotImplemented
return pyre_zombie, pyre_zombie,
@card("<NAME>")
def skittish_kavu(card, abilities):
def skittish_kavu():
return AbilityNotImplemented
return skittish_kavu,
@card("Rith, the Awakener")
def rith_the_awakener(card, abilities):
def rith_the_awakener():
return AbilityNotImplemented
def rith_the_awakener():
return AbilityNotImplemented
return rith_the_awakener, rith_the_awakener,
@card("<NAME>")
def keldon_necropolis(card, abilities):
def keldon_necropolis():
return AbilityNotImplemented
def keldon_necropolis():
return AbilityNotImplemented
return keldon_necropolis, keldon_necropolis,
@card("Overabundance")
def overabundance(card, abilities):
def overabundance():
return AbilityNotImplemented
return overabundance,
@card("Divine Presence")
def divine_presence(card, abilities):
def divine_presence():
return AbilityNotImplemented
return divine_presence,
@card("<NAME>")
def sulam_djinn(card, abilities):
def sulam_djinn():
return AbilityNotImplemented
def sulam_djinn():
return AbilityNotImplemented
return sulam_djinn, sulam_djinn,
@card("<NAME>")
def trollhorn_cameo(card, abilities):
def trollhorn_cameo():
return AbilityNotImplemented
return trollhorn_cameo,
@card("Coalition Victory")
def coalition_victory(card, abilities):
def coalition_victory():
return AbilityNotImplemented
return coalition_victory,
@card("Shoreline Raider")
def shoreline_raider(card, abilities):
def shoreline_raider():
return AbilityNotImplemented
return shoreline_raider,
@card("Desperate Research")
def desperate_research(card, abilities):
def desperate_research():
return AbilityNotImplemented
return desperate_research,
@card("Wayfaring Giant")
def wayfaring_giant(card, abilities):
def wayfaring_giant():
return AbilityNotImplemented
return wayfaring_giant,
@card("Reckless Assault")
def reckless_assault(card, abilities):
def reckless_assault():
return AbilityNotImplemented
return reckless_assault,
@card("Wane")
def wane(card, abilities):
def wane():
return AbilityNotImplemented
def wane():
return AbilityNotImplemented
return wane, wane,
@card("Serpentine Kavu")
def serpentine_kavu(card, abilities):
def serpentine_kavu():
return AbilityNotImplemented
return serpentine_kavu,
@card("Protective Sphere")
def protective_sphere(card, abilities):
def protective_sphere():
return AbilityNotImplemented
return protective_sphere,
@card("Tsabo's Assassin")
def tsabos_assassin(card, abilities):
def tsabos_assassin():
return AbilityNotImplemented
return tsabos_assassin,
@card("Treva's Attendant")
def trevas_attendant(card, abilities):
def trevas_attendant():
return AbilityNotImplemented
return trevas_attendant,
@card("Liberate")
def liberate(card, abilities):
def liberate():
return AbilityNotImplemented
return liberate,
@card("Canopy Surge")
def canopy_surge(card, abilities):
def canopy_surge():
return AbilityNotImplemented
def canopy_surge():
return AbilityNotImplemented
return canopy_surge, canopy_surge,
@card("Wax")
def wax(card, abilities):
def wax():
return AbilityNotImplemented
def wax():
return AbilityNotImplemented
return wax, wax,
@card("Thunderscape Apprentice")
def thunderscape_apprentice(card, abilities):
def thunderscape_apprentice():
return AbilityNotImplemented
def thunderscape_apprentice():
return AbilityNotImplemented
return thunderscape_apprentice, thunderscape_apprentice,
@card("Elvish Champion")
def elvish_champion(card, abilities):
def elvish_champion():
return AbilityNotImplemented
return elvish_champion,
@card("Kavu Lair")
def kavu_lair(card, abilities):
def kavu_lair():
return AbilityNotImplemented
return kavu_lair,
@card("Vodalian Merchant")
def vodalian_merchant(card, abilities):
def vodalian_merchant():
return AbilityNotImplemented
return vodalian_merchant,
@card("Spinal Embrace")
def spinal_embrace(card, abilities):
def spinal_embrace():
return AbilityNotImplemented
def spinal_embrace():
return AbilityNotImplemented
return spinal_embrace, spinal_embrace,
@card("Faerie Squadron")
def faerie_squadron(card, abilities):
def faerie_squadron():
return AbilityNotImplemented
def faerie_squadron():
return AbilityNotImplemented
return faerie_squadron, faerie_squadron,
@card("Barrin's Unmaking")
def barrins_unmaking(card, abilities):
def barrins_unmaking():
return AbilityNotImplemented
return barrins_unmaking,
@card("Shivan Zombie")
def shivan_zombie(card, abilities):
def shivan_zombie():
return AbilityNotImplemented
return shivan_zombie,
@card("Malice")
def malice(card, abilities):
def malice():
return AbilityNotImplemented
def malice():
return AbilityNotImplemented
return malice, malice,
@card("Reviving Dose")
def reviving_dose(card, abilities):
def reviving_dose():
return AbilityNotImplemented
def reviving_dose():
return AbilityNotImplemented
return reviving_dose, reviving_dose,
@card("Spirit of Resistance")
def spirit_of_resistance(card, abilities):
def spirit_of_resistance():
return AbilityNotImplemented
return spirit_of_resistance,
@card("Verdeloth the Ancient")
def verdeloth_the_ancient(card, abilities):
def verdeloth_the_ancient():
return AbilityNotImplemented
def verdeloth_the_ancient():
return AbilityNotImplemented
def verdeloth_the_ancient():
return AbilityNotImplemented
return verdeloth_the_ancient, verdeloth_the_ancient, verdeloth_the_ancient,
@card("Tidal Visionary")
def tidal_visionary(card, abilities):
def tidal_visionary():
return AbilityNotImplemented
return tidal_visionary,
@card("Ordered Migration")
def ordered_migration(card, abilities):
def ordered_migration():
return AbilityNotImplemented
return ordered_migration,
@card("Atalya, Samite Master")
def atalya_samite_master(card, abilities):
def atalya_samite_master():
return AbilityNotImplemented
return atalya_samite_master,
@card("Annihilate")
def annihilate(card, abilities):
def annihilate():
return AbilityNotImplemented
def annihilate():
return AbilityNotImplemented
return annihilate, annihilate,
@card("Traveler's Cloak")
def travelers_cloak(card, abilities):
def travelers_cloak():
return AbilityNotImplemented
def travelers_cloak():
return AbilityNotImplemented
def travelers_cloak():
return AbilityNotImplemented
def travelers_cloak():
return AbilityNotImplemented
return travelers_cloak, travelers_cloak, travelers_cloak, travelers_cloak,
@card("Phyrexian Delver")
def phyrexian_delver(card, abilities):
def phyrexian_delver():
return AbilityNotImplemented
return phyrexian_delver,
@card("Power Armor")
def power_armor(card, abilities):
def power_armor():
return AbilityNotImplemented
return power_armor,
@card("Llanowar Knight")
def llanowar_knight(card, abilities):
def llanowar_knight():
return AbilityNotImplemented
return llanowar_knight,
@card("Tectonic Instability")
def tectonic_instability(card, abilities):
def tectonic_instability():
return AbilityNotImplemented
return tectonic_instability,
@card("Tolarian Emissary")
def tolarian_emissary(card, abilities):
def tolarian_emissary():
return AbilityNotImplemented
def tolarian_emissary():
return AbilityNotImplemented
def tolarian_emissary():
return AbilityNotImplemented
return tolarian_emissary, tolarian_emissary, tolarian_emissary,
@card("Vicious Kavu")
def vicious_kavu(card, abilities):
def vicious_kavu():
return AbilityNotImplemented
return vicious_kavu,
@card("Sterling Grove")
def sterling_grove(card, abilities):
def sterling_grove():
return AbilityNotImplemented
def sterling_grove():
return AbilityNotImplemented
return sterling_grove, sterling_grove,
@card("Frenzied Tilling")
def frenzied_tilling(card, abilities):
def frenzied_tilling():
return AbilityNotImplemented
return frenzied_tilling,
@card("Kavu Climber")
def kavu_climber(card, abilities):
def kavu_climber():
return AbilityNotImplemented
return kavu_climber,
@card("Quirion Trailblazer")
def quirion_trailblazer(card, abilities):
def quirion_trailblazer():
return AbilityNotImplemented
return quirion_trailblazer,
@card("Hypnotic Cloud")
def hypnotic_cloud(card, abilities):
def hypnotic_cloud():
return AbilityNotImplemented
def hypnotic_cloud():
return AbilityNotImplemented
return hypnotic_cloud, hypnotic_cloud,
@card("Thicket Elemental")
def thicket_elemental(card, abilities):
def thicket_elemental():
return AbilityNotImplemented
def thicket_elemental():
return AbilityNotImplemented
return thicket_elemental, thicket_elemental,
@card("Manipulate Fate")
def manipulate_fate(card, abilities):
def manipulate_fate():
return AbilityNotImplemented
def manipulate_fate():
return AbilityNotImplemented
return manipulate_fate, manipulate_fate,
@card("Urborg Drake")
def urborg_drake(card, abilities):
def urborg_drake():
return AbilityNotImplemented
def urborg_drake():
return AbilityNotImplemented
return urborg_drake, urborg_drake,
@card("Seer's Vision")
def seers_vision(card, abilities):
def seers_vision():
return AbilityNotImplemented
def seers_vision():
return AbilityNotImplemented
return seers_vision, seers_vision,
@card("Urborg Skeleton")
def urborg_skeleton(card, abilities):
def urborg_skeleton():
return AbilityNotImplemented
def urborg_skeleton():
return AbilityNotImplemented
def urborg_skeleton():
return AbilityNotImplemented
return urborg_skeleton, urborg_skeleton, urborg_skeleton,
@card("Plague Spitter")
def plague_spitter(card, abilities):
def plague_spitter():
return AbilityNotImplemented
def plague_spitter():
return AbilityNotImplemented
return plague_spitter, plague_spitter,
@card("Dromar's Attendant")
def dromars_attendant(card, abilities):
def dromars_attendant():
return AbilityNotImplemented
return dromars_attendant,
@card("Recover")
def recover(card, abilities):
def recover():
return AbilityNotImplemented
def recover():
return AbilityNotImplemented
return recover, recover,
@card("Tower Drake")
def tower_drake(card, abilities):
def tower_drake():
return AbilityNotImplemented
def tower_drake():
return AbilityNotImplemented
return tower_drake, tower_drake,
@card("Kavu Titan")
def kavu_titan(card, abilities):
def kavu_titan():
return AbilityNotImplemented
def kavu_titan():
return AbilityNotImplemented
return kavu_titan, kavu_titan,
@card("Benalish Heralds")
def benalish_heralds(card, abilities):
def benalish_heralds():
return AbilityNotImplemented
return benalish_heralds,
@card("Benalish Emissary")
def benalish_emissary(card, abilities):
def benalish_emissary():
return AbilityNotImplemented
def benalish_emissary():
return AbilityNotImplemented
return benalish_emissary, benalish_emissary,
@card("Thunderscape Master")
def thunderscape_master(card, abilities):
def thunderscape_master():
return AbilityNotImplemented
def thunderscape_master():
return AbilityNotImplemented
return thunderscape_master, thunderscape_master,
@card("Glimmering Angel")
def glimmering_angel(card, abilities):
def glimmering_angel():
return AbilityNotImplemented
def glimmering_angel():
return AbilityNotImplemented
return glimmering_angel, glimmering_angel,
@card("Recoil")
def recoil(card, abilities):
def recoil():
return AbilityNotImplemented
return recoil,
@card("Sapphire Leech")
def sapphire_leech(card, abilities):
def sapphire_leech():
return AbilityNotImplemented
def sapphire_leech():
return AbilityNotImplemented
return sapphire_leech, sapphire_leech,
@card("Worldly Counsel")
def worldly_counsel(card, abilities):
def worldly_counsel():
return AbilityNotImplemented
return worldly_counsel,
@card("Elfhame Palace")
def elfhame_palace(card, abilities):
def elfhame_palace():
return AbilityNotImplemented
def elfhame_palace():
return AbilityNotImplemented
return elfhame_palace, elfhame_palace,
@card("Sparring Golem")
def sparring_golem(card, abilities):
def sparring_golem():
return AbilityNotImplemented
return sparring_golem,
@card("Dueling Grounds")
def dueling_grounds(card, abilities):
def dueling_grounds():
return AbilityNotImplemented
def dueling_grounds():
return AbilityNotImplemented
return dueling_grounds, dueling_grounds,
@card("Teferi's Care")
def teferis_care(card, abilities):
def teferis_care():
return AbilityNotImplemented
def teferis_care():
return AbilityNotImplemented
return teferis_care, teferis_care,
@card("Tainted Well")
def tainted_well(card, abilities):
def tainted_well():
return AbilityNotImplemented
def tainted_well():
return AbilityNotImplemented
def tainted_well():
return AbilityNotImplemented
return tainted_well, tainted_well, tainted_well,
@card("Rogue Kavu")
def rogue_kavu(card, abilities):
def rogue_kavu():
return AbilityNotImplemented
return rogue_kavu,
@card("Spite")
def spite(card, abilities):
def spite():
return AbilityNotImplemented
def spite():
return AbilityNotImplemented
return spite, spite,
@card("Kavu Monarch")
def kavu_monarch(card, abilities):
def kavu_monarch():
return AbilityNotImplemented
def kavu_monarch():
return AbilityNotImplemented
return kavu_monarch, kavu_monarch,
@card("Ruby Leech")
def ruby_leech(card, abilities):
def ruby_leech():
return AbilityNotImplemented
def ruby_leech():
return AbilityNotImplemented
return ruby_leech, ruby_leech,
@card("Viashino Grappler")
def viashino_grappler(card, abilities):
def viashino_grappler():
return AbilityNotImplemented
return viashino_grappler,
@card("Slinking Serpent")
def slinking_serpent(card, abilities):
def slinking_serpent():
return AbilityNotImplemented
return slinking_serpent,
@card("Pledge of Loyalty")
def pledge_of_loyalty(card, abilities):
def pledge_of_loyalty():
return AbilityNotImplemented
def pledge_of_loyalty():
return AbilityNotImplemented
return pledge_of_loyalty, pledge_of_loyalty,
@card("Exclude")
def exclude(card, abilities):
def exclude():
return AbilityNotImplemented
def exclude():
return AbilityNotImplemented
return exclude, exclude,
@card("Phyrexian Altar")
def phyrexian_altar(card, abilities):
def phyrexian_altar():
return AbilityNotImplemented
return phyrexian_altar,
@card("Phyrexian Infiltrator")
def phyrexian_infiltrator(card, abilities):
def phyrexian_infiltrator():
return AbilityNotImplemented
return phyrexian_infiltrator,
@card("Pouncing Kavu")
def pouncing_kavu(card, abilities):
def pouncing_kavu():
return AbilityNotImplemented
def pouncing_kavu():
return AbilityNotImplemented
def pouncing_kavu():
return AbilityNotImplemented
return pouncing_kavu, pouncing_kavu, pouncing_kavu,
@card("Tigereye Cameo")
def tigereye_cameo(card, abilities):
def tigereye_cameo():
return AbilityNotImplemented
return tigereye_cameo,
@card("Scarred Puma")
def scarred_puma(card, abilities):
def scarred_puma():
return AbilityNotImplemented
return scarred_puma,
@card("Battery")
def battery(card, abilities):
def battery():
return AbilityNotImplemented
def battery():
return AbilityNotImplemented
return battery, battery,
@card("Ardent Soldier")
def ardent_soldier(card, abilities):
def ardent_soldier():
return AbilityNotImplemented
def ardent_soldier():
return AbilityNotImplemented
def ardent_soldier():
return AbilityNotImplemented
return ardent_soldier, ardent_soldier, ardent_soldier,
@card("Tsabo's Web")
def tsabos_web(card, abilities):
def tsabos_web():
return AbilityNotImplemented
def tsabos_web():
return AbilityNotImplemented
return tsabos_web, tsabos_web,
@card("Sunscape Master")
def sunscape_master(card, abilities):
def sunscape_master():
return AbilityNotImplemented
def sunscape_master():
return AbilityNotImplemented
return sunscape_master, sunscape_master,
@card("Metathran Zombie")
def metathran_zombie(card, abilities):
def metathran_zombie():
return AbilityNotImplemented
return metathran_zombie,
@card("Teferi's Response")
def teferis_response(card, abilities):
def teferis_response():
return AbilityNotImplemented
def teferis_response():
return AbilityNotImplemented
return teferis_response, teferis_response,
@card("Wash Out")
def wash_out(card, abilities):
def wash_out():
return AbilityNotImplemented
return wash_out,
@card("Zap")
def zap(card, abilities):
def zap():
return AbilityNotImplemented
def zap():
return AbilityNotImplemented
return zap, zap, |
__author__ = 'civic'
from serial import Serial
import time
from datetime import (
datetime,
timedelta
)
import serial
import math
from .msg import (
AlarmSetting,
StopButton,
TemperatureUnit,
ToneSet,
WorkStatus,
RequestMessage,
ResponseMessage,
InitRequest,
InitResponse,
DevInfoRequest,
DevInfoResponse,
ParamPutRequest,
ParamPutResponse,
DataHeaderRequest,
DataHeaderResponse,
DataBodyRequest,
DataBodyResponse,
ClockSetRequest,
ClockSetResponse,
DevNumRequest,
DevNumResponse,
UserInfoRequest,
UserInfoResponse,
)
import six
class Device:
    """Client for an Elitech RC-4 / RC-5 temperature data logger on a serial line.

    Every public call opens the serial port, performs one request/response
    exchange, closes the port, then sleeps ``wait_time`` seconds so the
    device can settle before the next command (see :meth:`_transact`).
    """

    def __init__(self, serial_port, baudrate=115000, timeout=5):
        """Bind (but do not keep open) the serial port.

        :param serial_port: device path/name; ``None`` leaves the port unbound.
        :param baudrate: serial speed.
            NOTE(review): 115000 is a nonstandard rate — confirm it isn't
            meant to be 115200 for this device.
        :param timeout: serial read timeout in seconds.
        """
        if serial_port is not None:
            self._ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
            self._ser.close()  # opened lazily, one transaction at a time
        self.debug = False      # when True, hex-dump every outgoing request
        self.wait_time = 0.5    # settle time after each transaction (seconds)
        self.encode = 'utf8'    # encoding for string fields (dev info, user info)

    @staticmethod
    def _page_size_for(model_no):
        """Return the records-per-page for a known model number.

        :raises ValueError: for any model other than 40 (RC-4) / 50 (RC-5).
        """
        if model_no == 40:    # RC-4
            return 100
        if model_no == 50:    # RC-5
            return 500
        # Original raised ValueError(fmt, arg), which never formatted the
        # message (and misspelled "Unknown"); format it properly here.
        raise ValueError("Unknown model_no (%d). can't decide page_size" % model_no)

    @staticmethod
    def _dump_bytes(ba):
        """Hex-dump *ba* to stdout, 16 bytes per row (debug aid)."""
        print("\nba length={}".format(len(ba)))
        for i, b in enumerate(ba):
            if six.PY2:
                six.print_("{:02X} ".format(ord(b)), sep='', end='')
            else:
                six.print_("{:02X} ".format(b), end='')
            if (i + 1) % 16 == 0:
                six.print_()
        six.print_()

    def _talk(self, request, response):
        """Write *request* to the already-open port and fill *response*.

        :type request: RequestMessage
        :type response: ResponseMessage
        :returns: *response*, after its ``read`` has consumed the reply.
        """
        ba = request.to_bytes()
        if self.debug:
            self._dump_bytes(ba)
        self._ser.write(ba)
        response.read(self._ser)
        return response

    def _transact(self, request, response):
        """Open the port, run one request/response exchange, then close.

        Always closes the port and sleeps ``wait_time`` afterwards — even on
        error — so the device is left in a consistent state.
        """
        try:
            self._ser.open()
            return self._talk(request, response)
        finally:
            self._ser.close()
            time.sleep(self.wait_time)

    def init(self):
        """Handshake with the logger.

        :rtype: InitResponse
        """
        return self._transact(InitRequest(), InitResponse())

    def get_devinfo(self):
        """Fetch device information (model, station no, record settings).

        :rtype: DevInfoResponse
        """
        return self._transact(DevInfoRequest(), DevInfoResponse(self.encode))

    def update(self, req):
        """Push new parameter settings to the device.

        :type req: ParamPutRequest
        :rtype: ParamPutResponse
        """
        return self._transact(req, ParamPutResponse())

    def get_data_header(self, target_station_no):
        """Fetch the data header (record count, etc.) for a station.

        :rtype: DataHeaderResponse
        """
        return self._transact(DataHeaderRequest(target_station_no),
                              DataHeaderResponse())

    def get_data(self, callback=None, page_size=None):
        """Read every stored record from the logger.

        :param callback: optional; when given it receives each page's list of
            record tuples and the method returns only the records delivered
            after the last callback (original behavior preserved).
        :param page_size: records per request; derived from the model when None.
        :rtype: list[(int, datetime, float)]
        """
        devinfo = self.get_devinfo()
        header = self.get_data_header(devinfo.station_no)
        if page_size is None:
            page_size = self._page_size_for(devinfo.model_no)
        page = int(math.ceil(header.rec_count / float(page_size)))
        dt = timedelta(hours=devinfo.rec_interval.hour,
                       minutes=devinfo.rec_interval.minute,
                       seconds=devinfo.rec_interval.second)
        data_list = []
        base_time = devinfo.start_time
        no = 1
        try:
            self._ser.open()
            for p in range(page):
                req = DataBodyRequest(devinfo.station_no, p)
                # Last page may be partial.
                count = page_size if (p + 1) * page_size <= devinfo.rec_count else (devinfo.rec_count % page_size)
                res = DataBodyResponse(count)
                self._talk(req, res)
                for rec in res.records:
                    # Raw values are tenths of a degree.
                    data_list.append((no, base_time, rec / 10.0))
                    no += 1
                    base_time += dt
                if callback is not None:
                    callback(data_list)
                    data_list = []
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return data_list

    def set_clock(self, station_no, set_time=None):
        """Set the device clock.

        :type station_no: int
        :param set_time: clock value to set; ``datetime.now()`` when None.
        :rtype: ClockSetResponse
        """
        if set_time is None:
            set_time = datetime.now()
        return self._transact(ClockSetRequest(station_no, set_time),
                              ClockSetResponse())

    def set_device_number(self, station_no, device_number):
        """Write the user-visible device number.

        :type station_no: int
        :type device_number: str
        :rtype: DevNumResponse
        """
        req = DevNumRequest(station_no)
        req.device_number = device_number
        return self._transact(req, DevNumResponse())

    def set_user_info(self, station_no, user_info):
        """Write the free-form user info string.

        :type station_no: int
        :type user_info: str
        :rtype: UserInfoResponse
        """
        req = UserInfoRequest(station_no, self.encode)
        req.user_info = user_info
        return self._transact(req, UserInfoResponse())

    def raw_send(self, request_bytes, response_length):
        """Send raw bytes and read back *response_length* bytes (debug aid).

        Builds throwaway request/response objects whose serialization hooks
        are replaced, so :meth:`_transact`'s open/close plumbing is reused.
        """
        request = RequestMessage()
        request.to_bytes = lambda: request_bytes
        response = ResponseMessage()
        response.msg = None

        def _read(ser):
            response.msg = ser.read(response_length)

        response.read = _read
        self._transact(request, response)
        return response.msg

    def get_latest(self, callback=None, page_size=None):
        """Read only the most recent record from the logger.

        :param callback: optional; called with the latest record tuple.
        :param page_size: records per request; derived from the model when None.
        :rtype: (int, datetime, float)
        """
        devinfo = self.get_devinfo()
        header = self.get_data_header(devinfo.station_no)
        if page_size is None:
            page_size = self._page_size_for(devinfo.model_no)
        page = int(math.ceil(header.rec_count / float(page_size)))
        dt = timedelta(hours=devinfo.rec_interval.hour,
                       minutes=devinfo.rec_interval.minute,
                       seconds=devinfo.rec_interval.second)
        base_time = devinfo.start_time + dt * (header.rec_count - 1)
        no = header.rec_count
        try:
            self._ser.open()
            p = page - 1  # the last page holds the newest record
            req = DataBodyRequest(devinfo.station_no, p)
            count = page_size if (p + 1) * page_size <= devinfo.rec_count else (devinfo.rec_count % page_size)
            res = DataBodyResponse(count)
            self._talk(req, res)
            rec = res.records[-1]
            latest = (no, base_time, rec / 10.0)
            if callback is not None:
                callback(latest)
        finally:
            self._ser.close()
            time.sleep(self.wait_time)
        return latest
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APITestCase
from groups.models.group_profile import GroupProfile
class GroupTestUpdate(APITestCase):
    """Integration tests for the group update endpoint /api/v1/groups/<pk>/."""

    def setUp(self):
        # Create some users
        self.user1 = User.objects.create(username='Pepito')
        self.user2 = User.objects.create(username='Benito')
        self.user3 = User.objects.create(username='Menganito')
        # Create some groups: user1 belongs to all three but owns only group1
        self.group1 = GroupProfile.objects.create_group(name='group 1', owner=self.user1)
        self.group1.user_set.add(self.user1)
        self.group2 = GroupProfile.objects.create_group(name='group 2', owner=self.user2)
        self.group2.user_set.add(self.user1)
        self.group2.user_set.add(self.user2)
        self.group3 = GroupProfile.objects.create_group(name='group 3', owner=self.user3)
        self.group3.user_set.add(self.user1)
        self.group3.user_set.add(self.user3)

    def get_uri(self, pk):
        """Build the detail endpoint URI for a group primary key."""
        return '/api/v1/groups/{}/'.format(pk)

    def send_request_without_auth(self, pk, params):
        """Send an unauthenticated PATCH to the group endpoint."""
        uri = self.get_uri(pk)
        return self.client.patch(uri, params)

    def send_request_with_auth(self, user, pk, params):
        """Send an authenticated PUT as *user*, logging out afterwards.

        Bug fix: this method was defined twice (first with PATCH, then with
        PUT); Python kept only the later PUT definition, so the effective
        behaviour (PUT) is preserved here and the dead duplicate removed.
        """
        uri = self.get_uri(pk)
        # Force login
        self.client.force_login(user)
        # send request
        response = self.client.put(uri, params)
        # logout
        self.client.logout()
        return response

    def check_serializers_fields(self, payload):
        """Assert the serializer exposes the expected fields."""
        self.assertIn('id', payload)
        self.assertIn('name', payload)
        self.assertIn('owner', payload)

    def check_payload_has_changes(self, payload, params):
        """Assert the response payload reflects the requested name change."""
        self.assertEqual(params.get('name'), payload.get('name'))

    def launch_successfully_test(self, user, instance, params):
        """Run the happy-path update flow and assert a valid 200 response."""
        response = self.send_request_with_auth(user, instance.pk, params)
        # Check response status code is equals to 200
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        payload = response.data
        # Check serializers fields are on payload
        self.check_serializers_fields(payload)
        # Check name change
        self.check_payload_has_changes(payload, params)

    def test_update_without_auth(self):
        # bug fix: pass the pk, not the group instance (the instance's
        # str() was previously interpolated into the URI)
        response = self.send_request_without_auth(self.group1.pk, {
            'name': 'group 1'
        })
        # Check response status code is equals to 401
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_update_user1(self):
        # owner updates own group -> success
        user = self.user1
        group = self.group1
        params = {
            'name': '{}-modified'.format(group)
        }
        self.launch_successfully_test(user, group, params)

    def test_update_user1_no_owner(self):
        # member but not owner -> forbidden
        user = self.user1
        group = self.group3
        params = {
            'name': '{}-modified'.format(group)
        }
        response = self.send_request_with_auth(user, group.pk, params)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_update_user2(self):
        # owner updates own group -> success
        user = self.user2
        group = self.group2
        params = {
            'name': '{}-modified'.format(group)
        }
        self.launch_successfully_test(user, group, params)

    def test_update_user3_no_found(self):
        # user3 is not a member of group1, so the group is not visible
        user = self.user3
        group = self.group1
        params = {
            'name': '{}-modified'.format(group)
        }
        response = self.send_request_with_auth(user, group.pk, params)
        # Check response status is equals to 404
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import struct
import netius
from .base import Middleware
class ProxyMiddleware(Middleware):
    """
    Middleware that implements the PROXY protocol on creation
    of a new connection enabling the passing of information
    from the front-end server to a back-end server using a normal
    TCP connection. This protocol was originally developed
    for the integration of an HAProxy server with back-end servers.

    :see: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
    """

    MAX_LENGTH = 118
    """ The maximum length that the base packet may have,
    this is a constant according to the PROXY protocol spec """

    HEADER_LENGTH_V2 = 16
    """ The length of the header message of the PROXY protocol
    under version 2 """

    HEADER_MAGIC_V2 = b"\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a"
    """ The magic byte string that starts the PROXY v2 protocol
    header, should be used for runtime verifications """

    # v2 command values, stored in the lower nibble of the
    # version/type byte of the header
    TYPE_LOCAL_V2 = 0x0
    TYPE_PROXY_V2 = 0x1

    # v2 address family values, stored in the higher nibble of the
    # family/protocol byte of the header
    AF_UNSPEC_v2 = 0x0
    AF_INET_v2 = 0x1
    AF_INET6_v2 = 0x2
    AF_UNIX_v2 = 0x3

    # v2 transport protocol values, stored in the lower nibble of the
    # family/protocol byte of the header
    PROTO_UNSPEC_v2 = 0x0
    PROTO_STREAM_v2 = 0x1
    PROTO_DGRAM_v2 = 0x2

    def __init__(self, owner, version = 1):
        """Creates the middleware for the given owner service, using
        the requested PROXY protocol version (1 or 2)."""
        Middleware.__init__(self, owner)
        self.version = version

    def start(self):
        """Starts the middleware, allowing the PROXY_VERSION config
        value to override the constructor-provided version, and binds
        to new-connection events on the owner."""
        Middleware.start(self)
        self.version = netius.conf("PROXY_VERSION", self.version, cast = int)
        self.owner.bind("connection_c", self.on_connection_c)

    def stop(self):
        """Stops the middleware, unbinding the new-connection handler."""
        Middleware.stop(self)
        self.owner.unbind("connection_c", self.on_connection_c)

    def on_connection_c(self, owner, connection):
        """Registers the version-appropriate PROXY handshake as a
        connection starter, so the header is parsed before normal
        traffic flows."""
        if self.version == 1: connection.add_starter(self._proxy_handshake_v1)
        elif self.version == 2: connection.add_starter(self._proxy_handshake_v2)
        else: raise netius.RuntimeError("Invalid PROXY version")

    def _proxy_handshake_v1(self, connection):
        """Parses the text-based PROXY v1 header ("PROXY TCP4 ... \\r\\n")
        from the connection and rewrites connection.address with the
        advertised client source address/port."""
        cls = self.__class__

        # verifies if the connection is SSL based if that's the case
        # the safe (reading) mode is enabled
        safe = connection.ssl

        # selects the proper receive method to be used to retrieve bytes
        # from the client side taking into account if the connection is
        # secured with SSL or not, note that the "special" SSL receive method
        # allows one to receive raw information under an SSL socket/connection
        recv = connection._recv_ssl if connection.ssl else connection.recv

        # in case the safe (read) mode is enabled the unit of counting
        # for the receive operation is one (single byte reading) to
        # allow no return of data (required for some environment eg: SSL)
        count = 1 if safe else cls.MAX_LENGTH

        # verifies if there's a previously set PROXY buffer defined
        # for the connection and if that's the case uses it otherwise
        # starts a new empty buffer from scratch
        has_buffer = hasattr(connection, "_proxy_buffer")
        if has_buffer: buffer = connection._proxy_buffer
        else: buffer = bytearray()

        # saves the "newly" created buffer as the PROXY buffer for the
        # current connection (may be used latter)
        connection._proxy_buffer = buffer

        # iterates continuously trying to retrieve the set of data that is
        # required to parse the PROXY protocol header information
        while True:
            # tries to receive the maximum size of data that is required
            # for the handling of the PROXY information
            data = self.owner.exec_safe(connection, recv, count)

            # in case the received data represents that of a closed connection
            # the connection is closed and the control flow returned
            if data == b"": connection.close(); return

            # in case the received value is false, that indicates that the
            # execution has failed due to an exception (expected or unexpected)
            if data == False: return

            # updates the "initial" buffer length taking into account
            # the current buffer and then appends the new data to it
            buffer_l = len(buffer)
            buffer += data

            # verifies the end of line sequence is present in the buffer,
            # if that's the case we've reached a positive state
            is_ready = b"\r\n" in buffer

            # in case the ready state has been reached, the complete set of
            # data is ready to be parsed and the loop is stopped
            if is_ready: break

        # removes the PROXY buffer reference from the connection as
        # its no longer going to be used
        del connection._proxy_buffer

        # determines the index/position of the end sequence in the
        # buffer and then uses it to calculate the base for the
        # calculus of the extra information in the data buffer
        buffer_i = buffer.index(b"\r\n")
        data_b = buffer_i - buffer_l + 2

        # extracts the line for parsing and the extra data value (to
        # be restored to connection) using the data base and the data
        line = buffer[:buffer_i]
        extra = data[data_b:]

        # in case there's valid extra data to be restored to the connection
        # performs the operation, effectively restoring it for latter
        # receiving operations (just like adding it back to the socket)
        if extra: connection.restore(extra)

        # forces the "conversion" of the line into a string so that it may
        # be properly split into its components, note that first the value
        # is converted into a byte string and then into a proper string
        line = bytes(line)
        line = netius.legacy.str(line)

        # splits the line of the protocol around its components and uses them
        # to change the current connection information (as expected)
        header, protocol, source, destination, source_p, destination_p = line.split(" ")

        # prints a debug message about the PROXY header received, so that runtime
        # debugging is possible (and expected for this is a sensible part)
        self.owner.debug(
            "Received header %s %s %s:%s => %s:%s" %
            (header, protocol, source, source_p, destination, destination_p)
        )

        # re-constructs the source address from the provided information, this is
        # the major and most important fix to be done
        connection.address = (source, int(source_p))

        # runs the end starter operation, indicating to the connection that
        # the PROXY header has been properly parsed
        connection.end_starter()

    def _proxy_handshake_v2(self, connection):
        """Parses the binary PROXY v2 header (magic + packed address
        block) from the connection and rewrites connection.address with
        the advertised client source address/port. Supports the IPv4
        and IPv6 address families; anything else drops the connection."""
        import netius.common

        cls = self.__class__

        # verifies if there's a previously set PROXY buffer defined
        # for the connection and if that's the case uses it otherwise
        # starts a new empty buffer from scratch
        has_buffer = hasattr(connection, "_proxy_buffer")
        if has_buffer: buffer = connection._proxy_buffer
        else: buffer = bytearray()

        # saves the "newly" created buffer as the PROXY buffer for the
        # current connection (may be used latter)
        connection._proxy_buffer = buffer

        # verifies if a PROXY header was already parsed from the current connection
        # and if that was not the case runs its parsing
        header = connection._proxy_header if hasattr(connection, "_proxy_header") else None
        if not header:
            # tries to read the PROXY v2 header bytes to be able to parse
            # the body parts taking that into account
            header = self._read_safe(connection, buffer, cls.HEADER_LENGTH_V2)
            if not header: return

            # updates the reference to the proxy header in the connection
            # and clears the buffer as it's now going to be used to load
            # the data from the body part
            connection._proxy_header = header
            buffer[:] = b""

        # unpacks the PROXY v2 header into its components, notice that some of them
        # contain multiple values on higher and lower bits
        magic, version_type, address_protocol, body_size = struct.unpack("!12sBBH", header)

        # unpacks both the version (of the protocol) and the type (of message) by
        # unpacking the higher and the lower bits
        version = version_type >> 4
        type = version_type & 0x0f

        # unpacks the type of address to be communicated and the protocol family
        address = address_protocol >> 4
        protocol = address_protocol & 0x0f

        # runs a series of assertions on some of the basic promises of the protocol
        # (if they failed connection will be dropped)
        netius.verify(magic == cls.HEADER_MAGIC_V2)
        netius.verify(version == 2)

        # reads the body part of the PROXY message taking into account the advertised
        # size of the body (from header component)
        body = self._read_safe(connection, buffer, body_size)
        if not body: return

        # decodes the address block according to the advertised family:
        # IPv4 packs two 32 bit addresses, IPv6 packs two 128 bit addresses
        # (as high/low 64 bit halves), both followed by the two ports
        if address == cls.AF_INET_v2:
            source, destination, source_p, destination_p = struct.unpack("!IIHH", body)
            source = netius.common.addr_to_ip4(source)
            destination = netius.common.addr_to_ip4(destination)
        elif address == cls.AF_INET6_v2:
            source_high,\
            source_low,\
            destination_high,\
            destination_low,\
            source_p,\
            destination_p = struct.unpack("!QQQQHH", body)
            source = (source_high << 64) + source_low
            destination = (destination_high << 64) + destination_low
            source = netius.common.addr_to_ip6(source)
            destination = netius.common.addr_to_ip6(destination)
        else:
            raise netius.RuntimeError("Unsupported or invalid PROXY header")

        # removes the PROXY buffer and header references from the connection
        # as they are no longer going to be used
        del connection._proxy_buffer
        del connection._proxy_header

        # prints a debug message about the PROXY header received, so that runtime
        # debugging is possible (and expected for this is a sensible part)
        self.owner.debug(
            "Received header v2 %d %s:%s => %s:%s" %
            (protocol, source, source_p, destination, destination_p)
        )

        # re-constructs the source address from the provided information, this is
        # the major and most important fix to be done
        connection.address = (source, int(source_p))

        # runs the end starter operation, indicating to the connection that
        # the PROXY header has been properly parsed
        connection.end_starter()

    def _read_safe(self, connection, buffer, count):
        """
        Reads a certain amount of data from a non blocking connection,
        in case there's a blocking operation then the error is raised
        and caught by the upper layers.

        This method also assumes that the buffer is stored on an abstract
        layer that can be used in the resume operation.

        :type connection: Connection
        :param connection: The connection from which the data is going
        to be read.
        :type buffer: bytearray
        :param buffer: The byte array where the data is going to be stored
        waiting for the processing.
        :type count: int
        :param count: The number of bytes that are going to be read from
        the target connection.
        :rtype: String
        :return: The bytes that were read from the connection or in alternative
        an invalid value meaning that the connection should be dropped.
        """

        cls = self.__class__

        # selects the proper receive method to be used to retrieve bytes
        # from the client side taking into account if the connection is
        # secured with SSL or not, note that the "special" SSL receive method
        # allows one to receive raw information under an SSL socket/connection
        recv = connection._recv_ssl if connection.ssl else connection.recv

        # iterates continuously trying to retrieve the set of data that is
        # required to parse the PROXY protocol header information
        while True:
            # determines the number of pending bytes in remaining to be read
            # in the buffer and if that's less or equal to zero breaks the
            # current loop (nothing pending to be read)
            pending = count - len(buffer)
            if pending <= 0: break

            # tries to receive the maximum size of data that is required
            # for the handling of the PROXY information
            data = self.owner.exec_safe(connection, recv, pending)

            # in case the received data represents that of a closed connection
            # the connection is closed and the control flow returned
            if data == b"": connection.close(); return None

            # in case the received value is false, that indicates that the
            # execution has failed due to an exception (expected or unexpected)
            if data == False: return None

            # adds the newly read data to the current buffer
            buffer += data

        # returns the valid partial value of the buffer as requested by
        # the call to this method, in normal circumstances the buffer
        # should only contain the requested amount of data
        return bytes(buffer[:count])
|
<gh_stars>1-10
#!/usr/bin/env python
import sys
from Tkinter import *
from tkFileDialog import askopenfilenames, askdirectory
import Pmw
import cv
import chrono
class frame:
    """Two-tab Tk/Pmw GUI for generating cyclic-voltammetry (CV) and
    chronoamperometry graphs from data files on disk.

    Tab 1 plots CV curves from one or more selected files; tab 2 plots
    anode (and optionally cathode) chrono data from folders. The actual
    plotting is delegated to the ``cv`` and ``chrono`` modules.
    """

    # Fixed option lists offered by the drop-down menus
    _colors = ['red', 'blue', 'green', 'yellow', 'cyan', 'magenta', 'black']
    _linestyles = [' ', '-', '--', '-.', ':']
    _markers = [' ', 'o', '.', '_', '*', '+', 'x', 'square', 'triangle', 'diamond']
    _units = ['cm^2', 'm^2']

    def __init__(self, master):
        """Initialise state and build the notebook UI under *master*."""
        self.parent = master
        self.file_names = None      # tuple of selected CV data file paths
        self.anode_name = None      # anode data folder path
        self.cathode_name = None    # cathode data folder path
        self.var_p1 = IntVar()      # "overlay graphs" checkbox state, tab 1
        self.var_p2 = IntVar()      # "overlay graphs" checkbox state, tab 2
        self.elec_area = None       # electrode area read from entry fields
        Pmw.initialise()
        self.create_frame(self.parent)

    def create_frame(self, master):
        """Build the two notebook tabs and all their widgets."""
        nb = Pmw.NoteBook(master)
        nb.pack(padx = 10, pady = 10, fill = BOTH, expand = 1)

        ################## First Tab - CV graph using one file #####################
        p1 = nb.add('CV Plots')
        g1_p1 = Pmw.Group(p1, tag_text='Data & Parameters')
        g1_p1.pack(fill = 'both', expand = 1, padx = 6, pady = 6)
        self.file_entry_p1 = Pmw.EntryField(g1_p1.interior(), labelpos = 'w', label_text = 'Data Path: ', entry_width = 40, validate = None, command = None)
        self.file_entry_p1.grid(row = 0, column = 0, columnspan = 2, padx = 5, pady = 5)
        open_file_p1 = Button(g1_p1.interior(), text = 'Select Files', command = self.open_files)
        open_file_p1.grid(row = 0, column = 2, padx = 5, pady = 5)
        self.overlay_graphs_p1 = Checkbutton(g1_p1.interior(), text = 'Overlay graphs', variable = self.var_p1)
        self.overlay_graphs_p1.grid(row = 1, column = 0, padx = 5, pady = 5)
        self.elec_area_entry_p1 = Pmw.EntryField(g1_p1.interior(), labelpos = 'w', label_text = 'Electrode Area: ', entry_width = 4, validate = {'validator': 'real'}, command = None)
        self.elec_area_entry_p1.grid(row = 2, column = 0, padx = 5, pady = 5)
        self.area_unit_entry_p1 = Pmw.OptionMenu(g1_p1.interior(), labelpos='w', label_text='', items = self._units, menubutton_width=4,)
        self.area_unit_entry_p1.setvalue(self._units[0])
        self.area_unit_entry_p1.grid(row = 2, column = 1, padx = 2, pady = 2)

        g2_p1 = Pmw.Group(p1, tag_text='Plot Style')
        g2_p1.pack(fill = 'both', expand = 1, padx = 6, pady = 6)
        self.plot_color_p1 = Pmw.OptionMenu(g2_p1.interior(), labelpos='w', label_text='Plot color: ', items = self._colors, menubutton_width = 6,)
        self.plot_color_p1.setvalue(self._colors[2])
        self.plot_color_p1.grid(row = 0, column = 0, padx = 10, pady = 5)
        self.plot_line_p1 = Pmw.OptionMenu(g2_p1.interior(), labelpos='w', label_text='Line Style: ', items = self._linestyles, menubutton_width = 2,)
        self.plot_line_p1.setvalue(self._linestyles[1])
        self.plot_line_p1.grid(row = 0, column = 1, padx = 10, pady = 5)
        self.plot_marker_p1 = Pmw.OptionMenu(g2_p1.interior(), labelpos='w', label_text='Marker: ', items = self._markers, menubutton_width = 6,)
        self.plot_marker_p1.setvalue(self._markers[1])
        self.plot_marker_p1.grid(row = 0, column = 2, padx = 10, pady = 5)

        # robustness fix: quit via the frame's own parent instead of the
        # global "root" so the class does not depend on the script scope
        close_p1 = Button(p1, text='Cancel', command=master.quit)
        close_p1.pack(side = 'right', expand = 0, padx = 6, pady = 6)
        generate_graphs_p1 = Button(p1, text='Generate graphs', command = self.generate_graph_p1)
        generate_graphs_p1.pack(side = 'right', expand = 0, padx = 6, pady = 6)

        ################## Second tab - Chrono plots #####################
        p2 = nb.add('Chrono Plots')
        g1_p2 = Pmw.Group(p2, tag_text='Data & Parameters')
        g1_p2.pack(fill = 'both', expand = 1, padx = 6, pady = 6)
        self.directory_entry_p2_1 = Pmw.EntryField(g1_p2.interior(), labelpos = 'w', label_text = 'Anode Data: ', entry_width = 40, validate = None, command = None)
        self.directory_entry_p2_1.grid(row = 0, column = 0, columnspan = 2, padx = 10, pady = 5)
        open_directory_p2_1 = Button(g1_p2.interior(), text = 'Select Folder', command = self.open_anode)
        open_directory_p2_1.grid(row = 0, column = 2, padx = 5, pady = 5)
        # cathode widgets are created disabled (single-electrode workflow)
        self.directory_entry_p2_2 = Pmw.EntryField(g1_p2.interior(), labelpos = 'w', label_text = 'Cathode Data: ', entry_width = 40, entry_state = 'disabled', validate = None, command = None)
        self.directory_entry_p2_2.grid(row = 1, column = 0, columnspan = 2, padx = 10, pady = 5)
        open_directory_p2_2 = Button(g1_p2.interior(), text = 'Select Folder', state = DISABLED, command = self.open_cathode)
        open_directory_p2_2.grid(row = 1, column = 2, padx = 5, pady = 5)
        self.overlay_graphs_p2 = Checkbutton(g1_p2.interior(), text = 'Overlay graphs', variable = self.var_p2, state = DISABLED)
        self.overlay_graphs_p2.grid(row = 2, column = 0, padx = 5, pady = 5)
        self.overlay_graphs_p2.select()
        self.elec_area_entry_p2 = Pmw.EntryField(g1_p2.interior(), labelpos = 'w', label_text = 'Electrode Area: ', entry_width = 4, validate = {'validator': 'real'}, command = None)
        self.elec_area_entry_p2.grid(row = 3, column = 0, padx = 5, pady = 5)
        self.area_unit_entry_p2 = Pmw.OptionMenu(g1_p2.interior(), labelpos='w', label_text='', items = self._units, menubutton_width=4,)
        self.area_unit_entry_p2.setvalue(self._units[0])
        self.area_unit_entry_p2.grid(row = 3, column = 1, padx = 2, pady = 2)

        g2_p2 = Pmw.Group(p2, tag_text='Plot Style')
        g2_p2.pack(fill = 'both', expand = 1, padx = 6, pady = 6)
        anode_label = Label(g2_p2.interior(), text = 'Anode --> ')
        anode_label.grid(row = 0, column = 0, padx = 5, pady = 5)
        self.plot_color_p2_1 = Pmw.OptionMenu(g2_p2.interior(), labelpos = 'w', label_text = 'Plot color: ', items = self._colors, menubutton_width = 6,)
        self.plot_color_p2_1.setvalue(self._colors[1])
        self.plot_color_p2_1.grid(row = 0, column = 1, padx = 10, pady = 5)
        self.plot_line_p2_1 = Pmw.OptionMenu(g2_p2.interior(), labelpos='w', label_text='Line Style: ', items = self._linestyles, menubutton_width = 2,)
        self.plot_line_p2_1.setvalue(self._linestyles[1])
        self.plot_line_p2_1.grid(row = 0, column = 2, padx = 10, pady = 5)
        self.plot_marker_p2_1 = Pmw.OptionMenu(g2_p2.interior(), labelpos='w', label_text='Marker: ', items = self._markers, menubutton_width = 6,)
        self.plot_marker_p2_1.setvalue(self._markers[2])
        self.plot_marker_p2_1.grid(row = 0, column = 3, padx = 10, pady = 5)
        cathode_label = Label(g2_p2.interior(), text = 'Cathode --> ')
        cathode_label.grid(row = 1, column = 0, padx = 5, pady = 5)
        self.plot_color_p2_2 = Pmw.OptionMenu(g2_p2.interior(), labelpos = 'w', label_text = 'Plot color: ', items = self._colors, menubutton_width = 6,)
        self.plot_color_p2_2.setvalue(self._colors[2])
        self.plot_color_p2_2.grid(row = 1, column = 1, padx = 10, pady = 5)
        self.plot_line_p2_2 = Pmw.OptionMenu(g2_p2.interior(), labelpos='w', label_text='Line Style: ', items = self._linestyles, menubutton_width = 2,)
        self.plot_line_p2_2.setvalue(self._linestyles[1])
        self.plot_line_p2_2.grid(row = 1, column = 2, padx = 10, pady = 5)
        self.plot_marker_p2_2 = Pmw.OptionMenu(g2_p2.interior(), labelpos='w', label_text='Marker: ', items = self._markers, menubutton_width = 6,)
        self.plot_marker_p2_2.setvalue(self._markers[2])
        self.plot_marker_p2_2.grid(row = 1, column = 3, padx = 10, pady = 5)

        close_p2 = Button(p2, text = 'Cancel', command=master.quit)
        close_p2.pack(side = 'right', expand = 0, padx = 6, pady = 6)
        generate_graphs_p2 = Button(p2, text='Generate graphs', command = self.generate_graphs_p2)
        generate_graphs_p2.pack(side = 'right', expand = 0, padx = 6, pady = 6)

        nb.tab('CV Plots').focus_set()

    def open_files(self):
        """File-picker callback for tab 1; stores and echoes the selection."""
        self.file_names = askopenfilenames()
        self.file_entry_p1.clear()
        self.file_entry_p1.insert(0, self.file_names)

    def open_anode(self):
        """Folder-picker callback for the anode data directory."""
        self.anode_name = askdirectory()
        self.directory_entry_p2_1.clear()
        self.directory_entry_p2_1.insert(0, self.anode_name)

    def open_cathode(self):
        """Folder-picker callback for the cathode data directory."""
        self.cathode_name = askdirectory()
        self.directory_entry_p2_2.clear()
        self.directory_entry_p2_2.insert(0, self.cathode_name)

    def _show_dialog(self, title, message):
        """Pop up a modal Pmw message dialog with the given title/text."""
        dialog = Pmw.MessageDialog(self.parent, title = title, defaultbutton = 0, message_text = message)
        dialog.activate()

    def generate_graph_p1(self):
        """Validate tab-1 inputs and delegate CV plotting to cv.main."""
        self.elec_area = self.elec_area_entry_p1.getvalue()
        if self.file_names and self.elec_area:
            # comma separated list of file paths expected by cv.main
            names = ','.join(self.file_names)
            if cv.main(names, float(self.elec_area), self.area_unit_entry_p1.getvalue(), self.var_p1.get(), plot_color = self.plot_color_p1.getvalue(), line_style = self.plot_line_p1.getvalue(), marker = self.plot_marker_p1.getvalue()) == True:
                self._show_dialog('Success Message', 'Graphs successfully generated!')
            else:
                self._show_dialog('Error Message', 'Failed to generate the graphs. Check error.txt file for details.')
        else:
            self._show_dialog('Error Message', 'Please select data files and enter electrode area to generate graphs!')

    def generate_graphs_p2(self):
        """Validate tab-2 inputs and delegate chrono plotting to chrono.main."""
        self.elec_area = self.elec_area_entry_p2.getvalue()
        if self.anode_name and self.elec_area:
            if chrono.main(self.anode_name, '', float(self.elec_area), self.area_unit_entry_p2.getvalue(), anode_plot_color = self.plot_color_p2_1.getvalue(), anode_line_style = self.plot_line_p2_1.getvalue(), anode_marker = self.plot_marker_p2_1.getvalue(), cathode_plot_color = self.plot_color_p2_2.getvalue(), cathode_line_style = self.plot_line_p2_2.getvalue(), cathode_marker = self.plot_marker_p2_2.getvalue()) == True:
                self._show_dialog('Success Message', 'Graphs successfully generated!')
            else:
                # bug fix: the failure branch previously showed the
                # "please select data files" prompt instead of the failure text
                self._show_dialog('Error Message', 'Failed to generate the graphs. Check error.txt file for details.')
        else:
            # bug fix: the missing-input case previously did nothing at all
            self._show_dialog('Error Message', 'Please select data files and enter electrode area to generate graphs!')
if __name__ == '__main__':
    # build the fixed-size, non-resizable main window
    root = Tk()
    root.title('Cyclic Voltammetry Graph Utility')
    root.maxsize(width=700,height=400)
    root.minsize(width=700,height=400)
    root.resizable(width=NO,height=NO)
    # screen dimensions, used below to centre the window
    w = root.winfo_screenwidth()
    h = root.winfo_screenheight()
    root.update_idletasks()
    # parse the "WxH+X+Y" geometry string into a (width, height) tuple
    rootsize = tuple(int(_) for _ in root.geometry().split('+')[0].split('x'))
    # NOTE(review): the centring math relies on Python 2 "/" floor division
    # (this file imports Tkinter, the Python 2 module name); under Python 3
    # these would be floats, harmless here only because "%d" truncates
    x = w/2 - rootsize[0]/2
    y = h/2 - rootsize[1]/2
    root.geometry("%dx%d+%d+%d" % (rootsize + (x, y)))
    plot = frame(root)
    # redirect stdout/stderr into log files for the lifetime of the UI
    # (error.txt is the file the GUI error dialogs tell the user to check)
    try:
        log_file = open('log.txt', 'w')
        err_file = open('error.txt', 'w')
    except IOError:
        sys.stderr.write('Unable to open files in write mode')
    else:
        sys.stdout = log_file
        sys.stderr = err_file
    root.mainloop()
|
<filename>ziplineST.py<gh_stars>1-10
import pytz
from datetime import datetime
from zipline.api import order, symbol, record, order_target, order_target_percent, set_benchmark
import numpy as np
# cal = get_calendar('NYSE')
# import pandas as pd
# bundle_name = 'quandl' # "a bundle name"
# ticker_name = "IBM"
# end_date = pd.Timestamp.utcnow()
# calendar_name = 'NYSE' # "the calendar name"
# window=200 # how many days you want to look back
# bundle_data = bundles.load(bundle_name)
# data_por = DataPortal(bundle_data.asset_finder,
# get_calendar(calendar_name),
# bundle_data.equity_daily_bar_reader.first_trading_day,
# equity_minute_reader=bundle_data.equity_minute_bar_reader,
# equity_daily_reader=bundle_data.equity_daily_bar_reader,
# adjustment_reader=bundle_data.adjustment_reader)
# sym = data_por.asset_finder.lookup_symbol(ticker_name, end_date)
# data = data_por.get_history_window(assets=[sym],
# end_dt=end_date,
# bar_count=window,
# frequency='1d',
# data_frequency='daily'
# ,field='close'
# )
# print(data)
# %%zipline --start 2014-1-1 --end 2018-1-1 -o dma.pickle
def initialize(context):
    """Zipline setup hook: trade MSFT and benchmark against it."""
    asset = symbol('MSFT')
    set_benchmark(asset)
    # bar counter used by handle_data warm-up logic
    context.i = 0
    context.asset = asset
def handle_data(context, data):
    """Per-bar hook: compute the SuperTrend indicator over the last 10
    daily bars and trade on the relation between the latest SuperTrend
    value and the previous close."""
    # SuperTrend parameters: lookback period and band multiplier
    n = 7
    f = 3
    context.i += 1
    # if context.i < n:
    #     return
    # last 10 daily OHLCV bars for the traded asset
    df = data.history(context.asset, ['price', 'open', 'high', 'low', 'close', 'volume'], bar_count=10, frequency="1d")
    df = SuperTrend(df,n,f)
    # print(df['SuperTrend'])
    current_positions = context.portfolio.positions[context.asset].amount
    # Trading logic
    # NOTE(review): DataFrame.ix is removed in modern pandas -- this code
    # requires an old pandas version (or a port to .iloc/.loc)
    # NOTE(review): a target of 10 means 1000% of portfolio value -- confirm
    # the intended leverage (0.1 would be 10%)
    if (df.ix[len(df)-1,'SuperTrend'] > df.ix[len(df)-2,'close']) :
        # latest SuperTrend above previous close -> enter/hold long
        order_target_percent(context.asset, 10, stop_price = 2*df.ix[len(df)-1,'ATR'])
        # NOTE(review): recorded under the key AAPL although the traded
        # asset is MSFT -- presumably a leftover from a template
        record(AAPL=data.current(context.asset, ['price', 'open', 'high', 'low', 'close', 'volume']), SuperTrend = df['SuperTrend'], status = "buy")
    elif (df.ix[len(df)-1,'SuperTrend'] < df.ix[len(df)-2,'close']) and current_positions!=0:
        # latest SuperTrend below previous close while holding -> exit
        order_target_percent(context.asset, 0, stop_price = 2*df.ix[len(df)-1,'ATR'])
        record(AAPL=data.current(context.asset, ['price', 'open', 'high', 'low', 'close', 'volume']), SuperTrend = df['SuperTrend'], status = "sell")
    else:
        # no signal: just record the bar
        record(AAPL=data.current(context.asset, ['price', 'open', 'high', 'low', 'close', 'volume']), SuperTrend = df['SuperTrend'], status = "---")
def SuperTrend(df, n, f):
    """Compute the SuperTrend indicator and append it to *df*.

    Adds the intermediate columns (true-range components, TR, ATR and the
    basic/final upper and lower bands) plus a 'SuperTrend' column. The
    first row is dropped (no previous close for the true range) and the
    index is reset so integer positions and labels agree.

    :param df: DataFrame with at least 'high', 'low' and 'close' columns.
    :param n: ATR lookback period (must be < len(df)).
    :param f: band multiplier factor.
    :return: the same DataFrame with the indicator columns added.
    """
    # true range: greatest of high-low, |high-prev close|, |low-prev close|
    df['H-L'] = abs(df['high'] - df['low'])
    df['H-PC'] = abs(df['high'] - df['close'].shift(1))
    df['L-PC'] = abs(df['low'] - df['close'].shift(1))
    df['TR'] = df[['H-L', 'H-PC', 'L-PC']].max(axis=1)
    df.dropna(inplace=True)
    # bug fix: after dropna the index started at 1 while the loops below
    # index positionally -- reset so label and position agree (this also
    # replaces the removed .ix accessor with unambiguous .loc)
    df.reset_index(drop=True, inplace=True)
    # Wilder-style smoothed ATR, seeded with the mean of the first n-1 TRs
    # (the original implementation's seeding choice, preserved)
    df['ATR'] = np.nan
    df.loc[n - 1, 'ATR'] = df['TR'].iloc[:n - 1].mean()
    for i in range(n, len(df)):
        df.loc[i, 'ATR'] = (df.loc[i - 1, 'ATR'] * (n - 1) + df.loc[i, 'TR']) / n
    # Calculation of SuperTrend basic and final bands
    df['Upper Basic'] = (df['high'] + df['low']) / 2 + (f * df['ATR'])
    df['Lower Basic'] = (df['high'] + df['low']) / 2 - (f * df['ATR'])
    df['Upper Band'] = df['Upper Basic']
    df['Lower Band'] = df['Lower Basic']
    for i in range(n, len(df)):
        # final upper band ratchets down while price stays below it
        if df.loc[i - 1, 'close'] <= df.loc[i - 1, 'Upper Band']:
            df.loc[i, 'Upper Band'] = min(df.loc[i, 'Upper Basic'], df.loc[i - 1, 'Upper Band'])
        else:
            df.loc[i, 'Upper Band'] = df.loc[i, 'Upper Basic']
        # final lower band ratchets up while price stays above it
        if df.loc[i - 1, 'close'] >= df.loc[i - 1, 'Lower Band']:
            df.loc[i, 'Lower Band'] = max(df.loc[i, 'Lower Basic'], df.loc[i - 1, 'Lower Band'])
        else:
            df.loc[i, 'Lower Band'] = df.loc[i, 'Lower Basic']
    df['SuperTrend'] = np.nan
    # bug fix: the seed was previously assigned inside a bogus
    # "for i in df['SuperTrend']" loop that iterated over NaN values and
    # indexed df['Upper Band'][NaN]; it is a single assignment at n-1
    if df.loc[n - 1, 'close'] <= df.loc[n - 1, 'Upper Band']:
        df.loc[n - 1, 'SuperTrend'] = df.loc[n - 1, 'Upper Band']
    else:
        df.loc[n - 1, 'SuperTrend'] = df.loc[n - 1, 'Lower Band']
    # carry the trend forward, flipping bands when the close crosses them
    for i in range(n, len(df)):
        if df.loc[i - 1, 'SuperTrend'] == df.loc[i - 1, 'Upper Band'] and df.loc[i, 'close'] <= df.loc[i, 'Upper Band']:
            df.loc[i, 'SuperTrend'] = df.loc[i, 'Upper Band']
        elif df.loc[i - 1, 'SuperTrend'] == df.loc[i - 1, 'Upper Band'] and df.loc[i, 'close'] >= df.loc[i, 'Upper Band']:
            df.loc[i, 'SuperTrend'] = df.loc[i, 'Lower Band']
        elif df.loc[i - 1, 'SuperTrend'] == df.loc[i - 1, 'Lower Band'] and df.loc[i, 'close'] >= df.loc[i, 'Lower Band']:
            df.loc[i, 'SuperTrend'] = df.loc[i, 'Lower Band']
        elif df.loc[i - 1, 'SuperTrend'] == df.loc[i - 1, 'Lower Band'] and df.loc[i, 'close'] <= df.loc[i, 'Lower Band']:
            df.loc[i, 'SuperTrend'] = df.loc[i, 'Upper Band']
    return df
"""
Api request handler
TODO:
Use serializers to generate correct output
"""
import json
import logging
from TrackerDash.common import theme_helpers
from TrackerDash.database import common as db_common
from TrackerDash.database.mongo_accessor import MongoAccessor
from TrackerDash.schemas.api import Graph as GraphSchema
from TrackerDash.schemas.api import Dashboard as DashboardSchema
class APIRequest(object):
    """
    Base class for an API request.

    Stores the raw request and its type string, opens a mongo accessor,
    and immediately dispatches to ``process`` on construction.
    """
    def __init__(self, request, request_type):
        self.request = request
        self.request_type = request_type
        self.accessor = MongoAccessor()
        self.process()

    def process(self):
        """
        Handle the request; concrete subclasses must override this.
        """
        raise NotImplementedError
class APIGETRequest(APIRequest):
    """API request handler for the read-only (GET) endpoints."""

    def __init__(self, request, request_type):
        self.response = None
        super(APIGETRequest, self).__init__(request, request_type)

    def render(self):
        """
        return the request as readable json
        """
        return json.dumps(self.response)

    def process(self):
        """
        process the request
        should not return anything
        raise exceptions here to generate a http 500 error
        """
        logging.debug("Processing API GET Request: %s" % self.request_type)
        # dispatch table mapping each supported request type to its handler
        handlers = {
            "get_dashboard_names": self.get_dashboard_names,
            "get_dashboard_information": self.get_dashboard_information,
            "get_graph_names": self.get_graph_names,
            "get_graph_information": self.get_graph_information,
            "get_data_sources": self._get_data_sources,
        }
        handler = handlers.get(self.request_type)
        if handler is None:
            logging.info("request: %s is not implemented" % self.request_type)
            raise NotImplementedError(
                "request: %s is not implemented" % self.request_type)
        self.response = handler()

    def _get_data_sources(self):
        """
        wrap the configured data sources in a response payload
        """
        return {
            "data_sources": db_common.get_configured_data_sources(
                self.accessor)
        }

    def get_dashboard_names(self):
        """
        return a list of dashboard names configured
        """
        return {"dashboards": db_common.get_dashboard_names(self.accessor)}

    def get_dashboard_information(self):
        """
        return the dashboard documents
        """
        documents = self.accessor.get_all_documents_from_collection(
            'dashboard')
        # strip the internal mongo bookkeeping fields before returning
        for document in documents:
            document.pop("_id")
            document.pop("__date")
        return {"dashboards": documents}

    def get_graph_information(self):
        """
        get all the configured graph information
        """
        documents = self.accessor.get_all_documents_from_collection('graph')
        # strip the internal mongo bookkeeping fields before returning
        for document in documents:
            document.pop("_id")
            document.pop("__date")
        return {"graphs": documents}

    def get_graph_names(self):
        """
        get the names of all the configured graphs
        """
        information = self.get_graph_information()
        return {"graphs": [graph["title"] for graph in information["graphs"]]}
class APIPOSTRequest(APIRequest):
    """
    Handler for API POST requests.

    ``process`` runs at construction time (see APIRequest). Every handled
    request type returns ``self``; the redundant pass-through ``__init__``
    of the previous version has been removed (the inherited one suffices).
    """

    def get_content(self):
        """
        Decode the JSON body of the twisted request.

        Only the first line of the request body is read.
        """
        return json.loads(self.request.content.readlines()[0])

    def process(self):
        """
        Process the twisted request.

        Returns ``self`` for every supported request type; raises
        NotImplementedError for unknown types (surfaced as an HTTP 500).
        """
        logging.debug("Processing API POST request: %s" % self.request_type)
        rt = self.request_type
        content = self.get_content()
        # Post raw data to a data source.
        if rt == "post_data":
            data_source = content["data_source"]
            document = content["data"]
            # No validation needed here as we can post to a
            # non existent data source.
            self.accessor.add_document_to_collection_redundant(
                data_source,
                document,
                60)
            return self
        # Create a new graph object.
        elif rt == "create_graph":
            graph_data = content["data"]
            schema = GraphSchema()
            graph_data_validated = schema.deserialize(graph_data)
            # We need to ensure that a datasource collection is present.
            try:
                self.accessor.create_collection(
                    graph_data_validated["data_source"])
            except NameError:
                logging.debug(
                    "data source for graph '%s' already exists" % (
                        graph_data_validated["title"], ))
            self.accessor.add_document_to_collection(
                "graph", graph_data_validated)
            return self
        # Create a new dashboard.
        elif rt == "create_dashboard":
            dashboard_data = content["data"]
            logging.info("create_dashboard request data: %r" % dashboard_data)
            schema = DashboardSchema()
            dashboard_data_validated = schema.deserialize(dashboard_data)
            self.accessor.add_document_to_collection(
                "dashboard", dashboard_data_validated)
            # Fix: this branch previously fell through without returning,
            # unlike every other handled request type.
            return self
        elif rt == "set_theme":
            logging.info("content for set_theme: %r" % content)
            theme_helpers.set_theme(self.accessor, content)
            return self
        elif rt == "set_style":
            logging.info("content for set_style: %r" % content)
            theme_helpers.set_style(self.accessor, content)
            return self
        else:
            logging.info("request: %s is not implemented" % self.request_type)
            raise NotImplementedError(
                "POST request: %s has not been implemented" % (
                    self.request_type, ))
|
# -*- coding: utf-8 -*-
"""
<NAME>
11/24/19
"""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Code Flow
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
'''
The true/false variables in the Variables Used section are to be manipulated
to determine what type of model should be trained. For each model, the data
is loaded and split into training and test data. The image shape is saved for
use as parameters in the model. According to the true/false variables set,
the number of epochs for which the model is trained is varied.
The data is reshaped to be represented as sparse, binary vectors. After this,
the data may be augmented.
Depending on the true/false parameters specified, the convolutional model is
built with one or many convolutional and max pooling layers and possible
dropout followed by two dense layers with relu activation and a final dense
layer with 10 units and softmax activation.
The model is then trained for the specified number of epochs and the
results in terms of the loss function and accuracy are plotted on a graph
for both the training and test data. The start and stop time for the model
training are recorded and the duration is printed along with the final test
loss and test accuracy.
'''
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Import Libraries Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import matplotlib.pyplot as plt
from keras import models
from keras import layers
from keras.layers import Dropout
import datetime
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables Used
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Set as True if the model should include data augmentation
dataAugmentation=True
#Set as True if the model has at least three convolutional layers (no dropout)
threeConvLayer=True
#Set as True if the model has four convolutional layers (no dropout)
fourConvLayer=False
#Set as True if the model has dropout (three convolutional layers)
dropOut=False
'''
num_nodes (first and second) are used to set the number of hidden units in the
two convolutional layers
num_epochs is used to determine how many epochs the model should be trained for
size_batch is used to set the number of samples used to train the network
at a time
verbose_setting is used to generate output that allows the user to visualize
the progress of the training
num_classes is used to signify that there are 10 classes in the data
train/test_images are used to store the normalized input vector
train/test_labels are used to store the model factors
train/test_number_images, train/test_x_image_size, and
train/test_y_image_size are used in the convolutional layers to specify
the shape of the inputs
train_datagen is used to perform data augmentation
acc, val_acc, loss, and val_loss are used to store the results of the model
training for use in a plot to compare loss and metrics for the training and
test data
'''
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Objective
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
'''The goal of this program is to create a ConvNet that accurately classifies
data trained from the MNIST fashion dataset. The code is split into
sections and subtasks are commented out and classified in the applicable
section
'''
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Load Data Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Set a seed to allow for replication
np.random.seed(7)
from keras.datasets import fashion_mnist
#Split into test and training data
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
#Capture the time to allow us to calculate training duration
start_time = datetime.datetime.now()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Parameters Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Set a variable to represent the first dimension--number of images
train_number_images = train_images.shape[0]
test_number_images = test_images.shape[0]
#Set a variable to represent the second dimension--pixel value length
train_x_image_size = train_images.shape[1]
train_y_image_size = train_images.shape[2]
#Set a variabelt o represent the third dimension--pixel value width
test_x_image_size = test_images.shape[1]
test_y_image_size = test_images.shape[2]
#Set the number of hidden units in the first dense layer
num_nodes_first = 448
#Set the number of hidden units in the second dense layer
num_nodes_second=112 #128
#Train the dropout model through 16 epochs
if dropOut:
num_epochs=16
#Train the data augmentation model through 8 epochs
elif dataAugmentation:
num_epochs=100
#Train the four convolutional layer model through 6 epochs
elif fourConvLayer:
num_epochs=6
#Train the three convolutional layer model through 6 epochs
elif threeConvLayer:
num_epochs=6 #Four conv layer trained through 15 epochs
#Train the one convolutional layer model through 4 epochs
else:
num_epochs=4
#Set the number of samples processed before the model is updated as 30
size_batch = 30
#Return a visual progress indicator
verbose_setting = 1
#Set the number of classes as 10 since there are 10 types of clothing
num_classes = 10
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Pretreat Data Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Reshape the training images
train_images = train_images.reshape((train_number_images, train_x_image_size , train_y_image_size, 1))
#Convert training images to a float
train_images = (train_images.astype('float32'))
#Reshape the test images
test_images = test_images.reshape((test_number_images, test_x_image_size , test_y_image_size, 1))
#Convert test images to a float
test_images = (test_images.astype('float32'))
#Use one hot encoding to transform the integer into a catagorical variable (binary vector)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
#Perform if the model is data augmentation
if dataAugmentation:
#Create a data generator for the training data
train_datagen = ImageDataGenerator(
#Rescale the images
rescale=1./255,
#Include copies of the images rotated by up to 40 degrees
rotation_range=25,
#Shift the image horizontally by up to 25% of width
width_shift_range=0.15,
#Shift the image vertically by up to 25% of width
height_shift_range=0.15,
#Set the shear angle intensity
shear_range=0.1,
#Set the amount of zoom
zoom_range=(.9,1.1),
horizontal_flip=True,)
# The validation data does not get augmented
test_datagen = ImageDataGenerator(rescale=1./255)
#Augment the training data
train_generator = train_datagen.flow(
train_images, train_labels,
batch_size=32)
#Do not augment the validation data
validation_generator = test_datagen.flow(
test_images, test_labels,
batch_size=32)
else:
#Rescale test and training images to [0,1]
train_images = (train_images / 255)-.5
test_images = (test_images/ 255)-.5
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Define Model Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Start the sequential model
network = models.Sequential()
#Add a convolutional layer of 32 hidden units with relu activation
network.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(train_x_image_size , train_y_image_size, 1)))
#Add a max pooling layer to reduce dimensionality
network.add(layers.MaxPooling2D((2, 2)))
#Add dropout to reduce overfitting
if dropOut:
network.add(Dropout(0.2))
if threeConvLayer:
#Add a convolutional layer of 64 hidden units with relu activation
network.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=(train_x_image_size , train_y_image_size, 1),padding='same'))
#Add a max pooling layer to reduce dimensionality
network.add(layers.MaxPooling2D((2, 2)))
#Add dropout to reduce overfitting
if dropOut:
network.add(Dropout(0.2))
#Add a convolutional layer of 128 hidden units with relu activation
network.add(layers.Conv2D(128, (3, 3), activation='relu', input_shape=(train_x_image_size , train_y_image_size, 1),padding='same'))
#Add a max pooling layer to reduce dimensionality
network.add(layers.MaxPooling2D((2, 2)))
#Add dropout to reduce overfitting
if dropOut:
network.add(Dropout(0.2))
if fourConvLayer:
#Add a convolutional layer of 128 hidden units with relu activation
network.add(layers.Conv2D(128, (3, 3), activation='relu', input_shape=(train_x_image_size , train_y_image_size, 1),padding='same'))
#Add a max pooling layer to reduce dimensionality
network.add(layers.MaxPooling2D((2, 2)))
#Reshape the tensor to one dimension
network.add(layers.Flatten())
#Add a dense layer of num_nodes_first hidden units with relu activation
network.add(layers.Dense(num_nodes_first, activation='relu'))
#Add dropout to reduce overfitting
if dropOut:
network.add(Dropout(0.2))
#Add a dense layer of num_nodes_second hidden units with relu activation
network.add(layers.Dense(num_nodes_second, activation='relu'))
if dropOut:
network.add(Dropout(0.2))
#Add the final hidden layer used to classify the images in one of num_classes classes
network.add(layers.Dense(num_classes, activation='softmax'))
#Set the compiler to have the adam optimizer with categorical crossentropy loss and accuracy as the metric
network.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
#View a detail summary of the model structure
network.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Fit Model Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Use fit_generator to train the model if data augmentation was used
if dataAugmentation:
history=network.fit_generator(train_generator, steps_per_epoch=len(train_images)/32, epochs=num_epochs, validation_data=validation_generator, validation_steps=len(test_images)/32)
#Use .fit to train the model
else:
history = network.fit(train_images, train_labels, epochs=num_epochs, batch_size=size_batch, verbose = verbose_setting, validation_split=0.2)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Show output Section
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Save test loss and accuracy from the non-data augmentation results
if not dataAugmentation:
test_loss, test_acc = network.evaluate(test_images, test_labels)
#Print the test loss
print('test_loss:', test_loss)
#Print the test accuracy
print('test_acc:', test_acc)
#Save the stop time to calculate the run time
stop_time = datetime.datetime.now()
#Print the run time
print ("Time required for training:",stop_time - start_time)
#Save the test accuracy of the model per epoch to plot
acc = history.history['acc']
#Save the validation accuracy of the model per epoch to plot
val_acc = history.history['val_acc']
#Save the test loss of the model per epoch to plot
loss = history.history['loss']
#Save the validation loss of the model per epoch to plot
val_loss = history.history['val_loss']
#Save the number of epochs for use in our metric plot
epochs = range(len(acc))
#Create a plot of training accuracy
plt.plot(epochs, acc, 'bo', label='Training acc')
#Add to the plot validation accuracy
plt.plot(epochs, val_acc, 'b', label='Validation acc')
#Set a title for the plot
plt.title('Training and validation accuracy')
#Set a y-axis label for the plot
plt.ylabel('accuracy')
#Set an x-axis label for the plot
plt.xlabel('epoch')
#Put a legend on the plot
plt.legend()
#Create figure
plt.figure()
#Create a plot of training loss
plt.plot(epochs, loss, 'bo', label='Training loss')
#Add to the plot validation loss
plt.plot(epochs, val_loss, 'b', label='Validation loss')
#Set a title for the plot
plt.title('Training and validation loss')
#Set a y-axis label for the plot
plt.ylabel('loss')
#Set an x-axis label for the plot
plt.xlabel('epoch')
#Put a legend on the plot
plt.legend()
#Show the plots
plt.show() |
<reponame>contrera/gammapy
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from numpy.testing import assert_allclose
import pytest
from astropy import units as u
from ...utils.testing import requires_data, requires_dependency
from ...utils.testing import assert_quantity_allclose
from ..gammacat import SourceCatalogGammaCat
from ..gammacat import GammaCatResource, GammaCatResourceIndex
# Reference values for three representative gamma-cat sources, used to
# parametrize the tests below. Each "name" must match a key of the
# SOURCES_STR golden strings defined at the bottom of this module.
SOURCES = [
    {
        # Restored from an anonymization placeholder ("<NAME>"): the golden
        # string with norm 1.36e-11 and 24 flux points is keyed "Vela X".
        "name": "Vela X",
        "spec_type": "ecpl",
        "dnde_1TeV": 1.36e-11 * u.Unit("cm-2 s-1 TeV-1"),
        "dnde_1TeV_err": 7.531e-13 * u.Unit("cm-2 s-1 TeV-1"),
        "flux_1TeV": 2.104e-11 * u.Unit("cm-2 s-1"),
        "flux_1TeV_err": 1.973e-12 * u.Unit("cm-2 s-1"),
        "eflux_1_10TeV": 9.265778680255336e-11 * u.Unit("erg cm-2 s-1"),
        "eflux_1_10TeV_err": 9.590978299538194e-12 * u.Unit("erg cm-2 s-1"),
        "n_flux_points": 24,
        "is_pointlike": False,
        "spatial_model": "SkyGaussian",
    },
    {
        "name": "HESS J1848-018",
        "spec_type": "pl",
        "dnde_1TeV": 3.7e-12 * u.Unit("cm-2 s-1 TeV-1"),
        "dnde_1TeV_err": 4e-13 * u.Unit("cm-2 s-1 TeV-1"),
        "flux_1TeV": 2.056e-12 * u.Unit("cm-2 s-1"),
        "flux_1TeV_err": 3.187e-13 * u.Unit("cm-2 s-1"),
        "eflux_1_10TeV": 6.235650344765057e-12 * u.Unit("erg cm-2 s-1"),
        "eflux_1_10TeV_err": 1.2210315515569183e-12 * u.Unit("erg cm-2 s-1"),
        "n_flux_points": 11,
        "is_pointlike": False,
        "spatial_model": "SkyGaussian",
    },
    {
        "name": "HESS J1813-178",
        "spec_type": "pl2",
        "dnde_1TeV": 2.678e-12 * u.Unit("cm-2 s-1 TeV-1"),
        "dnde_1TeV_err": 2.55e-13 * u.Unit("cm-2 s-1 TeV-1"),
        "flux_1TeV": 2.457e-12 * u.Unit("cm-2 s-1"),
        "flux_1TeV_err": 3.692e-13 * u.Unit("cm-2 s-1"),
        "eflux_1_10TeV": 8.923614018939419e-12 * u.Unit("erg cm-2 s-1"),
        "eflux_1_10TeV_err": 1.4613807070890267e-12 * u.Unit("erg cm-2 s-1"),
        "n_flux_points": 13,
        "is_pointlike": False,
        "spatial_model": "SkyGaussian",
    },
]
@pytest.fixture(scope="session")
def gammacat():
    """Session-scoped fixture providing the gamma-cat source catalog."""
    filename = "$GAMMAPY_EXTRA/datasets/catalogs/gammacat/gammacat.fits.gz"
    return SourceCatalogGammaCat(filename=filename)
@requires_data("gammapy-extra")
@requires_data("gamma-cat")
class TestSourceCatalogGammaCat:
    """Tests for the gamma-cat source catalog as a whole."""

    def test_source_table(self, gammacat):
        assert gammacat.name == "gamma-cat"
        assert len(gammacat.table) == 162

    def test_w28_alias_names(self, gammacat):
        # Every alias must resolve to the same catalog entry as "W28".
        names = [
            "W28",
            "HESS J1801-233",
            "W 28",
            "SNR G6.4-0.1",
            "SNR G006.4-00.1",
            "GRO J1801-2320",
        ]
        for name in names:
            assert str(gammacat[name]) == str(gammacat["W28"])

    def test_sort_table(self):
        name = "HESS J1848-018"
        sort_keys = ["ra", "dec", "reference_id"]
        for sort_key in sort_keys:
            # This test modifies the catalog, so build a fresh one each
            # time. Fix: the previous code called the `gammacat` fixture
            # function directly (`cat = gammacat()`), which pytest forbids
            # (fixtures must be requested as arguments, not called).
            filename = "$GAMMAPY_EXTRA/datasets/catalogs/gammacat/gammacat.fits.gz"
            cat = SourceCatalogGammaCat(filename=filename)
            cat.table.sort(sort_key)
            assert cat[name].name == name

    def test_to_sky_models(self, gammacat):
        sources = gammacat.to_sky_models()
        source = sources.skymodels[0]
        assert len(sources.skymodels) == 74
        assert source.name == "CTA 1"
        assert_allclose(source.spectral_model.parameters["index"].value, 2.2)
@requires_data("gammapy-extra")
@requires_data("gamma-cat")
class TestSourceCatalogObjectGammaCat:
    """Tests for individual gamma-cat catalog entries."""

    def test_data(self, gammacat):
        # Raw data of the first catalog entry (CTA 1).
        source = gammacat[0]
        assert isinstance(source.data, OrderedDict)
        assert source.data["common_name"] == "CTA 1"
        assert_quantity_allclose(source.data["dec"], 72.782997 * u.deg)

    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_str(self, gammacat, ref):
        # The printed representation must match the golden string verbatim.
        ss = str(gammacat[ref["name"]])
        assert ss == SOURCES_STR[ref["name"]]

    def test_data_python_dict(self, gammacat):
        # The plain-Python view must use builtin float/list types.
        source = gammacat[0]
        data = source._data_python_dict
        assert type(data["ra"]) == float
        assert data["ra"] == 1.649999976158142
        assert type(data["sed_e_min"]) == list
        assert type(data["sed_e_min"][0]) == float
        assert_allclose(data["sed_e_min"][0], 0.5600000023841858)

    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_spectral_model(self, gammacat, ref):
        # Check dnde, integral flux and energy flux against reference values.
        source = gammacat[ref["name"]]
        spectral_model = source.spectral_model
        assert source.data["spec_type"] == ref["spec_type"]
        e_min, e_max, e_inf = [1, 10, 1e10] * u.TeV
        dne = spectral_model(e_min)
        flux = spectral_model.integral(emin=e_min, emax=e_inf)
        eflux = spectral_model.energy_flux(emin=e_min, emax=e_max).to("erg cm-2 s-1")
        assert_quantity_allclose(dne, ref["dnde_1TeV"], rtol=1e-3)
        assert_quantity_allclose(flux, ref["flux_1TeV"], rtol=1e-3)
        assert_quantity_allclose(eflux, ref["eflux_1_10TeV"], rtol=1e-3)

    @requires_dependency("uncertainties")
    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_spectral_model_err(self, gammacat, ref):
        # Same as test_spectral_model, but including propagated errors.
        source = gammacat[ref["name"]]
        spectral_model = source.spectral_model
        e_min, e_max, e_inf = [1, 10, 1e10] * u.TeV
        dnde, dnde_err = spectral_model.evaluate_error(e_min)
        flux, flux_err = spectral_model.integral_error(emin=e_min, emax=e_inf)
        eflux, eflux_err = spectral_model.energy_flux_error(emin=e_min, emax=e_max).to(
            "erg cm-2 s-1"
        )
        assert_quantity_allclose(dnde, ref["dnde_1TeV"], rtol=1e-3)
        assert_quantity_allclose(flux, ref["flux_1TeV"], rtol=1e-3)
        assert_quantity_allclose(eflux, ref["eflux_1_10TeV"], rtol=1e-3)
        assert_quantity_allclose(dnde_err, ref["dnde_1TeV_err"], rtol=1e-3)
        assert_quantity_allclose(flux_err, ref["flux_1TeV_err"], rtol=1e-3)
        assert_quantity_allclose(eflux_err, ref["eflux_1_10TeV_err"], rtol=1e-3)

    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_flux_points(self, gammacat, ref):
        source = gammacat[ref["name"]]
        flux_points = source.flux_points
        assert len(flux_points.table) == ref["n_flux_points"]

    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_spatial_model(self, gammacat, ref):
        source = gammacat[ref["name"]]
        spatial_model = source.spatial_model
        # TODO: put better asserts on model properties
        # TODO: add a point and shell source -> separate list of sources for morphology test parametrization?
        assert spatial_model.__class__.__name__ == ref["spatial_model"]
        assert source.is_pointlike == ref["is_pointlike"]

    @pytest.mark.parametrize("ref", SOURCES, ids=lambda _: _["name"])
    def test_sky_model(self, gammacat, ref):
        # Smoke test: building the sky model must not raise.
        gammacat[ref["name"]].sky_model
class TestGammaCatResource:
    """Unit tests for GammaCatResource."""

    def setup(self):
        # One canonical resource shared by the tests below, plus its
        # expected global identifier.
        self.resource = GammaCatResource(
            source_id=42, reference_id="2010A&A...516A..62A", file_id=2
        )
        self.global_id = "42|2010A&A...516A..62A|2|none"

    def test_global_id(self):
        assert self.resource.global_id == self.global_id

    def test_eq(self):
        a = self.resource
        # Differs from `a` only in the (defaulted) file_id.
        b = GammaCatResource(source_id=42, reference_id="2010A&A...516A..62A")
        assert a == a
        assert a != b

    def test_lt(self):
        ref = GammaCatResource(
            source_id=42, reference_id="2010A&A...516A..62A", file_id=2
        )
        # Not less than itself.
        assert not ref < ref
        # Ordering considers source_id, reference_id and file_id.
        assert ref < GammaCatResource(
            source_id=43, reference_id="2010A&A...516A..62A", file_id=2
        )
        assert ref < GammaCatResource(
            source_id=42, reference_id="2010A&A...516A..62B", file_id=2
        )
        assert ref < GammaCatResource(
            source_id=42, reference_id="2010A&A...516A..62A", file_id=3
        )
        assert ref > GammaCatResource(
            source_id=41, reference_id="2010A&A...516A..62A", file_id=2
        )

    def test_repr(self):
        wanted = (
            "GammaCatResource(source_id=42, reference_id='2010A&A...516A..62A', "
            "file_id=2, type='none', location='none')"
        )
        assert repr(self.resource) == wanted

    def test_to_dict(self):
        pairs = [
            ("source_id", 42),
            ("reference_id", "2010A&A...516A..62A"),
            ("file_id", 2),
            ("type", "none"),
            ("location", "none"),
        ]
        assert self.resource.to_dict() == OrderedDict(pairs)

    def test_dict_roundtrip(self):
        # to_dict followed by from_dict must reproduce an equal resource.
        rebuilt = GammaCatResource.from_dict(self.resource.to_dict())
        assert rebuilt == self.resource
class TestGammaCatResourceIndex:
    """Unit tests for GammaCatResourceIndex."""

    def setup(self):
        # A small index with two sources (42 appears twice) in a
        # deliberately unsorted order.
        self.resource_index = GammaCatResourceIndex(
            [
                GammaCatResource(source_id=99, reference_id="2014ApJ...780..168A"),
                GammaCatResource(
                    source_id=42,
                    reference_id="2010A&A...516A..62A",
                    file_id=2,
                    type="sed",
                ),
                GammaCatResource(
                    source_id=42, reference_id="2010A&A...516A..62A", file_id=1
                ),
            ]
        )

    def test_repr(self):
        assert repr(self.resource_index) == "GammaCatResourceIndex(n_resources=3)"

    def test_eq(self):
        resource_index1 = self.resource_index
        # Drop one resource so the two indexes differ.
        resource_index2 = GammaCatResourceIndex(resource_index1.resources[:-1])
        assert resource_index1 == resource_index1
        assert resource_index1 != resource_index2

    def test_unique_source_ids(self):
        expected = [42, 99]
        assert self.resource_index.unique_source_ids == expected

    def test_unique_reference_ids(self):
        expected = ["2010A&A...516A..62A", "2014ApJ...780..168A"]
        assert self.resource_index.unique_reference_ids == expected

    def test_global_ids(self):
        # Global ids keep the original (unsorted) insertion order.
        expected = [
            "99|2014ApJ...780..168A|-1|none",
            "42|2010A&A...516A..62A|2|sed",
            "42|2010A&A...516A..62A|1|none",
        ]
        assert self.resource_index.global_ids == expected

    def test_sort(self):
        expected = [
            "42|2010A&A...516A..62A|1|none",
            "42|2010A&A...516A..62A|2|sed",
            "99|2014ApJ...780..168A|-1|none",
        ]
        assert self.resource_index.sort().global_ids == expected

    def test_to_list(self):
        result = self.resource_index.to_list()
        assert isinstance(result, list)
        assert len(result) == 3

    def test_list_roundtrip(self):
        data = self.resource_index.to_list()
        actual = GammaCatResourceIndex.from_list(data)
        assert actual == self.resource_index

    def test_to_table(self):
        table = self.resource_index.to_table()
        assert len(table) == 3
        assert table.colnames == [
            "source_id",
            "reference_id",
            "file_id",
            "type",
            "location",
        ]

    def test_table_roundtrip(self):
        table = self.resource_index.to_table()
        actual = GammaCatResourceIndex.from_table(table)
        assert actual == self.resource_index

    @requires_dependency("pandas")
    def test_to_pandas(self):
        df = self.resource_index.to_pandas()
        df2 = df.query("source_id == 42")
        assert len(df2) == 2

    @requires_dependency("pandas")
    def test_pandas_roundtrip(self):
        df = self.resource_index.to_pandas()
        actual = GammaCatResourceIndex.from_pandas(df)
        assert actual == self.resource_index

    @requires_dependency("pandas")
    def test_query(self):
        resource_index = self.resource_index.query('type == "sed" and source_id == 42')
        assert len(resource_index.resources) == 1
        assert resource_index.resources[0].global_id == "42|2010A&A...516A..62A|2|sed"
# Golden __str__ outputs, compared verbatim in test_str above. Keys must
# match the "name" entries of SOURCES.
SOURCES_STR = {}
SOURCES_STR[
    "Vela X"
] = """
*** Basic info ***
Catalog row index (zero-based) : 36
Common name : Vela X
Other names : HESS J0835-455
Location : gal
Class : pwn
TeVCat ID : 86
TeVCat 2 ID : yVoFOS
TeVCat name : TeV J0835-456
TGeVCat ID : 37
TGeVCat name : TeV J0835-4536
Discoverer : hess
Discovery date : 2006-03
Seen by : hess
Reference : 2012A&A...548A..38A
*** Position info ***
SIMBAD:
RA : 128.287 deg
DEC : -45.190 deg
GLON : 263.332 deg
GLAT : -3.106 deg
Measurement:
RA : 128.750 deg
DEC : -45.600 deg
GLON : 263.856 deg
GLAT : -3.089 deg
Position error : nan deg
*** Morphology info ***
Morphology model type : gauss
Sigma : 0.480 deg
Sigma error : 0.030 deg
Sigma2 : 0.360 deg
Sigma2 error : 0.030 deg
Position angle : 41.000 deg
Position angle error : 7.000 deg
Position angle frame : radec
*** Spectral info ***
Significance : 27.900
Livetime : 53.100 h
Spectrum type : ecpl
norm : 1.46e-11 +- 8e-13 (stat) +- 3e-12 (sys) cm-2 s-1 TeV-1
index : 1.32 +- 0.06 (stat) +- 0.12 (sys)
e_cut : 14.0 +- 1.6 (stat) +- 2.6 (stat) TeV
reference : 1.0 TeV
Energy range : (0.75, nan) TeV
theta : 1.2 deg
Derived fluxes:
Spectral model norm (1 TeV) : 1.36e-11 +- 7.53e-13 (stat) cm-2 s-1 TeV-1
Integrated flux (>1 TeV) : 2.1e-11 +- 1.97e-12 (stat) cm-2 s-1
Integrated flux (>1 TeV) : 101.425 +- 9.511 (% Crab)
Integrated flux (1-10 TeV) : 9.27e-11 +- 9.59e-12 (stat) erg cm-2 s-1
*** Spectral points ***
SED reference id : 2012A&A...548A..38A
Number of spectral points : 24
Number of upper limits : 0
e_ref dnde dnde_errn dnde_errp
TeV 1 / (cm2 s TeV) 1 / (cm2 s TeV) 1 / (cm2 s TeV)
------ --------------- --------------- ---------------
0.719 1.055e-11 3.284e-12 3.280e-12
0.868 1.304e-11 2.130e-12 2.130e-12
1.051 9.211e-12 1.401e-12 1.399e-12
1.274 8.515e-12 9.580e-13 9.610e-13
1.546 5.378e-12 7.070e-13 7.090e-13
1.877 4.455e-12 5.050e-13 5.070e-13
2.275 3.754e-12 3.300e-13 3.340e-13
2.759 2.418e-12 2.680e-13 2.700e-13
3.352 1.605e-12 1.800e-13 1.830e-13
4.078 1.445e-12 1.260e-13 1.290e-13
4.956 9.240e-13 9.490e-14 9.700e-14
6.008 7.348e-13 6.470e-14 6.710e-14
7.271 3.863e-13 4.540e-14 4.700e-14
8.795 3.579e-13 3.570e-14 3.750e-14
10.650 1.696e-13 2.490e-14 2.590e-14
12.910 1.549e-13 2.060e-14 2.160e-14
15.650 6.695e-14 1.134e-14 1.230e-14
18.880 2.105e-14 1.390e-14 1.320e-14
22.620 3.279e-14 6.830e-15 7.510e-15
26.870 3.026e-14 5.910e-15 6.660e-15
31.610 1.861e-14 4.380e-15 5.120e-15
36.970 5.653e-15 2.169e-15 2.917e-15
43.080 3.479e-15 1.641e-15 2.410e-15
52.370 1.002e-15 8.327e-16 1.615e-15
"""
# Expected __str__ output for HESS J1848-018, compared verbatim in test_str.
SOURCES_STR[
    "HESS J1848-018"
] = """
*** Basic info ***
Catalog row index (zero-based) : 134
Common name : HESS J1848-018
Other names : HESS J1848-018,1HWC J1849-017c,WR121a,W43
Location : gal
Class : unid
TeVCat ID : 187
TeVCat 2 ID : hcE3Ou
TeVCat name : TeV J1848-017
TGeVCat ID : 128
TGeVCat name : TeV J1848-0147
Discoverer : hess
Discovery date : 2008-07
Seen by : hess
Reference : 2008AIPC.1085..372C
*** Position info ***
SIMBAD:
RA : 282.120 deg
DEC : -1.792 deg
GLON : 31.000 deg
GLAT : -0.159 deg
Measurement:
RA : 282.121 deg
DEC : -1.792 deg
GLON : 31.000 deg
GLAT : -0.160 deg
Position error : nan deg
*** Morphology info ***
Morphology model type : gauss
Sigma : 0.320 deg
Sigma error : 0.020 deg
Sigma2 : nan deg
Sigma2 error : nan deg
Position angle : nan deg
Position angle error : nan deg
Position angle frame :
*** Spectral info ***
Significance : 9.000
Livetime : 50.000 h
Spectrum type : pl
norm : 3.7e-12 +- 4e-13 (stat) +- 7e-13 (sys) cm-2 s-1 TeV-1
index : 2.8 +- 0.2 (stat) +- 0.2 (sys)
reference : 1.0 TeV
Energy range : (0.9, 12.0) TeV
theta : 0.2 deg
Derived fluxes:
Spectral model norm (1 TeV) : 3.7e-12 +- 4e-13 (stat) cm-2 s-1 TeV-1
Integrated flux (>1 TeV) : 2.06e-12 +- 3.19e-13 (stat) cm-2 s-1
Integrated flux (>1 TeV) : 9.909 +- 1.536 (% Crab)
Integrated flux (1-10 TeV) : 6.24e-12 +- 1.22e-12 (stat) erg cm-2 s-1
*** Spectral points ***
SED reference id : 2008AIPC.1085..372C
Number of spectral points : 11
Number of upper limits : 0
e_ref dnde dnde_errn dnde_errp
TeV 1 / (cm2 s TeV) 1 / (cm2 s TeV) 1 / (cm2 s TeV)
------ --------------- --------------- ---------------
0.624 9.942e-12 3.301e-12 3.265e-12
0.878 6.815e-12 1.042e-12 1.029e-12
1.284 1.707e-12 3.889e-13 3.826e-13
1.881 5.027e-13 1.566e-13 1.533e-13
2.754 3.266e-13 7.526e-14 7.323e-14
4.033 8.183e-14 3.609e-14 3.503e-14
5.905 2.979e-14 1.981e-14 1.921e-14
8.648 4.022e-15 9.068e-15 8.729e-15
12.663 -6.647e-15 3.786e-15 3.675e-15
18.542 3.735e-15 2.009e-15 1.786e-15
27.173 -5.317e-16 9.236e-16 8.568e-16
"""
# Expected __str__ output for HESS J1813-178, compared verbatim in test_str.
SOURCES_STR[
    "HESS J1813-178"
] = """
*** Basic info ***
Catalog row index (zero-based) : 118
Common name : HESS J1813-178
Other names : HESS J1813-178,G12.82-0.02,PSR J1813-1749,CXOU J181335.1-174957,IGR J18135-1751,W33
Location : gal
Class : pwn
TeVCat ID : 114
TeVCat 2 ID : Unhlxa
TeVCat name : TeV J1813-178
TGeVCat ID : 116
TGeVCat name : TeV J1813-1750
Discoverer : hess
Discovery date : 2005-03
Seen by : hess,magic
Reference : 2006ApJ...636..777A
*** Position info ***
SIMBAD:
RA : 273.363 deg
DEC : -17.849 deg
GLON : 12.787 deg
GLAT : 0.000 deg
Measurement:
RA : 273.408 deg
DEC : -17.842 deg
GLON : 12.813 deg
GLAT : -0.034 deg
Position error : 0.005 deg
*** Morphology info ***
Morphology model type : gauss
Sigma : 0.036 deg
Sigma error : 0.006 deg
Sigma2 : nan deg
Sigma2 error : nan deg
Position angle : nan deg
Position angle error : nan deg
Position angle frame :
*** Spectral info ***
Significance : 13.500
Livetime : 9.700 h
Spectrum type : pl2
flux : 1.42e-11 +- 1.1e-12 (stat) +- 3e-13 (sys) cm-2 s-1
index : 2.09 +- 0.08 (stat) +- 0.2 (sys)
e_min : 0.2 TeV
e_max : nan TeV
Energy range : (nan, nan) TeV
theta : 0.15 deg
Derived fluxes:
Spectral model norm (1 TeV) : 2.68e-12 +- 2.55e-13 (stat) cm-2 s-1 TeV-1
Integrated flux (>1 TeV) : 2.46e-12 +- 3.69e-13 (stat) cm-2 s-1
Integrated flux (>1 TeV) : 11.844 +- 1.780 (% Crab)
Integrated flux (1-10 TeV) : 8.92e-12 +- 1.46e-12 (stat) erg cm-2 s-1
*** Spectral points ***
SED reference id : 2006ApJ...636..777A
Number of spectral points : 13
Number of upper limits : 0
e_ref dnde dnde_errn dnde_errp
TeV 1 / (cm2 s TeV) 1 / (cm2 s TeV) 1 / (cm2 s TeV)
------ --------------- --------------- ---------------
0.323 2.736e-11 5.690e-12 5.971e-12
0.427 1.554e-11 3.356e-12 3.559e-12
0.574 8.142e-12 1.603e-12 1.716e-12
0.777 4.567e-12 9.319e-13 1.007e-12
1.023 2.669e-12 5.586e-13 6.110e-13
1.373 1.518e-12 3.378e-13 3.721e-13
1.841 7.966e-13 2.166e-13 2.426e-13
2.476 3.570e-13 1.135e-13 1.295e-13
3.159 3.321e-13 8.757e-14 1.012e-13
4.414 1.934e-13 5.764e-14 6.857e-14
5.560 4.461e-14 2.130e-14 2.844e-14
10.765 1.318e-14 6.056e-15 1.085e-14
22.052 1.372e-14 6.128e-15 1.178e-14
"""
|
<filename>Processing/allfeatures.py
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
import glob
import aggregate as ag
from multiprocessing import Process
from multiprocessing import Pool
import time
def ProcessFile(FName):
    """Load one raw composite file, clean it, derive features, and save them.

    The input is a flat float64 binary with six values per interval; the
    output is the feature series written as float32 under 'features/'.
    """
    print(FName)
    raw = np.fromfile(FName, dtype=np.float64)
    raw = raw.reshape(raw.shape[0] // 6, 6)
    # Clean the raw series before deriving anything from it.
    cleaned = ag.AggregateData(raw, 2)
    params = MakeFeatureParams(cleaned.IntervalsPerDay)
    features = ag.GenerateFeatureSeries(cleaned, params)
    # Map 'composite/<sym>.dat' -> 'features/<sym>.float32'.
    out_name = 'features' + FName[9:-4] + '.float32'
    print(out_name)
    features.data.astype(np.float32).tofile(out_name)
def MakeFeatureParams(IntervalsPerDay):
    """Build the feature-generation parameter object for one data set.

    All window/stride sizes are expressed in intervals; multiples of
    ``IntervalsPerDay`` correspond to whole trading days.
    """
    params = ag.FeatureParameters(IntervalsPerDay)
    day = IntervalsPerDay
    # Interval sizes for aggregated log price relatives.
    params.StridesLPR = [1, 2, 5, 10, 30, 60,
                         day, day * 2, day * 5, day * 10]
    # Window sizes for the Simple Moving Average.
    params.StridesSMA = [10, 20,
                         day, day * 2, day * 5, day * 10,
                         day * 20, day * 50, day * 100]
    # Windows over which traded volume is aggregated.
    params.VolumeIntervals = [10, 20, day, day * 5, day * 20]
    return params
def NeedToDo():
    """Return the sorted symbols that do not yet have a features file."""
    # Existing outputs: strip the 'features/' prefix and '.float32' suffix
    # to recover the bare symbol names.
    done = {name[9:-8] for name in glob.glob("features/*.float32")}
    SymbolsList = [
        # Large Cap Stock Symbols (over $10B)
        "DJI","INX","IXIC","A","AAPL","ABB","ABBV","ABC","ABT","ACN","ADBE","ADI","ADM","ADP","ADS","ADSK",
        "AEE","AEP","AES","AET","AFL","AIG","AIV","AIZ","AKAM","ALB","ALL","ALLE","ALXN","AMAT","AMD","AME","AMG",
        "AMGN","AMP","AMT","AMZN","AN","AON","APA","APC","APD","APH","ATI","ATVI","AVB","AVGO","AVP","AVY",
        "AXP","AZO","BA","BABA","BAC","BAX","BB","BBBY","BBD","BBT","BBVA","BBY","BCS","BDX","BEN","BF.B",
        "BIIB","BK","BLK","BLL","BMS","BMY","BOX","BRK.B","BSMX","BSX","BUD","BWA","BXP","C","CA","CAG","CAH",
        "CAT","CB","CBG","CBS","CCE","CCI","CCL","CELG","CEO","CERN","CF","CHK","CHRW","CHU","CI","CINF",
        "CL","CLF","CLX","CMA","CMCSA","CME","CMG","CMI","CMS","CNP","CNX","COF","COG","COH","COL","COP","CORE","COST",
        "CPB","CRM","CSCO","CSLT","CSX","CTAS","CTL","CTSH","CTXS","CVS","CVX","D","DAL","DDD","DE","DFS",
        "DG","DGX","DHI","DHR","DIS","DISCA","DLPH","DLTR","DNB","DNKN","DNR","DO","DOV","DPS","DPZ","DRI","DTE",
        "DUK","DVA","DVN","DWDP","EA","EBAY","ECL","ED","EFX","EIX","EL","EMN","EMR","EOG","EQR","EQT","ESRX","ESV","ETFC",
        "ETN","ETR","EW","EXC","EXPD","EXPE","F","FAST","FB","FCX","FDX","FE","FFIV","FIS","FISV","FITB","FLIR","FLR",
        "FLS","FMC","FOSL","FOXA","FSLR","FTI","FTR","GD","GE","GGP","GHC","GILD","GIS","GLW","GM",
        "GME","GNW","GOOG","GOOGL","GPC","GPS","GRA","GRMN","GRPN","GS","GSK","GT","GWW","HAL","HAS","HBAN","HCN",
        "HCP","HD","HES","HIG","HOG","HON","HP","HPQ","HRB","HRL","HRS","HSBC","HST","HSY","HUM","IBM","ICE","IFF",
        "IGT","ILMN","INTC","INTU","IP","IPG","IR","IRM","ISRG","ITW","IVZ","JBL","JCI","JCP","JD","JEC","JNJ","JNPR","JPM",
        "JWN","K","KEY","KHC","KIM","KLAC","KMB","KMX","KO","KORS","KR","KSS","KSU","L","LB","LEG","LEN","LH",
        "LLL","LLY","LM","LMT","LNC","LOW","LRCX","LUK","LUV","LYB","M","MA","MAC","MAR","MAS","MAT",
        "MCD","MCHP","MCK","MCO","MDLZ","MDT","MEET","MET","MHK","MKC","MLM","MMC","MMM","MNST","MO","MON","MOS","MPC",
        "MRK","MRO","MS","MSFT","MSI","MTB","MU","MUR","MYL","NATI","NBL","NBR","NDAQ","NDXT","NEE","NEM","NFG","NFLX","NFX",
        "NGG","NI","NKE","NLSN","NOC","NOV","NRG","NSC","NTAP","NTRS","NUE","NVDA","NVS","NWL","NWSA","OI","OII","OKE",
        "OMC","ORCL","ORLY","OXY","P","PAYX","PBCT","PBI","PBPB","PCAR","PCG","PCLN","PDCO","PEG","PEP",
        "PFE","PFG","PG","PGR","PH","PHM","PKI","PLD","PM","PNC","PNR","PNW","PPG","PPL","PRGO","PRU",
        "PSA","PSX","PTR","PUK","PVH","PWR","PX","PXD","PZZA","QCOM","QEP","QQQ","R","RDC","REGN","RF","RHI","RHT","RIG","RL",
        "ROK","ROP","ROST","RRC","RSG","RST","RTN","S","SBUX","SCG","SCHW","SEE","SHLD","SHW",
        "SIG","SINA","SJM","SLAB","SLB","SNA","SNI","SO","SP500-10","SP500-10102020","SP500-15","SP500-20","SP500-25",
        "SP500-30","SP500-35","SP500-40","SP500-45","SP500-50","SP500-55","SPG","SPGI","SPWR","SPY","SRCL","SRE","STI","STM","STT",
        "STX","STZ","SWK","SWN","SYK","SYMC","SYY","T","TAP","TDC","TEL","TGT","THC","TIF","TJX","TM",
        "TMK","TMO","TMUS","TOT","TRIP","TROW","TRV","TSCO","TSLA","TSN","TSS","TWTR","TWX","TXN","TXT",
        "UA","UL","UNFI","UNH","UNM","UNP","UPS","URBN","USB","UTX","V","VAR","VFC","VIAB","VLO","VMC","VMW",
        "VNO","VOO","VRSN","VRTX","VTR","VZ","WBA","WAT","WDC","WEC","WFC","WHR","WM","WMB","WMT","WRB","WU","WY",
        "WYN","WYNN","X","XEC","XEL","XL","XLNX","XOM","XRAY","XRX","XYL","YELP","YUM","Z","ZION","ZNGA","ZTS",
    ]
    print('len(SymbolsList) =',len(SymbolsList))
    return sorted(set(SymbolsList) - done)
def main():
    """Generate feature files in parallel for every symbol still missing one."""
    # FileNames = glob.glob("composite/*.dat")
    SymbolsList = NeedToDo()
    T0 = time.time()
    FileNames = []
    for sym in SymbolsList:
        FileName = 'composite/' + sym + '.dat'
        # Symbols stored with a leading '.' drop it in the on-disk file name.
        if(sym[0]=='.'):
            FileName = 'composite/' + sym[1:] + '.dat'
        FileNames.append(FileName)
    # Feature generation is CPU-bound; fan out across 16 worker processes.
    p = Pool(16)
    p.map(ProcessFile, FileNames)
    # for a in FileNames:
    #     ProcessFile(a)
    T1 = time.time()
    print('Seconds = ',(T1-T0))
# Script entry point.
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
from threading import Thread
import threading
import Queue
import time
from socket import error as SocketError
import sys
try:
import requests
import curses
import click
except ImportError:
print 'Tolerance requires the following Python modules: Requests and Click. You should be able to ' \
'`sudo pip install requests click`'
sys.exit(1)
import utils
# Shared work queue feeding URLs to the worker threads.
q = Queue.Queue()
result_codes = {}           # maps "<status> <reason>" (e.g. "200 OK") -> count
_timeout = None             # per-request timeout in seconds, set from the CLI
elapsed = []                # server-reported elapsed times of all responses
timeout_count = 0
connection_error_count = 0
non_200_count = 0
durations = []              # wall-clock durations of successful (200) requests
main_start = None           # time the very first request was issued
status = "Starting up"      # human-readable state shown in the UI
test_number = 1
total_seconds = 0
test_start = None           # start/stop timestamps of the current test round
test_stop = None
test_seconds = None
target_hits = None          # number of requests the current round should make
requests_handled = 0
_tolerance = None           # max errors allowed before a round is failed
_url = None
hits = None                 # (hits, workers) of the current round, for the UI
workers = None
break_out = False           # set True to abort the remaining test rounds
import logging
logging.basicConfig(filename='log.log', level=logging.WARNING)
def do_work():
    # Worker thread body: pull URLs off the shared queue and request them
    # until the queue drains, the error tolerance is exceeded, or the
    # round's target hit count is reached.
    global _timeout, timeout_count, connection_error_count, main_start, status, non_200_count, total_seconds, \
        test_start, test_stop, requests_handled, _tolerance, break_out
    while True:
        try:
            # Block up to 2 seconds for work; an empty queue ends this worker.
            url = q.get(True, 2)
        except Queue.Empty:
            break
        status = "Running"
        if test_start is None:
            test_start = time.time()
        if main_start is None:
            main_start = time.time()
        try:
            start = time.time()
            res = requests.get(url, timeout=_timeout)
            elapsed.append(res.elapsed.total_seconds())
            # Tally responses by "<status> <reason>", e.g. "200 OK".
            if '%s %s' % (res.status_code, res.reason) not in result_codes:
                result_codes['%s %s' % (res.status_code, res.reason)] = 0
            result_codes['%s %s' % (res.status_code, res.reason)] += 1
            if res.status_code == 200:
                durations.append(time.time() - start)
            else:
                non_200_count += 1
        except requests.RequestException:
            timeout_count += 1
            non_200_count += 1
        except SocketError:
            connection_error_count += 1
            non_200_count += 1
        requests_handled += 1
        if non_200_count > _tolerance:
            # Too many errors: mark the run failed and drain the queue so the
            # remaining workers wind down on their own.
            break_out = True
            test_stop = time.time()
            with q.mutex:
                q.queue.clear()
            q.task_done()
            status = "Failed, stopping..."
            break
        if requests_handled == target_hits:
            test_stop = time.time()
        q.task_done()
def update_ui_worker():
    # Daemon thread: redraws the curses dashboard roughly ten times a second
    # from the counters maintained by the worker threads. Trailing spaces in
    # the labels presumably blank out leftovers from longer previous values.
    global main_start, total_seconds, _timeout, hits, workers, status, test_number, total_seconds, test_start, \
        test_stop, requests_handled, test_seconds, _tolerance, _url, break_out
    while True:
        rc = utils.render_result_codes(result_codes, timeout_count, connection_error_count)
        if not q.empty() and main_start:
            total_seconds = time.time()-main_start
        screen.addstr(1, 2, 'PAIN TOLERANCE on %s' % _url, curses.color_pair(3)|curses.A_BOLD)
        screen.addstr(3, 2, 'Status: %s ' % status)
        screen.addstr(5, 2, 'Trying %s hits with %s workers ' % (hits, workers))
        screen.addstr(6, 2, 'Timeout: %s seconds ' % (_timeout,))
        screen.addstr(6, 40, 'Tolerance: %s errors ' % (_tolerance,))
        # Subtract this UI thread and the main thread from the active count.
        screen.addstr(7, 2, 'Active Workers: %s ' % (threading.active_count() - 2))
        screen.addstr(7, 40, 'Queue: %s ' % q.qsize())
        if test_start is None:
            test_seconds = 0
        else:
            if test_stop is None:
                test_seconds = time.time() - test_start
            else:
                test_seconds = test_stop - test_start
        screen.addstr(9, 2, 'Test Seconds: %.2f ' % test_seconds)
        screen.addstr(9, 40, 'Requests handled: %s ' % requests_handled)
        if result_codes and test_seconds and '200 OK' in result_codes:
            screen.addstr(10, 2, 'Requests per second: %.2f ' % (int(result_codes['200 OK']) / test_seconds), )
        if durations:
            screen.addstr(10, 40, 'Average Request: %.2f seconds ' % (reduce(lambda x, y: x + y, durations) / len(durations)))
        screen.addstr(12, 2, rc)
        screen.refresh()
        time.sleep(0.1)
# Escalating load profile for successive test rounds: (hits, workers).
tests = [
    (50, 50,),
    (100, 100,),
    (200, 200,),
    (400, 400,),
    (600, 600,),
    (800, 800,),
    (1000, 1000,),
    (1500, 1000,),
    (2000, 1000,),
    (2000, 1500,),
    (2000, 2000,)
]
@click.command()
@click.option('--url', prompt="URL to request")
@click.option('--timeout', default=10)
@click.option('--tolerance', default=5)
def main(url, timeout, tolerance):
    # Entry point: sanity-check the URL, set up the curses UI, then run the
    # escalating load rounds from `tests` until one fails or all pass.
    # (Intentionally no docstring: click would turn it into --help text.)
    global break_out, status, target_hits, timeout_count, connection_error_count, non_200_count, test_number, \
        result_codes, elapsed, requests_handled, test_start, test_stop, _timeout, _tolerance, screen, hits, workers, \
        _url, durations
    _timeout = timeout
    _tolerance = tolerance
    _url = url
    logging.warning('Starting up...')
    # Check that the url provided is valid
    try:
        requests.get(url, timeout=5)
    except requests.exceptions.MissingSchema:
        print "Invalid URL"
        sys.exit(1)
    except requests.exceptions.ConnectionError:
        print "Is that a valid URL? We can't connect to it."
        sys.exit(1)
    except Exception as e:
        print "Something went wrong trying to connect... timeout?"
        print e
        sys.exit(1)
    try:
        screen = curses.initscr()
        screen.border(0)
        curses.start_color()
        curses.init_color(0, 0, 0, 0)
        curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(10, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(11, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.curs_set(0)
        # curses.noecho()
        # The UI refresher runs as a daemon so it dies with the main thread.
        ui = Thread(target=update_ui_worker)
        ui.daemon = True
        ui.start()
        for test in tests:
            hits = test[0]
            workers = test[1]
            if break_out:
                break
            target_hits = hits
            # Enqueue all hits for this round, then start the worker threads.
            for t in range(hits):
                q.put(url)
            for w in range(workers):
                t = Thread(target=do_work)
                t.start()
            # q.join()
            status = "Waiting for workers to spin down..."
            # Busy-wait until only the main and UI threads remain.
            while True:
                if threading.active_count() <= 2:
                    break
            if timeout_count + connection_error_count + non_200_count > tolerance:
                result = 'Fail'
                cp = curses.color_pair(2)|curses.A_BOLD
            else:
                result = 'Pass'
                cp = curses.color_pair(3)|curses.A_BOLD
            result_200 = result_codes.get('200 OK')
            if result_200 is None:
                result_200 = 0
            else:
                result_200 = int(result_200)
            if durations:
                average_request_time = reduce(lambda x, y: x + y, durations) / len(durations)
            if test_seconds:
                screen.addstr(13 + test_number, 2, '%s hits with %s workers: %s (%.2f RPS | %.2f ART | %d ERR) ' %
                              (hits, workers, result, result_200/test_seconds, average_request_time, non_200_count), cp)
            if 'Fail' in result:
                break_out = True
                break
            status = "Restarting..."
            time.sleep(2)
            # Reset all per-round counters before the next round.
            result_codes = {}
            non_200_count = 0
            elapsed = []
            durations = []
            timeout_count = 0
            connection_error_count = 0
            test_number += 1
            requests_handled = 0
            test_start = None
            test_stop = None
    except KeyboardInterrupt:
        with q.mutex:
            q.queue.clear()
        break_out = True
        test_stop = time.time()
        screen.addstr(16 + test_number, 2, "Test cancelled.")
        logging.warning('Keyboard Exit')
    finally:
        curses.endwin()
        logging.warning('Exit 2a')
    # Leave the results on screen until the user presses a key.
    screen.addstr(16 + test_number, 2, "Press any key to exit.")
    screen.getch()
    curses.endwin()
    logging.warning('Exit 2')
# Script entry point.
if __name__ == "__main__":
    main()
|
from ss.model.functions import Predicate, Function, rename_functions, initialize, TotalCost, Increase
from ss.model.problem import Problem
from ss.model.operators import Action, Axiom
from ss.algorithms.incremental import exhaustive, incremental
def main(n=2, verbose=False):
    """Build and solve a small pick/place/search planning problem.

    NOTE(review): the ``n`` parameter is never used in the body — confirm
    whether it was meant to scale the number of objects.
    """
    # Predicate schemas: items, robot parts, and object classes.
    Item = Predicate('?b')
    Part = Predicate('?r')
    Class = Predicate('?c')
    IsMovable = Predicate('?i', domain=[Item('?i')])
    IsFixed = Predicate('?i', domain=[Item('?i')])
    IsClass = Predicate('?i ?c', domain=[Item('?i'), Class('?c')])
    IsArm = Predicate('?r', domain=[Part('?r')])
    IsStackable = Predicate('?i1 ?i2', domain=[Item('?i1'), Item('?i2')])
    HandHolding = Predicate('?r ?b', domain=[IsArm('?r'), Item('?b')])
    HandEmpty = Predicate('?r', domain=[IsArm('?r')])
    On = Predicate('?i1 ?i2', domain=[Item('?i1'), Item('?i2')])
    Nearby = Predicate('?s', domain=[IsFixed('?s')])
    Found = Predicate('?b', domain=[Item('?b')])
    Localized = Predicate('?b', domain=[Item('?b')])
    Holding = Predicate('?c', domain=[Class('?c')])
    # Give the predicates their local variable names for readable output.
    rename_functions(locals())
    actions = [
        Action(name='Pick', param='?a ?i ?s',
               pre=[IsArm('?a'), IsStackable('?i', '?s'), HandEmpty('?a'),
                    Nearby('?s'), On('?i', '?s'), Localized('?i')],
               eff=[HandHolding('?a', '?i'), ~HandEmpty('?a'), ~On('?i', '?s')]),
        Action(name='Place', param='?a ?i ?s',
               pre=[IsArm('?a'), IsStackable('?i', '?s'),
                    Nearby('?s'), HandHolding('?a', '?i'), Localized('?s')],
               eff=[HandEmpty('?a'), On('?i', '?s'), ~HandHolding('?a', '?i')]),
        Action(name='ScanRoom', param='?s',
               pre=[IsFixed('?s'), ~Found('?s')],
               eff=[Found('?s')]),
        # ScanFixed discovers an item on a surface at unit cost.
        Action(name='ScanFixed', param='?s ?i',
               pre=[IsStackable('?i', '?s'), ~Found('?i')],
               eff=[Found('?i'), On('?i', '?s'), Increase(TotalCost(), 1)]),
        Action(name='Look', param='?i',
               pre=[Found('?i')],
               eff=[Localized('?i')]),
        # Moving to a surface invalidates proximity to the others and the
        # localization of the listed movable items.
        Action(name='Move', param='?s',
               pre=[IsFixed('?s')],
               eff=[Nearby('?s')] +
                   [~Nearby(s) for s in ['table0', 'table1']] +
                   [~Localized(s) for s in ['soup0']]),
    ]
    initial_atoms = [
        initialize(TotalCost(), 0),
        HandEmpty('left'),
        HandEmpty('right'),
        Found('table0'),
        Found('table1'),
        IsClass('table0', 'table'),
        IsClass('table1', 'table'),
        IsFixed('table0'),
        IsFixed('table1'),
        IsStackable('soup0', 'table0'),
        IsStackable('soup0', 'table1'),
        IsClass('soup0', 'soup'),
        Found('soup0'),
        On('soup0', 'table0'),
        IsStackable('green0', 'table0'),
        IsStackable('green0', 'table1'),
        IsClass('green0', 'green'),
        Found('green0'),
        On('green0', 'table0'),
    ]
    # Derived predicate: holding any instance of a class.
    axioms = [
        Axiom(param='?a ?i ?c',
              pre=[IsArm('?a'), IsClass('?i', '?c'), HandHolding('?a', '?i')],
              eff=Holding('?c')),
    ]
    goal_literals = [Holding('green'), Holding('soup')]
    problem = Problem(initial_atoms, goal_literals, actions,
                      axioms, [], objective=TotalCost())
    print problem
    plan, evaluations = incremental(problem, verbose=verbose)
    print plan
# Script entry point.
if __name__ == '__main__':
    main()
|
<reponame>jehboyes/planning_system
# pylint: disable=no-member
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
import click
from getpass import getuser
from office365.runtime.auth.user_credential import UserCredential
from office365.sharepoint.client_context import ClientContext
from dominate.tags import html, body, div, table, tr, td, ul, li, span, h2, h3, b, a, br
@click.command()
@click.password_option()
@click.option("--username", "-u", type=str, help="Specify a different username.")
@click.pass_obj
def list(config, username, password):
    """
    Generates a formatted list of dates in HTML from a pair
    of SharePoint lists called Return Schedule
    and Returns.
    Sharepoint is accessed as the logged
    in user and requires the user to enter their PASSWORD.
    The location of the sharepoint
    lists and the suffix for the Office365 login must exist
    in the local config.ini file as RETURNS_SITE and OFFICE_DOMAIN
    respectively.
    """
    # NOTE(review): this command is named `list`, shadowing the builtin
    # within this module; click requires the name, so left as-is.
    # Connect to sharepoint
    site_url = config.sys_config.RETURNS_SITE
    if username is None:
        username = getuser()
    username += config.sys_config.OFFICE_DOMAIN
    user_credentials = UserCredential(username, password)
    ctx = ClientContext(site_url).with_credentials(user_credentials)
    # Work out date scope
    date_min = datetime.today()
    # First day of the month two months out, at midnight.
    date_max = date_min + relativedelta(months=2, day=0, hour=0, minute=0)
    # Get items with relevant dates as dictionaries
    all_items = _get_list(ctx, "Return Schedule")
    scoped_items = []
    for item in all_items:
        prop = item.properties  # shortcut to item properties dict
        date = datetime.strptime(prop["Date"][:10], "%Y-%m-%d")
        prop["num_date"] = date
        # Predicted dates are shown loosely ("Early"/"Mid-"/"Late"),
        # confirmed dates with the exact weekday and day number.
        if prop["Predicted"] is True:
            if date.day < 10:
                date_code = "Early "
            elif date.day < 20:
                date_code = "Mid-"
            else:
                date_code = "Late "
        else:
            date_code = "%A %d "
        prop["str_date"] = date.strftime(f"{date_code}%B")
        if date_min <= date <= date_max:
            scoped_items.append(prop)
    # Get returns, Turn into dict with value for items
    returns = _get_list(ctx, "Returns")
    return_dict = {}
    for item in returns:
        prop = item.properties
        entry = {"name": prop["Title"],
                 "year": prop["AcademicYear"],
                 "href": prop["Website"],
                 "compiler": prop["Compiler"],
                 "reviewer": prop["Reviewer"],
                 "items": []}
        return_dict[item.get_property("SectorID")] = entry
    # Add dates to return dict
    for item in scoped_items:
        return_dict[item["Title"]]["items"].append(item)
    cell_style = """border-bottom: solid 1px black;font-family:"Calibri",sans-serif;color:black;vertical-align:top;"""
    # Generate html
    h = html()
    bdy = h.add(body(style="""font-family:"Calibri",sans-serif;color:black;"""))
    with bdy.add(div()):
        h2("Upcoming dates")
        # NOTE(review): elsewhere in this function the site URL is read from
        # config.sys_config.RETURNS_SITE, but here it is config.RETURNS_SITE —
        # confirm the attribute exists on config or this raises AttributeError.
        span("Key statutory return dates falling within the next two months. Other dates are available on the",
             a("Statutory Returns SharePoint site", href=config.RETURNS_SITE), ".", br())
        span("")
        tbl = table(style="border-collapse: collapse;")
        for id, ret in return_dict.items():
            # Only render returns that actually have dates in scope.
            if len(ret['items']) > 0:
                with tbl.add(tr(style="vertical-align:baseline;")):
                    td(h3(ret['name']),
                       "Compiler: ", ret['compiler'], br(),
                       "Reviewer: ", ret['reviewer'],
                       style=cell_style)
                    url = ret['href']['Url'] if ret['href'] is not None else None
                    td(a(id, href=url),
                       style=cell_style)
                    current_list = ul()
                    for item in ret['items']:
                        row = li(b(item["str_date"], ": "),
                                 item['Description'])
                        if item['DeadlineDate'] is True:
                            row += span(" (Deadline)")
                        current_list += row
                    td(current_list, style=cell_style)
        h2("Policy")
        span("The Statutory returns policy is available for conservatoire staff ",
             a("here on SharePoint", href=config.STATUTORY_RETURNS_POLICY_URL),
             " and can be provided to Luminate staff by request.")
    print(h.render(), file=open("output.html", "w"))
    click.echo("Printed to output.html")
def _get_list(con, name):
    """Fetch and return all items of the named SharePoint list."""
    target_list = con.web.lists.get_by_title(name)
    return target_list.items.get().execute_query()
|
<reponame>atklaus/sportsreference
import pandas as pd
import re
from .constants import SCHEDULE_SCHEME, SQUAD_URL
from datetime import datetime
from ..decorators import float_property_decorator, int_property_decorator
from .fb_utils import _lookup_team
from pyquery import PyQuery as pq
from sportsreference import utils
from sportsreference.constants import (AWAY,
DRAW,
HOME,
LOSS,
NEUTRAL,
WIN)
from urllib.error import HTTPError
class Game:
    """
    A representation of a matchup between two teams.
    Stores all relevant high-level match information for a game in a team's
    schedule including date, time, week, opponent, and score.
    Parameters
    ----------
    game_data : string
        The row containing the specified game information.
    """
    def __init__(self, game_data):
        # Every field starts as None and is filled in by _parse_game_data.
        self._competition = None
        self._matchweek = None
        self._day = None
        self._date = None
        self._time = None
        self._datetime = None
        self._venue = None
        self._result = None
        self._goals_for = None
        self._goals_against = None
        self._opponent = None
        self._opponent_id = None
        self._expected_goals = None
        self._expected_goals_against = None
        self._attendance = None
        self._captain = None
        self._captain_id = None
        self._formation = None
        self._referee = None
        self._match_report = None
        self._notes = None
        self._parse_game_data(game_data)

    def __str__(self):
        """
        Return the string representation of the class.
        """
        return f'{self.date} - {self.opponent}'

    def __repr__(self):
        """
        Return the string representation of the class.
        """
        return self.__str__()

    def _parse_opponent_id(self, game_data):
        """
        Parse the opponent's squad ID.
        The opponent field has a squad ID embedded in the URL which can be used
        to more directly lookup or match an opponent. By pulling the opponent
        field and removing all other unnecessary parts of the URL, the ID can
        be safely parsed and returned.
        Parameters
        ----------
        game_data : string
            A ``string`` containing all of the rows of stats for a given game.
        Returns
        -------
        string
            Returns a ``string`` of the opponent's squad ID.
        """
        opponent = game_data(SCHEDULE_SCHEME['opponent'])
        opponent_id = opponent('a').attr('href')
        # Strip everything before and after the squad-ID path component.
        try:
            opponent_id = re.sub(r'.*\/squads\/', '', opponent_id)
            opponent_id = re.sub(r'\/.*', '', opponent_id)
        except TypeError:
            # No anchor found: attr('href') returned None.
            opponent_id = None
        return opponent_id

    def _parse_captain_id(self, game_data):
        """
        Parse the captain's player ID.
        The captain field contains a link to the captain's unique player ID in
        the URL which can be used to more directly lookup or match the player.
        By pulling the captain field and removing all other unnecessary parts
        of the URL, the ID can be safely parsed and returned.
        Parameters
        ----------
        game_data : string
            A ``string`` containing all of the rows of stats for a given game.
        Returns
        -------
        string
            Returns a ``string`` of the player's unique ID.
        """
        captain = game_data(SCHEDULE_SCHEME['captain'])
        captain_id = captain('a').attr('href')
        try:
            captain_id = re.sub(r'.*\/players\/', '', captain_id)
            captain_id = re.sub(r'\/.*', '', captain_id)
        except TypeError:
            # No anchor found: attr('href') returned None.
            captain_id = None
        return captain_id

    def _parse_match_report(self, game_data):
        """
        Parse the match report ID.
        The match report field contains a link to the detailed match report via
        the match report ID which is embedded in the URL. By pulling the match
        report field and removing all other unnecessary parts of the URL, the
        ID can be safely parsed and returned.
        Parameters
        ----------
        game_data : string
            A ``string`` containing all of the rows of stats for a given game.
        Returns
        -------
        string
            Returns a ``string`` of the match report's unique ID.
        """
        match_report = game_data(SCHEDULE_SCHEME['match_report'])
        match_report_id = match_report('a').attr('href')
        try:
            match_report_id = re.sub(r'.*\/matches\/', '', match_report_id)
            match_report_id = re.sub(r'\/.*', '', match_report_id)
        except TypeError:
            # No anchor found: attr('href') returned None.
            match_report_id = None
        return match_report_id

    def _parse_game_data(self, game_data):
        """
        Parse a value for every attribute.
        The function looks through every attribute with the exception of those
        listed below and retrieves the value according to the parsing scheme
        and index of the attribute from the passed HTML data. Once the value
        is retrieved, the attribute's value is updated with the returned
        result.
        Note that this method is called directly once Game is invoked and does
        not need to be called manually.
        Parameters
        ----------
        game_data : string
            A ``string`` containing all of the rows of stats for a given game.
        """
        for field in self.__dict__:
            # Remove the leading '_' from the name
            short_name = str(field)[1:]
            # 'datetime' is derived lazily by its property, not parsed here.
            if short_name == 'datetime':
                continue
            # ID-like fields need URL extraction instead of the generic scheme.
            if short_name == 'opponent_id':
                value = self._parse_opponent_id(game_data)
                setattr(self, field, value)
                continue
            if short_name == 'captain_id':
                value = self._parse_captain_id(game_data)
                setattr(self, field, value)
                continue
            if short_name == 'match_report':
                value = self._parse_match_report(game_data)
                setattr(self, field, value)
                continue
            value = utils._parse_field(SCHEDULE_SCHEME, game_data, short_name)
            setattr(self, field, value)

    @property
    def dataframe(self):
        """
        Returns a pandas ``DataFrame`` containing all other class properties
        and values. The index for the DataFrame is the match report ID.
        """
        # No score at all means the game has no meaningful stats to frame.
        if self._goals_for is None and self._goals_against is None:
            return None
        fields_to_include = {
            'competition': self.competition,
            'matchweek': self.matchweek,
            'day': self.day,
            'date': self.date,
            'time': self.time,
            'datetime': self.datetime,
            'venue': self.venue,
            'result': self.result,
            'goals_for': self.goals_for,
            'goals_against': self.goals_against,
            'shootout_scored': self.shootout_scored,
            'shootout_against': self.shootout_against,
            'opponent': self.opponent,
            'opponent_id': self.opponent_id,
            'expected_goals': self.expected_goals,
            'expected_goals_against': self.expected_goals_against,
            'attendance': self.attendance,
            'captain': self.captain,
            'captain_id': self.captain_id,
            'formation': self.formation,
            'referee': self.referee,
            'match_report': self.match_report,
            'notes': self.notes
        }
        return pd.DataFrame([fields_to_include], index=[self.match_report])

    @property
    def competition(self):
        """
        Returns a ``string`` of the competitions name, such as 'Premier
        League' or 'Champions Lg'.
        """
        return self._competition

    @property
    def matchweek(self):
        """
        Returns a ``string`` of the matchweek the game was played in, such
        as 'Matchweek 1' or 'Group Stage'.
        """
        return self._matchweek

    @property
    def day(self):
        """
        Returns a ``string`` of the day of the week the game was played on.
        """
        return self._day

    @property
    def date(self):
        """
        Returns a ``string`` of the date the game was played in the format
        'YYYY-MM-DD'.
        """
        return self._date

    @property
    def time(self):
        """
        Returns a ``string`` of the time the game started in 24-hour
        format, local to the home venue.
        """
        return self._time

    @property
    def datetime(self):
        """
        Returns a ``datetime`` object representing the date and time the match
        started. If the time is not present, the default time of midnight on
        the given day will be used instead.
        """
        try:
            date = self.date.split('-')
        except AttributeError:
            # self.date is None: no date was parsed for this game.
            return None
        try:
            # Drop any trailing annotation after the time before splitting.
            time = re.sub(' .*', '', self.time)
            time = time.split(':')
        except TypeError:
            time = None
        if len(date) != 3:
            return None
        year, month, day = date
        hour, minute = 0, 0
        if time and len(time) == 2:
            hour, minute = time
        else:
            time = None
        try:
            year = int(year)
            month = int(month)
            day = int(day)
        except ValueError:
            return None
        try:
            hour = int(hour)
            minute = int(minute)
        except ValueError:
            # As long as we have a valid date, we can still create a meaningful
            # datetime object, even if the time is invalid, so stick to the
            # default hour and minute in case they can't be parsed.
            hour = 0
            minute = 0
        datetime_ = datetime(year, month, day, hour, minute)
        return datetime_

    @property
    def venue(self):
        """
        Returns a ``string`` constant representing if the team played at
        home ('Home'), on the road ('Away'), or at a neutral site
        ('Neutral').
        """
        if not self._venue:
            return None
        if self._venue.upper() == 'HOME':
            return HOME
        if self._venue.upper() == 'AWAY':
            return AWAY
        if self._venue.upper() == 'NEUTRAL':
            return NEUTRAL

    @property
    def result(self):
        """
        Returns a ``string`` constant representing if the team won ('Win'),
        drew ('Draw'), or lost ('Loss').
        """
        if not self._result:
            return None
        if self._result.upper() == 'W':
            return WIN
        if self._result.upper() == 'D':
            return DRAW
        if self._result.upper() == 'L':
            return LOSS

    @int_property_decorator
    def goals_for(self):
        """
        Returns an ``int`` of the number of goals the team scored.
        """
        # If the game went to a shootout, remove the penalties.
        if '(' in self._goals_for and ')' in self._goals_for:
            return re.sub(' .*', '', self._goals_for)
        return self._goals_for

    @int_property_decorator
    def goals_against(self):
        """
        Returns an ``int`` of the number of goals the team conceded.
        """
        # If the game went to a shootout, remove the penalties.
        if '(' in self._goals_against and ')' in self._goals_against:
            return re.sub(' .*', '', self._goals_against)
        return self._goals_against

    @int_property_decorator
    def shootout_scored(self):
        """
        Returns an ``int`` of the number of penalties the team scored if the
        game went to a shootout after normal play.
        """
        # Shootout totals appear in parentheses, e.g. '1 (4)'.
        penalties = re.findall(r'\(\d+\)', self._goals_for)
        if penalties:
            penalties = re.sub(r'\(|\)', '', penalties[0])
            return penalties

    @int_property_decorator
    def shootout_against(self):
        """
        Returns an ``int`` of the number of penalties the team conceded if the
        game went to a shootout after normal play.
        """
        penalties = re.findall(r'\(\d+\)', self._goals_against)
        if penalties:
            penalties = re.sub(r'\(|\)', '', penalties[0])
            return penalties

    @property
    def opponent(self):
        """
        Returns a ``string`` of the opponents name, such as 'Arsenal'.
        """
        return self._opponent

    @property
    def opponent_id(self):
        """
        Returns a ``string`` of the opponents squad ID, such as '18bb7c10'
        for Arsenal.
        """
        return self._opponent_id

    @float_property_decorator
    def expected_goals(self):
        """
        Returns a ``float`` of the number of goals the team was expected to
        score based on the quality of shots taken.
        """
        return self._expected_goals

    @float_property_decorator
    def expected_goals_against(self):
        """
        Returns a ``float`` of the number of goals the team was expected to
        concede based on the quality of shots taken.
        """
        return self._expected_goals_against

    @int_property_decorator
    def attendance(self):
        """
        Returns an ``int`` of the recorded attendance at the game.
        """
        # Attendance is parsed as a string like '53,235'; strip the commas so
        # the decorator can convert it — presumably to int (TODO confirm).
        try:
            return self._attendance.replace(',', '')
        except AttributeError:
            return None

    @property
    def captain(self):
        """
        Returns a ``string`` representing the captain's name, such as
        '<NAME>'.
        """
        return self._captain

    @property
    def captain_id(self):
        """
        Returns a ``string`` of the captain's unique ID on fbref.com, such
        as '21a66f6a' for <NAME>.
        """
        return self._captain_id

    @property
    def formation(self):
        """
        Returns a ``string`` of the formation the team started with during
        the game, such as '4-4-2'.
        """
        return self._formation

    @property
    def referee(self):
        """
        Returns a ``string`` of the first and last name of the referee for
        the match.
        """
        return self._referee

    @property
    def match_report(self):
        """
        Returns a ``string`` of the 8-digit match ID for the game.
        """
        return self._match_report

    @property
    def notes(self):
        """
        Returns a ``string`` of any notes that might be included with the
        game.
        """
        return self._notes
class Schedule:
"""
An object of the given team's schedule.
Generates a team's schedule for the season including wins, losses, draws,
and scores if applicable.
Parameters
----------
team_id : string
The team's 8-digit squad ID or the team's name, such as '<NAME>'.
doc : PyQuery object (optional)
If passed to the class instantiation, this will be used to pull all
information instead of making another request to the website. If the
document is not provided, it will be pulled during a later step.
"""
def __init__(self, team_id, doc=None):
self._games = []
self._pull_schedule(team_id, doc)
def __getitem__(self, index):
"""
Return a specified game.
Returns a specified game as requested by the index number in the array.
The input index is 0-based and must be within the range of the schedule
array.
Parameters
----------
index : int
The 0-based index of the game to return.
Returns
-------
Game instance
If the requested game can be found, its Game instance is returned.
Raises
------
IndexError
If the requested index is not within the bounds of the schedule.
"""
return self._games[index]
def __call__(self, date):
"""
Return a specified game.
Returns a specific game as requested by the passed datetime. The input
datetime must have the same year, month, and day, but can have any time
be used to match the game.
Parameters
----------
date : datetime
A datetime object of the month, day, and year to identify a
particular game that was played.
Returns
-------
Game instance
If the requested game can be found, its Game instance is returned.
Raises
------
ValueError
If the requested date cannot be matched with a game in the
schedule.
"""
for game in self._games:
if not game.datetime:
continue # pragma: no cover
if game.datetime.year == date.year and \
game.datetime.month == date.month and \
game.datetime.day == date.day:
return game
raise ValueError('No games found for requested date')
def __str__(self):
"""
Return the string representation of the class.
"""
games = [f'{game.date} - {game.opponent}'.strip()
for game in self._games]
return '\n'.join(games)
def __repr__(self):
"""
Return the string representation of the class.
"""
return self.__str__()
def __iter__(self):
"""
Returns an iterator of all of the games scheduled for the given team.
"""
return iter(self._games)
def __len__(self):
"""
Returns the number of scheduled games for the given team.
"""
return len(self._games)
def _add_games_to_schedule(self, schedule):
"""
Add game information to the list of games.
Create a Game instance for the given game in the schedule and add it to
the list of games the team has or will play during the season.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
"""
for item in schedule:
if 'class="thead"' in str(item):
continue # pragma: no cover
game = Game(item)
self._games.append(game)
    def _pull_schedule(self, team_id, doc):
        """
        Download and create objects for the team's schedule.
        Given the team's abbreviation, pull the squad page and parse all of
        the games on the list. If a document is already provided (occurs
        when called directly from the Team class), that can be used to save
        an extra call to the website and games can be parsed from that
        object. A Game instance is created for every item in the team's
        schedule and appended to the '_games' property.
        Parameters
        ----------
        team_id : string
            The team's 8-digit squad ID or the team's name.
        doc : PyQuery object
            If passed to the class instantiation, this will be used to pull
            all information instead of making another request to the
            website. If the document is not provided, this value will be
            None.
        """
        if not doc:
            # Resolve a team name/abbreviation to a squad ID for the URL.
            squad_id = _lookup_team(team_id)
            try:
                doc = pq(SQUAD_URL % squad_id)
            except HTTPError:
                # Page unavailable; leave the schedule empty.
                return
        schedule = utils._get_stats_table(doc, 'table#matchlogs_all')
        if not schedule:
            # No matchlog table found; report and leave the schedule empty.
            utils._no_data_found()
            return
        self._add_games_to_schedule(schedule)
|
import abc
import logging
import re
from typing import Iterator, Any, Sequence, Callable, Optional, Dict
from urllib.parse import urljoin, urlparse
import scrapy
from lxml import html, etree
LOGGER = logging.getLogger(__name__)
PageCallback = Callable[[scrapy.http.Response], Iterator[Any]]
class SiteLister(abc.ABC):
    """
    An object that will iterate through all recipes on a site.

    Concrete subclasses set ``start_url`` and implement ``start_requests``.
    """
    # First URL fetched when the crawl begins; set by concrete subclasses.
    start_url: str
    @abc.abstractmethod
    def start_requests(self, page_callback: PageCallback) -> Iterator[scrapy.Request]:
        """
        Start making requests, using page_callback with responses for
        actual recipes
        """
        raise NotImplementedError
class StructuredSiteLister(SiteLister):
    """
    Lister for sites with numbered listing pages.

    Starting from ``start_url`` (page ``start_page``), it yields a request
    for every recipe link found on a listing page and follows every other
    page number discovered on that page.
    """
    # Page number assumed for the first listing page.
    start_page: int = 1
    @abc.abstractmethod
    def get_links(self, dom: html.Element) -> Sequence[str]:
        """Return the recipe links present on a listing page's DOM."""
        raise NotImplementedError
    @abc.abstractmethod
    def get_pages(self, dom: html.Element, page: Any) -> Sequence[Any]:
        """Return the page identifiers discovered on a listing page's DOM."""
        raise NotImplementedError
    @abc.abstractmethod
    def get_page_url(self, page: Any) -> str:
        """Return the absolute URL of the listing page identified by ``page``."""
        raise NotImplementedError
    def start_requests(self, page_callback: PageCallback) -> Iterator[scrapy.Request]:
        """
        Yield the request for the first listing page; ``parse_list_page``
        then schedules recipe requests and further listing pages.
        """
        def parse_list_page(response, page: Any = self.start_page):
            html_data = html.fromstring(response.body)
            new_pages = list(self.get_pages(html_data, page))
            # Queue every recipe link found on this listing page.
            for link in self.get_links(html_data):
                url = urljoin(response.url, link)
                yield scrapy.Request(url, callback=page_callback)
            if not new_pages:
                LOGGER.warning("No new page numbers found on page %s", page)
                return
            # Follow every other page number seen on this page.
            # dont_filter=True: the same listing URL can be reached from
            # several pages and must still be crawled.
            for page_num in new_pages:
                if page == page_num:
                    continue
                yield scrapy.Request(
                    self.get_page_url(page_num),
                    callback=parse_list_page,
                    cb_kwargs={"page": page_num},
                    dont_filter=True,
                )
        yield scrapy.Request(self.start_url, callback=parse_list_page, dont_filter=True)
class SitemapLister(SiteLister):
    """
    Lister that discovers recipe pages from a sitemap.

    ``start_url`` points at a sitemap XML document; every ``<url><loc>``
    entry whose path matches ``recipes_path_regex`` (when set) is requested
    with the page callback.
    """
    # Additional sitemap URLs fetched alongside ``start_url``.
    extra_start_urls = ()
    # When set, only URLs whose path matches are treated as recipe pages.
    recipes_path_regex: Optional[re.Pattern] = None
    # XML namespace map used for sitemap documents.
    namespaces: Dict[str, str] = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}
    def get_start_urls(self) -> Sequence[str]:
        """Return every sitemap URL that should be fetched."""
        return [self.start_url] + list(self.extra_start_urls)
    def process_start_urls(  # pylint: disable=no-self-use
        self, urls: Sequence[str], callback: PageCallback
    ) -> Iterator[scrapy.Request]:
        """Yield one request per sitemap URL, bypassing the dupe filter."""
        for url in urls:
            yield scrapy.Request(url, callback=callback, dont_filter=True)
    def get_page_urls(self, tree: etree.Element) -> Iterator[str]:
        """Yield the ``<loc>`` text of every ``<url>`` entry in the tree."""
        for location in tree.findall(".//sm:url/sm:loc", namespaces=self.namespaces):
            yield location.text
    def start_requests(self, page_callback: PageCallback) -> Iterator[scrapy.Request]:
        """Fetch each sitemap and request the recipe pages it lists."""
        def parse_sitemap(response: scrapy.http.Response):
            tree = etree.fromstring(response.body)
            for relative_url in self.get_page_urls(tree):
                url = urljoin(response.url, relative_url)
                parsed_url = urlparse(url)
                # Skip URLs that do not look like recipe pages.
                if (
                    self.recipes_path_regex is not None
                    and not self.recipes_path_regex.search(parsed_url.path)
                ):
                    continue
                yield scrapy.Request(url, callback=page_callback)
        start_urls = self.get_start_urls()
        yield from self.process_start_urls(start_urls, parse_sitemap)
class TwoLevelSitemapLister(SitemapLister):
    """
    Lister for sites whose root sitemap is an index of child sitemaps.

    The start URLs are sitemap indexes; each ``<sitemap><loc>`` entry whose
    path matches ``sitemap_path_regex`` (when set) is fetched and parsed as
    a regular sitemap by the parent class.
    """
    # When set, only child sitemaps whose path matches are followed.
    sitemap_path_regex: Optional[re.Pattern] = None
    def get_sitemap_urls(self, tree: etree.Element) -> Iterator[str]:
        """Yield the ``<loc>`` text of every ``<sitemap>`` index entry."""
        for location in tree.findall(
            ".//sm:sitemap/sm:loc", namespaces=self.namespaces
        ):
            yield location.text
    def process_start_urls(
        self, urls: Sequence[str], callback: PageCallback
    ) -> Iterator[scrapy.Request]:
        """Fetch each sitemap index, then each matching child sitemap."""
        def get_sitemaps(response):
            tree = etree.fromstring(response.body)
            for relative_url in self.get_sitemap_urls(tree):
                url = urljoin(response.url, relative_url)
                parsed_url = urlparse(url)
                # Bug fix: the attribute is Optional, and calling .search on
                # None raised AttributeError.  Guard against None the same
                # way SitemapLister guards recipes_path_regex.
                if (
                    self.sitemap_path_regex is not None
                    and not self.sitemap_path_regex.search(parsed_url.path)
                ):
                    continue
                yield scrapy.Request(url, callback=callback, dont_filter=True)
        yield from super().process_start_urls(urls, get_sitemaps)
|
<gh_stars>1-10
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
import string
import nltk
import warnings
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import pickle
from gensim.models import Word2Vec
import gensim
import nltk
from random import shuffle
import zipfile
from sklearn.model_selection import train_test_split
pd.set_option('display.max_colwidth', -1)
warnings.filterwarnings("ignore", category=DeprecationWarning)
pd.set_option('display.max_colwidth', -1)
import theano
import os
os.environ['KERAS_BACKEND'] = 'theano'
from keras import backend as K
from keras import layers
from keras.layers import BatchNormalization, Flatten,Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D
from keras.layers import Bidirectional,LSTM, Input,Dropout, Add,concatenate, Dense, Activation, ZeroPadding2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.initializers import glorot_uniform
from keras.layers.embeddings import Embedding
from keras.utils import to_categorical
# Load the labelled training tweets, the unlabelled test tweets, and the
# sample-submission template.
train = pd.read_csv('train_E6oV3lV.csv')
test = pd.read_csv('test_tweets_anuFYb8.csv')
sub = pd.read_csv('sample_submission_gfvA5FD.csv')
# Combine train and test so text cleaning is applied uniformly.
# pd.concat replaces DataFrame.append, which is deprecated and removed in
# pandas 2.0; the result is identical.
total = pd.concat([train, test], ignore_index=True)
def remove_pattern(input_txt, pattern):
    """
    Remove every occurrence of each substring matching ``pattern``.

    Parameters
    ----------
    input_txt : str
        Text to clean.
    pattern : str
        Regular expression whose matches are deleted from the text.

    Returns
    -------
    str
        The text with all matched substrings removed.
    """
    for match in re.findall(pattern, input_txt):
        # Bug fix: the matched text is literal data, not a regex.  Without
        # re.escape, a match containing metacharacters (e.g. '.', '+')
        # would be re-interpreted as a pattern and remove the wrong text.
        input_txt = re.sub(re.escape(match), '', input_txt)
    return input_txt
# Strip Twitter @handles from every tweet.
total['tidy_tweet'] = np.vectorize(remove_pattern)(total['tweet'], "@[\w]*")
# Replace everything except letters and '#' with spaces.
# NOTE(review): this relies on str.replace treating the pattern as a regex;
# newer pandas requires regex=True for that behaviour -- verify against the
# pandas version in use.
total['tidy_tweet'] = total['tidy_tweet'].str.replace("[^a-zA-Z#]", " ")
# Drop short words (3 characters or fewer).
total['tidy_tweet'] = total['tidy_tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
tokenized_tweet = total['tidy_tweet'].apply(lambda x: x.split())
from nltk.stem.porter import *
stemmer = PorterStemmer()
tokenized_tweet = tokenized_tweet.apply(lambda x: [stemmer.stem(i) for i in x]) # stemming
# Re-join the stemmed tokens into cleaned tweet strings.
for i in range(len(tokenized_tweet)):
    tokenized_tweet[i] = ' '.join(tokenized_tweet[i])
total['tidy_tweet'] = tokenized_tweet
total.tidy_tweet.fillna('',inplace=True)
# Tokenise again and train a Word2Vec embedding on the full corpus.
t = total['tidy_tweet'].apply(lambda x: x.split())
mod = Word2Vec(t) #training word2vec model
def f1(y_true, y_pred):
    """
    F1-score metric for Keras training (harmonic mean of precision and
    recall over the batch).

    Parameters
    ----------
    y_true, y_pred : tensors
        Ground-truth and predicted label tensors.

    Returns
    -------
    tensor
        The batch F1 score.
    """
    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        # Bug fix: recall is TP / actual positives.  The old denominator
        # added TP a second time, systematically under-reporting recall.
        # K.epsilon() guards against division by zero.
        return true_positives / (possible_positives + K.epsilon())
    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        # Bug fix: precision is TP / predicted positives, as above.
        return true_positives / (predicted_positives + K.epsilon())
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
def sentences_to_indices(text, mod, max_len):
    """
    Map tokenised sentences to padded arrays of Word2Vec vocabulary indices.

    Parameters
    ----------
    text : sequence of list of str
        Tokenised sentences.
    mod : gensim Word2Vec model
        Provides the word -> vocabulary-index mapping via ``mod.wv.vocab``.
    max_len : int
        Sentences are truncated to this many tokens; shorter ones are
        zero-padded.

    Returns
    -------
    numpy.ndarray
        Array of shape (len(text), max_len) holding vocabulary indices.
    """
    indices = np.zeros((len(text), max_len))
    for row, sentence in enumerate(text):
        col = 0
        for word in sentence:
            if col == max_len:
                break
            if word not in mod.wv.vocab:
                # Out-of-vocabulary words are skipped; the slot stays 0.
                continue
            indices[row, col] = mod.wv.vocab[word].index
            col += 1
    return indices
def pretrained_embedding_layer(mod):
    """
    Build a Keras Embedding layer initialised with Word2Vec weights.

    Parameters
    ----------
    mod : gensim Word2Vec model
        Trained model supplying the vocabulary and word vectors.

    Returns
    -------
    keras.layers.Embedding
        Layer of shape (vocab_size + 1, embedding_dim) whose rows hold the
        pretrained vectors at each word's vocabulary index.
    """
    vocab_len = len(mod.wv.vocab) + 1
    # Use the model's declared vector size instead of probing an arbitrary
    # word ("father"), which crashes when that word is out of vocabulary.
    emb_dim = mod.wv.vector_size
    emb_matrix = np.zeros((vocab_len, emb_dim))
    for word in mod.wv.vocab:
        # Bug fix: place each vector at the word's *vocabulary index*.
        # sentences_to_indices looks words up by mod.wv.vocab[w].index, so
        # filling rows in dict-iteration order could misalign words and
        # vectors.
        emb_matrix[mod.wv.vocab[word].index, :] = mod[word]
    embedding_layer = Embedding(vocab_len, emb_dim)
    embedding_layer.build((None,))
    embedding_layer.set_weights([emb_matrix])
    return embedding_layer
def MODEL(input_shape, mod):
    """
    Build the tweet classifier: parallel 2/3/4-gram CNN branches over
    pretrained Word2Vec embeddings, merged into a bidirectional LSTM.

    Parameters
    ----------
    input_shape : tuple
        Shape of the integer-index input, i.e. (max_len,).
    mod : gensim Word2Vec model
        Source of the pretrained embedding weights.

    Returns
    -------
    keras.models.Model
        The (uncompiled) model with two sigmoid outputs.
    """
    sentence_indices = Input(shape=input_shape)
    embeddings = pretrained_embedding_layer(mod)(sentence_indices)
    # One convolutional branch per n-gram size, each followed by pooling.
    branches = []
    for kernel in (2, 3, 4):
        conv = Conv1D(filters=100, kernel_size=kernel, padding='valid',
                      activation='relu', strides=1)(embeddings)
        branches.append(MaxPooling1D(pool_size=2)(conv))
    merged = concatenate(branches, axis=1)
    X = Bidirectional(LSTM(100))(merged)
    X = Dense(256, activation='relu')(X)
    X = Dropout(0.2)(X)
    X = Dense(2, activation='sigmoid')(X)
    return Model(inputs=sentence_indices, outputs=X, name='MODEL')
# Maximum tweet length (in tokens) fed to the network.
max_len=20
model = MODEL((max_len,),mod)
#print(model.summary())
model.compile(loss='categorical_crossentropy',optimizer= 'adam', metrics=[f1])
x_total=total['tidy_tweet'].apply(lambda x: x.split())
y_total=total['label'].values
x_total=x_total.values
# The first 31962 rows are the labelled training tweets; the rest are the
# unlabelled test tweets appended earlier.
x_train = x_total[:31962]
x_test = x_total[31962:]
y_train = y_total[:31962]
x_train_indices = sentences_to_indices(x_train, mod,max_len)
x_test_indices=sentences_to_indices(x_test,mod,max_len)
y_train_ohe=to_categorical(y_train, num_classes=2)
model.fit(x_train_indices, y_train_ohe, epochs = 5, batch_size = 32, shuffle=True)
filename = 'model_cnn_ngram234_maxlen20_biLSTM.sav'
# NOTE(review): pickling a Keras model is unreliable; model.save() is the
# supported persistence mechanism -- confirm before relying on this file.
pickle.dump(model, open(filename, 'wb'))
prediction = model.predict(x_test_indices)
#plt.hist(prediction[:,1],bins=10)
# Threshold the positive-class probability at 0.3 to get hard 0/1 labels.
prediction_int = prediction[:,1] >= 0.3
# Bug fix: np.int was deprecated and later removed from NumPy; the builtin
# int is the documented equivalent for astype.
prediction_int = prediction_int.astype(int)
sub = pd.read_csv('sample_submission_gfvA5FD.csv')
sub['label']=prediction_int
sub.to_csv('word2vec_cnn.csv',index=False)
|
<gh_stars>0
import requests
import numpy as np
import math
import pandas as pd
PLAYERS_URL = ("https://raw.githubusercontent.com/mesosbrodleto/"
"soccerDataChallenge/master/players.json")
EVENTS_URL = ("https://raw.githubusercontent.com/mesosbrodleto/"
"soccerDataChallenge/master/worldCup-final.json")
def get_players():
    """
    Fetch the list of players for the match and return the parsed JSON.
    """
    response = requests.get(PLAYERS_URL)
    return response.json()
def get_events():
    """
    Fetch the list of events in the match and return the parsed JSON.
    """
    response = requests.get(EVENTS_URL)
    return response.json()
def main():
    """
    Compute per-player summary metrics for the World Cup final.

    For every player with more than 45 minutes between their first and last
    event, write mean position, mean squared distance from that position,
    and mean time between events to 'problema_1.csv'.
    """
    events = get_events()
    players = get_players()
    player_timemean = {}
    player_team = {}
    player_name = {}
    goodplayers = []
    for player in players:
        timeline = []
        eventdiff = []
        player_name[player['playerId']] = player['name']
        for event in events:
            if player['playerId'] == event['playerId']:
                player_team[player['playerId']] = event['teamId']
                # Normalise second-half clocks onto a single timeline by
                # adding 45 minutes (2700 s).
                if event['matchPeriod'] == '1H':
                    timeline.append(event['eventSec'])
                if event['matchPeriod'] == '2H':
                    timeline.append(event['eventSec'] + 2700)
        # Bug fix: players with no events in the match left timeline empty,
        # so max()/min() below raised ValueError; skip them instead.
        if not timeline:
            continue
        if (max(timeline) - min(timeline)) / 60 > 45.:
            goodplayers.append(player['playerId'])
            # Mean time between consecutive events.
            for i in range(1, len(timeline)):
                eventdiff.append(timeline[i] - timeline[i - 1])
            npdiff = np.array(eventdiff)
            player_timemean[player['playerId']] = npdiff.mean()
    player_posmean = {}
    # Players with >45' between first and last event ("played the match").
    for playID in goodplayers:
        positions = []
        for event in events:
            if event['playerId'] == playID:
                positions = positions + [event['positions'][0]]
        xpos = []
        ypos = []
        for position in positions:
            xpos.append(position['x'])
            ypos.append(position['y'])
        npx = np.array(xpos)
        npy = np.array(ypos)
        # Mean squared distance of each event from the mean position.
        distquad = 0
        for position in positions:
            distquad = distquad + (npx.mean() - position['x']) ** 2 + (npy.mean() - position['y']) ** 2
        player_posmean[playID] = [npx.mean(), npy.mean(), distquad / len(positions)]
    output = []
    for playID in goodplayers:
        player_f = {}
        player_f['identificativo_calciatore'] = playID
        player_f['nome_calciatore'] = player_name[playID]
        player_f['squadra_calciatore'] = player_team[playID]
        player_f['posizione_media_x'] = player_posmean[playID][0]
        player_f['posizione_media_y'] = player_posmean[playID][1]
        player_f['distanza_quadratica_media'] = player_posmean[playID][2]
        player_f['tempo_medio_tra_eventi'] = player_timemean[playID]
        output.append(player_f)
    pd.DataFrame(output).to_csv('problema_1.csv', index=False)

if __name__ == "__main__":
    main()
|
<filename>roman_date.py
from datetime import date, datetime, timedelta
# Latin month abbreviations, 1-indexed (index 0 is an unused placeholder).
latin_months = [
    "",
    "IAN",
    "FEB",
    "MART",
    "APR",
    "MAI",
    "IVN",
    "IVL",
    "AVG",
    "SEPT",
    "OCT",
    "NOV",
    "DEC",
]
# Abbreviations for the three named reference days of the Roman month.
latin_words = {"none": "NON.", "kalend": "KAL.", "ide": "ID."}
# Roman numerals for day counts, indexed by the count (index 0 unused).
latin_numerals = [
    "",
    "I",
    "II",
    "III",
    "IV",
    "V",
    "VI",
    "VII",
    "VIII",
    "IX",
    "X",
    "XI",
    "XII",
    "XIII",
    "XIV",
    "XV",
    "XVI",
    "XVII",
    "XVIII",
    "XIX",
]
def roman_date(today: date) -> str:
    """
    Calculate roman date given calendar date.
    If you want this to be authentic, you'd better use the Julian
    Calendar. But note that we don't actually know how the Julian
    calendar worked in antiquity, and dating is very much a guessing
    game.

    Roman dates count down (inclusively) to the next of three reference
    days: the Kalends (1st), Nones, and Ides of a month.
    """
    # The month after today's (wrapping December -> January) and its year.
    next_month = today.month + 1 if today.month < 12 else 1
    next_month_year = today.year if next_month > 1 else today.year + 1
    # Because Roman counting is inclusive, the target date is the day
    # *after* the Kalends (day=2).  On the Kalends itself count within the
    # current month; otherwise count toward next month's Kalends.
    if today.day == 1:
        kalend = date(day=2, month=today.month, year=today.year)
    else:
        kalend = date(day=2, month=next_month, year=next_month_year)
    # Months whose Ides fall on the 15th (March, May, July, October).
    long_months = (3, 5, 7, 10)
    # Choose the month whose Ides we count toward: this month while its
    # Ides are still ahead, otherwise next month.
    if today.day > 15 and today.month in long_months:
        ide_month = next_month
        ide_year = next_month_year
    elif today.day > 13 and today.month not in long_months:
        ide_month = next_month
        ide_year = next_month_year
    else:
        ide_month = today.month
        ide_year = today.year
    # As with the Kalends, the inclusive count targets the day after the
    # Ides: the 16th in "long" months, the 14th elsewhere.
    if ide_month in long_months:
        ide = date(day=16, month=ide_month, year=ide_year)
    else:
        ide = date(day=14, month=ide_month, year=ide_year)
    # The Nones fall 8 days before the (shifted) Ides.
    none = ide - timedelta(days=8)
    # get next big date
    candidates = {"none": none, "ide": ide, "kalend": kalend}
    # Order the reference days by distance from today; the first one that
    # is not in the past is the day we count down to.
    keys = sorted(candidates, key=lambda x: candidates[x] - today)
    for key in keys:
        candidate = candidates[key]
        if candidate - today < timedelta(days=0):  # in the past
            continue
        delta = candidate - today
        # Leap-year February: the extra day is a doubled "sixth day before
        # the Kalends of March" (a.d. bis VI Kal. Mart.), so counts around
        # it shift by one day.
        if today.year % 4 == 0 and today.month == 2:
            if today.day > 25:
                delta += timedelta(days=1)
            if today.day == 25:
                return "A.D. BIS VI KAL. M."
            elif today.day > 13:
                delta -= timedelta(days=1)
        if not delta:
            # A zero delta for the Nones/Ides falls through to the next
            # candidate; only the Kalends is reported as "today" here.
            if key != "kalend":
                continue
            return f"KAL. {latin_months[candidate.month]}"
        if delta.days == 1:
            # The reference day itself (inclusive count of one).
            return f"{latin_words[key]} {latin_months[candidate.month]}."
        else:
            if delta.days == 2:
                # The day before a reference day is "pridie".
                return f"PRID. {latin_words[key]} {latin_months[candidate.month]}."
            else:
                # Otherwise "ante diem N": N days before, counted inclusively.
                days = delta.days
                return f"A.D. {latin_numerals[days]} {latin_words[key]} {latin_months[candidate.month]}."
def test_rom_cal():
    """
    Check roman_date against the reference table in ./cal.csv.

    The CSV holds a non-leap-year section and a leap-year section,
    introduced by 'Non-leap year' / 'Leap year' marker rows.
    """
    from csv import DictReader
    from pathlib import Path
    non_leap = {}
    leap = {}
    with Path("./cal.csv").open() as f:
        reader = DictReader(f)
        leaping = False
        for row in reader:
            # Marker rows switch which table the following dates belong to.
            if row["Modern"] == "Non-leap year":
                continue
            if row["Modern"] == "Leap year":
                leaping = True
                continue
            if row["Modern"] == "01 Mar":
                # Leap and non-leap years agree again from 1 March onward.
                leaping = False
            if leaping:
                leap[row["Modern"]] = row["Full date"]
            else:
                non_leap[row["Modern"]] = row["Full date"]
    # 2021 is not a leap year
    for modern, old in non_leap.items():
        print(modern, old)
        d = datetime.strptime(f"{modern} 2021", "%d %b %Y").date()
        assert roman_date(d) == old
    # 2020 is a leap year: overlay the leap-section dates and re-check
    for modern, old in non_leap.items():
        print(modern, old)
        d = datetime.strptime(f"{modern} 2020", "%d %b %Y").date()
        assert roman_date(d) == old
if __name__ == "__main__":
    # Bug fix: the computed Roman date was discarded; print it so running
    # the script actually shows today's date.
    print(roman_date(date.today()))
|
#!/usr/bin/env python3
"""
Additional commands to add to the CLI beyond the OpenAPI spec.
"""
from __future__ import print_function
import functools
import os
import sys
import click
import requests
import webbrowser
import civis
from civis.io import file_to_civis, civis_to_file
# From http://patorjk.com/software/taag/#p=display&f=3D%20Diagonal&t=CIVIS
_CIVIS_ASCII_ART = r"""
,----.. ,---, ,---, .--.--.
/ / \ ,`--.' | ,---.,`--.' | / / '.
| : :| : : /__./|| : :| : /`. /
. | ;. /: | ' ,---.; ; |: | '; | |--`
. ; /--` | : |/___/ \ | || : || : ;_
; | ; ' ' ;\ ; \ ' |' ' ; \ \ `.
| : | | | | \ \ \: || | | `----. \
. | '___ ' : ; ; \ ' .' : ; __ \ \ |
' ; : .'|| | ' \ \ '| | ' / /`--' /
' | '/ :' : | \ ` ;' : |'--'. /
| : / ; |.' : \ |; |.' `--'---'
\ \ .' '---' '---" '---'
`---`
"""
@click.command('upload')
@click.argument('path')
@click.option('--name', type=str, default=None,
              help="A name for the Civis File (defaults to the base file name)")
@click.option('--expires-at', type=str, default=None,
              help="The date and time the file will expire "
                   "(ISO-8601 format, e.g., \"2017-01-15\" or "
                   "\"2017-01-15T15:25:10Z\"). "
                   "Set \"never\" for the file to not expire. "
                   "The default is the default in Civis (30 days).")
def files_upload_cmd(path, name, expires_at):
    """Upload a local file to Civis and get back the File ID."""
    # Help-text fixes: closed the unbalanced "(defaults to ..." parenthesis
    # and restored the missing space between the two final sentences.
    if name is None:
        name = os.path.basename(path)
    if expires_at is None:
        # Use the default in Civis platform (30 days).
        expires_kwarg = {}
    elif expires_at.lower() == "never":
        # An explicit None tells the API the file should never expire.
        expires_kwarg = {"expires_at": None}
    else:
        expires_kwarg = {"expires_at": expires_at}
    with open(path, 'rb') as f:
        file_id = file_to_civis(f, name=name, **expires_kwarg)
    print(file_id)
@click.command('download')
@click.argument('file_id', type=int)
@click.argument('path')
def files_download_cmd(file_id, path):
    """Download a Civis File to a specified local path."""
    # Stream the remote file's contents straight into the local binary file.
    with open(path, 'wb') as f:
        civis_to_file(file_id, f)
@click.command('sql')
@click.option('--dbname', '-d', type=str, required=True,
              help='Execute the query on this Civis Platform database')
@click.option('--command', '-c', type=str, default=None,
              help='Execute a single input command string')
@click.option('--filename', '-f', type=click.Path(exists=True),
              help='Execute a query read from the given file')
@click.option('--output', '-o', type=click.Path(),
              help='Download query results to this file')
@click.option('--quiet', '-q', is_flag=True, help='Suppress screen output')
@click.option('-n', type=int, default=100,
              help="Display up to this many rows of the result. Max 100.")
def sql_cmd(dbname, command, filename, output, quiet, n):
    """\b Execute a SQL query in Civis Platform
    If neither a command nor an input file is specified, read
    the SQL command from stdin.
    If writing to an output file, use a Civis SQL script and write the
    entire query output to the specified file.
    If not writing to an output file, use a Civis Query, and return a
    preview of the results, up to a maximum of 100 rows.
    """
    # Query source precedence: file > --command > interactive stdin.
    if filename:
        with open(filename, 'rt') as f:
            sql = f.read()
    elif not command:
        # Read the SQL query from user input. This also allows use of a heredoc
        lines = []
        while True:
            try:
                _i = input()
            except (KeyboardInterrupt, EOFError):
                # The end of a heredoc produces an EOFError.
                break
            if not _i:
                break
            else:
                lines.append(_i)
        sql = '\n'.join(lines)
    else:
        sql = command
    if not sql:
        # If the user didn't enter a query, exit.
        if not quiet:
            print('Did not receive a SQL query.', file=sys.stderr)
        return
    if not quiet:
        print('\nExecuting query...', file=sys.stderr)
    if output:
        # SQL-script path: write the full result set to the output file.
        fut = civis.io.civis_to_csv(output, sql, database=dbname)
        fut.result()  # Block for completion and raise exceptions if any
        if not quiet:
            print("Downloaded the result of the query to %s." % output,
                  file=sys.stderr)
    else:
        # Query path: fetch only a preview (up to n rows) and print a table.
        fut = civis.io.query_civis(sql, database=dbname,
                                   preview_rows=n, polling_interval=3)
        cols = fut.result()['result_columns']
        rows = fut.result()['result_rows']
        if not quiet:
            print('...Query complete.\n', file=sys.stderr)
        print(_str_table_result(cols, rows))
def _str_table_result(cols, rows):
    """Turn a Civis Query result into a readable table."""
    # Column width = widest cell in that column, header row included.
    columns = zip(*([cols] + rows))
    widths = [max(len(cell) for cell in column) for column in columns]
    # Header cells are left-aligned; data cells are right-aligned.
    header = " | ".join("{0:<{width}}".format(name, width=w)
                        for w, name in zip(widths, cols))
    lines = [header, '-' * len(header)]
    for row in rows:
        lines.append(" | ".join("{0:>{width}}".format(cell, width=w)
                                for w, cell in zip(widths, row)))
    return '\n'.join(lines)
@click.command('download')
@click.argument('notebook_id', type=int)
@click.argument('path')
def notebooks_download_cmd(notebook_id, path):
    """Download a notebook to a specified local path."""
    client = civis.APIClient()
    info = client.notebooks.get(notebook_id)
    # Stream the notebook file in chunks rather than loading it whole.
    response = requests.get(info['notebook_url'], stream=True)
    response.raise_for_status()
    chunk_size = 32 * 1024
    chunked = response.iter_content(chunk_size)
    with open(path, 'wb') as f:
        for lines in chunked:
            f.write(lines)
@click.command('new')
@click.argument('language', type=click.Choice(['python3', 'python2', 'r']),
                default='python3')
@click.option('--mem', type=int, default=None,
              help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
              help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_new_cmd(language='python3', mem=None, cpu=None):
    """Create a new notebook and open it in the browser."""
    client = civis.APIClient()
    # Only forward the resource settings the user actually provided.
    kwargs = {}
    if mem is not None:
        kwargs['memory'] = mem
    if cpu is not None:
        kwargs['cpu'] = cpu
    new_nb = client.notebooks.post(language=language, **kwargs)
    print("Created new {language} notebook with ID {id} in Civis Platform"
          " (https://platform.civisanalytics.com/#/notebooks/{id})."
          .format(language=language, id=new_nb.id))
    _notebooks_up(new_nb.id)
    _notebooks_open(new_nb.id)
@click.command('up')
@click.argument('notebook_id', type=int)
@click.option('--mem', type=int, default=None,
              help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
              help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_up(notebook_id, mem=None, cpu=None):
    """Start an existing notebook and open it in the browser."""
    client = civis.APIClient()
    # Only patch the resource settings the user actually provided.
    kwargs = {}
    if mem is not None:
        kwargs['memory'] = mem
    if cpu is not None:
        kwargs['cpu'] = cpu
    client.notebooks.patch(notebook_id, **kwargs)
    _notebooks_up(notebook_id)
    _notebooks_open(notebook_id)
def _notebooks_up(notebook_id):
    # Deploy (start) the notebook server for the given notebook.
    client = civis.APIClient()
    return client.notebooks.post_deployments(notebook_id)
@click.command('down')
@click.argument('notebook_id', type=int)
def notebooks_down(notebook_id):
    """Shut down a running notebook."""
    client = civis.APIClient()
    nb = client.notebooks.get(notebook_id)
    state = nb['most_recent_deployment']['state']
    if state not in ['running', 'pending']:
        print('Notebook is in state "{}" and can\'t be stopped.'.format(state))
        # Bug fix: bail out here.  The old code fell through and deleted
        # the deployment even after reporting it can't be stopped.
        return
    deployment_id = nb['most_recent_deployment']['deploymentId']
    client.notebooks.delete_deployments(notebook_id, deployment_id)
@click.command('open')
@click.argument('notebook_id', type=int)
def notebooks_open(notebook_id):
    """Open an existing notebook in the browser."""
    _notebooks_open(notebook_id)
def _notebooks_open(notebook_id):
    # Open the notebook's Civis Platform page in a new browser tab.
    url = ('https://platform.civisanalytics.com/#/notebooks/'
           '{}?fullscreen=true').format(notebook_id)
    webbrowser.open(url, new=2, autoraise=True)
@click.command('civis', help="Print Civis")
def civis_ascii_art():
    # Print the ASCII-art Civis logo defined at module top.
    print(_CIVIS_ASCII_ART)
|
<reponame>dakoner/smilesparser<gh_stars>1-10
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smilesparser
serial = 0
atomdb = {}
last_atom = None
def inspect_organic_symbol(organic_symbol, indent=0):
  """Print an organic-subset atom symbol at the given indent depth."""
  print(f"{' ' * indent}Organic symbol: {''.join(organic_symbol)}")
def inspect_aromatic_symbol(aromatic_symbol, indent=0):
  """Print an aromatic atom symbol at the given indent depth."""
  print(f"{' ' * indent}Aromatic symbol: {''.join(aromatic_symbol)}")
def inspect_element_symbol(element_symbol, indent=0):
  """Print an element symbol at the given indent depth."""
  print(f"{' ' * indent}Element symbol: {''.join(element_symbol)}")
def inspect_chiral_class(chiral_class, indent=0):
  """Print a chirality specifier at the given indent depth."""
  print(f"{' ' * indent}Chiral Class: {''.join(chiral_class)}")
def inspect_hcount(hcount, indent=0):
  """Print a hydrogen-count specifier at the given indent depth."""
  print(f"{' ' * indent}HCount: {''.join(hcount)}")
def inspect_charge(charge, indent=0):
  """Print a charge specifier at the given indent depth."""
  print(f"{' ' * indent}Charge: {''.join(charge)}")
def inspect_atomspec(atomspec, indent=0):
  """Print a bracket-atom spec, recursing into each of its components."""
  print(" " * indent + "AtomSpec", ''.join(map(str, atomspec)))
  for item in atomspec:
    if isinstance(item, smilesparser.AST.AromaticSymbol):
      inspect_aromatic_symbol(item.aromatic_symbol, indent+1)
    elif isinstance(item, smilesparser.AST.ElementSymbol):
      inspect_element_symbol(item.element_symbol, indent+1)
    elif isinstance(item, smilesparser.AST.ChiralClass):
      inspect_chiral_class(item.chiral_class, indent+1)
    elif isinstance(item, smilesparser.AST.HCount):
      inspect_hcount(item.hcount, indent+1)
    elif isinstance(item, smilesparser.AST.Charge):
      inspect_charge(item.charge, indent+1)
    else:
      # Fallback for unrecognised AST nodes: dump the node and its API.
      print(" " * indent + str(item), dir(item))
def inspect_atom(atom, indent=0):
  """
  Print an atom node, dispatching on its AST type, and register the atom
  in the global atom database under the next serial number.
  """
  global last_atom, serial
  # Remember the most recent atom so ring-closure digits can bond to it.
  last_atom = atom
  if isinstance(atom, smilesparser.AST.OrganicSymbol):
    inspect_organic_symbol(atom.organic_symbol)
  elif isinstance(atom, smilesparser.AST.AromaticSymbol):
    inspect_aromatic_symbol(atom.aromatic_symbol)
  elif isinstance(atom, smilesparser.AST.AtomSpec):
    inspect_atomspec(atom.atom_spec)
  else:
    # Bug fix: convert with str() -- concatenating a non-string AST node
    # to " " * indent raised TypeError (the sibling walkers already use
    # str() in their fallback branches).
    print(" " * indent + str(atom), dir(atom))
  atomdb[atom] = serial
  serial += 1
def inspect_bond(bond, indent=0):
  """Print a bond symbol at the given indent depth."""
  print(" " * indent + "Bond:", bond)
# Maps a ring-closure digit to the atom that opened the ring.
ring_closures = {}
def inspect_ring_closure(ring_closure, indent=0):
  """
  Print a ring-closure digit and, on its second occurrence, report the
  bond between the opening atom and the most recently seen atom.
  """
  print(" " * indent + "Ring Closure:", ring_closure)
  global last_atom
  if ring_closure not in ring_closures:
    ring_closures[ring_closure] = last_atom
  else:
    first = ring_closures[ring_closure]
    second = last_atom
    print("bond between:", atomdb[first], "and", atomdb[second])
def inspect_chain(chain, indent=0):
  """Walk a chain node, dispatching bonds, atoms, and ring closures."""
  # print(" " * indent + "Chain")
  for item in chain:
    if isinstance(item, smilesparser.AST.Bond):
      inspect_bond(item.bond, indent)
    elif isinstance(item, smilesparser.AST.Atom):
      inspect_atom(item.atom, indent)
    elif isinstance(item, smilesparser.AST.RingClosure):
      inspect_ring_closure(item.ring_closure, indent)
    else:
      # Bug fix: str(item) -- concatenating an AST node to a string raised
      # TypeError in this fallback branch.
      print(" " * indent + str(item), dir(item))
def iterate_branch(branch, indent=0):
  """Walk a parenthesised branch node: optional bond plus nested SMILES."""
  print(" " * indent + "Branch")
  for item in branch[0]:
    if isinstance(item, smilesparser.AST.Bond):
      inspect_bond(item.bond, indent+1)
    elif isinstance(item, smilesparser.AST.SMILES):
      iterate_smiles(item.smiles, indent+1)
    else:
      # Bug fix: str(item) -- concatenating an AST node to a string raised
      # TypeError in this fallback branch.
      print(" " * indent + str(item), dir(item))
def iterate_smiles(smiles, indent=0):
  """Walk a SMILES AST root, dispatching atoms, chains, and branches."""
  # print(" " * indent + "SMILES")
  for item in smiles:
    if isinstance(item, smilesparser.AST.Atom):
      inspect_atom(item.atom, indent)
    elif isinstance(item, smilesparser.AST.Chain):
      inspect_chain(item.chain, indent)
    elif isinstance(item, smilesparser.AST.Branch):
      iterate_branch(item, indent+1)
    else:
      # Bug fix: str(item) -- concatenating an AST node to a string raised
      # TypeError in this fallback branch.
      print(" " * indent + str(item), dir(item))
# Sample SMILES strings; earlier test cases are kept commented out.
smiles=[
#  'C(C)C',
#  'C=C(CCCC)CBr',
#  'CCC(C(C)C)C[Br+]CC',
#  'CC(=NO)C(C)=NO',
#  'c1ccccc1',
  #'CCC[S@](=O)c1ccc2c(c1)[nH]/c(=N/C(=O)OC)/[nH]2',
#  'CCC(=O)O[C@]1(CC[NH+](C[C@@H]1CC=C)C)c2ccccc2',
#  'C[C@@H](c1ccc(cc1)NCC(=C)C)C(=O)[O-]',
#  'C[C@H](Cc1ccccc1)[NH2+][C@@H](C#N)c2ccccc2',
#  'C[C@@H](CC(c1ccccc1)(c2ccccc2)C(=O)N)[NH+](C)C',
#  'Cc1c(c(=O)n(n1C)c2ccccc2)NC(=O)[C@H](C)[NH+](C)C',
#  'c1ccc(cc1)[C@@H](C(=O)[O-])O',
#  'CC[C@](C)(C[NH+](C)C)OC(=O)c1ccccc1'
#  'COc1cc(c(c2c1OCO2)OC)CC=C',
#  'Cc1ccccc1NC(=O)[C@H](C)[NH+]2CCCC2',
  "CC(C)CCNC(=O)c1cc(n(n1)c2ccc(cc2)F)c3cccnc3",
]
for s in smiles:
  print(s)
  # parseString returns a one-element result; its .smiles is the AST root.
  parsed = smilesparser.SMILES.parseString(s)[0]
  iterate_smiles(parsed.smiles)
|
<reponame>evenmarbles/rlpy<filename>rlpy/agent/planner/planner.py
import weakref
import numpy as np
from collections import namedtuple
from itertools import count
from ...framework.observer import Observable, Listener
class Planner(object):
"""
"""
    class ValueState(Observable):
        """
        Observable wrapper around a single scalar value.

        Observers subscribed to the 'value_change' event are notified with
        the signed change whenever the value is updated.
        """
        @property
        def value(self):
            # Current scalar value of this state.
            return self._value
        def __init__(self, value=None):
            super(Planner.ValueState, self).__init__()
            self._value = value if value is not None else 0.0
        def _set_value(self, value):
            """Changes the value of this ValueState and sends a notification
            to all of the observers of this ValueState.
            Parameters
            ----------
            value : float
                The new value of this ValueState
            """
            change = value - self._value
            self._value = value
            self.dispatch('value_change', change)
class DecisionState(ValueState, Listener):
"""
"""
_instance = object()
MaxAction = namedtuple('MaxAction', ['action', 'dsa'])
class Action(Listener):
"""
"""
_ids = count(0)
_instance = object()
ValueStateProbability = namedtuple('ValueStateProbability', ['vs', 'proba'])
@property
def q(self):
"""float : The q-value for each state-action."""
return self._q
@property
def mdp(self):
return self._model
def __init__(self, token, parent, model):
if token is not self._instance:
raise ValueError("Use 'create' to construct {0}".format(self.__class__.__name__))
super(Planner.DecisionState.Action, self).__init__()
self._parent = parent
""":type: DecisionState"""
self._model = model
"""StateAction"""
self._q = 0.0
self._errorbound = 0.0
self._successors = {}
"""dict[MDPState, ValueStateProbability]"""
self._mid = "%s.%s:%i" % (self.__class__.__module__, self.__class__.__name__, next(self._ids))
def __repr__(self):
return self._mid
@classmethod
def create(cls, parent, model):
result = cls(cls._instance, parent, model)
parent._action_values[model._action] = result
result.compute_successors()
result.compute_value()
return result
def debug(self):
for s, vsp in self._successors.iteritems():
vs = vsp.vs
if vsp.vs is None:
vs = self._parent
print("\t\t%f: %s(%f)" % (vsp.proba, s, vs.value))
print("\t\tr = %f, Q = %f" % (self._model.reward, self.q))
def notify(self, event):
if event.name != 'value_change':
return
change = event.change
if change < 0:
change = -change
change *= self._parent._planner._gamma
self._errorbound += change
# TODO: need to move to Prioritized Sweeping?
def compute_successors(self):
model_succs = self._model.successor_probabilities
# remove erstwhile successors
for s, vsp in self._successors.items():
if s not in model_succs.keys():
if vsp.vs is not None:
vsp.vs.unsubscribe(self, 'value_change')
else:
self._parent.unsubscribe(self, 'value_change')
del self._successors[s]
for s, proba in model_succs.iteritems():
if s not in self._successors:
succ = self._parent._planner._successor_value(s)
assert succ
succ.subscribe(self, 'value_change', {
'func': {
'attrib': 'change',
'callable': lambda x: proba * x
}
})
# use None reference as a proxy for parent to avoid creating
# a cycle of strong references
if succ == self._parent:
succ = None
self._successors[s] = Planner.DecisionState.Action.ValueStateProbability(
succ if succ._mid != self._parent._mid else weakref.proxy(succ), proba)
elif self._successors[s].proba != proba:
# update existing successor
vs = self._successors[s].vs
if vs is not None:
vs.subscribe(self, 'value_change', {
'func': {
'attrib': 'change',
'callable': lambda x: proba * x
}
})
self._successors[s] = self._successors[s]._replace(proba=proba)
else:
self._parent.subscribe(self, 'value_change', {
'func': {
'attrib': 'change',
'callable': lambda x: proba * x
}
})
self._successors[s] = self._successors[s]._replace(vs=self._parent, proba=proba)
self._errorbound = np.infty
    def compute_value(self):
        # Q(s, a) = r + gamma * sum_s' P(s'|s,a) * V(s').
        # A None successor reference proxies the parent state itself.
        self._q = 0.0
        for vsp in self._successors.itervalues():
            succ = vsp.vs if vsp.vs is not None else self._parent
            self._q += vsp.proba * succ.value
        self._q *= self._parent._planner._gamma
        self._q += self._model.reward
        # value is now exact, so no accumulated error remains
        self._errorbound = 0.0
    def update_value(self):
        # Recompute Q only if successor changes have accumulated since the
        # last exact computation (lazy evaluation via the error bound).
        if self._errorbound > 0:
            self.compute_value()
# -----------------------------
# DecisionState
# -----------------------------
    @property
    def state(self):
        # The MDP state this decision node represents.
        return self._model.state
    @property
    def policy_action(self):
        # The currently greedy (max-Q) action at this state.
        return self._max.action
    @property
    def policy_model(self):
        # The StateData backing this decision state.
        return self._model
    def __init__(self, token, planner, model):
        # Private constructor: `token` must be the class-private sentinel so
        # that construction only happens through create(), which also
        # registers the instance in the planner's hash tables.
        if token is not self._instance:
            raise ValueError("Use 'create' to construct {0}".format(self.__class__.__name__))
        super(Planner.DecisionState, self).__init__()
        self._planner = planner
        """:type: Planner"""
        self._model = model
        """:type: StateData"""
        self._action_values = {}
        """dict[MDPAction, Action]"""
        self._max = Planner.DecisionState.MaxAction(None, None)
        """MaxAction"""
        # accumulated error of the cached value (0 == exact)
        self._errorbound = 0.0
        self._inbox = weakref.WeakSet()
        """:type: set[]"""
        # print("Planner.DecisionState: %s" % self._model.state)
    def __del__(self):
        # Intentionally a no-op; the commented-out code below was used to
        # trace teardown of successor references while debugging leaks.
        pass
        # print("Planner.DecisionState.__del__: %s" % self._model.state)
        # for a in self._action_values.itervalues():
        #    for vsp in a._successors.itervalues():
        #        if vsp.vs is not None:
        #            del vsp
def __str__(self, level=0):
ret = "\t" * level + repr(self._model.state) + "\n"
for av in self._action_values.itervalues():
for vsp in av._successors.itervalues():
if vsp.vs._mid != self._mid:
ret += vsp.vs.__str__(level + 1)
return ret
    def __repr__(self):
        # NOTE(review): assumes self._mid is a string (repr must return str)
        # — confirm against the base class that assigns _mid.
        return self._mid
    @classmethod
    def create(cls, planner, model, is_completion):
        # Factory for DecisionState: registers the new instance in the
        # planner's tables *before* initialize() runs, because initialize()
        # may recursively create DecisionStates for successor states and
        # must be able to find this one in the hash tables (see initialize's
        # docstring on MDP cycles).
        result = cls(cls._instance, planner, model)
        if is_completion:
            planner._completions[model.state] = result
        planner._nonterminals[model.state] = result
        result._model.subscribe(result, 'mdp_change')
        result.initialize()
        return result
    def debug(self):
        # Dump this state's per-action diagnostics.
        print("State %s" % self.state)
        for a, dsa in self._action_values.iteritems():
            print("\tAction %s" % a)
            dsa.debug()
        # Python 2 print statement: emits a blank separator line.
        print
    def notify(self, event):
        # Queue the changed action locally and enqueue this state with the
        # planner; actual propagation is deferred to propagate_mdp_change().
        self._inbox.add(event.action)
        self._planner._inbox.add(self)
    def initialize(self):
        """Compute the initial value of this state.
        Note that this method is separate from the constructor, since otherwise
        a cycle in the MDP structure might cause the planner to try to create a
        new DecisionState object for a state in which another DecisionState object
        is still being constructed. This way, we can update the hash table mapping
        states of DecisionState objects after construction but before initialization,
        since the initialization is what causes the cycle.
        """
        for act, model in self._model.state_actions.iteritems():
            if len(self._action_values) <= 0 or act not in self._action_values:
                # this can lead to the construction and initialization of other
                # DecisionState objects
                dsa = type(self).Action.create(weakref.proxy(self), model)
                # Action.create registers itself on this state's table.
                assert act in self._action_values
                assert dsa == self._action_values[act]
                # track the greedy action as values are computed
                if self._max.dsa is None or self._max.dsa.q < dsa.q:
                    self._max = Planner.DecisionState.MaxAction(act, dsa)
        self._set_value(self._max.dsa.q)
    def propagate_mdp_change(self):
        # Re-resolve successor links for every action whose model changed
        # (queued by notify), then re-evaluate this state's value.
        for i in self._inbox:
            self._action_values[i].compute_successors()
        self._inbox.clear()
        self.propagate_value_change()
    def propagate_value_change(self):
        # Recompute the greedy action over all action values; record a
        # policy change in the planner's outbox and update the state value
        # if the max changed. (Python 2 iterator protocol: it.next().)
        original_action = self._max.action
        assert self._action_values
        it = self._action_values.iteritems()
        # seed the max with the first action ...
        act, dsa = it.next()
        dsa.update_value()
        self._max = Planner.DecisionState.MaxAction(act, dsa)
        # ... then scan the remainder
        while True:
            try:
                act, dsa = it.next()
                dsa.update_value()
                if self._max.dsa.q < dsa.q:
                    self._max = Planner.DecisionState.MaxAction(act, dsa)
            except StopIteration:
                break
        if self._max.action != original_action:
            # this is a new policy action
            self._planner._outbox[self] = original_action
        if self._max.dsa.q != self.value:
            self._set_value(self._max.dsa.q)
    def propagate_policy_change(self, original):
        # Notify subscribers only if the policy action genuinely differs
        # from the original one (suppresses A -> B -> A flip-flops).
        if original != self._max.action:
            self.dispatch('policy_change')
# -----------------------------
# Planner
# -----------------------------
    @property
    def mdp(self):
        # The underlying MDP model being planned over.
        return self._mdp
    def __init__(self, mdp, terminal, goal, gamma=None):
        # mdp: the model to plan over; terminal(state) -> bool detects
        # terminal states; goal(state) -> float gives their value;
        # gamma: discount factor (defaults to undiscounted, 1.0).
        self._mdp = mdp
        """:type: MDP"""
        self._terminal = terminal
        """:type: callable"""
        self._goal = goal
        """:type: callable"""
        self._gamma = gamma if gamma is not None else 1.0
        # Weak tables: entries disappear automatically once no strong
        # reference to a DecisionState remains.
        self._nonterminals = weakref.WeakValueDictionary()
        """:type: dict[MDPState, DecisionState] : Each key is the return value of the state()
           method for an MDPStateData. Each datum is the DecisionState object constructed
           with that MDPStateData"""
        self._completions = weakref.WeakValueDictionary()
        """:type: dict[MDPState, ValueState] : Each key is a basis state that appears
           in the value of successors() for sine MDPStateAction object. The data
           pointers are either DecisionState objects in nonterminals or ValueState
           objects representing terminal states"""
        self._inbox = weakref.WeakSet()
        """:type: set[DecisionState] : Includes all DecisionState objects that have not
           executed propagate_value_change since their observed MDPStateData object
           changed"""
        self._outbox = weakref.WeakKeyDictionary()
        """:type: dict[DecisionState, MDPAction] : Each key is a DecisionState object that
           has changed their policy action. The data is the original policy action
           before the first change. (The data can be used to avoid sending policy
           change notifications unnecessarily, when a DecisionState object switches
           back to its original policy action.)"""
    def initialize(self):
        # Delegate start-up to the underlying MDP model.
        self._mdp.initialize()
    def policy(self, state):
        """Outputs (after possibly computing) the optimal policy for
        the MDP.
        Parameters
        ----------
        state : MDPState
            The state at which to evaluate the optimal policy
        Returns
        -------
        DecisionState:
            An object specifying the optimal child action, as well as
            giving access to the StateData describing that state-action's
            behavior.
        """
        # Resolve (or build) the decision state first, then flush any
        # pending change propagation before handing it back.
        ds = self._policy(self.mdp.state_data(state))
        self.plan()
        return ds
    def update(self, state, act, succ):
        # Feed an observed (state, action, successor) transition to the MDP;
        # model-change notifications then flow back through notify().
        self._mdp.update(state, act, succ)
def plan(self):
for ds in self._inbox:
ds.propagate_mdp_change()
self._inbox.clear()
self._propagate_changes()
for ds, act in self._outbox.iteritems():
if ds is not None:
ds.propagate_policy_change(act)
self._outbox = {}
    def debug(self):
        # Dump the full model, value function, and policy to stdout.
        print("model:")
        for ds in self._nonterminals.itervalues():
            ds.debug()
        print("end model")
        print("value function:")
        self.write_value_function()
        print("end value function")
        print("policy:")
        self.write_policy()
        print("end policy")
    def write_value_function(self):
        # One "state value" line per known non-terminal state.
        for s, ds in self._nonterminals.iteritems():
            print("{0} {1}".format(s, ds.value))
    def write_policy(self):
        # Group states by their greedy action, then print each group.
        policy = {}
        for s, ds in self._nonterminals.iteritems():
            states = policy.setdefault(ds.policy_action, [])
            states.append(ds.state)
        for a, states in policy.iteritems():
            print("# {0}:".format(a))
            for s in states:
                print(s)
            print('\n')
    def _propagate_changes(self):
        # Hook for subclasses (e.g. prioritized sweeping); no-op here.
        pass
    def _policy(self, state_data, is_completion=False):
        """
        Parameters
        ----------
        state_data : MDPState.StateData
            The state at which the policy should be evaluated.
        is_completion : bool
            Whether the new DecisionState should also be registered in the
            completions table (used when it backs a successor lookup).
        Returns
        -------
        DecisionState
        """
        # EAFP: look up the cached DecisionState; build it on a miss.
        try:
            ds = self._nonterminals[state_data.state]
        except KeyError:
            ds = type(self).DecisionState.create(self, state_data, is_completion)
            # create() registers the instance in the planner's tables
            assert state_data.state in self._nonterminals
            assert ds == self._nonterminals[state_data.state]
        return ds
    def _successor_value(self, successor):
        """
        Parameters
        ----------
        successor : MDPState
            A basis state that appears in the value of successor_probabilities()
            for some MDPStateAction object
        Returns
        -------
        ValueState
        """
        try:
            vs = self._completions[successor]
        except KeyError:
            if self._terminal(successor):
                # terminal states get a fixed value from the goal function
                vs = Planner.ValueState(self._goal(successor))
            else:
                vs = self._policy(self.mdp.state_data(successor), is_completion=True)
                assert successor in self._completions
                assert vs == self._completions[successor]
        return vs
|
"""
Copyright 2019 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import os
from collections import OrderedDict as ODict
import numpy as np
import torch
import torch.nn as nn
from .utils import MetricAcc
from .loggers import LoggerList, CSVLogger, ProgLogger
class TorchTrainer(object):
    """Generic supervised training loop for a torch model.

    Drives epoch iteration, metric accumulation, optional LR scheduling,
    logger callbacks, and per-epoch checkpointing.
    """

    def __init__(self, model, optimizer, loss, epochs, exp_path, cur_epoch=0,
                 device=None, metrics=None, lr_scheduler=None, loggers=None):
        """
        Args:
            model: torch.nn.Module to train.
            optimizer: torch optimizer over the model's parameters.
            loss: criterion module; must expose state_dict()/load_state_dict().
            epochs: total number of epochs to run.
            exp_path: directory where logs and checkpoints are written.
            cur_epoch: epoch to resume from (0 for a fresh run).
            device: torch.device; defaults to cuda when available, else cpu.
            metrics: dict name -> callable(output, target), or None for none.
            lr_scheduler: optional scheduler with epoch/batch step hooks.
            loggers: LoggerList, plain list of loggers, or None for defaults.
        """
        self.model = model
        self.optimizer = optimizer
        self.loss = loss
        self.epochs = epochs
        self.cur_epoch = cur_epoch
        self.exp_path = exp_path
        if loggers is None:
            self.loggers = self._default_loggers()
        elif isinstance(loggers, list):
            self.loggers = LoggerList(loggers)
        else:
            self.loggers = loggers
        self.lr_scheduler = lr_scheduler
        # Bug fix: metrics defaulted to None, which crashed train_epoch /
        # validation_epoch at `self.metrics.items()`. Normalize to an empty
        # (ordered) dict so "no extra metrics" just works.
        self.metrics = metrics if metrics is not None else ODict()
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device

    def fit(self, train_data, val_data=None):
        """Run the full training loop over `train_data`, optionally
        validating on `val_data` each epoch and checkpointing afterwards."""
        # NOTE(review): the model is assumed to already live on self.device;
        # confirm callers move it there before fit().
        if not os.path.exists(self.exp_path):
            os.makedirs(self.exp_path)
        val_logs = {}
        self.loggers.on_train_begin(epochs=self.epochs)
        for epoch in xrange(self.cur_epoch, self.epochs):
            self.loggers.on_epoch_begin(epoch, samples=len(train_data.dataset))
            if self.lr_scheduler is not None:
                self.lr_scheduler.epoch_begin_step(epoch)

            logs = self.train_epoch(train_data)
            if val_data is not None:
                val_logs = self.validation_epoch(val_data)
                logs.update(val_logs)

            self.cur_epoch += 1
            self.loggers.on_epoch_end(logs)
            if self.lr_scheduler is not None:
                self.lr_scheduler.epoch_end_step(logs)
            self.save_checkpoint(logs)

    def train_epoch(self, data_loader):
        """Train for one epoch; returns the accumulated metric logs."""
        epoch_batches = len(data_loader.dataset)
        total_batches = self.cur_epoch * epoch_batches
        metric_acc = MetricAcc()
        batch_metrics = ODict()
        self.model.train()
        for batch, (data, target) in enumerate(data_loader):
            self.loggers.on_batch_begin(batch)
            if self.lr_scheduler is not None:
                self.lr_scheduler.batch_step()
            data, target = data.to(self.device), target.to(self.device)
            batch_size = data.shape[0]
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.loss(output, target)
            loss.backward()
            self.optimizer.step()
            batch_metrics['loss'] = loss.item()
            for k, metric in self.metrics.items():
                batch_metrics[k] = metric(output, target)
            metric_acc.update(batch_metrics, batch_size)
            logs = metric_acc.metrics
            logs['lr'] = self._get_lr()
            self.loggers.on_batch_end(logs=logs, batch_size=batch_size)
            total_batches += 1

        logs = metric_acc.metrics
        logs['lr'] = self._get_lr()
        return logs

    def validation_epoch(self, data_loader):
        """Evaluate on `data_loader` without gradients; returns logs with
        every key prefixed by 'val_'."""
        metric_acc = MetricAcc()
        batch_metrics = ODict()
        with torch.no_grad():
            self.model.eval()
            for batch, (data, target) in enumerate(data_loader):
                data, target = data.to(self.device), target.to(self.device)
                batch_size = data.shape[0]
                output = self.model(data)
                loss = self.loss(output, target)
                batch_metrics['loss'] = loss.item()
                for k, metric in self.metrics.items():
                    batch_metrics[k] = metric(output, target)
                metric_acc.update(batch_metrics, batch_size)
        logs = metric_acc.metrics
        logs = ODict(('val_' + k, v) for k, v in logs.items())
        return logs

    def _default_loggers(self):
        """Progress logger plus a CSV log file under exp_path (appended when
        resuming from a nonzero epoch)."""
        prog_log = ProgLogger(interval=10)
        csv_log = CSVLogger(self.exp_path + '/train.log', append=self.cur_epoch > 0)
        return LoggerList([prog_log, csv_log])

    def _get_lr(self):
        """Return the learning rate of the first parameter group."""
        for param_group in self.optimizer.param_groups:
            return param_group['lr']

    def checkpoint(self, logs=None):
        """Build (but do not write) a checkpoint dict for the current state."""
        checkpoint = {
            'epoch': self.cur_epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'loss_state_dict': self.loss.state_dict()
        }
        if self.lr_scheduler is not None:
            checkpoint['lr_scheduler_state_dict'] = self.lr_scheduler.state_dict()
        if logs is not None:
            checkpoint['logs'] = logs
        return checkpoint

    def save_checkpoint(self, logs=None):
        """Write the current checkpoint to exp_path/model_epNNNN.pth."""
        checkpoint = self.checkpoint(logs)
        file_path = '%s/model_ep%04d.pth' % (self.exp_path, self.cur_epoch)
        torch.save(checkpoint, file_path)

    def load_checkpoint(self, file_path):
        """Restore model/optimizer/loss (and scheduler) state; returns the
        checkpoint's logs if present, else None."""
        checkpoint = torch.load(file_path)
        self.cur_epoch = checkpoint['epoch']
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.loss.load_state_dict(checkpoint['loss_state_dict'])
        if self.lr_scheduler is not None:
            self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        if 'logs' in checkpoint:
            return checkpoint['logs']
        return None

    def load_last_checkpoint(self):
        """Scan from the last epoch backwards and load the newest checkpoint
        file that exists; returns its logs or None if none found."""
        for epoch in xrange(self.epochs, 0, -1):
            file_path = '%s/model_ep%04d.pth' % (self.exp_path, epoch)
            if os.path.isfile(file_path):
                return self.load_checkpoint(file_path)
        return None
|
# pyvips/voperation.py (from jeffshek/pyvips)
from __future__ import division, print_function
import logging
import pyvips
from pyvips import ffi, vips_lib, Error, _to_bytes, _to_string, GValue, \
type_map, type_from_name, nickname_find
logger = logging.getLogger(__name__)
# values for VipsArgumentFlags
_REQUIRED = 1
_CONSTRUCT = 2
_SET_ONCE = 4
_SET_ALWAYS = 8
_INPUT = 16
_OUTPUT = 32
_DEPRECATED = 64
_MODIFY = 128
# for VipsOperationFlags
_OPERATION_DEPRECATED = 8
# search an array with a predicate, recursing into subarrays as we see them
# used to find the match_image for an operation
def _find_inside(pred, thing):
if pred(thing):
return thing
if isinstance(thing, list) or isinstance(thing, tuple):
for x in thing:
result = _find_inside(pred, x)
if result is not None:
return result
return None
class Operation(pyvips.VipsObject):
    """Call libvips operations.

    This class wraps the libvips VipsOperation class: it resolves an
    operation by nickname, marshals Python arguments to GObject properties,
    builds the operation through the libvips cache, and fetches outputs.
    """
    # cache nickname -> docstring here
    _docstring_cache = {}
    def __init__(self, pointer):
        # logger.debug('Operation.__init__: pointer = %s', pointer)
        super(Operation, self).__init__(pointer)
        # keep a VipsObject* view of the same pointer for argument mapping
        self.object = ffi.cast('VipsObject*', pointer)
    @staticmethod
    def new_from_name(operation_name):
        # Look the nickname up in libvips; NULL means unknown operation.
        vop = vips_lib.vips_operation_new(_to_bytes(operation_name))
        if vop == ffi.NULL:
            raise Error('no such operation {0}'.format(operation_name))
        return Operation(vop)
    def set(self, name, flags, match_image, value):
        # if the object wants an image and we have a constant, _imageize it
        #
        # if the object wants an image array, _imageize any constants in the
        # array
        if match_image:
            gtype = self.get_typeof(name)
            if gtype == pyvips.GValue.image_type:
                value = pyvips.Image._imageize(match_image, value)
            elif gtype == pyvips.GValue.array_image_type:
                value = [pyvips.Image._imageize(match_image, x)
                         for x in value]
        # MODIFY args need to be copied before they are set
        if (flags & _MODIFY) != 0:
            # logger.debug('copying MODIFY arg %s', name)
            # make sure we have a unique copy
            value = value.copy().copy_memory()
        super(Operation, self).set(name, value)
    def get_flags(self):
        # VipsOperationFlags bitmask (e.g. _OPERATION_DEPRECATED)
        return vips_lib.vips_operation_get_flags(self.pointer)
    # this is slow ... call as little as possible
    def get_args(self):
        # Returns [[name, flags], ...] for all construct-time arguments.
        args = []
        def add_construct(self, pspec, argument_class,
                          argument_instance, a, b):
            flags = argument_class.flags
            if (flags & _CONSTRUCT) != 0:
                name = _to_string(pspec.name)
                # libvips uses '-' to separate parts of arg names, but we
                # need '_' for Python
                name = name.replace('-', '_')
                args.append([name, flags])
            return ffi.NULL
        # `cb` must stay referenced for the duration of the map call so the
        # ffi callback object is not garbage collected mid-iteration.
        cb = ffi.callback('VipsArgumentMapFn', add_construct)
        vips_lib.vips_argument_map(self.object, cb, ffi.NULL, ffi.NULL)
        return args
    @staticmethod
    def call(operation_name, *args, **kwargs):
        """Call a libvips operation.
        Use this method to call any libvips operation. For example::
            black_image = pyvips.Operation.call('black', 10, 10)
        See the Introduction for notes on how this works.
        """
        logger.debug('VipsOperation.call: operation_name = %s', operation_name)
        # logger.debug('VipsOperation.call: args = %s, kwargs =%s',
        #              args, kwargs)
        # pull out the special string_options kwarg
        string_options = kwargs.pop('string_options', '')
        logger.debug('VipsOperation.call: string_options = %s', string_options)
        op = Operation.new_from_name(operation_name)
        arguments = op.get_args()
        # logger.debug('arguments = %s', arguments)
        # make a thing to quickly get flags from an arg name
        flags_from_name = {}
        # count required input args
        n_required = 0
        for name, flags in arguments:
            flags_from_name[name] = flags
            if ((flags & _INPUT) != 0 and
                    (flags & _REQUIRED) != 0 and
                    (flags & _DEPRECATED) == 0):
                n_required += 1
        if n_required != len(args):
            raise Error('unable to call {0}: {1} arguments given, '
                        'but {2} required'.format(operation_name, len(args),
                                                  n_required))
        # the first image argument is the thing we expand constants to
        # match ... look inside tables for images, since we may be passing
        # an array of image as a single param
        match_image = _find_inside(lambda x:
                                   isinstance(x, pyvips.Image),
                                   args)
        logger.debug('VipsOperation.call: match_image = %s', match_image)
        # set any string options before any args so they can't be
        # overridden
        if not op.set_string(string_options):
            raise Error('unable to call {0}'.format(operation_name))
        # set required and optional args
        n = 0
        for name, flags in arguments:
            if ((flags & _INPUT) != 0 and
                    (flags & _REQUIRED) != 0 and
                    (flags & _DEPRECATED) == 0):
                op.set(name, flags, match_image, args[n])
                n += 1
        for name, value in kwargs.items():
            if name not in flags_from_name:
                raise Error('{0} does not support argument '
                            '{1}'.format(operation_name, name))
            op.set(name, flags_from_name[name], match_image, value)
        # build operation
        vop = vips_lib.vips_cache_operation_build(op.pointer)
        if vop == ffi.NULL:
            raise Error('unable to call {0}'.format(operation_name))
        op = Operation(vop)
        # fetch required output args, plus modified input images
        result = []
        for name, flags in arguments:
            if ((flags & _OUTPUT) != 0 and
                    (flags & _REQUIRED) != 0 and
                    (flags & _DEPRECATED) == 0):
                result.append(op.get(name))
            if (flags & _INPUT) != 0 and (flags & _MODIFY) != 0:
                result.append(op.get(name))
        # fetch optional output args
        opts = {}
        for name, value in kwargs.items():
            flags = flags_from_name[name]
            if ((flags & _OUTPUT) != 0 and
                    (flags & _REQUIRED) == 0 and
                    (flags & _DEPRECATED) == 0):
                opts[name] = op.get(name)
        vips_lib.vips_object_unref_outputs(op.object)
        if len(opts) > 0:
            result.append(opts)
        # unwrap: no outputs -> None, single output -> the value itself
        if len(result) == 0:
            result = None
        elif len(result) == 1:
            result = result[0]
        logger.debug('VipsOperation.call: result = %s', result)
        return result
    @staticmethod
    def generate_docstring(operation_name):
        """Make a google-style docstring.
        This is used to generate help() output.
        """
        if operation_name in Operation._docstring_cache:
            return Operation._docstring_cache[operation_name]
        op = Operation.new_from_name(operation_name)
        if (op.get_flags() & _OPERATION_DEPRECATED) != 0:
            raise Error('No such operator.',
                        'operator "{0}" is deprecated'.format(operation_name))
        # we are only interested in non-deprecated args
        args = [[name, flags] for name, flags in op.get_args()
                if not flags & _DEPRECATED]
        # find the first required input image arg, if any ... that will be self
        member_x = None
        for name, flags in args:
            if ((flags & _INPUT) != 0 and
                    (flags & _REQUIRED) != 0 and
                    op.get_typeof(name) == GValue.image_type):
                member_x = name
                break
        # partition the remaining args by direction and requiredness
        required_input = [name for name, flags in args
                          if (flags & _INPUT) != 0 and
                          (flags & _REQUIRED) != 0 and
                          name != member_x]
        optional_input = [name for name, flags in args
                          if (flags & _INPUT) != 0 and
                          (flags & _REQUIRED) == 0]
        required_output = [name for name, flags in args
                           if ((flags & _OUTPUT) != 0 and
                               (flags & _REQUIRED) != 0) or
                           ((flags & _INPUT) != 0 and
                            (flags & _REQUIRED) != 0 and
                            (flags & _MODIFY) != 0)]
        optional_output = [name for name, flags in args
                           if (flags & _OUTPUT) != 0 and
                           (flags & _REQUIRED) == 0]
        description = op.get_description()
        result = description[0].upper() + description[1:] + ".\n\n"
        result += "Example:\n"
        result += "   " + ", ".join(required_output) + " = "
        if member_x is not None:
            result += member_x + "." + operation_name + "("
        else:
            result += "pyvips.Image." + operation_name + "("
        result += ", ".join(required_input)
        if len(optional_input) > 0 and len(required_input) > 0:
            result += ", "
        result += ", ".join([x + " = " +
                             GValue.gtype_to_python(op.get_typeof(x))
                             for x in optional_input])
        result += ")\n"
        def argstr(name):
            return (u'    {0} ({1}): {2}\n'.
                    format(name,
                           GValue.gtype_to_python(op.get_typeof(name)),
                           op.get_blurb(name)))
        result += "\nReturns:\n"
        for name in required_output:
            result += argstr(name)
        names = []
        if member_x is not None:
            names += [member_x]
        names += required_input
        result += "\nArgs:\n"
        for name in names:
            result += argstr(name)
        if len(optional_input) > 0:
            result += "\nKeyword args:\n"
            for name in optional_input:
                result += argstr(name)
        if len(optional_output) > 0:
            result += "\nOther Parameters:\n"
            for name in optional_output:
                result += argstr(name)
        result += "\nRaises:\n    :class:`.Error`\n"
        # add to cache to save building again
        Operation._docstring_cache[operation_name] = result
        return result
    @staticmethod
    def generate_sphinx(operation_name):
        """Make a sphinx-style docstring.
        This is used to generate the off-line docs.
        """
        op = Operation.new_from_name(operation_name)
        if (op.get_flags() & _OPERATION_DEPRECATED) != 0:
            raise Error('No such operator.',
                        'operator "{0}" is deprecated'.format(operation_name))
        # we are only interested in non-deprecated args
        args = [[name, flags] for name, flags in op.get_args()
                if not flags & _DEPRECATED]
        # find the first required input image arg, if any ... that will be self
        member_x = None
        for name, flags in args:
            if ((flags & _INPUT) != 0 and
                    (flags & _REQUIRED) != 0 and
                    op.get_typeof(name) == GValue.image_type):
                member_x = name
                break
        required_input = [name for name, flags in args
                          if (flags & _INPUT) != 0 and
                          (flags & _REQUIRED) != 0 and
                          name != member_x]
        optional_input = [name for name, flags in args
                          if (flags & _INPUT) != 0 and
                          (flags & _REQUIRED) == 0]
        required_output = [name for name, flags in args
                           if ((flags & _OUTPUT) != 0 and
                               (flags & _REQUIRED) != 0) or
                           ((flags & _INPUT) != 0 and
                            (flags & _REQUIRED) != 0 and
                            (flags & _MODIFY) != 0)]
        optional_output = [name for name, flags in args
                           if (flags & _OUTPUT) != 0 and
                           (flags & _REQUIRED) == 0]
        if member_x is not None:
            result = '.. method:: '
        else:
            result = '.. staticmethod:: '
        args = []
        args += required_input
        args += [x + ' = ' + GValue.gtype_to_python(op.get_typeof(x))
                 for x in optional_input]
        # optional outputs are requested with a boolean keyword arg
        args += [x + ' = bool'
                 for x in optional_output]
        result += operation_name + '(' + ", ".join(args) + ')\n\n'
        description = op.get_description()
        result += description[0].upper() + description[1:] + '.\n\n'
        result += 'Example:\n'
        result += '    ' + ', '.join(required_output) + ' = '
        if member_x is not None:
            result += member_x + "." + operation_name + '('
        else:
            result += 'pyvips.Image.' + operation_name + '('
        result += ', '.join(required_input)
        if len(optional_input) > 0 and len(required_input) > 0:
            result += ', '
        result += ', '.join([x + ' = ' +
                             GValue.gtype_to_python(op.get_typeof(x))
                             for x in optional_input])
        result += ')\n\n'
        for name in required_input + optional_input:
            result += (':param {0} {1}: {2}\n'.
                       format(GValue.gtype_to_python(op.get_typeof(name)),
                              name,
                              op.get_blurb(name)))
        for name in optional_output:
            result += (':param bool {0}: enable output: {1}\n'.
                       format(name,
                              op.get_blurb(name)))
        output_types = [GValue.gtype_to_python(op.get_typeof(name))
                        for name in required_output]
        if len(output_types) == 1:
            output_type = output_types[0]
        else:
            output_type = 'list[' + ', '.join(output_types) + ']'
        if len(optional_output) > 0:
            output_types += ['Dict[str, mixed]']
            output_type += ' or list[' + ', '.join(output_types) + ']'
        result += ':rtype: ' + output_type + '\n'
        result += ':raises Error:\n'
        return result
    @staticmethod
    def generate_sphinx_all():
        """Generate sphinx documentation.
        This generates a .rst file for all auto-generated image methods. Use it
        to regenerate the docs with something like::
            $ python -c \
"import pyvips; pyvips.Operation.generate_sphinx_all()" > x
        And copy-paste the file contents into doc/vimage.rst in the appropriate
        place.
        """
        # generate list of all nicknames we can generate docstrings for
        all_nicknames = []
        def add_nickname(gtype, a, b):
            nickname = nickname_find(gtype)
            try:
                Operation.generate_sphinx(nickname)
                all_nicknames.append(nickname)
            except Error:
                # skip operators we cannot document (e.g. deprecated)
                pass
            type_map(gtype, add_nickname)
            return ffi.NULL
        type_map(type_from_name('VipsOperation'), add_nickname)
        all_nicknames.sort()
        # remove operations we have to wrap by hand
        exclude = ['scale', 'ifthenelse', 'bandjoin', 'bandrank']
        all_nicknames = [x for x in all_nicknames if x not in exclude]
        # Output summary table
        print('.. class:: pyvips.Image\n')
        print('   .. rubric:: Methods\n')
        print('   .. autosummary::')
        print('      :nosignatures:\n')
        for nickname in all_nicknames:
            print('      ~{0}'.format(nickname))
        print()
        # Output docs
        print()
        for nickname in all_nicknames:
            docstr = Operation.generate_sphinx(nickname)
            docstr = docstr.replace('\n', '\n      ')
            print('   ' + docstr)
def cache_set_max(mx):
    """Set the maximum number of operations libvips will cache.

    `mx` is the operation count limit.
    """
    vips_lib.vips_cache_set_max(mx)
def cache_set_max_mem(mx):
    """Limit the operation cache by memory use.

    `mx` is the memory limit in bytes.
    """
    vips_lib.vips_cache_set_max_mem(mx)
def cache_set_max_files(mx):
    """Limit the operation cache by number of open files.

    `mx` is the open-file limit.
    """
    vips_lib.vips_cache_set_max_files(mx)
def cache_set_trace(trace):
    """Turn on libvips cache tracing.

    `trace` is truthy to enable, falsy to disable.
    """
    vips_lib.vips_cache_set_trace(trace)
# Public API of this module.
__all__ = [
    'Operation',
    'cache_set_max', 'cache_set_max_mem', 'cache_set_max_files',
    'cache_set_trace'
]
|
"""
Tests for the get_string* functions of the ApplicationProperties class
"""
from application_properties import ApplicationProperties
def test_properties_get_string_with_found_value():
    """
    Test fetching a configuration value that is present and string.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "me"})
    # Act
    found_value = properties.get_string_property("property", "")
    # Assert
    assert found_value == "me"
def test_properties_get_string_with_found_value_but_wrong_type():
    """
    Test fetching a configuration value that is present and not string.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": True})
    # Act
    found_value = properties.get_string_property("property", "")
    # Assert: non-string values fall back to the supplied default
    assert found_value == ""
def test_properties_get_string_with_not_found_value():
    """
    Test fetching a configuration value that is not present and string.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "2"})
    # Act
    found_value = properties.get_string_property("other_property", "3")
    # Assert: missing keys yield the supplied default
    assert found_value == "3"
def test_properties_get_string_with_not_found_value_and_no_default_value():
    """
    Test fetching a configuration value that is not present, with no default, and integer.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "2"})
    # Act
    found_value = properties.get_string_property("other_property")
    # Assert: no default means None comes back for missing keys
    assert found_value is None
def test_properties_get_string_with_found_value_validated():
    """
    Test fetching a configuration value that is present and adheres to the validation function.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "2"})
    # Act: the inline predicate accepts "1" and "2"
    found_value = properties.get_string_property(
        "property", "-", lambda property_value: property_value in ["1", "2"]
    )
    # Assert
    assert found_value == "2"
def __sample_string_validation_function(property_value):
    """
    Simple string validation that throws an error if not "1" or "2".
    """
    if property_value in ["1", "2"]:
        return
    raise ValueError("Value '" + str(property_value) + "' is not '1' or '2'")
def test_properties_get_string_with_found_value_not_validated():
    """
    Test fetching a configuration value that is present and does not adhere to the validation function.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "3"})
    # Act: "3" fails the sample validation, so the default is returned
    found_value = properties.get_string_property(
        "property", "-", __sample_string_validation_function
    )
    # Assert
    assert found_value == "-"
def bad_validation_function(property_value):
    """
    Test validation function that always throws an exception.
    """
    message = "huh? " + str(property_value)
    raise Exception(message)
def test_properties_get_string_with_found_value_validation_raises_error():
    """
    Test fetching a configuration value that is present and the validation function raises an error.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "1"})
    # Act: the validator always raises, so the default must come back
    found_value = properties.get_string_property(
        "property", "-", bad_validation_function
    )
    # Assert
    assert found_value == "-"
def test_properties_get_string_with_a_bad_property_name():
    """
    Test fetching a configuration value with a bad property name.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": True})
    caught_exception = None
    # Act
    try:
        properties.get_string_property(1, "3")
        assert False, "Should have raised an exception by now."
    except ValueError as this_exception:
        caught_exception = this_exception
    # Assert
    assert caught_exception, "Expected exception was not raised."
    assert (
        str(caught_exception) == "The propertyName argument must be a string."
    ), "Expected message was not present in exception."
def test_properties_get_string_with_a_bad_default():
    """
    Test fetching a configuration value with a default value that is not a string.
    """
    # Arrange
    properties = ApplicationProperties()
    properties.load_from_dict({"property": "2"})
    caught_exception = None
    # Act
    try:
        properties.get_string_property("property", True)
        assert False, "Should have raised an exception by now."
    except ValueError as this_exception:
        caught_exception = this_exception
    # Assert
    assert caught_exception, "Expected exception was not raised."
    assert (
        str(caught_exception)
        == "The default value for property 'property' must either be None or a 'str' value."
    ), "Expected message was not present in exception."
|
# TPC-H dbgen table loaders for dask
import dask.dataframe as dd
"""
Loads a table file as generated by TPC-H's dbgen.
Returns an uncomputed dataframe - user must persist if desired.
`path` can be a single path or a glob path, and can be local or an S3 url.
https://docs.dask.org/en/latest/dataframe-api.html#dask.dataframe.read_table
"""
def load_part(path):
    """Load the PART table produced by TPC-H dbgen as an uncomputed
    dask dataframe. `path` may be a single path or a glob, local or S3."""
    # Bug fix: removed a leftover `print(path)` debug statement — no other
    # loader in this module prints its argument.
    part_df = dd.read_table(
        path,
        sep='|',
        names=[
            'p_partkey',
            'p_name',
            'p_mfgr',
            'p_brand',
            'p_type',
            'p_size',
            'p_container',
            'p_retailprice',
            'p_comment',
            '(empty)',
        ],
    )
    return part_df
def load_supplier(path):
    # Load the SUPPLIER table as an uncomputed dask dataframe.
    supplier_df = dd.read_table(
        path,
        sep='|',
        names=[
            's_suppkey',
            's_name',
            's_address',
            's_nationkey',
            's_phone',
            's_acctbal',
            's_comment',
            '(empty)',
        ],
    )
    return supplier_df
def load_partsupp(path):
    """Load the PARTSUPP table as an uncomputed dask dataframe."""
    # Consistency fix: every other loader in this module uses dd.read_table;
    # with an explicit sep the two calls behave identically, so align the API
    # (and the keyword spacing) with the siblings.
    partsupp_df = dd.read_table(
        path,
        sep='|',
        names=[
            'ps_partkey',
            'ps_suppkey',
            'ps_availqty',
            'ps_supplycost',
            'ps_comment',
            '(empty)',
        ],
    )
    return partsupp_df
def load_customer(path):
    # Load the CUSTOMER table as an uncomputed dask dataframe.
    customer_df = dd.read_table(
        path,
        sep='|',
        names=[
            'c_custkey',
            'c_name',
            'c_address',
            'c_nationkey',
            'c_phone',
            'c_acctbal',
            'c_mktsegment',
            'c_comment',
            '(empty)',
        ],
    )
    return customer_df
def load_orders(path):
    # Load the ORDERS table as an uncomputed dask dataframe.
    orders_df = dd.read_table(
        path,
        sep='|',
        names=[
            'o_orderkey',
            'o_custkey',
            'o_orderstatus',
            'o_totalprice',
            'o_orderdate',
            'o_orderpriority',
            'o_clerk',
            'o_shippriority',
            'o_comment',
            '(empty)',
        ],
    )
    return orders_df
def load_lineitem(path):
    # Load the LINEITEM table (the largest TPC-H table) as an uncomputed
    # dask dataframe. Date parsing is deliberately left disabled below.
    lineitem_df = dd.read_table(
        path,
        sep='|',
        names=[
            'l_orderkey',
            'l_partkey',
            'l_suppkey',
            'l_linenumber',
            'l_quantity',
            'l_extendedprice',
            'l_discount',
            'l_tax',
            'l_returnflag',
            'l_linestatus',
            'l_shipdate',
            'l_commitdate',
            'l_receiptdate',
            'l_shipinstruct',
            'l_shipmode',
            'l_comment',
            '(empty)',
        ],
        # blocksize= 16 * 1024 * 1024, # 64 MB?
        # parse_dates = [
        #     'l_shipdate',
        #     'l_commitdate',
        #     'l_receiptdate',
        # ],
        # infer_datetime_format=True,
    )
    return lineitem_df
def load_nation(path):
    """Load the TPC-H NATION table from ``path`` as an uncomputed dask dataframe."""
    columns = [
        'n_nationkey',
        'n_name',
        'n_regionkey',
        'n_comment',
        '(empty)',  # dbgen's trailing '|' yields one empty column
    ]
    return dd.read_table(path, sep='|', names=columns)
def load_region(path):
    """Load the TPC-H REGION table from ``path`` as an uncomputed dask dataframe."""
    columns = [
        'r_regionkey',
        'r_name',
        'r_comment',
        '(empty)',  # dbgen's trailing '|' yields one empty column
    ]
    return dd.read_table(path, sep='|', names=columns)
# A helper for loading by tablename string.
# Maps each upper-case TPC-H table name to its loader function, e.g.
# loader['LINEITEM'](path) -> dask dataframe.
loader = {
    'PART': load_part,
    'SUPPLIER': load_supplier,
    'PARTSUPP': load_partsupp,
    'CUSTOMER': load_customer,
    'ORDERS': load_orders,
    'LINEITEM': load_lineitem,
    'NATION': load_nation,
    'REGION': load_region,
}
|
<reponame>qaz734913414/nniefacelib
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
######################################################
#
# pfld.py -
# written by zhaozhichao and Hanson
#
######################################################
import torch
import torch.nn as nn
import math
import torch.nn.init as init
def conv_bn(inp, oup, kernel, stride, padding=1):
    """Conv2d -> BatchNorm2d -> ReLU block (conv has no bias; BN provides the shift)."""
    layers = [
        nn.Conv2d(inp, oup, kernel, stride, padding, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) Conv2d -> BatchNorm2d -> ReLU block."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual: 1x1 expand -> 3x3 depthwise -> 1x1 project.

    The projection is linear (no ReLU). When ``use_res_connect`` is true the
    input is added back to the output (caller must ensure matching shapes).
    """

    def __init__(self, inp, oup, stride, use_res_connect, expand_ratio=6):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        self.use_res_connect = use_res_connect
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # depthwise 3x3 (one filter per channel via groups=hidden)
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # linear pointwise projection
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
class PFLDInference(nn.Module):
    """PFLD landmark backbone with auxiliary head-pose branch.

    forward() takes a (N, 3, 112, 112) image batch and returns
    (pose, landmarks): pose is (N, 3) from the auxiliary branch,
    landmarks is (N, 196) i.e. 98 (x, y) points from the main branch.
    """

    def __init__(self):
        super(PFLDInference, self).__init__()
        # Stem: two 3x3 convs; the first halves spatial resolution.
        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        # NOTE(review): self.relu is assigned twice; harmless since both are
        # identical stateless ReLU modules, but the second assignment is redundant.
        self.relu = nn.ReLU(inplace=True)
        # Stage 3: stride-2 entry block then four residual blocks at 64 channels.
        self.conv3_1 = InvertedResidual(64, 64, 2, False, 2)
        self.block3_2 = InvertedResidual(64, 64, 1, True, 2)
        self.block3_3 = InvertedResidual(64, 64, 1, True, 2)
        self.block3_4 = InvertedResidual(64, 64, 1, True, 2)
        self.block3_5 = InvertedResidual(64, 64, 1, True, 2)
        # Stages 4-5: downsample to 128 channels, then five residual blocks.
        self.conv4_1 = InvertedResidual(64, 128, 2, False, 2)
        self.conv5_1 = InvertedResidual(128, 128, 1, False, 4)
        self.block5_2 = InvertedResidual(128, 128, 1, True, 4)
        self.block5_3 = InvertedResidual(128, 128, 1, True, 4)
        self.block5_4 = InvertedResidual(128, 128, 1, True, 4)
        self.block5_5 = InvertedResidual(128, 128, 1, True, 4)
        self.block5_6 = InvertedResidual(128, 128, 1, True, 4)
        # Multi-scale feature taps for the landmark head.
        self.conv6_1 = InvertedResidual(128, 16, 1, False, 2)  # [16, 14, 14]
        self.conv7 = conv_bn(16, 32, 3, 2)  # [32, 7, 7]
        self.conv8 = nn.Conv2d(32, 128, 7, 1, 0)  # [128, 1, 1]
        self.bn8 = nn.BatchNorm2d(128)
        self.avg_pool1 = nn.AvgPool2d(14)
        self.avg_pool2 = nn.AvgPool2d(7)
        # 176 = 16 + 32 + 128 concatenated multi-scale features.
        self.fc = nn.Linear(176, 196)
        # NOTE(review): fc_aux appears unused in forward() (fc2_aux is used instead).
        self.fc_aux = nn.Linear(176, 3)
        # Auxiliary pose branch fed from the stage-3 output.
        self.conv1_aux = conv_bn(64, 128, 3, 2)
        self.conv2_aux = conv_bn(128, 128, 3, 1)
        self.conv3_aux = conv_bn(128, 32, 3, 2)
        self.conv4_aux = conv_bn(32, 128, 7, 1)
        self.max_pool1_aux = nn.MaxPool2d(3)
        self.fc1_aux = nn.Linear(128, 32)
        self.fc2_aux = nn.Linear(32 + 176, 3)

    def forward(self, x):  # x: 3, 112, 112
        x = self.relu(self.bn1(self.conv1(x)))  # [64, 56, 56]
        x = self.relu(self.bn2(self.conv2(x)))  # [64, 56, 56]
        x = self.conv3_1(x)
        x = self.block3_2(x)
        x = self.block3_3(x)
        x = self.block3_4(x)
        # out1 is tapped here to feed the auxiliary pose branch.
        out1 = self.block3_5(x)
        x = self.conv4_1(out1)
        x = self.conv5_1(x)
        x = self.block5_2(x)
        x = self.block5_3(x)
        x = self.block5_4(x)
        x = self.block5_5(x)
        x = self.block5_6(x)
        x = self.conv6_1(x)
        # Three scales pooled/flattened then concatenated: 16 + 32 + 128 = 176.
        x1 = self.avg_pool1(x)
        x1 = x1.view(x1.size(0), -1)
        x = self.conv7(x)
        x2 = self.avg_pool2(x)
        x2 = x2.view(x2.size(0), -1)
        x3 = self.relu(self.conv8(x))
        x3 = x3.view(x1.size(0), -1)
        multi_scale = torch.cat([x1, x2, x3], 1)
        landmarks = self.fc(multi_scale)
        # Auxiliary pose branch: conv stack on out1, then fuse with multi_scale.
        aux = self.conv1_aux(out1)
        aux = self.conv2_aux(aux)
        aux = self.conv3_aux(aux)
        aux = self.conv4_aux(aux)
        aux = self.max_pool1_aux(aux)
        aux = aux.view(aux.size(0), -1)
        aux = self.fc1_aux(aux)
        aux = torch.cat([aux, multi_scale], 1)
        pose = self.fc2_aux(aux)
        return pose, landmarks
if __name__ == '__main__':
    # Smoke test: push one random 112x112 RGB image through the backbone
    # and report the output shapes. (Renamed `input` to avoid shadowing
    # the builtin.)
    dummy = torch.randn(1, 3, 112, 112)
    plfd_backbone = PFLDInference()
    angle, landmarks = plfd_backbone(dummy)
    print(plfd_backbone)
    print("angle.shape:{0:}, landmarks.shape: {1:}".format(
        angle.shape, landmarks.shape))
|
<reponame>ashis-Nayak-13/airbyte
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
from urllib import parse
import pendulum
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
class OktaStream(HttpStream, ABC):
    """Base stream for the Okta API: cursor pagination and rate-limit backoff."""

    page_size = 200

    def __init__(self, url_base: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Inject custom url base to the stream
        self._url_base = url_base

    @property
    def url_base(self) -> str:
        return self._url_base

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Follow the next page cursor
        # https://developer.okta.com/docs/reference/api-overview/#pagination
        link_regex = r'<(.+?)>; rel="(.+?)"[,\s]*'
        # Fix: some responses carry no Link header at all; treat that as
        # "no next page" instead of raising KeyError.
        raw_links = response.headers.get("link", "")
        for link, cursor_type in re.findall(link_regex, raw_links):
            if cursor_type == "next":
                parsed_link = parse.urlparse(link)
                return dict(parse.parse_qsl(parsed_link.query))
        return None

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        # Page-size limit plus any cursor params carried over from the Link header.
        return {
            "limit": self.page_size,
            **(next_page_token or {}),
        }

    def parse_response(
        self,
        response: requests.Response,
        **kwargs,
    ) -> Iterable[Mapping]:
        # Okta returns a JSON array of records at the top level.
        yield from response.json()

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        # The rate limit resets on the timestamp indicated
        # https://developer.okta.com/docs/reference/rate-limits
        if response.status_code == requests.codes.TOO_MANY_REQUESTS:
            next_reset_epoch = int(response.headers["x-rate-limit-reset"])
            next_reset = pendulum.from_timestamp(next_reset_epoch)
            return pendulum.utcnow().diff(next_reset).seconds
        return None
class IncrementalOktaStream(OktaStream, ABC):
    """Adds incremental-sync state handling on top of OktaStream."""

    @property
    @abstractmethod
    def cursor_field(self) -> str:
        # Name of the record field used as the incremental cursor.
        pass

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        # Advance the cursor to the max of the stored state and the latest
        # record; pendulum.datetime.min stringified acts as "older than anything".
        # (String comparison works because both sides are ISO-8601 timestamps.)
        lowest_date = str(pendulum.datetime.min)
        return {
            self.cursor_field: max(
                latest_record.get(self.cursor_field, lowest_date),
                current_stream_state.get(self.cursor_field, lowest_date),
            )
        }

    def request_params(self, stream_state=None, **kwargs):
        # Add an Okta filter expression so only records newer than the saved
        # cursor are returned.
        stream_state = stream_state or {}
        params = super().request_params(stream_state=stream_state, **kwargs)
        latest_entry = stream_state.get(self.cursor_field)
        if latest_entry:
            params["filter"] = f"{self.cursor_field} gt {latest_entry}"
        return params
class Groups(IncrementalOktaStream):
    """Okta groups endpoint, incremental on lastUpdated."""

    cursor_field = "lastUpdated"
    primary_key = "id"

    def path(self, **kwargs) -> str:
        return "groups"
class Logs(IncrementalOktaStream):
    """Okta system log endpoint, incremental on the event publish time."""

    cursor_field = "published"
    primary_key = "uuid"

    def path(self, **kwargs) -> str:
        return "logs"
class Users(IncrementalOktaStream):
    """Okta users endpoint, incremental on lastUpdated."""

    cursor_field = "lastUpdated"
    primary_key = "id"

    def path(self, **kwargs) -> str:
        return "users"
class SourceOkta(AbstractSource):
    """Airbyte source connector for the Okta API."""

    def initialize_authenticator(self, config: Mapping[str, Any]) -> TokenAuthenticator:
        # Okta uses the "SSWS" scheme for API tokens instead of "Bearer".
        return TokenAuthenticator(config["token"], auth_method="SSWS")

    def get_url_base(self, config: Mapping[str, Any]) -> str:
        # All streams live under the /api/v1/ prefix of the configured org URL.
        return parse.urljoin(config["base_url"], "/api/v1/")

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """Probe the /users endpoint with limit=1 to validate credentials."""
        try:
            auth = self.initialize_authenticator(config)
            base_url = self.get_url_base(config)
            url = parse.urljoin(base_url, "users")
            response = requests.get(
                url,
                params={"limit": 1},
                headers=auth.get_auth_header(),
            )
            if response.status_code == requests.codes.ok:
                return True, None
            # Non-200: surface the API's JSON error body to the user.
            return False, response.json()
        except Exception:
            return False, "Failed to authenticate with the provided credentials"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        # All streams share the same authenticator and injected url base.
        auth = self.initialize_authenticator(config)
        url_base = self.get_url_base(config)
        initialization_params = {
            "authenticator": auth,
            "url_base": url_base,
        }
        return [
            Groups(**initialization_params),
            Logs(**initialization_params),
            Users(**initialization_params),
        ]
|
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import yaml
import shutil
import argparse
import subprocess
import requests
def sanitizeStrList(strlist):
    """Coerce a value into a list of strings; non-list inputs are wrapped."""
    if not isinstance(strlist, list):
        return [str(strlist)]
    return [str(element) for element in strlist]
def expandPath(path):
    """Expand environment variables and a leading '~' in *path*."""
    return os.path.expanduser(os.path.expandvars(path))
def getUrlFileName(url):
    """Return the last path component of *url*, or "noname" if it has no file part.

    Fixes: raw strings for the regexes (the originals contained invalid
    escape sequences like ``\\:`` and ``\\/``) and keyword ``count``/``flags``
    arguments to ``re.sub`` (positional use is deprecated in modern Python).
    """
    # Strip the scheme, then the host; what's left starts with '/'.
    path = re.sub(r"^https?://", "", url, count=1, flags=re.IGNORECASE)
    path = re.sub(r"^[^/]*", "", path, count=1)
    filename = path.split("/")[-1]
    return filename if filename else "noname"
def parseDownload(download):
    """Parse a "<url> [destination]" download spec into (url, destfile).

    Without an explicit destination the URL's file name is used; a
    destination ending in '/' is treated as a directory and the URL's
    file name is appended.
    """
    pieces = download.strip().split(" ", 1)
    url = pieces[0]
    if len(pieces) > 1:
        destfile = expandPath(pieces[1].strip())
    else:
        destfile = getUrlFileName(url)
    if os.path.basename(destfile) == "":
        destfile = "{}{}".format(destfile, getUrlFileName(url))
    return url, destfile
class Recipe(object):
    """An install recipe parsed from a YAML mapping.

    Fields: name (required), version (stringified, default "0"), and the
    command/URL lists dependencies, install, check and download.
    """

    def __init__(self, recipeDict):
        if not isinstance(recipeDict, dict):
            raise ValueError("Invalid recipe")
        self.name = recipeDict.get("name", "")
        if self.name == "":
            raise ValueError("Invalid name")
        self.version = str(recipeDict.get("version", "0"))
        self.dependencies = sanitizeStrList(recipeDict.get("dependencies", []))
        self.install = sanitizeStrList(recipeDict.get("install", []))
        self.check = sanitizeStrList(recipeDict.get("check", []))
        self.download = sanitizeStrList(recipeDict.get("download", []))

    @staticmethod
    def loadFile(filePath):
        """Load and parse a recipe from a YAML file."""
        with open(filePath) as recipeFile:
            return Recipe(yaml.safe_load(recipeFile))
class FailedCommandError(RuntimeError):
    """Raised when a recipe shell command exits with a non-zero status."""
    pass
class FailedRecipeError(RuntimeError):
    """Raised when a whole recipe (or one of its dependencies) fails."""
    pass
def setEnv(cmdconfig, recipe):
    """Export recipe metadata and directory layout as environment variables.

    Recipe commands (run through the shell) reference these, e.g. $PREFIX,
    $PKGDIR. Mutates os.environ for the whole process.
    """
    os.environ["NAME"] = recipe.name
    os.environ["PKGNAME"] = recipe.name
    os.environ["VERSION"] = recipe.version
    os.environ["PREFIX"] = cmdconfig.prefix
    os.environ["SRCDIR"] = cmdconfig.srcDir
    os.environ["BINDIR"] = cmdconfig.binDir
    os.environ["LIBDIR"] = cmdconfig.libDir
    os.environ["INCDIR"] = cmdconfig.incDir
    os.environ["PKGDIR"] = cmdconfig.pkgDir
    os.environ["TMPDIR"] = cmdconfig.tmpDir
    pathVar = os.getenv("PATH","")
    # Prepend the prefix's bin dir to PATH (only once) so freshly installed
    # tools are found by later recipe commands.
    if cmdconfig.binDir not in pathVar.split(":"): os.environ["PATH"] = "{}:{}".format(cmdconfig.binDir,pathVar)
def createDir(directory):
    """Create *directory* (and parents) if it does not already exist.

    The original exists-then-makedirs check had a TOCTOU race: a concurrent
    process creating the directory between the check and the call made
    makedirs raise. Attempt the creation and tolerate "already exists".
    """
    try:
        os.makedirs(directory)
    except OSError:
        # Re-raise unless the directory now exists (created by us losing a
        # race, or it existed all along).
        if not os.path.isdir(directory):
            raise
def ensureDirs(cmdconfig):
    """Make sure every directory referenced by *cmdconfig* exists."""
    for directory in (cmdconfig.prefix, cmdconfig.srcDir, cmdconfig.binDir,
                      cmdconfig.libDir, cmdconfig.incDir, cmdconfig.tmpDir,
                      cmdconfig.pkgDir):
        createDir(directory)
def retrieveUrl(url, filename):
    """Stream *url* into *filename* (identity encoding => raw bytes on the wire)."""
    response = requests.get(url, stream=True, headers={"Accept-Encoding": "identity"})
    if response.status_code != 200:
        raise RuntimeError("Server returned code '{}'".format(response.status_code))
    with open(filename, "wb") as f:
        shutil.copyfileobj(response.raw, f)
def loadRemoteRecipe(url, cacheDir):
    """Download a remote recipe into the cache directory and parse it.

    The URL (with its scheme stripped) is mirrored as a directory path
    under *cacheDir*, holding a recipe.yaml file.
    """
    destdir = "{}/{}".format(cacheDir, re.sub("^(https?\:\/\/)", "", url, 1))
    recipeFile = "{}/recipe.yaml".format(destdir)
    createDir(destdir)
    retrieveUrl(url, recipeFile)
    return Recipe.loadFile(recipeFile)
def run(commands):
    # Execute each command through the shell, echoing it first; abort with
    # FailedCommandError on the first non-zero exit status.
    # (Python 2 file: print statements.)
    for command in sanitizeStrList(commands):
        print command
        result = subprocess.call(command, shell=True)
        if (result != 0): raise FailedCommandError("Command '{}' returned with code '{}'".format(command, result))
def installDeps(dependencies, config):
    # Resolve each dependency by re-invoking this same script (config.exe)
    # with the same prefix; raise FailedRecipeError if any child run fails.
    for dependency in dependencies:
        print "Installing dependency '{}'".format(dependency)
        result = subprocess.call([config.exe, "--prefix", config.prefix, dependency])
        if result != 0: raise FailedRecipeError("Failed to run dependency recipe '{}'".format(dependency))
        print "Dependency '{}' installed successfully".format(dependency)
def runRecipe(recipe):
    # Run a recipe: skip if its check commands succeed, otherwise run the
    # install commands; raise FailedRecipeError on failure.
    # NOTE(review): the __main__ block below inlines this same flow instead
    # of calling this function — it appears to be unused; confirm before removing.
    try:
        print "Checking for '{}' '{}'".format(recipe.name, recipe.version)
        run(recipe.check)
        print "'{}' '{}' is already installed".format(recipe.name, recipe.version)
        return
    except (FailedCommandError) as e:
        print "'{}' '{}' is not installed".format(recipe.name, recipe.version)
    print "Installing '{}' '{}'".format(recipe.name, recipe.version)
    try:
        run(recipe.install)
        print "'{}' '{}' installed successfully".format(recipe.name, recipe.version)
        return
    except (FailedCommandError) as e:
        print "Failed to install '{}' '{}'".format(recipe.name, recipe.version)
        print e
    raise FailedRecipeError("Recipe for '{}' '{}' failed".format(recipe.name, recipe.version))
class CmdConfig(object):
    """Command-line configuration plus the directory layout derived from the prefix."""

    DEFAULT_PREFIX = "{}/build".format(os.getcwd())

    def __init__(self, recipeFile, prefix=DEFAULT_PREFIX):
        self.recipeFile = recipeFile
        self.prefix = prefix
        # Conventional layout beneath the prefix.
        self.srcDir = "{}/src".format(prefix)
        self.binDir = "{}/bin".format(prefix)
        self.libDir = "{}/lib".format(prefix)
        self.incDir = "{}/include".format(prefix)
        self.tmpDir = "/tmp/deptool"
        self.cacheDir = "{}/var/cache/deptool".format(prefix)
        self.pkgDir = None  # filled in once the recipe name/version is known
        self.cwd = os.getcwd()
        self.exe = os.path.realpath(__file__)
def parseCmdConfig():
    """Parse command-line arguments into a CmdConfig instance."""
    parser = argparse.ArgumentParser(description="Dependency resolving tool")
    parser.add_argument(
        "--prefix",
        dest="prefix",
        default=CmdConfig.DEFAULT_PREFIX,
        help="Sets the PREFIX environment variable that points to the directory containing all the installed files. Defaults to ${PWD}/build",
    )
    parser.add_argument("recipe", help="Recipe file's path")
    args = parser.parse_args()
    return CmdConfig(args.recipe, args.prefix)
if __name__ == "__main__":
cmdconfig = parseCmdConfig()
if re.match("^https?\:\/\/.*",cmdconfig.recipeFile): recipe = loadRemoteRecipe(cmdconfig.recipeFile, cmdconfig.cacheDir)
else: recipe = Recipe.loadFile(cmdconfig.recipeFile)
if not cmdconfig.pkgDir: cmdconfig.pkgDir = "{}/{}/{}".format(cmdconfig.srcDir, recipe.name, recipe.version)
setEnv(cmdconfig, recipe)
ensureDirs(cmdconfig)
os.chdir(cmdconfig.pkgDir)
try:
print "Checking for '{}' '{}'".format(recipe.name, recipe.version)
run(recipe.check)
print "'{}' '{}' is already installed".format(recipe.name, recipe.version)
exit(0)
except (FailedCommandError) as e:
print "'{}' '{}' is not installed".format(recipe.name, recipe.version)
os.chdir(cmdconfig.cwd)
print "Installing dependencies for '{}' '{}'".format(recipe.name, recipe.version)
try:
installDeps(recipe.dependencies, cmdconfig)
except (FailedRecipeError) as e:
print e
print "Recipe for '{}' '{}' failed cause can't satisfy some dependencies".format(recipe.name, recipe.version)
exit(1)
os.chdir(cmdconfig.tmpDir)
for download in recipe.download:
url,destfile = parseDownload(download)
dirname = os.path.dirname(destfile)
if dirname != "": createDir(dirname)
if os.path.exists(destfile): print "File '{}' already downloaded into '{}'. Skipping.".format(url, destfile)
else: print "Downloading '{}' into '{}'".format(url, destfile)
retrieveUrl(url, destfile)
os.chdir(cmdconfig.pkgDir)
print "Installing '{}' '{}'".format(recipe.name, recipe.version)
try:
run(recipe.install)
print "'{}' '{}' installed successfully".format(recipe.name, recipe.version)
exit(0)
except (FailedCommandError) as e:
print "Failed to install '{}' '{}'".format(recipe.name, recipe.version)
print e
print "Recipe for '{}' '{}' failed".format(recipe.name, recipe.version)
exit(1)
|
"""
A script for rules-based entity recognition. Unused in final system, but made available for further development.
"""
from spacy import load
import re
from spacy.tokens import DocBin, Doc
from spacy.training.example import Example
from spacy.scorer import Scorer
from spacy.language import Language
from spacy.pipeline import EntityRuler
@Language.factory("custom_entity_ruler")
def create_custom_entity_ruler(nlp: Language, name: str):
    # spaCy pipeline-factory hook; `name` is required by the factory
    # signature but unused here.
    return custom_entity_ruler(nlp)
class custom_entity_ruler:
    """Rule-based entity recognizer for trial-report text.

    Wraps a spaCy EntityRuler loaded with token patterns for intervention
    groups/arms (INTV) and measurements (MEAS), plus "ignore" labels
    (COMSTAT, PVAL, TIMR, TIME) that are matched only so they can be
    filtered out of the final entity set in __call__.

    Fix vs. original: two pattern keys were spelled "LIkE_NUM" (lowercase
    k); they only worked because spaCy normalizes attribute-key case.
    Normalized to "LIKE_NUM".
    """

    def __init__(self, nlp):
        #### set pattern variables to add to entity_ruler ####
        # procedure patterns
        with open("./datasets/procedure_suffix_regex.txt") as regex_input:
            PROC_SFX = [{"LOWER": {"REGEX": regex_input.read()}}, {"TEXT": "-", "OP": "?"}]
        G_DENOM = [{"LOWER": "group"},
                   {"TEXT": {"REGEX": "(([Aa]|[Bb]|([Oo]ne)|1|([Tt]wo)|2)(?!.))"}}]  # group with denomination
        A_DENOM = [{"LOWER": "arm"},
                   {"TEXT": {"REGEX": "(([Aa]|[Bb]|([Oo]ne)|1|([Tt]wo)|2)(?!.))"}}]  # arm with denomination
        ADJ_GROUP = [{"POS": "ADJ", "OP": "+"}, {"LOWER": "group"}]  # adj + group
        NOUN_GROUP = [{"POS": "NOUN", "OP": "+"}, {"LOWER": "group"}]  # noun + group
        ADJ_ARM = [{"POS": "ADJ", "OP": "+"}, {"LOWER": "arm"}]  # adj + arm
        NOUN_ARM = [{"POS": "NOUN", "OP": "+"}, {"LOWER": "arm"}]  # noun + arm
        COMBO = [{"LOWER": {"REGEX": "plus|\+|with"}}]
        G_OR_A = [{"LOWER": {"REGEX": "((group|arm)(?!s))"}, "OP": "?"}]
        # number patterns
        NUM = [{"LIKE_NUM": True}]
        NUM_BRACKET = [{"TEXT": "(", "OP": "?"}, {"TEXT": "[", "OP": "?"}, {"LIKE_NUM": True}, {"TEXT": ")", "OP": "?"},
                       {"TEXT": "]", "OP": "?"}]
        NUM_OP = [{"LIKE_NUM": True, "OP": "?"}]
        NONE = [{"LOWER": "none"}]
        NO = [{"LOWER": "no"}]
        # percentage patterns
        PERC = [{"LIKE_NUM": True}, {"IS_SPACE": True, "OP": "?"}, {"TEXT": "%"}]  # percentage alone
        PERC_ABS = [{"OP": "?"}, {"IS_PUNCT": True, "OP": "?"}] + PERC + [
            {"IS_PUNCT": True, "OP": "?"}]  # percentage after absolute value
        # interval patterns
        IVAL_SEP = [{"LIKE_NUM": True}, {"TEXT": "+"}, {"TEXT": "/"}, {"TEXT": "-"},
                    {"LIKE_NUM": True}]  # separate plus minus signs (key casing fixed)
        IVAL_COMB = [{"LIKE_NUM": True}, {"TEXT": "±"}, {"LIKE_NUM": True}]  # combined plus minus signs (key casing fixed)
        ONE_TOKEN_IVAL = [{"TEXT": {"REGEX": "(\d+\.?\d*±\d+\.?\d*)|(\d+\.?\d*\+\/\-\d+\.?\d*)"}}]
        # range patterns
        RANGE = [{"TEXT": {"REGEX": "(to)|[-]"}}]
        # unit patterns
        UNIT = [{"LOWER": "mm", "OP": "?"},
                {"TEXT": {"REGEX": "(mm)?[Hh][Gg]|mg\/m[Ll]|mg|m[Ll]"}, "OP": "?"}]
        # non-result patterns to ignore
        TIME = NUM_OP + \
               [{"TEXT": {"REGEX": "([Yy]ears?)|([Mm]onths?)|([Ww]eeks?)|([Dd]ays?)|([Hh]ours?)|([Mm]inutes?)"}}] + \
               NUM_OP + [{"LOWER": "and", "OP": "?"}] + NUM_OP  # time point
        TIME_RANGE = NUM + RANGE + TIME  # time range
        PVAL = [{"LOWER": "p"}, {"TEXT": {"REGEX": "<|="}}] + NUM  # pvalue
        COMP_STAT = [{"TEXT": {"REGEX": "HR|OR"}}, {"IS_PUNCT": True, "OP": "?"},
                     {"IS_SPACE": True, "OP": "?"}, {"IS_PUNCT": True, "OP": "?"}] + NUM  # comparative statistics
        # pattern variables for entity_ruler in different combinations to create more complex rule-based matching
        patterns = [{"label": "INTV", "pattern": PROC_SFX + COMBO + PROC_SFX + G_OR_A},  # named combination
                    {"label": "INTV", "pattern": PROC_SFX + PERC + G_OR_A},
                    # named treatment with concentration
                    {"label": "INTV", "pattern": PROC_SFX + G_OR_A},  # named treatment
                    {"label": "INTV", "pattern": G_DENOM},  # generic group with denomination e.g. a/b/1/2
                    {"label": "INTV", "pattern": A_DENOM},  # generic arm with denomination e.g. a/b/1/2
                    {"label": "INTV", "pattern": ADJ_GROUP},  # adjective phrase group
                    {"label": "INTV", "pattern": NOUN_GROUP},  # noun phrase group
                    {"label": "INTV", "pattern": ADJ_ARM},  # adjective phrase arm
                    {"label": "INTV", "pattern": NOUN_ARM},  # noun phrase arm
                    {"label": "COMSTAT", "pattern": COMP_STAT},  # ignore pattern
                    {"label": "PVAL", "pattern": PVAL},  # ignore pattern
                    {"label": "TIMR", "pattern": TIME_RANGE},  # ignore pattern
                    {"label": "TIME", "pattern": TIME},  # ignore pattern
                    {"label": "MEAS", "pattern":
                        ONE_TOKEN_IVAL + UNIT + RANGE + ONE_TOKEN_IVAL + UNIT},  # range result
                    {"label": "MEAS", "pattern": IVAL_SEP + UNIT + RANGE + IVAL_SEP + UNIT},  # range result
                    {"label": "MEAS", "pattern":
                        NUM + UNIT + RANGE + NUM + UNIT},  # range result
                    {"label": "MEAS", "pattern": ONE_TOKEN_IVAL + UNIT},  # interval result
                    {"label": "MEAS", "pattern": IVAL_SEP + UNIT},  # interval result
                    {"label": "MEAS", "pattern": IVAL_COMB + UNIT},  # interval result
                    {"label": "MEAS", "pattern": NUM + NUM_OP + UNIT + PERC_ABS},  # perc result
                    {"label": "MEAS", "pattern": PERC + NUM_OP},  # perc result
                    {"label": "MEAS", "pattern": PERC + NUM_BRACKET},  # perc result
                    {"label": "MEAS", "pattern": PERC},  # perc result
                    {"label": "MEAS", "pattern": NUM + NUM_OP + UNIT},  # absolute (with optional interval) result
                    {"label": "MEAS", "pattern": NONE},  # absolute number result
                    {"label": "MEAS", "pattern": NO}]  # absolute number result
        # Add patterns
        self.ruler = EntityRuler(nlp, overwrite_ents=False)
        self.ruler.add_patterns(patterns)
        # NOTE(review): initialize() receives the same patterns again — this
        # looks redundant with add_patterns above; confirm before removing.
        self.ruler.initialize(lambda: [], nlp=nlp, patterns=patterns)

    def __call__(self, doc: Doc) -> Doc:
        # Remove ignore patterns from doc when called -- these patterns are detected as useful for context
        ents = [ent for ent in self.ruler(doc).ents if ent.label_ not in ["COMSTAT", "PVAL", "TIMR", "TIME"]]
        doc.ents = ents
        return doc
# load pre-trained model for feature extraction such as pos tagging
#nlp = load("C:\\Users\\jetsu\\1. Degree stuff\\COMP0073 Summer project\\spacy_re\\training\\model-best")
#nlp.add_pipe("custom_entity_ruler")
# add domain specific special cases to tokeniser
#bslash_minus = [{"ORTH": "/"}, {"ORTH": "-"}]
#nlp.tokenizer.add_special_case("/-", bslash_minus)
#def ent_match(doc,patterns):
#ents = []
#for pattern in patterns:
#print(doc.text)
#print([toks for toks in doc])
#for match in re.finditer(pattern["regex"], doc.text):
# start, end = match.span()
# print(f"The matched text: '{doc.text[start:end]}'")
# span = doc.char_span(start, end, label=pattern["label"], alignment_mode="expand")
# if span != None:
# ents.append(span)
# print([(ent, ent.label_) for ent in ents])
# doc.ents = ents
# return [(ent, ent.label_) for ent in doc.ents]
#with open("C:\\Users\\jetsu\\1. Degree stuff\\COMP0073 Summer project\\spacy_re\pre-processing\\assets\\clean_sentences_2.txt") as input:
# for line in input:
# doc = nlp(line)
# print([(ent, ent.label_) for ent in doc.ents])
# print([(toks.text, toks.pos_) for toks in doc])
# print(doc)
#def create_examples(nlp, test_data):
#doc_bin = DocBin(store_user_data=True).from_disk(test_data)
#docs = doc_bin.get_docs(nlp.vocab)
examples = []  # NOTE(review): stray leftover from the commented-out create_examples body above; unused
#for gold in docs:
# examples.append(Example(nlp(gold.text, disable="ner"), gold))
#for example in examples:
# print("\n\n",example.text)
# print("predicted-->",[(ent.text,ent.label_) for ent in example.predicted.ents])
# print("gold-->", [(ent.text, ent.label_) for ent in example.reference.ents])
# scorer = Scorer(nlp)
# scores = scorer.score_spans(examples, 'ents')
# print(scores)
#test_data = "../../data/test.spacy"
#create_examples(nlp, test_data)
#text = ("""IOPs at the 3 time points assessed during 15% the 12-week visit ranged from 17.4 to 18.6 mm Hg for PF tafluprost and 17.9 to 18.5 mm Hg for PF timolol""")
#@<EMAIL>.component("result_entity_basic_elements")
#def measure_elements(doc, nlp):
# ruler = EntityRuler(nlp)
#ruler.add_patterns(patterns)
#doc = ruler(doc)
#return doc
#print(ent_match(doc,patterns))
#def rule_matcher(input):
# for text in input:
# doc = nlp(text)
# doc = ruler_one(doc)
# print([(ent, ent.label_) for ent in doc.ents])
# print(doc)
# Construction from class
#ruler = EntityRuler(nlp, overwrite_ents=True)
#match = Matcher(nlp.vocab)
#num_rule.add_patterns([{"label": "result", "pattern": [{"LIKE_NUM": True}{"LIKE_NUM},{"TEXT":"to"},{"LIKE_NUM": True}]}])
#doc = nlp(text)
#print ([(ent, ent.label_) for ent in doc.ents])
|
# _*_ encoding: utf-8 _*_
from copy import copy
from django.template import Library, loader, Context
from django.contrib.admin.templatetags.admin_static import static
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.core.serializers import serialize
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
from django.utils import simplejson
from django.core.urlresolvers import reverse
register = Library()
@register.simple_tag(takes_context=True)
def instance_card(context, instance=None):
    """Render the per-model "card" template for *instance*.

    Falls back to context['instance'] when no instance is passed; otherwise
    the given instance is injected into a copy of the context.
    """
    new_context = copy(context)
    if instance:
        new_context['instance'] = instance
    else:
        instance = context['instance']
    templates = [
        '{0}/{1}/{1}_card.html'.format(instance._meta.app_label, instance._meta.module_name),
        'card.html',
    ]
    return loader.select_template(templates).render(new_context)
def get_edit_url(instance):
    """Reverse the instance's edit-photo URL, or None when no such route exists.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        return reverse('%s_%s_edit_foto' % (instance._meta.app_label, instance._meta.module_name),
                       args=(instance.pk,))
    except Exception:
        return None
def has_edit_perm(user, instance):
    """Return whether *user* holds the object-level edit_foto permission for *instance*."""
    perm = '{}_{}_{}'.format(instance._meta.app_label, instance._meta.module_name, 'edit_foto')
    return user.has_perm(perm, obj=instance)
@register.simple_tag(takes_context=True)
def instance_photo(context, size= 64, instance= None, edit_foto = False):
    """Render the instance's photo (or icon / placeholder) as an <img> tag.

    NOTE(review): the in-place photo-edit overlay is disabled (commented out
    below), so `edit_foto` currently has no effect and edit_url is always None.
    """
    no_edit_image_format = u'<img width="{2}" src="{0}" alt="{1}" />'
    edit_image_format = u"""
    <div class="inst-image" >
        <img width="{2}" src="{0}" alt="{1}" />
        <div class="foto-edit">
            <a href="{3}">
                <i class="icon-edit"></i>{4}
            </a>
        </div>
    </div>
    """
    if not instance:
        instance = context['instance']
    # user = None
    # if 'request' in context:
    #     user = context['request'].user
    # if user and edit_foto and has_edit_perm(user, instance):
    #     image_format = edit_image_format
    #     edit_url = get_edit_url(instance)
    # else:
    image_format = no_edit_image_format
    edit_url = None
    if hasattr(instance,'foto'):
        if instance.foto:
            url = instance.foto.url
        else:
            # Placeholder when the foto field is empty.
            url = static("img/empty.png")
    else:
        # NOTE(review): instances without a 'foto' attribute are assumed to
        # have an 'icon' attribute — this raises AttributeError otherwise; confirm.
        if instance.icon:
            return format_html(u'<div class="inst-icon" ><i class="icon-4x {}"> </i></div>',instance.icon)
        else:
            url = static("img/school.png")
    return format_html(image_format, url, force_text(instance),size,edit_url,_("Edit"))
@register.simple_tag(takes_context=True)
def get_module_name(context):
    """Return the verbose name of the model backing context['instance']."""
    return context['instance']._meta.verbose_name
@register.filter
def display_string(name):
    """Turn snake_case into capitalized words: 'first_name' -> 'First Name'."""
    return ' '.join(capfirst(part) for part in name.split('_'))
@register.filter
def ellipsis(value, limit=10):
    """Truncate *value* to *limit* characters, appending '...' when cut.

    Returns *value* unchanged when *limit* cannot be parsed as an int.
    (Python 2 module: relies on the `unicode` builtin.)
    """
    try:
        limit = int(limit)
    except ValueError:
        return value
    if not isinstance(value, unicode):
        value = unicode(value)
    if len(value) <= limit:
        return value
    try:
        value = u'{}{}'.format(value[:limit],'...')
    except Exception as e:
        # NOTE(review): formatting two known strings should not raise;
        # returning e.args hands the template a tuple — looks like leftover
        # debugging. Confirm intent before simplifying.
        return e.args
    return value
@register.filter
def cap_letters(value):
    """Keep only the uppercase characters of *value* (e.g. build an acronym)."""
    uppercase_only = [ch for ch in value if ch.isupper()]
    return ''.join(uppercase_only)
@register.filter
def dict_get(value, arg):
    """Template-friendly subscript lookup: value[arg] (raises like [] on a missing key)."""
    return value[arg]
@register.filter
def jsonify(obj):
    """Serialize *obj* to JSON for templates; QuerySets go through the ORM serializer."""
    if isinstance(obj, QuerySet):
        return serialize('json', obj)
    # NOTE(review): django.utils.simplejson was removed in Django 1.5; this
    # module targets an old Django (see import at top of file).
    return mark_safe(simplejson.dumps(obj))
@register.filter
def simple_timeframe(value):
    """Return the part before the first '-' (e.g. '2012-2013' -> '2012').

    (Python 2 module: relies on the `unicode` builtin.)
    """
    value = unicode(value)
    return value.split('-')[0]
@register.filter
def can_edit(value, user):
    """True when a role-bearing *user* may edit *value* (object-level check)."""
    if not hasattr(user, 'roles'):
        return False
    return value.has_instance_perm(user, "edit")
@register.filter(name='getattr')
def getattr_filter(obj, attr):
    """Template filter `getattr`: return obj.attr, or None when the attribute is absent.

    Fix: the original function was itself named `getattr`, shadowing the
    builtin at module scope, so its internal getattr(obj, attr) call
    recursed into itself (RecursionError whenever the attribute existed).
    Renaming the function and registering under name='getattr' keeps the
    template-facing filter name unchanged while restoring the builtin.
    """
    return getattr(obj, attr, None)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/dialogmainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from es_common.utils.qt import QtCore, QtGui, QtWidgets
from block_manager.view import resources_rc
class Ui_DialogGUI(object):
def setupUi(self, DialogGUI):
DialogGUI.setObjectName("DialogGUI")
DialogGUI.resize(1216, 828)
DialogGUI.setAutoFillBackground(True)
self.centralWidget = QtWidgets.QWidget(DialogGUI)
self.centralWidget.setObjectName("centralWidget")
self.gridLayout_5 = QtWidgets.QGridLayout(self.centralWidget)
self.gridLayout_5.setContentsMargins(11, 11, 11, 11)
self.gridLayout_5.setSpacing(6)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_8 = QtWidgets.QLabel(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.gridLayout_5.addWidget(self.label_8, 0, 0, 1, 1)
self.line = QtWidgets.QFrame(self.centralWidget)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_5.addWidget(self.line, 1, 0, 1, 1)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.designPanelLayout = QtWidgets.QGridLayout()
self.designPanelLayout.setSpacing(6)
self.designPanelLayout.setObjectName("designPanelLayout")
self.tmpWidget = QtWidgets.QWidget(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tmpWidget.sizePolicy().hasHeightForWidth())
self.tmpWidget.setSizePolicy(sizePolicy)
self.tmpWidget.setObjectName("tmpWidget")
self.designPanelLayout.addWidget(self.tmpWidget, 0, 0, 1, 1)
self.gridLayout_2.addLayout(self.designPanelLayout, 0, 0, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_2, 2, 0, 1, 1)
self.gridLayout_10 = QtWidgets.QGridLayout()
self.gridLayout_10.setSpacing(6)
self.gridLayout_10.setObjectName("gridLayout_10")
self.gridLayout_9 = QtWidgets.QGridLayout()
self.gridLayout_9.setSpacing(6)
self.gridLayout_9.setObjectName("gridLayout_9")
self.clearDropListButton = QtWidgets.QPushButton(self.centralWidget)
self.clearDropListButton.setObjectName("clearDropListButton")
self.gridLayout_9.addWidget(self.clearDropListButton, 0, 1, 1, 1)
self.deleteDropListItemsButton = QtWidgets.QPushButton(self.centralWidget)
self.deleteDropListItemsButton.setEnabled(False)
self.deleteDropListItemsButton.setObjectName("deleteDropListItemsButton")
self.gridLayout_9.addWidget(self.deleteDropListItemsButton, 0, 0, 1, 1)
self.saveDropListButton = QtWidgets.QPushButton(self.centralWidget)
self.saveDropListButton.setObjectName("saveDropListButton")
self.gridLayout_9.addWidget(self.saveDropListButton, 0, 2, 1, 1)
self.gridLayout_10.addLayout(self.gridLayout_9, 0, 0, 1, 1)
self.label_21 = QtWidgets.QLabel(self.centralWidget)
self.label_21.setText("")
self.label_21.setObjectName("label_21")
self.gridLayout_10.addWidget(self.label_21, 0, 1, 1, 1)
self.newBlockPushButton = QtWidgets.QPushButton(self.centralWidget)
self.newBlockPushButton.setObjectName("newBlockPushButton")
self.gridLayout_10.addWidget(self.newBlockPushButton, 0, 2, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_10, 3, 0, 1, 1)
self.line_4 = QtWidgets.QFrame(self.centralWidget)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout_5.addWidget(self.line_4, 4, 0, 1, 1)
self.gridLayout_11 = QtWidgets.QGridLayout()
self.gridLayout_11.setSpacing(6)
self.gridLayout_11.setObjectName("gridLayout_11")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_11.addItem(spacerItem, 0, 0, 1, 1)
self.enableMovingCheckBox = QtWidgets.QCheckBox(self.centralWidget)
self.enableMovingCheckBox.setObjectName("enableMovingCheckBox")
self.gridLayout_11.addWidget(self.enableMovingCheckBox, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setEnabled(False)
self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.gridLayout_11.addWidget(self.label, 0, 4, 1, 1)
self.line_6 = QtWidgets.QFrame(self.centralWidget)
self.line_6.setFrameShape(QtWidgets.QFrame.VLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.gridLayout_11.addWidget(self.line_6, 0, 3, 1, 1)
self.line_5 = QtWidgets.QFrame(self.centralWidget)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.gridLayout_11.addWidget(self.line_5, 1, 0, 1, 6)
self.enagementRepetitionsSpinBox = QtWidgets.QSpinBox(self.centralWidget)
self.enagementRepetitionsSpinBox.setEnabled(False)
self.enagementRepetitionsSpinBox.setMinimum(1)
self.enagementRepetitionsSpinBox.setObjectName("enagementRepetitionsSpinBox")
self.gridLayout_11.addWidget(self.enagementRepetitionsSpinBox, 0, 5, 1, 1)
self.line_2 = QtWidgets.QFrame(self.centralWidget)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout_11.addWidget(self.line_2, 0, 1, 1, 1)
self.gridLayout_5.addLayout(self.gridLayout_11, 5, 0, 1, 1)
DialogGUI.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(DialogGUI)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1216, 22))
self.menuBar.setObjectName("menuBar")
DialogGUI.setMenuBar(self.menuBar)
self.mainToolBar = QtWidgets.QToolBar(DialogGUI)
self.mainToolBar.setEnabled(True)
self.mainToolBar.setAllowedAreas(QtCore.Qt.AllToolBarAreas)
self.mainToolBar.setOrientation(QtCore.Qt.Horizontal)
self.mainToolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.mainToolBar.setObjectName("mainToolBar")
DialogGUI.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtWidgets.QStatusBar(DialogGUI)
self.statusBar.setObjectName("statusBar")
DialogGUI.setStatusBar(self.statusBar)
self.behavioralParametersDockWidget = QtWidgets.QDockWidget(DialogGUI)
self.behavioralParametersDockWidget.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.behavioralParametersDockWidget.sizePolicy().hasHeightForWidth())
self.behavioralParametersDockWidget.setSizePolicy(sizePolicy)
self.behavioralParametersDockWidget.setMinimumSize(QtCore.QSize(340, 400))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.behavioralParametersDockWidget.setFont(font)
self.behavioralParametersDockWidget.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)
self.behavioralParametersDockWidget.setObjectName("behavioralParametersDockWidget")
self.dockWidgetContents_22 = QtWidgets.QWidget()
self.dockWidgetContents_22.setObjectName("dockWidgetContents_22")
self.gridLayout_18 = QtWidgets.QGridLayout(self.dockWidgetContents_22)
self.gridLayout_18.setContentsMargins(11, 11, 11, 11)
self.gridLayout_18.setSpacing(6)
self.gridLayout_18.setObjectName("gridLayout_18")
self.scrollArea = QtWidgets.QScrollArea(self.dockWidgetContents_22)
self.scrollArea.setEnabled(True)
self.scrollArea.setAutoFillBackground(True)
self.scrollArea.setFrameShape(QtWidgets.QFrame.Box)
self.scrollArea.setFrameShadow(QtWidgets.QFrame.Raised)
self.scrollArea.setLineWidth(1)
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 369, 800))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_17 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_17.setContentsMargins(11, 11, 11, 11)
self.gridLayout_17.setSpacing(6)
self.gridLayout_17.setObjectName("gridLayout_17")
self.gridLayout_36 = QtWidgets.QGridLayout()
self.gridLayout_36.setSpacing(6)
self.gridLayout_36.setObjectName("gridLayout_36")
self.groupBox_6 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox_6.setFont(font)
self.groupBox_6.setObjectName("groupBox_6")
self.gridLayout_24 = QtWidgets.QGridLayout(self.groupBox_6)
self.gridLayout_24.setContentsMargins(11, 11, 11, 11)
self.gridLayout_24.setSpacing(6)
self.gridLayout_24.setObjectName("gridLayout_24")
self.gridLayout_23 = QtWidgets.QGridLayout()
self.gridLayout_23.setSpacing(6)
self.gridLayout_23.setObjectName("gridLayout_23")
self.label_19 = QtWidgets.QLabel(self.groupBox_6)
self.label_19.setObjectName("label_19")
self.gridLayout_23.addWidget(self.label_19, 3, 4, 1, 1)
self.voiceProsodySlider = QtWidgets.QSlider(self.groupBox_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.voiceProsodySlider.sizePolicy().hasHeightForWidth())
self.voiceProsodySlider.setSizePolicy(sizePolicy)
self.voiceProsodySlider.setMinimum(0)
self.voiceProsodySlider.setMaximum(1)
self.voiceProsodySlider.setSingleStep(1)
self.voiceProsodySlider.setPageStep(1)
self.voiceProsodySlider.setProperty("value", 0)
self.voiceProsodySlider.setOrientation(QtCore.Qt.Horizontal)
self.voiceProsodySlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.voiceProsodySlider.setTickInterval(1)
self.voiceProsodySlider.setObjectName("voiceProsodySlider")
self.gridLayout_23.addWidget(self.voiceProsodySlider, 5, 2, 1, 1)
self.label_18 = QtWidgets.QLabel(self.groupBox_6)
self.label_18.setObjectName("label_18")
self.gridLayout_23.addWidget(self.label_18, 3, 0, 1, 1)
self.label_6 = QtWidgets.QLabel(self.groupBox_6)
self.label_6.setObjectName("label_6")
self.gridLayout_23.addWidget(self.label_6, 1, 4, 1, 1)
self.label_14 = QtWidgets.QLabel(self.groupBox_6)
self.label_14.setObjectName("label_14")
self.gridLayout_23.addWidget(self.label_14, 1, 0, 1, 1)
self.voiceSpeedSlider = QtWidgets.QSlider(self.groupBox_6)
self.voiceSpeedSlider.setMinimum(-4)
self.voiceSpeedSlider.setMaximum(4)
self.voiceSpeedSlider.setPageStep(1)
self.voiceSpeedSlider.setOrientation(QtCore.Qt.Horizontal)
self.voiceSpeedSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.voiceSpeedSlider.setTickInterval(2)
self.voiceSpeedSlider.setObjectName("voiceSpeedSlider")
self.gridLayout_23.addWidget(self.voiceSpeedSlider, 3, 1, 1, 3)
self.voiceSpeedLabel = QtWidgets.QLabel(self.groupBox_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.voiceSpeedLabel.sizePolicy().hasHeightForWidth())
self.voiceSpeedLabel.setSizePolicy(sizePolicy)
self.voiceSpeedLabel.setAlignment(QtCore.Qt.AlignCenter)
self.voiceSpeedLabel.setObjectName("voiceSpeedLabel")
self.gridLayout_23.addWidget(self.voiceSpeedLabel, 2, 1, 1, 3)
self.voicePitchSlider = QtWidgets.QSlider(self.groupBox_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.voicePitchSlider.sizePolicy().hasHeightForWidth())
self.voicePitchSlider.setSizePolicy(sizePolicy)
self.voicePitchSlider.setMinimum(-4)
self.voicePitchSlider.setMaximum(4)
self.voicePitchSlider.setSingleStep(1)
self.voicePitchSlider.setPageStep(1)
self.voicePitchSlider.setProperty("value", 0)
self.voicePitchSlider.setOrientation(QtCore.Qt.Horizontal)
self.voicePitchSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.voicePitchSlider.setTickInterval(2)
self.voicePitchSlider.setObjectName("voicePitchSlider")
self.gridLayout_23.addWidget(self.voicePitchSlider, 1, 1, 1, 3)
self.voicePitchLabel = QtWidgets.QLabel(self.groupBox_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.voicePitchLabel.sizePolicy().hasHeightForWidth())
self.voicePitchLabel.setSizePolicy(sizePolicy)
self.voicePitchLabel.setAlignment(QtCore.Qt.AlignCenter)
self.voicePitchLabel.setObjectName("voicePitchLabel")
self.gridLayout_23.addWidget(self.voicePitchLabel, 0, 1, 1, 3)
self.label_15 = QtWidgets.QLabel(self.groupBox_6)
self.label_15.setAlignment(QtCore.Qt.AlignCenter)
self.label_15.setObjectName("label_15")
self.gridLayout_23.addWidget(self.label_15, 4, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_23.addItem(spacerItem1, 4, 1, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_23.addItem(spacerItem2, 4, 3, 1, 1)
self.label_16 = QtWidgets.QLabel(self.groupBox_6)
self.label_16.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_16.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_16.setObjectName("label_16")
self.gridLayout_23.addWidget(self.label_16, 5, 0, 1, 2)
self.label_17 = QtWidgets.QLabel(self.groupBox_6)
self.label_17.setObjectName("label_17")
self.gridLayout_23.addWidget(self.label_17, 5, 3, 1, 2)
self.gridLayout_24.addLayout(self.gridLayout_23, 0, 0, 1, 1)
self.gridLayout_36.addWidget(self.groupBox_6, 3, 0, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.behavioralParametersApplyToAllButton = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.behavioralParametersApplyToAllButton.setObjectName("behavioralParametersApplyToAllButton")
self.gridLayout.addWidget(self.behavioralParametersApplyToAllButton, 1, 1, 1, 1)
self.behavioralParametersApplyComboBox = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.behavioralParametersApplyComboBox.setObjectName("behavioralParametersApplyComboBox")
self.behavioralParametersApplyComboBox.addItem("")
self.behavioralParametersApplyComboBox.addItem("")
self.behavioralParametersApplyComboBox.addItem("")
self.behavioralParametersApplyComboBox.addItem("")
self.behavioralParametersApplyComboBox.addItem("")
self.behavioralParametersApplyComboBox.addItem("")
self.gridLayout.addWidget(self.behavioralParametersApplyComboBox, 0, 0, 1, 2)
self.behavioralParametersApplyToItemsBelowButton = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.behavioralParametersApplyToItemsBelowButton.setObjectName("behavioralParametersApplyToItemsBelowButton")
self.gridLayout.addWidget(self.behavioralParametersApplyToItemsBelowButton, 1, 0, 1, 1)
self.gridLayout_36.addLayout(self.gridLayout, 7, 0, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_36.addItem(spacerItem3, 8, 0, 1, 1)
self.groupBox = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.gridLayout_6 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_6.setContentsMargins(11, 11, 11, 11)
self.gridLayout_6.setSpacing(6)
self.gridLayout_6.setObjectName("gridLayout_6")
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setSpacing(6)
self.gridLayout_4.setObjectName("gridLayout_4")
self.greenEyeColorRadioButton = QtWidgets.QRadioButton(self.groupBox)
self.greenEyeColorRadioButton.setObjectName("greenEyeColorRadioButton")
self.buttonGroup = QtWidgets.QButtonGroup(DialogGUI)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.greenEyeColorRadioButton)
self.gridLayout_4.addWidget(self.greenEyeColorRadioButton, 1, 0, 1, 1)
self.blueEyeColorRadioButton = QtWidgets.QRadioButton(self.groupBox)
self.blueEyeColorRadioButton.setObjectName("blueEyeColorRadioButton")
self.buttonGroup.addButton(self.blueEyeColorRadioButton)
self.gridLayout_4.addWidget(self.blueEyeColorRadioButton, 1, 1, 1, 1)
self.redEyeColorRadioButton = QtWidgets.QRadioButton(self.groupBox)
self.redEyeColorRadioButton.setObjectName("redEyeColorRadioButton")
self.buttonGroup.addButton(self.redEyeColorRadioButton)
self.gridLayout_4.addWidget(self.redEyeColorRadioButton, 0, 1, 1, 1)
self.whiteEyeColorRadioButton = QtWidgets.QRadioButton(self.groupBox)
self.whiteEyeColorRadioButton.setChecked(True)
self.whiteEyeColorRadioButton.setObjectName("whiteEyeColorRadioButton")
self.buttonGroup.addButton(self.whiteEyeColorRadioButton)
self.gridLayout_4.addWidget(self.whiteEyeColorRadioButton, 0, 0, 1, 1)
self.gridLayout_6.addLayout(self.gridLayout_4, 0, 0, 1, 1)
self.gridLayout_36.addWidget(self.groupBox, 4, 0, 1, 1)
self.groupBox_4 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox_4.setFont(font)
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayout_20 = QtWidgets.QGridLayout(self.groupBox_4)
self.gridLayout_20.setContentsMargins(11, 11, 11, 11)
self.gridLayout_20.setSpacing(6)
self.gridLayout_20.setObjectName("gridLayout_20")
self.gridLayout_19 = QtWidgets.QGridLayout()
self.gridLayout_19.setSpacing(6)
self.gridLayout_19.setObjectName("gridLayout_19")
self.label_12 = QtWidgets.QLabel(self.groupBox_4)
self.label_12.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_12.setObjectName("label_12")
self.gridLayout_19.addWidget(self.label_12, 0, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.groupBox_4)
self.label_13.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.label_13.setObjectName("label_13")
self.gridLayout_19.addWidget(self.label_13, 0, 3, 1, 1)
self.gazePatternSlider = QtWidgets.QSlider(self.groupBox_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gazePatternSlider.sizePolicy().hasHeightForWidth())
self.gazePatternSlider.setSizePolicy(sizePolicy)
self.gazePatternSlider.setMaximum(1)
self.gazePatternSlider.setPageStep(1)
self.gazePatternSlider.setProperty("value", 0)
self.gazePatternSlider.setSliderPosition(0)
self.gazePatternSlider.setOrientation(QtCore.Qt.Horizontal)
self.gazePatternSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.gazePatternSlider.setObjectName("gazePatternSlider")
self.gridLayout_19.addWidget(self.gazePatternSlider, 0, 2, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_19.addItem(spacerItem4, 0, 0, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_19.addItem(spacerItem5, 0, 4, 1, 1)
self.gridLayout_20.addLayout(self.gridLayout_19, 0, 0, 1, 1)
self.gridLayout_36.addWidget(self.groupBox_4, 1, 0, 1, 1)
self.groupBox_7 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox_7.setFont(font)
self.groupBox_7.setObjectName("groupBox_7")
self.gridLayout_26 = QtWidgets.QGridLayout(self.groupBox_7)
self.gridLayout_26.setContentsMargins(11, 11, 11, 11)
self.gridLayout_26.setSpacing(6)
self.gridLayout_26.setObjectName("gridLayout_26")
self.gridLayout_25 = QtWidgets.QGridLayout()
self.gridLayout_25.setSpacing(6)
self.gridLayout_25.setObjectName("gridLayout_25")
self.label_4 = QtWidgets.QLabel(self.groupBox_7)
self.label_4.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.gridLayout_25.addWidget(self.label_4, 0, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.groupBox_7)
self.label_3.setObjectName("label_3")
self.gridLayout_25.addWidget(self.label_3, 0, 0, 1, 1)
self.proxemicsSlider = QtWidgets.QSlider(self.groupBox_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.proxemicsSlider.sizePolicy().hasHeightForWidth())
self.proxemicsSlider.setSizePolicy(sizePolicy)
self.proxemicsSlider.setMinimum(0)
self.proxemicsSlider.setMaximum(10)
self.proxemicsSlider.setPageStep(1)
self.proxemicsSlider.setProperty("value", 5)
self.proxemicsSlider.setSliderPosition(5)
self.proxemicsSlider.setOrientation(QtCore.Qt.Horizontal)
self.proxemicsSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.proxemicsSlider.setTickInterval(2)
self.proxemicsSlider.setObjectName("proxemicsSlider")
self.gridLayout_25.addWidget(self.proxemicsSlider, 0, 1, 1, 1)
self.proxemicsLcdNumber = QtWidgets.QLCDNumber(self.groupBox_7)
self.proxemicsLcdNumber.setEnabled(True)
self.proxemicsLcdNumber.setAutoFillBackground(False)
self.proxemicsLcdNumber.setStyleSheet("background-color: rgb(204, 204, 204);")
self.proxemicsLcdNumber.setFrameShape(QtWidgets.QFrame.Panel)
self.proxemicsLcdNumber.setFrameShadow(QtWidgets.QFrame.Sunken)
self.proxemicsLcdNumber.setLineWidth(1)
self.proxemicsLcdNumber.setSmallDecimalPoint(True)
self.proxemicsLcdNumber.setProperty("value", 3.5)
self.proxemicsLcdNumber.setObjectName("proxemicsLcdNumber")
self.gridLayout_25.addWidget(self.proxemicsLcdNumber, 1, 1, 1, 1)
self.gridLayout_26.addLayout(self.gridLayout_25, 0, 0, 1, 1)
self.gridLayout_36.addWidget(self.groupBox_7, 2, 0, 1, 1)
self.gridLayout_28 = QtWidgets.QGridLayout()
self.gridLayout_28.setSpacing(6)
self.gridLayout_28.setObjectName("gridLayout_28")
self.testBehavioralParametersButton = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.testBehavioralParametersButton.setEnabled(False)
self.testBehavioralParametersButton.setObjectName("testBehavioralParametersButton")
self.gridLayout_28.addWidget(self.testBehavioralParametersButton, 1, 0, 1, 1)
self.behavioralParametersApplyButton = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.behavioralParametersApplyButton.setObjectName("behavioralParametersApplyButton")
self.gridLayout_28.addWidget(self.behavioralParametersApplyButton, 1, 1, 1, 1)
self.warningLabel = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setItalic(True)
self.warningLabel.setFont(font)
self.warningLabel.setStyleSheet("color: rgb(236, 236, 236);")
self.warningLabel.setAlignment(QtCore.Qt.AlignCenter)
self.warningLabel.setObjectName("warningLabel")
self.gridLayout_28.addWidget(self.warningLabel, 0, 0, 1, 2)
self.gridLayout_36.addLayout(self.gridLayout_28, 5, 0, 1, 1)
self.groupBox_5 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox_5.setFont(font)
self.groupBox_5.setObjectName("groupBox_5")
self.gridLayout_22 = QtWidgets.QGridLayout(self.groupBox_5)
self.gridLayout_22.setContentsMargins(11, 11, 11, 11)
self.gridLayout_22.setSpacing(6)
self.gridLayout_22.setObjectName("gridLayout_22")
self.gridLayout_21 = QtWidgets.QGridLayout()
self.gridLayout_21.setSpacing(6)
self.gridLayout_21.setObjectName("gridLayout_21")
self.label_2 = QtWidgets.QLabel(self.groupBox_5)
self.label_2.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.gridLayout_21.addWidget(self.label_2, 0, 3, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_21.addItem(spacerItem6, 0, 0, 1, 1)
self.gestureOpennessSlider = QtWidgets.QSlider(self.groupBox_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gestureOpennessSlider.sizePolicy().hasHeightForWidth())
self.gestureOpennessSlider.setSizePolicy(sizePolicy)
self.gestureOpennessSlider.setMinimum(0)
self.gestureOpennessSlider.setMaximum(1)
self.gestureOpennessSlider.setPageStep(1)
self.gestureOpennessSlider.setProperty("value", 0)
self.gestureOpennessSlider.setOrientation(QtCore.Qt.Horizontal)
self.gestureOpennessSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.gestureOpennessSlider.setObjectName("gestureOpennessSlider")
self.gridLayout_21.addWidget(self.gestureOpennessSlider, 0, 2, 1, 1)
self.label_10 = QtWidgets.QLabel(self.groupBox_5)
self.label_10.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_10.setObjectName("label_10")
self.gridLayout_21.addWidget(self.label_10, 0, 1, 1, 1)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_21.addItem(spacerItem7, 0, 4, 1, 1)
self.gridLayout_22.addLayout(self.gridLayout_21, 0, 0, 1, 1)
self.gridLayout_36.addWidget(self.groupBox_5, 0, 0, 1, 1)
self.line_7 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.gridLayout_36.addWidget(self.line_7, 6, 0, 1, 1)
self.gridLayout_17.addLayout(self.gridLayout_36, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_18.addWidget(self.scrollArea, 0, 0, 1, 1)
self.behavioralParametersDockWidget.setWidget(self.dockWidgetContents_22)
DialogGUI.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.behavioralParametersDockWidget)
self.blocksDockWidget = QtWidgets.QDockWidget(DialogGUI)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.blocksDockWidget.setFont(font)
self.blocksDockWidget.setFeatures(
QtWidgets.QDockWidget.DockWidgetFloatable | QtWidgets.QDockWidget.DockWidgetMovable)
self.blocksDockWidget.setObjectName("blocksDockWidget")
self.dockWidgetContents = QtWidgets.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout_8 = QtWidgets.QGridLayout(self.dockWidgetContents)
self.gridLayout_8.setContentsMargins(11, 11, 11, 11)
self.gridLayout_8.setSpacing(6)
self.gridLayout_8.setObjectName("gridLayout_8")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.groupBox_11 = QtWidgets.QGroupBox(self.dockWidgetContents)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.groupBox_11.setFont(font)
self.groupBox_11.setTitle("")
self.groupBox_11.setObjectName("groupBox_11")
self.gridLayout_7 = QtWidgets.QGridLayout(self.groupBox_11)
self.gridLayout_7.setContentsMargins(11, 11, 11, 11)
self.gridLayout_7.setSpacing(6)
self.gridLayout_7.setObjectName("gridLayout_7")
self.line_3 = QtWidgets.QFrame(self.groupBox_11)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridLayout_7.addWidget(self.line_3, 1, 0, 1, 1)
self.dragListWidget = QtWidgets.QListWidget(self.groupBox_11)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(False)
font.setWeight(50)
self.dragListWidget.setFont(font)
self.dragListWidget.setLineWidth(1)
self.dragListWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.dragListWidget.setDragEnabled(True)
self.dragListWidget.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.dragListWidget.setAlternatingRowColors(True)
self.dragListWidget.setIconSize(QtCore.QSize(100, 60))
self.dragListWidget.setWordWrap(True)
self.dragListWidget.setSelectionRectVisible(True)
self.dragListWidget.setObjectName("dragListWidget")
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(False)
font.setWeight(50)
item.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/hreresources/pepper-icons/pepper-greet.png"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
item.setIcon(icon)
self.dragListWidget.addItem(item)
self.gridLayout_7.addWidget(self.dragListWidget, 2, 0, 1, 1)
self.dialogueBlocksCategoryComboBox = QtWidgets.QComboBox(self.groupBox_11)
self.dialogueBlocksCategoryComboBox.setObjectName("dialogueBlocksCategoryComboBox")
self.dialogueBlocksCategoryComboBox.addItem("")
self.dialogueBlocksCategoryComboBox.addItem("")
self.dialogueBlocksCategoryComboBox.addItem("")
self.dialogueBlocksCategoryComboBox.addItem("")
self.dialogueBlocksCategoryComboBox.addItem("")
self.gridLayout_7.addWidget(self.dialogueBlocksCategoryComboBox, 0, 0, 1, 1)
self.gridLayout_3.addWidget(self.groupBox_11, 0, 0, 1, 2)
self.gridLayout_8.addLayout(self.gridLayout_3, 0, 0, 1, 1)
self.blocksDockWidget.setWidget(self.dockWidgetContents)
DialogGUI.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.blocksDockWidget)
self.dockWidget = QtWidgets.QDockWidget(DialogGUI)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dockWidget.sizePolicy().hasHeightForWidth())
self.dockWidget.setSizePolicy(sizePolicy)
self.dockWidget.setMinimumSize(QtCore.QSize(98, 150))
self.dockWidget.setFloating(False)
self.dockWidget.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)
self.dockWidget.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.dockWidget.setObjectName("dockWidget")
self.dockWidgetContents_2 = QtWidgets.QWidget()
self.dockWidgetContents_2.setObjectName("dockWidgetContents_2")
self.gridLayout_27 = QtWidgets.QGridLayout(self.dockWidgetContents_2)
self.gridLayout_27.setContentsMargins(11, 11, 11, 11)
self.gridLayout_27.setSpacing(6)
self.gridLayout_27.setObjectName("gridLayout_27")
self.logsTextEdit = QtWidgets.QTextEdit(self.dockWidgetContents_2)
self.logsTextEdit.setAcceptDrops(False)
self.logsTextEdit.setAutoFillBackground(True)
self.logsTextEdit.setStyleSheet("background: rgb(76, 76, 76)")
self.logsTextEdit.setUndoRedoEnabled(False)
self.logsTextEdit.setReadOnly(True)
self.logsTextEdit.setAcceptRichText(True)
self.logsTextEdit.setObjectName("logsTextEdit")
self.gridLayout_27.addWidget(self.logsTextEdit, 0, 0, 1, 1)
self.dockWidget.setWidget(self.dockWidgetContents_2)
DialogGUI.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget)
self.actionConnect = QtWidgets.QAction(DialogGUI)
self.actionConnect.setCheckable(False)
self.actionConnect.setChecked(False)
self.actionConnect.setEnabled(True)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/hreresources/icons/connect.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionConnect.setIcon(icon1)
self.actionConnect.setIconVisibleInMenu(True)
self.actionConnect.setObjectName("actionConnect")
self.actionDisconnect = QtWidgets.QAction(DialogGUI)
self.actionDisconnect.setCheckable(False)
self.actionDisconnect.setEnabled(False)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/hreresources/icons/disconnect1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionDisconnect.setIcon(icon2)
self.actionDisconnect.setObjectName("actionDisconnect")
self.actionDatabaseConnect = QtWidgets.QAction(DialogGUI)
self.actionDatabaseConnect.setCheckable(False)
self.actionDatabaseConnect.setEnabled(False)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/hreresources/icons/database-connect.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionDatabaseConnect.setIcon(icon3)
self.actionDatabaseConnect.setObjectName("actionDatabaseConnect")
self.actionWakeUp = QtWidgets.QAction(DialogGUI)
self.actionWakeUp.setCheckable(False)
self.actionWakeUp.setEnabled(False)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/hreresources/icons/eyeon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionWakeUp.setIcon(icon4)
self.actionWakeUp.setObjectName("actionWakeUp")
self.actionRest = QtWidgets.QAction(DialogGUI)
self.actionRest.setCheckable(False)
self.actionRest.setEnabled(False)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/hreresources/icons/eyeoff.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionRest.setIcon(icon5)
self.actionRest.setObjectName("actionRest")
self.actionDatabaseDisconnect = QtWidgets.QAction(DialogGUI)
self.actionDatabaseDisconnect.setCheckable(False)
self.actionDatabaseDisconnect.setEnabled(False)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/hreresources/icons/database-disconnect.png"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.actionDatabaseDisconnect.setIcon(icon6)
self.actionDatabaseDisconnect.setObjectName("actionDatabaseDisconnect")
self.actionStartCamera = QtWidgets.QAction(DialogGUI)
self.actionStartCamera.setEnabled(False)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/hreresources/icons/camera-start1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionStartCamera.setIcon(icon7)
self.actionStartCamera.setObjectName("actionStartCamera")
self.actionStopCamera = QtWidgets.QAction(DialogGUI)
self.actionStopCamera.setEnabled(False)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/hreresources/icons/camera-stop1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionStopCamera.setIcon(icon8)
self.actionStopCamera.setObjectName("actionStopCamera")
self.actionDelete = QtWidgets.QAction(DialogGUI)
self.actionDelete.setEnabled(False)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/hreresources/icons/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionDelete.setIcon(icon9)
self.actionDelete.setObjectName("actionDelete")
self.actionEnableTouch = QtWidgets.QAction(DialogGUI)
self.actionEnableTouch.setEnabled(False)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(":/hreresources/icons/touch.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionEnableTouch.setIcon(icon10)
self.actionEnableTouch.setObjectName("actionEnableTouch")
self.actionReload = QtWidgets.QAction(DialogGUI)
self.actionReload.setCheckable(False)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(":/hreresources/icons/refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionReload.setIcon(icon11)
self.actionReload.setObjectName("actionReload")
self.actionSave = QtWidgets.QAction(DialogGUI)
self.actionSave.setEnabled(False)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap(":/hreresources/icons/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon12)
self.actionSave.setObjectName("actionSave")
self.actionTabletShow = QtWidgets.QAction(DialogGUI)
self.actionTabletShow.setEnabled(False)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap(":/hreresources/icons/tablet-show.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionTabletShow.setIcon(icon13)
self.actionTabletShow.setObjectName("actionTabletShow")
self.actionTabletHide = QtWidgets.QAction(DialogGUI)
self.actionTabletHide.setEnabled(False)
icon14 = QtGui.QIcon()
icon14.addPixmap(QtGui.QPixmap(":/hreresources/icons/tablet-hide.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionTabletHide.setIcon(icon14)
self.actionTabletHide.setObjectName("actionTabletHide")
self.actionVolumeDown = QtWidgets.QAction(DialogGUI)
self.actionVolumeDown.setEnabled(False)
icon15 = QtGui.QIcon()
icon15.addPixmap(QtGui.QPixmap(":/hreresources/icons/volume-down.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionVolumeDown.setIcon(icon15)
self.actionVolumeDown.setObjectName("actionVolumeDown")
self.actionVolumeUp = QtWidgets.QAction(DialogGUI)
self.actionVolumeUp.setEnabled(False)
icon16 = QtGui.QIcon()
icon16.addPixmap(QtGui.QPixmap(":/hreresources/icons/volume-up.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionVolumeUp.setIcon(icon16)
self.actionVolumeUp.setObjectName("actionVolumeUp")
self.actionPlay = QtWidgets.QAction(DialogGUI)
self.actionPlay.setCheckable(False)
self.actionPlay.setEnabled(False)
icon17 = QtGui.QIcon()
icon17.addPixmap(QtGui.QPixmap(":/hreresources/icons/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionPlay.setIcon(icon17)
self.actionPlay.setObjectName("actionPlay")
self.actionEnableTracking = QtWidgets.QAction(DialogGUI)
self.actionEnableTracking.setEnabled(False)
self.actionEnableTracking.setIcon(icon4)
self.actionEnableTracking.setObjectName("actionEnableTracking")
self.actionDisableTracking = QtWidgets.QAction(DialogGUI)
self.actionDisableTracking.setEnabled(False)
self.actionDisableTracking.setIcon(icon5)
self.actionDisableTracking.setObjectName("actionDisableTracking")
self.actionStop = QtWidgets.QAction(DialogGUI)
self.actionStop.setEnabled(False)
icon18 = QtGui.QIcon()
icon18.addPixmap(QtGui.QPixmap(":/hreresources/icons/stop.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionStop.setIcon(icon18)
self.actionStop.setObjectName("actionStop")
self.actionImportBlocks = QtWidgets.QAction(DialogGUI)
icon19 = QtGui.QIcon()
icon19.addPixmap(QtGui.QPixmap(":/hreresources/icons/import.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionImportBlocks.setIcon(icon19)
self.actionImportBlocks.setObjectName("actionImportBlocks")
self.actionExportBlocks = QtWidgets.QAction(DialogGUI)
icon20 = QtGui.QIcon()
icon20.addPixmap(QtGui.QPixmap(":/hreresources/icons/export.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExportBlocks.setIcon(icon20)
self.actionExportBlocks.setObjectName("actionExportBlocks")
self.actionCopySettings = QtWidgets.QAction(DialogGUI)
icon21 = QtGui.QIcon()
icon21.addPixmap(QtGui.QPixmap(":/hreresources/icons/copy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCopySettings.setIcon(icon21)
self.actionCopySettings.setObjectName("actionCopySettings")
self.actionPasteSettings = QtWidgets.QAction(DialogGUI)
self.actionPasteSettings.setEnabled(False)
icon22 = QtGui.QIcon()
icon22.addPixmap(QtGui.QPixmap(":/hreresources/icons/paste.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionPasteSettings.setIcon(icon22)
self.actionPasteSettings.setObjectName("actionPasteSettings")
self.mainToolBar.addAction(self.actionConnect)
self.mainToolBar.addAction(self.actionDisconnect)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionDatabaseConnect)
self.mainToolBar.addAction(self.actionDatabaseDisconnect)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionWakeUp)
self.mainToolBar.addAction(self.actionRest)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionEnableTouch)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionTabletShow)
self.mainToolBar.addAction(self.actionTabletHide)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionVolumeDown)
self.mainToolBar.addAction(self.actionVolumeUp)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionPlay)
self.mainToolBar.addAction(self.actionStop)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionImportBlocks)
self.mainToolBar.addAction(self.actionExportBlocks)
self.mainToolBar.addSeparator()
self.mainToolBar.addAction(self.actionCopySettings)
self.mainToolBar.addAction(self.actionPasteSettings)
self.retranslateUi(DialogGUI)
QtCore.QMetaObject.connectSlotsByName(DialogGUI)
def retranslateUi(self, DialogGUI):
    """Install (re)translated display strings on every widget and action.

    pyuic5-generated code: do not edit by hand; regenerate from the .ui
    file instead.  All user-visible strings go through Qt's translate()
    so the UI can be localized.
    """
    _translate = QtCore.QCoreApplication.translate
    DialogGUI.setWindowTitle(_translate("DialogGUI", "Pepper Dialog"))
    self.label_8.setText(_translate("DialogGUI", "DESIGN PANEL"))
    self.clearDropListButton.setText(_translate("DialogGUI", "Clear"))
    self.deleteDropListItemsButton.setText(_translate("DialogGUI", "Delete"))
    self.saveDropListButton.setText(_translate("DialogGUI", "Save"))
    self.newBlockPushButton.setText(_translate("DialogGUI", "New Block"))
    self.enableMovingCheckBox.setText(_translate("DialogGUI", "Enable Moving"))
    self.label.setText(_translate("DialogGUI", "# Repetitions"))
    self.behavioralParametersDockWidget.setWindowTitle(_translate("DialogGUI", "Behavioral Parameters"))
    self.groupBox_6.setTitle(_translate("DialogGUI", "Voice"))
    self.label_19.setText(_translate("DialogGUI", "Fast"))
    self.label_18.setText(_translate("DialogGUI", "Slow"))
    self.label_6.setText(_translate("DialogGUI", "High"))
    self.label_14.setText(_translate("DialogGUI", "Low"))
    self.voiceSpeedLabel.setText(_translate("DialogGUI", "Speed"))
    self.voicePitchLabel.setText(_translate("DialogGUI", "Pitch"))
    self.label_15.setText(_translate("DialogGUI", "Prosody"))
    self.label_16.setText(_translate("DialogGUI", "Weak"))
    self.label_17.setText(_translate("DialogGUI", "Strong"))
    self.behavioralParametersApplyToAllButton.setText(_translate("DialogGUI", "Apply to All"))
    self.behavioralParametersApplyComboBox.setItemText(0, _translate("DialogGUI", "All Parameters"))
    self.behavioralParametersApplyComboBox.setItemText(1, _translate("DialogGUI", "Gesture Openness"))
    self.behavioralParametersApplyComboBox.setItemText(2, _translate("DialogGUI", "Gaze Pattern"))
    self.behavioralParametersApplyComboBox.setItemText(3, _translate("DialogGUI", "Proxemic Closeness"))
    self.behavioralParametersApplyComboBox.setItemText(4, _translate("DialogGUI", "Voice"))
    self.behavioralParametersApplyComboBox.setItemText(5, _translate("DialogGUI", "Eye Color"))
    self.behavioralParametersApplyToItemsBelowButton.setText(_translate("DialogGUI", "Apply ⇣"))
    self.groupBox.setTitle(_translate("DialogGUI", "Eye Color"))
    self.greenEyeColorRadioButton.setText(_translate("DialogGUI", "Green"))
    self.blueEyeColorRadioButton.setText(_translate("DialogGUI", "Blue"))
    self.redEyeColorRadioButton.setText(_translate("DialogGUI", "Red"))
    self.whiteEyeColorRadioButton.setText(_translate("DialogGUI", "White"))
    self.groupBox_4.setTitle(_translate("DialogGUI", "Gaze Pattern"))
    self.label_12.setText(_translate("DialogGUI", "Fixated"))
    self.label_13.setText(_translate("DialogGUI", "Diverted"))
    self.groupBox_7.setTitle(_translate("DialogGUI", "Proxemic Closeness"))
    self.label_4.setText(_translate("DialogGUI", "Far"))
    self.label_3.setText(_translate("DialogGUI", "Close"))
    self.testBehavioralParametersButton.setText(_translate("DialogGUI", "Test"))
    self.behavioralParametersApplyButton.setText(_translate("DialogGUI", "Apply"))
    self.warningLabel.setText(_translate("DialogGUI", "Click \'Apply\' to save the changes."))
    self.groupBox_5.setTitle(_translate("DialogGUI", "Gesture Openness"))
    self.label_2.setText(_translate("DialogGUI", "Open"))
    self.label_10.setText(_translate("DialogGUI", "Close"))
    self.blocksDockWidget.setWindowTitle(_translate("DialogGUI", "Dialogue Blocks"))
    self.dragListWidget.setSortingEnabled(True)
    # Sorting is suspended while item texts are set, then restored,
    # so items are not reordered mid-update (standard pyuic pattern).
    __sortingEnabled = self.dragListWidget.isSortingEnabled()
    self.dragListWidget.setSortingEnabled(False)
    item = self.dragListWidget.item(0)
    item.setText(_translate("DialogGUI", "Salutation"))
    self.dragListWidget.setSortingEnabled(__sortingEnabled)
    self.dialogueBlocksCategoryComboBox.setItemText(0, _translate("DialogGUI", "All"))
    self.dialogueBlocksCategoryComboBox.setItemText(1, _translate("DialogGUI", "Opening"))
    self.dialogueBlocksCategoryComboBox.setItemText(2, _translate("DialogGUI", "Service-Provision"))
    self.dialogueBlocksCategoryComboBox.setItemText(3, _translate("DialogGUI", "Closing"))
    self.dialogueBlocksCategoryComboBox.setItemText(4, _translate("DialogGUI", "Chit-Chat"))
    self.dockWidget.setWindowTitle(_translate("DialogGUI", "Logs"))
    self.actionConnect.setText(_translate("DialogGUI", "Connect"))
    self.actionDisconnect.setText(_translate("DialogGUI", "Disconnect"))
    self.actionDatabaseConnect.setText(_translate("DialogGUI", "DB Connect"))
    self.actionWakeUp.setText(_translate("DialogGUI", "WakeUp"))
    self.actionRest.setText(_translate("DialogGUI", "Rest"))
    self.actionDatabaseDisconnect.setText(_translate("DialogGUI", "DB Disconnect"))
    self.actionStartCamera.setText(_translate("DialogGUI", "Start"))
    self.actionStopCamera.setText(_translate("DialogGUI", "Stop"))
    self.actionDelete.setText(_translate("DialogGUI", "Delete"))
    self.actionDelete.setToolTip(_translate("DialogGUI", "Delete"))
    self.actionEnableTouch.setText(_translate("DialogGUI", "Enable Touch"))
    self.actionReload.setText(_translate("DialogGUI", "Reload"))
    self.actionReload.setToolTip(_translate("DialogGUI", "Reload"))
    self.actionSave.setText(_translate("DialogGUI", "Save Blocks"))
    self.actionTabletShow.setText(_translate("DialogGUI", "Show"))
    self.actionTabletHide.setText(_translate("DialogGUI", "Hide"))
    self.actionVolumeDown.setText(_translate("DialogGUI", "Vol-"))
    self.actionVolumeUp.setText(_translate("DialogGUI", "Vol+"))
    self.actionPlay.setText(_translate("DialogGUI", "Play"))
    self.actionPlay.setToolTip(_translate("DialogGUI", "Play"))
    self.actionEnableTracking.setText(_translate("DialogGUI", "Enable Tracking"))
    self.actionDisableTracking.setText(_translate("DialogGUI", "Disable Tracking"))
    self.actionStop.setText(_translate("DialogGUI", "Stop"))
    self.actionImportBlocks.setText(_translate("DialogGUI", "Import Blocks"))
    self.actionExportBlocks.setText(_translate("DialogGUI", "Export Blocks"))
    self.actionCopySettings.setText(_translate("DialogGUI", "Copy Settings"))
    self.actionPasteSettings.setText(_translate("DialogGUI", "Paste Settings"))
if __name__ == "__main__":
    # Manual smoke test: instantiate the generated UI inside a bare
    # QMainWindow and run the Qt event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    DialogGUI = QtWidgets.QMainWindow()
    ui = Ui_DialogGUI()
    ui.setupUi(DialogGUI)
    DialogGUI.show()
    sys.exit(app.exec_())
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import inspect
import json
import os
import shutil
import subprocess
import tempfile
from six.moves import configparser
from rally.utils import encodeutils
class RallyCliError(Exception):
    """Raised when a ``rally`` CLI invocation exits with a non-zero status.

    Exposes the failing command, its exit code and its (decoded) output;
    ``str()`` renders all three on one line.
    """

    def __init__(self, cmd, code, output):
        decoded = encodeutils.safe_decode(output)
        self.command = cmd
        self.code = code
        self.output = decoded
        self.msg = "Command: %s Code: %d Output: %s\n" % (cmd, code, decoded)

    def __str__(self):
        return self.msg

    # Python 2 compatibility: unicode(exc) mirrors str(exc).
    def __unicode__(self):
        return self.msg
class JsonTempFile(object):
    """Serialize *config* as JSON into a named temporary file.

    The path is exposed as ``self.filename``; the file is removed when
    this object is garbage collected.
    """

    def __init__(self, config):
        config_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            config_file.write(encodeutils.safe_encode(json.dumps(config)))
        finally:
            # Close even if the write/serialization fails, so the fd is
            # not leaked.
            config_file.close()
        self.filename = config_file.name

    def __del__(self):
        # __del__ can run after a failed __init__ (no ``filename`` yet) or
        # after the file was removed externally; cleanup must never raise.
        try:
            os.unlink(self.filename)
        except (AttributeError, OSError):
            pass
class TaskConfig(JsonTempFile):
    """A rally task configuration written to a temporary JSON file."""
    pass
class Rally(object):
    """Create and represent a separate rally installation.

    Usage:

        rally = Rally()
        rally("deployment", "create", "--name", "Some Deployment Name")
        output = rally("deployment list")
    """

    # Extra arguments appended to the initial "deployment create" call;
    # subclasses may override.
    _DEPLOYMENT_CREATE_ARGS = ""

    def __init__(self, force_new_db=False, plugin_path=None):
        # NOTE(sskripnick): we should change home dir to avoid races
        # and do not touch any user files in ~/.rally
        self.tmp_dir = tempfile.mkdtemp()
        self.env = copy.deepcopy(os.environ)
        self.env["HOME"] = self.tmp_dir
        self.config_filename = None
        self.method_name = None
        self.class_name = None
        # Walk the call stack to find the test method that created this
        # instance; report files are named after that method and its class.
        caller_frame = inspect.currentframe().f_back
        if caller_frame.f_code.co_name == "__call__":
            caller_frame = caller_frame.f_back
        self.method_name = caller_frame.f_code.co_name
        if self.method_name == "setUp":
            raise Exception("No rally instance should be generated in "
                            "setUp method")
        test_object = caller_frame.f_locals["self"]
        self.class_name = test_object.__class__.__name__
        if force_new_db or ("RCI_KEEP_DB" not in os.environ):
            # Point rally at a throwaway sqlite DB inside our temp dir.
            config_filename = os.path.join(self.tmp_dir, "conf")
            config = configparser.RawConfigParser()
            config.add_section("database")
            config.set("database", "connection",
                       "sqlite:///%s/db" % self.tmp_dir)
            with open(config_filename, "w") as conf:
                config.write(conf)
            self.args = ["rally", "--config-file", config_filename]
            subprocess.call(["rally", "--config-file", config_filename,
                             "db", "recreate"], env=self.env)
            self.config_filename = config_filename
        else:
            self.args = ["rally"]
            subprocess.call(["rally", "db", "recreate"], env=self.env)
        if plugin_path:
            self.args.extend(["--plugin-paths", os.path.abspath(plugin_path)])
        self.reports_root = os.environ.get("REPORTS_ROOT",
                                           "rally-cli-output-files")
        self._created_files = []
        self("deployment create --name MAIN%s" % self._DEPLOYMENT_CREATE_ARGS,
             write_report=False)

    def __del__(self):
        # Best-effort removal of the per-instance fake HOME directory.
        shutil.rmtree(self.tmp_dir)

    def _safe_make_dirs(self, dirs):
        # ``mkdir -p`` semantics: ignore "already exists", re-raise the rest.
        try:
            os.makedirs(dirs)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(dirs):
                pass
            else:
                raise

    def gen_report_path(self, suffix=None, extension=None, keep_old=False):
        """Report file path/name modifier

        :param suffix: suffix that will be appended to filename.
                       It will be appended before extension
        :param extension: file extension.
        :param keep_old: if True, previous reports will not be deleted,
                         but rename to 'nameSuffix.old*.extension'
        :return: complete report name to write report
        """
        self._safe_make_dirs("%s/%s" % (self.reports_root, self.class_name))
        suff = suffix or ""
        ext = extension or "txt"
        path = "%s/%s/%s%s.%s" % (self.reports_root, self.class_name,
                                  self.method_name, suff, ext)
        if path not in self._created_files:
            if os.path.exists(path):
                if not keep_old:
                    os.remove(path)
                else:
                    # Shift the existing report aside as name.old<N>.ext,
                    # picking the first N that is free.
                    path_list = path.split(".")
                    old_suff = "old"
                    path_list.insert(-1, old_suff)
                    new_path = ".".join(path_list)
                    count = 0
                    while os.path.exists(new_path):
                        count += 1
                        path_list[-2] = "old%d" % count
                        new_path = ".".join(path_list)
                    os.rename(path, new_path)
            self._created_files.append(path)
        return path

    def __call__(self, cmd, getjson=False, report_path=None, raw=False,
                 suffix=None, extension=None, keep_old=False,
                 write_report=True, no_logs=False):
        """Call rally in the shell

        :param cmd: rally command
        :param getjson: in cases, when rally prints JSON, you can catch output
                        deserialized
        :param report_path: if present, rally command and its output will be
                            written to file with passed file name
        :param raw: don't write command itself to report file. Only output
                    will be written
        """
        if not isinstance(cmd, list):
            cmd = cmd.split(" ")
        try:
            if no_logs or getjson:
                # Keep rally's log noise out of the captured output so
                # JSON stays parseable.
                cmd = self.args + ["--log-file", "/dev/null"] + cmd
                with open(os.devnull, "w") as DEVNULL:
                    output = encodeutils.safe_decode(subprocess.check_output(
                        cmd, stderr=DEVNULL, env=self.env))
            else:
                cmd = self.args + cmd
                output = encodeutils.safe_decode(subprocess.check_output(
                    cmd, stderr=subprocess.STDOUT, env=self.env))
            if getjson:
                return json.loads(output)
            return output
        except subprocess.CalledProcessError as e:
            # NOTE(review): e.output is kept raw (bytes) here, unlike the
            # success path which decodes it — confirm whether the report
            # should also receive decoded output.
            output = e.output
            raise RallyCliError(cmd, e.returncode, e.output)
        finally:
            # The report is appended on success and failure alike.
            if write_report:
                if not report_path:
                    report_path = self.gen_report_path(
                        suffix=suffix, extension=extension, keep_old=keep_old)
                with open(report_path, "a") as rep:
                    if not raw:
                        rep.write("\n%s:\n" % " ".join(cmd))
                    rep.write("%s\n" % output)
def get_global(global_key, env):
    """Read one variable from the rally "globals" file under *env*'s HOME.

    :param global_key: name of the variable to look up
    :param env: mapping that provides a "HOME" entry pointing at the
                directory containing ``.rally/globals``
    :return: the variable's value with trailing whitespace stripped,
             or "" when the key is absent
    """
    home_dir = env.get("HOME")
    with open("%s/.rally/globals" % home_dir) as f:
        for line in f:
            if line.startswith("%s=" % global_key):
                # Split on the FIRST "=" only: the original two-way split
                # raised ValueError whenever the value itself contained
                # an "=" (e.g. base64-encoded data).
                key, value = line.split("=", 1)
                return value.rstrip()
    return ""
|
<filename>src/DocumentTemplate/DT_InSV.py
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__doc__='''Sequence variables support
$Id$'''
__version__='$Revision: 1.22 $'[11:-2]
from math import sqrt
import re
# Zope's optional Missing.Value marks "no data" cells in statistics;
# fall back to None when the Missing module is not installed.
try:
    import Missing
    mv=Missing.Value
except: mv=None

# Legacy alias (from the old ``types`` module era) used for type checks.
TupleType = tuple
class sequence_variables:
    """Lazy mapping of <dtml-in>-style sequence variables.

    Keys such as 'sequence-index', 'mean-<name>', 'first-<name>' or
    'next-batches' are computed on demand from ``self.items`` and cached
    in ``self.data``.  NOTE(review): this is legacy Python 2 code
    (``raise KeyError, key``, ``has_key``, ``long``, integer ``/``);
    code tokens are preserved byte-for-byte, only comments were added.
    """

    # Optional alternative key prefix (e.g. 'loop_'); set in __init__.
    alt_prefix = None

    def __init__(self,items=None,query_string='',start_name_re=None,
                 alt_prefix=''):
        self.items=items
        self.query_string=query_string
        self.start_name_re=start_name_re
        if alt_prefix:
            self.alt_prefix = alt_prefix + '_'

        # Shared cache of computed values; also the batch-state record.
        self.data=data={
            'previous-sequence': 0,
            'next-sequence': 0,
            'sequence-start': 1,
            'sequence-end': 0,
            }

    # Truthiness hook: the mapping is always considered non-empty.
    def __len__(self): return 1

    # Index renderers: 0-based index -> 1-based number, letters, romans...
    def number(self,index): return index+1
    def even(self,index): return index%2 == 0
    def odd(self,index): return index%2
    def letter(self,index): return chr(ord('a')+index)
    def Letter(self,index): return chr(ord('A')+index)
    def key(self,index): return self.items[index][0]

    def item(self,index, tt=type(())):
        # For (key, value) pairs return the value; otherwise the item itself.
        i=self.items[index]
        if type(i) is tt and len(i)==2: return i[1]
        return i

    def roman(self,index): return self.Roman(index).lower()

    def Roman(self,num):
        # Force number to be an integer value
        num = int(num)+1
        # Initialize roman as an empty string
        roman = ''
        while num >= 1000:
            num = num - 1000
            roman = '%sM' % roman
        while num >= 500:
            num = num - 500
            roman = '%sD' % roman
        while num >= 100:
            num = num - 100
            roman = '%sC' % roman
        while num >= 50:
            num = num - 50
            roman = '%sL' % roman
        while num >= 10:
            num = num - 10
            roman = '%sX' % roman
        while num >= 5:
            num = num - 5
            roman = '%sV' % roman
        while num < 5 and num >= 1:
            num = num - 1
            roman = '%sI' % roman
        # Replaces special cases in Roman Numerals
        roman = roman.replace('DCCCC', 'CM')
        roman = roman.replace('CCCC', 'CD')
        roman = roman.replace('LXXXX', 'XC')
        roman = roman.replace('XXXX', 'XL')
        roman = roman.replace('VIIII', 'IX')
        roman = roman.replace('IIII', 'IV')
        return roman

    def value(self,index,name):
        # Fetch attribute/key *name* from the item at *index*; unwraps
        # (key, value) pairs first.  'mapping' mode uses item[name].
        data=self.data
        item=self.items[index]
        if type(item)==TupleType and len(item)==2:
            item=item[1]
        if data['mapping']: return item[name]
        return getattr(item,name)

    def first(self,name,key=''):
        # True when this item's *name* differs from the previous item's.
        data=self.data
        if data['sequence-start']: return 1
        index=data['sequence-index']
        return self.value(index,name) != self.value(index-1,name)

    def last(self,name,key=''):
        # True when this item's *name* differs from the next item's.
        data=self.data
        if data['sequence-end']: return 1
        index=data['sequence-index']
        return self.value(index,name) != self.value(index+1,name)

    def length(self, ignored):
        # Caches 'sequence-length' as a side effect.
        l=self['sequence-length']=len(self.items)
        return l

    def query(self, *ignored):
        # Rebuild the request query string with the batch-start variable
        # removed, so batch links can append their own start value.
        if self.start_name_re is None: raise KeyError, 'sequence-query'
        query_string=self.query_string
        while query_string and query_string[:1] in '?&':
            query_string=query_string[1:]
        while query_string[-1:] == '&':
            query_string=query_string[:-1]
        if query_string:
            query_string='&%s&' % query_string
            reg=self.start_name_re
            if type(reg)==type(re.compile(r"")):
                mo = reg.search(query_string)
                if mo is not None:
                    v = mo.group(0)
                    l = mo.start(0)
                    query_string=(query_string[:l]+ query_string[l+len(v)-1:])
            else:
                # Legacy pre-"re" regex object interface.
                l=reg.search_group(query_string, (0,))
                if l:
                    v=l[1]
                    l=l[0]
                    query_string=(query_string[:l]+ query_string[l+len(v)-1:])
            query_string='?'+query_string[1:]
        else: query_string='?'
        self['sequence-query']=query_string
        return query_string

    # Prefixes understood by the statistics() dispatcher below.
    statistic_names=(
        'total', 'count', 'min', 'max', 'median', 'mean',
        'variance', 'variance-n','standard-deviation', 'standard-deviation-n',
        )

    def statistics(self,name,key):
        # Compute all statistics for attribute *name* over self.items in
        # one pass, cache them in self.data, then return the one asked for.
        # Non-numeric values are collected separately in svalues.
        items=self.items
        data=self.data
        mapping=data['mapping']
        count=sum=sumsq=0
        min=max=None
        scount=smin=smax=None
        values=[]
        svalues=[]
        for item in items:
            try:
                if mapping: item=item[name]
                else:
                    try: item=getattr(item,name)
                    except:
                        if name != 'item':
                            raise
                try:
                    if item is mv:
                        item = None
                    if type(item)==type(1):
                        s=item*long(item)
                    else:
                        s=item*item
                    sum=sum+item
                    sumsq=sumsq+s
                    values.append(item)
                    if min is None:
                        min=max=item
                    else:
                        if item < min: min=item
                        if item > max: max=item
                except:
                    # Value is not numeric: track separately for fallback.
                    if item is not None and item is not mv:
                        if smin is None: smin=smax=item
                        else:
                            if item < smin: smin=item
                            if item > smax: smax=item
                        svalues.append(item)
            except: pass

        # Initialize all stats to empty strings:
        for stat in self.statistic_names: data['%s-%s' % (stat,name)]=''

        count=len(values)
        try: # Numeric statistics
            n=float(count)
            mean=sum/n
            sumsq=sumsq/n - mean*mean
            data['mean-%s' % name]=mean
            data['total-%s' % name]=sum
            data['variance-n-%s' % name]=sumsq
            data['standard-deviation-n-%s' % name]=sqrt(sumsq)
            if count > 1:
                # Bessel's correction for the sample variance.
                sumsq=sumsq*n/(n-1)
                data['variance-%s' % name]=sumsq
                data['standard-deviation-%s' % name]=sqrt(sumsq)
            else:
                data['variance-%s' % name]=''
                data['standard-deviation-%s' % name]=''
        except:
            # Fall back to the non-numeric values for min/max/median.
            if min is None: min,max,values=smin,smax,svalues
            else:
                if smin < min: min=smin
                if smax > max: max=smax
                values=values+svalues
            count=len(values)
        data['count-%s' % name]=count
        # data['_values']=values
        if min is not None:
            data['min-%s' % name]=min
            data['max-%s' % name]=max
            values.sort()
            if count==1:
                data['median-%s' % name]=min
            else:
                n=count+1
                if n/2*2==n: data['median-%s' % name]=values[n/2-1]
                else:
                    n=n/2
                    try: data['median-%s' % name]=(values[n]+values[n-1])/2
                    except:
                        try: data['median-%s' % name]=(
                            "between %s and %s" % (values[n],values[n-1]))
                        except: pass
        return data[key]

    def next_batches(self, suffix='batches',key=''):
        # Build sequence_variables records describing the batches that
        # follow the current one; cached as 'next-batches'.
        if suffix != 'batches': raise KeyError, key
        data=self.data
        sequence=self.items
        try:
            if not data['next-sequence']: return ()
            sz=data['sequence-step-size']
            start=data['sequence-step-start']
            end=data['sequence-step-end']
            l=len(sequence)
            orphan=data['sequence-step-orphan']
            overlap=data['sequence-step-overlap']
        # NOTE(review): looks like a bug — "AttributeError, 'next-batches'"
        # is evaluated and discarded; presumably "raise AttributeError"
        # was intended.  Left as-is.
        except: AttributeError, 'next-batches'
        r=[]
        while end < l:
            start,end,spam=opt(end+1-overlap,0,sz,orphan,sequence)
            v=sequence_variables(self.items,
                                 self.query_string,self.start_name_re)
            d=v.data
            d['batch-start-index']=start-1
            d['batch-end-index']=end-1
            d['batch-size']=end+1-start
            d['mapping']=data['mapping']
            r.append(v)
        data['next-batches']=r
        return r

    def previous_batches(self, suffix='batches',key=''):
        # Same as next_batches, walking backwards from the current batch.
        if suffix != 'batches': raise KeyError, key
        data=self.data
        sequence=self.items
        try:
            if not data['previous-sequence']: return ()
            sz=data['sequence-step-size']
            start=data['sequence-step-start']
            end=data['sequence-step-end']
            l=len(sequence)
            orphan=data['sequence-step-orphan']
            overlap=data['sequence-step-overlap']
        # NOTE(review): same discarded-expression bug as in next_batches.
        except: AttributeError, 'previous-batches'
        r=[]
        while start > 1:
            start,end,spam=opt(0,start-1+overlap,sz,orphan,sequence)
            v=sequence_variables(self.items,
                                 self.query_string,self.start_name_re)
            d=v.data
            d['batch-start-index']=start-1
            d['batch-end-index']=end-1
            d['batch-size']=end+1-start
            d['mapping']=data['mapping']
            r.append(v)
        r.reverse()
        data['previous-batches']=r
        return r

    # Dispatch table: key prefix -> unbound handler(self, suffix, key).
    special_prefixes={
        'first': first,
        'last': last,
        'previous': previous_batches,
        'next': next_batches,
        # These two are for backward compatability with a missfeature:
        'sequence-index': lambda self, suffix, key: self['sequence-'+suffix],
        'sequence-index-is': lambda self, suffix, key: self['sequence-'+suffix],
        }
    for n in statistic_names: special_prefixes[n]=statistics

    def __setitem__(self, key, value):
        self.data[key] = value
        # Mirror 'sequence-*' keys under the alternative prefix as well.
        if self.alt_prefix:
            if key.startswith('sequence-'): key = key[9:]
            self.data[self.alt_prefix + key] = value

    def __getitem__(self,key,
                    special_prefixes=special_prefixes,
                    special_prefix=special_prefixes.has_key
                    ):
        # Lookup order: cached value, alt-prefix rewrite, index renderer
        # (number/letter/...), special prefix handler, '-var' item access,
        # then 'sequence-query'.
        data=self.data
        if data.has_key(key): return data[key]

        l=key.rfind('-')
        if l < 0:
            alt_prefix = self.alt_prefix
            if not (alt_prefix and key.startswith(alt_prefix)):
                raise KeyError, key
            # Rewrite 'altprefix_foo_bar' to the canonical 'sequence-foo-bar'.
            suffix = key[len(alt_prefix):].replace('_', '-')
            if '-' in suffix:
                try: return self[suffix]
                except KeyError: pass
            prefix = 'sequence'
            key = 'sequence-' + suffix
        else:
            suffix=key[l+1:]
            prefix=key[:l]

        if hasattr(self, suffix):
            try: v=data[prefix+'-index']
            except: pass
            else: return getattr(self,suffix)(v)

        if special_prefix(prefix):
            return special_prefixes[prefix](self, suffix, key)

        if prefix[-4:]=='-var':
            prefix=prefix[:-4]
            try: return self.value(data[prefix+'-index'],suffix)
            except: pass

        if key=='sequence-query': return self.query()

        raise KeyError, key
def opt(start, end, size, orphan, sequence):
    """Normalize a batch window over *sequence*.

    Given possibly-unset (``0``) start/end/size hints, compute a concrete
    1-based (start, end, size) triple, clamping the window to the sequence
    bounds and absorbing trailing "orphan" items into the last batch.

    :param start: 1-based start hint, or 0 when unset
    :param end: 1-based end hint, or 0 when unset
    :param orphan: batches are extended to the sequence end when fewer
                   than *orphan* items would remain after them
    :return: (start, end, size) tuple
    """
    if size < 1:
        # Derive size from an explicit window, else default to 7.
        if start > 0 and end > 0 and end >= start:
            size = end + 1 - start
        else:
            size = 7

    if start > 0:
        # Probe indexing instead of len() so any indexable sequence works;
        # only IndexError means "past the end" (the original bare excepts
        # also swallowed unrelated errors).
        try:
            sequence[start - 1]
        except IndexError:
            start = len(sequence)
        if end > 0:
            if end < start:
                end = start
        else:
            end = start + size - 1
            try:
                sequence[end + orphan - 1]
            except IndexError:
                end = len(sequence)
    elif end > 0:
        try:
            sequence[end - 1]
        except IndexError:
            end = len(sequence)
        start = end + 1 - size
        if start - 1 < orphan:
            start = 1
    else:
        start = 1
        end = start + size - 1
        try:
            sequence[end + orphan - 1]
        except IndexError:
            end = len(sequence)
    return start, end, size
|
<gh_stars>1-10
################################################################################
# Module: core.py
# Description: Helper functions
# License: Apache v2.0
# Author: <NAME>
# Web: https://github.com/pedroswits/anprx
################################################################################
import collections
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.neighbors import BallTree
from .constants import PropertiesFilter
from .utils import log
###
###
def flatten(list_):
    """
    Flatten a list of objects which may contain other lists as elements.

    Parameters
    ---------
    list_ : object
        list

    Returns
    -------
    generator
    """
    # collections.Iterable was removed in Python 3.10; collections.abc
    # is the supported location on every Python 3 version.
    from collections.abc import Iterable

    for el in list_:
        # Strings/bytes are iterable but are treated as atomic values here.
        if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
            yield from flatten(el)
        else:
            yield el
###
###
def is_in(value, values_set):
    """
    Computes whether an object is present in, or has at least one element that is present in, values_set. This is equivalent to computing whether two sets intersect (not disjoint), but where value does not have to be a set.

    Parameters
    ---------
    value : object
        data dictionary

    values_set : set
        set of values

    Returns
    -------
    bool
    """
    # Scalars (and string-like values, treated as atomic) use plain
    # membership; any other iterable is tested for set intersection.
    try:
        iter(value)
    except TypeError:
        return value in values_set
    if isinstance(value, (str, bytes)):
        return value in values_set
    return not set(value).isdisjoint(values_set)
###
###
def edges_with_at_least_one_property(G, properties):
    """
    Find edges that match at least once in all property sets: (key, values)

    Parameters
    ---------
    G : nx.MultiDiGraph
        a (multidi)graph

    properties : dict(str : set)
        properties of edges to filter by

    Returns
    -------
    generator of (u,v,k)
        generator of edges
    """
    for u, v, k, d in G.edges(keys=True, data=True):
        # any() yields each matching edge exactly ONCE; the previous
        # inner loop yielded the same edge once per matching property,
        # producing duplicates when several properties matched.
        if any(key in d and is_in(d[key], values)
               for key, values in properties.items()):
            yield (u, v, k)
###
###
def edges_with_all_properties(G, properties):
    """
    Find edges that match always in all property sets: (key, values)

    Parameters
    ---------
    G : nx.MultiDiGraph
        a (multidi)graph

    properties : dict(str : set)
        properties of edges to filter by

    Returns
    -------
    generator of (u,v,k)
        generator of edges
    """
    for u, v, k, d in G.edges(keys=True, data=True):
        # all() replaces the manual match counter; semantics are identical
        # (an empty `properties` dict matches every edge in both forms).
        if all(key in d and is_in(d[key], values)
               for key, values in properties.items()):
            yield (u, v, k)
###
###
def edges_with_properties(G, properties, match_by = PropertiesFilter.all):
    """
    Get edges with given properties

    Parameters
    ---------
    G : nx.MultiDiGraph
        a (multidi)graph

    properties : dict(str : set)
        properties of edges to filter by

    match_by : int
        One of const.FILTER_PROPERTIES.

    Returns
    -------
    generator of (u,v,k)
        generator of edges
    """
    # Dispatch to the matching strategy; reject unknown filter values.
    if match_by == PropertiesFilter.all:
        return edges_with_all_properties(G, properties)
    if match_by == PropertiesFilter.at_least_one:
        return edges_with_at_least_one_property(G, properties)
    raise ValueError("Invalid 'match_by' value. Pick one of PropertiesFilter.{{{}}}.".format(PropertiesFilter.__order__))
###
###
def unit_vector(v):
    """
    Calculate the unit vector of an array or bunch of arrays.

    Parameters
    ---------
    v : np.ndarray
        vector(s); expected shape (n, dim) — the norm is taken along axis 1

    Returns
    -------
    np.ndarray
        unit vector(s) of v
    """
    # keepdims=True keeps the norm as an (n, 1) column, which broadcasts
    # against v directly — same result as the previous manual np.reshape.
    norm = np.linalg.norm(v, axis=1, keepdims=True)
    return v / norm
###
###
def dot2d(v1, v2, method = "einsum"):
    """
    Vectorised dot product between pairs of 2d vectors.

    Parameters
    ---------
    v1 : np.ndarray
        vectors on the left side of the dot product
    v2 : np.ndarray
        vectors on the right side of the dot product
    method: string
        how to compute each pairwise dot product. One of {'einsum', 'loop'}

    Returns
    -------
    np.ndarray
        result of the dot products

    Raises
    ------
    ValueError
        if the inputs differ in shape or the method is unknown
    """
    if np.shape(v1) != np.shape(v2):
        raise ValueError("Input vectors don't have the same shape: {}, {}".format(np.shape(v1), np.shape(v2)))
    if method == "einsum":
        # Single C-level pass: row-wise multiply and sum.
        return np.einsum("ij, ij -> i", v1, v2)
    if method == "loop":
        # Explicit Python loop over the paired rows.
        return np.array([left.dot(right) for left, right in zip(v1, v2)])
    raise ValueError("No such method for computing the dot product.")
###
###
def angle_between(v1, v2):
    """
    Calculate the acute angle, in degrees, between each pair of vectors.

    Parameters
    ---------
    v1 : np.ndarray
        first vectors of each pair of vectors
    v2 : np.ndarray
        second vectors of each pair of vectors

    Returns
    -------
    np.ndarray
        acute angles between each pair of vectors
    """
    if np.shape(v1) != np.shape(v2):
        raise ValueError("Input vectors don't have the same shape: {}, {}".format(np.shape(v1), np.shape(v2)))
    # Cosines of the angles; clipped to guard arccos against float drift.
    cosines = np.clip(dot2d(unit_vector(v1), unit_vector(v2)), -1.0, 1.0)
    degrees = np.rad2deg(np.arccos(cosines))
    # Fold obtuse angles back into [0, 90] so the result is always acute.
    return np.where(degrees > 90, 180.0 - degrees, degrees)
###
###
def flatten_dict(dict_, parent_key='', sep='_', inherit_parent_key = True):
    """
    Flatten a dict whose values may themselves be (nested) dicts.

    Parameters
    ---------
    dict_ : dict
        possibly nested mapping to flatten
    parent_key : str
        prefix carried down from the enclosing level
    sep : str
        separator inserted between parent and child keys
    inherit_parent_key : bool
        when False, child keys are kept as-is (no prefixing)

    Borrowed from https://stackoverflow.com/a/6027615

    Returns
    -------
    dict
        flat mapping from joined keys to leaf values
    """
    # BUGFIX: the ABC aliases in ``collections`` (e.g.
    # collections.MutableMapping) were removed in Python 3.10; the class
    # lives in collections.abc. Imported locally so this block is
    # self-contained.
    from collections.abc import MutableMapping
    items = []
    for k, v in dict_.items():
        new_key = parent_key + sep + k if parent_key and inherit_parent_key else k
        if isinstance(v, MutableMapping):
            # Recurse into nested dicts, carrying the (optional) prefix down.
            items.extend(flatten_dict(v,
                                      parent_key = new_key,
                                      sep = sep,
                                      inherit_parent_key = inherit_parent_key)\
                         .items())
        else:
            items.append((new_key, v))
    return dict(items)
###
###
def chunks(l, n):
    """
    Split *l* into successive chunks of at most *n* elements.

    Parameters
    ----------
    l : sequence
        any sliceable sequence
    n : int
        maximum size of each chunk

    Returns
    -------
    generator
        slices of *l*; every chunk has length n except possibly the last
    """
    for offset in range(0, len(l), n):
        yield l[offset:offset + n]
###
###
def as_undirected(edges):
    """
    Collapse directed edges into their distinct undirected node pairs.

    Parameters
    ---------
    edges : array-like of Edge
        array of directed edges (u,v,k)

    Returns
    -------
    list of tuples
        one tuple per distinct unordered {u, v} pair
    """
    # frozenset((u, v)) makes (u, v) and (v, u) compare equal, so the set
    # comprehension deduplicates the two directions of each edge.
    unique_pairs = {frozenset((edge[0], edge[1])) for edge in edges}
    return [tuple(pair) for pair in unique_pairs]
|
<filename>Juego(por nombrar)/src/clases/Sprites.py<gh_stars>1-10
import pygame as pg
from pygame import sprite
from pygame.locals import *
import glob
from itertools import cycle
class Character_Sprite(sprite.Sprite):
    """Animated, movable character sprite.

    Cycles through per-direction walk animations loaded from
    ``animations/<name>/`` and tracks world coordinates (x, y) separately
    from on-screen coordinates (rect), which are derived by subtracting
    the camera offset in :meth:`set_position`.
    """

    def __init__(self, name, speed):
        super().__init__()
        # Raw frame lists keyed by direction: "WALK_U/D/L/R".
        self.animation_list = self.get_animation_list(name)
        # Endless iterators over each direction's frames (animation dictionary).
        self.animations = {"WALK_D": cycle(self.animation_list["WALK_D"]),
                           "WALK_U": cycle(self.animation_list["WALK_U"]),
                           "WALK_L": cycle(self.animation_list["WALK_L"]),
                           "WALK_R": cycle(self.animation_list["WALK_R"])
                           }
        # Currently displayed frame (accessed through the ``image`` property).
        self.__image = next(self.animations["WALK_D"])
        # Walk counter; original TODO: change it in the future so the player
        # does not get stuck in a single awkward animation frame.
        self.walk_frame = 1
        self.going = "WALK_D"  # key of the direction currently animated
        imageRect = self.image.get_rect()
        self.rect = imageRect
        shape = pg.surfarray.array3d(self.__image).shape
        w, h, c = shape
        # Corrections needed so the character's coordinates reference the
        # intended anchor point of the image.
        self.corrections = (0, h / 2)
        self.x = 0  # world x coordinate
        self.y = 0  # world y coordinate
        self.speed = speed
        self.radius = self.rect.width / 8

    def set_position(self, CAMERA_X, CAMERA_Y):
        # Screen position = world position minus the camera offset.
        self.rect.x, self.rect.y = self.x - CAMERA_X, self.y - CAMERA_Y

    @property
    def image(self):
        """Current animation frame; advances based on ``walk_frame``."""
        # If the cached frame belongs to a different direction than the one
        # we are now facing, restart that direction's animation.
        if self.__image not in self.animation_list[self.going]:
            self.__image = next(self.animations[self.going])
            self.walk_frame = 1
        else:
            if self.walk_frame % 6 == 0:  # tune this number with the FPS
                self.__image = next(self.animations[self.going])
                self.walk_frame += 1
            else:
                pass
        return self.__image

    def get_animation_list(self, name):
        """Load the four directional walk-frame lists for *name* from disk."""
        animation_list = {"WALK_U": [pg.image.load(ld_img).convert() for
                                     ld_img in glob.glob("animations\\{}\\walk_u*".format(name))],
                          "WALK_D": [pg.image.load(ld_img).convert() for
                                     ld_img in glob.glob("animations\\{}\\walk_d*".format(name))],
                          "WALK_L": [pg.image.load(ld_img).convert() for
                                     ld_img in glob.glob("animations\\{}\\walk_l*".format(name))],
                          "WALK_R": [pg.image.load(ld_img).convert() for
                                     ld_img in glob.glob("animations\\{}\\walk_r*".format(name))]
                          }
        # Make the background color transparent. NOTE(review): the original
        # comment said "white", but the colorkey (0,0,0) is black — confirm
        # which is intended.
        for key in animation_list:
            for a in animation_list[key]:
                a.set_colorkey((0, 0, 0))
        return animation_list

    def move(self):  # to be implemented
        pass

    def update(self, CAMERA_X, CAMERA_Y):
        # Per-frame hook: apply movement, then recompute the screen position.
        self.move()
        self.set_position(CAMERA_X, CAMERA_Y)
class Item_Sprite(sprite.Sprite):
    """Animated item that follows its parent sprite. (Original note: "redo,
    I think" — both authors agreed.)

    NOTE(review): reading ``animation_count`` is side-effecting (each read
    adds ``speed``), and the ``image`` property reads it twice in one
    condition, so a single access can advance the counter twice — confirm
    this timing is intended.
    """

    def __init__(self, img_pth, parent, speed=30):
        super().__init__()
        self.speed = speed  # counter increment applied per animation_count read
        # Every frame found under animations/<img_pth>/, cycled endlessly.
        self.animation_list = {"BALL_MOVE": [pg.image.load(ld_img) for ld_img in glob.glob("animations\\{}\\**".format(img_pth))]}
        self.animations = {"BALL_MOVE": cycle(self.animation_list["BALL_MOVE"])}
        # Currently displayed frame (accessed through the ``image`` property).
        self.__image = next(self.animations["BALL_MOVE"])
        self.parent = parent  # sprite this item is attached to
        self.__animation_count = 0
        self.rect = self.image.get_rect()

    @property
    def animation_count(self):
        # Side-effecting read: every access advances the counter by `speed`.
        self.__animation_count += self.speed
        return self.__animation_count

    @property
    def image(self):
        """Current frame; advances when the counter reaches a multiple of 50."""
        # See the NOTE on the class: both operands below read (and therefore
        # increment) animation_count.
        if self.animation_count % 50 == 0 and self.animation_count != 0:
            self.__animation_count = 0
            self.__image = next(self.animations["BALL_MOVE"])
            return self.__image
        else:
            return self.__image

    def update(self, CAMERA_X, CAMERA_Y):
        # Hover 20px up and left of the parent's on-screen position.
        self.rect.x = self.parent.rect.x - 20
        self.rect.y = self.parent.rect.y - 20
|
<filename>src/metrics/hota.py
"""Evaluate MOT trackers (HOTA / CLEAR / Identity) with TrackEval.

Builds the default TrackEval configs for the MOT20 benchmark, allows every
config key to be overridden on the command line, runs the evaluation for a
fixed list of trackers and writes summary tables to results/hota.csv and
results/mota.csv.
"""
from multiprocessing import freeze_support
import sys
import os
import argparse
from TrackEval import trackeval
import pandas as pd

# The script lives in src/metrics; run relative to the project root.
os.chdir(os.path.join("..", ".."))
freeze_support()

# --- default configuration -------------------------------------------------
default_eval_config = trackeval.Evaluator.get_default_eval_config()
default_eval_config['DISPLAY_LESS_PROGRESS'] = False
default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
default_dataset_config["SEQ_INFO"] = None
default_dataset_config["BENCHMARK"] = 'MOT20'
default_dataset_config["DO_PREPROC"] = False
default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5}
config = {**default_eval_config, **default_dataset_config, **default_metrics_config}  # Merge default configs
config['NUM_PARALLEL_CORES'] = 16
config['USE_PARALLEL'] = True

# --- command-line overrides ------------------------------------------------
# One CLI flag per config key; list-/None-valued defaults accept several args.
parser = argparse.ArgumentParser()
for setting in config.keys():
    if type(config[setting]) == list or type(config[setting]) == type(None):
        parser.add_argument("--" + setting, nargs='+')
    else:
        parser.add_argument("--" + setting)
args = parser.parse_args().__dict__
for setting in args.keys():
    if args[setting] is not None:
        # Coerce the raw CLI string back to the type of the default value.
        if type(config[setting]) == type(True):
            if args[setting] == 'True':
                x = True
            elif args[setting] == 'False':
                x = False
            else:
                # BUGFIX: message previously read "...parameter Xmust be..."
                # (missing space before 'must').
                raise Exception('Command line parameter ' + setting + ' must be True or False')
        elif type(config[setting]) == type(1):
            x = int(args[setting])
        elif type(args[setting]) == type(None):
            x = None
        elif setting == 'SEQ_INFO':
            x = dict(zip(args[setting], [None] * len(args[setting])))
        else:
            x = args[setting]
        config[setting] = x

# Split the merged config back into the three consumer-specific dicts.
eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}

# --- run the evaluation ----------------------------------------------------
evaluator = trackeval.Evaluator(eval_config)
dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
metrics_list = []
for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]:
    if metric.get_name() in metrics_config['METRICS']:
        metrics_list.append(metric(metrics_config))
if len(metrics_list) == 0:
    raise Exception('No metrics selected for evaluation')
output_res, output_msg = evaluator.evaluate(dataset_list, metrics_list)

# --- summarize per-tracker results -----------------------------------------
trackers_to_eval = ['iou', 'average', 'weighted_by_iou']
tracker_eval = {}
for tracker in trackers_to_eval:
    combined = output_res['MotChallenge2DBox'][tracker]['COMBINED_SEQ']['pedestrian']
    hota = combined['HOTA']
    mota = combined['CLEAR']
    identity = combined['Identity']
    # HOTA sub-metrics come as arrays over localization thresholds; average them.
    h = pd.DataFrame(hota)[['HOTA', 'DetA', 'AssA', 'LocA', 'RHOTA']].mean().to_dict()
    m = pd.Series(mota)[['MOTA', 'MOTP', 'CLR_FP', 'CLR_FN']].to_dict()
    i = pd.Series(identity)[['IDF1', 'IDP', 'IDR']].to_dict()
    h.update(m)
    h.update(i)
    tracker_eval[tracker] = h

eval_df = pd.DataFrame(tracker_eval).T.round(3)
eval_hota = eval_df[['HOTA', 'DetA', 'AssA', 'LocA', 'RHOTA']]
# BUGFIX: 'CLR_FP' was listed twice; the second column was meant to be
# 'CLR_FN' (computed above).
eval_mota = eval_df[['MOTA', 'MOTP', 'CLR_FP', 'CLR_FN', 'IDF1', 'IDR']]
eval_hota.to_csv(os.path.join("results", "hota.csv"))
eval_mota.to_csv(os.path.join("results", "mota.csv"))
|
import sqlite3 as sql
from datetime import timezone
from flask_sqlalchemy import SQLAlchemy
from os import path
from flask import Flask
from flask_login import LoginManager, login_manager, UserMixin
from sqlalchemy import Table, Column, Integer, ForeignKey, engine, create_engine, inspect
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship, backref, session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
db = SQLAlchemy()  # initializing the SQLAlchemy extension (bound to the app in crear_app)
DB_NAME = 'database.db'  # filename of the SQLite database
# NOTE(review): sessionmaker's ``bind`` expects an Engine/Connection, but
# ``db`` is the Flask-SQLAlchemy extension object — this Session/session
# pair looks unusable as written; confirm whether it is needed at all.
Session = sessionmaker(bind=db)
session = Session()
def crear_app():
    """Create and configure the Flask application.

    Registers the ``views`` and ``auth`` blueprints, initializes the
    database and the login manager, and returns the app instance.
    """
    app = Flask(__name__)
    # NOTE(review): '<KEY>' is a placeholder — load a real secret key
    # (e.g. from an environment variable) before deploying.
    app.config['SECRET_KEY'] = '<KEY>'
    app.config['UPLOAD_FOLDER'] = "static/images"
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'
    db.init_app(app)

    from .views import views
    from .auth import auth

    # BUGFIX: was ``ur_lprefix='/'`` — a typo for ``url_prefix``, so the
    # intended prefix never reached the blueprint.
    app.register_blueprint(views, url_prefix='/')
    app.register_blueprint(auth, url_prefix='/')

    # from .modelos import User, Role, Post, Comment
    create_database(app)

    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'
    login_manager.init_app(app)

    @login_manager.user_loader
    def load_user(id):
        # Flask-Login stores the user id as a string; convert back to int.
        return User.query.get(int(id))

    return app
def create_database(app):
    """Create all tables for *app*, but only if the database file is missing."""
    if path.exists('acinoyx_jubatus/' + DB_NAME):
        return
    db.create_all(app=app)
    print('Created Database!')
# The models for the tables in the database are defined below.
# NOTE(review): every model inherits from both ``db.Model`` and this plain
# declarative ``Base`` — two metadata registries is unusual; confirm the
# extra Base is actually needed.
Base = declarative_base()
class Baraja(db.Model, UserMixin, Base):
    """Deck container row pointing at up to four sub-decks (baraja1..baraja4)."""
    __tablename__ = 'baraja'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign keys into the four fixed sub-deck tables.
    baraja_id1 = db.Column(db.Integer, db.ForeignKey('baraja1.id'))
    baraja_id2 = db.Column(db.Integer, db.ForeignKey('baraja2.id'))
    baraja_id3 = db.Column(db.Integer, db.ForeignKey('baraja3.id'))
    baraja_id4 = db.Column(db.Integer, db.ForeignKey('baraja4.id'))
class Baraja1(db.Model, UserMixin, Base):
    """Sub-deck slot #1: named deck linked to a card, a user and a parent deck.

    NOTE(review): the four BarajaN tables share an identical schema; a single
    table with a slot-number column would remove the duplication.
    """
    __tablename__ = 'baraja1'
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(150))  # deck name
    carta_id = db.Column(db.Integer, db.ForeignKey('carta.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    baraja_id = db.Column(db.Integer, db.ForeignKey('baraja.id'))
class Baraja2(db.Model, UserMixin, Base):
    """Sub-deck slot #2: named deck linked to a card, a user and a parent deck."""
    __tablename__ = 'baraja2'
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(150))  # deck name
    carta_id = db.Column(db.Integer, db.ForeignKey('carta.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    baraja_id = db.Column(db.Integer, db.ForeignKey('baraja.id'))
class Baraja3(db.Model, UserMixin, Base):
    """Sub-deck slot #3: named deck linked to a card, a user and a parent deck."""
    __tablename__ = 'baraja3'
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(150))  # deck name
    carta_id = db.Column(db.Integer, db.ForeignKey('carta.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    baraja_id = db.Column(db.Integer, db.ForeignKey('baraja.id'))
class Baraja4(db.Model, UserMixin, Base):
    """Sub-deck slot #4: named deck linked to a card, a user and a parent deck."""
    __tablename__ = 'baraja4'
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(150))  # deck name
    carta_id = db.Column(db.Integer, db.ForeignKey('carta.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    baraja_id = db.Column(db.Integer, db.ForeignKey('baraja.id'))
class Carta(db.Model, UserMixin, Base):
    """Flashcard: front/back text, difficulty, owner and deck references."""
    __tablename__ = 'carta'
    id = db.Column(db.Integer, primary_key=True)
    frente = db.Column(db.String(10000))  # front-side text
    detras = db.Column(db.String(10000))  # back-side text
    dificultad = db.Column(db.Integer)  # difficulty rating
    date = db.Column(db.DateTime(timezone=True), default=func.now())  # creation timestamp
    baraja_id = db.Column(db.Integer, db.ForeignKey('baraja.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Relationship collections into the four sub-deck tables.
    baraja1 = db.relationship('Baraja1')
    baraja2 = db.relationship('Baraja2')
    baraja3 = db.relationship('Baraja3')
    baraja4 = db.relationship('Baraja4')
class Note(db.Model, UserMixin, Base):
    """A user-authored note — the website's main purpose."""
    __tablename__ = 'note'
    id = db.Column(db.Integer, primary_key=True)
    data = db.Column(db.String(10000))  # note body text
    date = db.Column(db.DateTime(timezone=True), default=func.now())  # creation timestamp
    # Directly related to the integer ``id`` attribute of the User class.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
class User(db.Model, UserMixin, Base):
    """Application user; owns cards, notes and the four sub-decks."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # unique=True makes it impossible for two users to share the same e-mail.
    email = db.Column(db.String(150), unique=True)
    # NOTE(review): ensure only a password *hash* is ever stored here.
    password = db.Column(db.String(150))
    user_name = db.Column(db.String(150))
    # One-to-many relationship collections keyed by this user's id.
    cartas = db.relationship('Carta')
    notes = db.relationship('Note')
    baraja1 = db.relationship('Baraja1')
    baraja2 = db.relationship('Baraja2')
    baraja3 = db.relationship('Baraja3')
    baraja4 = db.relationship('Baraja4')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.