code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Solace
======
*a multilingual support system*
Solace is a multilingual support system developed at Plurk
for end user support. The application design is heavily
influenced by bulletin boards like phpBB and the new
stackoverflow programming community site.
For more information consult the `README` file or have a
look at the `website <http://opensource.plurk.com/solace/>`_.
"""
# we require setuptools because of dependencies and testing.
# we may provide a distutils fallback later.
from setuptools import setup
extra = {}

# Optional feature: when Babel is importable, register message extractors
# so `setup.py extract_messages` can pull translatable strings out of
# Python sources, Jinja2 templates, and JavaScript files.
try:
    import babel
except ImportError:
    pass
else:
    extra['message_extractors'] = {
        'solace': [
            ('**.py', 'python', None),
            ('**/templates/**', 'jinja2', None),
            ('**.js', 'javascript', None)
        ]
    }

# Optional feature: when the solace package itself is importable (it may
# not be during a fresh install), expose its custom setup.py commands.
try:
    from solace import scripts
except ImportError:
    pass
else:
    extra['cmdclass'] = {
        'runserver': scripts.RunserverCommand,
        'initdb': scripts.InitDatabaseCommand,
        'reset': scripts.ResetDatabaseCommand,
        'make_testdata': scripts.MakeTestDataCommand,
        'compile_catalog': scripts.CompileCatalogExCommand,
        'compress_deps': scripts.CompressDependenciesCommand
    }

setup(
    name='Solace',
    version='0.2',
    license='BSD',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Multilangual User Support Platform',
    long_description=__doc__,
    packages=['solace', 'solace.views', 'solace.i18n', 'solace.utils'],
    zip_safe=False,
    platforms='any',
    test_suite='solace.tests.suite',
    install_requires=[
        'Werkzeug>=0.5.1',
        'Jinja2>=2.4',
        'Babel',
        'SQLAlchemy>=0.5.5',
        'creoleparser',
        'simplejson',
        'translitcodec'
    ],
    tests_require=[
        'lxml',
        'html5lib'
    ], **extra
)
| [
"setuptools.setup"
] | [((1275, 1780), 'setuptools.setup', 'setup', ([], {'name': '"""Solace"""', 'version': '"""0.2"""', 'license': '"""BSD"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Multilangual User Support Platform"""', 'long_description': '__doc__', 'packages': "['solace', 'solace.views', 'solace.i18n', 'solace.utils']", 'zip_safe': '(False)', 'platforms': '"""any"""', 'test_suite': '"""solace.tests.suite"""', 'install_requires': "['Werkzeug>=0.5.1', 'Jinja2>=2.4', 'Babel', 'SQLAlchemy>=0.5.5',\n 'creoleparser', 'simplejson', 'translitcodec']", 'tests_require': "['lxml', 'html5lib']"}), "(name='Solace', version='0.2', license='BSD', author='<NAME>',\n author_email='<EMAIL>', description=\n 'Multilangual User Support Platform', long_description=__doc__,\n packages=['solace', 'solace.views', 'solace.i18n', 'solace.utils'],\n zip_safe=False, platforms='any', test_suite='solace.tests.suite',\n install_requires=['Werkzeug>=0.5.1', 'Jinja2>=2.4', 'Babel',\n 'SQLAlchemy>=0.5.5', 'creoleparser', 'simplejson', 'translitcodec'],\n tests_require=['lxml', 'html5lib'], **extra)\n", (1280, 1780), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python3
import discord
import config
import util
from functools import reduce
from handler import Handler
class ListWordsHandler(Handler):
    """Replies with the configured list of trigger words when asked."""

    name = "listwords"

    async def message_handler(self, message, jail, bonkbot):
        """Handle one message; return True when this handler consumed it."""
        print("Starting listwords handler")
        trigger = self.cf.get("list_words_trigger_phrase")
        # Only respond when the trigger phrase appears AND the bot is mentioned.
        if trigger not in message.content.lower():
            return False
        if not util.is_mentioned(message, bonkbot):
            return False
        await message.channel.send(util.list_trigger_words())
        return True
| [
"util.is_mentioned",
"util.list_trigger_words"
] | [((367, 402), 'util.is_mentioned', 'util.is_mentioned', (['message', 'bonkbot'], {}), '(message, bonkbot)\n', (384, 402), False, 'import util\n'), ((443, 468), 'util.list_trigger_words', 'util.list_trigger_words', ([], {}), '()\n', (466, 468), False, 'import util\n')] |
#! python3
from sys import argv
from pathlib import Path
from re import compile
from enum import Enum
from inflection import camelize
# Matches one PHP `use <Fully\Qualified\Class>;` import line.
RE_CLASS = compile(r'^use [a-zA-Z_][a-zA-Z0-9_\\]*;$')
# Matches one `Type $name` constructor parameter, optional trailing comma.
RE_PARAM = compile(r'^[a-zA-Z_][a-zA-Z0-9_]* \$[a-zA-Z_][a-zA-Z0-9_]*,?$')
# Literal marker lines that delimit the sections of a *.tpl.php source.
PARAM_BEGIN = r'function ('
PARAM_END = r') {'
PARAM_EMPTY = r'function () {'
HEAD_END = r'?>'
FOOT_BEGIN = r'<?php'
FOOT_END = r'};'
class Stage(Enum):
    """Parser states for partition(): imports, then parameters, then body."""
    CLASS = 1
    PARAMETER = 2
    TEMPLATE = 3
def partition(file):
    """Split a *.tpl.php source into (classes, parameters, template).

    Raises Exception on any line that does not fit the current section,
    and when the template body does not end with the required footer.
    """
    classes = [
        r'use Exception;',
        r'use lzx\html\Template;',
    ]
    parameters = []
    template = []
    stage = Stage.CLASS
    for line_no, raw in enumerate(file, 1):
        if line_no == 1:
            continue
        line = raw.strip()
        if stage is Stage.TEMPLATE:
            template.append(line)
            continue
        if stage is Stage.CLASS:
            if not line:
                continue
            if RE_CLASS.fullmatch(line):
                classes.append(line)
                continue
            if line in (PARAM_BEGIN, PARAM_EMPTY):
                stage = Stage.PARAMETER
                continue
            raise Exception(f'Error: line {line_no}: {line}')
        # stage is Stage.PARAMETER
        if RE_PARAM.fullmatch(line):
            parameters.append(line.rstrip(','))
            continue
        if line == PARAM_END:
            continue
        if line == HEAD_END:
            stage = Stage.TEMPLATE
            continue
        raise Exception(f'Error: line {line_no}: {line}')
    if len(template) < 2 or template[-2] != FOOT_BEGIN or template[-1] != FOOT_END:
        raise Exception(
            f'Error: template should end with {FOOT_BEGIN}' + "\n" + FOOT_END)
    template = template[:-2]
    classes = sorted(set(classes))
    return (classes, parameters, template)
def func(parameter):
    """Render the PHP getter/setter pair for one `Type $name` parameter."""
    var_type = parameter.partition(' ')[0]
    var_name = parameter.rpartition('$')[2]
    # CamelCase accessor suffix, computed once for both methods.
    accessor = camelize(var_name)
    getter = '''
    public function get''' + accessor + f'(): ?{var_type}' + '''
    {
        if (array_key_exists(''' + f"'{var_name}'" + ''', $this->data)) {
            return $this->data''' + f"['{var_name}'];" + '''
        }
        return null;
    }
'''
    setter = '''
    public function set''' + accessor + f'({parameter}): self' + '''
    {
        if ($this->cache) {
            throw new Exception(self::FINALIZED);
        }
        $this->data''' + f"['{var_name}'] = ${var_name};" + '''
        return $this;
    }
'''
    return getter + setter
def tpl_str(lines):
    """Join template lines into one string, padding the PHP open/close tags.

    Mutates `lines` in place (tag padding) and returns the joined string.
    """
    for idx, text in enumerate(lines):
        if text == r'<?php':
            lines[idx] = r'<?php '
        elif text == r'?>':
            lines[idx] = r' ?>'
    joined = ''.join(lines)
    # preserve newline for TEXT (non-HTML) template
    if joined.replace('?>', '').find('>') < 0:
        joined = "\n".join(lines)
    return joined
def php(namespace, cls_name, classes, parameters, template):
    """Assemble the full generated PHP class source for one template.

    namespace  -- PHP namespace for the generated class
    cls_name   -- CamelCase class name
    classes    -- sorted `use ...;` import lines
    parameters -- `Type $name` strings, each rendered as a getter/setter
    template   -- body lines, joined by tpl_str()
    """
    return r'''<?php

declare(strict_types=1);

/**
 * DO NOT EDIT
 * generated by script/build_template.py
 */

namespace ''' + namespace + ''';

''' + "\n".join(classes) + r'''

class ''' + cls_name + r''' extends Template
{
    public function __construct()
    {
    }
''' + ''.join(func(p) for p in parameters) + r'''
    public function __toString()
    {
        if (!$this->cache) {
            foreach ($this->onBeforeRender as $callback) {
                $callback($this);
            }
            extract($this->data);
            ob_start();
?>
''' + tpl_str(template) + r'''
<?php
            $output = ob_get_clean();
            $this->cache = trim($output);
        }
        return $this->cache;
    }
}
'''
if __name__ == '__main__':
    # Compile every *.tpl.php under the roselife theme into a PHP class.
    for input_file in (Path(__file__).parent.parent / 'server' / 'theme' / 'roselife').glob('**/*.tpl.php'):
        # "foo_bar.tpl.php" -> class name "FooBar".
        cls_name = camelize(input_file.name.replace(
            '.tpl.php', '').replace('.', '_'))
        p = input_file.absolute().with_name(f'{cls_name}.php').parts
        i = p.index('theme')
        # Mirror the theme path under a sibling "gen" directory.
        output_file = Path(*p[:i]).joinpath('gen', *p[i:])
        output_file.parent.mkdir(parents=True, exist_ok=True)
        p = output_file.parent.parts
        i = p.index('theme')
        namespace = 'site\\gen\\' + '\\'.join(p[i:])
        php_current = ''
        if output_file.exists():
            with output_file.open() as output:
                php_current = output.read()
        with input_file.open() as input:
            classes, parameters, template = partition(input)
        php_new = php(namespace, cls_name, classes, parameters, template)
        # Only rewrite (and report) when the generated source actually changed.
        if php_new != php_current:
            with output_file.open('w') as output:
                output.write(php_new)
            print(f'updated: {input_file}')
| [
"inflection.camelize",
"pathlib.Path",
"re.compile"
] | [((148, 192), 're.compile', 'compile', (['"""^use [a-zA-Z_][a-zA-Z0-9_\\\\\\\\]*;$"""'], {}), "('^use [a-zA-Z_][a-zA-Z0-9_\\\\\\\\]*;$')\n", (155, 192), False, 'from re import compile\n'), ((203, 266), 're.compile', 'compile', (['"""^[a-zA-Z_][a-zA-Z0-9_]* \\\\$[a-zA-Z_][a-zA-Z0-9_]*,?$"""'], {}), "('^[a-zA-Z_][a-zA-Z0-9_]* \\\\$[a-zA-Z_][a-zA-Z0-9_]*,?$')\n", (210, 266), False, 'from re import compile\n'), ((4041, 4053), 'pathlib.Path', 'Path', (['*p[:i]'], {}), '(*p[:i])\n', (4045, 4053), False, 'from pathlib import Path\n'), ((2298, 2316), 'inflection.camelize', 'camelize', (['var_name'], {}), '(var_name)\n', (2306, 2316), False, 'from inflection import camelize\n'), ((3734, 3748), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3738, 3748), False, 'from pathlib import Path\n'), ((2044, 2062), 'inflection.camelize', 'camelize', (['var_name'], {}), '(var_name)\n', (2052, 2062), False, 'from inflection import camelize\n')] |
import os
import requests
from bouncer import Bouncer
from flask import Flask, url_for, redirect, \
render_template, session, request, Response, \
flash, get_flashed_messages, jsonify
from flask_login import LoginManager, login_required, login_user, \
logout_user, current_user, UserMixin
from oauthlib.oauth2 import OAuth2Error
from elasticsearch_dsl import Search
from requests_oauthlib import OAuth2Session
from requests.exceptions import HTTPError
from oauth2client.client import verify_id_token
from oauth2client.crypt import AppIdentityError
from urllib import urlencode
import urllib2
from decode_cookie import decodeFlaskCookie
from utils import redact_email, decrypt, encrypt, new_iv
import logging
# Absolute path of the directory containing this module.
basedir = os.path.abspath(os.path.dirname(__file__))

"""App Configuration"""
class Auth:
    """Google OAuth2 project credentials and endpoint configuration.

    CLIENT_ID / CLIENT_SECRET are required environment variables; the
    dashboard host/port/protocol fall back to local-dev defaults.
    """
    CLIENT_ID = os.environ['GOOGLE_CLIENT_ID']
    CLIENT_SECRET = os.environ['GOOGLE_CLIENT_SECRET']
    # os.environ.get(key, default) is equivalent to the previous
    # "set default, then override if key present" pattern, in one step.
    DCC_DASHBOARD_HOST = os.environ.get('DCC_DASHBOARD_HOST', 'localhost')
    DCC_DASHBOARD_PORT = os.environ.get('DCC_DASHBOARD_PORT', '5000')
    DCC_DASHBOARD_PROTOCOL = os.environ.get('DCC_DASHBOARD_PROTOCOL', 'https')
    # Where Google redirects after the consent screen (no port component).
    REDIRECT_URI = DCC_DASHBOARD_PROTOCOL+'://'+DCC_DASHBOARD_HOST+'/gCallback'
    AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
    TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
    USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
    REVOKE_TOKEN = 'https://accounts.google.com/o/oauth2/revoke'
    SCOPE = ['https://www.googleapis.com/auth/userinfo.profile',
             'https://www.googleapis.com/auth/userinfo.email']
class Config:
    """Base config"""
    APP_NAME = "Test Google Login"
    # `or` (not a get() default) so an *empty* env var also falls back.
    SECRET_KEY = os.environ.get("SECRET_KEY") or "somethingsecret"
    GOOGLE_SITE_VERIFICATION_CODE = os.environ.get("GOOGLE_SITE_VERIFICATION_CODE") or ""
    # Make cookies secure so that the tokens stored in them are safe and only travel over https
    SESSION_COOKIE_SECURE = True
    REMEMBER_COOKIE_SECURE = True
class DevConfig(Config):
    """Dev config"""
    DEBUG = True


class ProdConfig(Config):
    """Production config"""
    DEBUG = False


# Named configuration registry consumed by app.config.from_object().
config = {
    "dev": DevConfig,
    "prod": ProdConfig,
    "default": DevConfig
}
"""APP creation and configuration"""
def set_prod_logging_level(logger, level):
    """Apply `level` to the logger's production handlers and, when the
    logger would otherwise suppress records at that level, to the logger
    itself."""
    for h in logger.handlers:
        # Matched by class *name* so the handler class need not be imported.
        if h.__class__.__name__ == 'ProductionHandler':
            h.level = level
    if not logger.isEnabledFor(level):
        logger.setLevel(level)
"""APP creation and configuration"""
# Application object, always configured for production; INFO-level logs.
app = Flask(__name__)
app.config.from_object(config['prod'])
set_prod_logging_level(app.logger, logging.INFO)

# Flask-Login setup: unauthenticated users go to the 'login' view.
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.session_protection = "strong"

# make a global bouncer instance to avoid needless re-instantiation
if os.getenv('EMAIL_WHITELIST_NAME'):
    whitelist_checker = Bouncer(os.getenv('EMAIL_WHITELIST_NAME'))
else:
    whitelist_checker = None
else:
whitelist_checker = None
class User(UserMixin):
    """Flask-Login user whose state lives entirely in the session cookie."""

    def __init__(self, user=None, name=None, picture=None):
        """
        Pulls the user's info from the session. We use @property to keep the
        session as the one source of truth, but allow access and setting of
        user properties here.
        """
        if user is not None:
            session['email'] = user
        if name is not None:
            session['name'] = name
        if picture is not None:
            session['avatar'] = picture
        # self._created_at = session.get('created_at', datetime.datetime.utcnow())

    @property
    def email(self):
        return session.get('email', None)

    @email.setter
    def email(self, value):
        session['email'] = value

    @property
    def name(self):
        return session.get('name', None)

    @name.setter
    def name(self, value):
        session['name'] = value

    @property
    def picture(self):
        # Stored under the 'avatar' session key, not 'picture'.
        return session.get('avatar', None)

    @picture.setter
    def picture(self, value):
        session['avatar'] = value

    @property
    def is_active(self):
        # A user is active exactly when an email is present in the session.
        return self.email is not None

    @property
    def is_authenticated(self):
        # NOTE(review): refresh_token reads session['refresh_iv'] without a
        # default, so this raises KeyError when no token was ever stored --
        # confirm callers only reach this after login.
        return self.refresh_token is not None

    @property
    def is_anonymous(self):
        return self.email is None

    def get_id(self):
        # Flask-Login identifies the user by email address.
        return self.email

    @property
    def access_token(self):
        # Tokens are kept encrypted; the IV is stored alongside them.
        encrypted_token = session.get('access_token', None)
        iv = session['access_iv']
        return decrypt(encrypted_token, iv)

    @access_token.setter
    def access_token(self, value):
        iv = new_iv()
        session['access_iv'] = iv
        session['access_token'] = encrypt(value, iv)

    @property
    def refresh_token(self):
        encrypted_token = session.get('refresh_token', None)
        iv = session['refresh_iv']
        return decrypt(encrypted_token, iv)

    @refresh_token.setter
    def refresh_token(self, value):
        # store the initialization vector in the session. It doesn't need to be secure
        iv = new_iv()
        session['refresh_iv'] = iv
        session['refresh_token'] = encrypt(value, iv)

    def logout(self):
        """Clean up all the stuff we left in the session cookie"""
        # as per google's docs "The token can be an access token or a refresh token.
        # If the token is an access token and it has a corresponding refresh token,
        # the refresh token will also be revoked."
        # NOTE(review): session['access_token'] holds the *encrypted* token;
        # Google's revoke endpoint presumably expects the decrypted value --
        # verify this request actually succeeds.
        if session.get('access_token'):
            res = requests.post(Auth.REVOKE_TOKEN, params={'token': session['access_token']},
                                headers={'content-type': 'application/x-www-form-urlencoded'})
            if res.status_code != 200:
                print('Failed to revoke tokens. Expected 200 response, received '
                      '{} with message: {}'.format(res.status_code, res.text))
        # Best-effort removal of every session key we may have written.
        for attr in 'email', 'name', 'avatar', 'access_token', 'refresh_token':
            try:
                del session[attr]
            except KeyError:
                print('Could not clear {} from session'.format(attr))
                pass
@login_manager.user_loader
def load_user(user_id):
    # All user state lives in the session cookie, so a bare User()
    # rehydrates from there; user_id itself is not consulted.
    return User()
""" OAuth Session creation """
def get_google_auth(state=None, token=None):
    """Build an OAuth2Session for Google.

    With `token`: a session bound to existing credentials.
    With `state`: a session resuming an in-flight authorization flow.
    Otherwise: a fresh session that can start the flow.
    """
    if token:
        return OAuth2Session(Auth.CLIENT_ID, token=token)
    if state:
        return OAuth2Session(Auth.CLIENT_ID, state=state,
                             redirect_uri=Auth.REDIRECT_URI)
    return OAuth2Session(Auth.CLIENT_ID, redirect_uri=Auth.REDIRECT_URI,
                         scope=Auth.SCOPE)
def query_es_rna_seq(es_object, index, query_params, cardinality):
    """Count distinct values of `cardinality` among documents matching
    every query triple in `query_params`.

    Equivalent to a bool/must query with a cardinality aggregation, e.g.:

        {"query": {"bool": {"must": [...]}},
         "aggs": {"filtered_jobs": {"cardinality": {"field": ...}}}}

    es_object -- the es object to query against
    index -- the name of the index to query on
    query_params -- tuples with form (query type, field, value)
    cardinality -- field to get the cardinality from
    """
    search = Search(using=es_object, index=index)
    # Chain each (type, field, value) triple onto the search object.
    for query_type, field, value in query_params:
        search = search.query(query_type, **{field: value})
    # Approximate distinct count of the target field.
    search.aggs.metric("filtered_jobs", 'cardinality', field=cardinality,
                       precision_threshold="40000")
    response = search.execute()
    return response.aggregations.filtered_jobs.value
@app.route('/')
def index():
    """
    Render the main page.
    """
    # Delegates to the generic template renderer for the index page.
    return html_rend('index')
def parse_token():
    """
    Parses the Authorization token from the request header
    :return: the bearer and token string
    :raises AssertionError: when the Authorization header is missing
    """
    authorization_header = request.headers.get("Authorization", None)
    # Raise explicitly instead of using a bare `assert` so the check is not
    # stripped under `python -O`; AssertionError is kept because callers
    # (e.g. /authorization) catch exactly that type.
    if authorization_header is None:
        raise AssertionError("No Authorization header in the request")
    parts = authorization_header.split()
    # Return the bearer and token string
    return parts[0], parts[1]
def new_google_access_token():
    """
    Tries to get new access token.
    If refresh fails an OAuth2Error will be raised
    """
    refresh_token = current_user.refresh_token
    oauth = get_google_auth()
    extra = {
        'client_id': Auth.CLIENT_ID,
        'client_secret': Auth.CLIENT_SECRET,
    }
    # this call may throw an OAuth2Error
    resp = oauth.refresh_token(Auth.TOKEN_URI, refresh_token=refresh_token, **extra)
    # Persist the fresh token (encrypted) back into the session.
    current_user.access_token = resp['access_token']
    return resp['access_token']
def make_request(url, headers):
    """Proxy a request to `url`, mirroring content type and encoding.

    Returns a flask Response on success, or (message, status code) when
    the upstream replies with an HTTP error.
    """
    try:
        upstream = urllib2.urlopen(urllib2.Request(url, headers=headers))
        mime = upstream.headers['content-type']
        response = Response(upstream.read(), mimetype=mime)
        # Forward the content-encoding header only when upstream sent one.
        if 'content-encoding' in upstream.headers.keys():
            response.headers['content-encoding'] = upstream.headers['content-encoding']
        return response
    except urllib2.HTTPError as e:
        return e.message, e.code
@app.route('/check_session/<cookie>')
def check_session(cookie):
    """Validate a flask session cookie on behalf of another service.

    The caller authenticates with a shared bearer token and passes the raw
    session cookie in the URL; returns the user's identity (or an error)
    as JSON.
    """
    if not request.headers.get("Authorization", None):
        return jsonify({"error": "No Authorization header in the request"})
    else:
        # Make sure the auth token is the right one
        try:
            bearer, auth_token = parse_token()
            assert bearer == "Bearer", "Authorization must start with Bearer"
            assert auth_token == os.getenv("LOG_IN_TOKEN", '<PASSWORD>!')
        except AssertionError as e:
            response = {
                'error': e.message
            }
            return jsonify(response)
        # we have to decode the cookie manually b/c we're not getting it automatically through
        # flask, rather it has to be passed to and fro with node and client and dashboard
        decoded_cookie = decodeFlaskCookie(os.getenv('SECRET_KEY', 'somethingsecret'), cookie)
        email = decoded_cookie['email']
        if email is None:
            response = {
                'error': 'No user is stored in the session. The user is not '
                         'logged in.'
            }
        else:
            response = {
                'email': email,
                'name': decoded_cookie['name'],
                'avatar': decoded_cookie['avatar']
            }
        return jsonify(response)
def _get_user_info_from_token(token=None):
    """
    Fetch the user's info from Google's user-info endpoint.

    Defaults to the access token stored in the session; returns the raw
    response object.
    """
    if token is None:
        token = current_user.access_token
    oauth = get_google_auth(token={'access_token': token})
    return oauth.get(Auth.USER_INFO)
def get_user_info(token=None):
    """
    Get user's info, retry with refreshed token if failed, and raise ValueError
    or OAuth2Error if failure
    If access token is provided, use that first
    """
    resp = _get_user_info_from_token(token=token)
    if 400 <= resp.status_code < 500:
        # An explicitly supplied token gets no retry; it is simply invalid.
        if token:
            raise ValueError('The provided token was not accepted')
        # token expired, try once more
        try:
            new_google_access_token()
        except OAuth2Error:
            # erase old tokens if they're broken / expired
            # NOTE(review): session.pop() without a default raises KeyError
            # when the key is already gone -- confirm that is intended.
            app.logger.warning('Could not refresh access token')
            session.pop('access_token')
            session.pop('refresh_token')
            raise
        resp = _get_user_info_from_token()
    # If there is a 5xx error, or some unexpected 4xx we will return the message but
    # leave the token's intact b/c they're not necessarily to blame for the error.
    if resp.status_code != 200:
        raise ValueError(resp.text)
    return resp.json()
@app.route('/me')
def me():
    """
    returns information about the user making the request.
    If there are any problems getting the user's info, refreshing the token, etc
    then just return the anonymous user.
    """
    if current_user.is_anonymous:
        app.logger.debug('Request %s by user anonymous', request.path)
        return jsonify({'name': 'anonymous'})
    try:
        user_data = get_user_info()
    except (ValueError, OAuth2Error):
        app.logger.error('Request path %s by unknown user', request.path)
        return jsonify({'name': 'anonymous'})
    profile = {
        'name': user_data['name'],
        'email': user_data['email'],
        'avatar': user_data['picture'],
    }
    app.logger.info('Request path %s by user with email %s', request.path, user_data['email'])
    return jsonify(profile)
@app.route('/authorization')
def authorization():
    """
    This endpoint determines if the caller is authorized of not.
    If there is a bearer token, we try and use that. Otherwise we use
    the access token in the session. If the token fails, then try and
    refresh.
    If we get a working token, then ping google for user info, get
    their email and check it against bouncer.
    The user needs to be logged in with Google in order to be
    authorized. The method returns the following HTTP status
    codes:
    204 user is authorized regardless of whether user
    is on the whitelist or not
    401 user info is not available
    403 user is not authorized
    """
    try:
        # parsing succeeds if there is an auth header
        bearer, auth_token = parse_token()
    except AssertionError:
        # No header: fall back to the session's access token below.
        auth_token = None
    else:
        if bearer != "Bearer":
            return "Authorization must start with Bearer", 401
    if auth_token is None and current_user.is_anonymous:
        return "No token provided", 401
    # use access token in session
    try:
        user_data = get_user_info(auth_token)
    except ValueError as e:
        return e.message, 401
    except OAuth2Error as e:
        return 'Failed to get user info: ' + e.message, 401
    # Now that we have the user data we can verify the email
    if whitelist_checker is None:
        # No whitelist configured: any logged-in Google account passes.
        app.logger.info(
            'Request path %s. No whitelist; User with email %s is logged in',
            request.path, user_data['email'])
        return '', 204
    elif whitelist_checker.is_authorized(user_data['email']):
        app.logger.info(
            'Request path %s. User with email %s is authorized',
            request.path, user_data['email'])
        return '', 204
    else:
        app.logger.info(
            'Request path %s. User with email %s is not authorized',
            request.path, user_data['email'])
        return '', 403
@app.route('/<name>.html')
def html_rend(name):
    """
    Render templates based on their name.
    The index page additionally receives whitelist/contact context; every
    other page renders with no extra context.
    """
    # Removed two locals that were computed but never used:
    # os.environ['DCC_DASHBOARD_SERVICE'] and the DCC_CORE_CLIENT_VERSION
    # lookup -- neither was passed to any template.
    if name == 'index':
        whitelist_validation_required = bool(os.getenv('EMAIL_WHITELIST_NAME'))
        contact_email = os.getenv('CONTACT_EMAIL', '')
        return render_template(name + '.html',
                               whitelist_validation_required=whitelist_validation_required,
                               contact_email=contact_email)
    # The former dedicated 'unauthorized' branch was identical to this
    # fallthrough, so it was folded in.
    return render_template(name + '.html')
@app.route('/file_browser/')
def html_rend_file_browser():
    """
    Helper method to redirect URLs ending in <url>/file_browser/
    to the file browser page.
    """
    target = url_for('html_rend', name='file_browser')
    return redirect(target)
@app.route('/boardwalk')
def boardwalk():
    # NOTE(review): url_for('boardwalk') resolves to this very view, which
    # looks like a redirect loop -- confirm the intended target endpoint
    # (perhaps one registered elsewhere under the same name).
    return redirect(url_for('boardwalk'))


@app.route('/privacy')
def privacy():
    # NOTE(review): same apparent self-redirect as /boardwalk -- verify.
    return redirect(url_for('privacy'))
@app.route('/unauthorized')
def unauthorized():
    """Explain to the user why access was denied."""
    return render_template(
        'unauthorized.html',
        contact=os.getenv('CONTACT_EMAIL', ''),
        project=os.getenv('PROJECT_NAME', ''),
        account=request.args.get('account'))
@app.route('/login')
def login():
    """
    Endpoint to Login into the page
    """
    if current_user.is_authenticated:
        app.logger.info('Request path %s. Current user with ID %s is authenticated; redirecting to index URL', request.path, current_user.get_id())
        return redirect(url_for('index'))
    google = get_google_auth()
    # Offline access yields a refresh token; force the account chooser
    # and consent screen every time.
    auth_url, state = google.authorization_url(
        Auth.AUTH_URI, access_type='offline',
        prompt='select_account consent')
    # Saved so the callback can resume the flow with the same state value.
    session['oauth_state'] = state
    app.logger.info('Request path %s. Redirecting current user with ID %s to authorization URL', request.path, current_user.get_id())
    return redirect(auth_url)
@app.route('/gCallback')
def callback():
    """
    Callback method required by Google's OAuth 2.0
    """
    if current_user is not None and current_user.is_authenticated:
        app.logger.info('Request path %s. Current user with ID %s is authenticated; redirecting to index URL', request.path, current_user.get_id())
        return redirect(url_for('index'))
    if 'error' in request.args:
        # Google reported a problem (e.g. the user refused consent).
        if request.args.get('error') == 'access_denied':
            if current_user is not None:
                app.logger.error('Request path %s. Current user with ID %s access is denied', request.path, current_user.get_id())
            else:
                app.logger.error('Request path %s. Access is denied for current user None', request.path)
            return 'You are denied access.'
        return 'Error encountered.'
    if 'code' not in request.args and 'state' not in request.args:
        # Not a valid OAuth callback request; restart the login flow.
        if current_user is not None:
            app.logger.info('Request path %s. Redirecting current user with ID %s to login URL', request.path, current_user.get_id())
        else:
            app.logger.info('Request path %s. Redirecting current user None to login URL', request.path)
        return redirect(url_for('login'))
    else:
        # Exchange the authorization code for tokens, resuming saved state.
        google = get_google_auth(state=session['oauth_state'])
        try:
            token = google.fetch_token(
                Auth.TOKEN_URI,
                client_secret=Auth.CLIENT_SECRET,
                authorization_response=request.url)
        except HTTPError:
            if current_user is not None:
                app.logger.error('Request path %s. Could not fetch token for current user with ID %s', request.path, current_user.get_id())
            else:
                app.logger.error('Request path %s. Could not fetch token for current user None', request.path)
            return 'HTTPError occurred.'
        # Testing the token verification step.
        try:
            # jwt = verify_id_token(token['id_token'], Auth.CLIENT_ID)
            verify_id_token(token['id_token'], Auth.CLIENT_ID)
        except AppIdentityError:
            app.logger.error('Request path %s. Could not verify token for current user with ID %s', request.path, current_user.get_id())
            return 'Could not verify token.'
        # Check if you have the appropriate domain
        # Commenting this section out to let anyone with
        # a google account log in.
        # if 'hd' not in jwt or jwt['hd'] != 'ucsc.edu':
        #     flash('You must login with a ucsc.edu account. \
        #           Please try again.', 'error')
        #     return redirect(url_for('index'))
        google = get_google_auth(token=token)
        resp = google.get(Auth.USER_INFO)
        if resp.status_code == 200:
            user_data = resp.json()
            email = user_data['email']
            # If so configured, check for whitelist and redirect to
            # unauthorized page if not in whitelist, e.g.,
            if whitelist_checker is not None and not whitelist_checker.is_authorized(email):
                app.logger.info('Request path %s. User with email %s is not authorized', request.path, user_data['email'])
                return redirect(url_for('unauthorized', account=redact_email(email)))
            # Hydrate the session-backed user and store the encrypted tokens.
            user = User()
            for attr in 'email', 'name', 'picture':
                setattr(user, attr, user_data[attr])
            user.refresh_token = token['refresh_token']
            user.access_token = token['access_token']
            login_user(user)
            # Empty flashed messages
            get_flashed_messages()
            # Set a new success flash message
            flash('You are now logged in!', 'success')
            app.logger.info('Request path %s. User with email %s was logged in; redirecting to index URL', request.path, user_data['email'])
            return redirect(url_for('boardwalk'))
        app.logger.error('Could not fetch information for current user')
        return 'Could not fetch your information.'
@app.route('/logout')
@login_required
def logout():
    """Revoke tokens, clear session state, and end the Flask-Login session."""
    app.logger.info('Request path %s. Current user with ID %s will be logged out', request.path, current_user.get_id())
    current_user.logout()
    logout_user()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Development entry point; production runs behind a proper WSGI server.
    app.run(host='0.0.0.0', port=80)
| [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.args.get",
"requests.post",
"oauth2client.client.verify_id_token",
"flask.Flask",
"flask.get_flashed_messages",
"utils.redact_email",
"elasticsearch_dsl.Search",
"utils.decrypt",
"flask.session.pop",
"flask.request.headers.get... | [((2813, 2828), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2818, 2828), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((2934, 2951), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (2946, 2951), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((3103, 3136), 'os.getenv', 'os.getenv', (['"""EMAIL_WHITELIST_NAME"""'], {}), "('EMAIL_WHITELIST_NAME')\n", (3112, 3136), False, 'import os\n'), ((753, 778), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (768, 778), False, 'import os\n'), ((6752, 6831), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', (['Auth.CLIENT_ID'], {'redirect_uri': 'Auth.REDIRECT_URI', 'scope': 'Auth.SCOPE'}), '(Auth.CLIENT_ID, redirect_uri=Auth.REDIRECT_URI, scope=Auth.SCOPE)\n', (6765, 6831), False, 'from requests_oauthlib import OAuth2Session\n'), ((7652, 7688), 'elasticsearch_dsl.Search', 'Search', ([], {'using': 'es_object', 'index': 'index'}), '(using=es_object, index=index)\n', (7658, 7688), False, 'from elasticsearch_dsl import Search\n'), ((8299, 8341), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""', 'None'], {}), "('Authorization', None)\n", (8318, 8341), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((13143, 13158), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (13150, 13158), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((15354, 15399), 'os.getenv', 'os.getenv', (['"""DCC_CORE_CLIENT_VERSION"""', '"""1.1.0"""'], {}), "('DCC_CORE_CLIENT_VERSION', '1.1.0')\n", (15363, 15399), False, 'import os\n'), ((15847, 15878), 'flask.render_template', 'render_template', 
(["(name + '.html')"], {}), "(name + '.html')\n", (15862, 15878), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((16343, 16370), 'flask.request.args.get', 'request.args.get', (['"""account"""'], {}), "('account')\n", (16359, 16370), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((16385, 16414), 'os.getenv', 'os.getenv', (['"""PROJECT_NAME"""', '""""""'], {}), "('PROJECT_NAME', '')\n", (16394, 16414), False, 'import os\n'), ((16429, 16459), 'os.getenv', 'os.getenv', (['"""CONTACT_EMAIL"""', '""""""'], {}), "('CONTACT_EMAIL', '')\n", (16438, 16459), False, 'import os\n'), ((16471, 16562), 'flask.render_template', 'render_template', (['"""unauthorized.html"""'], {'contact': 'contact', 'project': 'project', 'account': 'account'}), "('unauthorized.html', contact=contact, project=project,\n account=account)\n", (16486, 16562), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((17229, 17247), 'flask.redirect', 'redirect', (['auth_url'], {}), '(auth_url)\n', (17237, 17247), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((21460, 21481), 'flask_login.current_user.logout', 'current_user.logout', ([], {}), '()\n', (21479, 21481), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((21486, 21499), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (21497, 21499), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((1096, 1113), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1111, 1113), False, 'import os\n'), ((1208, 1225), 'os.environ.keys', 'os.environ.keys', ([], 
{}), '()\n', (1223, 1225), False, 'import os\n'), ((1324, 1341), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1339, 1341), False, 'import os\n'), ((1956, 1984), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (1970, 1984), False, 'import os\n'), ((2042, 2089), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_SITE_VERIFICATION_CODE"""'], {}), "('GOOGLE_SITE_VERIFICATION_CODE')\n", (2056, 2089), False, 'import os\n'), ((3170, 3203), 'os.getenv', 'os.getenv', (['"""EMAIL_WHITELIST_NAME"""'], {}), "('EMAIL_WHITELIST_NAME')\n", (3179, 3203), False, 'import os\n'), ((3867, 3893), 'flask.session.get', 'session.get', (['"""email"""', 'None'], {}), "('email', None)\n", (3878, 3893), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((4024, 4049), 'flask.session.get', 'session.get', (['"""name"""', 'None'], {}), "('name', None)\n", (4035, 4049), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((4180, 4207), 'flask.session.get', 'session.get', (['"""avatar"""', 'None'], {}), "('avatar', None)\n", (4191, 4207), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((4659, 4692), 'flask.session.get', 'session.get', (['"""access_token"""', 'None'], {}), "('access_token', None)\n", (4670, 4692), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((4742, 4770), 'utils.decrypt', 'decrypt', (['encrypted_token', 'iv'], {}), '(encrypted_token, iv)\n', (4749, 4770), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((4845, 4853), 'utils.new_iv', 'new_iv', ([], {}), '()\n', (4851, 4853), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((4922, 4940), 
'utils.encrypt', 'encrypt', (['value', 'iv'], {}), '(value, iv)\n', (4929, 4940), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((5011, 5045), 'flask.session.get', 'session.get', (['"""refresh_token"""', 'None'], {}), "('refresh_token', None)\n", (5022, 5045), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((5096, 5124), 'utils.decrypt', 'decrypt', (['encrypted_token', 'iv'], {}), '(encrypted_token, iv)\n', (5103, 5124), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((5288, 5296), 'utils.new_iv', 'new_iv', ([], {}), '()\n', (5294, 5296), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((5367, 5385), 'utils.encrypt', 'encrypt', (['value', 'iv'], {}), '(value, iv)\n', (5374, 5385), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n'), ((5707, 5734), 'flask.session.get', 'session.get', (['"""access_token"""'], {}), "('access_token')\n", (5718, 5734), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((6556, 6598), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', (['Auth.CLIENT_ID'], {'token': 'token'}), '(Auth.CLIENT_ID, token=token)\n', (6569, 6598), False, 'from requests_oauthlib import OAuth2Session\n'), ((6628, 6702), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', (['Auth.CLIENT_ID'], {'state': 'state', 'redirect_uri': 'Auth.REDIRECT_URI'}), '(Auth.CLIENT_ID, state=state, redirect_uri=Auth.REDIRECT_URI)\n', (6641, 6702), False, 'from requests_oauthlib import OAuth2Session\n'), ((9123, 9160), 'urllib2.Request', 'urllib2.Request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (9138, 9160), False, 'import urllib2\n'), ((9179, 9199), 'urllib2.urlopen', 'urllib2.urlopen', (['req'], {}), '(req)\n', (9194, 9199), False, 'import urllib2\n'), ((9693, 9735), 'flask.request.headers.get', 
'request.headers.get', (['"""Authorization"""', 'None'], {}), "('Authorization', None)\n", (9712, 9735), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((9752, 9812), 'flask.jsonify', 'jsonify', (["{'error': 'No Authorization header in the request'}"], {}), "({'error': 'No Authorization header in the request'})\n", (9759, 9812), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((10934, 10951), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (10941, 10951), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((12694, 12724), 'flask.jsonify', 'jsonify', (["{'name': 'anonymous'}"], {}), "({'name': 'anonymous'})\n", (12701, 12724), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((15528, 15558), 'os.getenv', 'os.getenv', (['"""CONTACT_EMAIL"""', '""""""'], {}), "('CONTACT_EMAIL', '')\n", (15537, 15558), False, 'import os\n'), ((15574, 15700), 'flask.render_template', 'render_template', (["(name + '.html')"], {'whitelist_validation_required': 'whitelist_validation_required', 'contact_email': 'contact_email'}), "(name + '.html', whitelist_validation_required=\n whitelist_validation_required, contact_email=contact_email)\n", (15589, 15700), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((15804, 15835), 'flask.render_template', 'render_template', (["(name + '.html')"], {}), "(name + '.html')\n", (15819, 15835), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((16071, 16112), 'flask.url_for', 'url_for', (['"""html_rend"""'], 
{'name': '"""file_browser"""'}), "('html_rend', name='file_browser')\n", (16078, 16112), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((16178, 16198), 'flask.url_for', 'url_for', (['"""boardwalk"""'], {}), "('boardwalk')\n", (16185, 16198), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((16260, 16278), 'flask.url_for', 'url_for', (['"""privacy"""'], {}), "('privacy')\n", (16267, 16278), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((17195, 17216), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (17214, 17216), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((21433, 21454), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (21452, 21454), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((21520, 21536), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (21527, 21536), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((5754, 5896), 'requests.post', 'requests.post', (['Auth.REVOKE_TOKEN'], {'params': "{'token': session['access_token']}", 'headers': "{'content-type': 'application/x-www-form-urlencoded'}"}), "(Auth.REVOKE_TOKEN, params={'token': session['access_token']},\n headers={'content-type': 'application/x-www-form-urlencoded'})\n", (5767, 5896), False, 'import requests\n'), ((10462, 10504), 'os.getenv', 'os.getenv', (['"""SECRET_KEY"""', '"""somethingsecret"""'], {}), "('SECRET_KEY', 'somethingsecret')\n", (10471, 10504), False, 'import os\n'), ((12897, 12927), 'flask.jsonify', 'jsonify', 
(["{'name': 'anonymous'}"], {}), "({'name': 'anonymous'})\n", (12904, 12927), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((15469, 15502), 'os.getenv', 'os.getenv', (['"""EMAIL_WHITELIST_NAME"""'], {}), "('EMAIL_WHITELIST_NAME')\n", (15478, 15502), False, 'import os\n'), ((16818, 16839), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (16837, 16839), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((16865, 16881), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (16872, 16881), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((17550, 17571), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (17569, 17571), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((17597, 17613), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (17604, 17613), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((17658, 17683), 'flask.request.args.get', 'request.args.get', (['"""error"""'], {}), "('error')\n", (17674, 17683), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((18462, 18478), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (18469, 18478), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((19260, 19310), 'oauth2client.client.verify_id_token', 'verify_id_token', (["token['id_token']", 'Auth.CLIENT_ID'], {}), "(token['id_token'], Auth.CLIENT_ID)\n", (19275, 19310), False, 'from oauth2client.client 
import verify_id_token\n'), ((20777, 20793), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (20787, 20793), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((20843, 20865), 'flask.get_flashed_messages', 'get_flashed_messages', ([], {}), '()\n', (20863, 20865), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((20924, 20966), 'flask.flash', 'flash', (['"""You are now logged in!"""', '"""success"""'], {}), "('You are now logged in!', 'success')\n", (20929, 20966), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((10046, 10086), 'os.getenv', 'os.getenv', (['"""LOG_IN_TOKEN"""', '"""<PASSWORD>!"""'], {}), "('LOG_IN_TOKEN', '<PASSWORD>!')\n", (10055, 10086), False, 'import os\n'), ((10216, 10233), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (10223, 10233), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((11922, 11949), 'flask.session.pop', 'session.pop', (['"""access_token"""'], {}), "('access_token')\n", (11933, 11949), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((11962, 11990), 'flask.session.pop', 'session.pop', (['"""refresh_token"""'], {}), "('refresh_token')\n", (11973, 11990), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((18295, 18316), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (18314, 18316), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((21136, 21156), 'flask.url_for', 'url_for', 
(['"""boardwalk"""'], {}), "('boardwalk')\n", (21143, 21156), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((17853, 17874), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (17872, 17874), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((19458, 19479), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (19477, 19479), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((18924, 18945), 'flask_login.current_user.get_id', 'current_user.get_id', ([], {}), '()\n', (18943, 18945), False, 'from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin\n'), ((20502, 20521), 'utils.redact_email', 'redact_email', (['email'], {}), '(email)\n', (20514, 20521), False, 'from utils import redact_email, decrypt, encrypt, new_iv\n')] |
import io, os, re
from os import path
from setuptools import find_packages
from distutils.core import setup
# pip's single-source version method as described here:
# https://python-packaging-user-guide.readthedocs.io/single_source_version/
def read(*names, **kwargs):
    """Return the text of a file located relative to this module.

    Path segments in *names* are joined onto this file's directory;
    the encoding defaults to utf8 and can be overridden via the
    ``encoding`` keyword argument.
    """
    encoding = kwargs.get("encoding", "utf8")
    target = os.path.join(os.path.dirname(__file__), *names)
    with io.open(target, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file.

    The file is read via :func:`read`, so *file_paths* are interpreted
    relative to this module's directory.

    Raises:
        RuntimeError: when no ``__version__ = '...'`` assignment exists.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Package metadata for pynatnetclient.  The version is sourced from
# pynatnetclient/__init__.py via find_version() so it is maintained in
# exactly one place (pip's recommended single-source-version pattern).
setup(name='pynatnetclient',
      version=find_version('pynatnetclient', '__init__.py'),
      description='Python client to Optitrack.',
      # long_description=long_description,
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/toinsson/pynatnetclient',
      license='Apache',
      classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
      ],
      keywords='optitrack',
      # Auto-discover all packages under the project root.
      packages=find_packages(),
) | [
"os.path.dirname",
"setuptools.find_packages",
"re.search"
] | [((519, 592), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (528, 592), False, 'import io, os, re\n'), ((1213, 1228), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1226, 1228), False, 'from setuptools import find_packages\n'), ((308, 333), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (323, 333), False, 'import io, os, re\n')] |
import os
import git
import csv
from datetime import datetime
from progress.bar import Bar
def tagAnalysis(repo: git.Repo, outputDir: str):
    """Collect per-tag commit statistics for *repo* and append them as CSV.

    For every tag (ordered by tagged date) the number of reachable commits
    is counted; for every tag after the first, the count is restricted to
    commits made after the previous tag.  Results are appended (not
    overwritten) to ``project.csv`` (total tag count) and ``tags.csv``
    (one row per tag) in *outputDir*.

    :param repo: GitPython repository to analyze
    :param outputDir: directory receiving the CSV output files
    """
    print("Analyzing tags")
    tagInfo = []
    tags = sorted(repo.tags, key=getTaggedDate)

    lastTag = None
    for tag in Bar('Processing').iter(tags):
        commitCount = 0
        # Use identity comparison with None (PEP 8) instead of ``== None``.
        if lastTag is None:
            # First tag: count every commit reachable from it.
            commitCount = len(list(tag.commit.iter_items(repo, tag.commit)))
        else:
            # Later tags: only commits made after the previous tag's date.
            sinceStr = formatDate(getTaggedDate(lastTag))
            commitCount = len(list(tag.commit.iter_items(repo, tag.commit, after=sinceStr)))

        tagInfo.append(dict(
            path=tag.path,
            date=formatDate(getTaggedDate(tag)),
            commitCount=commitCount
        ))

        lastTag = tag

    # output non-tabular results (append mode so repeated runs accumulate)
    with open(os.path.join(outputDir, 'project.csv'), 'a', newline='') as f:
        w = csv.writer(f, delimiter=',')
        w.writerow(['Tag Count', len(tagInfo)])

    # output tag info, ordered chronologically
    print("Outputting CSVs")
    with open(os.path.join(outputDir, 'tags.csv'), 'a', newline='') as f:
        w = csv.writer(f, delimiter=',')
        w.writerow(['Path', 'Date', 'Commit Count'])
        for tag in sorted(tagInfo, key=lambda o: o['date']):
            w.writerow([tag['path'], tag['date'], tag['commitCount']])
def getTaggedDate(tag):
    """Return the tag's creation time as a ``datetime``.

    Lightweight tags carry no tag object of their own (``tag.tag is
    None``), so the committed date of the referenced commit is used in
    place of the tagged date.
    """
    if tag.tag is None:
        # Lightweight tag: fall back to the commit's timestamp.
        timestamp = tag.commit.committed_date
    else:
        timestamp = tag.tag.tagged_date
    # Convert in both branches: the original only converted annotated
    # tags, returning a raw Unix timestamp (int) for lightweight ones,
    # which breaks downstream strftime calls.
    return datetime.fromtimestamp(timestamp)
def formatDate(value):
    """Render a datetime as a ``YYYY-MM-DD`` string."""
    return '{:%Y-%m-%d}'.format(value)
"progress.bar.Bar",
"csv.writer",
"datetime.datetime.fromtimestamp",
"os.path.join"
] | [((1574, 1602), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['date'], {}), '(date)\n', (1596, 1602), False, 'from datetime import datetime\n'), ((950, 978), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (960, 978), False, 'import csv\n'), ((1171, 1199), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1181, 1199), False, 'import csv\n'), ((289, 306), 'progress.bar.Bar', 'Bar', (['"""Processing"""'], {}), "('Processing')\n", (292, 306), False, 'from progress.bar import Bar\n'), ((874, 912), 'os.path.join', 'os.path.join', (['outputDir', '"""project.csv"""'], {}), "(outputDir, 'project.csv')\n", (886, 912), False, 'import os\n'), ((1098, 1133), 'os.path.join', 'os.path.join', (['outputDir', '"""tags.csv"""'], {}), "(outputDir, 'tags.csv')\n", (1110, 1133), False, 'import os\n')] |
""" Configuration variables used in the application.
These variables should be setup as environment variables
in the docker-compose.yml file when launching all the services.
If these environment variables are not present, default values
are asigned to them.
"""
import os
# Fall back to the defaults below when the corresponding environment
# variable is missing or set to an empty string (hence ``or`` rather
# than a plain ``get(key, default)``, which would keep an empty value).
KAFKA_ENDPOINT = os.getenv('KAFKA_ENDPOINT') or 'localhost:9092'
KAFKA_INPUT_TOPIC = os.getenv('INPUT_TOPIC') or 'posts'
KAFKA_LOGGING_TOPIC = os.getenv('LOGGING_TOPIC') or 'service_logs'

# Identifier this service uses when tagging its log messages.
SERVICE_KEY = 'topic_modeling'
"os.environ.get"
] | [((292, 324), 'os.environ.get', 'os.environ.get', (['"""KAFKA_ENDPOINT"""'], {}), "('KAFKA_ENDPOINT')\n", (306, 324), False, 'import os\n'), ((365, 394), 'os.environ.get', 'os.environ.get', (['"""INPUT_TOPIC"""'], {}), "('INPUT_TOPIC')\n", (379, 394), False, 'import os\n'), ((428, 459), 'os.environ.get', 'os.environ.get', (['"""LOGGING_TOPIC"""'], {}), "('LOGGING_TOPIC')\n", (442, 459), False, 'import os\n')] |
import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
def get_fitness(solution, initial_node, node_list):
    """
    Get fitness of solution encoded by permutation.

    The tour is closed by appending the initial node, and the fitness is
    the sum of distances (module-level ``dist_func``) over consecutive
    node pairs along the closed tour.

    Args:
        solution (numpy.ndarray): Solution encoded as a permutation
        initial_node (int): Initial node in the permutation (equal to the first element - redundant)
        node_list (list): List of node IDs in network

    Returns:
        (float): Fitness of specified solution
    """
    # Close the tour by returning to the starting node.
    closed_tour = np.hstack((solution, initial_node))
    # Sum edge lengths over consecutive pairs of the closed tour.
    return np.sum([dist_func(node_list[src], node_list[dst])
                   for src, dst in zip(closed_tour[:-1], closed_tour[1:])])
def get_inv_dist_mat(node_list):
    """
    Get matrix of pairwise inverse distances for specified nodes.

    Entry (i, j) holds ``1/dist_func(node_list[i], node_list[j])`` (using
    the module-level ``dist_func``); the matrix is symmetric and has zeros
    on the diagonal.

    Args:
        node_list (list): Nodes for which to compute the pairwise inverse distances

    Returns:
        (numpy.ndarray): Matrix of pairwise inverse distances
    """
    n = len(node_list)
    inv_dist = np.zeros((n, n), dtype=float)
    # Fill the upper triangle and mirror it into the lower triangle.
    for i in range(n - 1):
        for j in range(i + 1, n):
            value = 1 / dist_func(node_list[i], node_list[j])
            inv_dist[i, j] = value
            inv_dist[j, i] = value
    return inv_dist
def aco(network, n_ants=100, max_it=500, rho=0.1, alpha=1.0, beta=1.0, q=1.0,
        aug='relinking', p_mut=0.08, p_accept_worse=0.1, breeding_coeff=0.5):
    """
    Perform ant colony optimization to estimate solution for travelling salesman problem.
    Args:
        network (object): Networkx representation of the graph
        n_ants (int): Number of ants to use
        max_it (int): Maximum number of iterations to perform
        rho (float): Evaporation rate
        alpha (float): Pheromone matrix power in transition probability matrix construction
        beta (float): Inverse distance matrix power in transition probability matrix construction
        q (float): Pheromone trail coefficient
        aug (str): Algorithm augmentation to use. If None, use no augmentation. If equal to 'relinking' use path
        relinking method. If equal to 'genetic' use replacement of worst ants with crossovers of best ants.
        p_mut (float): Mutation probability
        p_accept_worse (float): Probability of accepting a relinked solution that is worse than original.
        breeding_coeff (float): Fraction of best ants to use in crossover and fraction of worst ants to
        replace with offspring (genetic augmentation)
    Returns:
        (tuple): Best found solution, fitness of best solution, edgelists corresponding to solutions representing
        the new global best solution.
    """
    # Check aug parameter.
    if aug is not None:
        if aug not in {'relinking', 'genetic'}:
            raise(ValueError('unknown value specified for aug parameter'))
    # Initialize list for storing edge lists (for animating).
    edgelists = []
    # Initialize list of nodes (for converting enumerations to actual node IDs).
    node_list = list(network.nodes())
    # Set initial node.
    initial_node = 0
    # Initilize best found solution.
    best_solution = {
        'fitness' : np.inf,
        'solution' : None
    }
    # Compute distance matrix for locations.
    # NOTE: despite the name, this holds *inverse* distances (see get_inv_dist_mat).
    inv_dist_mat = get_inv_dist_mat(node_list)
    # Initialize pheromone matrix (small uniform positive values so every
    # edge starts with a non-zero transition probability).
    pher_mat = 0.01*np.ones_like(inv_dist_mat, dtype=float)
    # Initialize iteration index.
    it_idx = 0
    # Main iteration loop.
    while it_idx < max_it:
        # Increment iteration counter.
        it_idx += 1
        # Print iteration index and best fitness.
        print('iteration: {0}'.format(it_idx))
        print('best fitness: {0}'.format(best_solution['fitness']))
        # Initialize array for storing ant solutions.
        ant_solutions = np.empty((n_ants, len(node_list)), dtype=int)
        # Initialize array for storing ant fitness values.
        ant_fitness_vals = np.empty(n_ants, dtype=float)
        # Build transition probability matrix (unnormalized; normalized
        # per-row over the unvisited candidates in the step loop below).
        p_mat = (pher_mat**alpha) * (inv_dist_mat**beta)
        # Run ACO step.
        for ant_idx in range(n_ants):
            # Set initial node.
            current_node = initial_node
            # Get set of unvisited nodes.
            unvisited = set(range(len(node_list)))
            unvisited.remove(initial_node)
            # Build ant's solution.
            solution_nxt = np.empty(len(node_list), dtype=int)
            solution_nxt[0] = initial_node
            for step_idx in range(len(node_list) - 1):
                unvisited_list = list(unvisited)
                # Sample the next node proportionally to pheromone/attractiveness.
                probs = p_mat[current_node, unvisited_list] / np.sum(p_mat[current_node, unvisited_list])
                node_nxt = np.random.choice(unvisited_list, size=1, p=probs)[0]
                unvisited.remove(node_nxt)
                solution_nxt[step_idx+1] = node_nxt
                current_node = node_nxt
            # Compute fitness of solution and compare to global best.
            fitness_solution = get_fitness(solution_nxt, initial_node, node_list)
            ant_fitness_vals[ant_idx] = fitness_solution
            if fitness_solution < best_solution['fitness']:
                best_solution['fitness'] = fitness_solution
                best_solution['solution'] = solution_nxt
                solution_nxt_aug = np.hstack((solution_nxt, initial_node))
                # Store edge list (for animating).
                edgelists.append([(node_list[solution_nxt_aug[idx]], node_list[solution_nxt_aug[idx+1]])
                    for idx in range(len(solution_nxt_aug) - 1)])
            # Store ant's solution.
            ant_solutions[ant_idx, :] = solution_nxt
        # Initialize matrix for accumulating pheromones (for pheromone update).
        pher_add_mat = np.zeros_like(pher_mat, dtype=float)
        if aug == 'relinking':
            # If using relinking augmentation.
            # Go over solutions.
            for idx_solution in range(ant_solutions.shape[0]):
                # Split solution at random point.
                sec1, sec2 = np.split(ant_solutions[idx_solution], \
                        indices_or_sections=[np.random.randint(1, len(ant_solutions[idx_solution]))])
                # Relink.
                solution_mod = np.hstack((sec1, list(reversed(sec2))))
                # Apply mutation with probability.
                if np.random.rand() < p_mut:
                    # Swap two positions sampled uniformly (may coincide, a no-op).
                    p1 = np.random.randint(0, len(solution_mod))
                    p2 = np.random.randint(0, len(solution_mod))
                    solution_mod[[p1, p2]] = solution_mod[[p2, p1]]
                # Compute fitness value of relinked solution.
                fitness_mod = get_fitness(solution_mod, initial_node, node_list)
                # If fitness better accept. Also accept with specified probability.
                if (fitness_mod < ant_fitness_vals[idx_solution]) or (np.random.rand() < p_accept_worse):
                    ant_solutions[idx_solution, :] = solution_mod
                    ant_fitness_vals[idx_solution] = fitness_mod
        if aug == 'genetic':
            # If using genetic augmentation.
            # Sort ants ant fitness values from best to worst.
            p = ant_fitness_vals.argsort()
            ant_fitness_vals = ant_fitness_vals[p]
            ant_solutions = ant_solutions[p, :]
            # Get number of new ants and initialize array for crossovers.
            # NOTE(review): the crossover loop below steps by 2 and writes
            # idx+1, so an odd n_new_ants would index past the array --
            # TODO confirm callers keep breeding_coeff*n_ants even.
            n_new_ants = int(np.ceil(breeding_coeff*ant_solutions.shape[0]))
            ant_solutions_new = np.empty((n_new_ants, ant_solutions.shape[1]), dtype=int)
            ant_fitness_vals_new = np.empty(ant_solutions_new.shape[0], dtype=float)
            # Go over solutions for which to perform crossover.
            for idx in range(0, ant_solutions_new.shape[0], 2):
                # Get solutions and cut at random point.
                ant_sol_1 = ant_solutions[idx, :]
                ant_sol_2 = ant_solutions[idx+1, :]
                c1 = ant_sol_1[:np.random.randint(1, len(ant_sol_1))]
                c2 = ant_sol_2[:np.random.randint(1, len(ant_sol_2))]
                # Append elements in second solution in order found.
                offspring1 = np.hstack((c1, ant_sol_2[~np.in1d(ant_sol_2, c1)]))
                offspring2 = np.hstack((c2, ant_sol_1[~np.in1d(ant_sol_1, c2)]))
                # Apply mutations with specified probability.
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring1))
                    p2 = np.random.randint(0, len(offspring1))
                    offspring1[[p1, p2]] = offspring1[[p2, p1]]
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring2))
                    p2 = np.random.randint(0, len(offspring2))
                    offspring2[[p1, p2]] = offspring2[[p2, p1]]
                # Set offspring and fitness values.
                ant_solutions_new[idx, :] = offspring1
                ant_solutions_new[idx+1, :] = offspring2
                ant_fitness_vals_new[idx] = get_fitness(offspring1, initial_node, node_list)
                ant_fitness_vals_new[idx+1] = get_fitness(offspring2, initial_node, node_list)
            # Replace worst ants with offspring of best.
            ant_solutions[-ant_solutions_new.shape[0]:] = ant_solutions_new
            ant_fitness_vals[-len(ant_fitness_vals_new):] = ant_fitness_vals_new
        # Compute and print diversity of solutions.
        # Normalized spread of fitness values in [0, 1].
        # NOTE(review): denominator is (max - min), which is zero when all
        # ants share the same fitness -- would divide by zero.
        diversity = (np.mean(ant_fitness_vals) - np.min(ant_fitness_vals))/(np.max(ant_fitness_vals) - np.min(ant_fitness_vals))
        print(diversity)
        # Add pheromones to pheromone accumulation matrix (for next iteration).
        # Each edge of each tour receives pheromone inversely proportional
        # to that tour's total length (deposited symmetrically).
        for idx_sol, solution in enumerate(ant_solutions):
            for idx in range(len(solution)-1):
                pher_add_mat[solution[idx], solution[idx+1]] += q*(1/ant_fitness_vals[idx_sol])
                pher_add_mat[solution[idx+1], solution[idx]] += q*(1/ant_fitness_vals[idx_sol])
        # Update pheromone matrix.
        pher_mat = (1-rho)*pher_mat + pher_add_mat
    # Return best found solution, fitness value of best found solution and edgelist of network states
    # corresponding to global best position updates.
    return best_solution['solution'], best_solution['fitness'], edgelists
if __name__ == '__main__':
    ### PARSE ARGUMENTS ###
    parser = argparse.ArgumentParser(description='Approximate solution to TSP using ant colony optimization.')
    parser.add_argument('--num-nodes', type=int, default=50, help='Number of nodes to use')
    parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
            help='Distance function to use')
    parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
            help='Prediction model to use for learned distance function')
    parser.add_argument('--max-it', type=int, default=100, help='Maximum iterations to perform')
    parser.add_argument('--n-ants', type=int, default=100, help='Number of ants to use')
    parser.add_argument('--rho', type=float, default=0.1, help='Evaporation rate parameter')
    parser.add_argument('--alpha', type=float, default=1.0, help='Alpha parameter in transition probability matrix update')
    parser.add_argument('--beta', type=float, default=1.0, help='Beta parameter in transition probability matrix update')
    parser.add_argument('--q', type=float, default=1.0, help='Pheromone update coefficient')
    parser.add_argument('--aug', type=str, default=None, choices=['relinking', 'genetic'], help='Augmentation to use')
    parser.add_argument('--p-mut', type=float, default=0.08, help='Mutation rate (augmentation)')
    parser.add_argument('--p-accept-worse', type=float, default=0.08,
            help='Probability of accepting a worse result of relinking (relinking augmentation)')
    parser.add_argument('--breeding-coeff', type=float, default=0.5,
            help='Fraction of best solution for which to perform crossover and fraction of worst solution to replace by offspring (genetic augmentation)')
    args = parser.parse_args()
    #######################
    # Parse problem network.
    network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
    # Number of nodes to remove from network.
    to_remove = network.number_of_nodes() - args.num_nodes
    # Remove randomly sampled nodes to get specified number of nodes.
    network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
    # Get distance function.
    # NOTE: this binds the module-level global ``dist_func`` that
    # get_fitness and get_inv_dist_mat read -- do not rename it.
    dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
    # Get solution using ant colony optimization.
    solution_position, solution_fitness, edgelists = aco(network, n_ants=args.n_ants, max_it=args.max_it, rho=args.rho,
            alpha=args.alpha, beta=args.beta, q=args.q, aug=args.aug, p_mut=args.p_mut,
            p_accept_worse=args.p_accept_worse, breeding_coeff=args.breeding_coeff)
    # Save list of edge lists for animation.
    np.save('./results/edgelists/edgelist_tsp_ac.npy', list(map(np.vstack, edgelists)))
    # Persist the (reduced) network alongside the edge lists.
    nx.write_gpickle(network, './results/networks/network_tsp_ac.gpickle')
    # Print best solution fitness.
    print('Fitness of best found solution: {0:.3f}'.format(solution_fitness))
| [
"numpy.ones_like",
"numpy.ceil",
"numpy.mean",
"numpy.random.rand",
"argparse.ArgumentParser",
"numpy.hstack",
"numpy.random.choice",
"numpy.in1d",
"models.distance.get_dist_func",
"numpy.max",
"numpy.sum",
"numpy.empty",
"numpy.min",
"networkx.read_gpickle",
"numpy.zeros_like",
"netwo... | [((601, 636), 'numpy.hstack', 'np.hstack', (['(solution, initial_node)'], {}), '((solution, initial_node))\n', (610, 636), True, 'import numpy as np\n'), ((10915, 11017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Approximate solution to TSP using ant colony optimization."""'}), "(description=\n 'Approximate solution to TSP using ant colony optimization.')\n", (10938, 11017), False, 'import argparse\n'), ((12770, 12826), 'networkx.read_gpickle', 'nx.read_gpickle', (['"""./data/grid_data/grid_network.gpickle"""'], {}), "('./data/grid_data/grid_network.gpickle')\n", (12785, 12826), True, 'import networkx as nx\n'), ((13135, 13224), 'models.distance.get_dist_func', 'get_dist_func', (['network'], {'which': 'args.dist_func', 'prediction_model': 'args.prediction_model'}), '(network, which=args.dist_func, prediction_model=args.\n prediction_model)\n', (13148, 13224), False, 'from models.distance import get_dist_func\n'), ((13703, 13773), 'networkx.write_gpickle', 'nx.write_gpickle', (['network', '"""./results/networks/network_tsp_ac.gpickle"""'], {}), "(network, './results/networks/network_tsp_ac.gpickle')\n", (13719, 13773), True, 'import networkx as nx\n'), ((3643, 3682), 'numpy.ones_like', 'np.ones_like', (['inv_dist_mat'], {'dtype': 'float'}), '(inv_dist_mat, dtype=float)\n', (3655, 3682), True, 'import numpy as np\n'), ((4238, 4267), 'numpy.empty', 'np.empty', (['n_ants'], {'dtype': 'float'}), '(n_ants, dtype=float)\n', (4246, 4267), True, 'import numpy as np\n'), ((6160, 6196), 'numpy.zeros_like', 'np.zeros_like', (['pher_mat'], {'dtype': 'float'}), '(pher_mat, dtype=float)\n', (6173, 6196), True, 'import numpy as np\n'), ((7969, 8026), 'numpy.empty', 'np.empty', (['(n_new_ants, ant_solutions.shape[1])'], {'dtype': 'int'}), '((n_new_ants, ant_solutions.shape[1]), dtype=int)\n', (7977, 8026), True, 'import numpy as np\n'), ((8062, 8111), 'numpy.empty', 'np.empty', (['ant_solutions_new.shape[0]'], {'dtype': 'float'}), 
'(ant_solutions_new.shape[0], dtype=float)\n', (8070, 8111), True, 'import numpy as np\n'), ((5669, 5708), 'numpy.hstack', 'np.hstack', (['(solution_nxt, initial_node)'], {}), '((solution_nxt, initial_node))\n', (5678, 5708), True, 'import numpy as np\n'), ((7889, 7937), 'numpy.ceil', 'np.ceil', (['(breeding_coeff * ant_solutions.shape[0])'], {}), '(breeding_coeff * ant_solutions.shape[0])\n', (7896, 7937), True, 'import numpy as np\n'), ((10001, 10026), 'numpy.mean', 'np.mean', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10008, 10026), True, 'import numpy as np\n'), ((10029, 10053), 'numpy.min', 'np.min', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10035, 10053), True, 'import numpy as np\n'), ((10056, 10080), 'numpy.max', 'np.max', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10062, 10080), True, 'import numpy as np\n'), ((10083, 10107), 'numpy.min', 'np.min', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10089, 10107), True, 'import numpy as np\n'), ((4975, 5018), 'numpy.sum', 'np.sum', (['p_mat[current_node, unvisited_list]'], {}), '(p_mat[current_node, unvisited_list])\n', (4981, 5018), True, 'import numpy as np\n'), ((5046, 5095), 'numpy.random.choice', 'np.random.choice', (['unvisited_list'], {'size': '(1)', 'p': 'probs'}), '(unvisited_list, size=1, p=probs)\n', (5062, 5095), True, 'import numpy as np\n'), ((6773, 6789), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6787, 6789), True, 'import numpy as np\n'), ((8871, 8887), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8885, 8887), True, 'import numpy as np\n'), ((9106, 9122), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9120, 9122), True, 'import numpy as np\n'), ((7312, 7328), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7326, 7328), True, 'import numpy as np\n'), ((8682, 8704), 'numpy.in1d', 'np.in1d', (['ant_sol_2', 'c1'], {}), '(ant_sol_2, c1)\n', (8689, 8704), True, 'import numpy as np\n'), ((8763, 8785), 'numpy.in1d', 
'np.in1d', (['ant_sol_1', 'c2'], {}), '(ant_sol_1, c2)\n', (8770, 8785), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright © 2021 Wacom Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from uim.codec.parser.uim import UIMParser
from uim.codec.parser.will import WILL2Parser
from uim.codec.writer.encoder.encoder_3_1_0 import UIMEncoder310
from uim.model.ink import InkModel
if __name__ == '__main__':
    uim_parser: UIMParser = UIMParser()
    # --- Convert UIM v3.0.0 to v3.1.0 ---------------------------------------
    model: InkModel = uim_parser.parse('../ink/uim_3.0.0/1) Value of Ink 1.uim')
    # Saving overwrites any existing file with the same name.
    with io.open('1) Value of Ink 1_3_0_0_to_3_1_0.uim', 'wb') as out_stream:
        out_stream.write(UIMEncoder310().encode(model))
    # --- Round-trip a UIM v3.1.0 file ----------------------------------------
    model = uim_parser.parse('../ink/uim_3.1.0/1) Value of Ink 1 (3.1 delta).uim')
    with io.open('1) Value of Ink 1_3_1_0.uim', 'wb') as out_stream:
        out_stream.write(UIMEncoder310().encode(model))
    # --- Convert a WILL 2 file from Inkspace (https://inkspace.wacom.com/) ---
    will_parser: WILL2Parser = WILL2Parser()
    will_model: InkModel = will_parser.parse('../ink/will/elephant.will')
    with io.open('elephant.uim', 'wb') as out_stream:
        out_stream.write(UIMEncoder310().encode(will_model))
| [
"uim.codec.parser.uim.UIMParser",
"uim.codec.parser.will.WILL2Parser",
"uim.codec.writer.encoder.encoder_3_1_0.UIMEncoder310",
"io.open"
] | [((886, 897), 'uim.codec.parser.uim.UIMParser', 'UIMParser', ([], {}), '()\n', (895, 897), False, 'from uim.codec.parser.uim import UIMParser\n'), ((2117, 2130), 'uim.codec.parser.will.WILL2Parser', 'WILL2Parser', ([], {}), '()\n', (2128, 2130), False, 'from uim.codec.parser.will import WILL2Parser\n'), ((1070, 1123), 'io.open', 'io.open', (['"""1) Value of Ink 1_3_0_0_to_3_1_0.uim"""', '"""wb"""'], {}), "('1) Value of Ink 1_3_0_0_to_3_1_0.uim', 'wb')\n", (1077, 1123), False, 'import io\n'), ((1643, 1687), 'io.open', 'io.open', (['"""1) Value of Ink 1_3_1_0.uim"""', '"""wb"""'], {}), "('1) Value of Ink 1_3_1_0.uim', 'wb')\n", (1650, 1687), False, 'import io\n'), ((2269, 2298), 'io.open', 'io.open', (['"""elephant.uim"""', '"""wb"""'], {}), "('elephant.uim', 'wb')\n", (2276, 2298), False, 'import io\n'), ((1181, 1196), 'uim.codec.writer.encoder.encoder_3_1_0.UIMEncoder310', 'UIMEncoder310', ([], {}), '()\n', (1194, 1196), False, 'from uim.codec.writer.encoder.encoder_3_1_0 import UIMEncoder310\n'), ((1745, 1760), 'uim.codec.writer.encoder.encoder_3_1_0.UIMEncoder310', 'UIMEncoder310', ([], {}), '()\n', (1758, 1760), False, 'from uim.codec.writer.encoder.encoder_3_1_0 import UIMEncoder310\n'), ((2356, 2371), 'uim.codec.writer.encoder.encoder_3_1_0.UIMEncoder310', 'UIMEncoder310', ([], {}), '()\n', (2369, 2371), False, 'from uim.codec.writer.encoder.encoder_3_1_0 import UIMEncoder310\n')] |
'''
Analytic Hierarchy Process, AHP.
Base on Wasserstein distance
'''
from scipy.stats import wasserstein_distance
from sklearn.decomposition import PCA
import scipy
import numpy as np
import pandas as pd
import sys
import argparse
import os
import glob
import datasets_analysis_module as dam
class idx_analysis(object):
    """Lookup tables mapping SMILES tokens (plus the pseudo-targets 'length'
    and 'symbol_type') to their fixed distribution indices 0..62."""

    # Tokens listed in order of their fixed distribution index.
    _SYMBOLS = (
        'c', 'C', '(', ')', '1', 'O', '=', '2', 'N', 'n',
        '3', '[', ']', '@', 'H', 'F', '-', '4', 'S', 'Cl',
        '/', 's', 'o', '.', 'Br', '5', '+', '#', '\\', '6',
        'I', 'P', 'Si', '7', '8', 'B', '%', 'Na', '9', '0',
        'K', 'Sn', 'Se', 'Li', 'Zn', 'Al', 'b', 'As', 'Mg', 'p',
        'Ca', 'se', 'Ag', 'Te', 'Ba', 'Bi', 'Rb', 'Cs', 'Sr', 'te',
        'Be', 'length', 'symbol_type',
    )

    def __init__(self):
        # Forward map: token -> index (0..62).
        self.all_distribution_idx = {sym: i for i, sym in enumerate(self._SYMBOLS)}
        # Reverse map: index -> token.
        self.all_distribution_idx_reversed = dict(enumerate(self._SYMBOLS))
def wasserstein_dis(distr_dict_0, distr_dict_1, dis_type='wasserstein'):
    """Distance between two distributions given as {key: count} dicts.

    Each distribution is read in key order, max-normalized, and shifted by a
    tiny epsilon (so the KL branch never sees an exact zero).

    Args:
        distr_dict_0: first distribution, mapping key -> count.
        distr_dict_1: second distribution, mapping key -> count.
        dis_type: 'wasserstein', 'KL', or anything else for Euclidean norm.

    Returns:
        The distance rounded to 4 decimals.
    """
    eps = 1e-15

    def _normalized(distr):
        ordered_keys = np.sort(list(distr.keys()))
        peak = max(distr.values())
        return eps + np.array([distr[k] for k in ordered_keys]) / peak

    values_0 = _normalized(distr_dict_0)
    values_1 = _normalized(distr_dict_1)
    if dis_type == 'wasserstein':
        dist = wasserstein_distance(values_0, values_1)
    elif dis_type == 'KL':
        dist = np.mean(scipy.special.kl_div(values_0, values_1))
    else:
        dist = np.linalg.norm(values_0 - values_1)
    return np.round(dist, 4)
def datasets_pair_analysis(
        target_set_distribution,
        pretrain_sets_distribution_path='PretrainedSetsDistribution.npy'
):
    """Compare a target dataset's distributions against the three pretrained sets.

    Args:
        target_set_distribution: dict mapping str(index 0..62) -> distribution dict.
        pretrain_sets_distribution_path: path to the saved pretrained-set
            distributions (.npy holding a dict keyed "<prefix>-<index>").

    Returns:
        {'c'|'cp'|'cpz': {index: wasserstein distance}} for all 63 indices.

    Raises:
        FileNotFoundError: if the pretrained distribution file is missing.
            (The original printed a warning and then crashed inside np.load;
            failing fast is clearer.)
    """
    if not os.path.exists(pretrain_sets_distribution_path):
        raise FileNotFoundError(
            '{}: not the right file. '
            'PretrainedSetsDistribution.npy can not be found'.format(
                pretrain_sets_distribution_path))
    pretrained_sets_distribution = np.load(pretrain_sets_distribution_path, allow_pickle=True).item()
    three_sets_prefix = ['c', 'cp', 'cpz']
    all_wd_values = {k: {} for k in three_sets_prefix}
    for prefix in three_sets_prefix:
        for j in range(63):
            prefix_name = f"{prefix}-{j}"
            all_wd_values[prefix][j] = wasserstein_dis(
                target_set_distribution[str(j)],
                pretrained_sets_distribution[prefix_name]
            )
    return all_wd_values
def _clamped_int_distribution(combined_result, min_len, max_len, target):
    """Bucket integer-keyed counts into [min_len, max_len]; out-of-range keys
    are accumulated into the boundary buckets.

    Shared helper for the 'length' and 'symbol_type' targets (the original
    duplicated this loop verbatim for both).
    """
    distribute_dict = {k: 0 for k in range(min_len, max_len + 1)}
    for k, v in combined_result.items():
        if k <= min_len:
            distribute_dict[min_len] += v
        elif min_len < k < max_len:
            distribute_dict[k] = v
        elif k >= max_len:
            distribute_dict[max_len] += v
        else:
            # Unreachable for ordinary numbers; kept as a guard (e.g. NaN keys).
            print('Unexpected key from combined_result.(target: {:s})'.format(target))
    return distribute_dict


def rerange_distribution(target, combined_result):
    """Re-bucket a raw analysis result into the fixed distribution support.

    Args:
        target: 'length' (buckets 1..256), 'symbol_type' (buckets 1..61), or a
            symbol name (buckets are frequencies 0.00..1.00 in steps of 0.01).
        combined_result: dict mapping raw key -> count.

    Returns:
        dict with the full fixed key range, zero-filled where no counts exist.
    """
    if target == 'length':
        return _clamped_int_distribution(combined_result, 1, 256, target)
    if target == 'symbol_type':
        return _clamped_int_distribution(combined_result, 1, 61, target)
    # Symbol-frequency target: keys are pre-rounded to 2 decimals.
    distribute_dict = {k: 0 for k in [np.round(w, 2) for w in np.arange(0.0, 1.001, 0.01)]}
    for k, v in combined_result.items():
        if k in distribute_dict:
            distribute_dict[k] += v
        else:
            print('Unexpected key {:s} from combined_result.(consider_symbol {:s})'.format(str(k), target))
    return distribute_dict
def linear_ridgeclassifier(x, y):
    """Fit a ridge classifier on (x, y).

    Returns:
        (training accuracy, intercept array, fitted classifier).
    """
    from sklearn import linear_model
    classifier = linear_model.RidgeClassifier()
    classifier.fit(x, y)
    accuracy = classifier.score(x, y)
    return accuracy, classifier.intercept_, classifier
def data_norm(*args):
    """Standardize one or more feature matrices with statistics pooled over
    all of them (fit on the vertical stack, transform each separately).

    Returns:
        A list of transformed arrays, or a single array if only one was given.
    """
    assert len(args) > 0, "Datasets' length needs > 0"
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    scaler.fit(np.vstack(args))
    transformed = [scaler.transform(arr) for arr in args]
    if len(args) == 1:
        return transformed[0]
    return transformed
def main_get_dis_customized_dataset(file='./temp_data/bbbp.smi', num_workers=1):
    """Compute the 63x3 Wasserstein feature row for a user-supplied SMILES file.

    For each of the 63 targets (symbols, length, symbol_type) the file is
    analyzed in parallel, re-bucketed, and compared against the three
    pretrained-set distributions.

    Args:
        file: path to the SMILES file.
        num_workers: worker count for the parallel analysis.

    Returns:
        A 1x189 pandas DataFrame indexed by the dataset name.
    """
    dataname = os.path.split(file)[-1].split('.')[0]
    ahp = idx_analysis()
    target_set_distribution = {}
    for symbol, idx in ahp.all_distribution_idx.items():
        analyzer = dam.target_analysis(symbol)
        if symbol == 'length':
            analysis_fn = analyzer.length_analysis
        elif symbol == 'symbol_type':
            analysis_fn = analyzer.symbol_type_analysis
        else:
            analysis_fn = analyzer.symbol_analysis
        combined_result = dam.parallel_operation(file, num_workers, analysis_fn)
        target_set_distribution[str(idx)] = rerange_distribution(symbol, combined_result)
    all_wd_values = datasets_pair_analysis(
        target_set_distribution,
        pretrain_sets_distribution_path='PretrainedSetsDistribution.npy',
    )
    # Concatenate the three 63-dim distance vectors into a single feature row.
    all_features = [list(wd_dict.values()) for wd_dict in all_wd_values.values()]
    final_features = pd.DataFrame(
        np.reshape(all_features, [1, 63 * 3]),
        index=[dataname],
        columns=list(range(63 * 3)),
    )
    return final_features
def main_L2L(args):
    """Select the best-matching pretrained model for the user's dataset.

    Trains a ridge classifier on stored Wasserstein features under increasing
    PCA dimensionality; at the first dimensionality with perfect training
    accuracy, scores the user's dataset and prints the chosen model.

    Args:
        args: namespace with input_dataset (path) and num_workers (int).

    Raises:
        FileNotFoundError: if wasserstein.csv is missing (the original printed
            a warning and then crashed inside read_csv).
        RuntimeError: if no PCA dimensionality reaches perfect training
            accuracy (the original raised NameError on an unbound variable).
    """
    # This file contains the features used to train the decision model.
    filename = './wasserstein.csv'
    if not os.path.exists(filename):
        raise FileNotFoundError('No wasserstein.csv exists')
    data_df = pd.read_csv(filename, header=0, index_col=0)
    label = data_df['label'].values
    features = data_df[[str(i) for i in range(np.shape(data_df.values)[-1] - 1)]].values
    customized_dataset_feature = main_get_dis_customized_dataset(
        file=args.input_dataset, num_workers=args.num_workers).values
    # Normalize the stored features and the new row with pooled statistics.
    all_features = np.vstack([features, customized_dataset_feature])
    norm_all_features = data_norm(all_features)
    features = norm_all_features[0:-1, :]
    customized_dataset_feature = norm_all_features[-1, :]
    get_scores = None
    for redu_i in range(1, np.shape(features)[0] + 1):
        reducer = PCA(n_components=redu_i)
        features_ = reducer.fit_transform(features)
        score, _, model = linear_ridgeclassifier(features_, label)
        # Use the first dimensionality that separates the training set perfectly.
        if score == 1.0 and get_scores is None:
            customized_dataset_feature_ = reducer.transform(customized_dataset_feature[None, :])
            get_scores = model.decision_function(customized_dataset_feature_)
    if get_scores is None:
        raise RuntimeError('No PCA dimensionality reached perfect training '
                           'accuracy; cannot select a pretrained model.')
    select_models = {0: 'model_chembl27', 1: 'model_chembl27_pubchem', 2: 'model_chembl27_pubchem_zinc'}
    print(f'Select the pretrained {select_models[np.argmax(get_scores)]}, and the score is {np.max(get_scores)}')
def main(args):
    """Entry point: run the learn-to-learn pretrained-model selection."""
    main_L2L(args)
def parse_args(args):
    """Parse command-line options.

    Args:
        args: list of argument strings, e.g. sys.argv[1:].

    Returns:
        argparse.Namespace with input_dataset (str) and num_workers (int).
    """
    parser = argparse.ArgumentParser(description='Datasets analysis')
    parser.add_argument('--input_dataset', default='test.smi', type=str)
    parser.add_argument('--num_workers', default=1, type=int)
    # Bug fix: forward the provided args; the original ignored the parameter
    # and always read sys.argv.
    args = parser.parse_args(args)
    return args
def cli_main():
    """Console entry point: parse sys.argv and run the analysis."""
    args = parse_args(sys.argv[1:])
    # print(args)
    main(args)
if __name__ == "__main__":
    # Run the CLI when executed as a script.
    cli_main()
    print('End!')
| [
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"os.path.exists",
"sklearn.linear_model.RidgeClassifier",
"numpy.reshape",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"numpy.max",
"os.path.split",
"scipy.stats.wasserstein_distance",
"numpy.vstack",
"numpy.round",
"datasets_an... | [((1918, 1936), 'numpy.round', 'np.round', (['w_dis', '(4)'], {}), '(w_dis, 4)\n', (1926, 1936), True, 'import numpy as np\n'), ((4358, 4388), 'sklearn.linear_model.RidgeClassifier', 'linear_model.RidgeClassifier', ([], {}), '()\n', (4386, 4388), False, 'from sklearn import linear_model\n'), ((4600, 4616), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4614, 4616), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6309, 6353), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)', 'index_col': '(0)'}), '(filename, header=0, index_col=0)\n', (6320, 6353), True, 'import pandas as pd\n'), ((6661, 6710), 'numpy.vstack', 'np.vstack', (['[features, customized_dataset_feature]'], {}), '([features, customized_dataset_feature])\n', (6670, 6710), True, 'import numpy as np\n'), ((7865, 7921), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Datasets analysis"""'}), "(description='Datasets analysis')\n", (7888, 7921), False, 'import argparse\n'), ((1690, 1730), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['values_0', 'values_1'], {}), '(values_0, values_1)\n', (1710, 1730), False, 'from scipy.stats import wasserstein_distance\n'), ((2079, 2126), 'os.path.exists', 'os.path.exists', (['pretrain_sets_distribution_path'], {}), '(pretrain_sets_distribution_path)\n', (2093, 2126), False, 'import os\n'), ((4632, 4647), 'numpy.vstack', 'np.vstack', (['args'], {}), '(args)\n', (4641, 4647), True, 'import numpy as np\n'), ((5122, 5144), 'datasets_analysis_module.target_analysis', 'dam.target_analysis', (['k'], {}), '(k)\n', (5141, 5144), True, 'import datasets_analysis_module as dam\n'), ((5390, 5446), 'datasets_analysis_module.parallel_operation', 'dam.parallel_operation', (['file', 'num_workers', 'specific_func'], {}), '(file, num_workers, specific_func)\n', (5412, 5446), True, 'import datasets_analysis_module as dam\n'), ((5901, 5938), 'numpy.reshape', 
'np.reshape', (['all_features', '[1, 63 * 3]'], {}), '(all_features, [1, 63 * 3])\n', (5911, 5938), True, 'import numpy as np\n'), ((6225, 6249), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (6239, 6249), False, 'import os\n'), ((6983, 7007), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'redu_i'}), '(n_components=redu_i)\n', (6986, 7007), False, 'from sklearn.decomposition import PCA\n'), ((1389, 1439), 'numpy.array', 'np.array', (['[distr_dict_0[k] for k in sorted_keys_0]'], {}), '([distr_dict_0[k] for k in sorted_keys_0])\n', (1397, 1439), True, 'import numpy as np\n'), ((1576, 1626), 'numpy.array', 'np.array', (['[distr_dict_1[k] for k in sorted_keys_1]'], {}), '([distr_dict_1[k] for k in sorted_keys_1])\n', (1584, 1626), True, 'import numpy as np\n'), ((2298, 2357), 'numpy.load', 'np.load', (['pretrain_sets_distribution_path'], {'allow_pickle': '(True)'}), '(pretrain_sets_distribution_path, allow_pickle=True)\n', (2305, 2357), True, 'import numpy as np\n'), ((1782, 1822), 'scipy.special.kl_div', 'scipy.special.kl_div', (['values_0', 'values_1'], {}), '(values_0, values_1)\n', (1802, 1822), False, 'import scipy\n'), ((6939, 6957), 'numpy.shape', 'np.shape', (['features'], {}), '(features)\n', (6947, 6957), True, 'import numpy as np\n'), ((7769, 7787), 'numpy.max', 'np.max', (['get_scores'], {}), '(get_scores)\n', (7775, 7787), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.array', 'np.array', (['values_0'], {}), '(values_0)\n', (1873, 1883), True, 'import numpy as np\n'), ((1886, 1904), 'numpy.array', 'np.array', (['values_1'], {}), '(values_1)\n', (1894, 1904), True, 'import numpy as np\n'), ((4939, 4958), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (4952, 4958), False, 'import os\n'), ((7726, 7747), 'numpy.argmax', 'np.argmax', (['get_scores'], {}), '(get_scores)\n', (7735, 7747), True, 'import numpy as np\n'), ((3942, 3956), 'numpy.round', 'np.round', (['w', '(2)'], {}), '(w, 2)\n', (3950, 3956), 
True, 'import numpy as np\n'), ((3966, 3993), 'numpy.arange', 'np.arange', (['(0.0)', '(1.001)', '(0.01)'], {}), '(0.0, 1.001, 0.01)\n', (3975, 3993), True, 'import numpy as np\n'), ((6436, 6460), 'numpy.shape', 'np.shape', (['data_df.values'], {}), '(data_df.values)\n', (6444, 6460), True, 'import numpy as np\n')] |
""" Copyright 2016-2022 by Bitmain Technologies Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
#import re
import json
import copy
#import numpy as np
import mxnet as mx
from mxnet import gluon
import bmnetm
from ..common.base_splitter import Splitter
from ..external.mxnet_functions import load_json_file
from ..external.mxnet_functions import get_index_dict
from ..external.mxnet_functions import get_input_names_from_json
from ..external.mxnet_functions import get_output_names_from_json
from ..external.mxnet_functions import node_is_weight
from ..external.mxnet_functions import get_all_ops
from ..external.mxnet_functions import get_input_names_from_file
from ..external.mxnet_functions import get_output_names_from_file
from ..external.mxnet_functions import sym_has_params
from ..external.mxnet_functions import get_prefix_and_epoch
from ..external.mxnet_functions import load_mxnet_model
from ..external.mxnet_functions import infer_mxnet
def get_more_than_x(numbers, value):
  """ Return the elements of numbers that are >= value, order preserved.
  (Despite the name, the bound is inclusive — behavior kept as-is.)
  Args:
    numbers: iterable of comparable values.
    value: inclusive lower bound.
  Returns:
    A list of the qualifying elements.
  """
  # Idiomatic comprehension replaces the manual append loop.
  return [i for i in numbers if i >= value]
def get_input_tensors(sub_graph):
  """ Collect every input tensor name of a sub_graph.
  Args:
    sub_graph: A SubGraph instance.
  Returns:
    A new set with all input tensor names: the subgraph's own inputs plus
    those coming from predecessor subgraphs.
  """
  tensors = set(sub_graph.input_ops)
  for tensor_set in sub_graph.input_subgraphs.values():
    tensors |= tensor_set
  return tensors
def get_output_tensors(sub_graph):
  """ Collect every output tensor name of a sub_graph.
  Args:
    sub_graph: A SubGraph instance.
  Returns:
    A new set with all output tensor names: the subgraph's own outputs plus
    those consumed by successor subgraphs.
  """
  tensors = set(sub_graph.output_ops)
  for tensor_set in sub_graph.output_subgraphs.values():
    tensors |= tensor_set
  return tensors
def find_arg_nodes(nodes, input_names, ops, index_dict):
  """ Find indexes of all argument nodes. Argument nodes are input tensors
  and weights.
  Args:
    nodes: A json object contain all the nodes in a mxnet json file.
    input_names: Names of input tensors.
    ops: Names of operators.
    index_dict: A dict denotes relationships between name and index of nodes.
  Returns:
    A sorted list of argument-node indexes. Input tensors occupy the
    negative indexes -len(input_names)..-1.
  """
  arg_nodes = set(range(-len(input_names), 0))
  for operator in ops:
    node = nodes[index_dict[operator]]
    # Weight parents of each operator are arguments too.
    arg_nodes.update(parent[0] for parent in node["inputs"]
                     if node_is_weight(nodes[parent[0]]))
  return sorted(arg_nodes)
def find_heads(output_tensors, index_dict):
  """ Find node indexes of the output tensors (heads).
  Args:
    output_tensors: Names of output tensors.
    index_dict: A dict denotes relationships between name and index of nodes.
  Returns:
    A sorted list of unique head indexes.
  """
  return sorted({index_dict[op] for op in output_tensors})
def find_split_sons(raw_nodes, parent_id, sub_ops_ids):
  """ Find output-slot ids consumed from a SliceChannel parent node.
  Args:
    raw_nodes: A json object contain all the nodes of the raw mxnet json file.
    parent_id: Id of a node.
    sub_ops_ids: Ids of all ops in a sub graph.
  Returns:
    A sorted list of slot ids used by the subgraph; an empty list when the
    parent is not a SliceChannel. (The original returned a set in that case
    and a list otherwise — the return type is now consistently a list.)
  """
  if raw_nodes[parent_id]["op"] != "SliceChannel":
    return []
  # inputs entries are [node_id, output_slot, version]; collect the slots
  # that reference this parent.
  split_ids = {lst[1]
               for op_id in sub_ops_ids
               for lst in raw_nodes[op_id]["inputs"]
               if lst[0] == parent_id}
  return sorted(split_ids)
def gen_json(raw_json, sub_graph, index_dict, sub_json_path):
  """ Generate json file of a subgraph.
  Args:
    raw_json: Json object read from json file of raw model.
    sub_graph: A SubGraph instance.
    index_dict: A dict denotes relationships between name and index of nodes.
    sub_json_path: Path of json file to save.
  Returns:
    None.
  """
  data = {"nodes":list(), "arg_nodes":list(), "heads":list(), "attrs":dict()}
  nodes = raw_json["nodes"]
  # Tensor names crossing the subgraph boundary.
  input_tensors = get_input_tensors(sub_graph)
  output_tensors = get_output_tensors(sub_graph)
  ops_ids = [index_dict[op] for op in sub_graph.ops]
  input_ids = [index_dict[op] for op in input_tensors]
  input_split_ids = list()
  input_names = list()
  # Materialize one "null" (placeholder) node per input tensor. An input fed
  # by a SliceChannel gets one placeholder per consumed output slot, named
  # "<tensor>_<slot>_sophon_auto" so each slice becomes a separate input.
  for tensor in input_tensors:
    parent_id = index_dict[tensor]
    split_ids = find_split_sons(nodes, parent_id, ops_ids)
    if not split_ids:
      input_names.append(tensor)
      data["nodes"].append({"op":"null", "name":tensor, "inputs":[]})
      continue
    input_split_ids.append(parent_id)
    for i in split_ids:
      name = tensor + "_" + str(i) + "_sophon_auto"
      input_names.append(name)
      data["nodes"].append({"op":"null", "name":name, "inputs":[]})
  arg_nodes = find_arg_nodes(nodes, input_names, \
      sub_graph.ops, index_dict)
  # All node ids kept in the subgraph (args + ops, minus ids that became
  # placeholder inputs above).
  total_node_ids = list((set(arg_nodes) | set(ops_ids)) - set(input_ids))
  total_node_ids.sort()
  # heads = find_heads(nodes, output_tensors, index_dict)
  heads = find_heads(output_tensors, index_dict)
  # Negative ids denote synthetic inputs; only copy real raw-json nodes.
  tmp_total_node_ids = get_more_than_x(total_node_ids, 0)
  for i in tmp_total_node_ids:
    #if i >= 0:
    data["nodes"].append(nodes[i])
  # Re-index: node ids in the sub-json differ from those in the raw json.
  new_index_dict = get_index_dict(data["nodes"])
  # Rewrite each node's input references to the new indexes. A reference to a
  # split (SliceChannel) parent is redirected to its renamed placeholder,
  # whose slot/version are reset to 0.
  for node in data["nodes"]:
    inputs = list()
    for i in node["inputs"]:
      if i[0] in input_split_ids:
        new_input_name = nodes[i[0]]["name"] + "_" + str(i[1]) + \
            "_sophon_auto"
        inputs.append([new_index_dict[new_input_name], 0, 0])
      else:
        inputs.append([new_index_dict[nodes[i[0]]["name"]], i[1], i[2]])
    node["inputs"] = inputs
  data["arg_nodes"] = [total_node_ids.index(i) for i in arg_nodes]
  data["attrs"] = raw_json["attrs"]
  data["heads"] = list()
  # A SliceChannel head exposes every one of its num_outputs slots.
  for i in heads:
    if nodes[i]["op"] == "SliceChannel":
      for j in range(int(nodes[i]["attrs"]["num_outputs"])):
        data["heads"].append([new_index_dict[nodes[i]["name"]], j, 0])
    else:
      data["heads"].append([new_index_dict[nodes[i]["name"]], 0, 0])
  formatted = json.dumps(data, indent=2, sort_keys=False)
  with open(sub_json_path, 'w') as f_save:
    f_save.write(formatted)
def gen_params(raw_params_path, sub_json_path, sub_params_path, input_tensors):
  """ Get features which are intermediate results of the model.
  Args:
    raw_params_path: Path of params file of the raw mxnet model.
    sub_json_path: Path of json file of the submodel.
    sub_params_path: Path of params file of the submodel.
    input_tensors: A list contains all input tensor names and shapes.
                   Format: [(tensor_name, numpy.ndarray), ]
  Returns:
    True for save parameters to file, False for no parameters and not save.
    NOTE(review): on an unresolvable output name this actually returns None
    after printing "Wrong name" — callers treating the result as a strict
    bool should be aware.
  """
  sym = mx.sym.load(sub_json_path)
  has_params = sym_has_params(sym, [item[0] for item in input_tensors])
  output_names = get_output_names_from_file(sub_json_path)
  internals = sym.get_internals()
  outputs_ops = sym.get_internals().list_outputs()
  outputs = list()
  # Map each output name to mxnet's internal "<name>_output" convention.
  # Synthetic "<tensor>_<slot>_sophon_auto" names map to "<tensor>_output<slot>".
  for name in output_names:
    if name.endswith("sophon_auto"):
      tokens = name.split('_')
      out_name = "_".join(tokens[0:-3] + ["output" + tokens[-3]])
    else:
      out_name = name + '_output'
    if out_name not in outputs_ops:
      print("Wrong name: {}".format(name))
      return None
    outputs.append(internals[out_name])
  inputs = list()
  for item in input_tensors:
    tensor_name = item[0]
    inputs.append(mx.sym.var(tensor_name))
  # Wrap the selected internal outputs as a gluon network.
  net = gluon.nn.SymbolBlock(outputs=outputs, inputs=inputs)
  # Set the params; ignore_extra drops weights not used by this submodel.
  net.collect_params().load(raw_params_path, ctx=mx.cpu(), ignore_extra=True)
  # One forward pass so the exported graph/params are fully initialized.
  input_data = [mx.nd.array(item[1]) for item in input_tensors]
  outputs = net(*input_data)
  prefix, epoch = get_prefix_and_epoch(sub_params_path)
  prefix = os.path.join(os.path.dirname(sub_params_path), prefix)
  net.export(prefix, epoch=epoch)
  return has_params
class MxnetSplitter(Splitter):
  """ Split a Mxnet model into submodels.
  """
  def initialize(self):
    """ Load graph information from mxnet model descriptor.
        ops: Information of all operators, excluding weight nodes.
             Format: {op_name: (op_type, [parent_name])}.
        input_ops: list, names of all input tensors.
        output_ops: list, names of all output tensors.
        json_path: Path to symbol file.
        params_path: Path to parameter file.
        sym_json: Json read from symbol file.
        index_dict: Relationships between name and index of nodes.
                    Format: {node_name: node_index}
        input_names: Input tensor names.
        output_names: Output tensor names.
        prefix: Prefix of saved model.
        epoch: Epoch number of saved model.
    NOTE(review): self.model_descriptor is presumably set by the Splitter
    base class before initialize() runs — confirm in base_splitter. The
    "dynamic" key is validated but never stored as an attribute here.
    """
    self.platform = 'mxnet'
    # Validate that the descriptor carries every required key up front.
    required_args = ["json_path", "params_path", "dynamic", "input_tensors"]
    for arg in required_args:
      assert arg in self.model_descriptor.keys()
    self.json_path = self.model_descriptor["json_path"]
    self.ops, self.input_ops, self.output_ops = get_all_ops(self.json_path)
    self.params_path = self.model_descriptor["params_path"]
    self.sym_json = load_json_file(self.json_path)
    self.index_dict = get_index_dict(self.sym_json["nodes"])
    self.input_names = get_input_names_from_json(self.sym_json)
    self.output_names = get_output_names_from_json(self.sym_json)
    self.prefix, self.epoch = get_prefix_and_epoch(self.params_path)
    self.input_tensors = self.model_descriptor["input_tensors"]
  def get_op_name(self, op_name):
    # Mxnet op names are used verbatim as graph identifiers.
    return op_name
  def is_op_support(self, op_name):
    # "null" nodes (tensors/weights) are always supported; real ops are
    # checked against the bmnetm compiler's support list.
    param = {"op": self.ops[op_name][0]}
    if self.ops[op_name][0] == 'null' or bmnetm.op_support(param):
      return True
    return False
  def is_op_compute(self, op_name):
    # Ops considered compute-heavy when balancing subgraphs.
    compute_list = [
        'Convolution',
        'Pooling',
        'Activation',
        'elemwise_add',
        'FullyConnected',
        'BatchNorm'
    ]
    if self.ops[op_name][0] in compute_list:
      return True
    return False
  def is_op_dangerous(self, op_name):
    # No mxnet op types are currently blacklisted.
    dangerous_list = [
    ]
    if self.ops[op_name][0] in dangerous_list:
      return True
    return False
  def is_input_op(self, op_name):
    return True if op_name in self.input_ops else False
  def is_output_op(self, op_name):
    return True if op_name in self.output_ops else False
  def get_inputs_list(self, op_name):
    # Parent (input) op names of the given op.
    return self.ops[op_name][1]
  def destroy(self):
    # Nothing to clean up for mxnet.
    pass
  def save_subgraph(self, graph, save_folder, index, tensors):
    """ Save submodel to files.
    Args:
      graph: A SubGraph instances.
      save_folder: Folder path to save json file and params file.
      index: Index of subgraph.
      tensors: A dict contains tensor names and values.
    Returns:
      model_info: A dict contains model information.
                  Format: {"json": json_name, "params": params_name}
      input_names: list, input tensor names of the submodel.
      ouput_names: list, output tensor names of the submodel.
    """
    model_info = dict()
    json_name = '{}_{}-symbol.json'.format(self.prefix, index)
    params_name = '{}_{}-{:0>4}.params'.format(self.prefix, index, self.epoch)
    json_path = os.path.join(save_folder, json_name)
    # Write the subgraph's symbol file, then derive its params from it.
    gen_json(self.sym_json, graph, self.index_dict, json_path)
    input_names = get_input_names_from_file(json_path)
    input_tensors = [(i, tensors[i]) for i in input_names]
    params_path = os.path.join(save_folder, params_name)
    has_params = gen_params(self.params_path, json_path, \
        params_path, input_tensors)
    model_info["json"] = json_name
    # A params entry is only recorded when the submodel has weights.
    if has_params:
      model_info["params"] = params_name
    input_names = get_input_names_from_file(json_path)
    output_names = get_output_names_from_file(json_path)
    return model_info, input_names, output_names
  def infer_output_tensors(self, save_folder, model_info, input_names, \
      output_names, tensors):
    """ Get output shapes of the model.
    Args:
      save_folder: Folder path to save json files.
      model_info: A dict contains model information.
                  Format: {"json": json_name, "params": params_name}
      input_names: list, input tensor names.
      ouput_names: list, output tensor names.
      tensor_tensors: A dict contains tensor names and values.
    Returns:
      A list of numpy.ndarray, contains the output tensors.
    """
    # Load with params only when the submodel has them.
    if "params" in model_info:
      model = load_mxnet_model(device='cpu', folder=save_folder, \
          json_file=model_info["json"], params=model_info['params'])
    else:
      model = load_mxnet_model(device='cpu', folder=save_folder, \
          json_file=model_info["json"])
    input_tensors = [(name, tensors[name]) for name in input_names]
    required_outputs = [(name, None) for name in output_names]
    outputs = infer_mxnet(model, input_tensors, required_outputs, device='cpu')
    # Preserve the requested output order.
    ret = [outputs[name] for name in output_names]
    return ret
  def get_tensor_dtype(self, tensor_name):
    # All tensors are reported as dtype code 0 (float32 by convention here);
    # NOTE(review): looks like a stub — confirm against base_splitter.
    return 0
| [
"copy.deepcopy",
"mxnet.gluon.nn.SymbolBlock",
"mxnet.cpu",
"json.dumps",
"os.path.join",
"os.path.dirname",
"mxnet.sym.var",
"mxnet.sym.load",
"mxnet.nd.array",
"bmnetm.op_support"
] | [((1907, 1941), 'copy.deepcopy', 'copy.deepcopy', (['sub_graph.input_ops'], {}), '(sub_graph.input_ops)\n', (1920, 1941), False, 'import copy\n'), ((2288, 2323), 'copy.deepcopy', 'copy.deepcopy', (['sub_graph.output_ops'], {}), '(sub_graph.output_ops)\n', (2301, 2323), False, 'import copy\n'), ((6852, 6895), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)', 'sort_keys': '(False)'}), '(data, indent=2, sort_keys=False)\n', (6862, 6895), False, 'import json\n'), ((7529, 7555), 'mxnet.sym.load', 'mx.sym.load', (['sub_json_path'], {}), '(sub_json_path)\n', (7540, 7555), True, 'import mxnet as mx\n'), ((8260, 8312), 'mxnet.gluon.nn.SymbolBlock', 'gluon.nn.SymbolBlock', ([], {'outputs': 'outputs', 'inputs': 'inputs'}), '(outputs=outputs, inputs=inputs)\n', (8280, 8312), False, 'from mxnet import gluon\n'), ((8426, 8446), 'mxnet.nd.array', 'mx.nd.array', (['item[1]'], {}), '(item[1])\n', (8437, 8446), True, 'import mxnet as mx\n'), ((8583, 8615), 'os.path.dirname', 'os.path.dirname', (['sub_params_path'], {}), '(sub_params_path)\n', (8598, 8615), False, 'import os\n'), ((11979, 12015), 'os.path.join', 'os.path.join', (['save_folder', 'json_name'], {}), '(save_folder, json_name)\n', (11991, 12015), False, 'import os\n'), ((12211, 12249), 'os.path.join', 'os.path.join', (['save_folder', 'params_name'], {}), '(save_folder, params_name)\n', (12223, 12249), False, 'import os\n'), ((8227, 8250), 'mxnet.sym.var', 'mx.sym.var', (['tensor_name'], {}), '(tensor_name)\n', (8237, 8250), True, 'import mxnet as mx\n'), ((8381, 8389), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (8387, 8389), True, 'import mxnet as mx\n'), ((10433, 10457), 'bmnetm.op_support', 'bmnetm.op_support', (['param'], {}), '(param)\n', (10450, 10457), False, 'import bmnetm\n')] |
# https://atcoder.jp/contests/abc185/tasks/abc185_d
# Cover every white square with stamps of one chosen width k: pick k as the
# smallest non-empty white gap between black squares, then a gap of length g
# needs ceil(g / k) stamps.
from math import ceil

N, M = map(int, input().split())
if M == 0:
    # No black squares at all: a single stamp of width N suffices.
    print(1)
    exit()
blacks = sorted(map(int, input().split()))
# White gap before the first black, between consecutive blacks, and after
# the last one.
gaps = [blacks[0] - 1]
gaps += [blacks[i] - blacks[i - 1] - 1 for i in range(1, M)]
gaps.append(N - blacks[-1])
width = min((g for g in gaps if g > 0), default=N)
print(sum(ceil(g / width) for g in gaps))
| [
"math.ceil"
] | [((499, 516), 'math.ceil', 'ceil', (['(b / minimum)'], {}), '(b / minimum)\n', (503, 516), False, 'from math import ceil\n')] |
"""The implementation of U-Net and FCRN-A models."""
from typing import Tuple
import numpy as np
import torch
from torch import nn
from torchvision.models import resnet
from model_config import DROPOUT_PROB
class UOut(nn.Module):
    """Uniform-noise regularization ("U-Out"): during training, adds uniform
    noise in [-DROPOUT_PROB, DROPOUT_PROB) to the activations; at eval time
    the input passes through unchanged."""

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Noise is a train-time-only perturbation.
        if not self.training:
            return input_tensor
        # NOTE(review): hard-codes CUDA device 0 — this breaks on CPU-only
        # runs; confirm all training happens on cuda:0.
        with torch.cuda.device(0):
            # uniform_() samples U[0, 1); 2*p*u - p maps it to U[-p, p).
            return input_tensor + 2*DROPOUT_PROB*torch.cuda.FloatTensor(
                input_tensor.shape).uniform_() - DROPOUT_PROB
class ResNet(nn.Module):
    """Residual wrapper: output = module(x) + skip(x).

    When stride != 1 or the channel counts differ, a 1x1 convolution +
    batch norm projects the identity branch to the right shape.

    Fix over the original: the projection used to be rebuilt (with fresh,
    untrained weights) on every forward pass and forcibly moved to CUDA.
    It is now created once in __init__, so its parameters are registered
    with the module (trained, saved, and moved with the model's device).
    """

    def __init__(self, module, in_channels, out_channels, stride):
        super().__init__()
        self.module = module
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        # Build the identity-branch projection once, not per forward call.
        if stride != 1 or in_channels != out_channels:
            self.skip = nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels))
        else:
            self.skip = None

    def forward(self, inputs):
        output = self.module(inputs)
        # Project the residual branch only when shapes would mismatch.
        identity = inputs if self.skip is None else self.skip(inputs)
        output += identity
        return output
class BlockBuilder:
    """Create convolutional blocks for building neural nets."""

    def __init__(self, dropout: bool):
        # When True, a UOut noise layer is inserted after each ReLU.
        self.dropout = dropout

    def conv_block(self, channels: Tuple[int, int],
                   size: Tuple[int, int],
                   stride: Tuple[int, int] = (1, 1),
                   N: int = 1):
        """
        Create a block with N convolutional layers with ReLU activation function.
        The first layer is IN x OUT, and all others - OUT x OUT.

        Args:
            channels: (IN, OUT) - no. of input and output channels
            size: kernel size (fixed for all convolution in a block)
            stride: stride (fixed for all convolution in a block)
            N: no. of convolutional layers

        Returns:
            A sequential container of N convolutional layers.
        """
        # a single convolution (in a residual ResNet shell) + ReLU [+ UOut]
        # followed by batch normalization
        def block(in_channels):
            conv = nn.Conv2d(in_channels=in_channels,
                             out_channels=channels[1],
                             kernel_size=size,
                             stride=stride,
                             bias=False,
                             padding=(size[0] // 2, size[1] // 2))
            layers = [ResNet(nn.Sequential(conv), in_channels, channels[1], stride),
                      nn.ReLU()]
            if self.dropout:
                layers.append(UOut())
            post_activation = nn.Sequential(*layers)
            return nn.Sequential(post_activation,
                                 nn.BatchNorm2d(num_features=channels[1]))

        # create and return a sequential container of convolutional layers
        # input size = channels[0] for first block and channels[1] for all others
        return nn.Sequential(*[block(channels[bool(i)]) for i in range(N)])
class ConvCat(nn.Module):
    """Convolutional block followed by x2 upsampling and channel concatenation."""

    def __init__(self,
                 channels: Tuple[int, int],
                 size: Tuple[int, int],
                 stride: Tuple[int, int] = (1, 1),
                 N: int = 1,
                 dropout: bool = False):
        """Build N convolutional layers (see BlockBuilder.conv_block) plus
        upsampling by a factor of 2."""
        super(ConvCat, self).__init__()
        builder = BlockBuilder(dropout)
        self.conv = nn.Sequential(
            builder.conv_block(channels, size, stride, N),
            nn.Upsample(scale_factor=2))

    def forward(self, to_conv: torch.Tensor, to_cat: torch.Tensor):
        """Forward pass.

        Args:
            to_conv: input passed through the conv block and upsampling
            to_cat: skip input concatenated with the conv output (channel dim)
        """
        upsampled = self.conv(to_conv)
        return torch.cat([upsampled, to_cat], dim=1)
class FCRN_A(nn.Module):
    """
    Fully Convolutional Regression Network A.

    Ref. 'Microscopy Cell Counting with Fully Convolutional Regression Networks'
    """

    def __init__(self, N: int = 1, input_filters: int = 3, dropout: bool = True, **kwargs):
        """
        Create an FCRN-A model with:

            * fixed kernel size = (3, 3)
            * fixed max pooling kernel size = (2, 2) and upsampling factor = 2
            * filter counts as in the original model:
              input size -> 32 -> 64 -> 128 -> 512 -> 128 -> 64 -> 1

        Args:
            N: no. of convolutional layers per block (see conv_block)
            input_filters: no. of input channels
        """
        super(FCRN_A, self).__init__()
        builder = BlockBuilder(dropout)
        kernel = (3, 3)
        self.model = nn.Sequential(
            # downsampling path
            builder.conv_block(channels=(input_filters, 32), size=kernel, N=N),
            nn.MaxPool2d(2),
            builder.conv_block(channels=(32, 64), size=kernel, N=N),
            nn.MaxPool2d(2),
            builder.conv_block(channels=(64, 128), size=kernel, N=N),
            nn.MaxPool2d(2),
            # "convolutional fully connected"
            builder.conv_block(channels=(128, 512), size=kernel, N=N),
            # upsampling path
            nn.Upsample(scale_factor=2),
            builder.conv_block(channels=(512, 128), size=kernel, N=N),
            nn.Upsample(scale_factor=2),
            builder.conv_block(channels=(128, 64), size=kernel, N=N),
            nn.Upsample(scale_factor=2),
            builder.conv_block(channels=(64, 1), size=kernel, N=N),
        )

    def forward(self, input: torch.Tensor):
        """Forward pass."""
        return self.model(input)
class UNet(nn.Module):
    """
    U-Net implementation.

    Ref. 'U-net: Convolutional networks for biomedical image segmentation.'
    """

    def __init__(self, filters: int = 64, input_filters: int = 3,
                 dropout: bool = False, **kwargs):
        """
        Create a U-Net model with:

            * fixed kernel size = (3, 3)
            * fixed max pooling kernel size = (2, 2) and upsampling factor = 2
            * fixed no. of convolutional layers per block = 2 (see conv_block)
            * constant no. of filters for convolutional layers

        Args:
            filters: no. of filters for convolutional layers
            input_filters: no. of input channels
        """
        super(UNet, self).__init__()
        initial_filters = (input_filters, filters)   # first encoder block
        down_filters = (filters, filters)            # remaining encoder blocks
        up_filters = (2 * filters, filters)          # decoder (doubled by concat)
        builder = BlockBuilder(dropout)

        # encoder
        self.block1 = builder.conv_block(channels=initial_filters, size=(3, 3), N=2)
        self.block2 = builder.conv_block(channels=down_filters, size=(3, 3), N=2)
        self.block3 = builder.conv_block(channels=down_filters, size=(3, 3), N=2)

        # decoder
        self.block4 = ConvCat(channels=down_filters, size=(3, 3), N=2)
        self.block5 = ConvCat(channels=up_filters, size=(3, 3), N=2)
        self.block6 = ConvCat(channels=up_filters, size=(3, 3), N=2)

        # density prediction head
        self.block7 = builder.conv_block(channels=up_filters, size=(3, 3), N=2)
        self.density_pred = nn.Conv2d(in_channels=filters, out_channels=1,
                                      kernel_size=(1, 1), bias=False)

    def forward(self, input: torch.Tensor):
        """Forward pass."""
        # the same (2, 2) max pooling is used across the network
        downsample = nn.MaxPool2d(2)

        # encoder; keep each block's output for the decoder skip connections
        skip1 = self.block1(input)
        skip2 = self.block2(downsample(skip1))
        skip3 = self.block3(downsample(skip2))

        # decoder, concatenating the matching encoder outputs
        up = self.block4(downsample(skip3), skip3)
        up = self.block5(up, skip2)
        up = self.block6(up, skip1)

        # density prediction
        return self.density_pred(self.block7(up))
# --- PYTESTS --- #
def run_network(network: nn.Module, input_channels: int):
    """Instantiate `network`, feed a constant 224x224 image, check output shape."""
    sample = torch.ones((1, input_channels, 224, 224))
    output = network(input_filters=input_channels)(sample)
    assert output.shape == (1, 1, 224, 224)
# Forward-pass smoke tests: run_network asserts the (1, 1, 224, 224) output shape.
def test_UNet_color():
    """Test U-Net on RGB images."""
    run_network(UNet, 3)
def test_UNet_grayscale():
    """Test U-Net on grayscale images."""
    run_network(UNet, 1)
def test_FRCN_color():
    """Test FCRN-A on RGB images."""
    run_network(FCRN_A, 3)
def test_FRCN_grayscale():
    """Test FCRN-A on grayscale images."""
    run_network(FCRN_A, 1)
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.cuda.FloatTensor",
"torch.cuda.device",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.ones"
] | [((9248, 9289), 'torch.ones', 'torch.ones', (['(1, input_channels, 224, 224)'], {}), '((1, input_channels, 224, 224))\n', (9258, 9289), False, 'import torch\n'), ((8298, 8376), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'filters', 'out_channels': '(1)', 'kernel_size': '(1, 1)', 'bias': '(False)'}), '(in_channels=filters, out_channels=1, kernel_size=(1, 1), bias=False)\n', (8307, 8376), False, 'from torch import nn\n'), ((8576, 8591), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (8588, 8591), False, 'from torch import nn\n'), ((416, 436), 'torch.cuda.device', 'torch.cuda.device', (['(0)'], {}), '(0)\n', (433, 436), False, 'import torch\n'), ((3486, 3508), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3499, 3508), False, 'from torch import nn\n'), ((4493, 4520), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (4504, 4520), False, 'from torch import nn\n'), ((5807, 5822), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (5819, 5822), False, 'from torch import nn\n'), ((5901, 5916), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (5913, 5916), False, 'from torch import nn\n'), ((5996, 6011), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (6008, 6011), False, 'from torch import nn\n'), ((6164, 6191), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (6175, 6191), False, 'from torch import nn\n'), ((6272, 6299), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (6283, 6299), False, 'from torch import nn\n'), ((6379, 6406), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (6390, 6406), False, 'from torch import nn\n'), ((1041, 1163), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.in_channels', 'out_channels': 'self.out_channels', 'kernel_size': '(1)', 'stride': 'self.stride', 'bias': '(False)'}), 
'(in_channels=self.in_channels, out_channels=self.out_channels,\n kernel_size=1, stride=self.stride, bias=False)\n', (1050, 1163), False, 'from torch import nn\n'), ((1203, 1236), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.out_channels'], {}), '(self.out_channels)\n', (1217, 1236), False, 'from torch import nn\n'), ((3379, 3388), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3386, 3388), False, 'from torch import nn\n'), ((3558, 3598), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'channels[1]'}), '(num_features=channels[1])\n', (3572, 3598), False, 'from torch import nn\n'), ((2911, 3059), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'channels[1]', 'kernel_size': 'size', 'stride': 'stride', 'bias': '(False)', 'padding': '(size[0] // 2, size[1] // 2)'}), '(in_channels=in_channels, out_channels=channels[1], kernel_size=\n size, stride=stride, bias=False, padding=(size[0] // 2, size[1] // 2))\n', (2920, 3059), False, 'from torch import nn\n'), ((487, 529), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['input_tensor.shape'], {}), '(input_tensor.shape)\n', (509, 529), False, 'import torch\n')] |
import os
import argparse
import json
from datetime import datetime
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import bert # https://github.com/kpe/bert-for-tf2/
from onecycle import OneCycleScheduler # https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/
from imdb import get_imdb_data
from tweets import get_tweets_data
from amazon import get_reviews_data
# Command-line configuration. Typos fixed in two help strings
# ("frequence" -> "sequence", "categoroies" -> "categories").
parser = argparse.ArgumentParser()
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
parser.add_argument("--experiment_name", type=str, default=current_time, help="Insert string defining your experiment. Defaults to datetime.now()")
parser.add_argument("--task", type=str, required=True, help="One of imdb, reviews, or tweets.")
parser.add_argument("--subtask", type=str, default="german", help="One of german or multi. Ignored for imdb task.")
parser.add_argument("--ckpt_name", type=str, default="bert_model.ckpt", help="Name of BERT checkpoint to load.")
parser.add_argument("--bert_base_path", type=str, default="D:/bert_models/", help="Where to find BERT models.")
parser.add_argument("--model_name", type=str, default=None, help="Name of BERT model. Default depends on task.")
parser.add_argument("--data_dir", type=str, default="data", help="Data directory.")
parser.add_argument("--log_dir", type=str, default="D:\\logs", help="Log directory.")
# training parameters
parser.add_argument("--batch_size", type=int, default=2, help="Batch size.")
parser.add_argument("--patience", type=int, default=3, help="Patience for early stopping.")
parser.add_argument("--learning_rate", type=float, default=2e-5, help="Learning rate.")
parser.add_argument("--max_seq_length", type=int, default=512, help="Maximum sequence length.")
parser.add_argument("--no_class_weights", action='store_true', help="Don't use class weights.")
parser.add_argument("--num_epochs", type=int, default=3, help="Maximum number of epochs.")
parser.add_argument("--test_size", type=float, default=None, help="Test size. Default depends on task.")
parser.add_argument("--num_categories", type=int, default=None, help="Number of categories. Defaults to 2 for imdb, 3 otherwise.")
parser.add_argument("--polarized", action='store_true', help="For reviews data: if true and num_categories=3, count only 1 and 5 as pos/neg")
# read variables into module-level names used throughout the script
ARGS = parser.parse_args()
experiment_name = ARGS.experiment_name
batch_size = ARGS.batch_size
learning_rate = ARGS.learning_rate
max_seq_length = ARGS.max_seq_length
ckpt_name = ARGS.ckpt_name
use_class_weights = not ARGS.no_class_weights
num_epochs = ARGS.num_epochs
task = ARGS.task
bert_base_path = ARGS.bert_base_path
num_categories = ARGS.num_categories
model_name = ARGS.model_name
test_size = ARGS.test_size
subtask = ARGS.subtask
data_dir = ARGS.data_dir
log_dir = ARGS.log_dir
patience = ARGS.patience
polarized = ARGS.polarized
print('Experiment name is ' + experiment_name + '.')
# Fill in per-task defaults for any options the user left unset.
# (PEP 8 fix: compare against None with 'is', not '=='.)
if task == "imdb":
    if model_name is None:
        model_name = "uncased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 2
elif task == "tweets":
    # German-only model for the 'german' subtask, multilingual BERT otherwise.
    if model_name is None:
        model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 3
    if test_size is None:
        test_size = 0.2
elif task == "reviews":
    if model_name is None:
        model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 3
    if test_size is None:
        test_size = 0.5
else:
    raise Exception('No such task.')
# Write the resolved values back so the saved config reflects what was used.
ARGS.model_name = model_name
ARGS.num_categories = num_categories
ARGS.test_size = test_size
# Resolve the per-experiment log directory and per-task data directory.
log_dir = os.path.join(log_dir, experiment_name)
data_dir = os.path.join(data_dir, task)
os.makedirs(log_dir, exist_ok=True)
# Persist the full run configuration next to the logs for reproducibility.
# (Fix: the original leaked the file handle via json.dump(..., open(...)).)
config = vars(ARGS)
with open(os.path.join(log_dir, 'config.json'), 'w') as config_file:
    json.dump(config, config_file, indent=4, sort_keys=True)
if subtask != 'german' and subtask != 'multi':
    raise Exception("No such subtask.")
def get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size):
    """Load the train/test split for the selected task.

    Returns the 8-tuple (train_input_ids, train_input_masks, train_segment_ids,
    train_labels, test_input_ids, test_input_masks, test_segment_ids,
    test_labels) produced by the task-specific loader.

    Raises:
        Exception: if task is not one of 'imdb', 'tweets' or 'reviews'.
    """
    if task == "imdb":
        # The IMDB corpus ships with a fixed split, so test_size is unused.
        # (Fix: corrected the "Ignoging" typo in the printed message.)
        print("Ignoring test_size for imdb data.")
        return get_imdb_data(data_dir, tokenizer, max_seq_length)
    elif task == "tweets":
        return get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size)
    elif task == "reviews":
        # NOTE(review): 'polarized' is read from the module-level flag rather
        # than passed as a parameter -- consider threading it through.
        return get_reviews_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size, polarized)
    else:
        raise Exception('No such task.')
if __name__ == "__main__":
    # Locate the pretrained BERT checkpoint and vocabulary on disk.
    bert_path = os.path.join(bert_base_path, model_name)
    model_ckpt = os.path.join(bert_path, ckpt_name)
    # Lower-casing is implied by the model name ("uncased" variants).
    do_lower_case = model_name.find("uncased") != -1
    bert.bert_tokenization.validate_case_matches_checkpoint(do_lower_case, model_ckpt)
    vocab_file = os.path.join(bert_path, "vocab.txt")
    tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file, do_lower_case)
    # Task-specific train/test tensors (see get_data).
    ( train_input_ids,
      train_input_masks,
      train_segment_ids,
      train_labels,
      test_input_ids,
      test_input_masks,
      test_segment_ids,
      test_labels
    ) = get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size)
    # Total optimizer steps drive the one-cycle LR schedule.
    steps = np.ceil(train_input_ids.shape[0] / batch_size) * num_epochs
    lr_schedule = OneCycleScheduler(learning_rate, steps)
    es = EarlyStopping(monitor='val_SparseCategoricalAccuracy', mode='max', verbose=1, patience=patience)
    mc = ModelCheckpoint(os.path.join(log_dir, 'best_model.h5'), monitor='val_SparseCategoricalAccuracy', mode='max', save_best_only=True, save_weights_only=True)
    # Build the classifier: BERT [CLS] embedding -> dropout -> dense -> logits.
    bert_params = bert.params_from_pretrained_ckpt(bert_path)
    l_bert = bert.BertModelLayer.from_params(bert_params, name="bert")
    in_id = keras.layers.Input(shape=(max_seq_length,), name="input_ids")
    # Take the first token ([CLS]) representation as the sequence embedding.
    bert_output = l_bert(in_id)[:, 0, :]
    dropout = keras.layers.Dropout(0.5)(bert_output)
    dense = keras.layers.Dense(768, activation="relu")(dropout)
    dropout = keras.layers.Dropout(0.5)(dense)
    pred = keras.layers.Dense(num_categories, activation=None)(dropout)
    model = keras.models.Model(inputs=in_id, outputs=pred)
    opt = keras.optimizers.Nadam()
    model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['SparseCategoricalAccuracy'])
    # Load pretrained weights into the BERT layer after the model is built.
    bert.load_bert_weights(l_bert, model_ckpt)
    model.summary()
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0,
                                                write_graph=False, write_images=False, update_freq=1000)
    # Balanced class weights computed over the full label set.
    y = np.concatenate([train_labels, test_labels]).flatten()
    wgt = compute_class_weight('balanced', np.unique(y), y)
    if not use_class_weights:
        # Replace with uniform weights of the same shape.
        wgt = (wgt * 0 + 1) / num_categories
    print('Class weights:', wgt)
    # NOTE(review): Keras documents class_weight as a dict {class_index: weight};
    # passing an ndarray relies on index-compatible behavior -- verify.
    model.fit(
        train_input_ids,
        train_labels,
        class_weight=wgt,
        validation_data=(test_input_ids, test_labels),
        shuffle=True,
        epochs=num_epochs,
        batch_size=batch_size,
        callbacks=[tensorboard_callback, es, mc, lr_schedule]
    )
    model.load_weights(os.path.join(log_dir, 'best_model.h5'))
    print("Reloaded best parameters.")
    # Evaluate: per-class recall (confusion-matrix diagonal) and balanced accuracy.
    y_pred = model.predict(test_input_ids)
    y_pred = np.argmax(y_pred, axis=1)
    matrix = confusion_matrix(test_labels, y_pred)
    print(matrix.diagonal()/matrix.sum(axis=1))
    BMAC = balanced_accuracy_score(test_labels, y_pred)
    print(BMAC)
print(BMAC) | [
"sklearn.metrics.balanced_accuracy_score",
"bert.load_bert_weights",
"tensorflow.keras.callbacks.EarlyStopping",
"onecycle.OneCycleScheduler",
"tensorflow.keras.layers.Dense",
"amazon.get_reviews_data",
"os.path.exists",
"tensorflow.keras.layers.Input",
"bert.BertModelLayer.from_params",
"tensorfl... | [((627, 652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (650, 652), False, 'import argparse\n'), ((3962, 4000), 'os.path.join', 'os.path.join', (['log_dir', 'experiment_name'], {}), '(log_dir, experiment_name)\n', (3974, 4000), False, 'import os\n'), ((4012, 4040), 'os.path.join', 'os.path.join', (['data_dir', 'task'], {}), '(data_dir, task)\n', (4024, 4040), False, 'import os\n'), ((4057, 4080), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (4071, 4080), False, 'import os\n'), ((4086, 4106), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (4097, 4106), False, 'import os\n'), ((4914, 4954), 'os.path.join', 'os.path.join', (['bert_base_path', 'model_name'], {}), '(bert_base_path, model_name)\n', (4926, 4954), False, 'import os\n'), ((4972, 5006), 'os.path.join', 'os.path.join', (['bert_path', 'ckpt_name'], {}), '(bert_path, ckpt_name)\n', (4984, 5006), False, 'import os\n'), ((5064, 5150), 'bert.bert_tokenization.validate_case_matches_checkpoint', 'bert.bert_tokenization.validate_case_matches_checkpoint', (['do_lower_case', 'model_ckpt'], {}), '(do_lower_case,\n model_ckpt)\n', (5119, 5150), False, 'import bert\n'), ((5164, 5200), 'os.path.join', 'os.path.join', (['bert_path', '"""vocab.txt"""'], {}), "(bert_path, 'vocab.txt')\n", (5176, 5200), False, 'import os\n'), ((5217, 5280), 'bert.bert_tokenization.FullTokenizer', 'bert.bert_tokenization.FullTokenizer', (['vocab_file', 'do_lower_case'], {}), '(vocab_file, do_lower_case)\n', (5253, 5280), False, 'import bert\n'), ((5682, 5721), 'onecycle.OneCycleScheduler', 'OneCycleScheduler', (['learning_rate', 'steps'], {}), '(learning_rate, steps)\n', (5699, 5721), False, 'from onecycle import OneCycleScheduler\n'), ((5731, 5832), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_SparseCategoricalAccuracy"""', 'mode': '"""max"""', 'verbose': '(1)', 'patience': 'patience'}), 
"(monitor='val_SparseCategoricalAccuracy', mode='max', verbose=\n 1, patience=patience)\n", (5744, 5832), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((6009, 6052), 'bert.params_from_pretrained_ckpt', 'bert.params_from_pretrained_ckpt', (['bert_path'], {}), '(bert_path)\n', (6041, 6052), False, 'import bert\n'), ((6066, 6123), 'bert.BertModelLayer.from_params', 'bert.BertModelLayer.from_params', (['bert_params'], {'name': '"""bert"""'}), "(bert_params, name='bert')\n", (6097, 6123), False, 'import bert\n'), ((6136, 6197), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(max_seq_length,)', 'name': '"""input_ids"""'}), "(shape=(max_seq_length,), name='input_ids')\n", (6154, 6197), False, 'from tensorflow import keras\n'), ((6487, 6533), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'in_id', 'outputs': 'pred'}), '(inputs=in_id, outputs=pred)\n', (6505, 6533), False, 'from tensorflow import keras\n'), ((6549, 6573), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {}), '()\n', (6571, 6573), False, 'from tensorflow import keras\n'), ((6717, 6759), 'bert.load_bert_weights', 'bert.load_bert_weights', (['l_bert', 'model_ckpt'], {}), '(l_bert, model_ckpt)\n', (6739, 6759), False, 'import bert\n'), ((6814, 6938), 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'write_graph': '(False)', 'write_images': '(False)', 'update_freq': '(1000)'}), '(log_dir=log_dir, histogram_freq=0, write_graph=\n False, write_images=False, update_freq=1000)\n', (6841, 6938), False, 'from tensorflow import keras\n'), ((7648, 7673), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (7657, 7673), True, 'import numpy as np\n'), ((7687, 7724), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_labels', 'y_pred'], {}), '(test_labels, y_pred)\n', (7703, 7724), False, 'from 
sklearn.metrics import confusion_matrix\n'), ((7784, 7828), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['test_labels', 'y_pred'], {}), '(test_labels, y_pred)\n', (7807, 7828), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((669, 683), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (681, 683), False, 'from datetime import datetime\n'), ((4150, 4186), 'os.path.join', 'os.path.join', (['log_dir', '"""config.json"""'], {}), "(log_dir, 'config.json')\n", (4162, 4186), False, 'import os\n'), ((4492, 4542), 'imdb.get_imdb_data', 'get_imdb_data', (['data_dir', 'tokenizer', 'max_seq_length'], {}), '(data_dir, tokenizer, max_seq_length)\n', (4505, 4542), False, 'from imdb import get_imdb_data\n'), ((5604, 5650), 'numpy.ceil', 'np.ceil', (['(train_input_ids.shape[0] / batch_size)'], {}), '(train_input_ids.shape[0] / batch_size)\n', (5611, 5650), True, 'import numpy as np\n'), ((5853, 5891), 'os.path.join', 'os.path.join', (['log_dir', '"""best_model.h5"""'], {}), "(log_dir, 'best_model.h5')\n", (5865, 5891), False, 'import os\n'), ((6253, 6278), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6273, 6278), False, 'from tensorflow import keras\n'), ((6304, 6346), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(768)'], {'activation': '"""relu"""'}), "(768, activation='relu')\n", (6322, 6346), False, 'from tensorflow import keras\n'), ((6370, 6395), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6390, 6395), False, 'from tensorflow import keras\n'), ((6414, 6465), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_categories'], {'activation': 'None'}), '(num_categories, activation=None)\n', (6432, 6465), False, 'from tensorflow import keras\n'), ((7065, 7077), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (7074, 7077), True, 'import numpy as np\n'), ((7513, 7551), 'os.path.join', 'os.path.join', (['log_dir', 
'"""best_model.h5"""'], {}), "(log_dir, 'best_model.h5')\n", (7525, 7551), False, 'import os\n'), ((4585, 4677), 'tweets.get_tweets_data', 'get_tweets_data', (['data_dir', 'subtask', 'num_categories', 'tokenizer', 'max_seq_length', 'test_size'], {}), '(data_dir, subtask, num_categories, tokenizer,\n max_seq_length, test_size)\n', (4600, 4677), False, 'from tweets import get_tweets_data\n'), ((6597, 6657), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (6639, 6657), False, 'from tensorflow import keras\n'), ((6968, 7011), 'numpy.concatenate', 'np.concatenate', (['[train_labels, test_labels]'], {}), '([train_labels, test_labels])\n', (6982, 7011), True, 'import numpy as np\n'), ((4717, 4821), 'amazon.get_reviews_data', 'get_reviews_data', (['data_dir', 'subtask', 'num_categories', 'tokenizer', 'max_seq_length', 'test_size', 'polarized'], {}), '(data_dir, subtask, num_categories, tokenizer,\n max_seq_length, test_size, polarized)\n', (4733, 4821), False, 'from amazon import get_reviews_data\n')] |
# scene_manager.py
from copy import deepcopy
from . import editor_session
def recursive_update(a, b):
    """Recursively merge dictionary b into a.

    Assumes b's structure is a subset of a's (every key in b already exists
    in a, and dict values in b correspond to dict values in a).

    Returns:
        True if any leaf value of a actually changed.
    """
    changed = False
    for key, new_value in b.items():
        if isinstance(new_value, dict):
            changed = recursive_update(a[key], new_value) or changed
        else:
            if a[key] != new_value:
                changed = True
            a[key] = new_value
    return changed
class Node(object):
    """A scene-graph node.

    Positive ids are server-assigned ("real"); negative ids are client-side
    placeholders ("fake") awaiting confirmation; NULL_ID (0) means no node.
    """

    NULL_ID = 0

    def __init__(self):
        self.id = None
        self.fake_id = None
        self.root = None
        self.name = ""
        self.local_position = [0.0, 0.0, 0.0]
        self.local_scale = [1.0, 1.0, 1.0]
        self.local_rotation = [0.0, 0.0, 0.0, 1.0]
        self.user_data = None
        self.destroyed = False

    def get_root_id(self):
        """Id of the parent node, or NULL_ID when the node is detached."""
        if self.root is None:
            return Node.NULL_ID
        return self.root.id

    def is_real(self):
        """True for server-assigned (positive) ids."""
        return self.id > Node.NULL_ID

    def is_fake(self):
        """True for client-side placeholder (negative) ids."""
        return self.id < Node.NULL_ID
class ComponentType(object):
    """Tracks every instance of one component type plus pending change sets.

    The new/changed/destroyed sets accumulate work for the sync layer; the
    optional callbacks notify user code about instance lifecycle events.
    """

    def __init__(self, type_name):
        self.type_name = type_name
        self.instances = dict()
        self.new_instances = set()
        self.changed_instances = set()
        self.destroyed_instances = set()
        self.new_instance_callback = None
        self.update_instance_callback = None
        self.destroy_instance_callback = None

    def get_instance(self, node):
        """Return the instance attached to node, or None when absent."""
        return self.instances.get(node)

    def set_new_instance_callback(self, callback):
        self.new_instance_callback = callback

    def set_update_instance_callback(self, callback):
        self.update_instance_callback = callback

    def set_destroy_instance_callback(self, callback):
        self.destroy_instance_callback = callback

    def request_new_instance(self, node):
        """Create, register, and announce a new instance on node."""
        assert not node.destroyed and node not in self.instances
        instance = ComponentInstance(node, self)
        self.instances[node] = instance
        self.new_instances.add(instance)
        callback = self.new_instance_callback
        if callback is not None:
            callback(instance)
        return instance

    def request_destroy_instance(self, node):
        """Flag node's instance as destroyed and announce it (no-op otherwise)."""
        instance = self.instances.get(node)
        if instance is None or instance.destroyed:
            return
        instance.destroyed = True
        self.destroyed_instances.add(instance)
        callback = self.destroy_instance_callback
        if callback is not None:
            callback(instance)
class ComponentInstance(object):
    """One component value attached to a node.

    The value dict may be loaded asynchronously: while is_loaded is False,
    reads return defaults and writes are queued in loaded_callbacks to replay
    once the value arrives. changed_props maps property name -> sequence
    number of the outstanding local change (None until the change is sent).

    Fix: removed an unreachable ``return`` statement after ``return False``
    in set_sub_property_immediate (dead leftover code).
    """

    def __init__(self, node, type_v):
        self.type = type_v
        self.node = node
        self.destroyed = False
        self.is_loaded = False
        self.value = None
        self.changed_props = dict()
        self.loaded_callbacks = list()

    def _set_property(self, prop_name, value):
        """Write one property; returns True when the stored value changed."""
        old_value = self.value[prop_name]
        if isinstance(value, dict):
            # Dict values are merged recursively rather than replaced wholesale.
            assert(isinstance(old_value, dict))
            changed = recursive_update(old_value, value)
        else:
            changed = old_value != value
            self.value[prop_name] = value
        return changed

    def get_value_immediate(self, default=None):
        """Return the whole value dict, or default while still loading."""
        if not self.is_loaded:
            return default
        return self.value

    def get_value_async(self, callback):
        """Invoke callback(value) now, or once the value has loaded."""
        if not self.is_loaded:
            self.loaded_callbacks.append(lambda instance: callback(instance.value))
            return
        callback(self.value)

    def set_value(self, value):
        """Set several properties at once (see set_property)."""
        for prop_name, prop_val in value.items():
            self.set_property(prop_name, prop_val)

    def server_set_value(self, seq_num, value):
        """Apply a server-side update; returns True when anything changed.

        A property is only overwritten when no newer local change is pending
        (seq_num == 0 forces the write); applied entries are cleared from
        changed_props.
        """
        modified = False
        for prop_name, prop_val in value.items():
            # If this property was not expected to be changed, or it's the final change
            if seq_num == 0 or prop_name not in self.changed_props or self.changed_props[prop_name] == seq_num:
                modified = self._set_property(prop_name, prop_val) or modified
                # Remove it from the change table
                self.changed_props.pop(prop_name, None)
        return modified

    def get_property_immediate(self, prop_name, default=None):
        """Return one property, or default while still loading."""
        if not self.is_loaded:
            return default
        return self.value[prop_name]

    def get_sub_property_immediate(self, prop_path, default=None):
        """Return a nested property addressed by a key path, or default while loading."""
        if not self.is_loaded:
            return default
        value = self.value
        for prop_name in prop_path:
            value = value[prop_name]
        return value

    def get_property_async(self, prop_name, callback):
        """Invoke callback(value[prop_name]) now, or once the value has loaded."""
        if not self.is_loaded:
            self.loaded_callbacks.append(lambda instance: callback(instance.value[prop_name]))
            return
        callback(self.value[prop_name])

    def set_property(self, prop_name, value):
        """Set one property, recording the change and firing the update callback."""
        if not self.is_loaded:
            # Deep-copy so later caller-side mutation cannot alter the queued write.
            value = deepcopy(value)
            self.loaded_callbacks.append(lambda instance: instance.set_property(prop_name, value))
            return
        changed = self._set_property(prop_name, value)
        if changed:
            self.changed_props[prop_name] = None
            self.type.changed_instances.add(self)
            # Run the modified callback
            if self.type.update_instance_callback is not None:
                self.type.update_instance_callback(self)

    def set_sub_property_immediate(self, prop_path, value):
        """Merge value into a nested property; returns False while loading, True otherwise."""
        if not self.is_loaded:
            return False
        outer_prop_name = prop_path[0]
        inner_prop_name = prop_path[-1]
        old_value = self.value
        for prop_name in prop_path[:-1]:
            old_value = old_value[prop_name]
        modified = recursive_update(old_value, {inner_prop_name: value})
        if modified:
            self.changed_props[outer_prop_name] = None
            self.type.changed_instances.add(self)
            # Run the update callback
            if self.type.update_instance_callback is not None:
                self.type.update_instance_callback(self)
        return True
class SceneManager(object):
    def __init__(self):
        # Fake (client-side) node ids are negative and count downwards.
        self._next_fake_node_id = -1
        # User callbacks fired on node lifecycle events.
        self._new_node_callback = None
        self._update_node_callback = None
        self._destroy_node_callback = None
        # All known nodes, keyed by id (real ids > 0, fake ids < 0).
        self._nodes = dict()
        # Locally created nodes not yet sent to the server (fake id -> Node).
        self._unsent_new_nodes = dict()
        # Sent-but-unacknowledged node batches (seq number -> {fake id: Node}).
        self._sent_new_nodes = dict()
        self._destroyed_nodes = set()
        # Pending per-node change sets awaiting transmission.
        self._node_changed_roots = dict()
        self._node_changed_names = dict()
        self._node_changed_local_transforms = dict()
        # Component bookkeeping (type name -> ComponentType).
        self._new_components = dict()
        self._components = dict()
        # True once the initial 'get_scene' query has been issued.
        self._sent_scene_query = False
        self._save_scene_path = ''
        # Lightmap generation request state -- presumably set by the
        # gen_lightmaps handlers; confirm against the rest of the class.
        self._generate_lightmaps_query = None
        self._lightmaps_generated_callback = None
def register_handlers(self, session):
session.add_query_handler('get_scene', self._get_scene_query)
session.add_response_handler('get_scene', self._get_scene_response)
session.add_query_handler('new_node', self._new_node_query)
session.add_response_handler('new_node', self._new_node_response)
session.add_query_handler('destroy_node', self._destroy_node_query)
session.add_response_handler('destroy_node', self._destroy_node_response)
session.add_query_handler('node_root_update', self._node_root_update_query)
session.add_response_handler('node_root_update', self._node_root_update_response)
session.add_query_handler('node_name_update', self._node_name_update_query)
session.add_response_handler('node_name_update', self._node_name_update_response)
session.add_query_handler('node_local_transform_update', self._node_local_transform_update_query)
session.add_response_handler('node_local_transform_update', self._node_local_transform_update_response)
session.add_query_handler('new_component', self._new_component_query)
session.add_response_handler('new_component', self._new_component_response)
session.add_query_handler('destroy_component', self._destroy_component_query)
session.add_response_handler('destroy_component', self._destroy_component_response)
session.add_query_handler('component_property_update', self._component_property_update_query)
session.add_response_handler('component_property_update', self._component_property_update_response)
session.add_query_handler('save_scene', self._save_scene_query)
session.add_query_handler('gen_lightmaps', self._gen_lightmaps_query)
session.add_response_handler('gen_lightmaps', self._gen_lightmaps_response)
def _get_scene_query(self, seq_number, priority):
# Unused arguments
del seq_number, priority
if not self._sent_scene_query:
self._sent_scene_query = True
return True # Actual value doesn't matter
    def _get_scene_response(self, seq_number, response):
        """Handle the full scene snapshot: populate the node table, resolve
        parent links, and build component instances.

        All objects are constructed first; the new-node, update-node, and
        new-instance callbacks fire only after the whole scene is in place.
        """
        # Unused arguments
        del seq_number
        if response is None:
            return
        # Store all new nodes
        new_nodes = set()
        root_ids = dict()
        new_components = dict()
        # For each node in the scene
        for node_id_str, value in response['nodes'].items():
            # Serialized object keys arrive as strings; node ids are ints.
            node_id = int(node_id_str)
            # Insert a new entry into the nodes table
            node = Node()
            node.id = node_id
            self._nodes[node_id] = node
            root_ids[node] = value['root']
            # Initialize the node
            node.name = value['name']
            node.local_position = value['lpos']
            node.local_rotation = value['lrot']
            node.local_scale = value['lscale']
            # Add the node to the list of newly created nodes
            new_nodes.add(node)
        # Add nodes to roots (deferred so parents may appear in any order)
        for node, root_id in root_ids.items():
            node.root = self.get_node(root_id)
        # For each component type
        for component_type_name, instances in response['components'].items():
            component_type = self._components.setdefault(component_type_name, ComponentType(component_type_name))
            # Stupid serialization system corner case
            if instances is None:
                continue
            new_instances = list()
            # For each instance of this component type
            for node_id_str, value in instances.items():
                node_id = int(node_id_str)
                node = self._nodes[node_id]
                # Add the component instance object; values from the snapshot
                # are complete, so the instance is immediately loaded.
                instance = ComponentInstance(node, component_type)
                component_type.instances[node] = instance
                instance.value = value
                instance.is_loaded = True
                instance.loaded_callbacks = None
                new_instances.append(instance)
            if component_type.new_instance_callback is not None:
                new_components[component_type] = new_instances
        # Run the 'new_node' callback on all new nodes
        if self._new_node_callback is not None:
            for node in new_nodes:
                self._new_node_callback(self, node)
        # Run the 'update_node' callback on all new nodes
        if self._update_node_callback is not None:
            for node in new_nodes:
                self._update_node_callback(self, node)
        # Run the 'new_instance' callback on all components
        for component_type, instances in new_components.items():
            for instance in instances:
                component_type.new_instance_callback(instance)
def _new_node_query(self, seq_number, priority):
        """Build the query announcing locally created (fake-id) nodes.

        Only the fake id and name are sent; other properties are synced by
        the dedicated update queries once the server assigns real ids.

        :param seq_number: sequence number used to match the server response.
        :param priority: unused.
        :return: dict mapping fake id -> {'name': ...}, or None if there is
            nothing to send.
        """
        # Unused arguments
        del priority
        if len(self._unsent_new_nodes) == 0:
            return None
        message = dict()
        sent = dict()
        for fake_id, node in self._unsent_new_nodes.items():
            # BUGFIX: destroyed nodes were skipped from the message but still
            # recorded in _sent_new_nodes, which broke the
            # len(pending) == len(response) assertion in _new_node_response.
            # NOTE(review): a fake node destroyed before being sent never gets
            # a real id, so it will stay in _destroyed_nodes; the original had
            # the same limitation.
            if node.destroyed:
                continue
            # Only send fake id and name, other properties will be updated later
            message[fake_id] = {'name': node.name}
            sent[fake_id] = node
        # Reset the table of unsent nodes
        self._unsent_new_nodes = dict()
        # Don't send (or record) an empty query
        if len(message) == 0:
            return None
        self._sent_new_nodes[seq_number] = sent
        return message
def _new_node_response(self, seq_number, response):
        """Resolve server-assigned ids for requested nodes, or ingest
        server-created nodes.

        If `seq_number` matches a previously sent new-node query, each fake
        id is replaced with the real id the server assigned.  Otherwise the
        nodes are treated as created server-side (or by another client) and
        are added to the local node table.
        """
        # Check if these nodes correspond to nodes that we requested
        if seq_number not in self._sent_new_nodes:
            # Create new nodes
            new_nodes = list()
            root_ids = dict()
            for node_response in response.values():
                node = Node()
                node.id = node_response['id']
                node.name = node_response['name']
                # Missing transform fields fall back to the identity transform
                node.local_position = node_response.get('lpos', [0.0, 0.0, 0.0])
                node.local_rotation = node_response.get('lrot', [0.0, 0.0, 0.0, 1.0])
                node.local_scale = node_response.get('lscale', [1.0, 1.0, 1.0])
                # Root assignment deferred until every new node is registered
                root_ids[node] = node_response.get('root', Node.NULL_ID)
                self._nodes[node.id] = node
                new_nodes.append(node)
                print("Received unrequested new node, id={}".format(node.id))
            # Set node roots
            for node, root_id in root_ids.items():
                node.root = self.get_node(root_id)
            # Call 'new_node' on all created nodes
            if self._new_node_callback is not None:
                for new_node in new_nodes:
                    self._new_node_callback(self, new_node)
            # Call 'update_node' on all created nodes
            if self._update_node_callback is not None:
                for new_node in new_nodes:
                    self._update_node_callback(self, new_node)
            return
        # Get the nodes that were supposed to go with this sequence number
        pending_nodes = self._sent_new_nodes[seq_number]
        del self._sent_new_nodes[seq_number]
        # The server must answer for exactly the nodes we asked about
        assert(len(pending_nodes) == len(response))
        updated_nodes = list()
        for fake_id_str, node_response in response.items():
            fake_id = int(fake_id_str)
            node = pending_nodes[fake_id]
            # Apply Id (node stays reachable under both fake and real id)
            node.id = node_response['id']
            self._nodes[node.id] = node
            # If the node has been destroyed, don't add it to be updated
            if node.destroyed:
                continue
            updated_nodes.append(node)
            print("Allocated node id {} for fake node {}".format(node.id, node.fake_id))
        # Call the update function on updated nodes
        if self._update_node_callback is not None:
            for node in updated_nodes:
                self._update_node_callback(self, node)
def _destroy_node_query(self, seq_number, priority):
        """Build the list of real node ids whose destruction should be sent.

        Fake nodes (no server id yet) are kept back until they become real.

        :return: list of node ids, or None when nothing is pending.
        """
        del seq_number, priority  # unused
        if not self._destroyed_nodes:
            return None
        # Split pending destructions into "still fake" and "ready to send"
        deferred = {node for node in self._destroyed_nodes if node.is_fake()}
        message = [node.id for node in self._destroyed_nodes if not node.is_fake()]
        self._destroyed_nodes = deferred
        return message
def _destroy_node_response(self, seq_number, response):
        """Destroy locally any node the server reports as destroyed."""
        del seq_number  # unused
        # Nodes already gone from the table are simply ignored
        doomed = [self._nodes[node_id] for node_id in response if node_id in self._nodes]
        for node in doomed:
            self.request_destroy_node(node)
def _node_root_update_query(self, seq_number, priority):
        """Build the query announcing local re-parenting of nodes.

        An entry in `_node_changed_roots` maps node -> sequence number of the
        query that carried the change, or None if it has not been sent yet.

        :return: dict mapping node id -> new root id (NULL_ID for no root),
            or None when nothing is ready to send.
        """
        # Unused arguments
        del priority
        if len(self._node_changed_roots) == 0:
            return None
        message = dict()
        # Iterate over a copy so entries can be deleted while looping
        for node, existing_seq_num in list(self._node_changed_roots.items()):
            # If the node was destroyed, remove it from the list and continue
            if node.destroyed:
                del self._node_changed_roots[node]
                continue
            # If this node is fake, don't add it to the query yet
            if node.is_fake():
                continue
            # If this message has already been sent out skip it
            if existing_seq_num is not None:
                continue
            # If this node's root is null, add it to the query
            if node.root is None:
                message[node.id] = Node.NULL_ID
                self._node_changed_roots[node] = seq_number
                continue
            # If this node's root is fake, don't add it to the query yet
            if node.root.is_fake():
                continue
            # Otherwise, add it to the message
            message[node.id] = node.root.id
            self._node_changed_roots[node] = seq_number
        if len(message) == 0:
            return None
        return message
def _node_root_update_response(self, seq_number, response):
        """Apply server-confirmed node re-parenting.

        :param seq_number: sequence number of the originating query; 0 means
            the change is server-initiated and always authoritative.
        :param response: mapping of node id (as string) -> new root node id.
        """
        updated_nodes = list()
        # For each node and root in the response
        for node_id_str, root_id in response.items():
            node_id = int(node_id_str)
            node = self._nodes[node_id]
            # If this node's root was not expected to be changed, or the change is final
            if seq_number == 0 or node not in self._node_changed_roots or self._node_changed_roots[node] == seq_number:
                # If the new root is different than the old
                if node.get_root_id() != root_id:
                    node.root = self.get_node(root_id)
                    updated_nodes.append(node)
                # Remove it from the changed root table
                self._node_changed_roots.pop(node, None)
        # Call the update callback, if any
        if self._update_node_callback is not None:
            for node in updated_nodes:
                # BUGFIX: this callback takes (session, node) at every other
                # call site; the session argument was previously omitted here.
                self._update_node_callback(self, node)
def _node_name_update_query(self, seq_number, priority):
        """Build the query announcing locally renamed nodes.

        An entry in `_node_changed_names` maps node -> sequence number of the
        query that carried the change, or None if it has not been sent yet.

        :return: dict mapping node id -> new name, or None when nothing is
            ready to send.
        """
        # Unused parameters
        del priority
        if len(self._node_changed_names) == 0:
            return None
        message = dict()
        # Iterate over a copy so entries can be deleted while looping
        for node, existing_seq_num in list(self._node_changed_names.items()):
            # If the node was destroyed, remove it from the table and continue
            if node.destroyed:
                del self._node_changed_names[node]
                continue
            # If the node is fake, don't add it yet
            if node.is_fake():
                continue
            # If the node's query hasn't been responded to yet, ignore it
            if existing_seq_num is not None:
                continue
            # Add it to the query
            message[node.id] = node.name
            self._node_changed_names[node] = seq_number
        if len(message) == 0:
            return None
        return message
def _node_name_update_response(self, seq_number, response):
        """Apply server-confirmed node renames and fire the update callback.

        A rename is applied when it is server-initiated (seq_number == 0),
        when the node has no pending local rename, or when the response
        confirms exactly the query that carried the local rename.
        """
        renamed = []
        for key, new_name in response.items():
            node = self._nodes[int(key)]
            authoritative = (seq_number == 0
                             or node not in self._node_changed_names
                             or self._node_changed_names[node] == seq_number)
            if not authoritative:
                continue
            # Only report nodes whose name actually changed
            if node.name != new_name:
                node.name = new_name
                renamed.append(node)
            # Clear the pending-rename entry, if any
            self._node_changed_names.pop(node, None)
        if self._update_node_callback is not None:
            for node in renamed:
                self._update_node_callback(self, node)
def _node_local_transform_update_query(self, seq_number, priority):
        """Build the query announcing locally changed node transforms.

        Transform updates are only sent during low-priority passes.  An
        entry in `_node_changed_local_transforms` maps node -> sequence
        number of the query that carried the change, or None if unsent.

        :return: dict mapping node id -> {'lpos', 'lrot', 'lscale'}, or None
            when nothing is ready to send.
        """
        # Setting the transform is not a high priority update
        if priority != editor_session.EditorSession.PRIORITY_ANY:
            return None
        if len(self._node_changed_local_transforms) == 0:
            return None
        message = dict()
        # Iterate over a copy so entries can be deleted while looping
        for node, existing_seq_num in list(self._node_changed_local_transforms.items()):
            # If the node was destroyed, remove it and continue
            if node.destroyed:
                del self._node_changed_local_transforms[node]
                continue
            # If the node is fake, don't add it yet
            if node.is_fake():
                continue
            # If the node is in the table for a previously sent query
            if existing_seq_num is not None:
                continue
            # Add it to the query (copies: the node may mutate before sending)
            entry = message[node.id] = dict()
            entry['lpos'] = node.local_position.copy()
            entry['lrot'] = node.local_rotation.copy()
            entry['lscale'] = node.local_scale.copy()
            self._node_changed_local_transforms[node] = seq_number
        if len(message) == 0:
            return None
        return message
def _node_local_transform_update_response(self, seq_number, response):
        """Apply server-confirmed node transform changes.

        A transform is applied when the change is server-initiated
        (seq_number == 0), when the node has no pending local change, or
        when the response confirms exactly the query that carried it.
        """
        updated_nodes = list()
        # For each transformed node, and it's new transform
        for node_id_str, trans in response.items():
            node_id = int(node_id_str)
            node = self._nodes[node_id]
            # If the node's name was not expected to be changed, or the change is final
            if seq_number == 0 \
                    or node not in self._node_changed_local_transforms \
                    or self._node_changed_local_transforms[node] == seq_number:
                # If the new transform is different than the old one
                different = node.local_position != trans['lpos']
                different = different or node.local_scale != trans['lscale']
                different = different or node.local_rotation != trans['lrot']
                # Update the node
                if different:
                    node.local_position = trans['lpos']
                    node.local_scale = trans['lscale']
                    node.local_rotation = trans['lrot']
                    updated_nodes.append(node)
                # Remove it from the change table
                self._node_changed_local_transforms.pop(node, None)
        # Call the update callback
        if self._update_node_callback is not None:
            for node in updated_nodes:
                self._update_node_callback(self, node)
def _new_component_query(self, seq_number, priority):
        """Build the query announcing locally created component instances.

        Instances attached to fake nodes are held back until the node gets a
        real id; instances on destroyed nodes are dropped.

        :return: dict mapping component type name -> list of node ids, or
            None when nothing is ready to send.
        """
        # Unused arguments
        del seq_number
        if priority != editor_session.EditorSession.PRIORITY_ANY:
            return
        # Construct the message
        message = dict()
        # For each component type
        for component_type_name, component_type in self._components.items():
            remaining = set()
            new_instances = list()
            for instance in component_type.new_instances:
                # If the node was destroyed, don't add it
                if instance.node.destroyed:
                    continue
                # If the node is fake, don't add it YET
                if instance.node.is_fake():
                    remaining.add(instance)
                    continue
                # Add it to the message
                new_instances.append(instance.node.id)
            # Reset the new instance set
            component_type.new_instances = remaining
            # Add it to the message only if new components were actually created
            if len(new_instances) == 0:
                continue
            message[component_type_name] = new_instances
        if len(message) == 0:
            return None
        return message
def _new_component_response(self, seq_number, response):
        """Ingest component instances reported as created by the server.

        For each reported instance: create it if unknown, mark it loaded and
        run the deferred load callbacks if it was awaiting its value, or
        apply the value as an update otherwise.
        """
        # For each component type and set of instances in the response
        for component_type_name, instances in response.items():
            # Get the component type object
            component_type = self._components[component_type_name]
            new_instances = list()
            loaded_instances = list()
            updated_instances = list()
            # For each newly created instance
            for node_id_str, value in instances.items():
                node_id = int(node_id_str)
                node = self._nodes[node_id]
                # If an instance doesn't already exist, create it
                if node not in component_type.instances:
                    instance = ComponentInstance(node, component_type)
                    component_type.instances[node] = instance
                    instance.is_loaded = True
                    instance.value = value
                    new_instances.append(instance)
                    continue
                # Get the existing instance
                instance = component_type.instances[node]
                # If the instance hasn't been loaded
                if not instance.is_loaded:
                    instance.value = value
                    instance.is_loaded = True
                    loaded_instances.append(instance)
                    continue
                # Update the value
                modified = instance.server_set_value(seq_number, value)
                if modified:
                    updated_instances.append(instance)
            # Call the new instance callback, if one exists
            if component_type.new_instance_callback is not None:
                for instance in new_instances:
                    component_type.new_instance_callback(instance)
            # Run callbacks for loaded instances
            for instance in loaded_instances:
                for callback in instance.loaded_callbacks:
                    callback(instance)
                # One-shot callbacks: clear after firing
                instance.loaded_callbacks = None
            # Run the instance update callback, if one exists
            if component_type.update_instance_callback is not None:
                for instance in updated_instances:
                    component_type.update_instance_callback(instance)
def _destroy_component_query(self, seq_number, priority):
        """Build the query announcing locally destroyed component instances.

        Instances on fake or not-yet-loaded nodes are held back; instances
        on destroyed nodes are dropped (the node destruction covers them).

        :return: dict mapping component type name -> list of node ids, or
            None when nothing is ready to send.
        """
        # Unused arguments
        del seq_number, priority
        # Create the message
        message = dict()
        for component_type_name, component_type in self._components.items():
            destroyed_instances = list()
            remaining = set()
            for instance in component_type.destroyed_instances:
                # If the node was destroyed, don't add it; it will be destroyed anyway (or was already)
                if instance.node.destroyed:
                    continue
                # If the node is fake, don't add it YET
                if instance.node.is_fake():
                    remaining.add(instance)
                    continue
                # If the instance hasn't been loaded yet, don't add it YET (it might not have been created yet)
                if not instance.is_loaded:
                    remaining.add(instance)
                    continue
                # Add it to the destroyed list
                destroyed_instances.append(instance.node.id)
            # Reset the destroyed instance set
            component_type.destroyed_instances = remaining
            # Only add the list to the query if it actually has anything
            if len(destroyed_instances) == 0:
                continue
            message[component_type_name] = destroyed_instances
        if len(message) == 0:
            return None
        return message
def _destroy_component_response(self, seq_number, response):
        """Remove component instances reported as destroyed by the server."""
        # Unused arguments
        del seq_number
        # For each component type with destroyed instances
        for component_type_name, instance_ids in response.items():
            component_type = self._components[component_type_name]
            destroyed_instances = list()
            # For each destroyed instance
            for node_id in instance_ids:
                # If the node has been destroyed, skip it
                if node_id not in self._nodes:
                    continue
                # Get the node
                node = self._nodes[node_id]
                # If the instance has already been destroyed, skip it
                if node not in component_type.instances:
                    continue
                # Get the instance
                instance = component_type.instances[node]
                destroyed_instances.append(instance)
                # Remove the instance
                instance.destroyed = True
                del component_type.instances[node]
            # Run the user callback
            if component_type.destroy_instance_callback is not None:
                for instance in destroyed_instances:
                    component_type.destroy_instance_callback(instance)
def _component_property_update_query(self, seq_number, priority):
        """Build the query announcing locally changed component properties.

        Each changed instance tracks per-property sequence numbers: None
        means not yet sent; a number means the change was carried by that
        query.  Values are deep-copied so later local edits don't mutate
        the outgoing message.

        :return: dict type name -> {node id -> {prop -> value}}, or None
            when nothing is ready to send.
        """
        # Unused parameters
        del priority
        message = dict()
        # For each component type
        for component_type_name, component_type in self._components.items():
            updated_instances = dict()
            # For each instance of this component type that was changed
            remaining = set()
            for changed_instance in component_type.changed_instances:
                updated_props = dict()
                # If this instance is destroyed, skip it
                if changed_instance.destroyed or changed_instance.node.destroyed:
                    continue
                # If the instance is not real, or it hasn't been loaded yet, don't add it YET
                if changed_instance.node.is_fake() or not changed_instance.is_loaded:
                    remaining.add(changed_instance)
                    continue
                # For each property of this instance that was changed
                for changed_prop_name, existing_seq_num in changed_instance.changed_props.items():
                    # If this property change has not been sent yet, add it to the query
                    if existing_seq_num is None:
                        updated_props[changed_prop_name] = deepcopy(changed_instance.value[changed_prop_name])
                        changed_instance.changed_props[changed_prop_name] = seq_number
                # Only add this instance as changed if something was actually changed
                if len(updated_props) == 0:
                    continue
                updated_instances[changed_instance.node.id] = updated_props
            # Reset the set of changed instances
            component_type.changed_instances = remaining
            # Only add this component type if something was actually changed
            if len(updated_instances) == 0:
                continue
            message[component_type_name] = updated_instances
        # Only send the message if something was changed
        if len(message) == 0:
            return None
        return message
def _component_property_update_response(self, seq_number, response):
        """Apply server-side property changes to local component instances
        and fire the per-type update callback for instances that changed.
        """
        for type_name, per_node_values in response.items():
            ctype = self._components[type_name]
            changed = []
            for key, value in per_node_values.items():
                # Keys arrive as strings; resolve the node, then its instance
                inst = ctype.instances[self._nodes[int(key)]]
                # server_set_value reports whether the local value changed
                if inst.server_set_value(seq_number, value):
                    changed.append(inst)
            callback = ctype.update_instance_callback
            if callback is not None:
                for inst in changed:
                    callback(inst)
def _save_scene_query(self, seq_number, priority):
        """Emit (and clear) a pending save-scene request, if any."""
        del seq_number, priority  # unused
        if len(self._save_scene_path) == 0:
            return None
        # Consume the pending path so the request fires only once
        path, self._save_scene_path = self._save_scene_path, ''
        return {'path': path}
def _gen_lightmaps_query(self, seq_number, priority):
        """Emit (and clear) a pending lightmap-generation request, if any."""
        del seq_number, priority  # unused
        query, self._generate_lightmaps_query = self._generate_lightmaps_query, None
        return query
def _gen_lightmaps_response(self, seq_number, response):
        """Forward the lightmap-generation result to the user callback."""
        del seq_number  # unused
        callback = self._lightmaps_generated_callback
        if callback is not None:
            callback(response)
def get_node(self, node_id):
        """Return the node with the given id, or None for the null id."""
        return None if node_id == Node.NULL_ID else self._nodes[node_id]
def get_component_type(self, component_type_name):
        """Return the component type object registered under the given name."""
        return self._components[component_type_name]
def get_node_components(self, node):
        """Collect every component instance attached to the given node."""
        candidates = (ctype.get_instance(node) for ctype in self._components.values())
        return [instance for instance in candidates if instance is not None]
def set_new_node_callback(self, callback):
        """Register the callback fired for each newly created node;
        called as callback(session, node)."""
        self._new_node_callback = callback
def set_update_node_callback(self, callback):
        """Register the callback fired when a node's state changes;
        called as callback(session, node)."""
        self._update_node_callback = callback
def set_destroy_node_callback(self, callback):
        """Register the callback fired when a node is destroyed;
        called as callback(session, node)."""
        self._destroy_node_callback = callback
def set_new_component_callback(self, component_type_name, callback):
        """Register a callback fired for each new instance of the given
        component type; called as callback(instance)."""
        # May run before any query has populated the table, so create the
        # component type on demand.
        ctype = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        ctype.set_new_instance_callback(callback)
def set_update_component_callback(self, component_type_name, callback):
        """Register a callback fired when an instance of the given component
        type is updated; called as callback(instance)."""
        # Create the component type on demand (see above).
        ctype = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        ctype.set_update_instance_callback(callback)
def set_destroy_component_callback(self, component_type_name, callback):
        """Register a callback fired when an instance of the given component
        type is destroyed; called as callback(instance)."""
        # Create the component type on demand (see above).
        ctype = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        ctype.set_destroy_instance_callback(callback)
def set_lightmaps_generated_callback(self, callback):
        """Register the callback fired when lightmap generation completes;
        called as callback(response)."""
        self._lightmaps_generated_callback = callback
def save_scene(self, path):
        """Queue a request asking the server to save the scene to `path`."""
        self._save_scene_path = path
def generate_lightmaps(self, light_dir, light_intensity, ambient, num_indirect_sample_sets, num_accumulation_steps, num_post_steps, lightmap_path):
        """Queue a lightmap-generation request with the given light and
        sampling parameters; the result arrives via the lightmaps-generated
        callback."""
        self._generate_lightmaps_query = dict(
            light_direction=light_dir,
            light_intensity=light_intensity,
            ambient=ambient,
            num_indirect_sample_sets=num_indirect_sample_sets,
            num_accumulation_steps=num_accumulation_steps,
            post_process_steps=num_post_steps,
            lightmap_path=lightmap_path,
        )
def request_new_node(self, user_data):
        """Create a client-side placeholder node with a negative fake id.

        The node is queued for the next new-node query; the server replaces
        the fake id with a real one in its response.
        """
        # Reserve the next fake id (fake ids count down from -1)
        fake_id = self._next_fake_node_id
        self._next_fake_node_id -= 1
        # Build and register the placeholder node
        node = Node()
        node.id = node.fake_id = fake_id
        node.user_data = user_data
        self._nodes[fake_id] = node
        self._unsent_new_nodes[fake_id] = node
        return node
def request_destroy_node(self, node):
        """Destroy a node locally and queue its destruction server-side.

        Recursively destroys the node's children and all of its component
        instances first, then removes the node from the table and fires the
        'destroy node' callback.  Safe to call on an already destroyed node.
        """
        # If the node has already been destroyed, just continue
        if node.destroyed:
            return
        # Find all of the nodes's children
        children = list()
        for child_node in self._nodes.values():
            if child_node.root == node:
                children.append(child_node)
        # Destroy the children
        for child_node in children:
            self.request_destroy_node(child_node)
        # Destroy all of the components
        for component_type in self._components.values():
            component_type.request_destroy_instance(node)
        # Remove the node from the node dictionary
        del self._nodes[node.id]
        # A node with a real id may still be registered under its fake id too
        if node.fake_id is not None:
            del self._nodes[node.fake_id]
        # Add it to the destroyed nodes set
        self._destroyed_nodes.add(node)
        # Run the callback
        if self._destroy_node_callback is not None:
            self._destroy_node_callback(self, node)
def mark_name_dirty(self, node):
        """Flag the node's name as locally changed, pending a sync query."""
        assert node in self._nodes.values()
        self._node_changed_names[node] = None
def mark_root_dirty(self, node):
        """Flag the node's root as locally changed, pending a sync query."""
        assert node in self._nodes.values()
        self._node_changed_roots[node] = None
def mark_local_transform_dirty(self, node):
        """Flag the node's local transform as locally changed, pending a sync query."""
        assert node in self._nodes.values()
        self._node_changed_local_transforms[node] = None
| [
"copy.deepcopy"
] | [((5095, 5110), 'copy.deepcopy', 'deepcopy', (['value'], {}), '(value)\n', (5103, 5110), False, 'from copy import deepcopy\n'), ((30687, 30738), 'copy.deepcopy', 'deepcopy', (['changed_instance.value[changed_prop_name]'], {}), '(changed_instance.value[changed_prop_name])\n', (30695, 30738), False, 'from copy import deepcopy\n')] |
#!/usr/bin/python
from pyspark.sql import SparkSession

# Build (or reuse) a Spark session running on the YARN cluster manager.
spark = (
    SparkSession.builder.master("yarn")
    .appName("bigquery-analytics-avg-temperature")
    .getOrCreate()
)

# GCS bucket the BigQuery connector uses for temporary staging files.
spark.conf.set("temporaryGcsBucket", "01-logistics-backup")

# Load the vehicle telemetry history table from BigQuery.
history_df = (
    spark.read.format("bigquery").option("table", "vehicle_analytics.history").load()
)
history_df.createOrReplaceTempView("history")

# Average temperature per vehicle per day.
daily_avg_df = spark.sql(
    "SELECT vehicle_id, date, AVG(temperature) AS avg_temperature FROM history GROUP BY vehicle_id, date"
)
daily_avg_df.show()
daily_avg_df.printSchema()

# Append the aggregate back into BigQuery.
(
    daily_avg_df.write.format("bigquery")
    .option("table", "vehicle_analytics.avg_temperature")
    .mode("append")
    .save()
)
| [
"pyspark.sql.SparkSession.builder.master"
] | [((70, 105), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""yarn"""'], {}), "('yarn')\n", (97, 105), False, 'from pyspark.sql import SparkSession\n')] |
# -*- coding: utf-8 -*-
import gensim
import numpy as np
from sklearn.cluster import MiniBatchKMeans
def read_data_batches(path, batch_size=50, minlength=5):
    """
    Read batches of whitespace-tokenized lines from a text file.

    :param path: path to a UTF-8 text file, ``one line -- one normalized sentence``
    :param batch_size: number of sentences per yielded batch
    :param minlength: lines with fewer tokens than this are skipped
    :return: iterator over batches (lists of token lists); the final batch
        may be shorter than `batch_size`
    """
    batch = []
    # Use a context manager so the file handle is always closed; the
    # original called open() without ever closing the file.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            tokens = line.strip().split()
            # lines with less than `minlength` words are omitted
            if len(tokens) >= minlength:
                batch.append(tokens)
                if len(batch) >= batch_size:
                    yield batch
                    batch = []
    # Flush the trailing partial batch, if any
    if len(batch) > 0:
        yield batch
def text2vectors(text, w2v_model, maxlen, vocabulary):
    """
    Convert a token sequence to a list of word vectors.

    Tokens missing from the word2vec model (or from `vocabulary`, when one
    is given) are skipped; if fewer than `maxlen` vectors remain, the list
    is zero-padded up to `maxlen`.  Note the result is NOT truncated here;
    callers slice to `maxlen` themselves.

    :param text: list of tokens
    :param w2v_model: gensim w2v model
    :param maxlen: minimum length of the returned list (zero-padded)
    :param vocabulary: optional collection restricting the usable tokens
    :return: list of word vectors
    """
    acc_vecs = []
    # `wv.key_to_index` is a dict, so each membership test is O(1); the
    # original scanned the `wv.index_to_key` list, O(vocab) per token.
    known_words = w2v_model.wv.key_to_index
    for word in text:
        if word in known_words and (vocabulary is None or word in vocabulary):
            acc_vecs.append(w2v_model.wv[word])
    # padding for consistent length with ZERO vectors
    if len(acc_vecs) < maxlen:
        acc_vecs.extend([np.zeros(w2v_model.vector_size)] * (maxlen - len(acc_vecs)))
    return acc_vecs
def get_w2v(path):
    """Load and return a gensim Word2Vec model from the given path."""
    model = gensim.models.Word2Vec.load(path)
    return model
def read_data_tensors(path, word_vectors_path=None,
                      batch_size=50, vocabulary=None,
                      maxlen=100, pad_value=0, minsentlength=5):
    """
    Data for training the NN -- from text file to word vectors sequences batches.

    :param path: path to the text file, one normalized sentence per line
    :param word_vectors_path: path to a saved gensim word2vec model
    :param batch_size: number of sentences per yielded batch
    :param vocabulary: optional collection restricting the usable tokens
    :param maxlen: sequences are cut/padded to this length
    :param pad_value: unused -- padding is fixed to zero vectors in text2vectors
    :param minsentlength: lines with fewer tokens are skipped
    :return: iterator over (batch_size, maxlen, vector_size) float32 arrays
        paired with the corresponding token lists
    """
    w2v_model = get_w2v(word_vectors_path)
    for batch in read_data_batches(path, batch_size, minsentlength):
        batch_vecs = []
        batch_texts = []
        for text in batch:
            vectors_as_list = text2vectors(text, w2v_model, maxlen, vocabulary)
            # text2vectors pads but does not truncate, so slice to maxlen here
            batch_vecs.append(np.asarray(vectors_as_list[:maxlen], dtype=np.float32))
            batch_texts.append(text)
        yield np.stack(batch_vecs, axis=0), batch_texts
def get_centroids(w2v_model, aspects_count):
    """
    Cluster all word vectors with mini-batch K-means and return the
    L2-normalized cluster centroids; used for ABAE aspect-matrix
    initialization.

    :param w2v_model: trained gensim word2vec model
    :param aspects_count: number of clusters / aspects
    :return: (aspects_count, vector_size) array of unit-norm centroids
    """
    km = MiniBatchKMeans(n_clusters=aspects_count, verbose=0, n_init=100)
    # Stack all word vectors into a plain 2-D ndarray; the original used
    # np.matrix, which is deprecated in NumPy and rejected by newer
    # scikit-learn releases.
    vectors = np.vstack([w2v_model.wv[key] for key in w2v_model.wv.key_to_index])
    km.fit(vectors)
    clusters = km.cluster_centers_
    # L2 normalization so each aspect embedding has unit length
    norm_aspect_matrix = clusters / np.linalg.norm(clusters, axis=-1, keepdims=True)
    return norm_aspect_matrix
if __name__ == "__main__":
    # Smoke test: stream a few batches and print the tensor shape plus the
    # first two raw texts of each batch.
    for b in read_data_tensors("preprocessed_data/listings.txt", "word_vectors/listings.w2v", batch_size=3):
        print(b[0].shape, b[1][:2])
| [
"gensim.models.Word2Vec.load",
"sklearn.cluster.MiniBatchKMeans",
"numpy.asarray",
"numpy.stack",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.matrix"
] | [((1577, 1610), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['path'], {}), '(path)\n', (1604, 1610), False, 'import gensim\n'), ((2729, 2793), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'aspects_count', 'verbose': '(0)', 'n_init': '(100)'}), '(n_clusters=aspects_count, verbose=0, n_init=100)\n', (2744, 2793), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2889, 2901), 'numpy.matrix', 'np.matrix', (['m'], {}), '(m)\n', (2898, 2901), True, 'import numpy as np\n'), ((3012, 3060), 'numpy.linalg.norm', 'np.linalg.norm', (['clusters'], {'axis': '(-1)', 'keepdims': '(True)'}), '(clusters, axis=-1, keepdims=True)\n', (3026, 3060), True, 'import numpy as np\n'), ((2358, 2412), 'numpy.asarray', 'np.asarray', (['vectors_as_list[:maxlen]'], {'dtype': 'np.float32'}), '(vectors_as_list[:maxlen], dtype=np.float32)\n', (2368, 2412), True, 'import numpy as np\n'), ((2466, 2494), 'numpy.stack', 'np.stack', (['batch_vecs'], {'axis': '(0)'}), '(batch_vecs, axis=0)\n', (2474, 2494), True, 'import numpy as np\n'), ((1401, 1432), 'numpy.zeros', 'np.zeros', (['w2v_model.vector_size'], {}), '(w2v_model.vector_size)\n', (1409, 1432), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import io
import pytest # type: ignore
import odata_url_parser.cli as cli
import odata_url_parser.odata_url_parser as oup
def test_main_ok_minimal(capsys):
    """cli.main should echo the single job token to stdout and return None."""
    job = ['does not matter']
    expected = job[0]
    assert cli.main(job) is None
    captured = capsys.readouterr()
    assert captured.out.strip() == expected.strip()
| [
"odata_url_parser.cli.main"
] | [((315, 328), 'odata_url_parser.cli.main', 'cli.main', (['job'], {}), '(job)\n', (323, 328), True, 'import odata_url_parser.cli as cli\n')] |
#!/usr/bin/env python
import sys,os,commands
from CommonMethods import *
def main():
    """Copy the current user's files from a CASTOR directory to a local one.

    Usage: cpFromCastor fromDir toDir [filter]
    Exits with an error message on bad arguments or an incomplete copy.
    """
    if len(sys.argv) < 3:
        error = "Usage: cpFromCastor fromDir toDir (optional filter)"
        exit(error)
    user = os.getenv("USER")
    castorDir = "/castor/cern.ch/cms/store/caf/user/" + user + "/" + sys.argv[1] + "/"
    # Renamed from `filter` so the Python builtin is not shadowed.
    name_filter = ""
    if len(sys.argv) > 3:
        name_filter = sys.argv[3]
    fileList = ls(castorDir, name_filter)
    destDir = sys.argv[2]
    copiedFiles = cp(castorDir, destDir, fileList)
    # `cp` returns the copied subset; any mismatch means the copy failed.
    if len(copiedFiles) != len(fileList):
        error = "ERROR: I couldn't copy all files from castor"
        exit(error)
if __name__ == "__main__":
    # Entry point when run as a script.
    main()
| [
"os.getenv"
] | [((212, 229), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (221, 229), False, 'import sys, os, commands\n')] |
#Dependencies, libraries, and imports
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
#SQLalchemy libraries and functions
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect, MetaData
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
#VROOM, VROOM!
# Connect to the SQLite database of Hawaii weather observations.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#Use automap to get table structures and reflect into classes
Base = automap_base()
Base.prepare(engine, reflect=True)
#See what classes have been created. Classes created should match tables found by Inspector
classes_created = Base.classes.keys()
#Single variable to represent each Class associated with the automapped Base
Measurement = Base.classes.measurement
Station = Base.classes.station
#Classes are now all setup. Start query session.
session = Session(engine)
# Design a query to retrieve the last 12 months of precipitation data and plot the results
#Find the earliest date in the Measurement table by query. Convert to python dictionary, read date as text, convert to datetime.
earliest_date_query = session.query(Measurement.date).order_by(Measurement.date).first()
ed_dict=earliest_date_query._asdict()
earliest_date = ed_dict['date']
earliest_date_dt = dt.datetime.strptime(earliest_date, "%Y-%m-%d")
#Find the latest date in the Measurement table by query. Convert to python dictionary, read date as text, convert to datetime.
latest_date_query = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
ld_dict=latest_date_query._asdict()
latest_date = ld_dict['date']
latest_date_dt = dt.datetime.strptime(latest_date, "%Y-%m-%d")
# Calculate the date 1 year ago from the latest data point in the database
year_ago_latest_dt = latest_date_dt - dt.timedelta(days=365)
year_ago_latest = dt.datetime.strftime(year_ago_latest_dt, "%Y-%m-%d")
# What are the most active stations? (i.e. what stations have the most rows)?
# Stations ordered by observation count, descending; the first entry is the busiest.
stat_freq = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
max_stat_freq = stat_freq[0][0]
# Close the module-level session; each route opens its own.
session.close()
#BEGIN FLASK APP
from flask import Flask, jsonify
app = Flask(__name__)
@app.route("/")
def welcome():
    """Home page: describe the API and list the available routes."""
    print("Server received request for 'Home' page...")
    return (
        f"Welcome to Surf's Up weather API!<br>"
        f"We collect precipitation and temperature data from weather stations on the island of Oahu in Hawaii.<br><br>"
        f"Earliest date of data = {earliest_date}<br>"
        f"Latest date of data = {latest_date}<br><br>"
        f"Available URL Routes:<br><br>"
        f"Below URL returns JSON of precipitation on Oahu on each day between {year_ago_latest} and {latest_date}.<br>"
        f"Copy this URL to browser:<br>"
        f"/api/v1.0/precipitation<br><br>"
        f"Below URL returns JSON of temperature at station {max_stat_freq} on Oahu on each day between {year_ago_latest} and {latest_date}.<br>"
        f"Copy this URL to browser:<br>"
        f"/api/v1.0/temperature<br><br>"
        f"Below URL returns JSON of the weather stations on Oahu.<br>"
        f"Copy this URL to browser:<br>"
        f"/api/v1.0/stations<br><br>"
        f"Below URL returns the max, min, and avg temperature on Oahu encompassing the START and END dates provided by the user in the URL.<br>"
        f"If no END date provided in the URL then END date is assume to be {latest_date}<br>"
        f"Copy this URL to browser and replace START/END with dates in YYYY-MM-DD format:<br>"
        f"/api/v1.0/START/END"
    )
@app.route("/api/v1.0/precipitation/")
def precipitation():
    """Return JSON of the max precipitation per day for the last data year."""
    print("Server received request for 'Precipitation' page...")
    session = Session(engine)
    #Query precipitation observations for last year
    date_prcp_query = session.query(Measurement.date, Measurement.prcp).\
    filter(Measurement.date<=latest_date_dt).filter(Measurement.date>=year_ago_latest_dt)
    session.close()
    #Save as df and grab only the max precip observation for each day in the last year
    date_prcp_df = pd.DataFrame(date_prcp_query, columns=['Date', 'Precipitation'])
    date_prcp_df.set_index('Date', inplace=True)
    date_prcp_df.dropna(inplace=True)
    date_prcp_df.sort_index(inplace=True)
    # Multiple stations report per day; keep the daily maximum
    date_prcp_max = date_prcp_df.groupby('Date')[['Precipitation']].max()
    #Turn into dictionary for jsonification
    prcp_query_dict = date_prcp_max.to_dict()
    return jsonify(prcp_query_dict)
@app.route("/api/v1.0/stations/")
def stations():
    """Return JSON describing every weather station (keyed by station id)."""
    print("Server received request for 'Stations' page...")
    session = Session(engine)
    #Query all the weather station details
    station_query = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation)
    station_df = pd.DataFrame(station_query, columns = ['station', 'name', 'latitude', 'longitude', 'elevation'])
    station_df.set_index('station', inplace=True)
    station_df.dropna(inplace=True)
    session.close()
    #Make a dictionary of weather station characteristics for jsonification
    station_dict = station_df.to_dict(orient='index')
    return jsonify(station_dict)
@app.route("/api/v1.0/temperature/")
def temperatures():
    """Return JSON of daily temperatures for the last data year at the
    busiest station."""
    print("Server received request for 'Temperatures' page...")
    session = Session(engine)
    #Query temperature observations for the last year at the station with the most observations
    tobs_date_query = session.query(Measurement.date, Measurement.tobs).\
    filter(Measurement.date<=latest_date_dt).filter(Measurement.date>=year_ago_latest_dt).\
    filter(Measurement.station==max_stat_freq)
    session.close()
    #Save query as df
    tobs_date_df = pd.DataFrame(tobs_date_query, columns=['Date','Temperature'])
    tobs_date_df.set_index('Date', inplace=True)
    tobs_date_df.dropna(inplace=True)
    #Transform df into dictionary for jsonification
    tobs_date_dict = tobs_date_df.to_dict()
    return jsonify(tobs_date_dict)
@app.route("/api/v1.0/<start>/")
def temp_start(start):
    """Return max/min/avg temperature from `start` (YYYY-MM-DD) through the
    latest date in the database."""
    #Control on START date within database date range
    # NOTE: string comparison works here because dates are ISO-formatted
    if start<earliest_date or start>latest_date:
        return (
            f"START must be between {earliest_date} and {latest_date}.<br>"
            f"/api/v1.0/START"
        )
    print("Server received request for 'Min, Max, Avg Start End' page...")
    session = Session(engine)
    #Query max, min, and avg temperature between START date and last date in database
    TMAX = session.query(func.max(Measurement.tobs)).\
    filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
    TMIN = session.query(func.min(Measurement.tobs)).\
    filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
    TAVG = session.query(func.avg(Measurement.tobs)).\
    filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
    session.close()
    #Round TAVG for presentation
    TAVG = round(TAVG[0][0],1)
    #Calc number of days in the query for information
    days_obs = latest_date_dt - dt.datetime.strptime(start, "%Y-%m-%d")
    days_obs = days_obs.days
    return (
        f"The maximum temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TMAX[0][0]}.<br>"
        f"The minimum temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TMIN[0][0]}.<br>"
        f"The average temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TAVG}.<br>"
    )
@app.route("/api/v1.0/<start>/<end>/")
def temp_start_end(start, end):
#Check START and END dates are within the range of database dates
if start<earliest_date or start>latest_date or end<earliest_date or end>latest_date:
return (
f"START and END must be between {earliest_date} and {latest_date}.<br>"
f"/api/v1.0/START/END"
)
#Allow for START and END interchanged in URL
if end<start:
start_temp = start
start = end
end = start_temp
print("Server received request for 'Min, Max, Avg Start End' page...")
session = Session(engine)
#Query max, min, and avg temperature between START date and END date in database
TMAX = session.query(func.max(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
TMIN = session.query(func.min(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
TAVG = session.query(func.avg(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
session.close()
#Round TAVG for presentation
TAVG = round(TAVG[0][0],1)
#Calc number of days in the query for information
days_obs = dt.datetime.strptime(end, "%Y-%m-%d") - dt.datetime.strptime(start, "%Y-%m-%d")
days_obs = days_obs.days
return (
f"The maximum temperature on Oahu for the {days_obs} days between {start} and {end} was {TMAX[0][0]}.<br>"
f"The minimum temperature on Oahu for the {days_obs} days between {start} and {end} was {TMIN[0][0]}.<br>"
f"The average temperature on Oahu for the {days_obs} days between {start} and {end} was {TAVG}.<br>"
)
if __name__ == "__main__":
app.run(debug=True)
| [
"sqlalchemy.func.count",
"sqlalchemy.func.min",
"flask.Flask",
"datetime.datetime.strptime",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"sqlalchemy.func.max",
"matplotlib.style.use",
"sqlalchemy.func.avg",
"pandas.DataFrame",
"datetime.timedelt... | [((67, 95), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (76, 95), False, 'from matplotlib import style\n'), ((524, 574), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {}), "('sqlite:///Resources/hawaii.sqlite')\n", (537, 574), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((645, 659), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (657, 659), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((1034, 1049), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1041, 1049), False, 'from sqlalchemy.orm import Session\n'), ((1450, 1497), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['earliest_date', '"""%Y-%m-%d"""'], {}), "(earliest_date, '%Y-%m-%d')\n", (1470, 1497), True, 'import datetime as dt\n'), ((1803, 1848), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['latest_date', '"""%Y-%m-%d"""'], {}), "(latest_date, '%Y-%m-%d')\n", (1823, 1848), True, 'import datetime as dt\n'), ((2004, 2056), 'datetime.datetime.strftime', 'dt.datetime.strftime', (['year_ago_latest_dt', '"""%Y-%m-%d"""'], {}), "(year_ago_latest_dt, '%Y-%m-%d')\n", (2024, 2056), True, 'import datetime as dt\n'), ((2408, 2423), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2413, 2423), False, 'from flask import Flask, jsonify\n'), ((1963, 1985), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1975, 1985), True, 'import datetime as dt\n'), ((3942, 3957), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (3949, 3957), False, 'from sqlalchemy.orm import Session\n'), ((4305, 4369), 'pandas.DataFrame', 'pd.DataFrame', (['date_prcp_query'], {'columns': "['Date', 'Precipitation']"}), "(date_prcp_query, columns=['Date', 'Precipitation'])\n", (4317, 4369), True, 'import pandas as pd\n'), ((4675, 4699), 'flask.jsonify', 'jsonify', 
(['prcp_query_dict'], {}), '(prcp_query_dict)\n', (4682, 4699), False, 'from flask import Flask, jsonify\n'), ((4825, 4840), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (4832, 4840), False, 'from sqlalchemy.orm import Session\n'), ((5022, 5120), 'pandas.DataFrame', 'pd.DataFrame', (['station_query'], {'columns': "['station', 'name', 'latitude', 'longitude', 'elevation']"}), "(station_query, columns=['station', 'name', 'latitude',\n 'longitude', 'elevation'])\n", (5034, 5120), True, 'import pandas as pd\n'), ((5367, 5388), 'flask.jsonify', 'jsonify', (['station_dict'], {}), '(station_dict)\n', (5374, 5388), False, 'from flask import Flask, jsonify\n'), ((5526, 5541), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (5533, 5541), False, 'from sqlalchemy.orm import Session\n'), ((5912, 5974), 'pandas.DataFrame', 'pd.DataFrame', (['tobs_date_query'], {'columns': "['Date', 'Temperature']"}), "(tobs_date_query, columns=['Date', 'Temperature'])\n", (5924, 5974), True, 'import pandas as pd\n'), ((6169, 6192), 'flask.jsonify', 'jsonify', (['tobs_date_dict'], {}), '(tobs_date_dict)\n', (6176, 6192), False, 'from flask import Flask, jsonify\n'), ((6577, 6592), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (6584, 6592), False, 'from sqlalchemy.orm import Session\n'), ((8338, 8353), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (8345, 8353), False, 'from sqlalchemy.orm import Session\n'), ((7267, 7306), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (7287, 7306), True, 'import datetime as dt\n'), ((8985, 9022), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['end', '"""%Y-%m-%d"""'], {}), "(end, '%Y-%m-%d')\n", (9005, 9022), True, 'import datetime as dt\n'), ((9025, 9064), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (9045, 9064), True, 'import 
datetime as dt\n'), ((2255, 2286), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (2265, 2286), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((2183, 2214), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (2193, 2214), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((6704, 6730), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (6712, 6730), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((6843, 6869), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (6851, 6869), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((6982, 7008), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (6990, 7008), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((8464, 8490), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (8472, 8490), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((8595, 8621), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (8603, 8621), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n'), ((8726, 8752), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (8734, 8752), False, 'from sqlalchemy import create_engine, func, inspect, MetaData\n')] |
from collections import defaultdict
import functools
import json
import logging
import re
import subprocess
import threading
import time
import os
from .abstract_connection import AbstractConnection, RcloneException
class RcloneConnection(AbstractConnection):
    """Driver that shells out to the ``rclone`` CLI for cloud file operations.

    Credentials are passed to rclone via RCLONE_CONFIG_* environment
    variables rather than a config file. Long-running copies execute on
    daemon threads; per-job progress, console output and exit status are
    tracked in the ``_job_*`` dictionaries keyed by an integer job id.
    """

    def __init__(self):
        self._job_status = defaultdict(functools.partial(defaultdict, str)) # Mapping from id to status dict
        self._job_text = defaultdict(str)
        self._job_error_text = defaultdict(str)
        self._job_percent = defaultdict(int)
        self._job_exitstatus = {}
        self._stop_events = {} # Mapping from id to threading.Event
        self._latest_job_id = 0

    def verify(self, data):
        """Test-list the remote and return {'result': bool, 'message': str}."""
        credentials = self._formatCredentials(data, name='current')
        user = data.owner
        bucket = getattr(data, 'bucket', None)
        if bucket is None:
            bucket = ''

        command = [
            'sudo',
            '-E',
            '-u', user,
            'rclone',
            'lsjson',
            'current:{}'.format(bucket),
        ]

        self._logCommand(command, credentials)

        try:
            # Output is discarded; a zero exit status is the success signal.
            self._execute(command, credentials)
            return {
                'result': True,
                'message': 'Success',
            }
        except subprocess.CalledProcessError as e:
            returncode = e.returncode
            return {
                'result': False,
                'message': 'Exit status {}'.format(returncode),
            }

    def ls(self, data, path):
        """List ``path`` on the remote; returns {'files': [...], 'path': path}."""
        credentials = self._formatCredentials(data, name='current')
        user = data.owner

        command = [
            'sudo',
            '-E',
            '-u', user,
            'rclone',
            'lsjson',
            'current:{}'.format(path),
        ]

        self._logCommand(command, credentials)

        try:
            result = self._execute(command, credentials)
            files = json.loads(result)
            return {
                'files': files,
                'path': path,
            }
        except subprocess.CalledProcessError as e:
            raise RcloneException(sanitize(str(e)))

    def mkdir(self, data, path):
        """Create ``path`` on the remote by touching a ``.keep`` placeholder."""
        credentials = self._formatCredentials(data, name='current')
        user = data.owner

        command = [
            'sudo',
            '-E',
            '-u', user,
            'rclone',
            'touch',
            'current:{}/.keep'.format(path),
        ]

        self._logCommand(command, credentials)

        try:
            # Output is discarded; a zero exit status is the success signal.
            self._execute(command, credentials)
            return {
                'message': 'Success',
            }
        except subprocess.CalledProcessError as e:
            raise RcloneException(sanitize(str(e)))

    def copy(self,
            src_data,
            src_resource_path,
            dst_data,
            dst_resource_path,
            user,
            copy_links,
            job_id=None
    ):
        """Start an asynchronous ``rclone copyto`` job; return its job id.

        A ``src_data``/``dst_data`` of None means the local filesystem.
        Raises ValueError if an explicit ``job_id`` is already in use.
        """
        credentials = {}
        if src_data is None: # Local
            src = src_resource_path
        else:
            credentials.update(self._formatCredentials(src_data, name='src'))
            src = 'src:{}'.format(src_resource_path)

        if dst_data is None: # Local
            dst = dst_resource_path
        else:
            credentials.update(self._formatCredentials(dst_data, name='dst'))
            dst = 'dst:{}'.format(dst_resource_path)

        if copy_links:
            option_copy_links = '--copy-links'
        else:
            option_copy_links = ''

        command = [
            'sudo',
            '-E',
            '-u', user,
            'rclone',
            'copyto',
            src,
            dst,
            option_copy_links,
            '--progress',
            '--stats', '2s',
        ]
        # Drop the empty slot left behind when --copy-links is not requested.
        command = [cmd for cmd in command if len(cmd) > 0]

        self._logCommand(command, credentials)

        if job_id is None:
            job_id = self._get_next_job_id()
        else:
            if self._job_id_exists(job_id):
                # BUGFIX: was ``.fromat(job_id)`` which raised AttributeError.
                raise ValueError('rclone copy job with ID {} already exists'.format(job_id))

        self._stop_events[job_id] = threading.Event()

        try:
            self._execute_interactive(command, credentials, job_id)
        except subprocess.CalledProcessError as e:
            raise RcloneException(sanitize(str(e)))

        return job_id

    def copy_text(self, job_id):
        """Return the formatted multi-line progress text for ``job_id``."""
        return self._job_text[job_id]

    def copy_error_text(self, job_id):
        """Return accumulated stderr / ERROR lines for ``job_id``."""
        return self._job_error_text[job_id]

    def copy_percent(self, job_id):
        """Return percent complete for ``job_id`` (-1 when unknown)."""
        return self._job_percent[job_id]

    def copy_stop(self, job_id):
        """Signal the monitoring thread for ``job_id`` to stop."""
        self._stop_events[job_id].set()

    def copy_finished(self, job_id):
        """Return True once the job's monitoring thread has finished."""
        return self._stop_events[job_id].is_set()

    def copy_exitstatus(self, job_id):
        """Return the rclone process exit status (-1 if not yet finished)."""
        return self._job_exitstatus.get(job_id, -1)

    def _logCommand(self, command, credentials):
        """Log the shell-equivalent command line with secrets masked."""
        bash_command = "{} {}".format(
            ' '.join("{}='{}'".format(key, value) for key, value in credentials.items()),
            ' '.join(command),
        )
        logging.info(sanitize(bash_command))

    def _formatCredentials(self, data, name):
        """
        Credentials are of the form
        RCLONE_CONFIG_CURRENT_TYPE=s3
        ^            ^       ^    ^
        [mandatory ][name  ][key][value]
        """
        prefix = "RCLONE_CONFIG_{}".format(name.upper())

        credentials = {}
        credentials['{}_TYPE'.format(prefix)] = data.type

        def _addCredential(env_key, data_key, *, value_functor=None):
            # Only set the env var when the attribute exists and is not None.
            value = getattr(data, data_key, None)
            if value is not None:
                if value_functor is not None:
                    value = value_functor(value)
                credentials[env_key] = value

        if data.type == 's3':
            _addCredential(
                '{}_REGION'.format(prefix),
                's3_region'
            )
            _addCredential(
                '{}_ACCESS_KEY_ID'.format(prefix),
                's3_access_key_id'
            )
            _addCredential(
                '{}_SECRET_ACCESS_KEY'.format(prefix),
                's3_secret_access_key'
            )
            _addCredential(
                '{}_ENDPOINT'.format(prefix),
                's3_endpoint'
            )
            _addCredential(
                '{}_V2_AUTH'.format(prefix),
                's3_v2_auth'
            )
        elif data.type == 'azureblob':
            _addCredential(
                '{}_ACCOUNT'.format(prefix),
                'azure_account'
            )
            _addCredential(
                '{}_KEY'.format(prefix),
                'azure_key'
            )
            _addCredential(
                '{}_SAS_URL'.format(prefix),
                'azure_sas_url'
            )
        elif data.type == 'swift':
            _addCredential(
                '{}_USER'.format(prefix),
                'swift_user'
            )
            _addCredential(
                '{}_KEY'.format(prefix),
                'swift_key'
            )
            _addCredential(
                '{}_AUTH'.format(prefix),
                'swift_auth'
            )
            _addCredential(
                '{}_TENANT'.format(prefix),
                'swift_tenant'
            )
        elif data.type == 'google cloud storage':
            _addCredential(
                '{}_CLIENT_ID'.format(prefix),
                'gcp_client_id'
            )
            _addCredential(
                '{}_SERVICE_ACCOUNT_CREDENTIALS'.format(prefix),
                'gcp_service_account_credentials'
            )
            _addCredential(
                '{}_PROJECT_NUMBER'.format(prefix),
                'gcp_project_number'
            )
            _addCredential(
                '{}_OBJECT_ACL'.format(prefix),
                'gcp_object_acl'
            )
            _addCredential(
                '{}_BUCKET_ACL'.format(prefix),
                'gcp_bucket_acl'
            )
        elif data.type == 'sftp':
            _addCredential(
                '{}_HOST'.format(prefix),
                'sftp_host',
            )
            _addCredential(
                '{}_PORT'.format(prefix),
                'sftp_port',
            )
            _addCredential(
                '{}_USER'.format(prefix),
                'sftp_user',
            )
            _addCredential(
                '{}_PASS'.format(prefix),
                'sftp_pass',
                value_functor=self._obscure,
            )
        elif data.type == 'dropbox':
            _addCredential(
                '{}_TOKEN'.format(prefix),
                'dropbox_token',
            )
        elif data.type == 'onedrive':
            _addCredential(
                '{}_TOKEN'.format(prefix),
                'onedrive_token',
            )
            _addCredential(
                '{}_DRIVE_ID'.format(prefix),
                'onedrive_drive_id',
            )
            _addCredential(
                '{}_DRIVE_TYPE'.format(prefix),
                'onedrive_drive_type',
            )
        elif data.type == 'webdav':
            _addCredential(
                '{}_URL'.format(prefix),
                'webdav_url',
            )
            _addCredential(
                '{}_USER'.format(prefix),
                'webdav_user',
            )
            _addCredential(
                '{}_PASS'.format(prefix),
                'webdav_pass',
                value_functor=self._obscure,
            )
        else:
            logging.error("Connection type unknown: {}".format(data.type))

        return credentials

    def _get_next_job_id(self):
        """Return the next unused integer job id."""
        self._latest_job_id += 1
        while self._job_id_exists(self._latest_job_id):
            self._latest_job_id += 1
        return self._latest_job_id

    def _job_id_exists(self, job_id):
        return job_id in self._job_status

    def _obscure(self, password):
        """
        Calls `rclone obscure password` and returns the result
        """
        return self._execute(["rclone", "obscure", password])

    def _execute(self, command, env=None):
        """Run ``command`` synchronously; return its stdout as stripped text.

        Raises RcloneException carrying rclone's stderr on failure (or
        re-raises CalledProcessError when no stderr text is available).
        """
        full_env = os.environ.copy()
        # env=None default avoids the shared-mutable-default pitfall.
        full_env.update(env or {})

        try:
            byteOutput = subprocess.check_output(
                command,
                stderr=subprocess.PIPE,
                env=full_env
            )
            output = byteOutput.decode('UTF-8').rstrip()
            return output
        except subprocess.CalledProcessError as err:
            if (err.stderr is None):
                raise
            stderr = err.stderr.decode('UTF-8').strip()
            if len(stderr) == 0:
                raise
            raise RcloneException(stderr)

    def _execute_interactive(self, command, env, job_id):
        """Launch the copy on a daemon thread and return immediately."""
        thread = threading.Thread(target=self.__execute_interactive, kwargs={
            'command': command,
            'env': env,
            'job_id': job_id,
        })
        thread.daemon = True
        thread.start()

    def __execute_interactive(self, command, env=None, job_id=0):
        """Thread body: run rclone, parse its progress output until done."""
        stop_event = self._stop_events[job_id]

        full_env = os.environ.copy()
        full_env.update(env or {})

        process = subprocess.Popen(
            command,
            env=full_env,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # rclone redraws its progress block with ANSI erase/cursor-move
        # sequences; strip them so the key/value lines can be parsed.
        reset_sequence1 = '\x1b[2K\x1b[0' # + 'G'
        reset_sequence2 = '\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[0' # + 'G'

        while not stop_event.is_set():
            line = process.stdout.readline().decode('utf-8')
            if len(line) == 0:
                # EOF only means "done" once the process has exited.
                if process.poll() is not None:
                    stop_event.set()
                else:
                    time.sleep(0.5)
                continue

            line = line.strip()
            q1 = line.find(reset_sequence1)
            if q1 != -1:
                line = line[q1 + len(reset_sequence1):]
            q2 = line.find(reset_sequence2)
            if q2 != -1:
                line = line[q2 + len(reset_sequence1):]

            line = line.replace(reset_sequence1, '')
            line = line.replace(reset_sequence2, '')

            match = re.search(r'(ERROR.*)', line)
            if match is not None:
                error = match.groups()[0]
                logging.error(error)
                self._job_error_text[job_id] += error
                self._job_error_text[job_id] += '\n'
                continue

            match = re.search(r'([A-Za-z ]+):\s*(.*)', line)
            if match is None:
                logging.info("No match in {}".format(line))
                time.sleep(0.5)
                continue

            key, value = match.groups()
            self._job_status[job_id][key] = value
            self.__process_status(job_id)

        self._job_percent[job_id] = 100
        self.__process_status(job_id)

        exitstatus = process.poll()
        self._job_exitstatus[job_id] = exitstatus

        # Drain (bounded) any remaining stderr so failures are reported.
        for _ in range(1000):
            line = process.stderr.readline().decode('utf-8')
            if len(line) == 0:
                break
            line = line.strip()
            self._job_error_text[job_id] += line
            self._job_error_text[job_id] += '\n'

        logging.info("Copy process exited with exit status {}".format(exitstatus))
        stop_event.set() # Just in case

    def __process_status(self, job_id):
        self.__process_text(job_id)
        self.__process_percent(job_id)

    def __process_text(self, job_id):
        """Render the tracked status keys into an aligned text block."""
        headers = [
            'GTransferred',
            'Errors',
            'Checks',
            'Transferred',
            'Elapsed time',
            'Transferring',
        ]

        status = self._job_status[job_id]
        text = '\n'.join(
            '{:>12}: {}'.format(header, status[header])
            for header in headers
        )
        self._job_text[job_id] = text

    def __process_percent(self, job_id):
        """Derive an integer percent-complete from the latest status lines."""
        status = self._job_status[job_id]

        match = re.search(r'(\d+)\%', status['GTransferred'])
        if match is not None:
            # int() keeps the stored type consistent with the 100/-1 branches
            # (previously a str slipped in here via match[1]).
            self._job_percent[job_id] = int(match[1])
            return

        match = re.search(r'(\d+)\%', status['Transferred'])
        if match is not None:
            self._job_percent[job_id] = int(match[1])
            return

        self._job_percent[job_id] = -1
def sanitize(string):
    """Mask credential values embedded in an rclone command line.

    Applies a fixed sequence of regex substitutions, one per known
    RCLONE_CONFIG_* secret variable, replacing the secret portion with
    a placeholder while leaving the variable name intact.
    """
    redactions = [
        # s3
        (r"(RCLONE_CONFIG_\S*_ACCESS_KEY_ID=')(\S*)(\S\S\S\S')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SECRET_ACCESS_KEY=')(\S*)(')", r"\1***\3"),

        # Azure
        (r"(RCLONE_CONFIG_\S*_KEY=')(\S*)(')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SAS_URL=')(\S*)(')", r"\1***\3"),

        # Swift
        (r"(RCLONE_CONFIG_\S*_KEY=')(\S*)(')", r"\1***\3"),

        # GCP
        (r"(RCLONE_CONFIG_\S*_CLIENT_ID=')(\S*)(\S\S\S\S')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SERVICE_ACCOUNT_CREDENTIALS=')([^']*)(')", r"\1{***}\3"),

        # SFTP / WebDAV
        (r"(RCLONE_CONFIG_\S*_PASS=')([^']*)(')", r"\1{***}\3"),

        # Dropbox / Onedrive
        (r"(RCLONE_CONFIG_\S*_TOKEN=')([^']*)(')", r"\1{***}\3"),
    ]

    for pattern, repl in redactions:
        string = re.sub(pattern, repl, string)
    return string
def main():
    """Manual smoke test: copy a local file to S3 and poll copy progress.

    Requires MOTUZ_REGION / MOTUZ_ACCESS_KEY_ID / MOTUZ_SECRET_ACCESS_KEY
    in the environment, plus a working rclone + sudo setup.
    """
    import time
    import os
    import random

    class CloudConnection:
        pass

    data = CloudConnection()
    # BUGFIX: attribute names must match what _formatCredentials() looks up
    # for type 's3' (s3_region / s3_access_key_id / s3_secret_access_key);
    # the old bare names were silently ignored and no credentials were set.
    data.__dict__ = {
        'type': 's3',
        's3_region': os.environ['MOTUZ_REGION'],
        's3_access_key_id': os.environ['MOTUZ_ACCESS_KEY_ID'],
        's3_secret_access_key': os.environ['MOTUZ_SECRET_ACCESS_KEY'],
    }

    connection = RcloneConnection()
    # result = connection.ls('/fh-ctr-mofuz-test/hello/world')

    job_id = 123
    # BUGFIX: copy() requires 'user' and 'copy_links'; omitting them raised
    # TypeError before the copy ever started.
    connection.copy(
        src_data=None, # Local
        src_resource_path='/tmp/motuz/mb_blob.bin',
        dst_data=data,
        dst_resource_path='/fh-ctr-mofuz-test/hello/world/{}'.format(random.randint(10, 10000)),
        user=os.environ.get('USER', 'root'), # copy() runs rclone via sudo -u <user>
        copy_links=False,
        job_id=job_id,
    )

    while not connection.copy_finished(job_id):
        print(connection.copy_percent(job_id))
        time.sleep(0.1)
# Run the module's manual smoke test when executed directly.
if __name__ == '__main__':
    main()
| [
"subprocess.check_output",
"json.loads",
"subprocess.Popen",
"os.environ.copy",
"time.sleep",
"threading.Event",
"collections.defaultdict",
"functools.partial",
"re.sub",
"threading.Thread",
"logging.error",
"random.randint",
"re.search"
] | [((421, 437), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (432, 437), False, 'from collections import defaultdict\n'), ((469, 485), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (480, 485), False, 'from collections import defaultdict\n'), ((514, 530), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (525, 530), False, 'from collections import defaultdict\n'), ((4187, 4204), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4202, 4204), False, 'import threading\n'), ((10227, 10244), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (10242, 10244), False, 'import os\n'), ((10870, 10984), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__execute_interactive', 'kwargs': "{'command': command, 'env': env, 'job_id': job_id}"}), "(target=self.__execute_interactive, kwargs={'command':\n command, 'env': env, 'job_id': job_id})\n", (10886, 10984), False, 'import threading\n'), ((11212, 11229), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (11227, 11229), False, 'import os\n'), ((11278, 11393), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'env': 'full_env', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, env=full_env, stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n', (11294, 11393), False, 'import subprocess\n'), ((14186, 14232), 're.search', 're.search', (['"""(\\\\d+)\\\\%"""', "status['GTransferred']"], {}), "('(\\\\d+)\\\\%', status['GTransferred'])\n", (14195, 14232), False, 'import re\n'), ((14348, 14393), 're.search', 're.search', (['"""(\\\\d+)\\\\%"""', "status['Transferred']"], {}), "('(\\\\d+)\\\\%', status['Transferred'])\n", (14357, 14393), False, 'import re\n'), ((15399, 15429), 're.sub', 're.sub', (['regex', 'replace', 'string'], {}), '(regex, replace, string)\n', (15405, 15429), False, 'import re\n'), ((16282, 16297), 'time.sleep', 'time.sleep', (['(0.1)'], {}), 
'(0.1)\n', (16292, 16297), False, 'import time\n'), ((325, 360), 'functools.partial', 'functools.partial', (['defaultdict', 'str'], {}), '(defaultdict, str)\n', (342, 360), False, 'import functools\n'), ((1953, 1971), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (1963, 1971), False, 'import json\n'), ((10312, 10382), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'stderr': 'subprocess.PIPE', 'env': 'full_env'}), '(command, stderr=subprocess.PIPE, env=full_env)\n', (10335, 10382), False, 'import subprocess\n'), ((12352, 12380), 're.search', 're.search', (['"""(ERROR.*)"""', 'line'], {}), "('(ERROR.*)', line)\n", (12361, 12380), False, 'import re\n'), ((12648, 12688), 're.search', 're.search', (['"""([A-Za-z ]+):\\\\s*(.*)"""', 'line'], {}), "('([A-Za-z ]+):\\\\s*(.*)', line)\n", (12657, 12688), False, 'import re\n'), ((12474, 12494), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (12487, 12494), False, 'import logging\n'), ((12795, 12810), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (12805, 12810), False, 'import time\n'), ((16121, 16146), 'random.randint', 'random.randint', (['(10)', '(10000)'], {}), '(10, 10000)\n', (16135, 16146), False, 'import random\n'), ((11898, 11913), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11908, 11913), False, 'import time\n')] |
import os
import re
import sys
from cffi import FFI
from pyvx import __backend_version__
mydir = os.path.dirname(os.path.abspath(__file__))
def _read_cdef(filename, defs=None):
    """Return the text of mydir/cdefs/<filename>, with optional replacements.

    ``defs`` maps source tokens (e.g. VX_API_CALL) to their substitution.
    Uses a context manager so the file handle is always closed (the old
    code leaked one open handle per header).
    """
    with open(os.path.join(mydir, "cdefs", filename)) as fd:
        text = fd.read()
    if defs:
        for key, value in defs.items():
            text = text.replace(key, value)
    return text


def build(name, openvx_install, default):
    """Compile the cffi backend module ``pyvx.backend.<name>``.

    name           -- backend name ('default' is reserved)
    openvx_install -- root of an OpenVX installation (include/ and bin/)
    default        -- if True, (re)write pyvx/backend/_default.py to use it

    Exits the process with status -1 when the OpenVX header or library
    cannot be found.
    """
    # Build from the package root so ffi.compile() and the _default.py
    # path below resolve correctly.
    os.chdir(os.path.dirname(mydir))
    assert name != 'default'
    hdr = os.path.join(openvx_install, 'include', 'VX', 'vx.h')
    if not os.path.exists(hdr):
        print("ERROR: Can't find header", hdr)
        sys.exit(-1)
    lib = os.path.join(openvx_install, 'bin', 'libopenvx.so')
    if not os.path.exists(lib):
        print("ERROR: Can't find lib", lib)
        sys.exit(-1)

    # Tokens cffi cannot parse are stripped (or mapped on Windows).
    defs = dict(VX_API_ENTRY='', VX_API_CALL='', VX_CALLBACK='', VX_MAX_KERNEL_NAME='256')
    if os.name == 'nt':
        defs['VX_API_CALL'] = '__stdcall'
        defs['VX_CALLBACK'] = '__stdcall'

    ffi = FFI()

    # vx.h
    vx = _read_cdef("vx.h")
    vx = re.subn(r'(#define\s+[^\s]+)\s.*', r'\1 ...', vx)[0]  # Remove specifics from #defines
    ffi.cdef(vx)

    # vx_vendors.h
    ffi.cdef(_read_cdef("vx_vendors.h"))

    # vx_types.h
    types = _read_cdef("vx_types.h", defs)
    types = re.subn(r'(#define\s+[^\s]+)\s.*', r'\1 ...', types)[0]  # Remove specifics from #defines
    types = re.subn(r'(/\*.*?\*/)', r'', types)[0]  # Remove some one line comments
    types = re.subn(r'=.*,', r'= ...,', types)[0]  # Remove specifics from enums
    types = re.subn(r'\[\s*[^\s]+?.*?\]', r'[...]', types)[0]  # Remove specific array sizes
    ffi.cdef(types)

    # Helper accessors exported by the generated C module.
    ffi.cdef('''
             char *_get_FMT_REF(void);
             char *_get_FMT_SIZE(void);
             int _get_KERNEL_BASE(int vendor, int lib);
             char *_get_backend_version();
             char *_get_backend_name();
             char *_get_backend_install_path();
             ''')

    # vx_kernels.h
    kernels = _read_cdef("vx_kernels.h")
    kernels = re.subn(r'=.*,', r'= ...,', kernels)[0]  # Remove specifics from enums
    ffi.cdef(kernels)

    # vx_api.h
    ffi.cdef(_read_cdef("vx_api.h", defs))

    # vx_nodes.h
    ffi.cdef(_read_cdef("vx_nodes.h", defs))

    # vxu.h
    ffi.cdef(_read_cdef("vxu.h", defs))

    ffi.set_source("pyvx.backend.%s" % name, """
        #include <VX/vx.h>
        #include <VX/vxu.h>

        char *_get_FMT_REF(void) {return VX_FMT_REF;}
        char *_get_FMT_SIZE(void) {return VX_FMT_SIZE;}
        int _get_KERNEL_BASE(int vendor, int lib) {return VX_KERNEL_BASE(vendor, lib);}
        char *_get_backend_version() {return "%s";}
        char *_get_backend_name() {return "%s";}
        char *_get_backend_install_path() {return "%s";}
        """ % (__backend_version__.decode("utf8"), name, openvx_install),
                   include_dirs=[os.path.join(openvx_install, 'include')],
                   library_dirs=[os.path.join(openvx_install, 'bin')],
                   extra_link_args=['-Wl,-rpath=' + os.path.abspath(os.path.join(openvx_install, 'bin'))],
                   libraries=['openvx', 'vxu'])
    ffi.compile()

    default_file_name = os.path.join('pyvx', 'backend', '_default.py')
    if default or not os.path.exists(default_file_name):
        with open(default_file_name, 'w') as fd:
            fd.write("from pyvx.backend.%s import ffi, lib\n" % name)

    # Sanity-check the default backend re-exports what we just built.
    import pyvx.backend as backend
    assert backend.ffi.string(backend.lib._get_backend_version()) == __backend_version__
    assert backend.ffi.string(backend.lib._get_backend_name()).decode("utf8") == name
    assert backend.ffi.string(backend.lib._get_backend_install_path()).decode("utf8") == openvx_install

    # Sanity-check the named backend module itself.
    names = {}
    exec("import pyvx.backend.%s as backend" % name, names)
    backend = names['backend']
    assert backend.ffi.string(backend.lib._get_backend_version()) == __backend_version__
    assert backend.ffi.string(backend.lib._get_backend_name()).decode("utf8") == name
    assert backend.ffi.string(backend.lib._get_backend_install_path()).decode("utf8") == openvx_install

    print('')
    print("Succesfully built backend pyvx.backend.%s in %s" % (name, mydir))
    print('')
if __name__ == '__main__':
    # Minimal hand-rolled CLI: an optional --default flag plus two positionals.
    cli_args = sys.argv[1:]
    default = '--default' in cli_args
    if default:
        cli_args.remove('--default')
    if len(cli_args) != 2:
        print("Usage: %s [--default] <name> <openvx install path>" % sys.argv[0])
    else:
        name, openvx_install = cli_args
        build(name, openvx_install, default)
| [
"pyvx.backend.lib._get_backend_name",
"os.path.exists",
"pyvx.backend.lib._get_backend_version",
"cffi.FFI",
"os.path.join",
"pyvx.__backend_version__.decode",
"pyvx.backend.lib._get_backend_install_path",
"os.getcwd",
"os.path.dirname",
"os.path.abspath",
"re.subn"
] | [((116, 141), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'import os\n'), ((196, 207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (205, 207), False, 'import os\n'), ((285, 338), 'os.path.join', 'os.path.join', (['openvx_install', '"""include"""', '"""VX"""', '"""vx.h"""'], {}), "(openvx_install, 'include', 'VX', 'vx.h')\n", (297, 338), False, 'import os\n'), ((446, 497), 'os.path.join', 'os.path.join', (['openvx_install', '"""bin"""', '"""libopenvx.so"""'], {}), "(openvx_install, 'bin', 'libopenvx.so')\n", (458, 497), False, 'import os\n'), ((801, 806), 'cffi.FFI', 'FFI', ([], {}), '()\n', (804, 806), False, 'from cffi import FFI\n'), ((3448, 3494), 'os.path.join', 'os.path.join', (['"""pyvx"""', '"""backend"""', '"""_default.py"""'], {}), "('pyvx', 'backend', '_default.py')\n", (3460, 3494), False, 'import os\n'), ((221, 243), 'os.path.dirname', 'os.path.dirname', (['mydir'], {}), '(mydir)\n', (236, 243), False, 'import os\n'), ((350, 369), 'os.path.exists', 'os.path.exists', (['hdr'], {}), '(hdr)\n', (364, 369), False, 'import os\n'), ((509, 528), 'os.path.exists', 'os.path.exists', (['lib'], {}), '(lib)\n', (523, 528), False, 'import os\n'), ((887, 938), 're.subn', 're.subn', (['"""(#define\\\\s+[^\\\\s]+)\\\\s.*"""', '"""\\\\1 ..."""', 'vx'], {}), "('(#define\\\\s+[^\\\\s]+)\\\\s.*', '\\\\1 ...', vx)\n", (894, 938), False, 'import re\n'), ((1247, 1301), 're.subn', 're.subn', (['"""(#define\\\\s+[^\\\\s]+)\\\\s.*"""', '"""\\\\1 ..."""', 'types'], {}), "('(#define\\\\s+[^\\\\s]+)\\\\s.*', '\\\\1 ...', types)\n", (1254, 1301), False, 'import re\n'), ((1348, 1383), 're.subn', 're.subn', (['"""(/\\\\*.*?\\\\*/)"""', '""""""', 'types'], {}), "('(/\\\\*.*?\\\\*/)', '', types)\n", (1355, 1383), False, 'import re\n'), ((1431, 1463), 're.subn', 're.subn', (['"""=.*,"""', '"""= ...,"""', 'types'], {}), "('=.*,', '= ...,', types)\n", (1438, 1463), False, 'import re\n'), ((1511, 1559), 're.subn', 're.subn', 
(['"""\\\\[\\\\s*[^\\\\s]+?.*?\\\\]"""', '"""[...]"""', 'types'], {}), "('\\\\[\\\\s*[^\\\\s]+?.*?\\\\]', '[...]', types)\n", (1518, 1559), False, 'import re\n'), ((1980, 2014), 're.subn', 're.subn', (['"""=.*,"""', '"""= ...,"""', 'kernels'], {}), "('=.*,', '= ...,', kernels)\n", (1987, 2014), False, 'import re\n'), ((3517, 3550), 'os.path.exists', 'os.path.exists', (['default_file_name'], {}), '(default_file_name)\n', (3531, 3550), False, 'import os\n'), ((4147, 4181), 'pyvx.backend.lib._get_backend_version', 'backend.lib._get_backend_version', ([], {}), '()\n', (4179, 4181), True, 'import pyvx.backend as backend\n'), ((833, 869), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx.h"""'], {}), "(mydir, 'cdefs', 'vx.h')\n", (845, 869), False, 'import os\n'), ((1117, 1159), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx_types.h"""'], {}), "(mydir, 'cdefs', 'vx_types.h')\n", (1129, 1159), False, 'import os\n'), ((1913, 1957), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx_kernels.h"""'], {}), "(mydir, 'cdefs', 'vx_kernels.h')\n", (1925, 1957), False, 'import os\n'), ((2103, 2143), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx_api.h"""'], {}), "(mydir, 'cdefs', 'vx_api.h')\n", (2115, 2143), False, 'import os\n'), ((2267, 2309), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx_nodes.h"""'], {}), "(mydir, 'cdefs', 'vx_nodes.h')\n", (2279, 2309), False, 'import os\n'), ((2432, 2469), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vxu.h"""'], {}), "(mydir, 'cdefs', 'vxu.h')\n", (2444, 2469), False, 'import os\n'), ((3045, 3079), 'pyvx.__backend_version__.decode', '__backend_version__.decode', (['"""utf8"""'], {}), "('utf8')\n", (3071, 3079), False, 'from pyvx import __backend_version__\n'), ((3137, 3176), 'os.path.join', 'os.path.join', (['openvx_install', '"""include"""'], {}), "(openvx_install, 'include')\n", (3149, 3176), False, 'import os\n'), ((3212, 3247), 
'os.path.join', 'os.path.join', (['openvx_install', '"""bin"""'], {}), "(openvx_install, 'bin')\n", (3224, 3247), False, 'import os\n'), ((3753, 3787), 'pyvx.backend.lib._get_backend_version', 'backend.lib._get_backend_version', ([], {}), '()\n', (3785, 3787), True, 'import pyvx.backend as backend\n'), ((1028, 1072), 'os.path.join', 'os.path.join', (['mydir', '"""cdefs"""', '"""vx_vendors.h"""'], {}), "(mydir, 'cdefs', 'vx_vendors.h')\n", (1040, 1072), False, 'import os\n'), ((4236, 4267), 'pyvx.backend.lib._get_backend_name', 'backend.lib._get_backend_name', ([], {}), '()\n', (4265, 4267), True, 'import pyvx.backend as backend\n'), ((4322, 4361), 'pyvx.backend.lib._get_backend_install_path', 'backend.lib._get_backend_install_path', ([], {}), '()\n', (4359, 4361), True, 'import pyvx.backend as backend\n'), ((3318, 3353), 'os.path.join', 'os.path.join', (['openvx_install', '"""bin"""'], {}), "(openvx_install, 'bin')\n", (3330, 3353), False, 'import os\n'), ((3846, 3877), 'pyvx.backend.lib._get_backend_name', 'backend.lib._get_backend_name', ([], {}), '()\n', (3875, 3877), True, 'import pyvx.backend as backend\n'), ((3936, 3975), 'pyvx.backend.lib._get_backend_install_path', 'backend.lib._get_backend_install_path', ([], {}), '()\n', (3973, 3975), True, 'import pyvx.backend as backend\n')] |
import requests
from bs4 import BeautifulSoup
class TrackInfo:
    """
    An object containing track information and operations necessary for
    scraping the info off of 1001tracklists.com
    """

    def __init__(self, url):
        """Scrape the tracklist at *url* and populate the info arrays."""
        self.url = url
        # The tracklist id is the path segment immediately after 'tracklist/'
        self.tracklist_id = self.url.split('tracklist/')[1].split('/')[0]
        self.tracks = []         # full "Artist - Title" strings
        self.track_names = []    # title portion of each track
        self.artist_names = []   # artist portion of each track
        self.spotify_links = []  # reserved for future use; not populated here
        self._soup = self._get_soup()
        self._track_soup = self._soup.find_all("div", class_="fontL")
        self.fill_info()

    def _get_soup(self):
        """Fetch the current webpage and return its parsed HTML soup."""
        # A browser-like User-Agent avoids the site rejecting the request.
        headers = {'User-Agent': 'Mozilla/5.0'}
        page = requests.get(self.url, headers=headers)
        soup = BeautifulSoup(page.content, "html.parser")
        return soup

    def get_tracklist_title(self):
        """Scrapes the webpage for the tracklist title."""
        title = self._soup.find("h1", id="pageTitle")
        return title.text

    def fill_info(self):
        """Fill class arrays with all links, artist, and track on the page."""
        # Bug fix: the original f-string was missing a space after "for".
        print(f"Generating data for {self.get_tracklist_title()}")
        for tracks in self._track_soup:
            track = tracks.find("meta", itemprop="name")['content']
            self.tracks.append(track)
            split = track.split(' - ')
            # Robustness: skip the artist/title split for entries that do not
            # follow the "Artist - Title" convention instead of raising an
            # IndexError on split[1].
            if len(split) >= 2:
                self.track_names.append(split[1])
                self.artist_names.append(split[0])
def main():
    """Demo entry point: scrape a sample tracklist and print every track."""
    url = ("https://www.1001tracklists.com/tracklist/9l2wdv1/"
           "two-friends-big-bootie-mix-018-2020-10-26.html")
    tracklist = TrackInfo(url)
    for entry in tracklist.tracks:
        print(entry)
if __name__ == "__main__":
main() | [
"bs4.BeautifulSoup",
"requests.get"
] | [((736, 775), 'requests.get', 'requests.get', (['self.url'], {'headers': 'headers'}), '(self.url, headers=headers)\n', (748, 775), False, 'import requests\n'), ((791, 833), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (804, 833), False, 'from bs4 import BeautifulSoup\n')] |
import os
import sys
import datetime
import logging
import traceback
from extensions import db
from sqlalchemy import exc, func
sys.path.append(os.getcwd())
def handle_exception():
    """Log the exception currently being handled and roll back the db session.

    Must be called from inside an ``except`` block: it reads the active
    exception via sys.exc_info(), rolls back the SQLAlchemy session so it
    stays usable, logs the exception type and message, and prints the
    traceback.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    # Roll back first so the session does not remain in a failed state.
    db.session.rollback()
    logging.error("Gbmodel - {}: {}".format(exc_type.__name__, exc_value))
    traceback.print_tb(exc_tb)
class professors(db.Model):
    """
    Class for the professors table
    Table column data imported automatically (reflected from db metadata)
    """
    __table__ = db.Model.metadata.tables['professors']
    def get_professor(self, id):
        """
        Get a professor with the given id
        Input: professor id
        Output: the professor object associated with the given id,
                or False if none was found / a db error occurred
        """
        try:
            result = professors.query.filter(professors.id == id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is None:
            return False
        return result
    def get_all_professors(self):
        """
        Get a list of all professors in the database (by id)
        Input: none
        Output: a list of professor objects, or False on a db error
        """
        try:
            profs = professors().query.all()
            lists = []
            for i in profs:
                temp = i
                lists.append(temp)
        except exc.SQLAlchemyError:
            handle_exception()
            profs = None
        if profs is None:
            return False
        return lists
    def check_professor(self, prof_id):
        """
        Checks if professor ID exists in the DB
        Input: professor ID given (normalized: stripped and lowercased)
        Output: True if it exists, False otherwise
        """
        try:
            prof_id = prof_id.strip().lower()
            result = professors().query.filter_by(id=prof_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return True
        return False
    def prof_id(self, name):
        """
        Gets the id of the professor with the given name, if found.
        Input: professor name
        Output: the professor's id, or -1 (sentinel) if not found
        """
        try:
            prof = professors.query.filter_by(name=name).first()
        except exc.SQLAlchemyError:
            handle_exception()
            prof = None
        if prof is None:
            return -1
        return prof.id
class teams(db.Model):
    # Model for the teams table; columns are reflected from db metadata.
    __table__ = db.Model.metadata.tables['teams']
    def get_max_team_id(self):
        """
        Calculate the next id for a newly added team
        if the table is empty, returns 1
        Otherwise, return the max id+1
        """
        try:
            max_id = db.session.query(func.max(teams.id)).scalar()
        except exc.SQLAlchemyError:
            handle_exception()
            max_id = None
        if max_id is None:
            return 1
        else:
            return max_id + 1
    def check_dup_team(self, t_name, session_id):
        """
        Check if the new team name already existed in the given session
        Input: name of the new team and session id of the selected session
        Output: return False if the team already exists, True otherwise
        """
        try:
            result = teams().query.filter_by(name=t_name,
                                             session_id=session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return False
        return True
    def insert_team(self, session_id, t_name):
        """
        Insert a team to database
        Input: self, session id and name of the new team
        Output: the id assigned to the new team
        """
        id = self.get_max_team_id()
        new_team = teams(id=id, session_id=session_id, name=t_name)
        db.session.add(new_team)
        db.session.commit()
        return id
    def get_team_session_id(self, session_id):
        """
        Get a list of all of the teams in a session
        Input: session id of the selected session
        Output: list of teams and their info from the selected session,
                or None if session_id is falsy or a db error occurred
        """
        try:
            # session id 0 is treated explicitly because 0 is falsy and
            # would otherwise fall into the 'else' branch below
            if str(session_id) == '0':
                team = teams.query.filter_by(session_id=session_id).all()
                return team
            elif session_id:
                team = teams.query.filter_by(session_id=session_id).all()
                return team
            else:
                return None
        except exc.SQLAlchemyError:
            handle_exception()
            return None
    def remove_team_from_session(self, name, session_id):
        """
        Remove a team and all the students from that team
        Input: name of the team and session id
        Output: True if the operation completed successfully. False if something went wrong
        """
        try:
            student = students()
            removed_student = removed_students()
            result = teams.query.filter(teams.name == name,
                                        teams.session_id == session_id).first()
            # get students to delete
            tid = result.id
            list_students = student.get_students(tid)
            # archive each student into removed_students before deletion
            if list_students is not None:
                for i in list_students:
                    result = students.query.filter(students.name == i,
                                                   students.session_id == session_id).first()
                    removed_student.add_student(result)
            student_list = students.query.filter(students.tid == tid,
                                                 students.session_id == session_id).all()
            # remove reports first so no report references the deleted team
            reviews = reports.query.filter(reports.tid == tid).all()
            for review in reviews:
                db.session.delete(review)
            # remove students
            # NOTE(review): the commit is inside the loop, so each student
            # delete is committed individually -- confirm this is intended
            for i in student_list:
                db.session.delete(i)
                db.session.commit()
            team = teams.query.filter(teams.id == tid, teams.session_id == session_id).first()
            db.session.delete(team)
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
    def remove_team(self, name, session_id):
        """
        Remove a team; its students (if any) are moved to the unassigned
        ("" empty-name) team rather than deleted.
        Input: name of the team and session id
        Output: True on success; False on error or when trying to remove
                the non-empty unassigned team
        """
        try:
            # Get the team slated for removal
            teams_obj = teams()
            team = teams_obj.query.filter(teams.name == name,
                                          teams.session_id == session_id).first()
            # Get the students on the team
            student_list = students.query.filter(students.tid == team.id,
                                                 students.session_id == session_id).all()
            # If we are trying to remove a team with students on it...
            if student_list:
                # Jump ship if the team is the empty team. We don't delete the empty team if there are
                # students in it
                if name == "":
                    return False
                # Otherwise, move all the students on the team to the empty team
                empty_team_id = teams_obj.get_tid_from_name("", session_id)
                if empty_team_id is None:
                    empty_team_id = teams_obj.insert_team(session_id, "")
                # reset review progress since the student no longer has a team
                for student in student_list:
                    student.midterm_done = False
                    student.final_done = False
                    student.tid = empty_team_id
            # Remove all of the review submitted with team id
            reviews = reports.query.filter(reports.tid == team.id).all()
            for review in reviews:
                db.session.delete(review)
            # Now, remove the team
            db.session.delete(team)
            # Commit db changes
            db.session.commit()
            # Indicate operation successful
            return True
        except exc.SQLAlchemyError:
            # Log exception, and rollback db changes
            handle_exception()
            return False
    def dashboard(self, session_id):
        """
        Return a lists of sessions from the database
        and a list of teams + students from a selected session.
        Each team entry is [team_name, {student params}, ...]; when a review
        period is active the params include min/max peer-review points.
        Input: session id of the selected session
        Output: (lists, sessions) tuple, or (None, sessions) if the session
                has no teams, or 'Error' on a db error
        """
        student = students()
        session = capstone_session()
        today = datetime.datetime.now()
        sessions = session.get_sessions()
        if self.get_team_session_id(session_id) is None:
            return None, sessions
        tids = [row.id for row in self.get_team_session_id(session_id)]
        team_names = [row.name for row in self.get_team_session_id(session_id)]
        lists = [[] for _ in range(len(tids))]
        # flag marks whether a points row was found for the current student
        flag = 0
        for i in range(len(tids)):
            # Get min and max
            try:
                # Query to get the min & max student points of their final
                # (self-reviews are excluded via reviewee != reviewer)
                final_points = db.session.query(
                    func.max(reports.points).label("max_points"),
                    func.min(reports.points).label("min_points"),
                    reports.reviewee).filter_by(tid=tids[i], is_final=True).filter(
                    reports.reviewee != reports.reviewer).group_by(reports.reviewee)
                # Query to get the min & max student points of their midterm
                midterm_points = db.session.query(
                    func.max(reports.points).label("max_points"),
                    func.min(reports.points).label("min_points"),
                    reports.reviewee).filter_by(tid=tids[i], is_final=False).filter(
                    reports.reviewee != reports.reviewer).group_by(reports.reviewee)
                # Query to get the students in the students table
                team_members = student.query.filter_by(tid=tids[i], session_id=session_id)
            except exc.SQLAlchemyError:
                handle_exception()
                return 'Error'
            temp = [team_names[i]]
            for team_member in team_members:
                # Checks whether the review is within the midterm dates
                if session.check_review_state(session_id, today) == "midterm":
                    for m in midterm_points:
                        if (team_member.id == m.reviewee):  # If the student's ID matches the review ID
                            params = {"name": team_member.name,
                                      "id": team_member.id,
                                      "active": "Midterm: ",
                                      "min_points": m.min_points,
                                      "max_points": m.max_points,
                                      "lead": int(team_member.is_lead)}
                            temp.append(params)
                            flag = 1
                # Checks whether the review is within the final dates
                elif session.check_review_state(session_id, today) == "final":
                    for f in final_points:
                        if (team_member.id == f.reviewee):  # If the student's ID matches the review ID
                            params = {"name": team_member.name,
                                      "id": team_member.id,
                                      "active": "Final: ",
                                      "min_points": f.min_points,
                                      "max_points": f.max_points,
                                      "lead": int(team_member.is_lead)}
                            temp.append(params)
                            flag = 1
                # No points row found: emit the student with blank stats
                if flag == 0:
                    params = {"name": team_member.name,
                              "id": team_member.id,
                              "active": "",
                              "min_points": "",
                              "max_points": "",
                              "lead": int(team_member.is_lead)}
                    temp.append(params)
                flag = 0
            lists[i] = temp
        return lists, sessions
    def get_team_from_id(self, team_id):
        """
        Get the team object associated with the given id
        Input: team_id
        Output: a team object, if found. None otherwise
        """
        try:
            result = teams.query.filter(teams.id == team_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result
    def get_tid_from_name(self, team_name, ses_id):
        """
        Get the id of the team with the given name in the session
        identified by the given session id
        Input: self, team_name, session_id
        Output: the team id if found, None otherwise
        """
        try:
            result = teams.query.filter(teams.name == team_name,
                                        teams.session_id == ses_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        if result is not None:
            return result.id
        else:
            return None
class students(db.Model):
    # Model for the students table; columns are reflected from db metadata.
    __table__ = db.Model.metadata.tables['students']
    def check_dup_student(self, id, session_id):
        """
        Check if a student already exists in a session
        Input: id of the student and selected session id
        Output: return False if the student was already in
                return True otherwise
        """
        try:
            result = students.query.filter_by(id=id, session_id=session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return False
        return True
    def insert_student(self, name, email_address, id, session_id, t_name):
        """
        Add new student
        Input: student name, student email address, student id, team name and id of the selected session
        Output: return False if something went wrong,
                add student to the database and return True otherwise
        """
        try:
            # resolve the team name to its id within the session
            result = teams.query.filter(teams.name == t_name, teams.session_id == session_id).first()
            tid = result.id
            new_student = students(id=id,
                                   tid=tid,
                                   session_id=session_id,
                                   name=name,
                                   email_address=email_address,
                                   is_lead=False,
                                   midterm_done=False,
                                   final_done=False,
                                   active="open")
            db.session.add(new_student)
            db.session.commit()
        except exc.SQLAlchemyError:
            handle_exception()
            return False
        return True
    def get_students(self, tid):
        """
        Get a list of the names of all students from a given team
        Input: team id
        Output: list of student names, if everything succeeds. None otherwise
        """
        try:
            result = [r.name for r in students.query.filter_by(tid=tid)]
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result
    def get_team_members(self, tid):
        """
        Get all members of a team
        Input: team id as tid
        Output: A list of student objects representing the students on that team
        """
        try:
            mems = students.query.filter_by(tid=tid).distinct().all()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return mems
    def get_students_in_session(self, session_id):
        """
        Gets a list of students in the given session, ordered by team (in ascending order)
        Input: session_id
        Output: the list of students, or None on a db error
        """
        # https://stackoverflow.com/questions/4186062/sqlalchemy-order-by-descending
        # https://docs.sqlalchemy.org/en/13/orm/query.html
        try:
            results = students.query.filter(
                students.session_id == session_id).order_by(students.tid.asc()).all()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return results
    def get_user_sessions(self, student_id):
        """
        Returns all capstone sessions that a user belongs to
        Input: student_id: The database id of the student to retrieve capstone session ids for
        output: an array of objects representing the rows for each capstone the student belongs to
        """
        try:
            results = []  # to store objects
            # get all matching records
            student_records = students.query.filter_by(id=student_id).all()
            if student_records is not None:
                # for each record, add the capstone the id points to
                for rec in student_records:
                    cap = capstone_session().get_sess_by_id(rec.session_id)
                    if cap is not None:
                        results.append(cap)
            return results
        except exc.SQLAlchemyError:
            handle_exception()
            return None
    def get_student_in_session(self, sid, session_id):
        """
        Get a student from the students table
        Input: student id, session id
        Output: the student that we found, or None if nothing was found
        """
        try:
            result = students.query.filter(students.id == sid, students.session_id == session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result
    def remove_student(self, sts, t_name, session_id):
        """
        Remove a list of selected students
        Input: list of students, team name and session id
        Output: return False if the list of students is empty or if something went wrong
                otherwise, remove students from the team and return True
        """
        try:
            if t_name is None or sts is None:
                return False
            removed_student = removed_students()
            team = teams.query.filter(teams.name == t_name,
                                      teams.session_id == session_id).first()
            for i in sts:
                # archive the student before deleting their row
                student = students.query.filter(students.name == i,
                                                students.tid == team.id,
                                                students.session_id == session_id).first()
                removed_student.add_student(student)
                st = students.query.filter(students.id == student.id,
                                           students.session_id == session_id).first()
                db.session.delete(st)
                db.session.commit()
        except exc.SQLAlchemyError:
            handle_exception()
            return False
        return True
    def validate(self, id):
        """
        validate cas username with student id in the database
        Input: student id
        Output: the matching student object, or False if no match was found
        """
        try:
            result = students.query.filter_by(id=id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is None:
            return False
        else:
            return result
    def get_student(self, s_id):
        """
        Get the single student matching the id passed in
        Input: student id of the student to retrieve
        Output: the student object, or None on a db error
        """
        try:
            return students.query.filter_by(id=s_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
    def update_team(self, name, s_id, t_id):
        """
        Move the student with the given name in session s_id onto team t_id.
        Output: True on success, False on a db error
        """
        try:
            students.query.filter_by(name=name,
                                     session_id=s_id).\
                update(dict(tid=t_id))
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
    def check_team_lead(self, s_id, sess_id):
        """
        Check if the student passed in by id is the team lead
        Input: student id of the student to check, session id
        Output: True if the student is a team lead, False otherwise
        """
        try:
            student = students.query.filter(students.id == s_id, students.session_id == sess_id).first()
            if student.is_lead == 1:
                return True
            else:
                return False
        except exc.SQLAlchemyError:
            handle_exception()
            return False
    def get_unassigned_students(self, s_id):
        """
        Get students from a session that do not have a team
        (i.e. belong to the empty-name "" team).
        Input: session id to grab students
        Output: students who have no team, or None if no empty team exists
        """
        try:
            empty_team = teams.query.filter_by(name="", session_id=s_id).first()
            if empty_team:
                return students.query.filter_by(session_id=s_id, tid=empty_team.id).all()
            else:
                return None
        # https://stackoverflow.com/questions/6470428/catch-multiple-exceptions-in-one-line-except-block
        except (exc.SQLAlchemyError, AttributeError):
            handle_exception()
            return None
    def edit_student(self, id, new_name, new_email):
        """
        Allows students to edit their name and email address
        Input: student's new email and name and current user id
        Output: apply new name and email to students in student table
        """
        try:
            # Find the student (all rows for the id, across sessions)
            student = students.query.filter(students.id == id).all()
            # NOTE(review): .all() returns [] rather than None, so this
            # guard never fires -- an empty-list check may be intended
            if student is None:
                return False
            # Change name and/or email, if either of them are non-blank
            for i in student:
                if new_name != '':
                    i.name = new_name
                if new_email != '':
                    i.email_address = new_email
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
    def set_lead(self, session_id, team_name, lead):
        """
        Professor can set a lead for each team
        Input: self, chosen session id, team name and lead name
        Output: set True to team lead and False to the rest of students in the team
        """
        # Sanity check inputs
        if team_name is None or lead is None:
            return False
        # Set team lead status
        try:
            # Find the team
            team = teams.query.filter(teams.session_id == session_id, teams.name == team_name).first()
            if team is None:
                return False
            # Get list of students in the given team
            student = students.query.filter(students.tid == team.id).all()
            # exactly one lead: set the named student, clear all others
            for i in student:
                if i.name == lead:
                    i.is_lead = True
                else:
                    i.is_lead = False
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
    def set_active(self, session_id, option):
        """
        Sets the active attribute in student
        For a student to be able to access their reviews, "open" must be set
        Inputs: The capstone session id of the class to set as active or not. Option as 'open' or 'close'.
                "Open" to allow students to submit/edit reviews, "close" to not allow review submission.
        Outputs: True to indicate success, False to indicate an error.
        """
        try:
            student = students.query.filter(students.session_id == session_id).all()
            # check option, set accordingly
            if option == "open":
                for i in student:
                    i.active = 'open'
                    db.session.commit()
            elif option == "close":
                for i in student:
                    i.active = 'close'
                    db.session.commit()
            else:
                # mismatch, return false
                return False
            # success, so return true
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
class capstone_session(db.Model):
    """
    Model for the capstone_session table: one row per capstone class with
    its start/end term and year, owning professor, and the date windows for
    midterm and final reviews.
    """
    __table__ = db.Model.metadata.tables['capstone_session']

    def get_max(self):
        """
        Calculate the next id for a newly added session.
        Output: 1 if the table is empty, otherwise max id + 1
        """
        try:
            max_id = db.session.query(func.max(capstone_session.id)).scalar()
        except exc.SQLAlchemyError:
            handle_exception()
            max_id = None
        if max_id is None:
            return 1
        else:
            return max_id + 1

    def insert_session(self, term, year, professor_id):
        """
        Add a session (callers are expected to have validated term/year
        and checked for duplicates first).
        Input: starting term and year of the session, professor id
        Output: id of the added session
        """
        term = term.strip().lower()
        year = year.strip().lower()
        e_term = None
        e_year = 0
        # The end term is the term following the start term; a fall start
        # rolls the end year over into the next calendar year.
        terms = ["fall", "winter", "spring", "summer"]
        for i in range(len(terms)):
            if terms[i] == term:
                e_term = terms[(i + 1) % 4]
        # NOTE: e_term is None for an invalid term name; check_term_name()
        # must have been called before this method.
        e_term = e_term.capitalize()
        if term == 'fall':
            e_year = int(year) + 1
        else:
            e_year = year
        id = self.get_max()
        term = term.capitalize()
        year = year.capitalize()
        prof_id = professor_id.lower()
        new_sess = capstone_session(id=id,
                                    start_term=term,
                                    start_year=year,
                                    end_term=e_term,
                                    end_year=e_year,
                                    professor_id=prof_id)
        db.session.add(new_sess)
        db.session.commit()
        return id

    def remove_session(self, session_id):
        """
        Removes an entire session with all the teams and students.
        Input: session id
        Output: True on success, None on a db error
        """
        try:
            team = teams()
            session_teams = team.query.filter_by(session_id=session_id).all()
            del_session = capstone_session.query.filter(capstone_session.id == session_id).first()
            # remove each team (and its students/reports) before the session
            for t in session_teams:
                team_name = t.name
                team.remove_team_from_session(team_name, session_id)
            db.session.delete(del_session)
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def get_sess_by_id(self, id):
        """
        Get the capstone session object associated with the given id.
        Input: id of capstone session to retrieve
        Output: capstone session object if found, None otherwise
        """
        try:
            return capstone_session.query.filter_by(id=id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def check_term_name(self, s_term):
        """
        Checks if the name of the term is valid.
        Input: start term of new session
        Output: True if valid, False otherwise
        """
        return s_term.strip().lower() in ("fall", "winter", "spring", "summer")

    def check_term_year(self, s_year):
        """
        Checks if the year of the term is valid (all digits).
        Input: start year of new session
        Output: False if invalid, True otherwise
        """
        return s_year.isdigit()

    def check_session_id_valid(self, v_id):
        """
        Checks that the given session id string is a non-negative integer.
        Input: session id as a string
        Output: True if valid, False otherwise
        """
        # Bug fix: the previous implementation compared the *boolean*
        # result of isdigit() against 0 ("check_id < 0"), which is never
        # true, so every id -- digits or not -- was reported as valid.
        # str.isdigit() already rejects a leading '-', so a digits-only
        # string is guaranteed non-negative.
        return v_id.isdigit()

    def check_dup_session(self, s_term, s_year, p_id):
        """
        Check if the new session already exists in the database.
        Input: start term & year of the new session, professor id
        Output: return False if the session already exists, True otherwise
        """
        try:
            # normalize to match the format used by insert_session
            s_term = s_term.strip().lower().capitalize()
            s_year = s_year.strip().lower().capitalize()
            p_id = p_id.strip().lower()
            result = capstone_session().query.filter_by(
                start_term=s_term, start_year=s_year, professor_id=p_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return False
        return True

    def get_session_id(self, term, year, prof):
        """
        Get id of a selected session.
        Input: term, year and professor name
        Output: if the term and year are not found, insert the session and
                return the new id. Otherwise, return the id of the session.
        """
        prof_id = professors().prof_id(prof)
        try:
            id = capstone_session.query.filter(capstone_session.start_term == term,
                                               capstone_session.start_year == year,
                                               capstone_session.professor_id == prof_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            id = None
        if id is None:
            # (redundant second prof_id lookup removed -- it was already
            # resolved above)
            return self.insert_session(term, str(year), prof_id)
        else:
            return id.id

    def get_sessions(self):
        """
        Get a list of sessions to display on the drop downs.
        Input: only self
        Output: list of "term - year (professor)" strings
        """
        caps = capstone_session.query.all()
        lists = []
        for i in caps:
            prof = professors.query.filter(professors.id == i.professor_id).first()
            # sessions whose professor row is missing are skipped
            if prof is not None:
                temp = str(i.start_term) + " - " + str(i.start_year) + " (" + str(prof.name) + ")"
                lists.append(temp)
        return lists

    def get_active_sessions(self):
        """
        Get a list of active capstone sessions.
        Input: self
        Output: the list of currently active capstone sessions, or None on
                a db error
        """
        # Calculate the start term and year of the sessions we expect to be
        # active: the two most recent session start points relative to today.
        currentDate = datetime.datetime.now()
        month = int(currentDate.month)
        if month in range(1, 3):
            # Fall term of last year
            start_term_1 = "Fall"
            start_year_1 = currentDate.year - 1
            # Winter term of current year
            start_term_2 = "Winter"
            start_year_2 = currentDate.year
        else:
            # Both terms will start in the same year
            start_year_1 = currentDate.year
            start_year_2 = currentDate.year
            # Winter and Spring terms
            if month in range(3, 6):
                start_term_1 = "Winter"
                start_term_2 = "Spring"
            # Spring and Summer terms
            elif month in range(6, 9):
                start_term_1 = "Spring"
                start_term_2 = "Summer"
            # Summer and Fall terms
            else:
                start_term_1 = "Summer"
                start_term_2 = "Fall"
        # Query the db for active sessions using the start term and year
        # information we calculated above
        try:
            # https://stackoverflow.com/questions/7942547/using-or-in-sqlalchemy
            # Algorithm: SELECT * FROM CAPSTONE_SESSION WHERE
            # (start_term = start_term_1 AND start_year = start_year_1)
            # OR
            # (start_term = start_term_2 AND start_year = start_year_2)
            return capstone_session.query.filter(((capstone_session.start_year == start_year_1) &
                                                  (capstone_session.start_term == start_term_1)) |
                                                 ((capstone_session.start_year == start_year_2) &
                                                  (capstone_session.start_term == start_term_2))).all()
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def check_dates(self, start, end):
        """
        Check if start and end dates are valid. Dates arrive as 'YYYYMMDD'
        strings (dashes already stripped), so integer comparison orders them.
        Input: start and end dates
        Output: Return 0 if valid (both start and end date being empty is valid)
                Return 1 if start date is after the end date
                Return 2 if either start date or end date is empty (but not both)
        """
        params = {'start': start, 'end': end}
        if params['start'] and params['end']:
            if int(params['start']) > int(params['end']):
                return 1
            else:
                return 0
        elif params['start'] is None and params['end'] is None:
            return 0
        return 2

    def date_error(self, params):
        """
        This method handles error messages for inserting dates.
        Input: dict of date strings (start/end dates for midterm/final)
        Output: an error message string, or None when the dates are valid
        """
        error_msg = None
        # normalize: strip dashes so check_dates can compare numerically,
        # and map empty values to None
        for i in params:
            if params[i]:
                params[i] = params[i].replace('-', '')
            else:
                params[i] = None
        mid = self.check_dates(params['midterm_start'], params['midterm_end'])
        final = self.check_dates(params['final_start'], params['final_end'])
        if mid == 2:
            error_msg = "Please fill out both start and end dates for the Midterm dates"
            return error_msg
        if final == 2:
            error_msg = "Please fill out both start and end dates for the Final dates"
            return error_msg
        elif mid == 1 or final == 1:
            error_msg = "Please choose an end date that starts after the start date"
            return error_msg
        return error_msg

    def split_dates(self, params):
        """
        Convert 'YYYY-MM-DD' strings into datetime objects (empty values
        become None).
        Input: dict of date strings
        Output: the same dict with datetime objects / None values
        """
        for i in params:
            if params[i]:
                params[i] = params[i].split('-')
                params[i] = datetime.datetime(int(params[i][0]), int(params[i][1]), int(params[i][2]))
            else:
                params[i] = None
        return params

    def insert_dates(self, midterm_start, midterm_end, final_start, final_end, session_id):
        """
        Insert a start and end date for midterm and final review.
        Input: start and end dates (as 'YYYY-MM-DD' strings or empty) for
               the midterm and final reviews, and the session id
        Output: updates the dates in the database and returns True
        """
        review_dates = {'midterm_start': midterm_start,
                        'midterm_end': midterm_end,
                        'final_start': final_start,
                        'final_end': final_end}
        # split_dates already maps missing values to None, so the converted
        # dict can be applied directly to the session row
        dates = self.split_dates(review_dates)
        session = capstone_session.query.filter(capstone_session.id == session_id).first()
        session.midterm_start = dates['midterm_start']
        session.midterm_end = dates['midterm_end']
        session.final_start = dates['final_start']
        session.final_end = dates['final_end']
        db.session.commit()
        return True

    def check_review_state(self, session_id, date):
        """
        Given a capstone session id to check and a date,
        this method determines the currently available review if any.
        Inputs: a capstone session id and a date which should be a python datetime object
        Outputs: 'final' if date is after the final start date for the session
                 'midterm' if the date is between the midterm and final start dates.
                 'Error' otherwise
        """
        try:
            # get the session
            session = capstone_session.query.filter(capstone_session.id == session_id).first()
            # check if final exists:
            if session.final_start is not None:
                # if after final period, return final
                if date >= session.final_start:
                    return 'final'
                elif session.midterm_start is not None:
                    # otherwise if midterm exists, check if after midterm and return if so
                    if date >= session.midterm_start:
                        return 'midterm'
                else:
                    return 'Error'
            elif session.midterm_start is not None:
                # if only midterm exists, check midterm
                if date >= session.midterm_start:
                    return 'midterm'
            else:
                # no dates set, so error
                return 'Error'
        except exc.SQLAlchemyError:
            handle_exception()
            return 'Error'

    def check_not_late(self, session_id, date, type):
        """
        This method is for determining if a review is late. It receives the type of review to check
        and compares the date sent into the method with the review's end period.
        Inputs: session_id -- the value of the id for the capstone session to check
                date: the date that the review is submitted, type: "midterm" or "final" should be received
        Outputs: True -- the review is within the open period (the review is NOT late)
                 or False -- the review IS late or an error was experienced
        """
        # (fixed the receiver parameter name from 'Self' to conventional 'self')
        try:
            # get the session
            session = capstone_session.query.filter(capstone_session.id == session_id).first()
            # check the type:
            if type == 'midterm':
                # on time only when a midterm end date exists and the
                # submission date is on or before it
                if session.midterm_end is not None:
                    return date <= session.midterm_end
                return False
            elif type == 'final':
                if session.final_end is not None:
                    return date <= session.final_end
                return False
            else:
                # unknown review type
                return False
        except exc.SQLAlchemyError:
            handle_exception()
            return False
class reports(db.Model):
__table__ = db.Model.metadata.tables['reports']
    def get_reports_for_student(self, student_id, session_id, is_final=None):
        """
        Gets all available reports for a student, optionally filtering to only midterms or finals
        Input: student id, session_id and is_final (is_final indicates if we are filtering for final reviews
               or not. is_final = true indicates we are looking for final reviews. is_final = false indicates
               we are looking for midterm reviews. is_final = None indicates we want both.
        Output: the available reports for the student, or None on a db error
        """
        try:
            reviews = {}
            if is_final is not None:
                reviews = reports.query.filter(reports.reviewee == student_id,
                                               reports.session_id == session_id,
                                               reports.is_final == is_final).all()
            else:
                # no filter on is_final: return midterm and final reviews
                reviews = reports.query.filter(reports.reviewee == student_id,
                                               reports.session_id == session_id).all()
            return reviews
        except exc.SQLAlchemyError:
            handle_exception()
            return None
    def get_report(self, reviewer_id, reviewee_id, team_id, is_final):
        """
        Get a single review from the database using the given information
        Input: reviewer_id (a student id), reviewee_id (a student id), team_id, is_final (indicates if the
               review is a final review or not)
        Output: the review, if it was found, or None if it wasn't or if there was a problem
        """
        try:
            return reports.query.filter(reports.reviewer == reviewer_id,
                                        reports.tid == team_id,
                                        reports.is_final == is_final,
                                        reports.reviewee == reviewee_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
    def get_team_reports(self, tid, is_final):
        """
        This method is for getting the reports of an entire team
        Inputs: tid -- team id of reports to retrieve, is_final - if it's the second term
        Outputs: result - all report objects for the team, or None on a db error
        """
        try:
            result = reports.query.filter(reports.tid == tid,
                                          reports.is_final == is_final).distinct().all()
            return result
        except exc.SQLAlchemyError:
            handle_exception()
            return None
def insert_report(self, sess_id, time, reviewer, tid, reviewee, tech,
ethic, com, coop, init, focus, cont, lead, org, dlg,
points, strn, wkn, traits, learned, proud, is_final, late):
"""
Stages a report to be inserted into the database -- This does NOT commit the add!
Inputs: Arguments for each individual field of the report
Outputs: true if adding was successful, false if not
"""
try:
# Build Report object from method input
new_report = reports(session_id=sess_id,
time=time,
reviewer=reviewer,
tid=tid,
reviewee=reviewee,
tech_mastery=tech,
work_ethic=ethic,
communication=com,
cooperation=coop,
initiative=init,
team_focus=focus,
contribution=cont,
leadership=lead,
organization=org,
delegation=dlg,
points=points,
strengths=strn,
weaknesses=wkn,
traits_to_work_on=traits,
what_you_learned=learned,
proud_of_accomplishment=proud,
is_final=is_final,
is_late=late)
# add the report and return true for success
db.session.add(new_report)
print('Adding Report to Session')
return True
except exc.SQLAlchemyError:
# if error, return false
handle_exception()
return False
def commit_reports(self, id, state, sess_id, success):
"""
Method to commit changes to the DB through the model while updating the user's state
input: None
output: True if successful, false otherwise
"""
# if adding reports was not successful, rollback changes to session
try:
if success is False:
try:
print('Rolling Back Reports')
db.session.rollback()
except exc.SQLAlchemyError:
return False
return False
# update appropriate student 'done' attribute
print('Finding Student')
student = students.query.filter_by(id=id, session_id=sess_id).first()
if state == 'midterm':
student.midterm_done = 1
elif state == 'final':
student.final_done = 1
else:
return False
print('Committing Reports')
db.session.commit()
return True
except exc.SQLAlchemyError:
handle_exception()
print('Rolling Back Reports')
return False
def commit_updates(self, success):
"""
This method is for committing review updates
input: success -- a boolean object indicating whether to proceed
with committing (true) or to roll back (false)
output: False -- commit was not made, True - commit was made successfully
"""
try:
if success is False:
print('Rolling Back Edits')
db.session.rollback()
return False
else:
print('Committing Edits')
db.session.commit()
return True
except exc.SQLAlchemyError:
handle_exception()
print('Rolling Back Edits')
return False
class removed_students(db.Model):
    """ORM model archiving students that were removed from a session."""
    __table__ = db.Model.metadata.tables['removed_students']

    def add_student(self, s):
        """
        Archive a removed student in the removed_students table.
        Input: s -- the student record to archive (or None)
        Output: False when s is None; otherwise True after the row is committed
        """
        if s is None:
            return False
        # Copy the student's fields and stamp the removal time.
        archived = removed_students(id=s.id,
                                     tid=s.tid,
                                     session_id=s.session_id,
                                     name=s.name,
                                     is_lead=s.is_lead,
                                     midterm_done=s.midterm_done,
                                     final_done=s.final_done,
                                     removed_date=datetime.datetime.now())
        db.session.add(archived)
        db.session.commit()
        return True
| [
"extensions.db.session.rollback",
"sqlalchemy.func.min",
"traceback.print_tb",
"sqlalchemy.func.max",
"os.getcwd",
"sys.exc_info",
"extensions.db.session.add",
"datetime.datetime.now",
"extensions.db.session.commit",
"extensions.db.session.delete",
"logging.error"
] | [((145, 156), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (154, 156), False, 'import os\n'), ((240, 254), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (252, 254), False, 'import sys\n'), ((313, 334), 'extensions.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (332, 334), False, 'from extensions import db\n'), ((459, 479), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (472, 479), False, 'import logging\n'), ((484, 524), 'traceback.print_tb', 'traceback.print_tb', (['exception_details[2]'], {}), '(exception_details[2])\n', (502, 524), False, 'import traceback\n'), ((4036, 4060), 'extensions.db.session.add', 'db.session.add', (['new_team'], {}), '(new_team)\n', (4050, 4060), False, 'from extensions import db\n'), ((4069, 4088), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4086, 4088), False, 'from extensions import db\n'), ((8818, 8841), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8839, 8841), False, 'import datetime\n'), ((26615, 26639), 'extensions.db.session.add', 'db.session.add', (['new_sess'], {}), '(new_sess)\n', (26629, 26639), False, 'from extensions import db\n'), ((26648, 26667), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (26665, 26667), False, 'from extensions import db\n'), ((31350, 31373), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (31371, 31373), False, 'import datetime\n'), ((36734, 36753), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (36751, 36753), False, 'from extensions import db\n'), ((47209, 47232), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (47230, 47232), False, 'import datetime\n'), ((47743, 47774), 'extensions.db.session.add', 'db.session.add', (['removed_student'], {}), '(removed_student)\n', (47757, 47774), False, 'from extensions import db\n'), ((47783, 47802), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (47800, 
47802), False, 'from extensions import db\n'), ((6293, 6316), 'extensions.db.session.delete', 'db.session.delete', (['team'], {}), '(team)\n', (6310, 6316), False, 'from extensions import db\n'), ((6329, 6348), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6346, 6348), False, 'from extensions import db\n'), ((8205, 8228), 'extensions.db.session.delete', 'db.session.delete', (['team'], {}), '(team)\n', (8222, 8228), False, 'from extensions import db\n'), ((8274, 8293), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8291, 8293), False, 'from extensions import db\n'), ((15127, 15154), 'extensions.db.session.add', 'db.session.add', (['new_student'], {}), '(new_student)\n', (15141, 15154), False, 'from extensions import db\n'), ((15167, 15186), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (15184, 15186), False, 'from extensions import db\n'), ((20470, 20489), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (20487, 20489), False, 'from extensions import db\n'), ((27214, 27244), 'extensions.db.session.delete', 'db.session.delete', (['del_session'], {}), '(del_session)\n', (27231, 27244), False, 'from extensions import db\n'), ((27257, 27276), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (27274, 27276), False, 'from extensions import db\n'), ((44618, 44644), 'extensions.db.session.add', 'db.session.add', (['new_report'], {}), '(new_report)\n', (44632, 44644), False, 'from extensions import db\n'), ((45863, 45882), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (45880, 45882), False, 'from extensions import db\n'), ((6021, 6046), 'extensions.db.session.delete', 'db.session.delete', (['review'], {}), '(review)\n', (6038, 6046), False, 'from extensions import db\n'), ((6129, 6149), 'extensions.db.session.delete', 'db.session.delete', (['i'], {}), '(i)\n', (6146, 6149), False, 'from extensions import db\n'), ((6166, 6185), 
'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6183, 6185), False, 'from extensions import db\n'), ((8131, 8156), 'extensions.db.session.delete', 'db.session.delete', (['review'], {}), '(review)\n', (8148, 8156), False, 'from extensions import db\n'), ((19264, 19285), 'extensions.db.session.delete', 'db.session.delete', (['st'], {}), '(st)\n', (19281, 19285), False, 'from extensions import db\n'), ((19302, 19321), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (19319, 19321), False, 'from extensions import db\n'), ((22590, 22609), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (22607, 22609), False, 'from extensions import db\n'), ((23641, 23660), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (23658, 23660), False, 'from extensions import db\n'), ((46474, 46495), 'extensions.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (46493, 46495), False, 'from extensions import db\n'), ((46601, 46620), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (46618, 46620), False, 'from extensions import db\n'), ((24520, 24539), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (24537, 24539), False, 'from extensions import db\n'), ((45306, 45327), 'extensions.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (45325, 45327), False, 'from extensions import db\n'), ((2935, 2953), 'sqlalchemy.func.max', 'func.max', (['teams.id'], {}), '(teams.id)\n', (2943, 2953), False, 'from sqlalchemy import exc, func\n'), ((24669, 24688), 'extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (24686, 24688), False, 'from extensions import db\n'), ((25264, 25293), 'sqlalchemy.func.max', 'func.max', (['capstone_session.id'], {}), '(capstone_session.id)\n', (25272, 25293), False, 'from sqlalchemy import exc, func\n'), ((9417, 9441), 'sqlalchemy.func.max', 'func.max', (['reports.points'], {}), '(reports.points)\n', 
(9425, 9441), False, 'from sqlalchemy import exc, func\n'), ((9483, 9507), 'sqlalchemy.func.min', 'func.min', (['reports.points'], {}), '(reports.points)\n', (9491, 9507), False, 'from sqlalchemy import exc, func\n'), ((9850, 9874), 'sqlalchemy.func.max', 'func.max', (['reports.points'], {}), '(reports.points)\n', (9858, 9874), False, 'from sqlalchemy import exc, func\n'), ((9916, 9940), 'sqlalchemy.func.min', 'func.min', (['reports.points'], {}), '(reports.points)\n', (9924, 9940), False, 'from sqlalchemy import exc, func\n')] |
from setuptools import setup, find_packages
# Runtime dependencies of the home security system.
REQUIRES = [
    'Flask>=1.1.1',
    'Flask-SocketIO>=4.2.1',
    'Flask-Login>=0.4.1',
    'requests>=2.22.0',
    'pytz>=2019.2',
    'paho-mqtt>=1.4.0',
    'RPi.GPIO>=0.7.0',
]

# Package metadata gathered in one mapping so it is easy to inspect and extend.
_SETUP_KWARGS = dict(
    name='AlarmPI',
    version='4.7',
    description='Home Security System',
    author='bkbilly',
    author_email='<EMAIL>',
    packages=find_packages(),
    install_requires=REQUIRES,
    # long_description=open('README.md').read()
)

setup(**_SETUP_KWARGS)
| [
"setuptools.find_packages"
] | [((377, 392), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (390, 392), False, 'from setuptools import setup, find_packages\n')] |
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.nasnet import NASNetMobile
from keras.applications import ResNet50V2
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import LSTM
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Input, Conv2D
from keras.models import Model
from collections import deque
import tensorflow as tf
import numpy as np
def create_model_head(baseModel):
    """Attach a small binary-classification head to *baseModel*.

    The head is: 3x3 average pooling -> flatten -> Dense(64, relu) ->
    Dropout(0.5) -> Dense(1, sigmoid).  Returns a new Model sharing the
    base network's input.
    """
    x = baseModel.output
    x = AveragePooling2D(pool_size=(3, 3))(x)
    x = Flatten(name="flatten")(x)
    x = Dense(64, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation="sigmoid")(x)
    return Model(inputs=baseModel.input, outputs=x)
def load_mobilenetv2():
    """Build the MobileNetV2-based classifier and load its trained weights."""
    base = MobileNetV2(weights=None, include_top=False,
                       input_tensor=Input(shape=(224, 224, 3)))
    net = create_model_head(base)
    net.load_weights('../Models/Trained models/mobileNetv2.h5')
    return net
def load_nasnetmobile():
    """Build the NASNetMobile-based classifier and load its trained weights."""
    base = NASNetMobile(weights=None, include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))
    net = create_model_head(base)
    net.load_weights('../Models/Trained models/nasnetMobile.h5')
    return net
def load_resnet50():
    """Build the ResNet50V2-based classifier and load its trained weights."""
    base = ResNet50V2(weights=None, include_top=False,
                      input_tensor=Input(shape=(224, 224, 3)))
    net = create_model_head(base)
    net.load_weights('../Models/Trained models/resnet50v2.h5')
    return net
def load_FireNet():
    """Recreate the full-size FireNet CNN (224x224x3 input, sigmoid output)
    and load its trained weights.

    The architecture is listed once as an ordered sequence of layers and
    then assembled into a Sequential model.
    """
    architecture = [
        Convolution2D(128, (3, 3), padding='same', activation='relu',
                      input_shape=(224, 224, 3)),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.1),
        Convolution2D(64, (3, 3), padding='same', activation='relu'),
        BatchNormalization(),
        AveragePooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Convolution2D(128, (3, 3), padding='same', activation='relu'),
        BatchNormalization(),
        AveragePooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Convolution2D(64, (3, 3), padding='same', activation='relu'),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.1),
        Flatten(),
        # 'high_output' is the feature layer tapped by load_FireNetStack().
        Dense(64, activation='relu', name='high_output'),
        Dropout(0.5),
        Dense(32, activation='relu'),
        Dropout(0.25),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in architecture:
        model.add(layer)
    model.load_weights('../Models/Trained models/FireNet_large_new.h5')
    return model
def load_FireNetStack():
    """Return FireNet truncated at its 'high_output' dense layer.

    Used as a 64-dim feature extractor for the stacked/temporal models.
    """
    full = load_FireNet()
    return Model(inputs=full.input,
                 outputs=full.get_layer('high_output').output)
def load_FireNetMobile():
    """Recreate the lightweight FireNetMobile CNN (64x64x3 input, sigmoid
    output) and load its trained weights."""
    architecture = [
        Convolution2D(64, (3, 3), padding='same', activation='relu',
                      input_shape=(64, 64, 3)),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.1),
        Convolution2D(32, (5, 5), padding='same', activation='relu'),
        BatchNormalization(),
        AveragePooling2D(pool_size=(2, 2)),
        Dropout(0.1),
        Convolution2D(32, (3, 3), padding='same', activation='relu'),
        BatchNormalization(),
        AveragePooling2D(pool_size=(2, 2)),
        Dropout(0.1),
        Flatten(),
        # 'low_output' is the feature layer tapped by load_FireNetMobileStack().
        Dense(64, activation='relu', name='low_output'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in architecture:
        model.add(layer)
    model.load_weights('../Models/Trained models/FireNetMobile.h5')
    return model
def load_FireNetMobileStack():
    """Return FireNetMobile truncated at its 'low_output' dense layer.

    Used as a 64-dim feature extractor for the stacked/temporal models.
    """
    full = load_FireNetMobile()
    return Model(inputs=full.input,
                 outputs=full.get_layer('low_output').output)
def load_LSTM():
    """Recreate the temporal classifier (10 timesteps x 640 features per
    step) and load its trained weights."""
    timesteps, features = 10, 640
    net = Sequential()
    net.add(LSTM(100, input_shape=(timesteps, features), return_sequences=True))
    net.add(Dropout(0.5))
    net.add(LSTM(200, return_sequences=False))
    net.add(Dropout(0.5))
    net.add(Dense(100, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    net.load_weights('../Models/Trained models/LSTM.h5')
    return net
| [
"keras.layers.core.Flatten",
"keras.layers.convolutional.Convolution2D",
"keras.models.Sequential",
"keras.layers.LSTM",
"keras.layers.core.Dense",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.convolutional.MaxPooling2D",
"keras.layers.BatchNormalization",
"keras.layers.core.Dropout",... | [((1064, 1112), 'keras.models.Model', 'Model', ([], {'inputs': 'baseModel.input', 'outputs': 'headModel'}), '(inputs=baseModel.input, outputs=headModel)\n', (1069, 1112), False, 'from keras.models import Model\n'), ((2006, 2018), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2016, 2018), False, 'from keras.models import Sequential\n'), ((3461, 3473), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3471, 3473), False, 'from keras.models import Sequential\n'), ((4696, 4708), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4706, 4708), False, 'from keras.models import Sequential\n'), ((796, 830), 'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (812, 830), False, 'from keras.layers.pooling import AveragePooling2D, MaxPooling2D\n'), ((858, 881), 'keras.layers.core.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (865, 881), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((909, 937), 'keras.layers.core.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (914, 937), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((965, 977), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (972, 977), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((1005, 1035), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1010, 1035), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2069, 2165), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': 'data_input_shape'}), "(128, (3, 3), padding='same', activation='relu', input_shape=\n data_input_shape)\n", (2082, 2165), False, 'from 
keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2173, 2193), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2191, 2193), False, 'from keras.layers import BatchNormalization\n'), ((2209, 2239), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2221, 2239), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2255, 2267), 'keras.layers.core.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (2262, 2267), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2284, 2344), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (2297, 2344), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2357, 2377), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2375, 2377), False, 'from keras.layers import BatchNormalization\n'), ((2393, 2427), 'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2409, 2427), False, 'from keras.layers.pooling import AveragePooling2D, MaxPooling2D\n'), ((2443, 2455), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2450, 2455), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2472, 2533), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(128, (3, 3), padding='same', activation='relu')\n", (2485, 2533), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2546, 2566), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2564, 2566), False, 'from keras.layers import BatchNormalization\n'), ((2582, 2616), 
'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2598, 2616), False, 'from keras.layers.pooling import AveragePooling2D, MaxPooling2D\n'), ((2632, 2644), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2639, 2644), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2661, 2721), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (2674, 2721), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2734, 2754), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2752, 2754), False, 'from keras.layers import BatchNormalization\n'), ((2770, 2800), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2782, 2800), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2816, 2828), 'keras.layers.core.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (2823, 2828), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2845, 2854), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (2852, 2854), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2870, 2918), 'keras.layers.core.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""high_output"""'}), "(64, activation='relu', name='high_output')\n", (2875, 2918), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2934, 2946), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2941, 2946), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((2962, 2990), 'keras.layers.core.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (2967, 2990), False, 'from 
keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3006, 3019), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3013, 3019), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3035, 3065), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3040, 3065), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3522, 3617), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': 'data_input_shape'}), "(64, (3, 3), padding='same', activation='relu', input_shape=\n data_input_shape)\n", (3535, 3617), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((3625, 3645), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3643, 3645), False, 'from keras.layers import BatchNormalization\n'), ((3661, 3691), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3673, 3691), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((3707, 3719), 'keras.layers.core.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3714, 3719), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3736, 3796), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(32)', '(5, 5)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, (5, 5), padding='same', activation='relu')\n", (3749, 3796), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((3809, 3829), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3827, 3829), False, 'from keras.layers import BatchNormalization\n'), ((3845, 3879), 'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3861, 3879), 
False, 'from keras.layers.pooling import AveragePooling2D, MaxPooling2D\n'), ((3895, 3907), 'keras.layers.core.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3902, 3907), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3924, 3984), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, (3, 3), padding='same', activation='relu')\n", (3937, 3984), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((3997, 4017), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4015, 4017), False, 'from keras.layers import BatchNormalization\n'), ((4033, 4067), 'keras.layers.pooling.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4049, 4067), False, 'from keras.layers.pooling import AveragePooling2D, MaxPooling2D\n'), ((4083, 4095), 'keras.layers.core.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (4090, 4095), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4112, 4121), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4119, 4121), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4137, 4184), 'keras.layers.core.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""low_output"""'}), "(64, activation='relu', name='low_output')\n", (4142, 4184), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4200, 4212), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4207, 4212), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4228, 4258), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4233, 4258), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4724, 4795), 'keras.layers.LSTM', 'LSTM', (['(100)'], 
{'input_shape': '(n_timesteps, n_features)', 'return_sequences': '(True)'}), '(100, input_shape=(n_timesteps, n_features), return_sequences=True)\n', (4728, 4795), False, 'from keras.layers import LSTM\n'), ((4812, 4824), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4819, 4824), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4841, 4874), 'keras.layers.LSTM', 'LSTM', (['(200)'], {'return_sequences': '(False)'}), '(200, return_sequences=False)\n', (4845, 4874), False, 'from keras.layers import LSTM\n'), ((4891, 4903), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4898, 4903), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4920, 4949), 'keras.layers.core.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (4925, 4949), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4966, 4996), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4971, 4996), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((1293, 1319), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (1298, 1319), False, 'from keras.layers import Input, Conv2D\n'), ((1576, 1602), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (1581, 1602), False, 'from keras.layers import Input, Conv2D\n'), ((1851, 1877), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (1856, 1877), False, 'from keras.layers import Input, Conv2D\n')] |
import pyglet
import os
from classes.car import Car
from classes.improvedCircuit import circuit
from classes.Vector import Vector2D
### MAIN LOOP
# config = pyglet.gl.Config(sample_buffers=1, samples=4)
# Main application window; the 1920x1080 size is also passed to
# circuit.fromJSON below, so keep the two values in sync.
window = pyglet.window.Window(resizable=False, width=1920, height=1080, vsync=True)
# Hand-digitised circuit outlines kept for reference; the active track is
# loaded from JSON instead (see `path` below).
#inner_points = [[18,3],[8,3],[5,4],[3,6],[2,9],[2,12],[3,14],[4,14],[6,12],[7,8],[8,7],[12,6],[16,6],[19,9],[20,11],[16,13],[13,12],[12,14],[13,15],[17,16],[20,15],[22,13],[23,8],[21,5]] #Bonk Circuit
#outer_points = [[18,0],[8,0],[2,3],[0,9],[0,14],[2,16],[5,16],[8,12],[9,9],[12,8],[15,8],[17,10],[16,11],[12,10],[11,11],[10,13],[10,15],[12,17],[17,17],[20,16],[23,14],[25,8],[23,4]] #Bonk Circuit
#inner_points = [[20,3],[7,3],[6,4.5],[7,6],[15,6],[17.5,9.5],[17.5,12],[15,15],[7,15],[6,16.5],[7,18],[20,18],[21,16.5],[21,4.5]] #Sigma Falls
#outer_points = [[21,0],[6,0],[2.5,3],[2.5,6.5],[6,9],[13,9],[14,10.5],[13,12],[6,12],[2.5,15],[2.5,18.5],[6,21],[21,21],[23.5,19],[23.5,2.5]] #Sigma Falls
#inner = [Vector2D(i[0],i[1]) for i in inner_points]
#outer = [Vector2D(i[0],i[1]) for i in outer_points]
#checkpoints = [[[10,-1],[10,4]],[[4,1],[6,4]],[[0,6],[3,7]],[[-1,13],[3,12]],[[4,13],[7,15]],[[6,9],[10,11]],[[11,5],[12,9]],[[15,10],[18,7]],[[15,10],[14,13]],[[9,14],[13,13]],[[15,17],[16,15]],[[21,12],[24,15]],[[22,8],[25,6]],[[19,5],[20,1]],[[15,-1],[15,4]]]
#circuit_checkpoints = []
#for i, checkpoint in enumerate(checkpoints):
#    circuit_checkpoints.append([])
#    for point in checkpoint:
#        circuit_checkpoints[i].append(Vector2D(point[0],point[1]))
# Resolve the circuit definition relative to this file so the script works
# regardless of the current working directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
path = dir_path + '/' + 'circuits/SIGMA_FALLS_GA.json'
# One batch with ordered groups controls draw order:
# circuitLayer (0) -> background (1) -> foreground (2) -> topper (3).
batch = pyglet.graphics.Batch()
topper = pyglet.graphics.OrderedGroup(3)
foreground = pyglet.graphics.OrderedGroup(2)
background = pyglet.graphics.OrderedGroup(1)
circuitLayer = pyglet.graphics.OrderedGroup(0)
# Flag polled by update(); intended to stop the simulation when cleared.
running = True
circ = circuit.fromJSON(path, window=[1920,1080], method="fromFullPoints")
# Place the car at the circuit's starting point.
car = Car(circ.startingPoint.x,circ.startingPoint.y)
car.position = circ.startingPoint
# Sprites must stay referenced at module level or they are garbage-collected
# and disappear from the batch.
backGround = pyglet.sprite.Sprite(circ.background, x=0,y=0, batch=batch, group=circuitLayer)
foreGround = pyglet.sprite.Sprite(circ.backgroundTopper, x=0,y=0, batch=batch, group=topper)
# Keyboard state handler polled each frame by car.update().
key = pyglet.window.key
key_handler = key.KeyStateHandler()
speed = 1.0  # NOTE(review): appears unused in this file — confirm before removing
@window.event
def on_close():
    """Stop the simulation when the window is closed.

    Bug fix: without the ``global`` declaration the assignment created a
    function-local variable and the module-level ``running`` flag never
    changed, so the handler was a no-op.
    """
    global running
    running = False
@window.event
def on_draw():
    # Delegate all drawing to render() so the frame logic lives in one place.
    render()
def update(dt):
    """Advance the simulation by *dt* seconds (scheduled at ~60 Hz).

    Steps the car from keyboard input, checks checkpoint/wall collisions,
    resets car and circuit on a crash, and shuts the app down once the
    ``running`` flag is cleared.
    """
    # Bug fix: push_handlers() pushes a NEW frame onto pyglet's handler
    # stack on every call, so doing it each tick grew the stack without
    # bound. Attach the key-state handler exactly once instead.
    if not getattr(update, '_keys_attached', False):
        window.push_handlers(key_handler)
        update._keys_attached = True
    if not running:
        pyglet.app.exit()
        return
    car.update(dt, key, key_handler)
    circ.carCollidedWithCheckpoint(car)
    hitbox = car.generateHitbox()
    car.mathIntersect(circ.vertices)
    # Crash: mark the car dead and restart both circuit and car.
    if circ.collidedWithCar(hitbox):
        car.dead = True
        circ.reset()
        car.reset()
def render():
    """Clear the window and draw the current frame.

    The values returned by the draw helpers must stay referenced until
    ``batch.draw()`` runs — pyglet shapes are removed from the batch once
    garbage-collected — hence the local bindings below.
    """
    window.clear()
    car_shapes = car.draw(batch, foreground)
    eye_shapes = car.intersectEyes(batch, circ.vertices, background)
    # Removed a dead loop that computed line.getEndPoints() for every
    # circuit vertex and discarded the result (all of its drawing calls
    # were commented out). Debug overlays — circuit outline, hitbox,
    # checkpoints — can be re-added here via circ.draw()/car.hitbox()/
    # circ.generateVisualCheckpoints() if needed.
    batch.draw()
if __name__ == "__main__":
    # Step the simulation at 60 updates per second.
    pyglet.clock.schedule_interval(update, 1/60.0)
pyglet.app.run() | [
"pyglet.app.exit",
"pyglet.clock.schedule_interval",
"pyglet.app.run",
"pyglet.graphics.OrderedGroup",
"pyglet.graphics.Batch",
"os.path.realpath",
"pyglet.sprite.Sprite",
"classes.improvedCircuit.circuit.fromJSON",
"classes.car.Car",
"pyglet.window.Window"
] | [((213, 287), 'pyglet.window.Window', 'pyglet.window.Window', ([], {'resizable': '(False)', 'width': '(1920)', 'height': '(1080)', 'vsync': '(True)'}), '(resizable=False, width=1920, height=1080, vsync=True)\n', (233, 287), False, 'import pyglet\n'), ((1688, 1711), 'pyglet.graphics.Batch', 'pyglet.graphics.Batch', ([], {}), '()\n', (1709, 1711), False, 'import pyglet\n'), ((1721, 1752), 'pyglet.graphics.OrderedGroup', 'pyglet.graphics.OrderedGroup', (['(3)'], {}), '(3)\n', (1749, 1752), False, 'import pyglet\n'), ((1766, 1797), 'pyglet.graphics.OrderedGroup', 'pyglet.graphics.OrderedGroup', (['(2)'], {}), '(2)\n', (1794, 1797), False, 'import pyglet\n'), ((1811, 1842), 'pyglet.graphics.OrderedGroup', 'pyglet.graphics.OrderedGroup', (['(1)'], {}), '(1)\n', (1839, 1842), False, 'import pyglet\n'), ((1858, 1889), 'pyglet.graphics.OrderedGroup', 'pyglet.graphics.OrderedGroup', (['(0)'], {}), '(0)\n', (1886, 1889), False, 'import pyglet\n'), ((1914, 1982), 'classes.improvedCircuit.circuit.fromJSON', 'circuit.fromJSON', (['path'], {'window': '[1920, 1080]', 'method': '"""fromFullPoints"""'}), "(path, window=[1920, 1080], method='fromFullPoints')\n", (1930, 1982), False, 'from classes.improvedCircuit import circuit\n'), ((1989, 2036), 'classes.car.Car', 'Car', (['circ.startingPoint.x', 'circ.startingPoint.y'], {}), '(circ.startingPoint.x, circ.startingPoint.y)\n', (1992, 2036), False, 'from classes.car import Car\n'), ((2084, 2169), 'pyglet.sprite.Sprite', 'pyglet.sprite.Sprite', (['circ.background'], {'x': '(0)', 'y': '(0)', 'batch': 'batch', 'group': 'circuitLayer'}), '(circ.background, x=0, y=0, batch=batch, group=circuitLayer\n )\n', (2104, 2169), False, 'import pyglet\n'), ((2177, 2262), 'pyglet.sprite.Sprite', 'pyglet.sprite.Sprite', (['circ.backgroundTopper'], {'x': '(0)', 'y': '(0)', 'batch': 'batch', 'group': 'topper'}), '(circ.backgroundTopper, x=0, y=0, batch=batch, group=topper\n )\n', (2197, 2262), False, 'import pyglet\n'), ((1596, 1622), 
'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1612, 1622), False, 'import os\n'), ((3672, 3720), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['update', '(1 / 60.0)'], {}), '(update, 1 / 60.0)\n', (3702, 3720), False, 'import pyglet\n'), ((3723, 3739), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (3737, 3739), False, 'import pyglet\n'), ((2804, 2821), 'pyglet.app.exit', 'pyglet.app.exit', ([], {}), '()\n', (2819, 2821), False, 'import pyglet\n')] |
from __future__ import absolute_import
import os
import errno
import numpy as np
def mkdir_if_missing(dir_path):
    """Create *dir_path* (and any missing parents), tolerating a pre-existing dir."""
    try:
        os.makedirs(dir_path)
    except OSError as exc:
        # "Already exists" is the expected race/no-op case; anything else is real.
        if exc.errno == errno.EEXIST:
            return
        raise
def get_free_gpu():
    """Return the index of the GPU with the most free memory.

    Shells out to ``nvidia-smi`` and parses the "Free" memory lines it
    redirects into a scratch file named ``tmp`` in the current directory.
    Requires ``nvidia-smi`` (and ``grep``) on PATH.
    """
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    # Fix: close the scratch file deterministically (the original leaked the handle).
    with open('tmp', 'r') as f:
        memory_available = [int(line.split()[2]) for line in f]
    # Fix: compute argmax once (the original called np.argmax twice).
    best_gpu = np.argmax(memory_available)
    print('Assigning workflow to GPU: ' + str(best_gpu))
    return best_gpu
| [
"os.system",
"numpy.argmax",
"os.makedirs"
] | [((260, 325), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (269, 325), False, 'import os\n'), ((494, 521), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (503, 521), True, 'import numpy as np\n'), ((133, 154), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (144, 154), False, 'import os\n'), ((453, 480), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (462, 480), True, 'import numpy as np\n')] |
import os
import sys
import json
import argparse
script_content = """\
#!/bin/sh
gpython=${PYENV_ROOT}/versions/$(pyenv global)/bin/python
gproj=${PYENV_ROOT}/versions/$(pyenv global)/bin/proj
if [[ $1 =~ ^[^\-] ]] ; then
result=$(exec $gpython $gproj --echo $1)
exit_code=$?
if test $exit_code -eq 0 ; then
if test $# -eq 1 ; then
unset PROJ_ARGS
else
PROJ_ARGS=${@:2}
fi
# deactivate if the end script is setted
deactivate_script=$(exec $gpython $gproj --deactivate)
deactivate_exit_code=$?
if test $deactivate_exit_code -eq 0 ; then
source $deactivate_script
fi
# change directory
cd $result
echo "Project:" `pwd`
# activate if the start script is setted
activate_script=$(exec $gpython $gproj --activate)
activate_exit_code=$?
if test $activate_exit_code -eq 0 ; then
source $activate_script
fi
elif test $exit_code -eq 1 ; then
echo $result
fi
elif [ $# -eq 1 ] && [ "$1" = "--activate" ] || [ "$1" = "--deactivate" ] ; then
specified_script=$(exec $gpython $gproj $1)
exit_code=$?
if test $exit_code -eq 0 ; then
source $specified_script
fi
else
(exec $gpython $gproj "$@")
fi
"""
projrc_content ="""\
alias proj='source ~/.config/proj/proj'
"""
conf_dir = os.path.expanduser('~/.config/proj')
proj_script = os.path.join(conf_dir, 'proj')
projrc = os.path.join(conf_dir, 'projrc')
project_settings = os.path.join(conf_dir, 'projects.json')
local_conf_dir = os.path.expanduser('./.proj')
def check_config():
    """Ensure the proj config directory and its bundled helper files exist."""
    if not os.path.exists(conf_dir):
        print('proj config directory does not exists.')
        print(f'Creating at \'{conf_dir}\'')
        os.makedirs(conf_dir)
    # Materialise each missing helper file from its embedded template.
    for target, content in ((proj_script, script_content), (projrc, projrc_content)):
        if not os.path.exists(target):
            with open(target, 'w') as fh:
                fh.write(content)
def load_config():
    """Return the registered-projects mapping, or an empty dict when none is saved."""
    if os.path.exists(project_settings):
        with open(project_settings, 'r') as fh:
            return json.load(fh)
    return {}
def main():
    """Entry point for the ``proj`` helper.

    Parses the CLI flags, then handles exactly one of: --echo, --activate,
    --deactivate, --init, --remove; with no recognised flag it lists all
    registered projects. Every branch terminates via sys.exit, so the exit
    code doubles as the protocol for the wrapping shell script.
    """
    check_config()
    projects = load_config()
    parser = argparse.ArgumentParser()
    # echo project path
    parser.add_argument('--echo', nargs='?', default=None, const='', metavar='project_name')
    # register current directory as [project_name]
    parser.add_argument('--init', nargs='?', default=None, const='', metavar='project_name')
    # remove registered project from list
    parser.add_argument('--remove', nargs='?', default=None, const='', metavar='project_name')
    # register startup script for the current project
    ## startup script is executed when you enter the project by proj command
    parser.add_argument('--startwith', nargs='?', default=None, const='', metavar='file_name')
    parser.add_argument('--echo-startwith', nargs='?', default=None, const='', metavar='file_name')
    # register leaving script for the current project
    ## leaving script is executed when you leave the project by proj command
    parser.add_argument('--endwith', nargs='?', default=None, const='', metavar='file_name')
    parser.add_argument('--echo-endwith', nargs='?', default=None, const='', metavar='file_name')
    # set alias
    ## if local alias;
    ##     this alias is automatically activated when you enter the project by proj command,
    ##     and automatically unaliased when you leave the project by proj command.
    ##     the configuration is saved in '.proj/aliases'
    ## if global alias;
    ##     this alias is always activated automatically.
    ##     the configuration is saved in '~/.config/proj/aliases'
    parser.add_argument('--alias')
    # remove alias
    parser.add_argument('--unalias')
    # activate local project settings
    ## 1. activate local aliases
    ## 2. run the script file which registered as --startwith
    parser.add_argument('--activate', action='store_true')
    # deactivate local project settings
    ## 1. run the script file which registered as --endwith
    ## 2. deactivate local aliases
    parser.add_argument('--deactivate', action='store_true')
    # backup local setting to the directory which registered as --set-origin
    parser.add_argument('--backup')
    # restore local setting from the directory which registered as --set-origin
    parser.add_argument('--restore')
    # set backup directory
    parser.add_argument('--set-origin')
    # set remote backup
    parser.add_argument('--remote-backup')
    # show config and status of the project
    parser.add_argument('--show')
    #parser.add_argument('--global') #globalで設定
    args = parser.parse_args()
    # --echo: print the project's path for the shell wrapper to cd into.
    if args.echo is not None:
        if args.echo in projects:
            print(projects[args.echo])
            sys.exit(0)
        else:
            print(f'Error: project \'{args.echo}\' is not registered.')
            sys.exit(1)
    local_conf = {
        'start': '',
        'end': '',
    }
    # --activate: print the path of the registered startup script (exit 0)
    # so the wrapper can `source` it; exit 1 means "nothing to run".
    if args.activate:
        if os.path.exists(os.path.join(local_conf_dir, 'config.json')):
            with open(os.path.join(local_conf_dir, 'config.json'), 'r') as f:
                local_conf = json.load(f)
        if 'start' in local_conf and local_conf['start'] != '':
            abspath = os.path.abspath(local_conf_dir)
            script_file = os.path.join(abspath, 'scripts', local_conf['start'])
            if os.path.exists(script_file):
                print(script_file)
                sys.exit(0)
        sys.exit(1)
    # --deactivate: same contract as --activate but for the leaving script.
    if args.deactivate:
        if os.path.exists(os.path.join(local_conf_dir, 'config.json')):
            with open(os.path.join(local_conf_dir, 'config.json'), 'r') as f:
                local_conf = json.load(f)
        if 'end' in local_conf and local_conf['end'] != '':
            abspath = os.path.abspath(local_conf_dir)
            script_file = os.path.join(abspath, 'scripts', local_conf['end'])
            if os.path.exists(script_file):
                print(script_file)
                sys.exit(0)
        sys.exit(1)
    # --init: register the current directory under the given name.
    if args.init is not None:
        if os.path.exists(os.path.join(local_conf_dir, 'config.json')):
            print('already registered')
            sys.exit(0)
        if args.init == '':
            print(f'Error: project name required.')
            sys.exit(1)
        elif args.init in projects:
            print(f'Error: project \'{args.init}\' is already registered.')
            print(f'project directory -> {projects[args.init]}')
            sys.exit(1)
        else:
            print('OK:', os.getcwd())
            projects[args.init] = os.getcwd()
            with open(project_settings, 'w') as f:
                json.dump(projects, f, indent=2)
            # Fix: ensure ./.proj exists before writing config.json into it;
            # the original open() failed with FileNotFoundError otherwise.
            os.makedirs(local_conf_dir, exist_ok=True)
            with open(os.path.join(local_conf_dir, 'config.json'), 'w') as f:
                json.dump(local_conf, f, indent=2)
            sys.exit(0)
    # --remove: drop the project from the registry (files on disk untouched).
    if args.remove is not None:
        if args.remove in projects:
            path = projects[args.remove]
            projects.pop(args.remove)
            with open(project_settings, 'w') as f:
                json.dump(projects, f, indent=2)
            print('removed:', args.remove, path)
            sys.exit(0)
        else:
            # Fix: the message referenced args.echo (always None here); report
            # the project name the user actually asked to remove.
            print(f'Error: project \'{args.remove}\' is not registered.')
            sys.exit(1)
    #if args.set_startup is not None:
    #    if args.set_startup
    # Default action: list every registered project.
    for k, v in projects.items():
        print(k, ':', v)
    sys.exit(0)
if __name__ == '__main__':
main()
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"os.path.join",
"os.getcwd",
"sys.exit",
"json.load",
"os.path.abspath",
"os.path.expanduser"
] | [((1403, 1439), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config/proj"""'], {}), "('~/.config/proj')\n", (1421, 1439), False, 'import os\n'), ((1454, 1484), 'os.path.join', 'os.path.join', (['conf_dir', '"""proj"""'], {}), "(conf_dir, 'proj')\n", (1466, 1484), False, 'import os\n'), ((1494, 1526), 'os.path.join', 'os.path.join', (['conf_dir', '"""projrc"""'], {}), "(conf_dir, 'projrc')\n", (1506, 1526), False, 'import os\n'), ((1546, 1585), 'os.path.join', 'os.path.join', (['conf_dir', '"""projects.json"""'], {}), "(conf_dir, 'projects.json')\n", (1558, 1585), False, 'import os\n'), ((1604, 1633), 'os.path.expanduser', 'os.path.expanduser', (['"""./.proj"""'], {}), "('./.proj')\n", (1622, 1633), False, 'import os\n'), ((2304, 2329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2327, 2329), False, 'import argparse\n'), ((7566, 7577), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7574, 7577), False, 'import sys\n'), ((1666, 1690), 'os.path.exists', 'os.path.exists', (['conf_dir'], {}), '(conf_dir)\n', (1680, 1690), False, 'import os\n'), ((1801, 1822), 'os.makedirs', 'os.makedirs', (['conf_dir'], {}), '(conf_dir)\n', (1812, 1822), False, 'import os\n'), ((1835, 1862), 'os.path.exists', 'os.path.exists', (['proj_script'], {}), '(proj_script)\n', (1849, 1862), False, 'import os\n'), ((1954, 1976), 'os.path.exists', 'os.path.exists', (['projrc'], {}), '(projrc)\n', (1968, 1976), False, 'import os\n'), ((2082, 2114), 'os.path.exists', 'os.path.exists', (['project_settings'], {}), '(project_settings)\n', (2096, 2114), False, 'import os\n'), ((2196, 2208), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2205, 2208), False, 'import json\n'), ((5630, 5641), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5638, 5641), False, 'import sys\n'), ((6168, 6179), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6176, 6179), False, 'import sys\n'), ((4912, 4923), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4920, 4923), False, 
'import sys\n'), ((5022, 5033), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5030, 5033), False, 'import sys\n'), ((5149, 5192), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (5161, 5192), False, 'import os\n'), ((5402, 5433), 'os.path.abspath', 'os.path.abspath', (['local_conf_dir'], {}), '(local_conf_dir)\n', (5417, 5433), False, 'import os\n'), ((5460, 5513), 'os.path.join', 'os.path.join', (['abspath', '"""scripts"""', "local_conf['start']"], {}), "(abspath, 'scripts', local_conf['start'])\n", (5472, 5513), False, 'import os\n'), ((5529, 5556), 'os.path.exists', 'os.path.exists', (['script_file'], {}), '(script_file)\n', (5543, 5556), False, 'import os\n'), ((5693, 5736), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (5705, 5736), False, 'import os\n'), ((5942, 5973), 'os.path.abspath', 'os.path.abspath', (['local_conf_dir'], {}), '(local_conf_dir)\n', (5957, 5973), False, 'import os\n'), ((6000, 6051), 'os.path.join', 'os.path.join', (['abspath', '"""scripts"""', "local_conf['end']"], {}), "(abspath, 'scripts', local_conf['end'])\n", (6012, 6051), False, 'import os\n'), ((6067, 6094), 'os.path.exists', 'os.path.exists', (['script_file'], {}), '(script_file)\n', (6081, 6094), False, 'import os\n'), ((6237, 6280), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (6249, 6280), False, 'import os\n'), ((6335, 6346), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6343, 6346), False, 'import sys\n'), ((6439, 6450), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6447, 6450), False, 'import sys\n'), ((7312, 7323), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7320, 7323), False, 'import sys\n'), ((7422, 7433), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7430, 7433), False, 'import sys\n'), ((5302, 5314), 'json.load', 'json.load', (['f'], {}), 
'(f)\n', (5311, 5314), False, 'import json\n'), ((5609, 5620), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5617, 5620), False, 'import sys\n'), ((5846, 5858), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5855, 5858), False, 'import json\n'), ((6147, 6158), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6155, 6158), False, 'import sys\n'), ((6640, 6651), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6648, 6651), False, 'import sys\n'), ((6738, 6749), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6747, 6749), False, 'import os\n'), ((6991, 7002), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6999, 7002), False, 'import sys\n'), ((7218, 7250), 'json.dump', 'json.dump', (['projects', 'f'], {'indent': '(2)'}), '(projects, f, indent=2)\n', (7227, 7250), False, 'import json\n'), ((5217, 5260), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (5229, 5260), False, 'import os\n'), ((5761, 5804), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (5773, 5804), False, 'import os\n'), ((6691, 6702), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6700, 6702), False, 'import os\n'), ((6817, 6849), 'json.dump', 'json.dump', (['projects', 'f'], {'indent': '(2)'}), '(projects, f, indent=2)\n', (6826, 6849), False, 'import json\n'), ((6944, 6978), 'json.dump', 'json.dump', (['local_conf', 'f'], {'indent': '(2)'}), '(local_conf, f, indent=2)\n', (6953, 6978), False, 'import json\n'), ((6872, 6915), 'os.path.join', 'os.path.join', (['local_conf_dir', '"""config.json"""'], {}), "(local_conf_dir, 'config.json')\n", (6884, 6915), False, 'import os\n')] |
# coding: utf-8
"""
Relay
~~~~~
Relay is an irc micro-framework that smells too much like a web framework
Copyright (c) 2015, ldesgoui <relay at ldesgoui dot xyz>
See LICENSE for more informations.
"""
from collections import defaultdict
import logging
import os
import socket
from . import constants
from . import parse
class Relay(object):
    """IRC micro-framework core.

    Holds the client configuration, the route -> handlers table, per-handler
    state dicts, and the blocking socket loop in run().
    """
    # Route matched against every raw IRC line when a handler gives no route.
    DEFAULT_ROUTE = ":{sender} {command} {args}"
    DEFAULT_CONFIG = dict(user="", port=6667)
    def __init__(self, name):
        # route pattern -> set of handler callables
        self.handlers = defaultdict(set)
        # connection settings; filled via config() / _from_env()
        self.client = dict(Relay.DEFAULT_CONFIG)
        self.logger = logging.getLogger(name)
        # per-handler persistent dict, passed to each handler as `state=`
        self.state = defaultdict(dict)
    def __repr__(self):
        """Debug representation: client identity plus route/handler counts."""
        classname = self.__class__.__name__
        try:
            client = "{nick}!{user}@{host}:{port}".format(**self.client)
        except KeyError:
            # nick/host not configured yet
            client = "not fully configured"
        routes = len(self.handlers)
        handlers = sum(map(len, self.handlers.values()))
        return "<{} {}, {} routes, {} handlers>".format(
            classname, client, routes, handlers)
    __str__ = __repr__
    def handler(self, arg):
        """ @register decorator """
        # Supports both @relay.handler (bare) and @relay.handler("route ...").
        def decorator(func, route=arg):
            func.relay_route = route
            self.register(func)
            return func
        if callable(arg):
            """ decorator was not given arguments, it takes DEFAULT_ROUTE """
            return decorator(func=arg, route=Relay.DEFAULT_ROUTE)
        return decorator
    def register(self, func, route=None):
        """
        Used to register a function as a handler
        This function's arguments should match the routes's results
        or at least catch *args and **kwargs.
        This cannot be used with bound methods, as of yet.
        """
        if route is not None and hasattr(func, "relay_route"):
            self.logger.warn("Overriding route for `{}`: from `{}` to `{}`"
                             .format(func, func.relay_route, route))
        if route is None:
            if not hasattr(func, "relay_route"):
                raise AttributeError("Cannot register a handler with no route")
            else:
                route = func.relay_route
        self.logger.debug("Registering handle: `{route}` -> `{func}`"
                          .format(route=route, func=func.__qualname__))
        self.handlers[route].add(func)
    def _from_env(self, values):
        """Load client settings from environment variables.

        values=True means "all five standard keys from RELAY_<KEY> vars";
        a list maps each key to RELAY_<KEY>; a dict maps key -> env var name.
        """
        if values is True:
            values = ["host", "port", "user", "nick", "password"]
        if not isinstance(values, dict):
            values = {key: "RELAY_{}".format(key.upper()) for key in values}
        config = dict()
        for key, env_key in values.items():
            val = os.getenv(env_key, None)
            # Empty / unset env vars are silently skipped.
            if not val:
                continue
            config[key] = val
        self.config(**config)
    def config(self, **options):
        """Update client settings; unknown keys are ignored. Returns self (chainable)."""
        for key, val in options.items():
            if key == 'from_env':
                self._from_env(val)
                continue
            if key not in ["host", "port", "user", "nick", "password"]:
                continue
            self.client[key] = val
        return self
    def run(self, **options):
        """
        The client in itself
        TODO: make this better, faster, stronger :)
        """
        if 'host' not in self.client or 'nick' not in self.client:
            raise ValueError("Cannot run, missing configuration.")
        self.logger.info("Connecting")
        sock = socket.socket()
        # NOTE(review): when 'port' comes from _from_env it is a string;
        # socket.connect expects an int — verify env-configured runs.
        sock.connect((self.client['host'], self.client['port']))
        self.logger.info("Connected")
        def send(message):
            # IRC lines are CRLF-terminated.
            sock.send(("{message}\r\n".format(message=message)).encode())
            self.logger.debug("Send: {message}".format(message=message))
        self.send = send
        # Standard IRC registration handshake: NICK, USER, optional PASS.
        send("NICK {nick}".format(**self.client))
        user = self.client.get('user', None) or self.client['nick']
        send("USER {0} {0} {0} :{0}".format(user))
        if 'password' in self.client:
            send("PASS {password}".format(**self.client))
        data = sock.makefile()
        while 42:
            for line in data:
                line = line.strip()
                if not line:
                    continue
                self.logger.debug("Recv: {message}".format(message=line))
                # Try every route; parse.match raises ValueError on no-match.
                for route, handlers in self.handlers.items():
                    try:
                        args, kwargs = parse.match(route, line)
                    except ValueError:
                        continue
                    for handler in handlers:
                        # Handlers are generators; each yielded string is
                        # formatted with the matched values and sent back.
                        outs = handler(*args, state=self.state[handler], **kwargs)
                        for out in outs or []:
                            send(out.format(*args, **kwargs))
def _register(route):
def decorator(func):
func.relay_route = route
return func
return decorator
@_register("PING :{ball}")
def auto_pong(*args, **kwargs):
""" answer to PING requests """
yield "PONG :{ball}"
def auto_join(channels):
    """Build a handler that joins *channels* once the server MOTD ends.

    NOTE(review): the KICK branch below references ``self``, which is not
    defined in this scope — it will raise NameError the first time a KICK
    line matches. Presumably a Relay instance's client config was intended;
    confirm and fix.
    """
    @_register(Relay.DEFAULT_ROUTE)
    def auto_join_closure(*args, **kwargs):
        # NOTE(review): this expression is evaluated and discarded — because of
        # the .format() call it is not a docstring.
        """ always re-join channels {} """.format(channels)
        command = kwargs['command']
        # IRC numeric 376 (end of MOTD): server is ready, join everything.
        if command == '376':
            yield "JOIN {}".format(", ".join(channels))
        # NOTE(review): DEFAULT_ROUTE captures {args}, not {arguments} —
        # verify this key name against what parse.match actually produces.
        args = kwargs['arguments'].split(' ')
        if command == 'KICK' and self.config['nick'] in args[1]:
            yield "JOIN {}".format(args[0])
    return auto_join_closure
| [
"logging.getLogger",
"collections.defaultdict",
"socket.socket",
"os.getenv"
] | [((499, 515), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (510, 515), False, 'from collections import defaultdict\n'), ((587, 610), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (604, 610), False, 'import logging\n'), ((632, 649), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (643, 649), False, 'from collections import defaultdict\n'), ((3504, 3519), 'socket.socket', 'socket.socket', ([], {}), '()\n', (3517, 3519), False, 'import socket\n'), ((2722, 2746), 'os.getenv', 'os.getenv', (['env_key', 'None'], {}), '(env_key, None)\n', (2731, 2746), False, 'import os\n')] |
from urwid import Edit, IntEdit, LineBox
from src.models.grade import AssignmentType, grade
from src.models.state import state
from src.views.widgets.form import Form
from src.views.widgets.radio import RadioGroup
class GradePanel(Form):
    """Form panel collecting the settings for the grade workflow."""

    def __init__(self):
        def boxed_edit(caption, initial):
            # Every plain text field is a styled-caption Edit inside a LineBox.
            return LineBox(Edit(("header", caption), initial))

        grading_directory = boxed_edit("Grading directory\n\n", state.grading_directory)
        subdirectories = boxed_edit("Subdirectories\n\n", state.subdirectories)
        assignment_type = RadioGroup("Assignment type", AssignmentType, state.assignment_type)
        deadline = boxed_edit("Deadline\n\n", state.deadline)
        assignment_sname = boxed_edit("Assignment short name\n\n", state.assignment_sname)
        assignment_lname = boxed_edit("Assignment long name\n\n", state.assignment_lname)

        # Three rows of two widgets each, keyed by field name.
        grid_elements = [
            {"grading_directory": grading_directory, "subdirectories": subdirectories},
            {"assignment_type": assignment_type, "deadline": deadline},
            {"assignment_sname": assignment_sname, "assignment_lname": assignment_lname},
        ]
        super().__init__("Grade", grid_elements, grade)
| [
"urwid.Edit",
"src.views.widgets.radio.RadioGroup"
] | [((491, 559), 'src.views.widgets.radio.RadioGroup', 'RadioGroup', (['"""Assignment type"""', 'AssignmentType', 'state.assignment_type'], {}), "('Assignment type', AssignmentType, state.assignment_type)\n", (501, 559), False, 'from src.views.widgets.radio import RadioGroup\n'), ((302, 368), 'urwid.Edit', 'Edit', (["('header', 'Grading directory\\n\\n')", 'state.grading_directory'], {}), "(('header', 'Grading directory\\n\\n'), state.grading_directory)\n", (306, 368), False, 'from urwid import Edit, IntEdit, LineBox\n'), ((403, 463), 'urwid.Edit', 'Edit', (["('header', 'Subdirectories\\n\\n')", 'state.subdirectories'], {}), "(('header', 'Subdirectories\\n\\n'), state.subdirectories)\n", (407, 463), False, 'from urwid import Edit, IntEdit, LineBox\n'), ((587, 635), 'urwid.Edit', 'Edit', (["('header', 'Deadline\\n\\n')", 'state.deadline'], {}), "(('header', 'Deadline\\n\\n'), state.deadline)\n", (591, 635), False, 'from urwid import Edit, IntEdit, LineBox\n'), ((672, 741), 'urwid.Edit', 'Edit', (["('header', 'Assignment short name\\n\\n')", 'state.assignment_sname'], {}), "(('header', 'Assignment short name\\n\\n'), state.assignment_sname)\n", (676, 741), False, 'from urwid import Edit, IntEdit, LineBox\n'), ((778, 846), 'urwid.Edit', 'Edit', (["('header', 'Assignment long name\\n\\n')", 'state.assignment_lname'], {}), "(('header', 'Assignment long name\\n\\n'), state.assignment_lname)\n", (782, 846), False, 'from urwid import Edit, IntEdit, LineBox\n')] |
# Trinket IO demo
# Welcome to CircuitPython 3.1.1 :)
import board
import adafruit_dotstar as dotstar
import time
import busio
import struct
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.mouse import Mouse
from qwertyMAC import *
overlay = webaccess
# One pixel connected internally!
dot = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.7)
uart = busio.UART(board.TX, board.RX, baudrate=115200)
#while True:
# for led in range(0,9):
# onMsg = struct.pack('bbb',2,led,3)
# uart.write(onMsg)
# time.sleep(.3)
# offMsg = struct.pack('bbb',2,led,0)
# uart.write(offMsg)
# time.sleep(.2)
kbd = Keyboard()
mouse = Mouse()
lastKey = 0
######################### MAIN LOOP ##############################
CLIENT = True
dot[0] = (0,0,60)
if CLIENT:
dot[0] = (255,0,255)
WRITE_DELAY=.005
POLL = struct.pack('b',1)
cellData = bytearray(2)
i = 0
shiftState= False
altState = False
ctrlState = False
commandState = False
dragState = False
#def updateToggles(key, pressed):
# print(("TOGGLE:",key, pressed))
# if (key == Keycode.LEFT_SHIFT):
# shiftState = pressed
# if (pressed):
# msg = struct.pack('bbb',1,1)
# uart.write(msg)
# else:
# msg = struct.pack('bbb',1,0)
# uart.write(msg)
def _set_modifier_led(led, on):
    """Light (on=True) or clear (on=False) one modifier LED on the controller."""
    uart.write(struct.pack('bbb', 2, led, 1 if on else 0))

def pressKey(newKey):
    """Send *newKey* over HID, honouring sticky modifier keys.

    Shift/Ctrl/Alt/Command toggle a latched state (and its LED) instead of
    being sent immediately; the next regular key is pressed together with
    all latched modifiers, which are then cleared along with their LEDs.
    """
    global kbd, shiftState, ctrlState, altState, commandState, uart
    # Modifier toggles: flip the latch, mirror it on the LED, press the key.
    if newKey == Keycode.LEFT_SHIFT:
        shiftState = not shiftState
        _set_modifier_led(1, shiftState)
        kbd.press(newKey)
        return
    if newKey == Keycode.CONTROL:
        ctrlState = not ctrlState
        _set_modifier_led(5, ctrlState)
        kbd.press(newKey)
        return
    if newKey == Keycode.LEFT_ALT:
        altState = not altState
        _set_modifier_led(2, altState)
        kbd.press(newKey)
        return
    if newKey == Keycode.COMMAND:
        commandState = not commandState
        _set_modifier_led(6, commandState)
        kbd.press(newKey)
        return
    # Regular key: collect latched modifiers, clearing each latch and LED.
    keys = [newKey]
    if shiftState:
        print("Adding Shift")
        keys.append(Keycode.LEFT_SHIFT)
        shiftState = False
        _set_modifier_led(1, False)
    if altState:
        print("Adding ALT")
        keys.append(Keycode.LEFT_ALT)
        altState = False
        _set_modifier_led(2, False)
    if ctrlState:
        print("Adding CONTROl")
        keys.append(Keycode.CONTROL)
        ctrlState = False
        # Fix: the original cleared LED 1 (shift's LED) here; control's LED
        # is 5, matching the toggle branch above.
        _set_modifier_led(5, False)
    if commandState:
        print("Adding COMMAND")
        keys.append(Keycode.COMMAND)
        commandState = False
        _set_modifier_led(6, False)
    kbd.press(*keys)
kbd.press(*keys)
overlayId = 0
while True:
time.sleep(0.025) # make bigger to slow down
uart.reset_input_buffer()
# print("SENDING POLL")
uart.write(POLL)
uart.write(struct.pack('BBB',1 if shiftState else 0,
1 if altState else 0,
1 if ctrlState else 0))
time.sleep(WRITE_DELAY)
response = uart.read(1)
if response is None:
print("No response")
continue
newOverlay=response[0]
if (newOverlay != overlayId):
print(("New Overlay: ", newOverlay))
overlayId = newOverlay
if (overlayId == 0):
overlay = webaccess
elif(overlayId == 5):
overlay = qwerty
time.sleep(WRITE_DELAY)
response = uart.read(1)
if response is None:
continue
numCells = response[0]
# print("Got Count: ", numCells)
cellCount = 0
while (cellCount < numCells):
uart.readinto(cellData)
# print("Got Data: ", cellData)
(idx,) = struct.unpack('<H', cellData)
col = idx//24
row = idx % 24
action = overlay[row//3][col//2]
# print((row//3, col//2, action),end=',')
newKey = overlay[row//3][col//2]
if (action > 0):
if (lastKey != 0):
if (lastKey != newKey):
kbd.release(lastKey)
pressKey(newKey)
else:
pressKey(newKey)
lastKey = newKey
else:
if (action < -99):
#These are shortcuts
index = (-1 * action) - 100
sc = shortcuts[index]
#Reset the lights & states for a shortcut
shiftState = False
msg = struct.pack('bbb',2,1,0)
uart.write(msg)
altState = False
msg = struct.pack('bbb',2,2,0)
uart.write(msg)
ctrlState = False
msg = struct.pack('bbb',2,5,0)
uart.write(msg)
commandState = False
msg = struct.pack('bbb',2,6,0)
uart.write(msg)
#if this is a list, we send each item
if (type(sc) is list):
print(sc)
for codes in sc:
if (type(codes) is tuple):
kbd.press(*codes)
kbd.release_all()
else:
kbd.press(codes)
kbd.release(codes)
time.sleep(.1)
else:
kbd.press(*sc)
kbd.release_all
elif (action == MOUSE_NW):
mouse.move(-MOUSE_INCR,-MOUSE_INCR)
elif (action == MOUSE_N):
mouse.move(0,-MOUSE_INCR)
elif (action == MOUSE_NE):
mouse.move(MOUSE_INCR,-MOUSE_INCR)
elif (action == MOUSE_W):
mouse.move(-MOUSE_INCR,0)
elif (action == MOUSE_E):
mouse.move(MOUSE_INCR,0)
elif (action == MOUSE_SW):
mouse.move(-MOUSE_INCR,MOUSE_INCR)
elif (action == MOUSE_S):
mouse.move(0,MOUSE_INCR)
elif (action == MOUSE_SE):
mouse.move(MOUSE_INCR,MOUSE_INCR)
elif (action == MOUSE_CLICK):
mouse.click(Mouse.LEFT_BUTTON)
time.sleep(.3)
elif (action == MOUSE_RIGHT_CLICK):
mouse.click(Mouse.RIGHT_BUTTON)
time.sleep(.3)
elif (action == MOUSE_DBL_CLICK):
mouse.click(Mouse.LEFT_BUTTON)
mouse.click(Mouse.LEFT_BUTTON)
time.sleep(.3)
elif (action == MOUSE_DRAG):
print(("Mouse drag: ",dragState))
if lastKey != MOUSE_DRAG:
dragState = True
lastKey = MOUSE_DRAG
mouse.press(Mouse.LEFT_BUTTON)
else:
dragState = False
mouse.release(Mouse.LEFT_BUTTON)
time.sleep(.3)
lastKey = newKey
cellCount = cellCount + 1
if (cellCount == 0):
if (lastKey != 0):
lastKey = 0
kbd.release_all() | [
"adafruit_hid.keyboard.Keyboard",
"adafruit_hid.mouse.Mouse",
"struct.pack",
"time.sleep",
"struct.unpack",
"busio.UART",
"adafruit_dotstar.DotStar"
] | [((308, 379), 'adafruit_dotstar.DotStar', 'dotstar.DotStar', (['board.APA102_SCK', 'board.APA102_MOSI', '(1)'], {'brightness': '(0.7)'}), '(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.7)\n', (323, 379), True, 'import adafruit_dotstar as dotstar\n'), ((388, 435), 'busio.UART', 'busio.UART', (['board.TX', 'board.RX'], {'baudrate': '(115200)'}), '(board.TX, board.RX, baudrate=115200)\n', (398, 435), False, 'import busio\n'), ((677, 687), 'adafruit_hid.keyboard.Keyboard', 'Keyboard', ([], {}), '()\n', (685, 687), False, 'from adafruit_hid.keyboard import Keyboard\n'), ((696, 703), 'adafruit_hid.mouse.Mouse', 'Mouse', ([], {}), '()\n', (701, 703), False, 'from adafruit_hid.mouse import Mouse\n'), ((881, 900), 'struct.pack', 'struct.pack', (['"""b"""', '(1)'], {}), "('b', 1)\n", (892, 900), False, 'import struct\n'), ((3173, 3190), 'time.sleep', 'time.sleep', (['(0.025)'], {}), '(0.025)\n', (3183, 3190), False, 'import time\n'), ((3473, 3496), 'time.sleep', 'time.sleep', (['WRITE_DELAY'], {}), '(WRITE_DELAY)\n', (3483, 3496), False, 'import time\n'), ((3858, 3881), 'time.sleep', 'time.sleep', (['WRITE_DELAY'], {}), '(WRITE_DELAY)\n', (3868, 3881), False, 'import time\n'), ((1580, 1609), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(1)', 'val'], {}), "('bbb', 2, 1, val)\n", (1591, 1609), False, 'import struct\n'), ((1815, 1844), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(5)', 'val'], {}), "('bbb', 2, 5, val)\n", (1826, 1844), False, 'import struct\n'), ((2049, 2078), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(2)', 'val'], {}), "('bbb', 2, 2, val)\n", (2060, 2078), False, 'import struct\n'), ((2294, 2323), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(6)', 'val'], {}), "('bbb', 2, 6, val)\n", (2305, 2323), False, 'import struct\n'), ((2539, 2566), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(1)', '(0)'], {}), "('bbb', 2, 1, 0)\n", (2550, 2566), False, 'import struct\n'), ((2712, 2739), 'struct.pack', 
'struct.pack', (['"""bbb"""', '(2)', '(2)', '(0)'], {}), "('bbb', 2, 2, 0)\n", (2723, 2739), False, 'import struct\n'), ((2889, 2916), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(1)', '(0)'], {}), "('bbb', 2, 1, 0)\n", (2900, 2916), False, 'import struct\n'), ((3072, 3099), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(6)', '(0)'], {}), "('bbb', 2, 6, 0)\n", (3083, 3099), False, 'import struct\n'), ((3315, 3406), 'struct.pack', 'struct.pack', (['"""BBB"""', '(1 if shiftState else 0)', '(1 if altState else 0)', '(1 if ctrlState else 0)'], {}), "('BBB', 1 if shiftState else 0, 1 if altState else 0, 1 if\n ctrlState else 0)\n", (3326, 3406), False, 'import struct\n'), ((4163, 4192), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'cellData'], {}), "('<H', cellData)\n", (4176, 4192), False, 'import struct\n'), ((4910, 4937), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(1)', '(0)'], {}), "('bbb', 2, 1, 0)\n", (4921, 4937), False, 'import struct\n'), ((5022, 5049), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(2)', '(0)'], {}), "('bbb', 2, 2, 0)\n", (5033, 5049), False, 'import struct\n'), ((5135, 5162), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(5)', '(0)'], {}), "('bbb', 2, 5, 0)\n", (5146, 5162), False, 'import struct\n'), ((5251, 5278), 'struct.pack', 'struct.pack', (['"""bbb"""', '(2)', '(6)', '(0)'], {}), "('bbb', 2, 6, 0)\n", (5262, 5278), False, 'import struct\n'), ((5757, 5772), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5767, 5772), False, 'import time\n'), ((6648, 6663), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (6658, 6663), False, 'import time\n'), ((6775, 6790), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (6785, 6790), False, 'import time\n'), ((6946, 6961), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (6956, 6961), False, 'import time\n'), ((7352, 7367), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (7362, 7367), False, 'import time\n')] |
# Generated by Django 3.0.6 on 2020-07-10 22:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of the app: creates the Person and Todo tables.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Application-level person identifier, separate from the PK.
                ('idPerson', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=350)),
                ('ittodo', models.IntegerField()),
                # Each Todo belongs to one Person; deleting the Person
                # cascades and deletes its Todos.
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='App.Person')),
            ],
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((335, 428), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (351, 428), False, 'from django.db import migrations, models\n'), ((456, 477), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (475, 477), False, 'from django.db import migrations, models\n'), ((607, 700), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (623, 700), False, 'from django.db import migrations, models\n'), ((724, 756), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(350)'}), '(max_length=350)\n', (740, 756), False, 'from django.db import migrations, models\n'), ((786, 807), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (805, 807), False, 'from django.db import migrations, models\n'), ((837, 916), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""App.Person"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='App.Person')\n", (854, 916), False, 'from django.db import migrations, models\n')] |
import numpy as np
import networkx as nx
from commons import *
from tqdm import tqdm
def apply_rrt(state_space, starting_state, target_space, obstacle_map, granularity=0.1, d_threshold=0.5,
n_samples=1000, find_optimal=True):
tree = nx.DiGraph()
tree.add_node(starting_state)
final_state = None
min_cost = None
for i in tqdm(range(n_samples)):
# select node to expand
m_g, random_point = select_node_to_expand(tree, state_space)
# sample a new point
m_new = sample_new_point(m_g, random_point, d_threshold)
# check if m_new lies in space_region
if not lies_in_area(m_new, state_space):
continue
# check if path between(m_g,m_new) defined by motion-model is collision free
if not is_collision_free(m_g, m_new, obstacle_map, granularity):
continue
# if path is free, add new node to tree
tree.add_weighted_edges_from([(m_g, m_new, cartesian_distance(m_g, m_new))])
if lies_in_area(m_new, target_space):
if final_state is None:
final_state = m_new
min_cost = nx.dijkstra_path_length(tree, starting_state, m_new)
if not find_optimal:
break
else:
# if new final state has shorter cost, set it as final state
cost = nx.dijkstra_path_length(tree, starting_state, m_new)
if cost < min_cost:
final_state = m_new
min_cost = cost
if final_state is None:
print("Target not reached.")
return tree, final_state
| [
"networkx.DiGraph",
"networkx.dijkstra_path_length"
] | [((253, 265), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (263, 265), True, 'import networkx as nx\n'), ((1155, 1207), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['tree', 'starting_state', 'm_new'], {}), '(tree, starting_state, m_new)\n', (1178, 1207), True, 'import networkx as nx\n'), ((1389, 1441), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['tree', 'starting_state', 'm_new'], {}), '(tree, starting_state, m_new)\n', (1412, 1441), True, 'import networkx as nx\n')] |
"""add-sign-hash-table
Revision ID: b829c4a4c128
Revises: cc<PASSWORD>
Create Date: 2021-05-25 16:04:18.028626
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b829c4a4c128'
down_revision = 'cc5dce03ad39'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('sign_hash',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('signhash', sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('sign_hash', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_sign_hash_signhash'), ['signhash'], unique=True)
op.create_table('user_sign_hashes',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('sign_hash', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sign_hash'], ['sign_hash.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['metamask_user.id'], )
)
op.drop_table('user_signs')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user_signs',
sa.Column('user_id', sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column('sign_id', sa.BIGINT(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['sign_id'], ['zodiacs.id'], name='user_signs_sign_id_fkey'),
sa.ForeignKeyConstraint(['user_id'], ['metamask_user.id'], name='user_signs_user_id_fkey')
)
op.drop_table('user_sign_hashes')
with op.batch_alter_table('sign_hash', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_sign_hash_signhash'))
op.drop_table('sign_hash')
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"alembic.op.batch_alter_table",
"sqlalchemy.Integer",
"sqlalchemy.String",
"sqlalchemy.BIGINT"
] | [((1030, 1057), 'alembic.op.drop_table', 'op.drop_table', (['"""user_signs"""'], {}), "('user_signs')\n", (1043, 1057), False, 'from alembic import op\n'), ((1557, 1590), 'alembic.op.drop_table', 'op.drop_table', (['"""user_sign_hashes"""'], {}), "('user_sign_hashes')\n", (1570, 1590), False, 'from alembic import op\n'), ((1730, 1756), 'alembic.op.drop_table', 'op.drop_table', (['"""sign_hash"""'], {}), "('sign_hash')\n", (1743, 1756), False, 'from alembic import op\n'), ((539, 568), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (562, 568), True, 'import sqlalchemy as sa\n'), ((584, 630), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""sign_hash"""'], {'schema': 'None'}), "('sign_hash', schema=None)\n", (604, 630), False, 'from alembic import op\n'), ((895, 951), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['sign_hash']", "['sign_hash.id']"], {}), "(['sign_hash'], ['sign_hash.id'])\n", (918, 951), True, 'import sqlalchemy as sa\n'), ((959, 1017), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['metamask_user.id']"], {}), "(['user_id'], ['metamask_user.id'])\n", (982, 1017), True, 'import sqlalchemy as sa\n'), ((1366, 1455), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['sign_id']", "['zodiacs.id']"], {'name': '"""user_signs_sign_id_fkey"""'}), "(['sign_id'], ['zodiacs.id'], name=\n 'user_signs_sign_id_fkey')\n", (1389, 1455), True, 'import sqlalchemy as sa\n'), ((1456, 1551), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['metamask_user.id']"], {'name': '"""user_signs_user_id_fkey"""'}), "(['user_id'], ['metamask_user.id'], name=\n 'user_signs_user_id_fkey')\n", (1479, 1551), True, 'import sqlalchemy as sa\n'), ((1600, 1646), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""sign_hash"""'], {'schema': 'None'}), "('sign_hash', schema=None)\n", (1620, 1646), False, 'from alembic 
import op\n'), ((439, 451), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (449, 451), True, 'import sqlalchemy as sa\n'), ((496, 517), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (505, 517), True, 'import sqlalchemy as sa\n'), ((804, 816), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (814, 816), True, 'import sqlalchemy as sa\n'), ((861, 873), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (871, 873), True, 'import sqlalchemy as sa\n'), ((1237, 1248), 'sqlalchemy.BIGINT', 'sa.BIGINT', ([], {}), '()\n', (1246, 1248), True, 'import sqlalchemy as sa\n'), ((1312, 1323), 'sqlalchemy.BIGINT', 'sa.BIGINT', ([], {}), '()\n', (1321, 1323), True, 'import sqlalchemy as sa\n')] |
# This file is part of the Blockchain-based Fair Exchange Benchmark Tool
# https://gitlab.com/MatthiasLohr/bfebench
#
# Copyright 2021-2022 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import rmtree
from tempfile import mkdtemp
from time import sleep
from unittest import TestCase
from bfebench.utils.json_stream import (
JsonObjectSocketStreamForwarder,
JsonObjectUnixDomainSocketClientStream,
JsonObjectUnixDomainSocketServerStream,
)
class JsonObjectSocketStream(TestCase):
def setUp(self) -> None:
self._tmp_dir = mkdtemp(prefix="bfebench-test-")
def tearDown(self) -> None:
rmtree(self._tmp_dir)
@property
def tmp_dir(self) -> str:
return self._tmp_dir
def test_server_client(self) -> None:
# init
server = JsonObjectUnixDomainSocketServerStream(os.path.join(self._tmp_dir, "socket"))
client = JsonObjectUnixDomainSocketClientStream(os.path.join(self._tmp_dir, "socket"))
# provide time to set up the server
sleep(0.1)
# send message from client to server
client.send_object({"foo": "bar"})
received, bytes_count = server.receive_object()
self.assertEqual(received, {"foo": "bar"})
self.assertEqual(bytes_count, 14)
# send message from server to client
server.send_object({"reply": 42})
received, bytes_count = client.receive_object()
self.assertEqual(received, {"reply": 42})
self.assertEqual(bytes_count, 13)
# send nested message from client to server
nested_test_data = {
"list": ["a", "b"],
"object": {"foo": "bar"},
"list_with_objects": [{"a": 1}, {"b": 2}],
"object_with_lists": {"a": [1, 2], "b": [3, 4]},
"object_with_objects": {"a": {"foo": "bar"}},
}
client.send_object(nested_test_data)
received, bytes_count = server.receive_object()
self.assertEqual(nested_test_data, received)
class JsonObjectSocketStreamForwarderTest(TestCase):
def setUp(self) -> None:
self._tmp_dir = mkdtemp(prefix="bfebench-test-")
def tearDown(self) -> None:
rmtree(self._tmp_dir, ignore_errors=True)
@property
def tmp_dir(self) -> str:
return self._tmp_dir
def test_forward(self) -> None:
s1 = JsonObjectUnixDomainSocketServerStream(os.path.join(self._tmp_dir, "s1"))
s2 = JsonObjectUnixDomainSocketServerStream(os.path.join(self._tmp_dir, "s2"))
forwarder = JsonObjectSocketStreamForwarder(s1, s2)
forwarder.start()
sleep(0.1)
c1 = JsonObjectUnixDomainSocketClientStream(os.path.join(self._tmp_dir, "s1"))
c2 = JsonObjectUnixDomainSocketClientStream(os.path.join(self._tmp_dir, "s2"))
sleep(0.1)
c1.send_object({"foo": "bar"})
received, bytes_count = c2.receive_object()
self.assertEqual(received, {"foo": "bar"})
self.assertEqual(bytes_count, 14)
stats = forwarder.get_stats()
self.assertEqual(stats.count_1to2, 1)
self.assertEqual(stats.bytes_1to2, 14)
| [
"os.path.join",
"time.sleep",
"bfebench.utils.json_stream.JsonObjectSocketStreamForwarder",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((1088, 1120), 'tempfile.mkdtemp', 'mkdtemp', ([], {'prefix': '"""bfebench-test-"""'}), "(prefix='bfebench-test-')\n", (1095, 1120), False, 'from tempfile import mkdtemp\n'), ((1162, 1183), 'shutil.rmtree', 'rmtree', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (1168, 1183), False, 'from shutil import rmtree\n'), ((1559, 1569), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1564, 1569), False, 'from time import sleep\n'), ((2642, 2674), 'tempfile.mkdtemp', 'mkdtemp', ([], {'prefix': '"""bfebench-test-"""'}), "(prefix='bfebench-test-')\n", (2649, 2674), False, 'from tempfile import mkdtemp\n'), ((2716, 2757), 'shutil.rmtree', 'rmtree', (['self._tmp_dir'], {'ignore_errors': '(True)'}), '(self._tmp_dir, ignore_errors=True)\n', (2722, 2757), False, 'from shutil import rmtree\n'), ((3064, 3103), 'bfebench.utils.json_stream.JsonObjectSocketStreamForwarder', 'JsonObjectSocketStreamForwarder', (['s1', 's2'], {}), '(s1, s2)\n', (3095, 3103), False, 'from bfebench.utils.json_stream import JsonObjectSocketStreamForwarder, JsonObjectUnixDomainSocketClientStream, JsonObjectUnixDomainSocketServerStream\n'), ((3139, 3149), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (3144, 3149), False, 'from time import sleep\n'), ((3334, 3344), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (3339, 3344), False, 'from time import sleep\n'), ((1372, 1409), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""socket"""'], {}), "(self._tmp_dir, 'socket')\n", (1384, 1409), False, 'import os\n'), ((1467, 1504), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""socket"""'], {}), "(self._tmp_dir, 'socket')\n", (1479, 1504), False, 'import os\n'), ((2921, 2954), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""s1"""'], {}), "(self._tmp_dir, 's1')\n", (2933, 2954), False, 'import os\n'), ((3008, 3041), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""s2"""'], {}), "(self._tmp_dir, 's2')\n", (3020, 3041), False, 'import os\n'), ((3203, 3236), 'os.path.join', 
'os.path.join', (['self._tmp_dir', '"""s1"""'], {}), "(self._tmp_dir, 's1')\n", (3215, 3236), False, 'import os\n'), ((3290, 3323), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""s2"""'], {}), "(self._tmp_dir, 's2')\n", (3302, 3323), False, 'import os\n')] |
from django.utils import timezone
from django.shortcuts import get_object_or_404
from backend.cuida24.serializers import *
logger = logging.getLogger("mylogger")
def habitsFrontToBackJSON(request_data, user):
request_data['caregiver'] = get_object_or_404(Caregiver, info=user.pk).pk
return request_data
def SOSFrontToBackJSON(request_data, user):
request_data['caregiver'] = get_object_or_404(Caregiver, info=user.pk).pk
request_data['patient'] = get_object_or_404(Patient, caregiver=request_data['caregiver']).pk
return request_data
def getGoals(caregiver):
date_now = timezone.now()
goals = Goal.objects.filter(disable=False)
choices_value = dict(Goal.TYPE)
return_data = {}
for goal in goals:
dateB = goal.dateBegin
dateE = goal.dateEnd
logger.info(goal.dateBegin)
logger.info(date_now)
logger.info(goal.dateEnd)
if dateB <= date_now <= dateE:
realized = 0
if goal.type == 'AF' or goal.type == 'LS' or goal.type == 'LI':
realized = Activity.objects.filter(type=goal.type, caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'WT':
realized = Water.objects.filter(caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'NP':
realized = Nap.objects.filter(caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'SP':
realized = Sleep.objects.filter(caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'SS':
realized = SOS.objects.filter(caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'PA' or goal.type == 'LM' or goal.type == 'AL' or goal.type == 'LT' or goal.type == 'JT':
realized = Meal.objects.filter(type=goal.type, caregiver=caregiver,
date__range=(dateB, dateE)).count()
if goal.type == 'CB' or goal.type == 'FT' or goal.type == 'VG' or goal.type == 'FB' or goal.type == 'PC' or goal.type == 'RF' or goal.type == 'AL':
realized = Meal.objects.filter(food=goal.type, caregiver=caregiver,
date__range=(dateB, dateE)).count()
return_data[str(goal.type)] = {'type': choices_value[goal.type], 'realized': realized, 'goal': goal.goal}
return return_data
| [
"django.utils.timezone.now",
"django.shortcuts.get_object_or_404"
] | [((600, 614), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (612, 614), False, 'from django.utils import timezone\n'), ((245, 287), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Caregiver'], {'info': 'user.pk'}), '(Caregiver, info=user.pk)\n', (262, 287), False, 'from django.shortcuts import get_object_or_404\n'), ((392, 434), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Caregiver'], {'info': 'user.pk'}), '(Caregiver, info=user.pk)\n', (409, 434), False, 'from django.shortcuts import get_object_or_404\n'), ((468, 531), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Patient'], {'caregiver': "request_data['caregiver']"}), "(Patient, caregiver=request_data['caregiver'])\n", (485, 531), False, 'from django.shortcuts import get_object_or_404\n')] |
# -*- coding: utf-8 -*-
"""
Low level tool for writing percent difference reports. Typically, this
is called via: :func:`cla.DR_Results.rptpct`.
"""
from io import StringIO
from types import SimpleNamespace
import warnings
import numpy as np
import matplotlib.pyplot as plt
from pyyeti import ytools, locate, writer
from ._utilities import _get_rpt_headers, _get_numform, _proc_filterval
from ._magpct import magpct
__all__ = ["rptpct1"]
# FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
    # Pin numpy's pre-1.14 str/repr formatting so report text stays stable
    # across numpy versions (see FIXME above). Numpy versions without the
    # `legacy` option raise TypeError; that is fine — they already use the
    # old formatting.
    np.set_printoptions(legacy="1.13")
except TypeError:
    pass
def _apply_pv(value, pv, oldlen):
# if value has a len that's > 1, try to partition it down;
# otherwise, return it as is:
try:
n = len(value)
except TypeError:
return value
else:
if n == 1:
return value
# `value` is a vector with len > 1 ... ensure it is a true numpy
# array:
value = np.atleast_1d(value)
# oldlen is either 0 (for `value` vectors that are expected to be
# full size ... currently, only the `filterval` and
# `magpct_filterval` vectors), or it is the length of the
# dimension that the `value` index type of partition vector
# (currently, only the `ignorepv` vector) was originally defined
# to partition.
if oldlen == 0:
# `value` is `filterval` or `magpct_filterval` ... these just
# need to be partitioned down:
newvalue = value[pv]
else:
# `value` is `ignorepv` ... it needs to be redefined to
# correspond to reduced size:
truefalse = locate.index2bool(value, oldlen)
newvalue = truefalse[pv].nonzero()[0]
return newvalue
def _align_mxmn(mxmn1, mxmn2, labels2, row_number, infodct):
if infodct["labels"] and infodct["labels"] != labels2:
n = len(infodct["labels"])
pv1, pv2 = locate.list_intersect(infodct["labels"], labels2)
mxmn1 = mxmn1[pv1]
mxmn2 = mxmn2[pv2]
infodct["labels"] = [infodct["labels"][i] for i in pv1]
row_number = row_number[pv1]
infodct["filterval"] = _apply_pv(infodct["filterval"], pv1, 0)
infodct["magpct_filterval"] = _apply_pv(infodct["magpct_filterval"], pv1, 0)
infodct["ignorepv"] = _apply_pv(infodct["ignorepv"], pv1, n)
return mxmn1, mxmn2, row_number
def _get_filtline(filterval):
if len(filterval) > 1:
filtline = "Filter: <defined row-by-row>\n"
else:
filtline = f"Filter: {filterval[0]}\n"
return filtline
def _get_noteline(use_range, names, prtbads, flagbads):
noteline = "Notes: "
tab = " "
if not use_range:
noteline += "% Diff = +/- abs(({0}-{1})/{1})*100\n".format(*names)
else:
noteline += "% Diff = +/- abs({0}-{1})/max(abs({1}(max,min)))*100\n".format(
*names
)
noteline += tab + "Sign set such that positive % differences indicate exceedances\n"
prtbad, prtbadh, prtbadl = prtbads
flagbad, flagbadh, flagbadl = flagbads
if prtbad is not None or prtbadh is not None or prtbadl is not None:
if prtbad is not None:
prtbad = abs(prtbad)
noteline += tab + f"Printing rows where abs(% Diff) > {prtbad}%\n"
elif prtbadh is not None:
noteline += tab + f"Printing rows where % Diff > {prtbadh}%\n"
else:
noteline += tab + f"Printing rows where % Diff < {prtbadl}%\n"
if flagbad is not None or flagbadh is not None or flagbadl is not None:
if flagbad is not None:
flagbad = abs(flagbad)
noteline += tab + f"Flagging (*) rows where abs(% Diff) > {flagbad}%\n"
elif flagbadh is not None:
noteline += tab + f"Flagging (*) rows where % Diff > {flagbadh}%\n"
else:
noteline += tab + f"Flagging (*) rows where % Diff < {flagbadl}%\n"
return noteline
def _get_badpv(pct, pv, bad, badh, badl, defaultpv=False):
if bad is not None or badh is not None or badl is not None:
badpv = pv.copy()
if bad is not None:
badpv &= abs(pct) > bad
elif badh is not None:
badpv &= pct > badh
else:
badpv &= pct < badl
else:
badpv = np.empty(len(pct), bool)
badpv[:] = defaultpv
return badpv
def _get_pct_diff(a, b, filt, pv, nastring, mxmn_b=None, ismax=True, flagbads=None):
# either can pass filter to be kept:
pv &= (abs(a) > filt) | (abs(b) > filt)
if mxmn_b is not None:
denom = np.nanmax(abs(mxmn_b), axis=1)
else:
denom = abs(b)
# put 1's in for filtered values ... this is temporary
a = a.copy()
b = b.copy()
a[~pv] = 1.0
b[~pv] = 1.0
z = denom == 0.0
denom[z] = 1.0
pct = 100 * abs(a - b) / denom
pct[z] = 100.0 # np.inf
# make less extreme values negative
neg = a < b if ismax else a > b
pct[neg] *= -1.0
# put nan's in for the filtered or n/a rows:
pct[~pv] = np.nan
# make 7 char version:
spct = [f"{p:7.2f}" for p in pct]
badpv = _get_badpv(pct, pv, *flagbads, False)
for j in badpv.nonzero()[0]:
spct[j] += "*"
for j in (~pv).nonzero()[0]:
spct[j] = nastring
return pct, spct
def _get_histogram_str(desc, hdr, pctinfo):
    """
    Build the text histogram summary for one comparison column.

    `desc` is the category description and `hdr` names the column
    (max/min/abs-max). ``pctinfo["hsto"]`` holds the histogram data as
    (% diff, count, percent) rows; ``pctinfo["pct"]`` holds the raw
    % diffs used for the closing statistics line. Returns the formatted
    multi-line string.
    """
    pctcount = pctinfo["hsto"]
    s = [
        (f"\n\n {desc} - {hdr} Comparison Histogram\n\n"),
        (" % Diff Count Percent\n -------- -------- -------\n"),
    ]
    with StringIO() as f:
        writer.vecwrite(f, " {:8.2f} {:8.0f} {:7.2f}\n", pctcount)
        s.append(f.getvalue())
    s.append("\n")
    # total_percent_10 will either be 0 or 1000:
    # - 0 if all % diffs are "n/a"
    # - 1000 otherwise
    total_percent_10 = np.round(pctcount[:, 2].sum() * 10)
    # cumulative-coverage lines: for widening bands, report what fraction
    # of values fall within +/- pdiff%; only print when coverage grew, and
    # stop once everything (all non-n/a values) is covered
    last = -1.0
    for pdiff in [1, 2, 5, 10, 15, 20, 25, 50, 100, 500]:
        pvdiff = abs(pctcount[:, 0]) <= pdiff
        num = pctcount[pvdiff, 2].sum()
        if num > last:
            s.append(f" {num:.1f}% of values are within {pdiff}%\n")
        if np.round(num * 10) == total_percent_10:
            break
        last = num
    pct = pctinfo["pct"]
    n = len(pct)
    if n == 0:
        # all rows filtered out: no statistics available
        s.append(
            "\n % Diff Statistics: [Min, Max, Mean, StdDev]"
            " = [n/a, n/a, n/a, n/a]\n"
        )
    else:
        # sample std-dev needs at least 2 points (ddof=1)
        stddev = 0.0 if n <= 1 else pct.std(ddof=1)
        s.append(
            "\n % Diff Statistics: [Min, Max, Mean, StdDev]"
            f" = [{pct.min():.2f}, {pct.max():.2f}, {pct.mean():.4f}, {stddev:.4f}]\n"
        )
    return "".join(s)
def _proc_pct(
    ext1,
    ext2,
    filterval,
    magpct_filterval,
    *,
    names,
    mxmn1,
    comppv,
    mxmn_b,
    ismax,
    histogram_inc,
    prtbads,
    flagbads,
    numform,
    valhdr,
    maxhdr,
    minhdr,
    absmhdr,
    pdhdr,
    nastring,
    doabsmax,
    shortabsmax,
    print_info,
):
    """
    Process one extreme-value column (max, min, or abs-max) comparison.

    Computes the percent differences of `ext1` vs `ext2` (via
    :func:`_get_pct_diff`), builds the histogram data, and extends
    `print_info` — a SimpleNamespace of parallel lists (headers1,
    headers2, formats, printargs, widths, seps, justs) — with the table
    columns for this comparison. The column layout depends on `doabsmax`
    and `shortabsmax`.

    Returns a dict with:

    - 'pct'     : % diffs for the rows that passed filtering
    - 'spct'    : string form of each % diff for all rows ('n/a' for
                  filtered rows, '*' appended to flagged rows)
    - 'hsto'    : histogram data from :func:`ytools.histogram`
    - 'prtpv'   : bool partition vector of rows to print (per `prtbads`)
    - 'mag'     : (ext1, ext2) values for the magpct plots
    - 'magfilt' : filter value(s) for the magpct plots
    """
    # handle magpct stuff here:
    mag = ext1[comppv], ext2[comppv]
    if magpct_filterval is not None and len(magpct_filterval) > 1:
        # row-by-row filter vector: partition down to the compared rows
        magfilt = magpct_filterval[comppv]
    else:
        magfilt = magpct_filterval
    # _get_pct_diff updates `pv` in place (turns off rows below filterval),
    # so work on a copy of comppv
    pv = comppv.copy()
    pct, spct = _get_pct_diff(
        ext1,
        ext2,
        filterval,
        pv,
        nastring,
        mxmn_b=mxmn_b,
        ismax=ismax,
        flagbads=flagbads,
    )
    pct_ret = pct[pv]
    hsto = ytools.histogram(pct_ret, histogram_inc)
    # for trimming down if prtbad set:
    prtpv = _get_badpv(pct, pv, *prtbads, True)
    pctlen = max(len(pdhdr), len(max(spct, key=len)))
    sformatpd = f"{{:{pctlen}}}"
    # for writer.formheader:
    numlen = max(13, len(max(names, key=len)), len(numform.format(np.pi)))
    if not doabsmax:
        # separate max & min tables: one value column per data set + % diff
        print_info.headers1.extend([*names, ""])
        print_info.headers2.extend([valhdr, valhdr, pdhdr])
        print_info.formats.extend([numform, numform, sformatpd])
        print_info.printargs.extend([ext1, ext2, spct])
        print_info.widths.extend([numlen, numlen, pctlen])
        print_info.seps.extend([4, 2, 2])
        print_info.justs.extend(["c", "c", "c"])
    elif shortabsmax:
        # abs-max only: one abs-max column per data set + % diff
        print_info.headers1.extend([*names, ""])
        print_info.headers2.extend([absmhdr, absmhdr, pdhdr])
        print_info.formats.extend([numform, numform, sformatpd])
        print_info.printargs.extend([ext1, ext2, spct])
        print_info.widths.extend([numlen, numlen, pctlen])
        print_info.seps.extend([4, 2, 2])
        print_info.justs.extend(["c", "c", "c"])
    else:
        # full abs-max: max, min & abs-max for set 1, abs-max for set 2,
        # then % diff
        print_info.headers1.extend([names[0], names[0], names[0], names[1], ""])
        print_info.headers2.extend([maxhdr, minhdr, absmhdr, absmhdr, pdhdr])
        print_info.formats.extend([numform, numform, numform, numform, sformatpd])
        print_info.printargs.extend([mxmn1[:, 0], mxmn1[:, 1], ext1, ext2, spct])
        print_info.widths.extend([numlen, numlen, numlen, numlen, pctlen])
        print_info.seps.extend([4, 2, 2, 2, 2])
        print_info.justs.extend(["c", "c", "c", "c", "c"])
    return dict(
        pct=pct_ret, spct=spct, hsto=hsto, prtpv=prtpv, mag=mag, magfilt=magfilt
    )
def _figure_on(name, doabsmax, show_figures):
    """
    Open a matplotlib figure for the comparison plots.

    Full-page (8.5 x 11) for three panels; one third of the height when
    only the abs-max panel is plotted. When `show_figures` is true the
    figure is named (and cleared, so reruns don't overplot); otherwise an
    anonymous off-screen figure is used.
    """
    size = [8.5, 11.0]
    if doabsmax:
        size[1] /= 3.0
    if show_figures:
        plt.figure(name, figsize=size)
        plt.clf()
    else:
        plt.figure(figsize=size)
def _figure_off(show_figures):
    """Close the current figure unless it is being shown interactively."""
    if show_figures:
        return
    plt.close()
def _prep_subplot(pctinfo, sp):
    """
    Activate subplot `sp` when the max/min panels are present.

    Does nothing when only abs-max is being processed ("mx" absent from
    `pctinfo`): that case uses a single axes. Panels after the first
    (sp > 311) share their x-axis with the previous one.
    """
    if "mx" not in pctinfo:
        # absmax-only report: single axes, no subplots
        return
    if sp > 311:
        plt.subplot(sp, sharex=plt.gca())
    else:
        plt.subplot(sp)
def _plot_magpct(
    pctinfo,
    names,
    desc,
    doabsmax,
    filename,
    magpct_options,
    use_range,
    maxhdr,
    minhdr,
    absmhdr,
    show_figures,
    tight_layout_args,
):
    """
    Generate the "% diff vs magnitude" plots via :func:`magpct`.

    One panel per available comparison ("mx", "mn", "amx" entries of
    `pctinfo`). When `use_range` is true, the abs-max reference values
    are passed to :func:`magpct` as the denominator reference. If
    `filename` is a string, the figure is saved to
    ``filename + ".magpct.png"``.
    """
    title_fmt = f"{desc} - {{}} Comparison vs Magnitude"
    xlabel = f"{names[1]} Magnitude"
    ylabel = f"% Diff of {names[0]} vs {names[1]}"
    _figure_on("Magpct - " + desc, doabsmax, show_figures)
    try:
        panels = (
            ("mx", maxhdr, 311, True),
            ("mn", minhdr, 312, False),
            ("amx", absmhdr, 313, True),
        )
        for lbl, hdr, sp, ismax in panels:
            _prep_subplot(pctinfo, sp)
            if lbl not in pctinfo:
                continue
            # use_range: denominator reference is the abs-max reference set
            ref = pctinfo["amx"]["mag"][1] if use_range else None
            magpct(
                pctinfo[lbl]["mag"][0],
                pctinfo[lbl]["mag"][1],
                Ref=ref,
                ismax=ismax,
                filterval=pctinfo[lbl]["magfilt"],
                **magpct_options,
            )
            plt.title(title_fmt.format(hdr))
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.grid(True)
        plt.tight_layout(**tight_layout_args)
        if isinstance(filename, str):
            plt.savefig(filename + ".magpct.png")
    finally:
        # always release the figure, even if plotting raised
        _figure_off(show_figures)
def _plot_histogram(
    pctinfo,
    names,
    desc,
    doabsmax,
    filename,
    histogram_inc,
    maxhdr,
    minhdr,
    absmhdr,
    show_figures,
    tight_layout_args,
):
    """
    Generate bar-chart histograms of the % diffs.

    One panel per available comparison ("mx", "mn", "amx" entries of
    `pctinfo`), using the precomputed histogram data in
    ``pctinfo[lbl]["hsto"]``. If `filename` is a string, the figure is
    saved to ``filename + ".histogram.png"``.
    """
    title_fmt = f"{desc} - {{}} Comparison Histogram"
    xlabel = f"% Diff of {names[0]} vs {names[1]}"
    ylabel = "Percent Occurrence (%)"
    _figure_on("Histogram - " + desc, doabsmax, show_figures)
    try:
        for lbl, hdr, sp in (("mx", maxhdr, 311), ("mn", minhdr, 312), ("amx", absmhdr, 313)):
            _prep_subplot(pctinfo, sp)
            if lbl not in pctinfo:
                continue
            x = pctinfo[lbl]["hsto"][:, 0]
            y = pctinfo[lbl]["hsto"][:, 2]
            # color-code severity: blue <= 5%, magenta 5-10%, red > 10%
            absx = abs(x)
            colors = ["b"] * len(x)
            for i in ((absx > 5) & (absx <= 10)).nonzero()[0]:
                colors[i] = "m"
            for i in (absx > 10).nonzero()[0]:
                colors[i] = "r"
            plt.bar(x, y, width=histogram_inc, color=colors, align="center")
            plt.title(title_fmt.format(hdr))
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            # keep at least a +/- 5% window so tiny diffs remain readable
            if abs(max(plt.xlim(), key=abs)) < 5:
                plt.xlim(-5, 5)
            plt.grid(True)
        plt.tight_layout(**tight_layout_args)
        if isinstance(filename, str):
            plt.savefig(filename + ".histogram.png")
    finally:
        # always release the figure, even if plotting raised
        _figure_off(show_figures)
def rptpct1(
mxmn1,
mxmn2,
filename,
*,
title="PERCENT DIFFERENCE REPORT",
names=("Self", "Reference"),
desc=None,
filterval=None,
labels=None,
units=None,
ignorepv=None,
uf_reds=None,
use_range=True,
numform=None,
prtbad=None,
prtbadh=None,
prtbadl=None,
flagbad=None,
flagbadh=None,
flagbadl=None,
dohistogram=True,
histogram_inc=1.0,
domagpct=True,
magpct_options=None,
doabsmax=False,
shortabsmax=False,
roundvals=-1,
rowhdr="Row",
deschdr="Description",
maxhdr="Maximum",
minhdr="Minimum",
absmhdr="Abs-Max",
perpage=-1,
tight_layout_args=None,
show_figures=False,
align_by_label=True,
):
"""
Write a percent difference report between 2 sets of max/min data
Parameters
----------
mxmn1 : 2d array_like or SimpleNamespace
The max/min data to compare to the `mxmn2` set. If 2-column
array_like, its columns are: [max, min]. If SimpleNamespace,
it must be as defined in :class:`DR_Results` and have these
members:
.. code-block:: none
.ext = [max, min]
.drminfo = SimpleNamespace which has (at least):
.desc = one line description of category
.filterval = the filter value; (see `filterval`
description below)
.labels = a list of descriptions; one per row
.ignorepv = these rows will get 'n/a' for % diff
.units = string with units
.uf_reds = uncertainty factors
Note that the inputs `desc`, `labels`, etc, override the
values above.
mxmn2 : 2d array_like or SimpleNamespace
The reference set of max/min data. Format is the same as
`mxmn1`.
.. note::
If both `mxmn1` and `mxmn2` are SimpleNamespaces and have
the ``.drminfo.labels`` attribute, this routine will, by
default, use the labels to align the data sets for
comparison. To prevent this, set the `align_by_label`
parameter to False.
filename : string or file_like or 1 or None
Either a name of a file, or is a file_like object as returned
by :func:`open` or :class:`io.StringIO`. Input as integer 1 to
write to stdout. Can also be the name of a directory or None;
in these cases, a GUI is opened for file selection.
title : string; must be named; optional
Title for the report
names : list/tuple; must be named; optional
Two (short) strings identifying the two sets of data
desc : string or None; must be named; optional
A one line description of the table. Overrides
`mxmn1.drminfo.desc`. If neither are input,
'No description provided' is used.
filterval : scalar, 1d array_like or None; must be named; optional
Numbers with absolute value <= than `filterval` will get a
'n/a' % diff. If vector, length must match number of rows in
`mxmn1` and `mxmn2` data. Overrides `mxmn1.drminfo.filterval`.
If neither are input, `filterval` is set to 1.e-6.
labels : list or None; must be named; optional
A list of strings briefly describing each row. Overrides
`mxmn1.drminfo.labels`. If neither are input,
``['Row 1','Row 2',...]`` is used.
units : string or None; must be named; optional
Specifies the units. Overrides `mxmn1.drminfo.units`. If
neither are input, 'Not specified' is used.
ignorepv : 1d array or None; must be named; optional
0-offset index vector specifying which rows of `mxmn1` to
ignore (they get the 'n/a' % diff). Overrides
`mxmn1.drminfo.ignorepv`. If neither are input, no rows are
ignored (though `filterval` is still used).
.. note::
`ignorepv` applies *before* any alignment by labels is
done (when `align_by_label` is True, which is the
default).
uf_reds : 1d array or None; must be named; optional
Uncertainty factors: [rigid, elastic, dynamic, static].
Overrides `mxmn1.drminfo.uf_reds`. If neither is input,
'Not specified' is used.
use_range : bool; must be named, optional
If True, the denominator of the % diff calc for both the max
& min for each row is the absolute maximum of the reference
max & min for that row. If False, the denominator is the
applicable reference max or min. A quick example shows why
``use_range=True`` might be useful:
.. code-block:: none
If [max1, min1] = [12345, -10] and
[max2, min2] = [12300, 50]
Then:
% diff = [0.37%, 0.49%] if use_range is True
% diff = [0.37%, 120.00%] if use_range is False
Note that the sign of the % diff is defined such that a
positive % diff means an exceedance: where ``max1 > max2`` or
``min1 < min2``.
`use_range` is ignored if `doabsmax` is True.
numform : string or None; must be named; optional
Format of the max & min numbers. If None, it is set internally
to be 13 chars wide and depends on the range of numbers to
print:
- if range is "small", numform='{:13.xf}' where "x" ranges
from 0 to 7
- if range is "large", numform='{:13.6e}'
prtbad : scalar or None; must be named; optional
Only print rows where ``abs(%diff) > prtbad``. For example, to
print rows off by more than 5%, use ``prtbad=5``. `prtbad`
takes precedence over `prtbadh` and `prtbadl`.
prtbadh : scalar or None; must be named; optional
Only print rows where ``%diff > prtbadh``. Handy for showing
just the exceedances. `prtbadh` takes precedence over
`prtbadl`.
prtbadl : scalar or None; must be named; optional
Only print rows where ``%diff < prtbadl``. Handy for showing
where reference rows are higher.
flagbad : scalar or None; must be named; optional
Flag % diffs where ``abs(%diff) > flagbad``. Works similar to
`prtbad`. The flag is an asterisk (*).
flagbadh : scalar or None; must be named; optional
Flag % diffs where ``%diff > flagbadh``. Works similar to
`prtbadh`. Handy for flagging exceedances. `flagbadh` takes
precedence over `flagbadl`.
flagbadl : scalar or None; must be named; optional
Flag % diffs where ``%diff < flagbadl``. Works similar to
`prtbadl`.
dohistogram : bool; must be named; optional
If True, plot the histograms. Plots will be written to
"`filename`.histogram.png".
histogram_inc : scalar; must be named; optional
The histogram increment; defaults to 1.0 (for 1%).
domagpct : bool; must be named; optional
If True, plot the percent differences versus magnitude via
:func:`magpct`. Plots will be written to
"`filename`.magpct.png". Filtering for the "magpct" plot is
controlled by the ``magpct_options['filterval']`` and
``magpct_options['symlogy']`` options. By default, all percent
differences are shown, but the larger values (according to the
`filterval` filter) are emphasized by using a mixed linear/log
y-axis. The percent differences for the `ignorepv` rows are
not plotted.
magpct_options : None or dict; must be named; optional
If None, it is internally reset to::
magpct_options = {'filterval': 'filterval'}
Use this parameter to provide any options to :func:`magpct`
but note that the `filterval` option for :func:`magpct` is
treated specially. Here, in addition to any of the values that
:func:`magpct` accepts, it can also be set to the string
"filterval" as in the default case shown above. In that case,
``magpct_options['filterval']`` gets internally reset to the
initial value of `filterval` (which is None by default).
.. note::
The call to :func:`magpct` is *after* applying `ignorepv`
and doing any data aligning by labels.
.. note::
The two filter value options (`filterval` and
``magpct_options['filterval']``) have different defaults:
None and 'filterval`, respectively. They also differ on how
the ``None`` setting is used: for `filterval`, None is
replaced by 1.e-6 while for `magpct_filterval`, None means
that the "magpct" plot will not have any filters applied at
all.
.. note::
The above means that, if you accept the default values for
`filterval` and for ``magpct_options['filterval']``, then
tables and the histogram plots will use a `filterval` of
1.e-6 while the "magpct" plots will use no filter (it
compares everything except perfect zeros).
doabsmax : bool; must be named; optional
If True, compare only absolute maximums.
shortabsmax : bool; must be named; optional
If True, set ``doabsmax=True`` and do not print the max1 and
min1 columns.
roundvals : integer; must be named; optional
Round max & min numbers at specified decimal. If negative, no
rounding.
rowhdr : string; must be named; optional
Header for row number column
deschdr : string; must be named; optional
Header for description column
maxhdr : string; must be named; optional
Header for the column 1 data
minhdr : string; must be named; optional
Header for the column 2 data
absmhdr : string; must be named; optional
Header for abs-max column
perpage : integer; must be named; optional
The number of lines to write perpage. If < 1, there is no
limit (one page).
tight_layout_args : dict or None; must be named; optional
Arguments for :func:`matplotlib.pyplot.tight_layout`. If None,
defaults to ``{'pad': 3.0}``.
show_figures : bool; must be named; optional
If True, plot figures will be displayed on the screen for
interactive viewing. Warning: there may be many figures.
align_by_label : bool; must be named; optional
If True, use labels to align the two sets of data for
comparison. See note above under the `mxmn2` option.
Returns
-------
pdiff_info : dict
Dictionary with 'amx' (abs-max), 'mx' (max), and 'mn' keys:
.. code-block:: none
<class 'dict'>[n=3]
'amx': <class 'dict'>[n=5]
'hsto' : float64 ndarray 33 elems: (11, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' -2.46', ' -1.50', ...
'mn' : <class 'dict'>[n=5]
'hsto' : float64 ndarray 33 elems: (11, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' 1.55', ' 1.53', ...
'mx' : <class 'dict'>[n=5]
'hsto' : float64 ndarray 27 elems: (9, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' -2.46', ' -1.50', ...
Where:
.. code-block:: none
'hsto' : output of :func:`histogram`: [center, count, %]
'mag' : inputs to :func:`magpct`
'pct' : percent differences
'prtpv' : rows to print partition vector
'spct' : string version of 'pct'
Examples
--------
>>> import numpy as np
>>> from pyyeti import cla
>>> ext1 = [[120.0, -8.0],
... [8.0, -120.0]]
>>> ext2 = [[115.0, -5.0],
... [10.0, -125.0]]
Run :func:`rptpct1` multiple times to get a more complete picture
of all the output (the table is very wide). Also, the plots will
be turned off for this example.
First, the header:
>>> opts = {'domagpct': False, 'dohistogram': False}
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
<BLANKLINE>
Description: No description provided
Uncertainty: Not specified
Units: Not specified
Filter: 1e-06
Notes: % Diff = +/- abs(Self-Reference)/max(abs(Reference...
Sign set such that positive % differences indicate...
Date: ...
...
Then, the max/min/absmax percent difference table in 3 calls:
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
Self Reference ...
Row Description Maximum Maximum % Diff ...
------- ----------- ------------- ------------- ------- ...
1 Row 1 120.00000 115.00000 4.35 ...
2 Row 2 8.00000 10.00000 -1.60 ...
...
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
... Self Reference ...
Row Description ... Minimum Minimum % Diff ...
------- ----------- ...------------- ------------- ------- ...
1 Row 1 ... -8.00000 -5.00000 2.61 ...
2 Row 2 ... -120.00000 -125.00000 -4.00 ...
...
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
... Self Reference
Row Description ... Abs-Max Abs-Max % Diff
------- ----------- ...------------- ------------- -------
1 Row 1 ... 120.00000 115.00000 4.35
2 Row 2 ... 120.00000 125.00000 -4.00
...
Finally, the histogram summaries:
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
No description provided - Maximum Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-2.00 1 50.00
4.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
50.0% of values are within 2%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-1.60, 4.35,...
<BLANKLINE>
<BLANKLINE>
No description provided - Minimum Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-4.00 1 50.00
3.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-4.00, 2.61,...
<BLANKLINE>
<BLANKLINE>
No description provided - Abs-Max Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-4.00 1 50.00
4.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-4.00, 4.35,...
"""
if tight_layout_args is None:
tight_layout_args = {"pad": 3.0}
if magpct_options is None:
magpct_options = {"filterval": "filterval"}
else:
magpct_options = magpct_options.copy()
# magpct_options['filterval'] get special treatment:
magpct_filterval = magpct_options["filterval"]
del magpct_options["filterval"]
if isinstance(magpct_filterval, str):
if magpct_filterval != "filterval":
raise ValueError(
"``magpct_options['filterval']`` is an invalid "
f"string: {magpct_filterval!r} (can only "
"be 'filterval' if a string)"
)
# copy the initial `filterval` setting:
magpct_filterval = filterval
infovars = (
"desc",
"filterval",
"magpct_filterval",
"labels",
"units",
"ignorepv",
"uf_reds",
)
dct = locals()
infodct = {n: dct[n] for n in infovars}
del dct
# check mxmn1:
if isinstance(mxmn1, SimpleNamespace):
sns = mxmn1.drminfo
for key, value in infodct.items():
if value is None:
infodct[key] = getattr(sns, key, None)
del sns
mxmn1 = mxmn1.ext
else:
mxmn1 = np.atleast_2d(mxmn1)
row_number = np.arange(1, mxmn1.shape[0] + 1)
# check mxmn2:
if isinstance(mxmn2, SimpleNamespace) and getattr(mxmn2, "drminfo", None):
labels2 = mxmn2.drminfo.labels
mxmn2 = mxmn2.ext
if align_by_label:
# use labels and labels2 to align data; this is in case
# the two sets of results recover some of the same items,
# but not all
mxmn1, mxmn2, row_number = _align_mxmn(
mxmn1, mxmn2, labels2, row_number, infodct
)
else:
mxmn2 = np.atleast_2d(mxmn2)
desc = infodct["desc"]
if desc is None:
desc = "No description provided"
R = mxmn1.shape[0]
if R != mxmn2.shape[0]:
raise ValueError(
f"`mxmn1` and `mxmn2` have a different number of rows: "
f"{R} vs {mxmn2.shape[0]} for category with `desc` = {desc}"
)
filterval = infodct["filterval"]
magpct_filterval = infodct["magpct_filterval"]
labels = infodct["labels"]
units = infodct["units"]
ignorepv = infodct["ignorepv"]
uf_reds = infodct["uf_reds"]
del infodct
if filterval is None:
filterval = 1.0e-6
filterval = _proc_filterval(filterval, R, "filterval")
magpct_filterval = _proc_filterval(
magpct_filterval, R, "magpct_options['filterval']"
)
if labels is None:
labels = [f"Row {i + 1:6d}" for i in range(R)]
elif len(labels) != R:
raise ValueError(
"length of `labels` does not match number"
f" of rows in `mxmn1`: {len(labels)} vs {R} for "
f"category with `desc` = {desc}"
)
if units is None:
units = "Not specified"
if numform is None:
numform = _get_numform(mxmn1)
pdhdr = "% Diff"
nastring = "n/a "
comppv = np.ones(R, bool)
if ignorepv is not None:
comppv[ignorepv] = False
# for row labels:
w = max(11, len(max(labels, key=len)))
frm = f"{{:{w}}}"
# start preparing for writer.formheader:
print_info = SimpleNamespace(
headers1=["", ""],
headers2=[rowhdr, deschdr],
formats=["{:7d}", frm],
printargs=[row_number, labels],
widths=[7, w],
seps=[0, 2],
justs=["c", "l"],
)
if shortabsmax:
doabsmax = True
if doabsmax:
use_range = False
if roundvals > -1:
mxmn1 = np.round(mxmn1, roundvals)
mxmn2 = np.round(mxmn2, roundvals)
prtbads = (prtbad, prtbadh, prtbadl)
flagbads = (flagbad, flagbadh, flagbadl)
# compute percent differences
pctinfo = {}
kwargs = dict(
names=names,
mxmn1=mxmn1,
comppv=comppv,
histogram_inc=histogram_inc,
numform=numform,
prtbads=prtbads,
flagbads=flagbads,
maxhdr=maxhdr,
minhdr=minhdr,
absmhdr=absmhdr,
pdhdr=pdhdr,
nastring=nastring,
doabsmax=doabsmax,
shortabsmax=shortabsmax,
print_info=print_info,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
mx1 = np.nanmax(abs(mxmn1), axis=1)
mx2 = np.nanmax(abs(mxmn2), axis=1)
if not doabsmax:
max1, min1 = mxmn1[:, 0], mxmn1[:, 1]
max2, min2 = mxmn2[:, 0], mxmn2[:, 1]
mxmn_b = mxmn2 if use_range else None
prtpv = np.zeros(R, bool)
for i in zip(
("mx", "mn", "amx"),
(max1, min1, mx1),
(max2, min2, mx2),
(True, False, True),
(maxhdr, minhdr, absmhdr),
):
lbl, ext1, ext2, ismax, valhdr = i
pctinfo[lbl] = _proc_pct(
ext1,
ext2,
filterval,
magpct_filterval,
mxmn_b=mxmn_b,
ismax=ismax,
valhdr=valhdr,
**kwargs,
)
prtpv |= pctinfo[lbl]["prtpv"]
prtpv &= comppv
else:
pctinfo["amx"] = _proc_pct(
mx1,
mx2,
filterval,
magpct_filterval,
mxmn_b=None,
ismax=True,
valhdr=absmhdr,
**kwargs,
)
prtpv = pctinfo["amx"]["prtpv"]
hu, frm = writer.formheader(
[print_info.headers1, print_info.headers2],
print_info.widths,
print_info.formats,
sep=print_info.seps,
just=print_info.justs,
)
# format page header:
misc = _get_filtline(filterval) + _get_noteline(use_range, names, prtbads, flagbads)
hdrs = _get_rpt_headers(desc=desc, uf_reds=uf_reds, units=units, misc=misc)
header = title + "\n\n" + hdrs + "\n"
imode = plt.isinteractive()
plt.interactive(show_figures)
try:
if domagpct:
_plot_magpct(
pctinfo,
names,
desc,
doabsmax,
filename,
magpct_options,
use_range,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
)
if dohistogram:
_plot_histogram(
pctinfo,
names,
desc,
doabsmax,
filename,
histogram_inc,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
)
finally:
plt.interactive(imode)
# write results
    @ytools.write_text_file
    def _wtcmp(f, header, hu, frm, printargs, perpage, prtpv, pctinfo, desc):
        """Write the percent-difference table and histogram summaries to *f*.

        Closes over ``maxhdr``/``minhdr``/``absmhdr`` from the enclosing
        scope. The ``write_text_file`` decorator supplies the open file
        handle ``f`` when the outer call passes a filename.

        :param header: page header string (title plus report headers)
        :param hu: column-header lines from ``writer.formheader``
        :param frm: row format string from ``writer.formheader``
        :param printargs: list of per-column data sequences to print
        :param perpage: rows per page; < 1 means everything on one page
        :param prtpv: bool partition vector of rows to print
        :param pctinfo: percent-diff results keyed by 'mx'/'mn'/'amx'
        :param desc: description string used in the histogram titles
        """
        # Convert the bool partition vector into row indices:
        prtpv = prtpv.nonzero()[0]
        if perpage < 1:
            # one additional in case size is zero
            perpage = prtpv.size + 1
        # number of pages, rounding up:
        pages = (prtpv.size + perpage - 1) // perpage
        if prtpv.size < len(printargs[0]):
            # some rows are filtered out; down-select each print column:
            for i, item in enumerate(printargs):
                printargs[i] = [item[j] for j in prtpv]
        tabhead = header + hu
        pager = "\n"  # + chr(12)
        for p in range(pages):
            if p > 0:
                f.write(pager)
            f.write(tabhead)
            # row slice for this page:
            b = p * perpage
            e = b + perpage
            writer.vecwrite(f, frm, *printargs, so=slice(b, e))
        f.write(pager)
        # append the histogram summary for each comparison present:
        for lbl, hdr in zip(("mx", "mn", "amx"), (maxhdr, minhdr, absmhdr)):
            if lbl in pctinfo:
                f.write(_get_histogram_str(desc, hdr, pctinfo[lbl]))
_wtcmp(
filename, header, hu, frm, print_info.printargs, perpage, prtpv, pctinfo, desc
)
return pctinfo
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.interactive",
"pyyeti.writer.vecwrite",
"numpy.arange",
"numpy.atleast_2d",
"pyyeti.locate.list_intersect",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"pyyeti.writer.formheader",
"io.StringIO",
"pyyeti.locate.... | [((514, 548), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (533, 548), True, 'import numpy as np\n'), ((933, 953), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (946, 953), True, 'import numpy as np\n'), ((7410, 7450), 'pyyeti.ytools.histogram', 'ytools.histogram', (['pct_ret', 'histogram_inc'], {}), '(pct_ret, histogram_inc)\n', (7426, 7450), False, 'from pyyeti import ytools, locate, writer\n'), ((29772, 29804), 'numpy.arange', 'np.arange', (['(1)', '(mxmn1.shape[0] + 1)'], {}), '(1, mxmn1.shape[0] + 1)\n', (29781, 29804), True, 'import numpy as np\n'), ((31580, 31596), 'numpy.ones', 'np.ones', (['R', 'bool'], {}), '(R, bool)\n', (31587, 31596), True, 'import numpy as np\n'), ((31810, 31984), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'headers1': "['', '']", 'headers2': '[rowhdr, deschdr]', 'formats': "['{:7d}', frm]", 'printargs': '[row_number, labels]', 'widths': '[7, w]', 'seps': '[0, 2]', 'justs': "['c', 'l']"}), "(headers1=['', ''], headers2=[rowhdr, deschdr], formats=[\n '{:7d}', frm], printargs=[row_number, labels], widths=[7, w], seps=[0, \n 2], justs=['c', 'l'])\n", (31825, 31984), False, 'from types import SimpleNamespace\n'), ((34068, 34217), 'pyyeti.writer.formheader', 'writer.formheader', (['[print_info.headers1, print_info.headers2]', 'print_info.widths', 'print_info.formats'], {'sep': 'print_info.seps', 'just': 'print_info.justs'}), '([print_info.headers1, print_info.headers2], print_info.\n widths, print_info.formats, sep=print_info.seps, just=print_info.justs)\n', (34085, 34217), False, 'from pyyeti import ytools, locate, writer\n'), ((34511, 34530), 'matplotlib.pyplot.isinteractive', 'plt.isinteractive', ([], {}), '()\n', (34528, 34530), True, 'import matplotlib.pyplot as plt\n'), ((34535, 34564), 'matplotlib.pyplot.interactive', 'plt.interactive', (['show_figures'], {}), '(show_figures)\n', (34550, 34564), True, 'import matplotlib.pyplot as 
plt\n'), ((1586, 1618), 'pyyeti.locate.index2bool', 'locate.index2bool', (['value', 'oldlen'], {}), '(value, oldlen)\n', (1603, 1618), False, 'from pyyeti import ytools, locate, writer\n'), ((1861, 1910), 'pyyeti.locate.list_intersect', 'locate.list_intersect', (["infodct['labels']", 'labels2'], {}), "(infodct['labels'], labels2)\n", (1882, 1910), False, 'from pyyeti import ytools, locate, writer\n'), ((5512, 5522), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5520, 5522), False, 'from io import StringIO\n'), ((5537, 5603), 'pyyeti.writer.vecwrite', 'writer.vecwrite', (['f', '""" {:8.2f} {:8.0f} {:7.2f}\n"""', 'pctcount'], {}), "(f, ' {:8.2f} {:8.0f} {:7.2f}\\n', pctcount)\n", (5552, 5603), False, 'from pyyeti import ytools, locate, writer\n'), ((9302, 9335), 'matplotlib.pyplot.figure', 'plt.figure', (['name'], {'figsize': 'figsize'}), '(name, figsize=figsize)\n', (9312, 9335), True, 'import matplotlib.pyplot as plt\n'), ((9344, 9353), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9351, 9353), True, 'import matplotlib.pyplot as plt\n'), ((9372, 9399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9382, 9399), True, 'import matplotlib.pyplot as plt\n'), ((9466, 9477), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9475, 9477), True, 'import matplotlib.pyplot as plt\n'), ((10880, 10917), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '(**tight_layout_args)\n', (10896, 10917), True, 'import matplotlib.pyplot as plt\n'), ((12411, 12448), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '(**tight_layout_args)\n', (12427, 12448), True, 'import matplotlib.pyplot as plt\n'), ((29734, 29754), 'numpy.atleast_2d', 'np.atleast_2d', (['mxmn1'], {}), '(mxmn1)\n', (29747, 29754), True, 'import numpy as np\n'), ((30311, 30331), 'numpy.atleast_2d', 'np.atleast_2d', (['mxmn2'], {}), '(mxmn2)\n', (30324, 30331), True, 'import numpy as np\n'), ((32165, 32191), 'numpy.round', 
'np.round', (['mxmn1', 'roundvals'], {}), '(mxmn1, roundvals)\n', (32173, 32191), True, 'import numpy as np\n'), ((32208, 32234), 'numpy.round', 'np.round', (['mxmn2', 'roundvals'], {}), '(mxmn2, roundvals)\n', (32216, 32234), True, 'import numpy as np\n'), ((32797, 32822), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (32820, 32822), False, 'import warnings\n'), ((32832, 32901), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""All-NaN (slice|axis) encountered"""'], {}), "('ignore', 'All-NaN (slice|axis) encountered')\n", (32855, 32901), False, 'import warnings\n'), ((33166, 33183), 'numpy.zeros', 'np.zeros', (['R', 'bool'], {}), '(R, bool)\n', (33174, 33183), True, 'import numpy as np\n'), ((35333, 35355), 'matplotlib.pyplot.interactive', 'plt.interactive', (['imode'], {}), '(imode)\n', (35348, 35355), True, 'import matplotlib.pyplot as plt\n'), ((6093, 6111), 'numpy.round', 'np.round', (['(num * 10)'], {}), '(num * 10)\n', (6101, 6111), True, 'import numpy as np\n'), ((9664, 9679), 'matplotlib.pyplot.subplot', 'plt.subplot', (['sp'], {}), '(sp)\n', (9675, 9679), True, 'import matplotlib.pyplot as plt\n'), ((10857, 10871), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10865, 10871), True, 'import matplotlib.pyplot as plt\n'), ((10968, 11005), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.magpct.png')"], {}), "(filename + '.magpct.png')\n", (10979, 11005), True, 'import matplotlib.pyplot as plt\n'), ((12388, 12402), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12396, 12402), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12539), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.histogram.png')"], {}), "(filename + '.histogram.png')\n", (12510, 12539), True, 'import matplotlib.pyplot as plt\n'), ((10799, 10813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xl'], {}), '(xl)\n', (10809, 10813), True, 'import matplotlib.pyplot as plt\n'), 
((10830, 10844), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yl'], {}), '(yl)\n', (10840, 10844), True, 'import matplotlib.pyplot as plt\n'), ((12099, 12155), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'width': 'width', 'color': 'colors', 'align': '"""center"""'}), "(x, y, width=width, color=colors, align='center')\n", (12106, 12155), True, 'import matplotlib.pyplot as plt\n'), ((12218, 12232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xl'], {}), '(xl)\n', (12228, 12232), True, 'import matplotlib.pyplot as plt\n'), ((12249, 12263), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yl'], {}), '(yl)\n', (12259, 12263), True, 'import matplotlib.pyplot as plt\n'), ((9627, 9636), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9634, 9636), True, 'import matplotlib.pyplot as plt\n'), ((12360, 12375), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (12368, 12375), True, 'import matplotlib.pyplot as plt\n'), ((12292, 12302), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (12300, 12302), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
from attacks.image_save_runner import ImageSaveAttackRunner
from attacks.selective_universal import SelectiveUniversal
from dataset import Dataset
from models import create_ensemble
from models.model_configs import config_from_string
# Command-line interface for running the attack script.
parser = argparse.ArgumentParser(description='Defence')
parser.add_argument('--input_dir', metavar='DIR',
                    help='Input directory with images.')
parser.add_argument('--output_dir', metavar='FILE',
                    help='Output directory to save images.')
parser.add_argument('--max_epsilon', type=int, default=16, metavar='N',
                    help='Maximum size of adversarial perturbation. (default: 16.0)')
# Pre-computed universal perturbations (.npy files) to try.
parser.add_argument('--npy_files', nargs='+', type=str)
parser.add_argument('--ensemble', nargs='+', help='Class names for the defensive ensemble.')
parser.add_argument('--ensemble_weights', nargs='+', type=float,
                    help='Weights for weighted geometric mean of output probs')
parser.add_argument('--checkpoint_paths', nargs='+', help='Paths to checkpoint files for each model.')
# When set, also try horizontally mirrored versions of each perturbation.
parser.add_argument('--try_mirrors', action='store_true', default=False)
def main():
    """Run the SelectiveUniversal attack over the input images and save results."""
    args = parser.parse_args()
    # Build the defensive ensemble from the requested model configurations.
    model_cfgs = [config_from_string(name) for name in args.ensemble]
    ensemble = create_ensemble(model_cfgs, args.ensemble_weights, args.checkpoint_paths).cuda()
    ensemble.eval()
    images = Dataset(args.input_dir, target_file='')
    attack = SelectiveUniversal(
        ensemble,
        args.npy_files,
        max_epsilon=args.max_epsilon,
        try_mirrors=args.try_mirrors,
    )
    runner = ImageSaveAttackRunner(images, args.output_dir)
    # SelectiveUniversal only supports a batch size of 1.
    runner.run(attack, 1)


if __name__ == '__main__':
    main()
| [
"dataset.Dataset",
"argparse.ArgumentParser",
"models.model_configs.config_from_string",
"attacks.selective_universal.SelectiveUniversal",
"models.create_ensemble",
"attacks.image_save_runner.ImageSaveAttackRunner"
] | [((261, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Defence"""'}), "(description='Defence')\n", (284, 307), False, 'import argparse\n'), ((1215, 1254), 'dataset.Dataset', 'Dataset', (['args.input_dir'], {'target_file': '""""""'}), "(args.input_dir, target_file='')\n", (1222, 1254), False, 'from dataset import Dataset\n'), ((1447, 1560), 'attacks.selective_universal.SelectiveUniversal', 'SelectiveUniversal', (['target_model', 'args.npy_files'], {'max_epsilon': 'args.max_epsilon', 'try_mirrors': 'args.try_mirrors'}), '(target_model, args.npy_files, max_epsilon=args.\n max_epsilon, try_mirrors=args.try_mirrors)\n', (1465, 1560), False, 'from attacks.selective_universal import SelectiveUniversal\n'), ((1610, 1657), 'attacks.image_save_runner.ImageSaveAttackRunner', 'ImageSaveAttackRunner', (['dataset', 'args.output_dir'], {}), '(dataset, args.output_dir)\n', (1631, 1657), False, 'from attacks.image_save_runner import ImageSaveAttackRunner\n'), ((1268, 1289), 'models.model_configs.config_from_string', 'config_from_string', (['s'], {}), '(s)\n', (1286, 1289), False, 'from models.model_configs import config_from_string\n'), ((1334, 1401), 'models.create_ensemble', 'create_ensemble', (['cfgs', 'args.ensemble_weights', 'args.checkpoint_paths'], {}), '(cfgs, args.ensemble_weights, args.checkpoint_paths)\n', (1349, 1401), False, 'from models import create_ensemble\n')] |
from google.appengine.ext import ndb
CACHE_DATA = {}
def get(cache_key):
    """Return the cached value for *cache_key* in the current ndb context, or None."""
    # Scope the key to the active ndb context so different contexts
    # (requests) never see each other's entries.
    ctx_hash = ndb.get_context().__hash__()
    return CACHE_DATA.get('{}:{}'.format(cache_key, ctx_hash))
def set(cache_key, value):
    """Store *value* under *cache_key*, scoped to the current ndb context."""
    # Same context-hash namespacing as in get() above.
    ctx_hash = ndb.get_context().__hash__()
    CACHE_DATA['{}:{}'.format(cache_key, ctx_hash)] = value
| [
"google.appengine.ext.ndb.get_context"
] | [((124, 141), 'google.appengine.ext.ndb.get_context', 'ndb.get_context', ([], {}), '()\n', (139, 141), False, 'from google.appengine.ext import ndb\n'), ((278, 295), 'google.appengine.ext.ndb.get_context', 'ndb.get_context', ([], {}), '()\n', (293, 295), False, 'from google.appengine.ext import ndb\n')] |
"""
Tools for calculations
"""
import warnings
from aiida.tools import CalculationTools
from aiida.common import InputValidationError
from aiida.orm import CalcJobNode, Dict
from aiida.common.links import LinkType
from aiida.plugins import DataFactory
from aiida.engine import CalcJob, ProcessBuilder
from aiida_castep.common import INPUT_LINKNAMES, OUTPUT_LINKNAMES
__all__ = [
'CastepCalcTools', 'create_restart', 'castep_input_summary',
'update_parameters', 'use_pseudos_from_family'
]
class CastepCalcTools(CalculationTools):
    """AiiDA ``CalculationTools`` extension for CASTEP calculation nodes.

    Provides input summaries, comparison against other calculations and
    restart-builder creation, exposed through the node's ``.tools``
    attribute.
    """
    def get_castep_input_summary(self):
        """Return a summary dict of this node's inputs (see :func:`castep_input_summary`)."""
        return castep_input_summary(self._node)
    def compare_with(self, the_other_calc, reverse=False):
        """
        Compare with another calculation
        Look for difference in get_castep_input_summary functions
        :param the_other_calc: pk or uuid or node of the calculation to
         compare with.
        :param reverse: reverse the comparison, by default this node
         is the "new" and the one compared with is "old".
        :returns: a ``deepdiff.DeepDiff`` of the two input summaries
        """
        if isinstance(the_other_calc, (int, str)):
            # a pk or uuid was given - load the actual node first
            from aiida.orm import load_node
            calc2 = load_node(the_other_calc)
        else:
            calc2 = the_other_calc
        from deepdiff import DeepDiff
        this_param = castep_input_summary(self._node)
        other_param = castep_input_summary(calc2)
        # DeepDiff(old, new): by default the other calculation is "old"
        if reverse is True:
            res = DeepDiff(this_param, other_param)
        else:
            res = DeepDiff(other_param, this_param)
        return res
    def create_restart(self,
                       ignore_state=False,
                       restart_mode='restart',
                       use_output_structure=False,
                       **kwargs):
        """
        Return a ``ProcessBuilder`` set up to restart this calculation.

        :param ignore_state: allow restarting even when ``exit_status``
            is non-zero.
        :param restart_mode: 'restart' or 'continuation'; passed to the
            module-level :func:`create_restart`.
        :param use_output_structure: if True, use the output structure of
            this calculation as the input structure of the restart.
        :param kwargs: forwarded to the module-level :func:`create_restart`.
        :raises RuntimeError: if ``exit_status`` is non-zero and
            `ignore_state` is not set.
        """
        if self._node.exit_status != 0 and not ignore_state:
            raise RuntimeError(
                'exit_status is not 0. Set ignore_state to ignore')
        builder = create_restart(self._node.get_builder_restart(),
                                 calcjob=self._node,
                                 restart_mode=restart_mode,
                                 **kwargs)
        # Carry over the label
        builder.metadata.label = self._node.label
        if use_output_structure is True:
            builder[
                INPUT_LINKNAMES['structure']] = self._node.outputs.__getattr__(
                    OUTPUT_LINKNAMES['structure'])
        # continuation/reuse needs the remote folder of the parent run
        if restart_mode == 'continuation' or kwargs.get('reuse'):
            builder[INPUT_LINKNAMES[
                'parent_calc_folder']] = self._node.outputs.__getattr__(
                    'remote_folder')
        return builder
def use_pseudos_from_family(builder, family_name):
    """
    Set the ``pseudos`` port namespace for a builder using a pseudo family name

    :note: The structure must already be set in the builder.

    :param builder: ProcessBuilder instance to be processed, it must have a structure
    :param family_name: the name of the group containing the pseudos
    :returns: The same builder with the pseudopotentials set
    :raises RuntimeError: if no structure has been set on the builder
    """
    from aiida_castep.data import get_pseudos_from_structure
    structure = builder.get(INPUT_LINKNAMES['structure'], None)
    if structure is None:
        raise RuntimeError('The builder must have a StructureData')
    # Resolve a {kind_name: pseudo_node} mapping from the family group and
    # assign each pseudo to the matching kind in the port namespace.
    kind_pseudo_dict = get_pseudos_from_structure(structure, family_name)
    for kind, pseudo in kind_pseudo_dict.items():
        builder.pseudos.__setattr__(kind, pseudo)
    return builder
def castep_input_summary(calc):
    """
    Convenient function for getting a summary of the
    input of this calculation
    :param calc: A CalcJobNode or ProcessBuilder or a nested input dictionary
    :returns: A dictionary summarising the inputs (parameters, kpoints,
        structure, code, scheduler options, pseudopotentials, parent
        calculation, ...)
    """
    out_info = {}
    # Check what is passed; normalise the three supported types into an
    # input dictionary plus metadata/options mappings.
    # NOTE(review): if `calc` is none of the three types below, the local
    # variables are never bound and a NameError follows - callers must
    # pass a supported type.
    if isinstance(calc, CalcJobNode):
        inp_dict = calc.get_incoming(link_type=(LinkType.INPUT_CALC,
                                                LinkType.INPUT_WORK)).nested()
        options = calc.get_options()
        metadata = {}  # Metadata is empty when Node is passed
        is_node = True
    elif isinstance(calc, ProcessBuilder):
        # Case of builder
        inp_dict = calc._data
        metadata = calc.metadata._data
        options = calc.metadata.get('options', {})
        is_node = False
    elif isinstance(calc, dict):
        # Case of a input dictionary
        inp_dict = calc
        metadata = calc.get('metadata', {})
        options = metadata.get('options', {})
        is_node = False
    def get_node(label):
        """Get node from input dictionary"""
        return inp_dict.get(INPUT_LINKNAMES[label])
    # Pull out the individual input nodes (each may be None if absent):
    in_param = get_node('parameters')
    in_kpn = get_node('kpoints')
    in_settings = get_node('settings')
    in_structure = get_node('structure')
    in_code = inp_dict.get('code')
    in_remote = get_node('parent_calc_folder')
    pseudos = inp_dict.get('pseudos')
    # Start from the raw CELL/PARAM dictionary:
    param_dict = in_param.get_dict()
    out_info.update(param_dict)
    out_info["kpoints"] = in_kpn.get_description()
    out_info["structure"] = {
        "formula": in_structure.get_formula(),
        "cell": in_structure.cell,
        "label": in_structure.label
    }
    out_info["code"] = in_code
    out_info["computer"] = calc.computer if is_node else in_code.computer
    # Scheduler-related options:
    out_info["resources"] = options.get('resources')
    out_info["custom_scheduler_commands"] = options.get(
        'custom_scheduler_commands')
    out_info["qos"] = options.get('qos')
    out_info["account"] = options.get('account')
    out_info["wallclock"] = options.get('max_wallclock_seconds')
    out_info["label"] = calc.label if is_node else metadata.get('label')
    out_info["description"] = calc.description if is_node else metadata.get(
        'description')
    # Show the parent calculation whose RemoteData is linked to the node
    if in_remote is not None:
        input_calc = [
            n.node for n in in_remote.get_incoming(link_type=LinkType.CREATE)
        ]
        assert len(
            input_calc
        ) < 2, "More than one JobCalculation found, something seriously wrong"
        if input_calc:
            input_calc = input_calc[0]
            out_info["parent_calc"] = {
                "pk": input_calc.pk,
                "label": input_calc.label
            }
        out_info["parent_calc_folder"] = in_remote
    if in_settings is not None:
        out_info["settings"] = in_settings.get_dict()
    out_info["pseudos"] = pseudos
    return out_info
def update_parameters(inputs, force=False, delete=None, **kwargs):
    """
    Convenient function to update the parameters of the calculation.
    Will automatically set the PARAM or CELL field in the unstored
    ``Dict`` linked to the calculation.
    If no ``Dict`` is linked to the calculation, a new node will be
    created.
    ..note:
      This method relies on the help information to check and assign
      keywords to PARAM or CELL field of the Dict
      (i.e for generating .param and .cell file)
    calc.update_parameters(task="singlepoint")
    :param inputs: builder or nested input dictionary to be updated in place.
    :param force: flag to force the update even if the Dict node is stored.
    :param delete: A list of the keywords to be deleted.
    :param kwargs: flat keyword/value pairs to set.
    :returns: the updated ``inputs``.
    :raises HelperCheckError: if any keyword is not recognised.
    :raises RuntimeError: if the linked Dict is stored and `force` is False.
    """
    param_node = inputs.get(INPUT_LINKNAMES['parameters'])
    # Create the node if none is found
    if param_node is None:
        warnings.warn("No existing Dict node found, creating a new one.")
        param_node = Dict(dict={"CELL": {}, "PARAM": {}})
        inputs[INPUT_LINKNAMES['parameters']] = param_node
    if isinstance(param_node, Dict) and param_node.is_stored:
        if force:
            # Create a new node if the existing node is stored
            param_node = Dict(dict=param_node.get_dict())
            inputs[INPUT_LINKNAMES['parameters']] = param_node
        else:
            raise RuntimeError("The input Dict<{}> is already stored".format(
                param_node.pk))
    # If the `node` is just a plain dict, we keep it that way
    if isinstance(param_node, Dict):
        param_dict = param_node.get_dict()
        py_dict = False
    else:
        param_dict = param_node
        py_dict = True
    # Update the dictionary: the helper splits the flat kwargs into the
    # CELL and PARAM fields and reports any unrecognised keywords.
    from .helper import HelperCheckError, CastepHelper
    helper = CastepHelper()
    dict_update, not_found = helper._from_flat_dict(kwargs)
    if not_found:
        # Build one error message listing every invalid key with a suggestion
        suggest = [helper.get_suggestion(i) for i in not_found]
        error_string = "Following keys are invalid -- "
        for error_key, sug in zip(not_found, suggest):
            error_string += "{}: {}; ".format(error_key, sug)
        raise HelperCheckError(error_string)
    else:
        param_dict["PARAM"].update(dict_update["PARAM"])
        param_dict["CELL"].update(dict_update["CELL"])
    # Delete any keys as requested
    if delete:
        for key in delete:
            tmp1 = param_dict["PARAM"].pop(key, None)
            tmp2 = param_dict["CELL"].pop(key, None)
            if (tmp1 is None) and (tmp2 is None):
                warnings.warn("Key '{}' not found".format(key))
    # Apply the change to the node
    if py_dict:
        inputs[INPUT_LINKNAMES['parameters']] = param_dict
    else:
        param_node.set_dict(param_dict)
    return inputs
def create_restart(inputs,
                   entry_point='castep.castep',
                   calcjob=None,
                   param_update=None,
                   param_delete=None,
                   restart_mode='restart',
                   use_castep_bin=False,
                   parent_folder=None,
                   reuse=False):
    """
    Function to create a restart for a calculation.

    :param inputs: A builder or nested dictionary
    :param entry_point: Name of the entry points
    :param calcjob: accepted but not used in this function -- presumably
        kept for interface compatibility; TODO confirm
    :param param_update: Update the parameters
    :param param_delete: A list of parameters to be deleted
    :param restart_mode: Mode of the restart, 'continuation' or 'restart'
    :param use_castep_bin: Use the 'castep_bin' file instead of check
    :param parent_folder: Remote folder to be used for restart
    :param reuse: Use the reuse mode
    :returns: a new builder with the restart settings applied
    :raises RuntimeError: if ``restart_mode`` is not recognised
    """
    from aiida.plugins import CalculationFactory
    from aiida.engine import ProcessBuilder
    # Create the builder, in any case
    if isinstance(inputs, dict):
        processclass = CalculationFactory(entry_point)
        builder = processclass.get_builder()
    elif isinstance(inputs, ProcessBuilder):
        builder = inputs._process_class.get_builder()
        builder._update(inputs)
    # Update list
    update = {}
    delete = []
    # Set the restart tag: the restart file lives under 'parent/' and is
    # named after the seed with a '.check' or '.castep_bin' suffix
    suffix = '.check' if not use_castep_bin else '.castep_bin'
    if restart_mode == 'continuation':
        update['continuation'] = 'parent/' + builder.metadata.seedname + suffix
        delete.append('reuse')
    elif restart_mode == 'restart' and reuse:
        update['reuse'] = 'parent/' + builder.metadata.seedname + suffix
        delete.append('continuation')
    elif restart_mode is None:
        delete.extend(['continuation', 'reuse'])
    elif restart_mode != 'restart':
        raise RuntimeError('Unknown restart mode: ' + restart_mode)
    if param_update:
        update.update(param_update)
    if param_delete:
        delete.extend(param_delete)
    new_builder = update_parameters(builder,
                                    force=True,
                                    delete=delete,
                                    **update)
    # Set the parent folder
    if parent_folder is not None:
        new_builder[INPUT_LINKNAMES['parent_calc_folder']] = parent_folder
    return new_builder
def validate_input_param(input_dict, allow_flat=False):
    """
    Check that *input_dict* holds valid CASTEP input parameters.

    :param input_dict: A ``Dict`` node or a plain python dict instance
        holding the parameters to validate.
    :param allow_flat: whether the flat (non-nested) key layout is
        accepted by the checker.
    """
    from .helper import CastepHelper
    # Normalise to a plain python dict regardless of the input flavour
    plain = input_dict.get_dict() if isinstance(input_dict, Dict) else input_dict
    CastepHelper().check_dict(plain, auto_fix=False, allow_flat=allow_flat)
def input_param_validator(input_dict, port=None):
    """
    Port validator for the input parameters (nested format only).

    Returns the validation error message string on failure, ``None``
    otherwise, as expected by port validators.
    """
    from .helper import HelperCheckError
    try:
        validate_input_param(input_dict)
    except HelperCheckError as exc:
        return exc.args[0]
    return None
def flat_input_param_validator(input_dict, port=None):
    """
    Port validator that also accepts the flat parameter format.

    Returns the validation error message string on failure, ``None``
    otherwise, as expected by port validators.
    """
    from .helper import HelperCheckError
    try:
        validate_input_param(input_dict, allow_flat=True)
    except HelperCheckError as exc:
        return exc.args[0]
    return None
def check_restart(builder, verbose=False):
    """
    Check that the RemoteData referenced by the builder satisfies the
    restart requirements of its parameters.

    :param builder: builder (or nested dict) holding the calculation inputs.
    :param verbose: print progress messages if True.
    :returns: True if OK
    :raises InputValidationError: if a restart file is required but no
        parent folder is given, or the file is missing from the folder.
    """
    import os
    from .utils import _lowercase_dict

    def _print(inp):
        # Only emit messages in verbose mode
        if verbose:
            print(inp)

    paramdict = builder[INPUT_LINKNAMES['parameters']].get_dict()['PARAM']
    paramdict = _lowercase_dict(paramdict, "paramdict")
    # 'reuse' takes precedence; fall back to 'continuation'
    stemp = paramdict.get("reuse", None)
    if not stemp:
        stemp = paramdict.get("continuation", None)
    if stemp is None:
        # No restart file needed
        _print("This calculation does not require a restart file.")
        return True
    fname = os.path.split(stemp)[-1]
    _print("This calculation requires a restart file: '{}'".format(fname))
    # Now check if the remote folder has this file
    remote_data = builder.get(INPUT_LINKNAMES["parent_calc_folder"])
    if not remote_data:
        # BUG FIX: the original message called .format(fname) on a string
        # without a placeholder, so the required file name was silently
        # dropped from the error.
        raise InputValidationError(
            "Restart file '{}' is required but "
            "no parent_folder is specified".format(fname))
    _print("Checking remote directory")
    folder_list = remote_data.listdir()
    if fname not in folder_list:
        raise InputValidationError(
            "Restart file {}"
            " is not in the remote folder".format(fname))
    _print("Check finished, restart file '{}' exists.".format(fname))
    return True
| [
"aiida.orm.Dict",
"aiida.orm.load_node",
"deepdiff.DeepDiff",
"os.path.split",
"aiida_castep.data.get_pseudos_from_structure",
"aiida.plugins.CalculationFactory",
"warnings.warn"
] | [((3393, 3443), 'aiida_castep.data.get_pseudos_from_structure', 'get_pseudos_from_structure', (['structure', 'family_name'], {}), '(structure, family_name)\n', (3419, 3443), False, 'from aiida_castep.data import get_pseudos_from_structure\n'), ((7393, 7458), 'warnings.warn', 'warnings.warn', (['"""No existing Dict node found, creating a new one."""'], {}), "('No existing Dict node found, creating a new one.')\n", (7406, 7458), False, 'import warnings\n'), ((7480, 7516), 'aiida.orm.Dict', 'Dict', ([], {'dict': "{'CELL': {}, 'PARAM': {}}"}), "(dict={'CELL': {}, 'PARAM': {}})\n", (7484, 7516), False, 'from aiida.orm import CalcJobNode, Dict\n'), ((10317, 10348), 'aiida.plugins.CalculationFactory', 'CalculationFactory', (['entry_point'], {}), '(entry_point)\n', (10335, 10348), False, 'from aiida.plugins import CalculationFactory\n'), ((1105, 1130), 'aiida.orm.load_node', 'load_node', (['the_other_calc'], {}), '(the_other_calc)\n', (1114, 1130), False, 'from aiida.orm import load_node\n'), ((1369, 1402), 'deepdiff.DeepDiff', 'DeepDiff', (['this_param', 'other_param'], {}), '(this_param, other_param)\n', (1377, 1402), False, 'from deepdiff import DeepDiff\n'), ((1435, 1468), 'deepdiff.DeepDiff', 'DeepDiff', (['other_param', 'this_param'], {}), '(other_param, this_param)\n', (1443, 1468), False, 'from deepdiff import DeepDiff\n'), ((13210, 13230), 'os.path.split', 'os.path.split', (['stemp'], {}), '(stemp)\n', (13223, 13230), False, 'import os\n')] |
from nose.tools import eq_
import amo.tests
from addons.models import (Addon, attach_categories, attach_tags,
attach_translations)
from addons.search import extract
class TestExtract(amo.tests.TestCase):
    """Tests for extraction of addon documents for the search index."""
    fixtures = ['base/users', 'base/addon_3615']

    def setUp(self):
        super(TestExtract, self).setUp()
        # Scalar attributes expected to be copied verbatim into the
        # extracted document
        self.attrs = ('id', 'slug', 'created', 'last_updated',
                      'weekly_downloads', 'average_daily_users', 'status',
                      'type', 'hotness', 'is_disabled', 'premium_type')
        self.transforms = (attach_categories, attach_tags, attach_translations)

    def _extract(self):
        """Load addon 3615 with all transforms applied and extract it."""
        qs = Addon.objects.filter(id__in=[3615])
        for t in self.transforms:
            qs = qs.transform(t)
        self.addon = list(qs)[0]
        return extract(self.addon)

    def test_extract_attributes(self):
        """Every attribute in ``self.attrs`` survives extraction unchanged."""
        extracted = self._extract()
        for attr in self.attrs:
            eq_(extracted[attr], getattr(self.addon, attr))
| [
"addons.models.Addon.objects.filter",
"addons.search.extract"
] | [((674, 709), 'addons.models.Addon.objects.filter', 'Addon.objects.filter', ([], {'id__in': '[3615]'}), '(id__in=[3615])\n', (694, 709), False, 'from addons.models import Addon, attach_categories, attach_tags, attach_translations\n'), ((825, 844), 'addons.search.extract', 'extract', (['self.addon'], {}), '(self.addon)\n', (832, 844), False, 'from addons.search import extract\n')] |
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import torchvision
import torch
from torchvision import models, datasets
class CRNN_Base(nn.Module):
    """
    Convolutional-recurrent network: four Conv/ELU/BatchNorm/MaxPool/
    Dropout stages followed by a 2-layer GRU, a dense layer and a
    softmax over ``class_num`` classes.  Input is (batch, c, h, w).
    """

    def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
        # NOTE(review): h, w, gru_units and input_shape are accepted but
        # never used; the GRU input size is hard-coded to 256 and its
        # hidden size to 32 -- confirm these match filters/poolings.
        super(CRNN_Base, self).__init__()
        input_shape = (c, h, w)
        # CNN
        self.bn0 = nn.BatchNorm2d(num_features=c)
        # 'same'-style zero padding for the k x k convolution
        self.pad1 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv1 = nn.Conv2d(c, filters[0], kernel_size=k, stride=1)
        self.act1 = nn.ELU()
        self.bn1 = nn.BatchNorm2d(num_features=filters[0])
        self.maxPool1 = nn.MaxPool2d(kernel_size=poolings[0], stride=poolings[0])
        # NOTE(review): attribute name 'drouput*' is a typo of 'dropout';
        # renaming would change state_dict keys, so it is kept as-is.
        self.drouput1 = nn.Dropout2d(dropout_rate)
        self.pad2 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv2 = nn.Conv2d(filters[0], filters[1], kernel_size=k)
        self.act2 = nn.ELU()
        self.bn2 = nn.BatchNorm2d(num_features=filters[1])
        self.maxPool2 = nn.MaxPool2d(kernel_size=poolings[1], stride=poolings[1])
        self.drouput2 = nn.Dropout2d(dropout_rate)
        self.pad3 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv3 = nn.Conv2d(filters[1], filters[2], kernel_size=k)
        self.act3 = nn.ELU()
        self.bn3 = nn.BatchNorm2d(num_features=filters[2])
        self.maxPool3 = nn.MaxPool2d(kernel_size=poolings[2], stride=poolings[2])
        self.drouput3 = nn.Dropout2d(dropout_rate)
        self.pad4 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv4 = nn.Conv2d(filters[2], filters[3], kernel_size=k)
        self.act4 = nn.ELU()
        self.bn4 = nn.BatchNorm2d(num_features=filters[3])
        self.maxPool4 = nn.MaxPool2d(kernel_size=poolings[3],stride=poolings[3])
        self.drouput4 = nn.Dropout2d(dropout_rate)
        # Output is (m, chan, freq, time) -> Needs to be reshaped for feeding to GRU units
        # We will handle the reshape in the forward method
        # RNN
        self.gru = nn.GRU(input_size=256, hidden_size=32, batch_first=True, num_layers=2, dropout=gru_dropout)
        #self.gru2 = nn.GRU(input_size=32, hidden_size=32, batch_first=True, dropout=gru_dropout)
        # Dense and softmax
        self.dense1 = nn.Linear(32, class_num)
        self.softm = nn.Softmax(dim=-1)

    def forward(self, x):
        """Run the CNN stack, the GRU and the classifier head on x."""
        # CNN forward
        x = self.bn0(x)
        x = self.pad1(x)
        x = self.conv1(x)
        x = self.act1(x)
        x = self.bn1(x)
        x = self.maxPool1(x)
        x = self.drouput1(x)
        x = self.pad2(x)
        x = self.conv2(x)
        x = self.act2(x)
        x = self.bn2(x)
        x = self.maxPool2(x)
        x = self.drouput2(x)
        x = self.pad3(x)
        x = self.conv3(x)
        x = self.act3(x)
        x = self.bn3(x)
        x = self.maxPool3(x)
        x = self.drouput3(x)
        x = self.pad4(x)
        x = self.conv4(x)
        x = self.act4(x)
        x = self.bn4(x)
        x = self.maxPool4(x)
        x = self.drouput4(x)
        # Reshape (batch, chan, freq, time) -> (batch, time, freq*chan)
        x = x.permute(0,3,2,1)
        x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
        # RNN forward.  self.gru(x)[1] is h_n of shape
        # (num_layers, batch, hidden); [0] selects the FIRST layer's final
        # hidden state.  NOTE(review): typically h_n[-1] (last layer) is
        # used here -- confirm this is intended.
        x = self.gru(x)[1][0]
        # Dense and softmax forward
        x = self.dense1(x)
        x = self.softm(x)
        return x
class CRNN_Larger(nn.Module):
    """
    Deeper variant of ``CRNN_Base``: five Conv/ELU/BatchNorm/MaxPool/
    Dropout stages followed by a 2-layer GRU, a dense layer and a
    softmax over ``class_num`` classes.  Input is (batch, c, h, w).
    """

    def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
        # NOTE(review): h, w, gru_units and input_shape are accepted but
        # never used; the GRU input size is hard-coded to 1024 -- confirm
        # it matches the chosen filters/poolings.
        super(CRNN_Larger, self).__init__()
        input_shape = (c, h, w)
        # CNN
        self.bn0 = nn.BatchNorm2d(num_features=c)
        self.pad1 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv1 = nn.Conv2d(c, filters[0], kernel_size=k, stride=1)
        self.act1 = nn.ELU()
        self.bn1 = nn.BatchNorm2d(num_features=filters[0])
        self.maxPool1 = nn.MaxPool2d(kernel_size=poolings[0], stride=poolings[0])
        self.drouput1 = nn.Dropout2d(dropout_rate)
        self.pad2 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv2 = nn.Conv2d(filters[0], filters[1], kernel_size=k)
        self.act2 = nn.ELU()
        self.bn2 = nn.BatchNorm2d(num_features=filters[1])
        self.maxPool2 = nn.MaxPool2d(kernel_size=poolings[1], stride=poolings[1])
        self.drouput2 = nn.Dropout2d(dropout_rate)
        self.pad3 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv3 = nn.Conv2d(filters[1], filters[2], kernel_size=k)
        self.act3 = nn.ELU()
        self.bn3 = nn.BatchNorm2d(num_features=filters[2])
        self.maxPool3 = nn.MaxPool2d(kernel_size=poolings[2], stride=poolings[2])
        self.drouput3 = nn.Dropout2d(dropout_rate)
        self.pad4 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv4 = nn.Conv2d(filters[2], filters[3], kernel_size=k)
        self.act4 = nn.ELU()
        self.bn4 = nn.BatchNorm2d(num_features=filters[3])
        self.maxPool4 = nn.MaxPool2d(kernel_size=poolings[3],stride=poolings[3])
        self.drouput4 = nn.Dropout2d(dropout_rate)
        self.pad5 = nn.ZeroPad2d((int(k/2), int(k/2), int(k/2), int(k/2)))
        self.conv5 = nn.Conv2d(filters[3], filters[4], kernel_size=k)
        self.act5 = nn.ELU()
        self.bn5 = nn.BatchNorm2d(num_features=filters[4])
        self.maxPool5 = nn.MaxPool2d(kernel_size=poolings[4],stride=poolings[4])
        self.drouput5 = nn.Dropout2d(dropout_rate)
        # Output is (m, chan, freq, time) -> Needs to be reshaped for feeding to GRU units
        # We will handle the reshape in the forward method
        # RNN
        self.gru = nn.GRU(input_size=1024, hidden_size=32, batch_first=True, num_layers=2, dropout=gru_dropout)
        # Dense and softmax
        self.dense1 = nn.Linear(32, class_num)
        self.softm = nn.Softmax(dim=-1)

    def forward(self, x):
        """Run the CNN stack, the GRU and the classifier head on x."""
        # CNN forward
        x = self.bn0(x)
        x = self.pad1(x)
        x = self.conv1(x)
        x = self.act1(x)
        x = self.bn1(x)
        x = self.maxPool1(x)
        x = self.drouput1(x)
        x = self.pad2(x)
        x = self.conv2(x)
        x = self.act2(x)
        x = self.bn2(x)
        x = self.maxPool2(x)
        x = self.drouput2(x)
        x = self.pad3(x)
        x = self.conv3(x)
        x = self.act3(x)
        x = self.bn3(x)
        x = self.maxPool3(x)
        x = self.drouput3(x)
        x = self.pad4(x)
        x = self.conv4(x)
        x = self.act4(x)
        x = self.bn4(x)
        x = self.maxPool4(x)
        x = self.drouput4(x)
        x = self.pad5(x)
        x = self.conv5(x)
        x = self.act5(x)
        x = self.bn5(x)
        x = self.maxPool5(x)
        x = self.drouput5(x)
        # Reshape (batch, chan, freq, time) -> (batch, time, freq*chan)
        x = x.permute(0,3,2,1)
        x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
        # RNN forward; [1][0] takes the first layer's final hidden state.
        # NOTE(review): typically h_n[-1] is used -- confirm intended.
        x = self.gru(x)[1][0]
        # Dense and softmax forward
        x = self.dense1(x)
        x = self.softm(x)
        return x
class CRNN_ResNet18(nn.Module):
    """
    CRNN variant with a pretrained ResNet-18 backbone (minus its final
    fc layer) feeding a 3-layer GRU, a dense layer and a softmax over
    ``class_num`` classes.
    """

    def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, gru_dropout=0.3, gru_units=32):
        # Backbone.  NOTE(review): c, h, w, k, filters, poolings,
        # dropout_rate and gru_units are accepted but never used here;
        # they are presumably kept for interface parity with the other
        # CRNN classes -- confirm.
        super(CRNN_ResNet18, self).__init__()
        input_shape = (c, h, w)
        self.backbone = torchvision.models.resnet18(pretrained=True)
        # Drop the final fully-connected layer of ResNet-18
        modules = list(self.backbone.children())[:-1]
        self.backbone = nn.Sequential(*modules)
        # Freeze the first six child modules; fine-tune the rest
        ct = 0
        for child in self.backbone.children():
            ct += 1
            if ct < 7:
                for param in child.parameters():
                    param.requires_grad = False
        # RNN
        self.gru = nn.GRU(input_size=512, hidden_size=32, batch_first=True, num_layers=3, dropout=gru_dropout)
        #self.gru2 = nn.GRU(input_size=32, hidden_size=32, batch_first=True, dropout=gru_dropout)
        # Dense and softmax
        self.dense1 = nn.Linear(32, class_num)
        self.softm = nn.Softmax(dim=-1)

    def forward(self, x):
        """Run the backbone, the GRU and the classifier head on x."""
        # Backbone forward
        x = self.backbone(x)
        # Reshape (batch, chan, h, w) -> (batch, w, h*chan)
        x = x.permute(0,3,2,1)
        x = torch.reshape(x, (int(x.shape[0]), int(x.shape[1]), int(x.shape[2]*x.shape[3])))
        # RNN forward; [1][0] takes the first layer's final hidden state.
        # NOTE(review): typically h_n[-1] is used -- confirm intended.
        x = self.gru(x)[1][0]
        # Dense and softmax forward
        x = self.dense1(x)
        x = self.softm(x)
        return x
"torch.nn.BatchNorm2d",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torchvision.models.resnet18",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.ELU",
"torch.nn.GRU"
] | [((458, 488), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'c'}), '(num_features=c)\n', (472, 488), True, 'import torch.nn as nn\n'), ((585, 634), 'torch.nn.Conv2d', 'nn.Conv2d', (['c', 'filters[0]'], {'kernel_size': 'k', 'stride': '(1)'}), '(c, filters[0], kernel_size=k, stride=1)\n', (594, 634), True, 'import torch.nn as nn\n'), ((655, 663), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (661, 663), True, 'import torch.nn as nn\n'), ((683, 722), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[0]'}), '(num_features=filters[0])\n', (697, 722), True, 'import torch.nn as nn\n'), ((747, 804), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[0]', 'stride': 'poolings[0]'}), '(kernel_size=poolings[0], stride=poolings[0])\n', (759, 804), True, 'import torch.nn as nn\n'), ((829, 855), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (841, 855), True, 'import torch.nn as nn\n'), ((953, 1001), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[0]', 'filters[1]'], {'kernel_size': 'k'}), '(filters[0], filters[1], kernel_size=k)\n', (962, 1001), True, 'import torch.nn as nn\n'), ((1022, 1030), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (1028, 1030), True, 'import torch.nn as nn\n'), ((1050, 1089), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[1]'}), '(num_features=filters[1])\n', (1064, 1089), True, 'import torch.nn as nn\n'), ((1114, 1171), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[1]', 'stride': 'poolings[1]'}), '(kernel_size=poolings[1], stride=poolings[1])\n', (1126, 1171), True, 'import torch.nn as nn\n'), ((1196, 1222), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (1208, 1222), True, 'import torch.nn as nn\n'), ((1320, 1368), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[1]', 'filters[2]'], {'kernel_size': 'k'}), '(filters[1], filters[2], kernel_size=k)\n', (1329, 1368), True, 'import torch.nn as 
nn\n'), ((1389, 1397), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (1395, 1397), True, 'import torch.nn as nn\n'), ((1417, 1456), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[2]'}), '(num_features=filters[2])\n', (1431, 1456), True, 'import torch.nn as nn\n'), ((1481, 1538), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[2]', 'stride': 'poolings[2]'}), '(kernel_size=poolings[2], stride=poolings[2])\n', (1493, 1538), True, 'import torch.nn as nn\n'), ((1563, 1589), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (1575, 1589), True, 'import torch.nn as nn\n'), ((1687, 1735), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[2]', 'filters[3]'], {'kernel_size': 'k'}), '(filters[2], filters[3], kernel_size=k)\n', (1696, 1735), True, 'import torch.nn as nn\n'), ((1756, 1764), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (1762, 1764), True, 'import torch.nn as nn\n'), ((1784, 1823), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[3]'}), '(num_features=filters[3])\n', (1798, 1823), True, 'import torch.nn as nn\n'), ((1848, 1905), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[3]', 'stride': 'poolings[3]'}), '(kernel_size=poolings[3], stride=poolings[3])\n', (1860, 1905), True, 'import torch.nn as nn\n'), ((1929, 1955), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (1941, 1955), True, 'import torch.nn as nn\n'), ((2140, 2235), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(256)', 'hidden_size': '(32)', 'batch_first': '(True)', 'num_layers': '(2)', 'dropout': 'gru_dropout'}), '(input_size=256, hidden_size=32, batch_first=True, num_layers=2,\n dropout=gru_dropout)\n', (2146, 2235), True, 'import torch.nn as nn\n'), ((2381, 2405), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'class_num'], {}), '(32, class_num)\n', (2390, 2405), True, 'import torch.nn as nn\n'), ((2427, 2445), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': 
'(-1)'}), '(dim=-1)\n', (2437, 2445), True, 'import torch.nn as nn\n'), ((3716, 3746), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'c'}), '(num_features=c)\n', (3730, 3746), True, 'import torch.nn as nn\n'), ((3843, 3892), 'torch.nn.Conv2d', 'nn.Conv2d', (['c', 'filters[0]'], {'kernel_size': 'k', 'stride': '(1)'}), '(c, filters[0], kernel_size=k, stride=1)\n', (3852, 3892), True, 'import torch.nn as nn\n'), ((3913, 3921), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (3919, 3921), True, 'import torch.nn as nn\n'), ((3941, 3980), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[0]'}), '(num_features=filters[0])\n', (3955, 3980), True, 'import torch.nn as nn\n'), ((4005, 4062), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[0]', 'stride': 'poolings[0]'}), '(kernel_size=poolings[0], stride=poolings[0])\n', (4017, 4062), True, 'import torch.nn as nn\n'), ((4087, 4113), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (4099, 4113), True, 'import torch.nn as nn\n'), ((4211, 4259), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[0]', 'filters[1]'], {'kernel_size': 'k'}), '(filters[0], filters[1], kernel_size=k)\n', (4220, 4259), True, 'import torch.nn as nn\n'), ((4280, 4288), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4286, 4288), True, 'import torch.nn as nn\n'), ((4308, 4347), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[1]'}), '(num_features=filters[1])\n', (4322, 4347), True, 'import torch.nn as nn\n'), ((4372, 4429), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[1]', 'stride': 'poolings[1]'}), '(kernel_size=poolings[1], stride=poolings[1])\n', (4384, 4429), True, 'import torch.nn as nn\n'), ((4454, 4480), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (4466, 4480), True, 'import torch.nn as nn\n'), ((4578, 4626), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[1]', 'filters[2]'], 
{'kernel_size': 'k'}), '(filters[1], filters[2], kernel_size=k)\n', (4587, 4626), True, 'import torch.nn as nn\n'), ((4647, 4655), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4653, 4655), True, 'import torch.nn as nn\n'), ((4675, 4714), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[2]'}), '(num_features=filters[2])\n', (4689, 4714), True, 'import torch.nn as nn\n'), ((4739, 4796), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[2]', 'stride': 'poolings[2]'}), '(kernel_size=poolings[2], stride=poolings[2])\n', (4751, 4796), True, 'import torch.nn as nn\n'), ((4821, 4847), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (4833, 4847), True, 'import torch.nn as nn\n'), ((4945, 4993), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[2]', 'filters[3]'], {'kernel_size': 'k'}), '(filters[2], filters[3], kernel_size=k)\n', (4954, 4993), True, 'import torch.nn as nn\n'), ((5014, 5022), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (5020, 5022), True, 'import torch.nn as nn\n'), ((5042, 5081), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[3]'}), '(num_features=filters[3])\n', (5056, 5081), True, 'import torch.nn as nn\n'), ((5106, 5163), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[3]', 'stride': 'poolings[3]'}), '(kernel_size=poolings[3], stride=poolings[3])\n', (5118, 5163), True, 'import torch.nn as nn\n'), ((5187, 5213), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (5199, 5213), True, 'import torch.nn as nn\n'), ((5311, 5359), 'torch.nn.Conv2d', 'nn.Conv2d', (['filters[3]', 'filters[4]'], {'kernel_size': 'k'}), '(filters[3], filters[4], kernel_size=k)\n', (5320, 5359), True, 'import torch.nn as nn\n'), ((5380, 5388), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (5386, 5388), True, 'import torch.nn as nn\n'), ((5408, 5447), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'filters[4]'}), 
'(num_features=filters[4])\n', (5422, 5447), True, 'import torch.nn as nn\n'), ((5472, 5529), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'poolings[4]', 'stride': 'poolings[4]'}), '(kernel_size=poolings[4], stride=poolings[4])\n', (5484, 5529), True, 'import torch.nn as nn\n'), ((5553, 5579), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout_rate'], {}), '(dropout_rate)\n', (5565, 5579), True, 'import torch.nn as nn\n'), ((5764, 5860), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(1024)', 'hidden_size': '(32)', 'batch_first': '(True)', 'num_layers': '(2)', 'dropout': 'gru_dropout'}), '(input_size=1024, hidden_size=32, batch_first=True, num_layers=2,\n dropout=gru_dropout)\n', (5770, 5860), True, 'import torch.nn as nn\n'), ((5908, 5932), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'class_num'], {}), '(32, class_num)\n', (5917, 5932), True, 'import torch.nn as nn\n'), ((5954, 5972), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (5964, 5972), True, 'import torch.nn as nn\n'), ((7414, 7458), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (7441, 7458), False, 'import torchvision\n'), ((7537, 7560), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (7550, 7560), True, 'import torch.nn as nn\n'), ((7805, 7900), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(512)', 'hidden_size': '(32)', 'batch_first': '(True)', 'num_layers': '(3)', 'dropout': 'gru_dropout'}), '(input_size=512, hidden_size=32, batch_first=True, num_layers=3,\n dropout=gru_dropout)\n', (7811, 7900), True, 'import torch.nn as nn\n'), ((8046, 8070), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'class_num'], {}), '(32, class_num)\n', (8055, 8070), True, 'import torch.nn as nn\n'), ((8092, 8110), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (8102, 8110), True, 'import torch.nn as nn\n')] |
import pandas as pd
from sqlalchemy import create_engine
from config import DATABASE_URI
from predictions.common_predictor import CommonPredictor
pd.set_option("display.width", 1000)
pd.set_option("display.max_columns", 50)
class DotaPredictor(CommonPredictor):
    """Predictor over per-team Dota match statistics."""

    def __init__(self, debug: bool = False):
        super().__init__(debug=debug)
        # Stat columns for one team; each also has a "c_"-prefixed
        # counterpart (presumably the opposing team -- TODO confirm).
        base_stats = [
            "kills",
            "deaths",
            "assists",
            "worth",
            "last_hits",
            "denies",
            "gold_min",
            "xp_min",
            "dmg_heroes",
            "healing",
            "dmg_buildings",
            "total_win_pct",
        ]
        self.training_columns = base_stats + ["c_" + col for col in base_stats]
        self.y_col_name = "win"  # 't1_winner'
if __name__ == "__main__":
DB_URL = f"{DATABASE_URI}dota"
ENGINE = create_engine(DB_URL)
df = pd.read_sql_table("match_stats_all", con=ENGINE)
# spocitam jaky je winrate mezi teamy.
totals = df.groupby(["t1_id", "t2_id"])["t1_id"].count()
wins = df[df["t1_winner"] == True].groupby(["t1_id", "t2_id"])["t1_id"].count()
win_pcts = wins.divide(totals).reset_index(name="winrate").fillna(0)
win_pcts["win"] = win_pcts["winrate"] >= 0.5
df = df.drop("t1_winner", axis=1).drop_duplicates()
df["joinon"] = df[["t1_id", "t2_id"]].astype(str).apply("-".join, 1)
win_pcts["joinon"] = win_pcts[["t1_id", "t2_id"]].astype(str).apply("-".join, 1)
df = pd.merge(df, win_pcts, on="joinon")
df.drop(
["t1_id_x", "t2_id_x", "t1_id_y", "t2_id_y", "joinon", "winrate"],
axis=1,
inplace=True,
)
# df.drop_duplicates()
# y = df.pop("t1_winner")
df.fillna(0, inplace=True)
import mlflow
from config import ROOT_DIR
mlflow.set_tracking_uri(f"file:///{ROOT_DIR}/mlruns")
mlflow.set_experiment("hazard_dota")
pred = DotaPredictor(debug=False)
pred.main_train(df, run_name="save run", n_runs=50)
print()
| [
"mlflow.set_tracking_uri",
"sqlalchemy.create_engine",
"pandas.merge",
"mlflow.set_experiment",
"pandas.set_option",
"pandas.read_sql_table"
] | [((147, 183), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (160, 183), True, 'import pandas as pd\n'), ((184, 224), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(50)'], {}), "('display.max_columns', 50)\n", (197, 224), True, 'import pandas as pd\n'), ((1114, 1135), 'sqlalchemy.create_engine', 'create_engine', (['DB_URL'], {}), '(DB_URL)\n', (1127, 1135), False, 'from sqlalchemy import create_engine\n'), ((1146, 1194), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""match_stats_all"""'], {'con': 'ENGINE'}), "('match_stats_all', con=ENGINE)\n", (1163, 1194), True, 'import pandas as pd\n'), ((1731, 1766), 'pandas.merge', 'pd.merge', (['df', 'win_pcts'], {'on': '"""joinon"""'}), "(df, win_pcts, on='joinon')\n", (1739, 1766), True, 'import pandas as pd\n'), ((2043, 2096), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['f"""file:///{ROOT_DIR}/mlruns"""'], {}), "(f'file:///{ROOT_DIR}/mlruns')\n", (2066, 2096), False, 'import mlflow\n'), ((2101, 2137), 'mlflow.set_experiment', 'mlflow.set_experiment', (['"""hazard_dota"""'], {}), "('hazard_dota')\n", (2122, 2137), False, 'import mlflow\n')] |
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the diet app.
# NOTE(review): both 'diet' and 'healthToday' use the pattern '^$'; the
# first match wins, so views.health_of_day is unreachable at this URL --
# confirm the intended pattern for 'healthToday'.
urlpatterns=[
    url('^$',views.diet,name = 'diet'),
    url('^$',views.health_of_day,name='healthToday'),
    url(r'^search/', views.search_results, name='search_results'),
    url(r'^image/(\d+)',views.image,name ='image'),
    url(r'^vegetable/',views.vegetable,name ='vegetable'),
    url(r'^fruit/',views.fruit,name ='fruit'),
    url(r'^protein/',views.protein,name ='protein'),
    url(r'^cereal/',views.cereal,name ='cereal'),
    url(r'^diary/',views.diary,name ='diary'),
]
if settings.DEBUG:
    # Serve uploaded media files directly (development only)
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((151, 185), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.diet'], {'name': '"""diet"""'}), "('^$', views.diet, name='diet')\n", (154, 185), False, 'from django.conf.urls import url\n'), ((191, 241), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.health_of_day'], {'name': '"""healthToday"""'}), "('^$', views.health_of_day, name='healthToday')\n", (194, 241), False, 'from django.conf.urls import url\n'), ((247, 307), 'django.conf.urls.url', 'url', (['"""^search/"""', 'views.search_results'], {'name': '"""search_results"""'}), "('^search/', views.search_results, name='search_results')\n", (250, 307), False, 'from django.conf.urls import url\n'), ((314, 361), 'django.conf.urls.url', 'url', (['"""^image/(\\\\d+)"""', 'views.image'], {'name': '"""image"""'}), "('^image/(\\\\d+)', views.image, name='image')\n", (317, 361), False, 'from django.conf.urls import url\n'), ((366, 419), 'django.conf.urls.url', 'url', (['"""^vegetable/"""', 'views.vegetable'], {'name': '"""vegetable"""'}), "('^vegetable/', views.vegetable, name='vegetable')\n", (369, 419), False, 'from django.conf.urls import url\n'), ((425, 466), 'django.conf.urls.url', 'url', (['"""^fruit/"""', 'views.fruit'], {'name': '"""fruit"""'}), "('^fruit/', views.fruit, name='fruit')\n", (428, 466), False, 'from django.conf.urls import url\n'), ((472, 519), 'django.conf.urls.url', 'url', (['"""^protein/"""', 'views.protein'], {'name': '"""protein"""'}), "('^protein/', views.protein, name='protein')\n", (475, 519), False, 'from django.conf.urls import url\n'), ((525, 569), 'django.conf.urls.url', 'url', (['"""^cereal/"""', 'views.cereal'], {'name': '"""cereal"""'}), "('^cereal/', views.cereal, name='cereal')\n", (528, 569), False, 'from django.conf.urls import url\n'), ((575, 616), 'django.conf.urls.url', 'url', (['"""^diary/"""', 'views.diary'], {'name': '"""diary"""'}), "('^diary/', views.diary, name='diary')\n", (578, 616), False, 'from django.conf.urls import url\n'), ((662, 723), 
'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (668, 723), False, 'from django.conf.urls.static import static\n')] |
import os
import sys
from gradslam.config import CfgNode as CN
# Root configuration node
cfg = CN()
# Training-related settings
cfg.TRAIN = CN()
# NOTE(review): purpose of HYPERPARAM_1 is not defined here -- confirm
cfg.TRAIN.HYPERPARAM_1 = 0.9
| [
"gradslam.config.CfgNode"
] | [((72, 76), 'gradslam.config.CfgNode', 'CN', ([], {}), '()\n', (74, 76), True, 'from gradslam.config import CfgNode as CN\n'), ((90, 94), 'gradslam.config.CfgNode', 'CN', ([], {}), '()\n', (92, 94), True, 'from gradslam.config import CfgNode as CN\n')] |
#
# GitHubNeo.py
#
# note: i tried using bulbs, which would be easier to
# migrate to other tinkerpop graph engines, but had
# trouble authenticating
#
#
"""
package oompa.tracking.github
experiments with working on github graphs in neo
uses py2neo
TODO: i think bulb seems to have better object modeling (but doesn't work for me)
"""
from datetime import timedelta
from datetime import datetime
import py2neo
from oompa.tracking.github import github_utils
Node = py2neo.Node
Relationship = py2neo.Relationship
"""
misc neo notes:
visit: http://localhost:7474/
default user - neo4j
part of neo walkthrough
CREATE (ee:Person { name: "Emil", from: "Sweden", klout: 99 })
() means "node"
{} surround attrs
Person is the label
MATCH (ee:Person) WHERE ee.name = "Emil" RETURN ee;
complex creation:
MATCH (ee:Person) WHERE ee.name = "Emil"
CREATE (js:Person { name: "Johan", from: "Sweden", learn: "surfing" }),
(ir:Person { name: "Ian", from: "England", title: "author" }),
(rvb:Person { name: "Rik", from: "Belgium", pet: "Orval" }),
(ally:Person { name: "Allison", from: "California", hobby: "surfing" }),
(ee)-[:KNOWS {since: 2001}]->(js),(ee)-[:KNOWS {rating: 5}]->(ir),
(js)-[:KNOWS]->(ir),(js)-[:KNOWS]->(rvb),
(ir)-[:KNOWS]->(js),(ir)-[:KNOWS]->(ally),
(rvb)-[:KNOWS]->(ally)
pattern matching:
MATCH (ee:Person)-[:KNOWS]-(friends)
WHERE ee.name = "Emil" RETURN ee, friends
Pattern matching can be used to make recommendations. Johan is
learning to surf, so he may want to find a new friend who already
does:
MATCH (js:Person)-[:KNOWS]-()-[:KNOWS]-(surfer)
WHERE js.name = "Johan" AND surfer.hobby = "surfing"
RETURN DISTINCT surfer
() empty parenthesis to ignore these nodes
DISTINCT because more than one path will match the pattern
surfer will contain Allison, a friend of a friend who surfs
"""
def parseFreshness(freshness):
"""
TODO: not fully general/bulletproof yet
"""
pieces = freshness.split()
days = 0
hours = 0
minutes = 0
for piece in pieces:
unit = piece[-1]
value = int(piece[:-1])
if unit == "d":
days = value
elif unit == "h":
hours = value
elif unit == "m":
minutes = value
else:
raise Exception("unknown time unit", unit, freshness)
pass
freshness_delta = timedelta(days = days, hours = hours, minutes = minutes)
return freshness_delta
class GitHubNeo:
"""
interface for lazy github graph in neo4j
- update
- list
- track
- discover
"""
# ISO format
_dtFormat = "%Y-%m-%dT%H:%M:%S"
def __init__(self, config, githubHelper):
# XXX get from config
neo_url = config.get("neo.github.url")
neo_user = config.get("neo.github.user")
neo_passwd = config.get("neo.github.<PASSWORD>")
# TODO: derive from the url
neo_host = "localhost:7474"
# TODO: if freshness, parse it to a real latency (e.g., "4d" -> seconds)
self.freshness = config.get("neo.github.freshness")
if self.freshness:
self.freshness = parseFreshness(self.freshness)
pass
py2neo.authenticate(neo_host, neo_user, neo_passwd)
self.graph = py2neo.Graph(neo_url)
self.githubHelper = githubHelper
self._establishNeoSchema()
return
def _establishNeoSchema(self):
"""
set up constraints on relationships and nodes in neo graph
note: i believe that schema constraints are volatile, per-session
if i don't apply these contraints, on a graph that had them
in previous sessions, i can violate the previous contraints
"""
schema = self.graph.schema
try:
schema.create_uniqueness_constraint("User", "name")
schema.create_uniqueness_constraint("Organization", "name")
schema.create_uniqueness_constraint("Repository", "name")
# except py2neo.error.ConstraintViolationException:
except:
# already established
return
# TODO: User
# TODO: Organization
# TODO: relationships
return
def query(self, query):
"""
submit arbitrary cypher-syntax query to graph
query is a string
"""
for record in self.graph.cypher.execute(query):
yield record
return
def getNodeType(self, node):
"""
return the type of the given node
"""
# XXX still figuring out LabelSet - don't know how to get values as list
return node.labels.copy().pop()
def getNode(self, nodeName, nodeType = None):
"""
returns a neo Node
TODO: if nodeType specified, use it (esp User vs Organization)
"""
# print("NT: %s" % nodeType)
typeSpec = ""
# XXX figure out best way to support this
# if nodeType is not None:
# typeSpec = ":%s" % nodeType
# pass
# XXX this does not feel like "The Best Way" to simply get a node
query = 'MATCH (node {name:"%s"}%s) RETURN node' % ( nodeName, typeSpec )
# print(" Q: %r" % query)
records = list(self.query(query))
if not records:
return None
if len(records) > 1:
print("XXX getNode() plural records: %s %s (%r)" % ( nodeType, nodeType, typeSpec ))
for record in records:
print(" R: %r" % ( record, ))
pass
xxx
pass
record = records[0]
return self.graph.hydrate(record[0])
def _now(self):
return datetime.utcnow().replace(microsecond = 0)
def _parseISODTStr(self, dtStr):
return datetime.strptime(dtStr, self._dtFormat)
def createNode(self, name, nodeType):
# we don't want the microsecond junk in time string
now = self._now()
node = Node(nodeType, name = name, createdDT = now.isoformat())
self.graph.create(node)
return node
def getOrAddNode(self, name, nodeType = None):
node = self.getNode(name, nodeType)
if node is not None:
# print("# node already in graph")
return node
# print("# node not in graph - have to create")
if nodeType is None:
for nodeType, name, obj in self.githubHelper.getKindNameAndObject([ name, ]):
# only one
break
if nodeType is None:
print(" could not determine nodeType for name: %s" % name)
xxx
pass
pass
return self.createNode(name, nodeType)
#
# neo edge relationships for lists "away from" certain types of github objects
#
# some list names have an alias, because the list name is confusing
#
# TODO: test if simple rule of removing the final "s" works. that would be simpler
# - there are a couple of exceptions
#
_relationships = {
"Repository" : [
( "stargazers", "starred", "from", ),
( "subscribers", "subscriber", "from", ),
( "contributors", "contributor", "from", ),
( "forks", "forkOf", "from", ),
# ...
],
"User" : [
( "followers", "follows", "from", ),
( "following", "follows", "to", ),
( "starred_repositories", "starred", "to", ),
( "subscriptions", "subscriber", "to", ),
( "organizations", "memberOf", "to", ),
],
}
def updateRelationships(self, obj, slot, relationshipLabel, direction, destNode):
"""
obj is a github3 object (repository, user, organization)
direction is "from" or "to"
TODO: support attribute decorators
generates stream of entitySpec
"""
destNodeType = self.getNodeType(destNode)
graph = self.graph
print("updateRelationships: %-25s - %-25s %4s - %s" % (
slot, relationshipLabel, direction, destNode.properties["name"] ))
# XXX need otherNodeLabelGetter
# - .name, .login, ...
# determine neighbor nodeType by slot name
# TODO: use a dictionary - simpler
neighborNodeType = None
# XXX just figure this out by what we get back
if slot in [ "stargazers", "contributors", ]:
neighborNodeType = "User"
elif slot in [ "followers", "following" ]:
neighborNodeType = "User"
elif slot == "organizations":
neighborNodeType = "Organization"
elif slot == "starred_repositories":
neighborNodeType = "Repository"
elif slot == "subscriptions":
neighborNodeType = "Repository"
elif slot == "forks":
neighborNodeType = "Repository"
elif slot == "subscribers":
# i think that things can subsribe to users or orgs, too
# this is currently just Users subscribed to Repository
neighborNodeType = "User"
else:
print(" XXX slot not handled in switch yet - %r" % slot)
xxx
pass
if neighborNodeType == "User":
nodeNameAttr = "login"
elif neighborNodeType == "Organization":
nodeNameAttr = "name"
elif neighborNodeType == "Repository":
nodeNameAttr = "full_name"
else:
xxx
pass
# print("# nodeNameAttr: %s - %s - %s" % ( slot, neighborNodeType, nodeNameAttr ))
# TODO: get all of them, and batch-update
neighbors = []
for value in getattr(obj, slot)():
# if slot == "forks":
# print(" fork obj")
# github_utils.dumpSlotValues(obj)
# value is another github object (User, ...)
# name = value.name
# name = str(value)
nodeName = getattr(value, nodeNameAttr)
neighbors.append(( neighborNodeType, nodeName ))
pass
# TODO: batch-update
neighbors = sorted(neighbors, key = lambda _tuple : _tuple[1])
for neighborNodeType, nodeName in neighbors:
# only if verbose tracing
# print(" %s: %r" % ( relationshipLabel, nodeName ))
srcNode = Node(neighborNodeType, name = nodeName)
# graph.merge_one(Relationship(srcNode, relationshipLabel, destNode))
# XXX try/except is sloppy - i don't get merge vs create yet
if direction == "from":
relationship = Relationship(srcNode, relationshipLabel, destNode)
else:
relationship = Relationship(destNode, relationshipLabel, srcNode)
pass
try:
graph.create(relationship)
except:
# already exists
pass
yield ( neighborNodeType, nodeName )
pass
# need to flush anything?
return
def _getRelationshipTuples(self, nodeType, relationships = None):
"""
TODO: memoize
"""
# print("_getRelationshipTuples(): %s %s" % ( nodeType, relationships ))
#
# XXX still working out the best way to normalize github
# relationships to neo relationships
#
for relationshipTuple in self._relationships[nodeType]:
listName, relationshipLabel, direction = relationshipTuple
if relationships:
keep = False
for rel in relationships:
if rel == relationshipLabel:
keep = True
break
pass
if not keep:
continue
pass
yield relationshipTuple
pass
return
def updateGithubObj(self, githubObj, node, relationships = None):
"""
githubObj is a GitHub3 Repository, User, or Organization
node is a py2neo Node
"""
# starting to generalize
graph = self.graph
nodeType = self.getNodeType(node)
# note that full_name is something that i attach
if nodeType == "Repository":
name = githubObj.full_name
else:
name = githubObj.name
pass
# TODO: want to report different things for different object -
# user needs login and name
print("GitHubNeo.updateGithubObj(): %s" % name.encode("utf8"))
relationshipTuples = list(self._getRelationshipTuples(nodeType, relationships))
# TODO: *local* LRU cache user and repo - may also be on contributes, subscribes.
# make sure we only pay github points once
for listName, relationshipLabel, direction in relationshipTuples:
for entitySpec in self.updateRelationships(githubObj,
listName,
relationshipLabel,
direction,
node):
yield entitySpec
pass
pass
node.properties["updatedDT"] = datetime.utcnow().replace(microsecond = 0)
node.push()
# what else should go in graph?
# .parent
# .source
# .description
# .homepage
# .language
# .last_modified
# .updated_at
# branches()
# code_frequency()
# XXX blocked - requires authentication
# dumpList(obj, "collaborators")
# comments()
# commit_activity()
# commits()
# contributor_statistics()
# github_utils.dumpList(obj, "contributors")
# default_branch
# deployments() ???
# events()
# github_utils.dumpList(obj, "forks")
# hooks() ???
# issues()
# keys() ???
# labels() ??? i think these are tags used in issues/planning
# github_utils.dumpList(obj, "languages")
# milestones()
# notifications()
# open_issues_count ???
# owner (a User object)
# pull_requests
# refs() ???
# releases() ???
# size (what are units?)
# statuses() ?
# github_utils.dumpList(obj, "subscribers")
# tags()
# i think that tree is some sort of file tree. (i was hoping it was fork ancestry)
# tree = obj.tree()
# print("TREE: %s" % tree)
# teams()
# { "Last-Modified": "",
# "all": [0, 0, 1, 1, ..., (52 weeks?) ],
# "owner": [ 0, 0, 0, 0, ... ] }
# print(" weekly_commit_count: %s" % obj.weekly_commit_count())
return
def updateUser(self, githubObj, node):
"""
user is a GitHub3 User
TODO: refactor - merge with updateRepository - just a generic
"""
graph = self.graph
nodeType = self.getNodeType(node)
# was it forked from something?
print("GitHubNeo.updateUser(): %s - %r" % ( user.login, user.name ))
nodeType = "User"
node = Node(nodeType, name = user.login)
# use merge_one to create if it does not already exist
# XXX merge_one does not persist the node?
# graph.merge_one(node)
graph.create(node)
for listNameTuple in self._relationships[nodeType]:
if isinstance(listNameTuple, tuple):
listNameTuple, relationshipLabel = listNameTuple
else:
listName = listNameTuple
relationshipLabel = listName
pass
for entitySpec in self.updateRelationships(user, listName, relationshipLabel, node):
yield entitySpec
pass
pass
return
def updateOrganization(self, name, org):
"""
org is a GitHub3 Organization
"""
graph = self.graph
# was it forked from something?
print("GitHubNeo.updateOrg(): %s" % user)
xxx
print(" bio: %s" % obj.bio)
print(" company: %s" % obj.company)
print(" location: %s" % obj.location)
dumpList(obj, "public_members")
dumpList(obj, "repositories")
return
def _nodeFreshEnough(self, node):
updatedDTStr = node.properties.get("updatedDT")
if updatedDTStr:
age = self._now() - self._parseISODTStr(updatedDTStr)
if age <= self.freshness:
return True
pass
return False
def _getCachedNeighbors(self, node, relationships = None):
nodeType = self.getNodeType(node)
# print(" _getCachedNeighbors(): %-12s %s" % ( nodeType, node.properties["name"] ))
if relationships is None:
#
# list of ( githubSlot, neoRelationLabel )
#
relationships = self._relationships[nodeType]
neoRelationships = []
for relationshipInfo in relationships:
if isinstance(relationshipInfo, tuple):
neoRelationship = relationshipInfo[1]
else:
neoRelationship = relationshipInfo
pass
neoRelationships.append(neoRelationship)
pass
pass
else:
#
# map relationships in to neo relationships
#
neoRelationships = relationships
pass
i = 1
for neoRelationship in neoRelationships:
# print(" neoRelationship: %s" % neoRelationship)
neighbors = []
for rel in node.match(neoRelationship):
if node == rel.start_node:
neighborNode = rel.end_node
else:
neighborNode = rel.start_node
pass
# XXX expensive. we can already know this, from the neoRelationship
neighborNodeType = self.getNodeType(neighborNode)
# yield neighborNodeType, neighborNode.properties["name"]
neighbors.append(( neighborNodeType, neighborNode.properties["name"], 1 ))
i += 1
pass
if neighbors:
print(" %5d %s" % ( len(neighbors), neoRelationship ))
pass
# XXX optional. user may want to sort by something else
# (added date - but that's not supported yet)
neighbors = sorted(neighbors, key = lambda _tuple : _tuple[1])
for neighborTuple in neighbors:
yield neighborTuple
pass
pass
return
def update(self, entitySpecs, numHops = None, relationships = None):
"""update the edges/relationships around the specified node names
creates the nodes if they don't already exist
entitySpecs is list of github names - Repository, User,
Organization. can by type-hinted - org:..., user:...,
repo:..., or else we guess, using the helper
if staleness constraints specified, will use what's in cache
if new enough (to save github points)
TODO: maybe able to specify only certain relationship types to update
"""
if numHops is None:
numHops = 1
pass
hop = 1
# list of entities left to check, and their hop
#
# a seed is at hop 1 (versus 0)
boundary = []
for entitySpec in entitySpecs:
boundary.append(( entitySpec, hop ))
pass
helper = self.githubHelper
freshness = self.freshness
while boundary:
entitySpec, _hop = boundary[0]
boundary = boundary[1:]
print("GitHubNeo.update: %s %5d %5d %s" %
( _hop, len(boundary), helper.checkRatePointsLeft(), entitySpec, ))
nodeType = None
extra = None
if isinstance(entitySpec, tuple):
nodeType = entitySpec[0]
name = entitySpec[1]
if len(entitySpec) > 2:
extra = entitySpec[2]
pass
pass
else:
name = entitySpec
pass
node = self.getOrAddNode(name, nodeType)
nodeType = self.getNodeType(node)
if freshness is not None and self._nodeFreshEnough(node):
# print(" using cached relationships: %s - %s" % ( nodeType, name ))
neighbors = self._getCachedNeighbors(node, relationships = relationships)
else:
githubObj = helper.getGithubObject(name, nodeType)
neighbors = self.updateGithubObj(githubObj, node, relationships = relationships)
pass
# need to drain the stream, even if we don't add them to boundary
neighbors = list(neighbors)
if _hop < numHops:
for _entitySpec in neighbors:
boundary.append(( _entitySpec, _hop + 1 ))
# print(" added to boundary: %s %s" % ( _hop + 1, _entitySpec ))
pass
pass
# print("")
pass
return
pass
| [
"py2neo.Graph",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"py2neo.authenticate",
"datetime.timedelta"
] | [((2428, 2478), 'datetime.timedelta', 'timedelta', ([], {'days': 'days', 'hours': 'hours', 'minutes': 'minutes'}), '(days=days, hours=hours, minutes=minutes)\n', (2437, 2478), False, 'from datetime import timedelta\n'), ((3276, 3327), 'py2neo.authenticate', 'py2neo.authenticate', (['neo_host', 'neo_user', 'neo_passwd'], {}), '(neo_host, neo_user, neo_passwd)\n', (3295, 3327), False, 'import py2neo\n'), ((3357, 3378), 'py2neo.Graph', 'py2neo.Graph', (['neo_url'], {}), '(neo_url)\n', (3369, 3378), False, 'import py2neo\n'), ((5948, 5988), 'datetime.datetime.strptime', 'datetime.strptime', (['dtStr', 'self._dtFormat'], {}), '(dtStr, self._dtFormat)\n', (5965, 5988), False, 'from datetime import datetime\n'), ((5851, 5868), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5866, 5868), False, 'from datetime import datetime\n'), ((13752, 13769), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13767, 13769), False, 'from datetime import datetime\n')] |
from src.dgol_worker.cell_env import CellEnv
ce = CellEnv() | [
"src.dgol_worker.cell_env.CellEnv"
] | [((51, 60), 'src.dgol_worker.cell_env.CellEnv', 'CellEnv', ([], {}), '()\n', (58, 60), False, 'from src.dgol_worker.cell_env import CellEnv\n')] |
from flask import Flask, Response, send_from_directory
import random, time
app = Flask(__name__, static_folder='www')
@app.route('/')
def index():
return ''
@app.route('/stream')
def stream():
def event():
while True:
yield "data: " + random.choice(['a', 'b', 'c', 'd']) + "nn"
with app.app_context():
time.sleep(1)
return Response(event(), mimetype="text/event-stream")
@app.route('/static/<path:path>')
def static_f(path):
return app.send_static_file(path)
if __name__ == '__main__':
app.run(debug=True) | [
"random.choice",
"time.sleep",
"flask.Flask"
] | [((81, 117), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""www"""'}), "(__name__, static_folder='www')\n", (86, 117), False, 'from flask import Flask, Response, send_from_directory\n'), ((318, 331), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (328, 331), False, 'import random, time\n'), ((244, 279), 'random.choice', 'random.choice', (["['a', 'b', 'c', 'd']"], {}), "(['a', 'b', 'c', 'd'])\n", (257, 279), False, 'import random, time\n')] |
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="django-osm-field",
author="<NAME>",
author_email="<EMAIL>",
description="Django OpenStreetMap Field",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MarkusH/django-osm-field",
project_urls={
"CI": "https://github.com/MarkusH/django-osm-field/actions", # noqa
"Changelog": "https://github.com/MarkusH/django-osm-field/blob/main/CHANGELOG.md", # noqa
"Issues": "https://github.com/MarkusH/django-osm-field/issues", # noqa
},
packages=setuptools.find_packages(
exclude=[
"*.example",
"*.example.*",
"example.*",
"example",
"*.tests",
"*.tests.*",
"tests.*",
"tests",
],
),
include_package_data=True,
install_requires=["Django>=2.2"],
extras_require={
"dev": ["pre-commit"],
"docs": [
"Django",
"sphinx_rtd_theme",
"Sphinx>=3.0,<3.4",
],
"test": [
"coverage[toml]>=5,<6",
"Django",
],
},
setup_requires=["setuptools_scm>=5<6"],
use_scm_version=True,
keywords="OpenStreetMap, OSM, Django, Geo, Geoposition",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
python_requires=">=3.5",
)
| [
"setuptools.find_packages"
] | [((687, 821), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['*.example', '*.example.*', 'example.*', 'example', '*.tests', '*.tests.*',\n 'tests.*', 'tests']"}), "(exclude=['*.example', '*.example.*', 'example.*',\n 'example', '*.tests', '*.tests.*', 'tests.*', 'tests'])\n", (711, 821), False, 'import setuptools\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# ------------------------------------------------------------------------------
"""
This script processes all the files listed in the archive register. A checksum
is computed, the file copied to Arkivum, and a checksum made to confirm
the copy. The original is removed when the file has been confirmed as ingested
by Arkivum and a symbolic link is made from the original location to the
archived file.
"""
import sys
import os
import shutil
import time
import configparser
import hashlib
import requests
import urllib
# Get rid of the Unverified HTTPS request warning
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
except:
pass
from stat import *
from zlib import adler32
import re
import platform
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from optparse import OptionParser, OptionGroup
import gdsc.omero
###############################################################################
def init_options():
"""Initialise the program options"""
parser = OptionParser(usage="usage: %prog [options] list",
description="Program to archive files to Arkivum",
add_help_option=True, version="%prog 1.0")
group = OptionGroup(parser, "Archive")
group.add_option("--archive_log", dest="archive_log",
default=gdsc.omero.ARCHIVE_LOG,
help="Directory for archive logs [%default]")
group.add_option("--archive_job", dest="archive_job",
default=gdsc.omero.ARCHIVE_JOB,
help="Directory for archive jobs [%default]")
group.add_option("--arkivum_root", dest="arkivum_root",
default=gdsc.omero.ARKIVUM_ROOT,
help="Arkivum root (for the mounted appliance) [%default]")
group.add_option("--arkivum_path", dest="arkivum_path",
default=gdsc.omero.ARKIVUM_PATH,
help="Arkivum path (directory to copy files) [%default]")
group.add_option("--to_archive", dest="to_archive",
default=gdsc.omero.TO_ARCHIVE_REGISTER,
help="To-Archive register [%default]")
group.add_option("--archived", dest="archived",
default=gdsc.omero.ARCHIVED_REGISTER,
help="Archived register [%default]")
parser.add_option_group(group)
group = OptionGroup(parser, "Arkivum")
# Decide if this should be:
# amber (copied to data centres)
# green (tape sent to escrow)
group.add_option("--state", dest="state",
default='green',
help="Replication state for deletion [%default]")
parser.add_option_group(group)
return parser
###############################################################################
def log(msg):
"""
Print a message
@param msg: The message
"""
print(msg)
def error(msg):
"""
Print an error message
@param msg: The message
"""
print("ERROR:", msg)
def fatal(msg):
"""
Print a fatal error
@param msg: The message
"""
print("FATAL:", msg)
def die(msg):
"""
Print a fatal error then exit
@param msg: The message
"""
fatal(msg)
sys.exit(1)
###############################################################################
def validate_email(userEmail):
"""
Checks that a valid email address is present
@param userEmail: The e-mail address
"""
# Validate with a regular expression. Not perfect but it will do.
return re.match("^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$",
userEmail)
def send_email(userEmail, job_file, result):
"""
E-mail the result to the user.
@param userEmail: The e-mail address
@param job_file : The job file
@param result : The result status
"""
send_to = []
# Comment this out to prevent admin receiving all the emails
send_to.extend(gdsc.omero.ADMIN_EMAILS)
if validate_email(userEmail):
send_to.append(userEmail)
if not send_to:
return
name = os.path.basename(job_file)
msg = MIMEMultipart()
msg['From'] = gdsc.omero.ADMIN_EMAIL
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = '[OMERO Job] Archive Job : ' + result
msg.attach(MIMEText("""OMERO Archive Job : %s
Result : %s
Your archive job file is attached.
---
OMERO @ %s """ % (name, result, platform.node())))
with open(job_file, "rb") as f:
name = name + '.txt'
part = MIMEApplication(
f.read(),
Name=name
)
part['Content-Disposition'] = ('attachment; filename="%s"' % name)
msg.attach(part)
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(gdsc.omero.ADMIN_EMAIL, send_to, msg.as_string())
smtpObj.quit()
def email_results(userEmail, job_file, result):
"""
E-mail the result to the user.
@param userEmail: The e-mail address
@param job_file : The job file
@param result : The result status
"""
send_email(userEmail, job_file, result)
def addState(state, size):
"""
Increment the count of files in their given state
@param state: The state
@param size: The byte size of the file
"""
global state_count, state_size
state_count[state] = get_key_number(state_count, state) + 1
state_size[state] = get_key_number(state_size, state) + size
def get_key(j, key):
"""
Get the key value from the dictionary object
@param j: The dictionary object
@param key: The key value (or empty string)
"""
return j[key] if key in j else '';
def get_key_number(j, key):
"""
Get the key value from the dictionary object
@param j: The dictionary object
@param key: The key value (or zero)
"""
return j[key] if key in j else 0;
def get_info(rel_path):
"""
Get the file information from the Arkivum REST API
@param rel_path: The path to the file on the Arkivum server
"""
# Do not verify the SSL certificate
r = requests.get('https://'+
gdsc.omero.ARKIVUM_SERVER+
'/api/2/files/fileInfo/'+urllib.quote(rel_path),
verify=False)
# What to do here? Arkivum has a 10 minute delay
# between copying a file and the ingest starting. So it may
# not show in the API just yet.
if r.status_code == 200:
try:
return r.json()
except:
pass
else:
error("REST API response code: "+str(r.status_code))
return {}
def get_option(config, option, section = gdsc.omero.ARK_ARKIVUM_ARCHIVER):
"""
Get the option from the Arkivum section (or return None)
@param config: The ConfigParser
@param option: The option
@param section: The section
"""
if config.has_option(section, option):
return config.get(section, option)
return None
def process(path):
"""
Archive the file
@param path: The file path
"""
global options, state_count, state_size
log("Processing file " + path)
if os.path.islink(path):
warn("Skipping symlink: %s" % path)
return gdsc.omero.JOB_IGNORE
r = os.stat(path)
if not S_ISREG(r.st_mode):
raise Exception("File does not exist: %s" % path)
# Record steps to the .ark file
ark_file = gdsc.omero.get_ark_path(options.archive_log, path)
if not os.path.isfile(ark_file):
raise Exception("Missing archive record file: %s" % ark_file)
log(" Archive record = " + ark_file)
config = configparser.RawConfigParser()
config.read(ark_file)
if not config.has_section(gdsc.omero.ARK_ARKIVUM_ARCHIVER):
config.add_section(gdsc.omero.ARK_ARKIVUM_ARCHIVER)
archived = False
try:
# Create the path in the archive
full_path = path
drive, path = os.path.splitdrive(path)
path, filename = os.path.split(path)
# Check path is relative (so can be joined)
index = 0
while os.path.isabs(path[index:]):
index = index + 1
directory = os.path.join(options.arkivum_root,
options.arkivum_path, path[index:])
if not os.path.exists(directory):
os.makedirs(directory)
# Checksum the file & copy to the archive
ark_path = os.path.join(directory, filename)
log(" Archive path = " + ark_path)
# Store the relative path to the file from the base Arkivum directory
rel_path = os.path.join(options.arkivum_path, path[index:], filename)
# Use the Arkivum default checksums; MD5 and Adler32
md5Digest = get_option(config, 'md5')
adler32Digest = get_option(config, 'adler32')
size = get_option(config, 'size')
if size:
try:
size = int(size)
except:
pass
# Store when the file was copied
file_copied = False
try:
timestamp = float(get_option(config, 'timestamp'))
except:
timestamp = 0
if not (os.path.exists(ark_path)):
# Copy to Arkivum and checksum
log(" Copying to Arkivum")
md5Hasher = hashlib.md5()
adler32sum = 1
size = 0
blocksize = 65536
with open(full_path, 'rb') as f:
with open(ark_path, 'wb') as f2:
buf = f.read(blocksize)
while len(buf) > 0:
size = size + len(buf)
f2.write(buf)
md5Hasher.update(buf)
adler32sum = adler32(buf, adler32sum)
buf = f.read(blocksize)
md5Digest = md5Hasher.hexdigest()
adler32Digest = str(adler32sum & 0xffffffff)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'md5', md5Digest)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'adler32',
adler32Digest)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'size', str(size))
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'path', ark_path)
r = os.stat(ark_path)
timestamp = r.st_mtime
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'copied',
time.ctime(timestamp))
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'timestamp',
str(timestamp))
file_copied = True
elif not (size and md5Digest and adler32Digest):
# This occurs when the path to Arkivum already exists.
# (Possible if the first copy failed part way through.)
# Compute the checksum on the original file so the script will
# error later if Arkivum has a bad copy.
log(" Computing checksums")
md5Hasher = hashlib.md5()
adler32sum = 1
size = 0
blocksize = 65536
with open(full_path, 'rb') as f:
buf = f.read(blocksize)
while len(buf) > 0:
size = size + len(buf)
md5Hasher.update(buf)
adler32sum = adler32(buf, adler32sum)
buf = f.read(blocksize)
md5Digest = md5Hasher.hexdigest()
adler32Digest = str(adler32sum & 0xffffffff)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'md5', md5Digest)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'adler32',
adler32Digest)
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'size', str(size))
config.set(gdsc.omero.ARK_FILE_ARCHIVER, 'path', ark_path)
# Report the checksums
log(" MD5 = " + md5Digest)
log(" Adler32 = " + adler32Digest)
log(" Size = %d" % size)
# Checksum the archive copy
log(" Verifying transfer ...")
# Connect to the Arkivum server and get the file information
info = get_info(rel_path)
# Arkivum has a 10 minute ingest delay which means that the API
# may not have a response directly after a file copy. In this case
# it is fine to just return Running. Re-running this later should find
# the file.
if (len(info) == 0):
msg = "No file information available from Arkivum"
if (file_copied or
time.time() - timestamp < 600):
# Initial copy / less than 10 minutes
error(msg)
addState('unknown', size)
return gdsc.omero.JOB_RUNNING
else:
# Arkivum should have responded
raise Exception(msg)
ingestState = get_key(info, 'ingestState')
log(" Ingest state = " + ingestState)
if ingestState != 'FINAL':
# Wait until Arkivum has processed the file
msg = "Waiting for ingest to complete"
if (file_copied or
time.time() - timestamp < 6000):
# Initial copy / less than 100 minutes
log(" " + msg)
else:
# Arkivum should have ingested by now so show an error
error(msg)
addState('initial', size)
return gdsc.omero.JOB_RUNNING
size2 = get_key(info, 'size')
# Compare size
if (size != size2):
raise Exception("Archived file has different size: %d != %d" %
(size, size2))
log(" Size OK")
# Compare checksums
md5Digest2 = get_key(info, 'md5')
# Note:
# The adler32 value is used by Arkivum but not available via the API.
# For now we will just store it but not check it.
if (md5Digest != md5Digest2):
raise Exception("Archived file has different checksum")
log(" MD5 OK")
# Get the archive state
state = get_key(info, 'replicationState')
log(" Arkivum replication state = " + state)
# TODO? - log when the file changes state, e.g. red > amber > green
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'State', state)
# summarise the amount of data in each replication state
addState(state, size)
if state == options.state:
# Delete the original if the archiving is complete
os.remove(full_path)
status = "Archived"
config.set(gdsc.omero.ARK_ARKIVUM_ARCHIVER, 'Archived',
time.ctime())
archived = True
# Create a symlink to the Arkivum location allowing access
# (albeit at a reduced speed if the file is not on the appliance)
# This is only available on unix
try:
os.symlink(ark_path, full_path)
log(" Created link from original path to Arkivum")
except:
pass
else:
status = "Pending"
log(" Status = " + status)
finally:
# Record the stage we reached
with open(ark_file, 'w') as f:
config.write(f)
if archived:
return gdsc.omero.JOB_FINISHED
return gdsc.omero.JOB_RUNNING
def process_job(job_file):
    """
    Process the archive job file.

    Reads the job configuration, processes every file that is still in the
    running state, records the per-file outcome and, when the whole job has
    finished or errored, e-mails the result and moves the job file to the
    finished/error folder.

    @param job_file: The job file path
    """
    global options, file_status, paths
    log("Processing job " + job_file)
    # Open the job file
    job = configparser.RawConfigParser()
    # Preserve the case of option names (they are file system paths)
    job.optionxform = lambda option: option
    job.read(job_file)
    # Clear previous job errors
    if job.has_option(gdsc.omero.JOB_INFO, 'error'):
        job.remove_option(gdsc.omero.JOB_INFO, 'error')
    # Count the number of files to process
    size = 0
    for (path, status) in job.items(gdsc.omero.JOB_FILES):
        if path in file_status:
            # This has already been done
            continue
        if status == gdsc.omero.JOB_RUNNING:
            size = size + 1
    if size:
        job.set(gdsc.omero.JOB_INFO, 'status', gdsc.omero.JOB_RUNNING)
    # Process the files
    log("Processing %d file%s" % (size, '' if size == 1 else 's'))
    error_flag = False
    running = 0
    for (path, status) in job.items(gdsc.omero.JOB_FILES):
        new_status = file_status.get(path)
        if new_status:
            # To prevent double processing of files, update the status
            # if this is not the first time we see this file.
            #
            # Note: this appears to be poor management of the status as it is
            # replicated through all job files which must be kept in sync.
            # However the status can be determined in this script in the
            # process() method. This allows a job file to have its status set
            # to running for all files to allow restarting the job.
            job.set(gdsc.omero.JOB_FILES, path, new_status)
            if new_status == gdsc.omero.JOB_RUNNING:
                # Still running (already counted by an earlier job this run)
                running = running + 1
        elif status == gdsc.omero.JOB_RUNNING:
            # BUG FIX: the original code tested 'status == JOB_RUNNING'
            # twice (an if followed by an identical elif), so this
            # first-time branch was unreachable and files were never
            # processed. The counter above now only applies to files seen
            # earlier in this run; fresh running files fall through here.
            try:
                # The process method returns the status or throws an exception
                status = process(path)
                if status == gdsc.omero.JOB_FINISHED:
                    # This has been archived.
                    # Build a list of paths that have been archived.
                    paths.append(path)
                else:
                    # This is still running
                    running = running + 1
            except Exception as e:
                status = gdsc.omero.JOB_ERROR
                # Record the error in the job file
                job.set(gdsc.omero.JOB_INFO, 'error', str(e))
                error("An error occurred: %s" % e)
            # Record the status of this file the first time it is processed
            file_status[path] = status
            # Record the status change in the job file
            if status != gdsc.omero.JOB_RUNNING:
                job.set(gdsc.omero.JOB_FILES, path, status)
            if status == gdsc.omero.JOB_ERROR:
                error_flag = True
                break
    # If finished or error then move the job file.
    # (Renamed from 'dir' to avoid shadowing the builtin.)
    done_dir = ''
    email_address = ''
    if error_flag:
        done_dir = os.path.join(options.archive_job, gdsc.omero.JOB_ERROR)
        # If an error then only email the admin
    elif running == 0:
        done_dir = os.path.join(options.archive_job, gdsc.omero.JOB_FINISHED)
        # Only email the user when finished
        email_address = get_option(job, 'email', gdsc.omero.JOB_INFO)
    if done_dir:
        # This is complete
        status = os.path.basename(done_dir)
        job.set(gdsc.omero.JOB_INFO, 'complete', time.strftime("%c"))
        job.set(gdsc.omero.JOB_INFO, 'status', status)
    # Save changes to the job file
    with open(job_file, 'w') as f:
        job.write(f)
    if done_dir:
        # This is complete. E-mail the job file to the user/admin
        email_results(email_address, job_file, status)
        # Move to the processed folder
        log("Moving %s to %s" % (job_file, done_dir))
        shutil.move(job_file, done_dir)
def check_dir(path, carp=True):
    """
    Check that *path* is an existing directory.

    @param path: The path to verify
    @param carp: When True, raise an Exception if the directory is
        missing; when False, just report it via error()
    """
    if os.path.isdir(path):
        return
    message = "Path is not a directory: %s" % path
    if carp:
        raise Exception(message)
    error(message)
def banner(title):
    """
    Write a banner line above and below the given title.

    @param title: the banner title
    """
    width = len(title)
    line = '-=' * int(width / 2)
    if len(line) < width:
        # Odd-length titles need one extra char to match the width
        line = line + '-'
    log(line)
    log(title)
    log(line)
# Gather our code in a main() function
def main():
    """Entry point for the Arkivum archiver.

    Parses options, takes a PID lock, processes all running job files,
    updates the to-archive/archived registers and prints a summary of the
    Arkivum replication states.
    """
    parser = init_options()
    global options, state_count, state_size, file_status, paths
    # Per-run accumulators shared with process()/process_job()
    state_count = {}
    state_size = {}
    file_status = {}
    paths = []
    (options, args) = parser.parse_args()
    try:
        # Prevent concurrent runs: PID lock file lives in the job directory
        pid_file = gdsc.omero.PIDFile(
            os.path.join(options.archive_job,
                         os.path.basename(__file__) + '.pid'))
    except Exception as e:
        die("Cannot start process: %s" % e)
    banner("Archive Files to Arkivum")
    try:
        check_dir(options.archive_log)
        check_dir(options.arkivum_root)
        check_dir(os.path.join(options.arkivum_root, options.arkivum_path))
        check_dir(options.archive_job)
        check_dir(os.path.join(options.archive_job, gdsc.omero.JOB_RUNNING))
        check_dir(os.path.join(options.archive_job, gdsc.omero.JOB_FINISHED))
        check_dir(os.path.join(options.archive_job, gdsc.omero.JOB_ERROR))
        # Get the running job files
        job_dir = os.path.join(options.archive_job, gdsc.omero.JOB_RUNNING)
        # next(..., default) guards against job_dir being unreadable
        _, _, filenames = next(os.walk(job_dir), (None, None, []))
        n = len(filenames)
        log("Processing %d job%s" % (n, gdsc.omero.pleural(n)))
        for path in filenames:
            process_job(os.path.join(job_dir, path))
        # Open the registers
        register = gdsc.omero.Register(options.to_archive, False)
        archived = gdsc.omero.Register(options.archived)
        # Add all running files to the to_archive register.
        # Note: If the script errors part way through the jobs then this
        # will be incomplete. The register is only used for reporting so
        # this is not a blocker.
        # TODO - create a script that can create the to_archive register from
        # the currently running job files
        running = []
        for (k, v) in file_status.items():
            if v == gdsc.omero.JOB_RUNNING:
                running.append(k)
        register.save(running)
        # Add archived files to the archived register
        size = len(paths)
        if size:
            log("Archived %d file%s" % (size, '' if size == 1 else 's'))
            archived.add_list(paths)
        # Summarise the amount of data in each replication state
        banner("Replication State Summary")
        for key in state_count:
            # NOTE(review): local name 'bytes' shadows the builtin
            bytes = state_size[key]
            log("State %s : %d file%s : %d byte%s (%s)" % (key,
                state_count[key], gdsc.omero.pleural(state_count[key]),
                bytes, gdsc.omero.pleural(bytes), gdsc.omero.convert(bytes)))
    except Exception as e:
        fatal("An error occurred: %s" % e)
    # Release the PID lock (NOTE(review): fatal() may exit before this runs)
    pid_file.delete()
# Standard boilerplate: run the archiver when executed as a script
# (not when imported as a module).
if __name__ == '__main__':
    main()
| [
"platform.node",
"urllib.quote",
"sys.exit",
"os.path.islink",
"os.walk",
"os.remove",
"os.path.exists",
"time.ctime",
"os.path.splitdrive",
"shutil.move",
"zlib.adler32",
"os.path.split",
"os.path.isdir",
"optparse.OptionGroup",
"requests.packages.urllib3.disable_warnings",
"os.path.i... | [((1404, 1470), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (1446, 1470), False, 'import requests\n'), ((1981, 2134), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""usage: %prog [options] list"""', 'description': '"""Program to archive files to Arkivum"""', 'add_help_option': '(True)', 'version': '"""%prog 1.0"""'}), "(usage='usage: %prog [options] list', description=\n 'Program to archive files to Arkivum', add_help_option=True, version=\n '%prog 1.0')\n", (1993, 2134), False, 'from optparse import OptionParser, OptionGroup\n'), ((2190, 2220), 'optparse.OptionGroup', 'OptionGroup', (['parser', '"""Archive"""'], {}), "(parser, 'Archive')\n", (2201, 2220), False, 'from optparse import OptionParser, OptionGroup\n'), ((3359, 3389), 'optparse.OptionGroup', 'OptionGroup', (['parser', '"""Arkivum"""'], {}), "(parser, 'Arkivum')\n", (3370, 3389), False, 'from optparse import OptionParser, OptionGroup\n'), ((4212, 4223), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4220, 4223), False, 'import sys\n'), ((4524, 4596), 're.match', 're.match', (['"""^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$"""', 'userEmail'], {}), "('^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$', userEmail)\n", (4532, 4596), False, 'import re\n'), ((5072, 5098), 'os.path.basename', 'os.path.basename', (['job_file'], {}), '(job_file)\n', (5088, 5098), False, 'import os\n'), ((5110, 5125), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (5123, 5125), False, 'from email.mime.multipart import MIMEMultipart\n'), ((5183, 5207), 'email.utils.COMMASPACE.join', 'COMMASPACE.join', (['send_to'], {}), '(send_to)\n', (5198, 5207), False, 'from email.utils import COMMASPACE, formatdate\n'), ((5226, 5252), 'email.utils.formatdate', 'formatdate', ([], {'localtime': '(True)'}), '(localtime=True)\n', (5236, 5252), False, 'from email.utils import 
COMMASPACE, formatdate\n'), ((5733, 5758), 'smtplib.SMTP', 'smtplib.SMTP', (['"""localhost"""'], {}), "('localhost')\n", (5745, 5758), False, 'import smtplib\n'), ((8121, 8141), 'os.path.islink', 'os.path.islink', (['path'], {}), '(path)\n', (8135, 8141), False, 'import os\n'), ((8233, 8246), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (8240, 8246), False, 'import os\n'), ((8601, 8631), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (8629, 8631), False, 'import configparser\n'), ((16534, 16564), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (16562, 16564), False, 'import configparser\n'), ((8450, 8474), 'os.path.isfile', 'os.path.isfile', (['ark_file'], {}), '(ark_file)\n', (8464, 8474), False, 'import os\n'), ((8902, 8926), 'os.path.splitdrive', 'os.path.splitdrive', (['path'], {}), '(path)\n', (8920, 8926), False, 'import os\n'), ((8952, 8971), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (8965, 8971), False, 'import os\n'), ((9057, 9084), 'os.path.isabs', 'os.path.isabs', (['path[index:]'], {}), '(path[index:])\n', (9070, 9084), False, 'import os\n'), ((9137, 9207), 'os.path.join', 'os.path.join', (['options.arkivum_root', 'options.arkivum_path', 'path[index:]'], {}), '(options.arkivum_root, options.arkivum_path, path[index:])\n', (9149, 9207), False, 'import os\n'), ((9388, 9421), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (9400, 9421), False, 'import os\n'), ((9565, 9623), 'os.path.join', 'os.path.join', (['options.arkivum_path', 'path[index:]', 'filename'], {}), '(options.arkivum_path, path[index:], filename)\n', (9577, 9623), False, 'import os\n'), ((19843, 19898), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_ERROR'], {}), '(options.archive_job, gdsc.omero.JOB_ERROR)\n', (19855, 19898), False, 'import os\n'), ((20214, 20235), 'os.path.basename', 'os.path.basename', (['dir'], {}), '(dir)\n', 
(20230, 20235), False, 'import os\n'), ((20684, 20710), 'shutil.move', 'shutil.move', (['job_file', 'dir'], {}), '(job_file, dir)\n', (20695, 20710), False, 'import shutil\n'), ((20900, 20919), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (20913, 20919), False, 'import os\n'), ((22346, 22403), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_RUNNING'], {}), '(options.archive_job, gdsc.omero.JOB_RUNNING)\n', (22358, 22403), False, 'import os\n'), ((7191, 7213), 'urllib.quote', 'urllib.quote', (['rel_path'], {}), '(rel_path)\n', (7203, 7213), False, 'import urllib\n'), ((9256, 9281), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (9270, 9281), False, 'import os\n'), ((9295, 9317), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9306, 9317), False, 'import os\n'), ((10141, 10165), 'os.path.exists', 'os.path.exists', (['ark_path'], {}), '(ark_path)\n', (10155, 10165), False, 'import os\n'), ((10277, 10290), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (10288, 10290), False, 'import hashlib\n'), ((11238, 11255), 'os.stat', 'os.stat', (['ark_path'], {}), '(ark_path)\n', (11245, 11255), False, 'import os\n'), ((15460, 15480), 'os.remove', 'os.remove', (['full_path'], {}), '(full_path)\n', (15469, 15480), False, 'import os\n'), ((19984, 20042), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_FINISHED'], {}), '(options.archive_job, gdsc.omero.JOB_FINISHED)\n', (19996, 20042), False, 'import os\n'), ((20285, 20304), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (20298, 20304), False, 'import time\n'), ((21964, 22020), 'os.path.join', 'os.path.join', (['options.arkivum_root', 'options.arkivum_path'], {}), '(options.arkivum_root, options.arkivum_path)\n', (21976, 22020), False, 'import os\n'), ((22079, 22136), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_RUNNING'], {}), '(options.archive_job, 
gdsc.omero.JOB_RUNNING)\n', (22091, 22136), False, 'import os\n'), ((22156, 22214), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_FINISHED'], {}), '(options.archive_job, gdsc.omero.JOB_FINISHED)\n', (22168, 22214), False, 'import os\n'), ((22234, 22289), 'os.path.join', 'os.path.join', (['options.archive_job', 'gdsc.omero.JOB_ERROR'], {}), '(options.archive_job, gdsc.omero.JOB_ERROR)\n', (22246, 22289), False, 'import os\n'), ((22435, 22451), 'os.walk', 'os.walk', (['job_dir'], {}), '(job_dir)\n', (22442, 22451), False, 'import os\n'), ((11377, 11398), 'time.ctime', 'time.ctime', (['timestamp'], {}), '(timestamp)\n', (11387, 11398), False, 'import time\n'), ((11928, 11941), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (11939, 11941), False, 'import hashlib\n'), ((15602, 15614), 'time.ctime', 'time.ctime', ([], {}), '()\n', (15612, 15614), False, 'import time\n'), ((15872, 15903), 'os.symlink', 'os.symlink', (['ark_path', 'full_path'], {}), '(ark_path, full_path)\n', (15882, 15903), False, 'import os\n'), ((22619, 22646), 'os.path.join', 'os.path.join', (['job_dir', 'path'], {}), '(job_dir, path)\n', (22631, 22646), False, 'import os\n'), ((5447, 5462), 'platform.node', 'platform.node', ([], {}), '()\n', (5460, 5462), False, 'import platform\n'), ((21708, 21734), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (21724, 21734), False, 'import os\n'), ((10715, 10739), 'zlib.adler32', 'adler32', (['buf', 'adler32sum'], {}), '(buf, adler32sum)\n', (10722, 10739), False, 'from zlib import adler32\n'), ((12259, 12283), 'zlib.adler32', 'adler32', (['buf', 'adler32sum'], {}), '(buf, adler32sum)\n', (12266, 12283), False, 'from zlib import adler32\n'), ((13472, 13483), 'time.time', 'time.time', ([], {}), '()\n', (13481, 13483), False, 'import time\n'), ((14064, 14075), 'time.time', 'time.time', ([], {}), '()\n', (14073, 14075), False, 'import time\n')] |
import torch.nn as nn
import numpy as np
import torch
import os
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from yolov3 import add_yolov3_config
def load_darknet_weights(weights, modules):
    """Load darknet-format binary weights into the given modules.

    The darknet file starts with a 3 x int32 version header and a 1 x int64
    image counter, followed by raw float32 parameters. For each Conv2d
    without a bias, the next module in *modules* is assumed to be its
    BatchNorm2d and receives bias/weight/running_mean/running_var; Conv2d
    layers with a bias load the bias followed by the conv weights.
    """
    with open(weights, 'rb') as fh:
        # (int32) version info: major, minor, revision
        version = np.fromfile(fh, dtype=np.int32, count=3)
        # (int64) number of images seen during training
        seen = np.fromfile(fh, dtype=np.int64, count=1)
        # the rest are weights
        weights = np.fromfile(fh, dtype=np.float32)
    print(version, seen)
    print(weights.shape)
    # Pair each conv with its batch-norm (or None when the conv has a bias)
    # and total up the number of parameters the file should contain.
    pairs = []
    total_params = 0
    for idx, mod in enumerate(modules):
        if not isinstance(mod, nn.Conv2d):
            continue
        if mod.bias is not None:
            pairs.append((mod, None))
            total_params += mod.weight.numel() + mod.bias.numel()
        else:
            follower = modules[idx + 1]
            pairs.append((mod, follower))
            # bn contributes bias, weight, running_mean and running_var
            total_params += mod.weight.numel() + follower.bias.numel() * 4
    print("param_count:", total_params)
    offset = 0
    for conv, bn in pairs:
        out_channels = conv.weight.size(0)
        if bn is not None:
            assert bn.bias.size()[0] == out_channels, "conv and bn is not paired"
            # Darknet stores bn params in this fixed order
            for tensor in (bn.bias, bn.weight, bn.running_mean, bn.running_var):
                chunk = torch.from_numpy(weights[offset:offset + out_channels])
                tensor.data.copy_(chunk.view_as(tensor))
                offset += out_channels
        else:
            # Load conv. bias
            chunk = torch.from_numpy(weights[offset:offset + out_channels])
            conv.bias.data.copy_(chunk.view_as(conv.bias))
            offset += out_channels
        # Load conv. weights
        n_weights = conv.weight.numel()
        chunk = torch.from_numpy(weights[offset:offset + n_weights])
        conv.weight.data.copy_(chunk.view_as(conv.weight))
        offset += n_weights
    print("parsed:", offset)
    print("succeed.")
def setup(args):
    """
    Create configs and perform basic setups.

    Builds a detectron2 config node, registers the YOLOv3-specific keys,
    then merges the config file and command-line overrides before
    freezing the result.

    @param args: parsed command-line args (uses ``config_file`` and ``opts``)
    @return: the frozen config node
    """
    cfg = get_cfg()
    # Register YOLOv3-specific config keys before merging user values
    add_yolov3_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Freeze so later code cannot mutate the config accidentally
    cfg.freeze()
    # Standard detectron2 logging / output-dir setup
    default_setup(cfg, args)
    return cfg
def main(args):
    """Convert darknet weights into a detectron2/PyTorch checkpoint.

    Builds the model from the config, loads the darknet binary weights
    into its conv/bn modules and saves the state dict as ``yolov3.pth``.

    @param args: parsed arguments (``initial_weights``, ``output_dir``, ...)
    """
    cfg = setup(args)
    model = DefaultTrainer.build_model(cfg)
    modules = model.get_conv_bn_modules()
    for m in modules:
        print(m.weight.size())
    load_darknet_weights(args.initial_weights, modules)
    # Robustness fix: torch.save fails if the output directory is missing.
    os.makedirs(args.output_dir, exist_ok=True)
    save_path = os.path.join(args.output_dir, "yolov3.pth")
    torch.save(model.state_dict(), save_path)
    print("model save to", save_path)
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--initial_weights", metavar="FILE", help="path to initial weights file")
parser.add_argument("--output_dir", help="dir to save weights file")
args = parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| [
"numpy.fromfile",
"yolov3.add_yolov3_config",
"detectron2.config.get_cfg",
"detectron2.engine.DefaultTrainer.build_model",
"os.path.join",
"torch.from_numpy",
"detectron2.engine.launch",
"detectron2.engine.default_setup",
"detectron2.engine.default_argument_parser"
] | [((2929, 2938), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2936, 2938), False, 'from detectron2.config import get_cfg\n'), ((2943, 2965), 'yolov3.add_yolov3_config', 'add_yolov3_config', (['cfg'], {}), '(cfg)\n', (2960, 2965), False, 'from yolov3 import add_yolov3_config\n'), ((3064, 3088), 'detectron2.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (3077, 3088), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3156, 3187), 'detectron2.engine.DefaultTrainer.build_model', 'DefaultTrainer.build_model', (['cfg'], {}), '(cfg)\n', (3182, 3187), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3355, 3398), 'os.path.join', 'os.path.join', (['args.output_dir', '"""yolov3.pth"""'], {}), "(args.output_dir, 'yolov3.pth')\n", (3367, 3398), False, 'import os\n'), ((3525, 3550), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (3548, 3550), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3795, 3929), 'detectron2.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (3801, 3929), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((452, 491), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(3)'}), '(f, dtype=np.int32, count=3)\n', (463, 491), True, 'import numpy as np\n'), ((563, 602), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int64', 'count': '(1)'}), '(f, dtype=np.int64, count=1)\n', (574, 602), True, 'import numpy as np\n'), ((652, 684), 'numpy.fromfile', 
'np.fromfile', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (663, 684), True, 'import numpy as np\n'), ((2664, 2706), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_w]'], {}), '(weights[ptr:ptr + num_w])\n', (2680, 2706), False, 'import torch\n'), ((1672, 1720), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (1688, 1720), False, 'import torch\n'), ((1846, 1894), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (1862, 1894), False, 'import torch\n'), ((2031, 2079), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2047, 2079), False, 'import torch\n'), ((2228, 2276), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2244, 2276), False, 'import torch\n'), ((2442, 2490), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2458, 2490), False, 'import torch\n')] |
"""This module contains script entrypoints for shreddit.
"""
import argparse
import yaml
import logging
import os
import pkg_resources
from shreddit import default_config
from shreddit.shredder import Shredder
CONFIG_FILE_PATH = "/app/config/shreddit.yml"
def generate_empty_config(path: str):
    """Write the bundled example configuration to *path*."""
    print("Writing shreddit.yml file...")
    template = pkg_resources.resource_string("shreddit", "shreddit.yml.example")
    with open(path, "wb") as handle:
        handle.write(template)
def main():
    """Load the user configuration, apply overrides and run the shredder.

    If no config file exists yet, an example one is generated and the
    program returns so the user can edit it first.
    """
    if not os.path.isfile(CONFIG_FILE_PATH):
        print("No shreddit configuration file was found or provided.")
        generate_empty_config(CONFIG_FILE_PATH)
        return
    with open(CONFIG_FILE_PATH) as fh:
        # Robustness fix: yaml.safe_load returns None for an empty file,
        # which would crash the membership test below.
        user_config = yaml.safe_load(fh) or {}
    # Not doing a simple update() here because it's preferable to only set
    # attributes that are "whitelisted" as configuration options in the
    # form of default values.
    for option in default_config:
        if option in user_config:
            default_config[option] = user_config[option]
    shredder = Shredder(default_config)
    shredder.shred()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Shreddit aborted by user")
quit()
| [
"os.path.isfile",
"shreddit.shredder.Shredder",
"yaml.safe_load",
"pkg_resources.resource_string"
] | [((1068, 1092), 'shreddit.shredder.Shredder', 'Shredder', (['default_config'], {}), '(default_config)\n', (1076, 1092), False, 'from shreddit.shredder import Shredder\n'), ((487, 519), 'os.path.isfile', 'os.path.isfile', (['CONFIG_FILE_PATH'], {}), '(CONFIG_FILE_PATH)\n', (501, 519), False, 'import os\n'), ((896, 914), 'yaml.safe_load', 'yaml.safe_load', (['fh'], {}), '(fh)\n', (910, 914), False, 'import yaml\n'), ((395, 460), 'pkg_resources.resource_string', 'pkg_resources.resource_string', (['"""shreddit"""', '"""shreddit.yml.example"""'], {}), "('shreddit', 'shreddit.yml.example')\n", (424, 460), False, 'import pkg_resources\n')] |
from PyPDF3 import PdfFileReader
class BNPConverter:
    """Extracts text lines from a BNP statement PDF."""

    def __init__(self, input_file, start_number=1):
        self.input_file = input_file
        self.start_number = start_number

    def get_text_lines(self):
        """Return every text line from every page of the PDF."""
        reader = PdfFileReader(self.input_file)
        lines = []
        for page_index in range(reader.getNumPages()):
            page_text = reader.getPage(page_index).extractText()
            lines.extend(page_text.split('\n'))
        return lines
| [
"PyPDF3.PdfFileReader"
] | [((235, 265), 'PyPDF3.PdfFileReader', 'PdfFileReader', (['self.input_file'], {}), '(self.input_file)\n', (248, 265), False, 'from PyPDF3 import PdfFileReader\n')] |
from flask import Blueprint
from flask import g
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from werkzeug.utils import secure_filename
from bson import ObjectId
from blog.auth import login_required
from blog.db import get_db
bp = Blueprint("user", __name__)
@bp.route("/profile")
@login_required
def profile():
return render_template("user/profile.html")
@bp.route("/posts-list/")
@login_required
def posts_list():
db = get_db()
posts = db.post.find({"author_id": ObjectId(g.user["_id"])})
posts = [post for post in posts]
return render_template("user/post_list.html", posts=posts)
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create_post():
if request.method == "POST":
title = request.form.get('title')
content = request.form.get('content')
category = request.form.get('category')
tags = request.form.getlist('tags')
print(tags)
db = get_db()
mytags = list(db.tag.find())
mytags = [mytag['name'] for mytag in mytags]
print(mytags)
for tag in tags:
if tag not in mytags:
print(tag)
db.tag.insert_one(
{"name": tag})
activition = request.form.get('activition')
f = request.files.get('image')
if f:
fname = secure_filename(f.filename)
f.save('blog/static/media/' + fname)
image = fname
else:
image = None
error = None
if not title:
error = "پست شما نیاز به یک اسم دارد."
if not content:
error = "شما مطلبی ننوشته اید!!"
if error is not None:
flash(error)
else:
like, dislike = [], []
db = get_db()
db.post.insert_one({"title": title, "content": content, "category": category, "tag": tags, "image": image,
"activition": activition,
"author_username": g.user["username"], "author_id": g.user["_id"],
"author_image": g.user["image"], "like": like, "dislike":dislike})
db.post.create_index([('title', 'text'), ('content', 'text'), ('author_username', 'text')])
return redirect(url_for("blog.index"))
return render_template("user/create_post.html")
@bp.route("/edit/<string:post_id>", methods=("GET", "POST"))
@login_required
def edit_post(post_id):
db = get_db()
posts = db.post.find({"_id": ObjectId(post_id)})
li = [p for p in posts]
post = li[0]
if request.method == "POST":
title = request.form.get('title')
content = request.form.get('content')
tags = request.form.getlist('tags')
db = get_db()
mytags = list(db.tag.find())
for tag in tags:
if tag not in mytags:
db.post.insert_one(
{"name": tag})
activition = request.form.get('activition')
db.post.update({
'_id': li[0]['_id']
}, {
'$set': {
"title": title, "content": content, "tag": tags, "activition": activition,
}
}, upsert=False, multi=False)
return redirect(url_for("blog.index"))
else:
return render_template("user/edit_post.html", post=post)
| [
"flask.render_template",
"flask.flash",
"flask.request.form.getlist",
"flask.request.form.get",
"flask.url_for",
"werkzeug.utils.secure_filename",
"flask.request.files.get",
"bson.ObjectId",
"flask.Blueprint",
"blog.db.get_db"
] | [((375, 402), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {}), "('user', __name__)\n", (384, 402), False, 'from flask import Blueprint\n'), ((475, 511), 'flask.render_template', 'render_template', (['"""user/profile.html"""'], {}), "('user/profile.html')\n", (490, 511), False, 'from flask import render_template\n'), ((589, 597), 'blog.db.get_db', 'get_db', ([], {}), '()\n', (595, 597), False, 'from blog.db import get_db\n'), ((716, 767), 'flask.render_template', 'render_template', (['"""user/post_list.html"""'], {'posts': 'posts'}), "('user/post_list.html', posts=posts)\n", (731, 767), False, 'from flask import render_template\n'), ((2530, 2570), 'flask.render_template', 'render_template', (['"""user/create_post.html"""'], {}), "('user/create_post.html')\n", (2545, 2570), False, 'from flask import render_template\n'), ((2689, 2697), 'blog.db.get_db', 'get_db', ([], {}), '()\n', (2695, 2697), False, 'from blog.db import get_db\n'), ((907, 932), 'flask.request.form.get', 'request.form.get', (['"""title"""'], {}), "('title')\n", (923, 932), False, 'from flask import request\n'), ((952, 979), 'flask.request.form.get', 'request.form.get', (['"""content"""'], {}), "('content')\n", (968, 979), False, 'from flask import request\n'), ((1000, 1028), 'flask.request.form.get', 'request.form.get', (['"""category"""'], {}), "('category')\n", (1016, 1028), False, 'from flask import request\n'), ((1045, 1073), 'flask.request.form.getlist', 'request.form.getlist', (['"""tags"""'], {}), "('tags')\n", (1065, 1073), False, 'from flask import request\n'), ((1109, 1117), 'blog.db.get_db', 'get_db', ([], {}), '()\n', (1115, 1117), False, 'from blog.db import get_db\n'), ((1418, 1448), 'flask.request.form.get', 'request.form.get', (['"""activition"""'], {}), "('activition')\n", (1434, 1448), False, 'from flask import request\n'), ((1462, 1488), 'flask.request.files.get', 'request.files.get', (['"""image"""'], {}), "('image')\n", (1479, 1488), False, 'from flask import 
request\n'), ((2852, 2877), 'flask.request.form.get', 'request.form.get', (['"""title"""'], {}), "('title')\n", (2868, 2877), False, 'from flask import request\n'), ((2897, 2924), 'flask.request.form.get', 'request.form.get', (['"""content"""'], {}), "('content')\n", (2913, 2924), False, 'from flask import request\n'), ((2941, 2969), 'flask.request.form.getlist', 'request.form.getlist', (['"""tags"""'], {}), "('tags')\n", (2961, 2969), False, 'from flask import request\n'), ((2984, 2992), 'blog.db.get_db', 'get_db', ([], {}), '()\n', (2990, 2992), False, 'from blog.db import get_db\n'), ((3189, 3219), 'flask.request.form.get', 'request.form.get', (['"""activition"""'], {}), "('activition')\n", (3205, 3219), False, 'from flask import request\n'), ((3541, 3590), 'flask.render_template', 'render_template', (['"""user/edit_post.html"""'], {'post': 'post'}), "('user/edit_post.html', post=post)\n", (3556, 3590), False, 'from flask import render_template\n'), ((638, 661), 'bson.ObjectId', 'ObjectId', (["g.user['_id']"], {}), "(g.user['_id'])\n", (646, 661), False, 'from bson import ObjectId\n'), ((1525, 1552), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (1540, 1552), False, 'from werkzeug.utils import secure_filename\n'), ((1889, 1901), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (1894, 1901), False, 'from flask import flash\n'), ((1971, 1979), 'blog.db.get_db', 'get_db', ([], {}), '()\n', (1977, 1979), False, 'from blog.db import get_db\n'), ((2732, 2749), 'bson.ObjectId', 'ObjectId', (['post_id'], {}), '(post_id)\n', (2740, 2749), False, 'from bson import ObjectId\n'), ((3489, 3510), 'flask.url_for', 'url_for', (['"""blog.index"""'], {}), "('blog.index')\n", (3496, 3510), False, 'from flask import url_for\n'), ((2493, 2514), 'flask.url_for', 'url_for', (['"""blog.index"""'], {}), "('blog.index')\n", (2500, 2514), False, 'from flask import url_for\n')] |
#
# Created on Sat Dec 25 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Django
from django.shortcuts import render
from django.shortcuts import render, HttpResponseRedirect
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.shortcuts import reverse
from django.urls import reverse_lazy
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import context
from django_renderpdf.views import PDFView
from django.contrib import messages
# Models
from .models import *
# Forms
from .forms import *
class ChasisListView(ListView):
    """Paginated listing of Chasis records."""
    model = Chasis
    paginate_by = 10

    def get_context_data(self, *, object_list=None, **kwargs):
        """Extend the template context with the page title."""
        ctx = super().get_context_data(**kwargs)
        ctx.update(title='Listado de Chasis')
        return ctx
class ChasisCreateView(CreateView):
    """Create a new Chasis record.

    Handles POST directly so a success flash message can be emitted
    before redirecting back to the list view.
    """
    model = Chasis
    form_class = ChasisForm
    template_name = 'perifericos/chasis_form.html'
    success_url = reverse_lazy('chasislist')

    def post(self, request, *args, **kwargs):
        """Validate and save the submitted form.

        On success, flash a confirmation and redirect to ``success_url``;
        otherwise re-render the form with its validation errors bound.
        (Removed: a debug ``print(request.POST)`` and an unreachable
        ``print(form.errors)`` that sat after the return statement.)
        """
        form = ChasisForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Guardado exitoso')
            return HttpResponseRedirect(self.success_url)
        # Invalid form: re-display it with the errors attached.
        self.object = None
        context = self.get_context_data(**kwargs)
        context['form'] = form
        return render(request, self.template_name, context)

    def get_context_data(self, *, object_list=None, **kwargs):
        """Add the page title to the template context."""
        context = super().get_context_data(**kwargs)
        context['title'] = 'Creacion de Chasis'
        return context
class ChasisUpdateView(UpdateView):
    """Edit an existing Chasis record.

    (Removed a leftover debug ``print(self.object)`` from
    ``get_context_data``.)
    """
    model = Chasis
    form_class = ChasisForm
    template_name = 'perifericos/chasis_update.html'
    success_url = reverse_lazy('chasislist')

    def get_context_data(self, *, object_list=None, **kwargs):
        """Add the page title to the template context."""
        context = super().get_context_data(**kwargs)
        context['title'] = 'Edicion de Chasis'
        return context
class ChasisDeleteView(DeleteView):
    """Confirmation + deletion view for a Chasis; redirects to the list."""
    model = Chasis
    success_url = reverse_lazy('chasislist')
class ChasisPDF(PDFView):
    """Render every Chasis record into a PDF report."""
    template_name = 'report.html'

    def get_context_data(self, *args, **kwargs):
        """Expose the full Chasis queryset to the report template."""
        ctx = super().get_context_data(*args, **kwargs)
        ctx['chasis'] = Chasis.objects.all()
        return ctx
| [
"django.shortcuts.render",
"django.shortcuts.HttpResponseRedirect",
"django.contrib.messages.success",
"django.urls.reverse_lazy"
] | [((2066, 2092), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""chasislist"""'], {}), "('chasislist')\n", (2078, 2092), False, 'from django.urls import reverse_lazy\n'), ((2985, 3011), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""chasislist"""'], {}), "('chasislist')\n", (2997, 3011), False, 'from django.urls import reverse_lazy\n'), ((3302, 3328), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""chasislist"""'], {}), "('chasislist')\n", (3314, 3328), False, 'from django.urls import reverse_lazy\n'), ((2500, 2544), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'context'], {}), '(request, self.template_name, context)\n', (2506, 2544), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((2273, 2318), 'django.contrib.messages.success', 'messages.success', (['request', '"""Guardado exitoso"""'], {}), "(request, 'Guardado exitoso')\n", (2289, 2318), False, 'from django.contrib import messages\n'), ((2338, 2376), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['self.success_url'], {}), '(self.success_url)\n', (2358, 2376), False, 'from django.shortcuts import render, HttpResponseRedirect\n')] |
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float

image_path = r'/home/ccjunio/PycharmProjects/thermo_images/testes/images/dorso_costa00.jpeg'
img = cv2.imread(image_path)
# cv2.imread() returns None (no exception) when the file is missing or
# unreadable; fail fast with a clear error instead of a cryptic one later.
if img is None:
    raise FileNotFoundError('could not read image: {}'.format(image_path))

# Four alternative segmentations of the same image, shown side by side.
segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
gradient = sobel(rgb2gray(img))
segments_watershed = watershed(gradient, markers=250, compactness=0.001)

print("Felzenszwalb number of segments: {}".format(len(np.unique(segments_fz))))
print('SLIC number of segments: {}'.format(len(np.unique(segments_slic))))
print('Quickshift number of segments: {}'.format(len(np.unique(segments_quick))))

fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)
ax[0, 0].imshow(mark_boundaries(img, segments_fz))
ax[0, 0].set_title("Felzenszwalbs's method")
ax[0, 1].imshow(mark_boundaries(img, segments_slic))
ax[0, 1].set_title('SLIC')
ax[1, 0].imshow(mark_boundaries(img, segments_quick))
ax[1, 0].set_title('Quickshift')
ax[1, 1].imshow(mark_boundaries(img, segments_watershed))
ax[1, 1].set_title('Compact watershed')
for a in ax.ravel():
    a.set_axis_off()
plt.tight_layout()
plt.show()
"skimage.color.rgb2gray",
"skimage.segmentation.mark_boundaries",
"numpy.unique",
"matplotlib.pyplot.show",
"skimage.segmentation.watershed",
"skimage.segmentation.felzenszwalb",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"skimage.segmentation.quickshift",
"skimage.segmentatio... | [((425, 447), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (435, 447), False, 'import cv2\n'), ((507, 559), 'skimage.segmentation.felzenszwalb', 'felzenszwalb', (['img'], {'scale': '(100)', 'sigma': '(0.5)', 'min_size': '(50)'}), '(img, scale=100, sigma=0.5, min_size=50)\n', (519, 559), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((576, 626), 'skimage.segmentation.slic', 'slic', (['img'], {'n_segments': '(250)', 'compactness': '(10)', 'sigma': '(1)'}), '(img, n_segments=250, compactness=10, sigma=1)\n', (580, 626), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((644, 697), 'skimage.segmentation.quickshift', 'quickshift', (['img'], {'kernel_size': '(3)', 'max_dist': '(6)', 'ratio': '(0.5)'}), '(img, kernel_size=3, max_dist=6, ratio=0.5)\n', (654, 697), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((751, 802), 'skimage.segmentation.watershed', 'watershed', (['gradient'], {'markers': '(250)', 'compactness': '(0.001)'}), '(gradient, markers=250, compactness=0.001)\n', (760, 802), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((1053, 1115), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(10, 10), sharex=True, sharey=True)\n', (1065, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1600), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1598, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1609, 1611), True, 'import matplotlib.pyplot as plt\n'), ((715, 728), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (723, 728), False, 'from skimage.color import rgb2gray\n'), ((1133, 1166), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', 
(['img', 'segments_fz'], {}), '(img, segments_fz)\n', (1148, 1166), False, 'from skimage.segmentation import mark_boundaries\n'), ((1229, 1264), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_slic'], {}), '(img, segments_slic)\n', (1244, 1264), False, 'from skimage.segmentation import mark_boundaries\n'), ((1309, 1345), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_quick'], {}), '(img, segments_quick)\n', (1324, 1345), False, 'from skimage.segmentation import mark_boundaries\n'), ((1396, 1436), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_watershed'], {}), '(img, segments_watershed)\n', (1411, 1436), False, 'from skimage.segmentation import mark_boundaries\n'), ((859, 881), 'numpy.unique', 'np.unique', (['segments_fz'], {}), '(segments_fz)\n', (868, 881), True, 'import numpy as np\n'), ((932, 956), 'numpy.unique', 'np.unique', (['segments_slic'], {}), '(segments_slic)\n', (941, 956), True, 'import numpy as np\n'), ((1013, 1038), 'numpy.unique', 'np.unique', (['segments_quick'], {}), '(segments_quick)\n', (1022, 1038), True, 'import numpy as np\n')] |
# mpcdata/tests/test_query.py
# import pytest
# Third-party imports
import os
# Import the specific package/module/function we are testing
import mpcdata.params as params
# from .context import mpcdata
def test_required_dictionaries_exist():
    """params.py must expose every required dictionary."""
    # urlIDDict doubles as a check that params_masterlists was pulled in.
    for attr in ('urlIDDict', 'dirDict', 'fileDict', 'downloadSpecDict'):
        assert hasattr(params, attr)
def test_required_directory_paths_exist():
    """dirDict must contain every required directory key."""
    for key in ('top', 'code', 'share', 'external', 'internal', 'test'):
        assert key in params.dirDict
def test_expected_directory_paths():
    """dirDict must point at the expected on-disk locations."""
    here = os.path.realpath(os.path.dirname(__file__))
    top = os.path.realpath(os.path.dirname(here))
    share = os.path.join(top, 'share')
    expected = {
        'top': top,
        'share': share,
        'external': os.path.join(top, 'share', 'data_external'),
        'internal': os.path.join(top, 'share', 'data_internal'),
        'test': os.path.join(top, 'share', 'data_test'),
        'dev': os.path.join(top, 'share', 'data_dev'),
    }
    for key, path in expected.items():
        assert params.dirDict[key] == path
def test_required_filepaths_are_defined():
    """fileDict must define an entry for every required data domain."""
    # 'test' and 'dev' are not (yet) required here.
    for key in ('external', 'internal'):
        assert key in params.fileDict
def test_required_specs_exist_for_data_downloads():
    """downloadSpecDict must carry the required download settings."""
    assert 'attemptsMax' in params.downloadSpecDict
| [
"os.path.dirname",
"os.path.join"
] | [((1039, 1068), 'os.path.join', 'os.path.join', (['topDir', '"""share"""'], {}), "(topDir, 'share')\n", (1051, 1068), False, 'import os\n'), ((1087, 1133), 'os.path.join', 'os.path.join', (['topDir', '"""share"""', '"""data_external"""'], {}), "(topDir, 'share', 'data_external')\n", (1099, 1133), False, 'import os\n'), ((1151, 1197), 'os.path.join', 'os.path.join', (['topDir', '"""share"""', '"""data_internal"""'], {}), "(topDir, 'share', 'data_internal')\n", (1163, 1197), False, 'import os\n'), ((1215, 1257), 'os.path.join', 'os.path.join', (['topDir', '"""share"""', '"""data_test"""'], {}), "(topDir, 'share', 'data_test')\n", (1227, 1257), False, 'import os\n'), ((1275, 1316), 'os.path.join', 'os.path.join', (['topDir', '"""share"""', '"""data_dev"""'], {}), "(topDir, 'share', 'data_dev')\n", (1287, 1316), False, 'import os\n'), ((929, 954), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (944, 954), False, 'import os\n'), ((993, 1017), 'os.path.dirname', 'os.path.dirname', (['testDir'], {}), '(testDir)\n', (1008, 1017), False, 'import os\n')] |
# 45 - have the computer play rock-paper-scissors (jokenpo) against the user.
print('=====JOKENPO=====')
print('')
from random import randint
from time import sleep

itens = ('pedra', 'papel', 'tesoura')
computador = randint(0, 2)
print('''FAÇA SUA ESCOLHA
[ 0 ] pedra
[ 1 ] papel
[ 2 ] tesoura
''')
jogador = int(input('Qual a sua jogada ? '))
# Validate BEFORE indexing `itens`: the original crashed with IndexError
# for choices > 2 and silently accepted negative indices.
if jogador not in (0, 1, 2):
    print('jogada invalida')
else:
    print('JO')
    sleep(1)
    print('KEN')
    sleep(1)
    print('PO')
    sleep(1)
    print('computador jogou {}.'.format(itens[computador]))
    print('jogador jogou {}.'.format(itens[jogador]))
    # Each item beats the previous one, so (winner - loser) % 3 == 1.
    resultado = (jogador - computador) % 3
    if resultado == 0:
        print('EMPATE')
    elif resultado == 1:
        print('JOGADOR VENCE')
    else:
        print('COMPUTADOR VENCE')
# FIM//A\\
"random.randint",
"time.sleep"
] | [((203, 216), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (210, 216), False, 'from random import randint\n'), ((345, 353), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (350, 353), False, 'from time import sleep\n'), ((367, 375), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (372, 375), False, 'from time import sleep\n'), ((388, 396), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (393, 396), False, 'from time import sleep\n')] |
from flask import Flask
from config import DefaultConfig
# factory method for creating app objects
def create_app(config=None):
    """Application factory for the meep Flask app.

    :param config: configuration object passed to ``app.config.from_object``;
        defaults to a fresh ``DefaultConfig``.  (The previous signature used
        ``config=DefaultConfig()``, which built a single shared instance at
        import time -- the mutable-default-argument pitfall.)
    :return: a configured :class:`flask.Flask` instance.
    """
    if config is None:
        config = DefaultConfig()
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config)

    # initialize database and migrations (imported here to avoid circular imports)
    from meep.models import db, migrate
    from meep.models import Address, AreaOfEffect
    from meep.models import Coordinate, FuelType
    from meep.models import Owner, Line, Project
    from meep.models import Radius, Site
    db.init_app(app)
    migrate.init_app(app, db)

    # register blueprints
    from meep.resources import project
    app.register_blueprint(project.project)

    return app
| [
"meep.models.migrate.init_app",
"config.DefaultConfig",
"meep.models.db.init_app",
"flask.Flask"
] | [((124, 139), 'config.DefaultConfig', 'DefaultConfig', ([], {}), '()\n', (137, 139), False, 'from config import DefaultConfig\n'), ((152, 198), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (157, 198), False, 'from flask import Flask\n'), ((509, 525), 'meep.models.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (520, 525), False, 'from meep.models import db, migrate\n'), ((530, 555), 'meep.models.migrate.init_app', 'migrate.init_app', (['app', 'db'], {}), '(app, db)\n', (546, 555), False, 'from meep.models import db, migrate\n')] |
import os
import uuid
import pytest # type: ignore
from hopeit.testing.apps import execute_event
from hopeit.server.version import APPS_API_VERSION
from model import Something
from simple_example.collector.collect_spawn import ItemsInfo, ItemsCollected
APP_VERSION = APPS_API_VERSION.replace('.', "x")
@pytest.fixture
def sample_file_ids():
ids = [str(uuid.uuid4()), str(uuid.uuid4())]
for test_id in ids:
json_str = '{"id": "' + test_id + '", "user": {"id": "u1", "name": "test_user"}, ' \
+ '"status": {"ts": "2020-05-01T00:00:00Z", "type": "NEW"}, "history": []}'
os.makedirs(f'/tmp/simple_example.{APP_VERSION}.fs.data_path/', exist_ok=True)
with open(f'/tmp/simple_example.{APP_VERSION}.fs.data_path/{test_id}.json', 'w') as f:
f.write(json_str)
f.flush()
return ids
@pytest.mark.asyncio
async def test_find_two_items(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(*sample_file_ids)
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].id == sample_file_ids[0]
assert result[1].id == sample_file_ids[1]
assert pp_result == 2
@pytest.mark.asyncio
async def test_find_one_item(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(sample_file_ids[0], str(uuid.uuid4()))
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert isinstance(result, Something)
assert result.id == sample_file_ids[0]
assert pp_result == 1
@pytest.mark.asyncio
async def test_find_no_items(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(str(uuid.uuid4()), str(uuid.uuid4()))
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert result == ItemsCollected([])
assert pp_result == 0
| [
"simple_example.collector.collect_spawn.ItemsCollected",
"os.makedirs",
"uuid.uuid4",
"simple_example.collector.collect_spawn.ItemsInfo",
"hopeit.server.version.APPS_API_VERSION.replace",
"hopeit.testing.apps.execute_event"
] | [((272, 306), 'hopeit.server.version.APPS_API_VERSION.replace', 'APPS_API_VERSION.replace', (['"""."""', '"""x"""'], {}), "('.', 'x')\n", (296, 306), False, 'from hopeit.server.version import APPS_API_VERSION\n'), ((969, 996), 'simple_example.collector.collect_spawn.ItemsInfo', 'ItemsInfo', (['*sample_file_ids'], {}), '(*sample_file_ids)\n', (978, 996), False, 'from simple_example.collector.collect_spawn import ItemsInfo, ItemsCollected\n'), ((617, 695), 'os.makedirs', 'os.makedirs', (['f"""/tmp/simple_example.{APP_VERSION}.fs.data_path/"""'], {'exist_ok': '(True)'}), "(f'/tmp/simple_example.{APP_VERSION}.fs.data_path/', exist_ok=True)\n", (628, 695), False, 'import os\n'), ((1037, 1150), 'hopeit.testing.apps.execute_event', 'execute_event', ([], {'app_config': 'app_config', 'event_name': '"""collector.collect_spawn"""', 'payload': 'payload', 'postprocess': '(True)'}), "(app_config=app_config, event_name='collector.collect_spawn',\n payload=payload, postprocess=True)\n", (1050, 1150), False, 'from hopeit.testing.apps import execute_event\n'), ((1690, 1803), 'hopeit.testing.apps.execute_event', 'execute_event', ([], {'app_config': 'app_config', 'event_name': '"""collector.collect_spawn"""', 'payload': 'payload', 'postprocess': '(True)'}), "(app_config=app_config, event_name='collector.collect_spawn',\n payload=payload, postprocess=True)\n", (1703, 1803), False, 'from hopeit.testing.apps import execute_event\n'), ((2270, 2383), 'hopeit.testing.apps.execute_event', 'execute_event', ([], {'app_config': 'app_config', 'event_name': '"""collector.collect_spawn"""', 'payload': 'payload', 'postprocess': '(True)'}), "(app_config=app_config, event_name='collector.collect_spawn',\n payload=payload, postprocess=True)\n", (2283, 2383), False, 'from hopeit.testing.apps import execute_event\n'), ((2563, 2581), 'simple_example.collector.collect_spawn.ItemsCollected', 'ItemsCollected', (['[]'], {}), '([])\n', (2577, 2581), False, 'from simple_example.collector.collect_spawn 
import ItemsInfo, ItemsCollected\n'), ((363, 375), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (373, 375), False, 'import uuid\n'), ((382, 394), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (392, 394), False, 'import uuid\n'), ((1635, 1647), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1645, 1647), False, 'import uuid\n'), ((2196, 2208), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2206, 2208), False, 'import uuid\n'), ((2215, 2227), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2225, 2227), False, 'import uuid\n')] |
from flask import Flask, jsonify, render_template
import pandas as pd
import os
import pymongo
from flask import send_from_directory
from pymongo import MongoClient
# initialize flask app (JSON_SORT_KEYS=False keeps response key order as built)
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
# read the training data and merge values with labels on the patient id
df_labels = pd.read_csv('train_labels.csv')
df_values = pd.read_csv('train_values.csv')
merged_df = pd.merge(df_values, df_labels, how='inner', on='patient_id')
# split into patients with heart disease (_1) and without (_0)
merged_df_1 = merged_df.drop(merged_df.index[(merged_df.heart_disease_present.eq(0))])
merged_df_0 = merged_df.drop(merged_df.index[(merged_df.heart_disease_present.eq(1))])
# MongoDB connection: MONGODB_URI from the environment, else the local default
conn = os.environ.get('MONGODB_URI')
if not conn:
    conn = 'mongodb://localhost:27017/'
client = MongoClient(conn)
db = client.heart_data
collection = db.train_values
# materialize the collection, stripping the non-JSON-serializable ObjectId
listt = []
for obj in collection.find():
    obj.pop("_id")
    listt.append(obj)
#build out the routes
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the bundled static images."""
    images_dir = os.path.join(app.root_path, 'static', 'images')
    return send_from_directory(images_dir, 'favicon.ico', mimetype='image/png')
@app.route('/analysis')
def analysis():
    """Render the data-analysis page."""
    return render_template('analysis.html')
@app.route('/prediction')
def predict():
    """Render the heart-disease prediction page."""
    return render_template('health-prediction.html')
@app.route('/data')
def data():
    """Render the raw-data page."""
    return render_template('data.html')
@app.route('/chart')
def chart():
    """Return age distributions (with / without heart disease) as JSON."""
    payload = {
        "age_hd": list(merged_df_1['age']),
        "age_no_hd": list(merged_df_0['age']),
    }
    return jsonify(payload)
@app.route('/table')
def tab_content():
    """Return the raw training-values documents as JSON."""
    return jsonify(listt)
if __name__ == '__main__':
    # run the development server with debugger/auto-reloader enabled
    app.run(debug=True)
| [
"flask.render_template",
"pandas.read_csv",
"flask.Flask",
"pandas.merge",
"os.environ.get",
"os.path.join",
"pymongo.MongoClient",
"flask.jsonify"
] | [((195, 210), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (200, 210), False, 'from flask import Flask, jsonify, render_template\n'), ((291, 322), 'pandas.read_csv', 'pd.read_csv', (['"""train_labels.csv"""'], {}), "('train_labels.csv')\n", (302, 322), True, 'import pandas as pd\n'), ((335, 366), 'pandas.read_csv', 'pd.read_csv', (['"""train_values.csv"""'], {}), "('train_values.csv')\n", (346, 366), True, 'import pandas as pd\n'), ((379, 439), 'pandas.merge', 'pd.merge', (['df_values', 'df_labels'], {'how': '"""inner"""', 'on': '"""patient_id"""'}), "(df_values, df_labels, how='inner', on='patient_id')\n", (387, 439), True, 'import pandas as pd\n'), ((662, 691), 'os.environ.get', 'os.environ.get', (['"""MONGODB_URI"""'], {}), "('MONGODB_URI')\n", (676, 691), False, 'import os\n'), ((754, 771), 'pymongo.MongoClient', 'MongoClient', (['conn'], {}), '(conn)\n', (765, 771), False, 'from pymongo import MongoClient\n'), ((968, 997), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (983, 997), False, 'from flask import Flask, jsonify, render_template\n'), ((1239, 1271), 'flask.render_template', 'render_template', (['"""analysis.html"""'], {}), "('analysis.html')\n", (1254, 1271), False, 'from flask import Flask, jsonify, render_template\n'), ((1325, 1366), 'flask.render_template', 'render_template', (['"""health-prediction.html"""'], {}), "('health-prediction.html')\n", (1340, 1366), False, 'from flask import Flask, jsonify, render_template\n'), ((1411, 1439), 'flask.render_template', 'render_template', (['"""data.html"""'], {}), "('data.html')\n", (1426, 1439), False, 'from flask import Flask, jsonify, render_template\n'), ((1625, 1641), 'flask.jsonify', 'jsonify', (['my_data'], {}), '(my_data)\n', (1632, 1641), False, 'from flask import Flask, jsonify, render_template\n'), ((1700, 1714), 'flask.jsonify', 'jsonify', (['listt'], {}), '(listt)\n', (1707, 1714), False, 'from flask import Flask, jsonify, 
render_template\n'), ((1072, 1119), 'os.path.join', 'os.path.join', (['app.root_path', '"""static"""', '"""images"""'], {}), "(app.root_path, 'static', 'images')\n", (1084, 1119), False, 'import os\n')] |
import torch
import yaml
import argparse
from dataset.BSD500 import BSD500Dataset
from models.HED import HED

###############
# parse cfg
###############
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', dest='cfg', required=True, help='path to config file')
# Removed a redundant parse_known_args() call whose result was discarded.
args = parser.parse_args()

cfg_file = args.cfg
print('cfg_file: ', cfg_file)
# yaml.load() without an explicit Loader allows arbitrary object construction
# (unsafe) and is a TypeError on PyYAML >= 6; safe_load parses plain data only.
with open('config/' + cfg_file, 'r') as f:
    cfg = yaml.safe_load(f)
print(cfg)
########################################
model = HED(cfg)
| [
"models.HED.HED",
"yaml.load",
"argparse.ArgumentParser"
] | [((166, 191), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (189, 191), False, 'import argparse\n'), ((530, 538), 'models.HED.HED', 'HED', (['cfg'], {}), '(cfg)\n', (533, 538), False, 'from models.HED import HED\n'), ((452, 464), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (461, 464), False, 'import yaml\n')] |
# Generated by Django 2.2.5 on 2019-12-03 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Post.thread: FK to forums.Thread, cascade delete, related_name='posts'."""
    dependencies = [
        ('forums', '0006_auto_20191203_0758'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts',
                                    to='forums.Thread'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((365, 474), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts"""', 'to': '"""forums.Thread"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='posts', to='forums.Thread')\n", (382, 474), False, 'from django.db import migrations, models\n')] |
import cv2
# Map of tracker-name keys to cv2 tracker factory functions; looked up by
# Track.__init__ via its `tracker_name` argument.
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "mil": cv2.TrackerMIL_create
}
class Track:
    """
    Tracks a single person across video frames with an OpenCV tracker.
    """
    def __init__(self, tracker_name, first_frame, bbox, id, references):
        """
        :param tracker_name: key into OPENCV_OBJECT_TRACKERS ('csrt'/'kcf'/'mil')
        :param first_frame: frame used to initialise the tracker
        :param bbox: initial bounding box (x, y, w, h)
        :param id: numeric identifier for this track
        :param references: dict of reference regions ('left', 'right', 'door')
        """
        self._tracker = OPENCV_OBJECT_TRACKERS[tracker_name]()
        self._bbox = bbox
        self._tracker.init(first_frame, bbox)
        self._frame_height, self._frame_width, _ = first_frame.shape
        self._id = id
        self._status = ""  # concatenation of visited regions: 'L', 'R', 'P'
        self._references = references
        self._x = 0  # current centroid
        self._y = 0
        self._x_last = 0  # previous centroid (used for timeout detection)
        self._y_last = 0
        self._timeout = 0  # consecutive frames without movement
        self._update_centroid()
    def update(self, frame):
        """
        Advance the tracker one frame and refresh the centroid.
        :param frame: image
        """
        success, self._bbox = self._tracker.update(frame)
        self._update_centroid()
    def _update_centroid(self):
        """
        Recompute the centre of the track's bbox, keeping the previous one.
        """
        self._x_last = self._x
        self._y_last = self._y
        self._x = int(self._bbox[0] + (self._bbox[2]) / 2)
        self._y = int(self._bbox[1] + (self._bbox[3]) / 2)
    def is_finish_track(self):
        """
        Return True when the track has finished, i.e. less than 80% of its
        bbox area remains inside the frame.
        """
        bb_area = self._bbox[2] * self._bbox[3]
        # Clip the bbox to the frame to measure the visible portion.
        xmin = max(0, self._bbox[0])
        ymin = max(0, self._bbox[1])
        xmax = min(self._frame_width, self._bbox[0] + self._bbox[2])
        ymax = min(self._frame_height, self._bbox[1] + self._bbox[3])
        bb_inner_area = (xmax - xmin) * (ymax - ymin)
        try:
            percent_in_area = bb_inner_area / bb_area
        except ZeroDivisionError:
            # Degenerate (zero-area) bbox: treat as not finished.
            return False
        if percent_in_area < 0.8:
            return True
        return False
    def get_bbox(self):
        """
        :return: the track's bbox
        """
        return self._bbox
    def update_bbox(self, bbox):
        """
        Replace the bbox (e.g. with a detector/tracker blend) and
        recompute the centroid.
        :param bbox: new bounding box
        """
        self._bbox = bbox
        self._update_centroid()
    def get_id(self):
        """
        :return: the track's identifier
        """
        return self._id
    def check_bb_size(self):
        """
        Return True when the bbox size is acceptable (neither side larger
        than a third of the frame).
        :return: boolean
        """
        if (self._bbox[2] > self._frame_width / 3) or (self._bbox[3] > self._frame_height / 3):
            return False
        return True
    def get_status(self):
        """
        :return: the track's region-history status string
        """
        return self._status
    def update_status(self):
        """
        Append any newly visited reference region to the status string.
        """
        self._ref_left()
        self._ref_right()
        self._ref_door()
    def _ref_left(self):
        """
        Append 'L' (once) when the centroid is inside the left region (x-range).
        """
        left_ref = self._references["left"]
        if left_ref[0] < self._x < left_ref[1] and not "L" in self._status:
            self._status = self._status + "L"
    def _ref_right(self):
        """
        Append 'R' (once) when the centroid is inside the right region (x-range).
        """
        right_ref = self._references["right"]
        if right_ref[0] < self._x < right_ref[1] and not "R" in self._status:
            self._status = self._status + "R"
    def _ref_door(self):
        """
        Append 'P' (once) when the centroid is inside the door region (y-range).
        """
        door_ref = self._references["door"]
        if door_ref[0] < self._y < door_ref[1] and not "P" in self._status:
            self._status = self._status + "P"
    def is_timeout(self):
        """
        Return True once the centroid has not moved for 5 consecutive calls
        (the tracker is considered stalled).
        """
        if self._x == self._x_last and self._y == self._y_last:
            self._timeout = self._timeout + 1
        else:
            self._timeout = 0
        if self._timeout >= 5:
            return True
        else:
            return False
class Tracker:
    """
    Manages the set of active person tracks and the entry/pass counters.
    """
    # Reference regions (pixel ranges) used to classify trajectories:
    # 'left'/'right' are x-ranges, 'door' is a y-range.
    references = {"left": (20, 120), "right": (320, 400), "door": (60, 120)}
    TRACKER_TYPE = "csrt"
    CONF_THRESHOLD = 0.82
    NMS_THRESHOLD = 0.1

    def __init__(self):
        self._trackers = []      # active Track instances
        self._last_bboxes = None
        self._track_id = 0       # next id to assign
        self.counter_enter = 0   # people that entered the shop
        self.counter_pass = 0    # people that walked past

    def refresh_bbox(self, bboxes, better_bb_index):
        """
        Blend the two candidate bboxes with a 60/40 weighted average.

        :param bboxes: pair [detection_bbox, tracker_bbox]
        :param better_bb_index: index of the bbox kept by NMS (weighted 0.6)
        :return: tuple with the blended bbox
        """
        import operator
        bb1 = tuple(map(operator.mul, bboxes[better_bb_index], (.6, .6, .6, .6)))
        bb2 = tuple(map(operator.mul, bboxes[int(not better_bb_index)], (.4, .4, .4, .4)))
        return tuple(map(operator.add, bb1, bb2))

    def update_trackers_by_dets(self, frame, bboxes):
        """
        Match detector bboxes against existing tracks (via NMS overlap);
        refresh matched tracks, create new tracks for unmatched detections.

        :param frame: image
        :param bboxes: bboxes produced by the person detector
        """
        for bbox in bboxes:
            add_new = True
            for tr in self._trackers:
                bb = [bbox, tr.get_bbox()]
                indicates = cv2.dnn.NMSBoxes(bb, [1., .9], self.CONF_THRESHOLD, self.NMS_THRESHOLD)
                if indicates.size == 1:
                    add_new = False
                    new_bbox = self.refresh_bbox(bb, indicates[0][0])
                    tr.update_bbox(new_bbox)
            if add_new:
                # Use the class-level TRACKER_TYPE instead of a duplicated literal.
                new_track = Track(self.TRACKER_TYPE, frame, bbox, self._track_id, references=self.references)
                if not new_track.is_finish_track() and new_track.check_bb_size():
                    self._trackers.append(new_track)
                    self._track_id += 1

    def get_counter_pass(self):
        """
        :return: number of people that walked past without entering
        """
        return self.counter_pass

    def get_counter_enter(self):
        """
        :return: number of people that entered the shop
        """
        return self.counter_enter

    def check_trackers(self):
        """
        Inspect each track's region history, update the counters and drop
        completed tracks.

        Bug fix: iterate over a snapshot (``list(...)``) -- the original
        removed elements from ``self._trackers`` while iterating it, which
        silently skipped the element following each removal.
        """
        for tr in list(self._trackers):
            status = tr.get_status()
            if len(status) >= 2:
                if status == "LR" or status == "RL":
                    self.counter_pass = self.counter_pass + 1
                elif status == "LP" or status == "RP":
                    self.counter_enter = self.counter_enter + 1
                self.remove_track(tr)

    def remove_track(self, tr):
        """
        Remove a track from the active list.

        :param tr: track to remove
        """
        self._trackers.remove(tr)

    def track(self, frame):
        """
        Advance every track one frame:
          - update positions and region status
          - drop stalled (timed-out) and finished tracks
        :param frame: image
        :return: the (unmodified) frame
        """
        # Snapshot here too: remove_track() mutates self._trackers mid-loop.
        for track in list(self._trackers):
            track.update(frame)
            track.update_status()
            if track.is_timeout():
                self.remove_track(track)
        self._trackers = [tr for tr in self._trackers if not tr.is_finish_track()]
        return frame
| [
"cv2.dnn.NMSBoxes"
] | [((5571, 5644), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bb', '[1.0, 0.9]', 'self.CONF_THRESHOLD', 'self.NMS_THRESHOLD'], {}), '(bb, [1.0, 0.9], self.CONF_THRESHOLD, self.NMS_THRESHOLD)\n', (5587, 5644), False, 'import cv2\n')] |
""" General purpose functions """
import hashlib
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
def hash_from_strings(items):
    """Return the SHA-256 hex digest of the given strings joined by '+|+'."""
    combined = "+|+".join(items)
    return hashlib.sha256(combined.encode('utf-8')).hexdigest()
| [
"hashlib.sha256"
] | [((299, 324), 'hashlib.sha256', 'hashlib.sha256', (['item_text'], {}), '(item_text)\n', (313, 324), False, 'import hashlib\n')] |
from .changemanager_base import BaseChangeManager
from ..utils.psdict import PsDict
from ..table.tablechanges import TableChanges
from .slot import Slot
import copy
class DictChangeManager(BaseChangeManager):
    """
    Manage changes that occured in a DataFrame between runs.

    Compares the slot's current PsDict against a shallow snapshot taken at
    the previous run and records created/updated/deleted keys.
    """
    def __init__(self,
                 slot,
                 buffer_created=True,
                 buffer_updated=True,
                 buffer_deleted=True,
                 buffer_exposed=False,
                 buffer_masked=False):
        # The buffer_* flags select which change categories are buffered;
        # they are forwarded unchanged to BaseChangeManager.
        super(DictChangeManager, self).__init__(
            slot,
            buffer_created,
            buffer_updated,
            buffer_deleted,
            buffer_exposed,
            buffer_masked)
        self._last_dict = None  # snapshot of the dict at the previous update
        data = slot.data()
        # Lazily attach a TableChanges tracker to the slot's data.
        if data.changes is None:
            data.changes = TableChanges()
    def reset(self, name=None):
        """Reset the manager; the next update() will see everything as created."""
        super(DictChangeManager, self).reset(name)
        self._last_dict = None
    def update(self, run_number, data, mid):
        """Record created/updated/deleted keys of `data` since the last run.

        :param run_number: current scheduler run number; updates are skipped
            when it is non-zero and not newer than the last processed run
        :param data: the PsDict to diff against the previous snapshot
        :param mid: module id forwarded to compute_updates
        """
        # pylint: disable=unused-argument
        assert isinstance(data, PsDict)
        if data is None or (run_number != 0 and
                            run_number <= self._last_update):
            return
        data.fix_indices()
        last_dict = self._last_dict
        if last_dict is None:
            # First run (or after reset): every key counts as created.
            data.changes.add_created(data.ids)
        else:
            data.changes.add_created(data.new_indices(last_dict))
            data.changes.add_updated(data.updated_indices(last_dict))
            data.changes.add_deleted(data.deleted_indices(last_dict))
        changes = data.compute_updates(self._last_update, run_number, mid)
        # NOTE(review): shallow copy -- presumably sufficient because diffs
        # compare values by identity/equality, not deep structure; confirm.
        self._last_dict = copy.copy(data)
        self._last_update = run_number
        self._row_changes.combine(changes,
                                  self.created.buffer,
                                  self.updated.buffer,
                                  self.deleted.buffer)
Slot.add_changemanager_type(PsDict, DictChangeManager)
| [
"copy.copy"
] | [((1699, 1714), 'copy.copy', 'copy.copy', (['data'], {}), '(data)\n', (1708, 1714), False, 'import copy\n')] |
import os
import torch
from classifier.classes.utils.Params import Params
class Loader:
def __init__(self, modality: str, for_submodule: bool = False):
self._modality = modality
self._modality_params = Params.load_modality_params(self._modality)
experiment_params = Params.load_experiment_params()
dataset_params = Params.load_dataset_params(experiment_params["dataset_name"])
self._path_to_modalities = dataset_params["paths"]
self._network_type = experiment_params["train"]["network_type"]
if for_submodule:
multimodal_network_params = Params.load_network_params(self._network_type)
self._network_type = multimodal_network_params["submodules"][self._modality]["architecture"]
path_to_modality = self._path_to_modalities[self._modality]
self._path_to_data = os.path.join(path_to_modality, self._modality_params["path_to_data"])
self._file_format = self._modality_params["file_format"]
def _get_path_to_item(self, path_to_input: str) -> str:
"""
Creates the path to the data item for the specified modality
:param path_to_input: the path to the data item related to the main modality
:return: the path to the eye-tracking sequence data item
"""
split_path = path_to_input.split(os.sep)
file_name = str(split_path[-1]).split(".")[0] + "." + self._file_format
label = str(split_path[-2])
return os.path.join(self._path_to_data, label, file_name)
def load(self, path_to_input: str) -> torch.Tensor:
"""
Loads a data item from the dataset
:param path_to_input: the path to the data item to be loaded (referred to the main modality)
:return: the loaded data item
"""
pass
| [
"classifier.classes.utils.Params.Params.load_dataset_params",
"os.path.join",
"classifier.classes.utils.Params.Params.load_experiment_params",
"classifier.classes.utils.Params.Params.load_modality_params",
"classifier.classes.utils.Params.Params.load_network_params"
] | [((227, 270), 'classifier.classes.utils.Params.Params.load_modality_params', 'Params.load_modality_params', (['self._modality'], {}), '(self._modality)\n', (254, 270), False, 'from classifier.classes.utils.Params import Params\n'), ((300, 331), 'classifier.classes.utils.Params.Params.load_experiment_params', 'Params.load_experiment_params', ([], {}), '()\n', (329, 331), False, 'from classifier.classes.utils.Params import Params\n'), ((357, 418), 'classifier.classes.utils.Params.Params.load_dataset_params', 'Params.load_dataset_params', (["experiment_params['dataset_name']"], {}), "(experiment_params['dataset_name'])\n", (383, 418), False, 'from classifier.classes.utils.Params import Params\n'), ((867, 936), 'os.path.join', 'os.path.join', (['path_to_modality', "self._modality_params['path_to_data']"], {}), "(path_to_modality, self._modality_params['path_to_data'])\n", (879, 936), False, 'import os\n'), ((1486, 1536), 'os.path.join', 'os.path.join', (['self._path_to_data', 'label', 'file_name'], {}), '(self._path_to_data, label, file_name)\n', (1498, 1536), False, 'import os\n'), ((617, 663), 'classifier.classes.utils.Params.Params.load_network_params', 'Params.load_network_params', (['self._network_type'], {}), '(self._network_type)\n', (643, 663), False, 'from classifier.classes.utils.Params import Params\n')] |
import pytest
from hyperloop.Python.mission import lat_long
import numpy as np
from openmdao.api import Group, Problem
def create_problem(component):
root = Group()
prob = Problem(root)
prob.root.add('comp', component)
return prob
class TestMissionDrag(object):
def test_case1_vs_npss(self):
component = lat_long.LatLong()
prob = create_problem(component)
prob.setup()
prob['comp.x'] = 100.0
prob['comp.y'] = 100.0
prob['comp.lat_origin'] = 35.0
prob['comp.long_origin'] = -121.0
prob['comp.R_E'] = 6378.0
prob.run()
assert np.isclose(prob['comp.lat'], 35.898335, rtol = 0.01)
assert np.isclose(prob['comp.long'], -119.891025, rtol = 0.01) | [
"hyperloop.Python.mission.lat_long.LatLong",
"numpy.isclose",
"openmdao.api.Problem",
"openmdao.api.Group"
] | [((162, 169), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (167, 169), False, 'from openmdao.api import Group, Problem\n'), ((181, 194), 'openmdao.api.Problem', 'Problem', (['root'], {}), '(root)\n', (188, 194), False, 'from openmdao.api import Group, Problem\n'), ((338, 356), 'hyperloop.Python.mission.lat_long.LatLong', 'lat_long.LatLong', ([], {}), '()\n', (354, 356), False, 'from hyperloop.Python.mission import lat_long\n'), ((634, 684), 'numpy.isclose', 'np.isclose', (["prob['comp.lat']", '(35.898335)'], {'rtol': '(0.01)'}), "(prob['comp.lat'], 35.898335, rtol=0.01)\n", (644, 684), True, 'import numpy as np\n'), ((702, 755), 'numpy.isclose', 'np.isclose', (["prob['comp.long']", '(-119.891025)'], {'rtol': '(0.01)'}), "(prob['comp.long'], -119.891025, rtol=0.01)\n", (712, 755), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import os
import pkgutil
from os.path import abspath
from inspect import getsourcefile
CLIENT_MAP = {}
MODULE_TO_TYPE_MAPPINGS = {}
ALL_SERVICES_DIR = "services"
this_file_path = abspath(getsourcefile(lambda: 0))
if "site-packages" in this_file_path or "dist-packages" in this_file_path:
python_cli_root_dir = this_file_path[0:this_file_path.index("oci_cli")]
else:
python_cli_root_dir = this_file_path[0:this_file_path.index("/src/oci_cli")]
services_dir = os.path.join(python_cli_root_dir, ALL_SERVICES_DIR)
# Import client mappings from platformization directories.
# This imports the generated client_mappings which populates CLIENT_MAP and MODULE_TO_TYPE_MAPPINGS.
for importer1, modname1, ispkg1 in pkgutil.iter_modules(path=[services_dir]):
for importer, modname, ispkg in pkgutil.iter_modules(path=[services_dir + '/' + modname1 + '/src']):
if ispkg and modname.startswith("oci_cli_"):
oci_cli_module_name = modname.split(".")[0]
service_name = oci_cli_module_name[8:]
oci_cli_module = __import__(ALL_SERVICES_DIR + '.' + modname1 + '.src.' + oci_cli_module_name)
services_dir = oci_cli_module.__path__[0]
service_dir = os.path.join(services_dir, modname1, 'src', oci_cli_module_name)
generated_module = "client_mappings"
if os.path.isfile(os.path.join(service_dir, 'generated', generated_module + ".py")):
__import__(ALL_SERVICES_DIR + '.' + modname1 + '.src.' + oci_cli_module_name + ".generated." + generated_module)
| [
"inspect.getsourcefile",
"os.path.join",
"pkgutil.iter_modules"
] | [((563, 614), 'os.path.join', 'os.path.join', (['python_cli_root_dir', 'ALL_SERVICES_DIR'], {}), '(python_cli_root_dir, ALL_SERVICES_DIR)\n', (575, 614), False, 'import os\n'), ((811, 852), 'pkgutil.iter_modules', 'pkgutil.iter_modules', ([], {'path': '[services_dir]'}), '(path=[services_dir])\n', (831, 852), False, 'import pkgutil\n'), ((284, 309), 'inspect.getsourcefile', 'getsourcefile', (['(lambda : 0)'], {}), '(lambda : 0)\n', (297, 309), False, 'from inspect import getsourcefile\n'), ((890, 957), 'pkgutil.iter_modules', 'pkgutil.iter_modules', ([], {'path': "[services_dir + '/' + modname1 + '/src']"}), "(path=[services_dir + '/' + modname1 + '/src'])\n", (910, 957), False, 'import pkgutil\n'), ((1306, 1370), 'os.path.join', 'os.path.join', (['services_dir', 'modname1', '"""src"""', 'oci_cli_module_name'], {}), "(services_dir, modname1, 'src', oci_cli_module_name)\n", (1318, 1370), False, 'import os\n'), ((1450, 1514), 'os.path.join', 'os.path.join', (['service_dir', '"""generated"""', "(generated_module + '.py')"], {}), "(service_dir, 'generated', generated_module + '.py')\n", (1462, 1514), False, 'import os\n')] |
# reference: http://icrawler.readthedocs.io/en/latest/usage.html
from icrawler.builtin import GoogleImageCrawler
import os
dataset_base_dir = 'D:/Workspace/Dataset/fake_image_detection/task_2'
keyword_lists = ['snapchat face swap', 'MSQRD']
for keyword in keyword_lists:
folder_path = dataset_base_dir + '/' + keyword
if not os.path.exists(folder_path):
os.makedirs(folder_path)
print(folder_path + ' is created!')
else:
pass
google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=4,
storage={'root_dir': folder_path})
keyword_comma = keyword.replace(' ', ',')
google_crawler.crawl(keyword=keyword, max_num=10000)
print('Crawling ' + keyword + ' is done')
# ()()
# ('')HAANJU.YOO
| [
"os.path.exists",
"icrawler.builtin.GoogleImageCrawler",
"os.makedirs"
] | [((488, 586), 'icrawler.builtin.GoogleImageCrawler', 'GoogleImageCrawler', ([], {'parser_threads': '(2)', 'downloader_threads': '(4)', 'storage': "{'root_dir': folder_path}"}), "(parser_threads=2, downloader_threads=4, storage={\n 'root_dir': folder_path})\n", (506, 586), False, 'from icrawler.builtin import GoogleImageCrawler\n'), ((337, 364), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (351, 364), False, 'import os\n'), ((374, 398), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (385, 398), False, 'import os\n')] |
# Databricks notebook source
# MAGIC %run ../app/bootstrap
# COMMAND ----------
from pyspark.sql.dataframe import DataFrame
from datalakebundle.imports import transformation
# COMMAND ----------
datasets = [
{
"id": "123",
"name": "knihydobrovsky_cz",
"custom_attrs": {
105: "EXT_ID",
104: "ADFORM_ID",
2: "GA_ID",
},
},
{
"id": "4564",
"name": "knihomol_cz",
"custom_attrs": {
3: "EXT_ID",
2: "GA_ID",
},
},
]
# TODO 2: tahání configu a předávání přes globální proměnnou
@transformation("%datalake.base_base_path%")
def get_config(base_base_path: str):
return base_base_path
base_path = get_config.result
# TODO 1: cyklus
for dataset in datasets:
# TODO 3: use logger instead of print
print(dataset['name'])
dataset_name = dataset['name']
@transformation()
def load_visits():
return spark.read.format("delta").load(base_path + "/bronze/raw/visits/" + dataset_name)
def load_custom_attrs():
return spark.read.format("delta").load(base_path + "/bronze/raw/custom_attrs/" + dataset_name)
# TODO 4: rule of thumb: one notebook should always produce/output one dataset
@transformation(load_visits)
def save_visits(df: DataFrame):
df.write.format("delta").save(base_path + "/silver/parsed/visits/" + dataset_name, mode="append")
@transformation(load_custom_attrs)
def save_custom_attrs(df: DataFrame):
df.write.format("delta").save(base_path + "/silver/parsed/custom_attrs/" + dataset_name, mode="append")
| [
"datalakebundle.imports.transformation"
] | [((618, 661), 'datalakebundle.imports.transformation', 'transformation', (['"""%datalake.base_base_path%"""'], {}), "('%datalake.base_base_path%')\n", (632, 661), False, 'from datalakebundle.imports import transformation\n'), ((909, 925), 'datalakebundle.imports.transformation', 'transformation', ([], {}), '()\n', (923, 925), False, 'from datalakebundle.imports import transformation\n'), ((1268, 1295), 'datalakebundle.imports.transformation', 'transformation', (['load_visits'], {}), '(load_visits)\n', (1282, 1295), False, 'from datalakebundle.imports import transformation\n'), ((1444, 1477), 'datalakebundle.imports.transformation', 'transformation', (['load_custom_attrs'], {}), '(load_custom_attrs)\n', (1458, 1477), False, 'from datalakebundle.imports import transformation\n')] |
from pyspark.sql.types import StructField
from cishouseholds.pyspark_utils import convert_cerberus_schema_to_pyspark
def test_conversion():
cerberus_schema = {"id": {"type": "string"}, "whole_number": {"type": "integer"}}
pyspark_schema = convert_cerberus_schema_to_pyspark(cerberus_schema)
assert len(pyspark_schema) == len(cerberus_schema)
assert sorted([column_schema.name for column_schema in pyspark_schema]) == sorted(cerberus_schema.keys())
assert all(isinstance(column_schema, StructField) for column_schema in pyspark_schema)
| [
"cishouseholds.pyspark_utils.convert_cerberus_schema_to_pyspark"
] | [((251, 302), 'cishouseholds.pyspark_utils.convert_cerberus_schema_to_pyspark', 'convert_cerberus_schema_to_pyspark', (['cerberus_schema'], {}), '(cerberus_schema)\n', (285, 302), False, 'from cishouseholds.pyspark_utils import convert_cerberus_schema_to_pyspark\n')] |
from typing import List, Optional, Type
import pyspark.sql.functions as F
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql.types import DataType
import src.sparkcleaner.helpers.verify as verify
def remove_leading_zeros(df: SparkDataFrame,
col_name: str,
maintain_type: bool = True) -> SparkDataFrame:
"""Remove leading zeros from column using regex.
Parameters
----------
(required) df: pyspark.sql.DataFrame
Pyspark DataFrame containing column to be processed
(required) col_name: str
name of column to remove leading zeros from
(optional) maintain_type: bool = True
If false, returns col as str.
If true, returns col as type before function call
Returns
----------
pyspark.sql.DataFrame
processed column in place
See Also
----------
pyspark.sql.functions.regexp_replace()
pyspark.sql.Column.cast()
Example
----------
my_df = remove_leading_zeros(my_df, "MY_COL", False)
"""
_rlz_func_verify_input_types(df, col_name, maintain_type)
original_type: DataType = df.schema[col_name].dataType
df = df.withColumn(col_name, F.regexp_replace(F.col(col_name),
r'^[0]*', ""))
df = _if_maintain_type_cast_original_type(df, col_name,
maintain_type, original_type)
return df
def _rlz_func_verify_input_types(df: SparkDataFrame,
col_name: str,
maintain_type: bool) -> None:
input_vals: List[type] = [SparkDataFrame, str, bool]
expected_vals: List[type] = [type(df),
type(col_name),
type(maintain_type)]
verify.verify_func_input(input_vals, expected_vals)
def _if_maintain_type_cast_original_type(df: SparkDataFrame,
col_name: str,
maintain_type: bool,
original_type: DataType) -> SparkDataFrame:
if maintain_type:
df = df.withColumn(col_name,
F.col(col_name)
.cast(original_type)
.alias(col_name))
return df
def keep_alphanumeric_string(df: SparkDataFrame,
col_name: str,
maintain_type: bool = True,
keep_spaces: bool = True) -> SparkDataFrame:
"""Removes all non-alphanumeric characters from column using regex
Parameters
----------
(required) df: pyspark.sql.DataFrame
Pyspark DataFrame containing column to be processed
(required) col_name: str
name of column to remove non-alphanumeric characters from
(optional) maintain_type: bool = True
If false, returns col as str.
If true, returns col as type before function call
(optional) keep_spaces: bool = True
If false, removes all spaces from col
If true, leaves spaces in col
Returns
----------
pyspark.sql.DataFrame
processed column in place
See Also
----------
pyspark.sql.functions.regexp_replace()
pyspark.sql.Column.cast()
Example
----------
my_df = keep_alphanumeric_string(my_df, "MY_COL", False)
"""
_kes_func_verify_input_types(df, col_name, maintain_type, keep_spaces)
original_type: DataType = df.schema[col_name].dataType
regex: str = _set_regex(keep_spaces)
df = df.withColumn(col_name, F.regexp_replace(F.col(col_name),
regex, ""))
df = _if_maintain_type_cast_original_type(df, col_name,
maintain_type, original_type)
return df
def _set_regex(keep_spaces: bool) -> str:
if keep_spaces:
regex: str = r'[^A-Za-z0-9 ]' # spaces & alphanumeric
else:
regex: str = r'[^A-Za-z0-9]' # alphanumeric
return regex
def _kes_func_verify_input_types(df: SparkDataFrame,
col_name: str,
maintain_type: bool,
keep_spaces: bool) -> None:
input_vals: List[type] = [SparkDataFrame, str, bool, bool]
expected_vals: List[type] = [type(df),
type(col_name),
type(maintain_type),
type(keep_spaces)]
verify.verify_func_input(input_vals, expected_vals)
| [
"src.sparkcleaner.helpers.verify.verify_func_input",
"pyspark.sql.functions.col"
] | [((1820, 1871), 'src.sparkcleaner.helpers.verify.verify_func_input', 'verify.verify_func_input', (['input_vals', 'expected_vals'], {}), '(input_vals, expected_vals)\n', (1844, 1871), True, 'import src.sparkcleaner.helpers.verify as verify\n'), ((4594, 4645), 'src.sparkcleaner.helpers.verify.verify_func_input', 'verify.verify_func_input', (['input_vals', 'expected_vals'], {}), '(input_vals, expected_vals)\n', (4618, 4645), True, 'import src.sparkcleaner.helpers.verify as verify\n'), ((1232, 1247), 'pyspark.sql.functions.col', 'F.col', (['col_name'], {}), '(col_name)\n', (1237, 1247), True, 'import pyspark.sql.functions as F\n'), ((3674, 3689), 'pyspark.sql.functions.col', 'F.col', (['col_name'], {}), '(col_name)\n', (3679, 3689), True, 'import pyspark.sql.functions as F\n'), ((2224, 2239), 'pyspark.sql.functions.col', 'F.col', (['col_name'], {}), '(col_name)\n', (2229, 2239), True, 'import pyspark.sql.functions as F\n')] |
from tkinter import Tk, Canvas
# This is an emulated display with the same API interface as for the Unicorn HAT/pHAT hardware.
# Thus, it relies upon (in part) code from: https://github.com/pimoroni/unicorn-hat/blob/master/library/UnicornHat/unicornhat.py
# Note that only the pHAT is supported, and rotation of the display is not supported.
class EmulatedGUI():
def __init__(self, master):
self.master = master
master.title("Emulated")
self.map = []
self.pixels = [None for x in range(64)]
self.pixel_colours = ["#000000" for x in range(64)]
self.brightness = 1.0
def setup(self, pxmap):
self.map = pxmap
# Add GUI elements in a grid
row = 0
col = 0
for list in self.map:
col = 0
for index in list:
pixel = Canvas(self.master, width=30, height=30)
pixel.grid(row=row, column=col)
pixel.configure(background="black", highlightbackground="black", bd=1)
self.pixels[index] = pixel
col = col + 1
row = row + 1
def set_pixel(self, idx, r, g, b):
colour = '#%02x%02x%02x' % (r, g, b)
self.pixel_colours[idx] = colour
def set_brightness(self, brightness):
self.brightness = brightness
def update(self):
try:
index = 0
for pixel in self.pixels:
pixel.configure(background=self.pixel_colours[index])
index = index + 1
except:
pass
class EmulatedDisplay():
def __init__(self):
self.wx = 8
self.wy = 8
self.map = self.PHAT
self.pixels = [(0,0,0) for x in range(64)]
self.brightness_val = 0.2
self.is_setup = False
self.gui = None
# Modifed from the UnicornHAT Python library
# Available: https://github.com/pimoroni/unicorn-hat/blob/master/library/UnicornHat/unicornhat.py
@property
def PHAT(self):
return [[24, 16, 8, 0],
[25, 17, 9, 1],
[26, 18, 10, 2],
[27, 19, 11, 3],
[28, 20, 12, 4],
[29, 21, 13, 5],
[30, 22, 14, 6],
[31, 23, 15, 7]]
def set_layout(self, pixel_map):
self.map = self.PHAT
def setup(self):
if self.is_setup == True:
return
# Start the GUI loop
self.root = Tk()
# Ensure we stay above other windows
self.root.attributes("-topmost", True)
self.root.configure(background='black')
self.root.lift()
self.gui = EmulatedGUI(self.root)
self.gui.setup(self.map)
self.is_setup = True
try:
self.root.mainloop()
except KeyboardInterrupt:
pass
def get_shape(self):
return (len(self.map), len(self.map[0]))
def rotation(self, r=0):
pass
def get_rotation(self):
return 0
def brightness(self, b=0.2):
self.brightness_val = b
if self.gui is not None:
self.gui.set_brightness(b)
def get_brightness():
return self.brightness_val
def clear():
for x in range(64):
self.pixels[x] = (0, 0, 0)
def off():
self.clear()
self.show()
def get_index_from_xy(self, x, y):
self.wx = len(self.map) - 1
self.wy = len(self.map[0]) - 1
y = (self.wy)-y
if self.rotation == 90 and self.wx == self.wy:
x, y = y, (self.wx)-x
elif self.rotation == 180:
x, y = (self.wx)-x, (self.wy)-y
elif self.rotation == 270 and self.wx == self.wy:
x, y = (self.wy)-y, x
try:
index = self.map[x][y]
except IndexError:
index = None
return index
def set_pixel(self, x, y, r=None, g=None, b=None):
if self.is_setup is False:
return
if type(r) is tuple:
r, g, b = r
elif type(r) is str:
try:
r, g, b = COLORS[r.lower()]
except KeyError:
raise ValueError('Invalid color!')
index = self.get_index_from_xy(x, y)
if index is not None:
self.pixels[index] = (r, g, b)
self.gui.set_pixel(index, r, g, b)
def get_pixel(self, x, y):
index = self.get_index_from_xy(x, y)
if index is not None:
return self.pixels[index]
def set_all(self, r, g=None, b=None):
shade_pixels(lambda x, y: (r, g, b))
def shade_pixels(self, shader):
width, height = self.get_shape()
for x in range(width):
for y in range(height):
r, g, b = shader(x, y)
self.set_pixel(x, y, r, g, b)
def set_pixels(self, pixels):
self.shade_pixels(lambda x, y: pixels[y][x])
def get_pixels(self):
width, height = self.get_shape()
return [[self.get_pixel(x, y) for x in range(width)] for y in range(height)]
def show(self):
if self.is_setup is False:
return
self.gui.update() | [
"tkinter.Canvas",
"tkinter.Tk"
] | [((2613, 2617), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (2615, 2617), False, 'from tkinter import Tk, Canvas\n'), ((900, 940), 'tkinter.Canvas', 'Canvas', (['self.master'], {'width': '(30)', 'height': '(30)'}), '(self.master, width=30, height=30)\n', (906, 940), False, 'from tkinter import Tk, Canvas\n')] |
# Python plan -> Open Workbench XML converter.
#
# Python plan defines a Work Breakdown Structure where
# tasks are dictionaries and children are defined in a list.
# Children can contain sequences, to simplify data input;
# sequenced tasks are automatically chained (dependencies).
import sys
import math
from datetime import datetime, timedelta
from .keywords import *
from .tasks import *
# Start date is a monday. End-date calculation needs to add 2 days per 5 (for weekends);
# starting on a monday simplifies calculation of the extra.
_global_start_date = datetime(year=2016, month=10, day=10)
def _insert_dependency(deps, successor, predecessor):
if successor not in deps:
deps[successor] = {}
deps[successor][predecessor] = True
def _validate_tasks(id_to_task, deps):
for task in id_to_task.values():
for predecessor_id in task[DEPS]:
if predecessor_id not in id_to_task:
sys.stderr.write('WARNING: ID={task[ID]} NAME={task[NAME]} : unknown dependency "{predecessor_id}"\n'.format(**locals()))
_insert_dependency(deps, task[ID], predecessor_id)
def _date_as_owb_string(date):
return date.strftime('%Y-%m-%dT%H:%M:%S')
def _output_tasks_recursive(outfile, task, level):
_effort_in_days = task.get(EFFORT, 0)
_effort_in_calendar_days = _effort_in_days + math.floor((_effort_in_days - 1) / 5) * 2
_category = parse_category(task[NAME])
_name = xml_escape_attr(task[NAME])
_id = xml_escape_attr(task[ID])
_desc = xml_escape_attr(task.get(DESC, ' '))
_level = level
_summary = 'true' if has_children(task) else 'false'
_start_date = _date_as_owb_string(_global_start_date)
_end_date = _date_as_owb_string(_global_start_date + timedelta(days=_effort_in_calendar_days))
task_xml = '''
<Task
category="{_category}" start="{_start_date}" finish="{_end_date}"
proxy="false"
critical="false" status="0" outlineLevel="{_level}" summary="{_summary}"
milestone="false" name={_name} taskID={_id} fixed="false"
locked="false" key="false" percComp="0.0" totalSlack="9.0" unplanned="false">
<Notes>
<Note
createdBy="Unknown" createdDate="2016-10-09T05:45:21" content={_desc}/>
</Notes>
</Task>
'''
formatted_task = task_xml.lstrip('\n').format(**locals())
outfile.write(formatted_task)
children = task.get(CHILDREN, None)
if children:
for child in children:
if isinstance(child, str):
continue
else:
_output_tasks_recursive(outfile, child, level+1)
def _output_tasks(outfile, plan):
prefix = '''
<Tasks>
'''
suffix = '''
</Tasks>
'''
outfile.write(prefix.lstrip('\n'))
_output_tasks_recursive(outfile, plan, 1)
outfile.write(suffix.lstrip('\n'))
# returns dict(leaf predecessor_id, True) to be used as a set
#
# OWB ignores dependencies on non-leaf tasks; therefore we must
# recursively resolve the dependencies down to leaf nodes.
def _get_leaf_predecessor_ids(id_to_task, predecessor_id, leaf_predecessor_ids):
def _recursive_resolve(id):
task = id_to_task[id]
if has_children(task):
for child in task[CHILDREN]:
if isinstance(child, str):
continue
_recursive_resolve(child[ID])
else:
leaf_predecessor_ids[id] = True
_recursive_resolve(predecessor_id)
def _output_dependencies(outfile, id_to_task, deps):
prefix = '''
<Dependencies>
'''
suffix = '''
</Dependencies>
'''
outfile.write(prefix.lstrip('\n'))
for successor_id,predecessor_ids in sorted(deps.items()):
if has_children(id_to_task[successor_id]):
continue
leaf_predecessor_ids = {} # id:True
for predecessor_id in predecessor_ids.keys():
_get_leaf_predecessor_ids(id_to_task, predecessor_id, leaf_predecessor_ids)
for leaf_predecessor_id in sorted(leaf_predecessor_ids.keys()):
outfile.write(''' <Dependency
predecessorID="{leaf_predecessor_id}" startFinishType="0" lag="0.0" lagType="0" successorID="{successor_id}"/>
'''.format(**locals()))
outfile.write(suffix.lstrip('\n'))
def _output_main_file(outfile, plan):
prefix = '''
<?xml version="1.0"?>
<WORKBENCH_PROJECT>
<BaseCalendars>
<Calendar
name="Standard">
</Calendar>
</BaseCalendars>
<Projects>
<Project
UID="AJO44]`-U_```!/5"LU<!```?P```0" closed="false" active="true" approved="false"
start="2016-10-10T08:00:00" openForTimeEntry="true" format="0" trackMode="0" finish="2016-10-10T08:00:00"
priority="10" finishImposed="false" cpmType="0" name="Project Plan" startImposed="false"
program="false">
'''
suffix = '''
</Project>
</Projects>
</WORKBENCH_PROJECT>'''
# key = ID string, value = task dict
id_to_task = {}
sanitize_tasks(plan, id_to_task, add_child_dependencies=True)
# key = successor, value = {predecessor:True}
deps = {}
_validate_tasks(id_to_task, deps)
outfile.write(prefix.lstrip('\n'))
_output_tasks(outfile, plan)
_output_dependencies(outfile, id_to_task, deps)
outfile.write(suffix.lstrip('\n'))
def plan_to_owb_xml(filename, plan):
with open(filename, 'wt') as outfile:
_output_main_file(outfile, plan)
| [
"datetime.datetime",
"datetime.timedelta",
"math.floor"
] | [((573, 610), 'datetime.datetime', 'datetime', ([], {'year': '(2016)', 'month': '(10)', 'day': '(10)'}), '(year=2016, month=10, day=10)\n', (581, 610), False, 'from datetime import datetime, timedelta\n'), ((1356, 1393), 'math.floor', 'math.floor', (['((_effort_in_days - 1) / 5)'], {}), '((_effort_in_days - 1) / 5)\n', (1366, 1393), False, 'import math\n'), ((1758, 1798), 'datetime.timedelta', 'timedelta', ([], {'days': '_effort_in_calendar_days'}), '(days=_effort_in_calendar_days)\n', (1767, 1798), False, 'from datetime import datetime, timedelta\n')] |
import rdkit.Chem as Chem
import pickle
def smi_tokenizer(smi):
"""
Tokenize a SMILES molecule or reaction
"""
import re
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
tokens = [token for token in regex.findall(smi)]
assert smi == ''.join(tokens)
return ' '.join(tokens)
def canonicalize_smiles(smiles):
try:
mol = Chem.MolFromSmiles(smiles)
except:
mol = None
if mol is None:
return ''
else:
return Chem.MolToSmiles(mol)
def canonicalize(smiles=None, smiles_list=None):
"""Return the canonicalized version of the given smiles or smiles list"""
assert (smiles is None) != (smiles_list is None) # Only take one input
if smiles is not None:
return canonicalize_smiles(smiles)
elif smiles_list is not None:
# Convert smiles to mol and back to cannonicalize
new_smiles_list = []
for smiles in smiles_list:
new_smiles_list.append(canonicalize_smiles(smiles))
return new_smiles_list
def read_txt(file_path, detokenize=False):
out_list = []
with open(file_path, "r") as f:
while True:
line = f.readline().rstrip()
if not line:
break
if detokenize:
line = "".join(line.split(" "))
out_list.append(line)
return out_list
def read_file(file_path, beam_size=1, max_read=-1, parse_func=None):
read_file = open(file_path, 'r+')
output_list = [] # List of beams if beam_size is > 1 else list of smiles
cur_beam = [] # Keep track of the current beam
for line in read_file.readlines():
if parse_func is None:
parse = line.strip().replace(' ', '') # default parse function
if ',' in parse:
# If output separated by commas, return first by default
parse = parse.split(',')[0]
else:
parse = parse_func(line)
cur_beam.append(parse)
if len(cur_beam) == beam_size:
if beam_size == 1:
output_list.append(cur_beam[0])
else:
output_list.append(cur_beam)
if max_read != -1 and len(output_list) >= max_read:
break
cur_beam = []
read_file.close()
return output_list
def remove_atom_mapping(smiles):
mol = Chem.MolFromSmiles(smiles)
mol = Chem.RemoveHs(mol)
for atom in mol.GetAtoms():
atom.ClearProp('molAtomMapNumber')
smiles = Chem.MolToSmiles(mol)
return smiles
def txt2pkl(txt_path, pkl_path):
input_list = read_txt(txt_path, detokenize=True)
input_list = [[line] for line in input_list]
with open(pkl_path, 'wb') as f:
pickle.dump(input_list, f)
if __name__ == '__main__':
txt2pkl(txt_path='/data/junsu_data/ssl-rxn/retro_smiles_transformer/dataset/schneider50k/backward/src-train.txt',
pkl_path='/home/junsu/workspace/retro_star/biased_one_step/data/cooked_schneider50k/src-train.pkl')
| [
"pickle.dump",
"re.compile",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.MolToSmiles",
"rdkit.Chem.RemoveHs"
] | [((271, 290), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (281, 290), False, 'import re\n'), ((2471, 2497), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2489, 2497), True, 'import rdkit.Chem as Chem\n'), ((2508, 2526), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (2521, 2526), True, 'import rdkit.Chem as Chem\n'), ((2615, 2636), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (2631, 2636), True, 'import rdkit.Chem as Chem\n'), ((464, 490), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (482, 490), True, 'import rdkit.Chem as Chem\n'), ((586, 607), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (602, 607), True, 'import rdkit.Chem as Chem\n'), ((2836, 2862), 'pickle.dump', 'pickle.dump', (['input_list', 'f'], {}), '(input_list, f)\n', (2847, 2862), False, 'import pickle\n')] |
from queue import Queue
from datetime import datetime, timedelta
from .INewslistScraper import INewslistScraper
from .. import article
from .. import driver
class Scraper(INewslistScraper):
    """Scraper for ukr.net news listings, one category page per tag.

    Uses the shared selenium webdriver from the `driver` module and pushes
    `article.Article` objects onto a queue.
    """
    def __init__(self, limit: int = 100):
        INewslistScraper.__init__(self, limit)
        # Category tag -> listing page URL on ukr.net.
        self._tag_to_url = {
            "politics" : "https://www.ukr.net/news/politika.html",
            "economics" : "https://www.ukr.net/news/jekonomika.html",
            "accidents" : "https://www.ukr.net/news/proisshestvija.html",
            "society" : "https://www.ukr.net/news/society.html",
            "technologies" : "https://www.ukr.net/news/tehnologii.html",
            "science" : "https://www.ukr.net/news/science.html",
            "auto" : "https://www.ukr.net/news/avto.html",
            "sport" : "https://www.ukr.net/news/sport.html",
            "health" : "https://www.ukr.net/news/zdorove.html",
            "celebrities" : "https://www.ukr.net/news/show_biznes.html",
            "global" : "https://www.ukr.net/news/za_rubezhom.html",
            "fun" : "https://www.ukr.net/news/kurezy.html",
            "photoreport" : "https://www.ukr.net/news/fotoreportazh.html",
            "video" : "https://www.ukr.net/news/video.html"
        }
        # Shared selenium webdriver instance (module-level singleton).
        self.driver = driver.driver
        self.xpath = {
            "absolute_article_path" : '//*[@id="main"]/div/article/section'
        }
        # Ukrainian three-letter month abbreviations, January..December.
        self.monthshorts = [u"січ", u"лют", u"бер", u"кві", u"тра", \
            u"чер", u"лип", u"сер", u"вер", u"жов", u"лис", u"гру"]

    def _load_more(self):
        # Scroll to the bottom so the page lazy-loads more articles.
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    def _date_from_ukr_to_datetime(self, s: str, index: int):
        """Parse a "<day> <ukr-month>" string into a datetime.

        The year is assumed to be the current year. The hour/minute are
        synthesized from the article's list index (index // 60, index % 60)
        so that list order is preserved when sorting by datetime.
        """
        mon = s[s.find(' ') + 1 :]
        day = int(s[: s.find(' ')])
        return datetime(datetime.today().year, self.monthshorts.index(mon) + 1, \
            day, index // 60, index % 60)

    def _convert_datetime(self, s: str, index: int):
        """Convert a listing timestamp ("HH:MM" or "<day> <month>") to datetime."""
        s = s.strip()
        if s.find(':') != -1:
            # Assumes the "HH:MM" form, e.g. "14:05".
            h = int(s[:2])
            m = int(s[3:])
            # NOTE(review): this *adds* h hours / m minutes to now rather than
            # building today's date at h:m — confirm this is intentional.
            return datetime.today() + timedelta(hours=h, minutes=m)
        return self._date_from_ukr_to_datetime(s, index)

    def _parse_by_tag(self, tag, url, queue: Queue):
        """Scrape up to `self.limit` articles from one category page into `queue`."""
        dr = self.driver
        dr.get(url)
        elems = dr.find_elements_by_xpath(self.xpath["absolute_article_path"])
        prev_cnt = 0
        # Keep lazy-loading until we have enough articles or no new ones appear.
        while len(elems) < self.limit and len(elems) != prev_cnt:
            self._load_more()
            prev_cnt = len(elems)
            elems = dr.find_elements_by_xpath(self.xpath["absolute_article_path"])
        for e, index in zip(elems, range(self.limit)):
            dt = e.find_element_by_tag_name("time")
            dt = self._convert_datetime(dt.text, index)
            # Drill down: section > div > div > a holds the headline link.
            e = e.find_element_by_tag_name("div")
            e = e.find_element_by_tag_name("div")
            link = e.find_element_by_tag_name("a")
            e_url = link.get_attribute("href")
            e_headline = link.text
            queue.put_nowait(article.Article(e_url, e_headline, dt, tags=[tag]))

    def push_articles_list(self, queue: Queue):
        """Scrape every configured category and push all articles onto `queue`."""
        for tag in self._tag_to_url:
            self._parse_by_tag(tag, self._tag_to_url[tag], queue)
| [
"datetime.datetime.today",
"datetime.timedelta"
] | [((1812, 1828), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1826, 1828), False, 'from datetime import datetime, timedelta\n'), ((2087, 2103), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2101, 2103), False, 'from datetime import datetime, timedelta\n'), ((2106, 2135), 'datetime.timedelta', 'timedelta', ([], {'hours': 'h', 'minutes': 'm'}), '(hours=h, minutes=m)\n', (2115, 2135), False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/python
import regret as r
import sys
import os
n=int(sys.argv[1])
fout=open(sys.argv[3],'w')
print >>fout, n
for i in range(0,n):
print >>fout, i, r.mult_valuation(sys.argv[2],i)
| [
"regret.mult_valuation"
] | [((169, 201), 'regret.mult_valuation', 'r.mult_valuation', (['sys.argv[2]', 'i'], {}), '(sys.argv[2], i)\n', (185, 201), True, 'import regret as r\n')] |
from __future__ import print_function
import os
def build_is_triggered():
    """
    If a build is being triggered via Github directly (either by a comment, or
    automatically) then the ``ghprb`` will probably be involved. When that is
    the case, that plugin injects a wealth of environment variables, which can
    tell us if the build is really being handled by the plugin.
    """
    ghprb_env_vars = [
        'ghprbActualCommit', 'ghprbTriggerAuthor', 'ghprbTargetBranch',
        'ghprbTriggerAuthorLogin', 'ghprbCredentialsId', 'ghprbGhRepository',
    ]
    # A missing variable (or one set to the empty string) is treated as absent,
    # matching the original truthiness check on os.environ.get(var, False).
    # Generator form avoids the needless list + bool() wrapping.
    return all(os.environ.get(var) for var in ghprb_env_vars)
def construct_url():
    """
    Build the Github API URL used to post the notification status.

    Reads ``GITHUB_REPOSITORY`` and ``GITHUB_SHA`` from the environment and
    returns ``https://api.github.com/repos/<repo>/statuses/<sha>``.
    """
    GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
    GITHUB_SHA = os.getenv('GITHUB_SHA')
    base_url = "https://api.github.com/repos"
    # Normalize the repository slug: drop surrounding slashes and stray quotes.
    repository = GITHUB_REPOSITORY.strip('/').strip('"')
    # Join with explicit '/' separators: os.path.join would produce
    # backslash-separated "URLs" on Windows.
    full_url = '/'.join([base_url, repository, 'statuses', GITHUB_SHA])
    print('request url: %s' % full_url)
    return full_url
| [
"os.path.join",
"os.environ.get",
"os.getenv"
] | [((818, 848), 'os.getenv', 'os.getenv', (['"""GITHUB_REPOSITORY"""'], {}), "('GITHUB_REPOSITORY')\n", (827, 848), False, 'import os\n'), ((866, 889), 'os.getenv', 'os.getenv', (['"""GITHUB_SHA"""'], {}), "('GITHUB_SHA')\n", (875, 889), False, 'import os\n'), ((1010, 1044), 'os.path.join', 'os.path.join', (['base_url', 'repository'], {}), '(base_url, repository)\n', (1022, 1044), False, 'import os\n'), ((1062, 1096), 'os.path.join', 'os.path.join', (['repo_url', '"""statuses"""'], {}), "(repo_url, 'statuses')\n", (1074, 1096), False, 'import os\n'), ((1112, 1148), 'os.path.join', 'os.path.join', (['status_url', 'GITHUB_SHA'], {}), '(status_url, GITHUB_SHA)\n', (1124, 1148), False, 'import os\n'), ((593, 619), 'os.environ.get', 'os.environ.get', (['var', '(False)'], {}), '(var, False)\n', (607, 619), False, 'import os\n')] |
#!/usr/bin/env python
"""
Dummy DARPA scoring server
"""
import os
import sys
import csv
import math
import json
import logging
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from mimetypes import guess_type
g_logger = logging.getLogger(__name__)
def dist3d(xyz, xyz2):
    """Euclidean distance between two 3-D points given as (x, y, z) tuples."""
    squared = sum((a - b) ** 2 for a, b in zip(xyz, xyz2))
    return math.sqrt(squared)
class GameLogic:
    """Tracks artifact ground truth loaded from CSV and scores artifact reports."""

    def __init__(self, filename):
        """Load ground truth from a CSV file with header "artf, x, y, z"."""
        self.score = 0
        # artifact type -> list of remaining (x, y, z) ground-truth positions
        self.artf = defaultdict(list)
        with open(filename) as csvfile:
            reader = csv.reader(csvfile)
            for raw in reader:
                # Skip blank lines (which would crash on raw[0]) and the header row.
                if not raw or 'artf' in raw:
                    continue
                # artifact name, x, y, z
                artf = raw[0]
                self.artf[artf].append(tuple(float(x) for x in raw[1:]))

    def report_artf(self, artf, xyz):
        """Score a report: True (and score += 1) if `xyz` is within 5m of an
        unclaimed ground-truth artifact of type `artf`; each artifact scores once."""
        if artf in self.artf:
            best = None
            for artf_xyz in self.artf[artf]:
                if best is None or dist3d(xyz, artf_xyz) < best[0]:
                    best = dist3d(xyz, artf_xyz), artf_xyz
            if best is not None and best[0] < 5.0:  # DARPA 5m limit
                # Consume the matched artifact so it cannot be scored twice.
                self.artf[artf].remove(best[1])
                self.score += 1
                return True
        return False
class MyHandler(BaseHTTPRequestHandler):
    # Dummy DARPA scoring endpoints. Responses are canned JSON byte strings
    # mimicking the real scoring server; only the score field is live.

    def do_GET(self):
        # Status endpoint: returns the current score plus fixed placeholder fields.
        g_logger.info(f"GET: {self.path}")
        s = self.path.split('/')
        g_logger.info(str(s))
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(b'{"score":%d,"remaining_reports":97,"current_team":"robotika","run_clock":1502.8}' % self.server.game_logic.score)

    def do_POST(self):
        # Artifact report endpoint: expects JSON {"type": ..., "x": ..., "y": ..., "z": ...}.
        g_logger.info(f"POST: {self.path}")
        s = self.path.split('/')
        assert self.headers['Content-Type'] == 'application/json', self.headers['Content-Type']
        assert 'artifact_reports' in s, s
        size = int(self.headers['Content-Length'])
        data = self.rfile.read(size)
        g_logger.info(f'DATA {data}')
        d = json.loads(data)
        # Update the score; the canned response below does not reflect the result.
        self.server.game_logic.report_artf(d['type'], (d['x'], d['y'], d['z']))
        self.send_response(201)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(b'{"url":"http://10.100.2.200:8000/api/reports/3/","id":3,"x":1.0,"y":2.0,"z":4.0,"type":"Cell Phone","submitted_datetime":"2020-02-18T22:40:05.009145+00:00","run_clock":1505.0,"team":"robotika","run":"0.0.2","report_status":"scored","score_change":1}')
def main():
    """Parse args, load ground truth, and serve the dummy scoring API on :8888."""
    import argparse
    # Verbose logging with timestamps; handlers log via g_logger.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M',
    )
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('filename', help='CSV file with header "artf, x, y, z"')
    args = parser.parse_args()
    try:
        # Bind on all interfaces, port 8888; block until interrupted.
        server = HTTPServer(('',8888), MyHandler)
        server.game_logic = GameLogic(args.filename)
        print('started httpserver...')
        server.serve_forever()
    except KeyboardInterrupt:
        # NOTE(review): if the interrupt fires before HTTPServer() returns,
        # `server` is unbound here and this raises NameError — confirm acceptable.
        print('keyboard interrupt')
        server.socket.close()


if __name__ == '__main__':
    main()

# vim: expandtab sw=4 ts=4
| [
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"argparse.ArgumentParser",
"http.server.HTTPServer",
"collections.defaultdict",
"csv.reader"
] | [((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((2630, 2768), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt=\n '%Y-%m-%d %H:%M')\n", (2649, 2768), False, 'import logging\n'), ((2803, 2847), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (2826, 2847), False, 'import argparse\n'), ((486, 503), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (497, 503), False, 'from collections import defaultdict\n'), ((2095, 2111), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2105, 2111), False, 'import json\n'), ((2986, 3019), 'http.server.HTTPServer', 'HTTPServer', (["('', 8888)", 'MyHandler'], {}), "(('', 8888), MyHandler)\n", (2996, 3019), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n'), ((565, 584), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (575, 584), False, 'import csv\n')] |
import pytest
from scrapy import Request
from scrapy.http import Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from twisted.internet.error import DNSLookupError
from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware
@pytest.fixture
def retry_middleware_response(request):
    """
    Run one request/response cycle through RetryUserAgentMiddleware.

    Indirectly parametrized with a (settings_dict, response_status) pair;
    yields whatever the middleware's process_response() produces.
    """
    settings_dict, response_status = request.param
    crawler = get_crawler(Spider, settings_dict=settings_dict)
    spider = crawler._create_spider('foo')
    middleware = RetryUserAgentMiddleware.from_crawler(crawler)
    scrapy_request = Request('http://www.scrapytest.org/')
    scrapy_response = Response(scrapy_request.url, body=b'', status=response_status)
    yield middleware.process_response(scrapy_request, scrapy_response, spider)
yield mw.process_response(req, rsp, spider)
@pytest.fixture
def retry_middleware_exception(request):
    """
    Run one request/exception cycle through RetryUserAgentMiddleware.

    Indirectly parametrized with a (settings_dict, exception) pair;
    yields whatever the middleware's process_exception() produces.
    """
    settings_dict, raised_exception = request.param
    crawler = get_crawler(Spider, settings_dict=settings_dict)
    spider = crawler._create_spider('foo')
    middleware = RetryUserAgentMiddleware.from_crawler(crawler)
    scrapy_request = Request('http://www.scrapytest.org/')
    yield middleware.process_exception(scrapy_request, raised_exception, spider)
# A 503 response triggers the retry path, which must attach a fake User-Agent.
@pytest.mark.parametrize(
    'retry_middleware_response',
    (({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503), ),
    indirect=True
)
def test_random_ua_set_on_response(retry_middleware_response):
    assert 'User-Agent' in retry_middleware_response.headers
# A retried network exception (DNS failure) must also get a fake User-Agent.
@pytest.mark.parametrize(
    'retry_middleware_exception',
    (({'FAKEUSERAGENT_FALLBACK': 'firefox'},
      DNSLookupError('Test exception')), ),
    indirect=True
)
def test_random_ua_set_on_exception(retry_middleware_exception):
    assert 'User-Agent' in retry_middleware_exception.headers
| [
"scrapy_fake_useragent.middleware.RetryUserAgentMiddleware.from_crawler",
"twisted.internet.error.DNSLookupError",
"scrapy.utils.test.get_crawler",
"pytest.mark.parametrize",
"scrapy.Request",
"scrapy.http.Response"
] | [((1450, 1571), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retry_middleware_response"""', "(({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503),)"], {'indirect': '(True)'}), "('retry_middleware_response', (({\n 'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503),), indirect=True)\n", (1473, 1571), False, 'import pytest\n'), ((583, 626), 'scrapy.utils.test.get_crawler', 'get_crawler', (['Spider'], {'settings_dict': 'settings'}), '(Spider, settings_dict=settings)\n', (594, 626), False, 'from scrapy.utils.test import get_crawler\n'), ((679, 725), 'scrapy_fake_useragent.middleware.RetryUserAgentMiddleware.from_crawler', 'RetryUserAgentMiddleware.from_crawler', (['crawler'], {}), '(crawler)\n', (716, 725), False, 'from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware\n'), ((737, 774), 'scrapy.Request', 'Request', (['"""http://www.scrapytest.org/"""'], {}), "('http://www.scrapytest.org/')\n", (744, 774), False, 'from scrapy import Request\n'), ((785, 827), 'scrapy.http.Response', 'Response', (['req.url'], {'body': "b''", 'status': 'status'}), "(req.url, body=b'', status=status)\n", (793, 827), False, 'from scrapy.http import Response\n'), ((1199, 1242), 'scrapy.utils.test.get_crawler', 'get_crawler', (['Spider'], {'settings_dict': 'settings'}), '(Spider, settings_dict=settings)\n', (1210, 1242), False, 'from scrapy.utils.test import get_crawler\n'), ((1295, 1341), 'scrapy_fake_useragent.middleware.RetryUserAgentMiddleware.from_crawler', 'RetryUserAgentMiddleware.from_crawler', (['crawler'], {}), '(crawler)\n', (1332, 1341), False, 'from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware\n'), ((1353, 1390), 'scrapy.Request', 'Request', (['"""http://www.scrapytest.org/"""'], {}), "('http://www.scrapytest.org/')\n", (1360, 1390), False, 'from scrapy import Request\n'), ((1819, 1851), 'twisted.internet.error.DNSLookupError', 'DNSLookupError', (['"""Test exception"""'], {}), "('Test exception')\n", (1833, 1851), False, 'from 
twisted.internet.error import DNSLookupError\n')] |
"""Base Manager class."""
import logging
from pathlib import Path
from typing import Optional
import nowcasting_dataset.utils as nd_utils
from nowcasting_dataset import config
from nowcasting_dataset.data_sources import ALL_DATA_SOURCE_NAMES, MAP_DATA_SOURCE_NAME_TO_CLASS
logger = logging.getLogger(__name__)
class ManagerBase:
    """The Manager initializes and manages a dict of DataSource objects.

    Attrs:
      config: Configuration object.
      data_sources: dict[str, DataSource]
      data_source_which_defines_geospatial_locations: DataSource: The DataSource used to compute the
        geospatial locations of each example.
      save_batches_locally_and_upload: bool: Set to True by `load_yaml_configuration()` if
        `config.process.upload_every_n_batches > 0`.
    """

    def __init__(self) -> None:  # noqa: D107
        self.config = None
        self.data_sources = {}
        self.data_source_which_defines_geospatial_locations = None

    def load_yaml_configuration(self, filename: str, set_git: bool = True) -> None:
        """Load YAML config from `filename`."""
        # Fix: include the filename in the log message (it was missing before).
        logger.debug(f"Loading YAML configuration file {filename}")
        self.config = config.load_yaml_configuration(filename)
        if set_git:
            self.config = config.set_git_commit(self.config)
        self.save_batches_locally_and_upload = self.config.process.upload_every_n_batches > 0
        logger.debug(f"config={self.config}")

    def initialize_data_sources(
        self, names_of_selected_data_sources: Optional[list[str]] = ALL_DATA_SOURCE_NAMES
    ) -> None:
        """Initialize DataSources specified in the InputData configuration.

        For each key in each DataSource's configuration object, the string `<data_source_name>_`
        is removed from the key before passing to the DataSource constructor. This allows us to
        have verbose field names in the configuration YAML files, whilst also using standard
        constructor arguments for DataSources.
        """
        for data_source_name in names_of_selected_data_sources:
            logger.debug(f"Creating {data_source_name} DataSource object.")
            config_for_data_source = getattr(self.config.input_data, data_source_name)
            if config_for_data_source is None:
                logger.info(f"No configuration found for {data_source_name}.")
                continue
            config_for_data_source = config_for_data_source.dict()
            config_for_data_source.pop("log_level")

            # Log the resolved config on the per-data-source logger.
            data_source_logger = logging.getLogger(
                f"nowcasting_dataset.data_sources.{data_source_name}"
            )
            data_source_logger.debug(
                f"The configuration for {data_source_name} is {config_for_data_source}"
            )

            # Strip `<data_source_name>_` from the config option field names.
            config_for_data_source = nd_utils.remove_regex_pattern_from_keys(
                config_for_data_source, pattern_to_remove=f"^{data_source_name}_"
            )

            data_source_class = MAP_DATA_SOURCE_NAME_TO_CLASS[data_source_name]
            try:
                data_source = data_source_class(**config_for_data_source)
            except Exception:
                logger.exception(f"Exception whilst instantiating {data_source_name}!")
                raise
            self.data_sources[data_source_name] = data_source

        # Set data_source_which_defines_geospatial_locations:
        defining_name = self.config.input_data.data_source_which_defines_geospatial_locations
        try:
            self.data_source_which_defines_geospatial_locations = self.data_sources[defining_name]
        except KeyError:
            if self._locations_csv_file_exists():
                logger.info(
                    f"{self.config.input_data.data_source_which_defines_geospatial_locations=}"
                    " is not a member of the DataSources, but that does not matter because the CSV"
                    " files which specify the locations of the examples already exists!"
                )
            else:
                msg = (
                    "input_data.data_source_which_defines_geospatial_locations="
                    f"{defining_name}"
                    " is not a member of the DataSources, so cannot set"
                    " self.data_source_which_defines_geospatial_locations!"
                    f" The available DataSources are: {list(self.data_sources.keys())}"
                )
                logger.error(msg)
                raise RuntimeError(msg)
        else:
            # Fix: log the *defining* data source's name. The old code logged
            # `data_source_name`, i.e. whichever data source was iterated last.
            logger.info(
                f"DataSource `{defining_name}` set as"
                " data_source_which_defines_geospatial_locations."
            )

    def _locations_csv_file_exists(self):
        # Stub; subclasses override to report whether the locations CSV already exists.
        return False

    def _filename_of_locations_csv_file(self, split_name: str) -> Path:
        """Return the path of the locations CSV file for the given split."""
        return self.config.output_data.filepath / split_name
| [
"logging.getLogger",
"nowcasting_dataset.config.set_git_commit",
"nowcasting_dataset.utils.remove_regex_pattern_from_keys",
"nowcasting_dataset.config.load_yaml_configuration"
] | [((285, 312), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (302, 312), False, 'import logging\n'), ((1190, 1230), 'nowcasting_dataset.config.load_yaml_configuration', 'config.load_yaml_configuration', (['filename'], {}), '(filename)\n', (1220, 1230), False, 'from nowcasting_dataset import config\n'), ((1277, 1311), 'nowcasting_dataset.config.set_git_commit', 'config.set_git_commit', (['self.config'], {}), '(self.config)\n', (1298, 1311), False, 'from nowcasting_dataset import config\n'), ((2593, 2665), 'logging.getLogger', 'logging.getLogger', (['f"""nowcasting_dataset.data_sources.{data_source_name}"""'], {}), "(f'nowcasting_dataset.data_sources.{data_source_name}')\n", (2610, 2665), False, 'import logging\n'), ((2952, 3062), 'nowcasting_dataset.utils.remove_regex_pattern_from_keys', 'nd_utils.remove_regex_pattern_from_keys', (['config_for_data_source'], {'pattern_to_remove': 'f"""^{data_source_name}_"""'}), "(config_for_data_source,\n pattern_to_remove=f'^{data_source_name}_')\n", (2991, 3062), True, 'import nowcasting_dataset.utils as nd_utils\n')] |
import cherrypy
import requests
import json
from CommunicationLayer import ServiceRegistry
@cherrypy.popargs('imageName')
class ImageAPI(object):
    """Proxy endpoint that serves an image from the first "Data" service that has it."""

    address = "http://127.0.0.1:8761/"

    @cherrypy.expose()
    def index(self, imageName):
        """Try each registered data service in turn; return the image bytes or 404."""
        servicesArray = ServiceRegistry.getServices("Data")
        # Use the session as a context manager so its connection pool is
        # released (the old code never closed it).
        with requests.Session() as s:
            for service in servicesArray:
                response = s.get(service["ServiceAddress"] + "/image/" + imageName)
                # Any 2xx status counts as success.
                if 200 <= response.status_code < 300:
                    cherrypy.response.headers["Content-Type"] = 'image/jpeg'
                    return response.content
        raise cherrypy.HTTPError(404, "Your image could not be found in any active service")
| [
"requests.Session",
"cherrypy.popargs",
"cherrypy.expose",
"cherrypy.HTTPError",
"CommunicationLayer.ServiceRegistry.getServices"
] | [((94, 123), 'cherrypy.popargs', 'cherrypy.popargs', (['"""imageName"""'], {}), "('imageName')\n", (110, 123), False, 'import cherrypy\n'), ((194, 211), 'cherrypy.expose', 'cherrypy.expose', ([], {}), '()\n', (209, 211), False, 'import cherrypy\n'), ((296, 331), 'CommunicationLayer.ServiceRegistry.getServices', 'ServiceRegistry.getServices', (['"""Data"""'], {}), "('Data')\n", (323, 331), False, 'from CommunicationLayer import ServiceRegistry\n'), ((345, 363), 'requests.Session', 'requests.Session', ([], {}), '()\n', (361, 363), False, 'import requests\n'), ((684, 762), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(404)', '"""Your image could not be found in any active service"""'], {}), "(404, 'Your image could not be found in any active service')\n", (702, 762), False, 'import cherrypy\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
SeeKeR Search Decision Tasks.
"""
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.teachers import MultiTaskTeacher
import parlai.tasks.convai2.agents as convai2
import parlai.tasks.empathetic_dialogues.agents as ed
import parlai.tasks.wizard_of_internet.agents as woi
import parlai.tasks.wizard_of_wikipedia.agents as wow
import parlai.tasks.squad.agents as squad
import parlai.tasks.triviaqa.agents as triviaqa
import parlai.tasks.natural_questions.agents as nq
import parlai.tasks.msc.agents as msc
import parlai.utils.logging as logging
import projects.seeker.tasks.mutators # type: ignore
class WoiSearchDecisionTeacher(woi.DefaultTeacher):
    """Wizard of Internet teacher configured for the search-decision task."""

    def __init__(self, opt, shared=None):
        mutators = (
            'flatten'
            '+woi_dropout_retrieved_docs'
            '+woi_maybe_generate_search_query_mutator'
            '+skip_retrieval_mutator'
        )
        extra_mutators = opt.get('mutators')
        if extra_mutators:
            mutators = f'{mutators}+{extra_mutators}'
            logging.warning(f'overriding mutators to {mutators}')
        opt['mutators'] = mutators
        super().__init__(opt, shared)
        self.id = 'WoiSearchDecisionTeacher'
class WowSearchDecisionTeacher(wow.DefaultTeacher):
    """Wizard of Wikipedia teacher configured for the search-decision task."""

    def __init__(self, opt, shared=None):
        mutators = (
            'flatten'
            '+wow_maybe_generate_search_query_mutator'
            '+skip_retrieval_mutator'
        )
        extra_mutators = opt.get('mutators')
        if extra_mutators:
            mutators = f'{mutators}+{extra_mutators}'
            logging.warning(f'overriding mutators to {mutators}')
        opt['mutators'] = mutators
        opt['add_missing_turns'] = 'all'
        super().__init__(opt, shared)
        self.id = 'WowSearchDecisionTeacher'
class SquadSearchDecisionTeacher(squad.OpensquadTeacher):
    """Open SQuAD teacher where every example requires generating a search query."""

    def __init__(self, opt, shared=None):
        mutators = 'do_generate_search_query_mutator+skip_retrieval_mutator'
        extra_mutators = opt.get('mutators')
        if extra_mutators:
            mutators = f'{mutators}+{extra_mutators}'
            logging.warning(f'overriding mutators to {mutators}')
        opt['mutators'] = mutators
        super().__init__(opt, shared)
        self.id = 'SquadSearchDecisionTeacher'
class TriviaQASearchDecisionTeacher(triviaqa.NoEvidenceWebTeacher):
    """TriviaQA teacher where every example requires generating a search query."""

    def __init__(self, opt, shared=None):
        mutators = 'do_generate_search_query_mutator+skip_retrieval_mutator'
        extra_mutators = opt.get('mutators')
        if extra_mutators:
            mutators = f'{mutators}+{extra_mutators}'
            logging.warning(f'overriding mutators to {mutators}')
        opt['mutators'] = mutators
        super().__init__(opt, shared)
        self.id = 'TriviaQASearchDecisionTeacher'
class NQSearchDecisionTeacher(nq.NaturalQuestionsOpenTeacher):
    """Natural Questions teacher where every example requires a search query."""

    def __init__(self, opt, shared=None):
        mutators = 'do_generate_search_query_mutator+skip_retrieval_mutator'
        extra_mutators = opt.get('mutators')
        if extra_mutators:
            mutators = f'{mutators}+{extra_mutators}'
            logging.warning(f'overriding mutators to {mutators}')
        opt['mutators'] = mutators
        super().__init__(opt, shared)
        self.id = 'NQSearchDecisionTeacher'
def get_dialogue_task_mutators(opt: Opt) -> str:
    """
    Set the mutators appropriately for the dialogue tasks.
    """
    mutators = (
        'flatten'
        '+skip_retrieval_mutator'
        '+bst_tasks_maybe_generate_search_query_mutator'
    )
    extra_mutators = opt.get('mutators')
    if extra_mutators:
        mutators = f'{mutators}+{extra_mutators}'
        logging.warning(f'overriding mutators to {mutators}')
    return mutators
class Convai2SearchDecisionTeacher(convai2.NormalizedTeacher):
    """ConvAI2 dialogue teacher for the search-decision task (no label candidates)."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        opt['task'] = opt['task'] + ':no_cands'
        super().__init__(opt, shared)
        self.id = 'Convai2SearchDecisionTeacher'
class EDSearchDecisionTeacher(ed.DefaultTeacher):
    """EmpatheticDialogues teacher for the search-decision task."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        super().__init__(opt, shared=shared)
        self.id = 'EDSearchDecisionTeacher'
class MSCSearchDecisionTeacher(msc.DefaultTeacher):
    """Multi-Session Chat teacher for the search-decision task (sessions 2+ only)."""

    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        opt['include_session1'] = False
        super().__init__(opt, shared=shared)
        self.id = 'MSCSearchDecisionTeacher'
class SearchDecisionTeacher(MultiTaskTeacher):
    """Multi-task combination of all search-decision teachers in this module."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        teacher_classes = (
            WoiSearchDecisionTeacher,
            WowSearchDecisionTeacher,
            SquadSearchDecisionTeacher,
            TriviaQASearchDecisionTeacher,
            NQSearchDecisionTeacher,
            Convai2SearchDecisionTeacher,
            EDSearchDecisionTeacher,
            MSCSearchDecisionTeacher,
        )
        for teacher_class in teacher_classes:
            teacher_class.add_cmdline_args(parser, partial_opt)
        return parser

    def __init__(self, opt, shared=None):
        teacher_names = [
            'WoiSearchDecisionTeacher',
            'WowSearchDecisionTeacher',
            'SquadSearchDecisionTeacher',
            'TriviaQASearchDecisionTeacher',
            'NQSearchDecisionTeacher',
            'Convai2SearchDecisionTeacher',
            'EDSearchDecisionTeacher',
            'MSCSearchDecisionTeacher',
        ]
        opt['task'] = ','.join(
            f"projects.seeker.tasks.search_decision:{name}" for name in teacher_names
        )
        super().__init__(opt, shared)
class DefaultTeacher(SearchDecisionTeacher):
    # Default entry point for this task module: the multi-task teacher above.
    pass
| [
"parlai.utils.logging.warning"
] | [((3964, 4017), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (3979, 4017), True, 'import parlai.utils.logging as logging\n'), ((1311, 1364), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (1326, 1364), True, 'import parlai.utils.logging as logging\n'), ((1875, 1928), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (1890, 1928), True, 'import parlai.utils.logging as logging\n'), ((2405, 2458), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (2420, 2458), True, 'import parlai.utils.logging as logging\n'), ((2906, 2959), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (2921, 2959), True, 'import parlai.utils.logging as logging\n'), ((3405, 3458), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (3420, 3458), True, 'import parlai.utils.logging as logging\n')] |
from django.contrib import admin
from django.urls import path, include
from django101 import cities
from django101.cities.views import index, list_phones, test_index, create_person
# URL routing table for the project.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('test/', test_index),  # simple test view
    path('create/', create_person, name='create person'),
    path('cities/', include('django101.cities.urls')),  # cities app routes
    path('', include('django101.people.urls')),  # people app routes at site root
]
]
| [
"django.urls.path",
"django.urls.include"
] | [((212, 243), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (216, 243), False, 'from django.urls import path, include\n'), ((250, 275), 'django.urls.path', 'path', (['"""test/"""', 'test_index'], {}), "('test/', test_index)\n", (254, 275), False, 'from django.urls import path, include\n'), ((282, 334), 'django.urls.path', 'path', (['"""create/"""', 'create_person'], {'name': '"""create person"""'}), "('create/', create_person, name='create person')\n", (286, 334), False, 'from django.urls import path, include\n'), ((357, 389), 'django.urls.include', 'include', (['"""django101.cities.urls"""'], {}), "('django101.cities.urls')\n", (364, 389), False, 'from django.urls import path, include\n'), ((406, 438), 'django.urls.include', 'include', (['"""django101.people.urls"""'], {}), "('django101.people.urls')\n", (413, 438), False, 'from django.urls import path, include\n')] |
from __future__ import print_function
from datetime import date, datetime, timedelta
import os
import tempfile
# Supported reporting periods: CLI abbreviation -> canonical name + description.
PERIODS = {
    'y': {'name': 'yesterday', 'description': 'Yesterday'},
    'lw': {'name': 'lastweek', 'description': 'Last work week'},
    'cw': {'name': 'currentweek', 'description': 'Current work week'},
    'flw': {'name': 'fulllastweek', 'description': 'Last full week'},
    'fcw': {'name': 'fullcurrentweek', 'description': 'Current full week'}
}
def time_entry_list(from_date, to_date, user, redmine):
    """Print a report of the user's Redmine time entries between two dates."""
    print("Fetching time entries from {} to {} for {}...".format(from_date, to_date, user))
    print()
    # Fetch the user's time entries for the requested range, largest first.
    time_entries = redmine.time_entry.filter(
        user_id=user.id, from_date=from_date, to_date=to_date, sort='hours:desc'
    )
    if time_entries:
        # Renamed from `sum`, which shadowed the builtin.
        total_hours = 0
        report = "Time entries:\n"
        for entry in time_entries:
            report += entry_bullet_point(entry)
            total_hours += entry.hours
        report += "\n" + str(total_hours) + " hours.\n"
    else:
        report = "No time entries.\n"
    print(report)
def entry_bullet_point(entry):
    """Format one time entry as a single bullet line ending in a newline."""
    issue_ref = '#' + str(entry.issue.id) if hasattr(entry, 'issue') else 'none'
    parts = [
        '* {} / {} hours ({})'.format(entry.comments, entry.hours, issue_ref),
        '[' + str(entry.id) + ']',
        str(entry.activity),
    ]
    return ' '.join(parts) + "\n"
def handle_date_calculation_value(date_value):
    """Resolve a '+N'/'-N' day offset to a concrete YYYY-MM-DD string.

    Values not starting with '+' or '-' are returned unchanged.
    """
    if date_value.startswith(('+', '-')):
        resolved = date.today() + timedelta(int(date_value))
        return resolved.strftime('%Y-%m-%d')
    return date_value
def weekday_of_week(day_of_week, weeks_previous=0):
days_ahead_of_weekday_last_week = date.today().weekday() + (weeks_previous * 7) - day_of_week
last_weekday = datetime.now() - timedelta(days=days_ahead_of_weekday_last_week)
return last_weekday.strftime("%Y-%m-%d")
def weekday_last_week(day_of_week):
    """Return the YYYY-MM-DD date of the given weekday (0=Monday) in the previous week."""
    return weekday_of_week(day_of_week, weeks_previous=1)
def resolve_period_abbreviation(period):
    """Resolve an abbreviation or full period name to the canonical period name.

    Returns None when the value is neither a known abbreviation nor a full name.
    """
    period = period.lower()
    if period in PERIODS:
        return PERIODS[period]['name']
    # Accept full names directly. The old code built a throwaway abbr->name
    # dict only to scan its .values(); a set of names is equivalent and simpler.
    if period in {item.get('name') for item in PERIODS.values()}:
        return period
    return None
def resolve_period(period):
    """Map a canonical period name to a {'start': 'YYYY-MM-DD', 'end': 'YYYY-MM-DD'} dict.

    Work weeks run Monday-Friday; full weeks run Sunday-Saturday.
    Returns None (implicitly) for unrecognized period names.
    """
    if period == 'yesterday':
        yesterday = handle_date_calculation_value('-1')
        return {'start': yesterday, 'end': yesterday}
    if period == 'lastweek':
        start_date = weekday_last_week(0)  # last Monday
        end_date = weekday_last_week(4)  # last Friday
        return {'start': start_date, 'end': end_date}
    if period == 'currentweek':
        start_date = weekday_of_week(0)  # this Monday
        end_date = weekday_of_week(4)  # this Friday
        return {'start': start_date, 'end': end_date}
    if period == 'fulllastweek':
        start_date = weekday_of_week(6, 2)  # last Sunday
        end_date = weekday_of_week(5, 1)  # last Saturday
        return {'start': start_date, 'end': end_date}
    if period == 'fullcurrentweek':
        start_date = weekday_last_week(6)  # this Sunday
        end_date = weekday_of_week(5)  # this Saturday
        return {'start': start_date, 'end': end_date}
def resolve_activity_alias(activity_name, aliases):
    """Follow alias chains in `aliases` until a non-alias activity name is reached.

    Iterative (the old recursive form could hit the recursion limit), and
    alias cycles now terminate instead of crashing: on a cycle, the first
    revisited name is returned.
    """
    seen = set()
    while activity_name in aliases and activity_name not in seen:
        seen.add(activity_name)
        activity_name = aliases[activity_name]
    return activity_name
def resolve_issue_template(issue_name, templates):
    """Return the template dict for `issue_name`, or None when there is none."""
    return templates.get(issue_name)
def template_field(issue_name, field, templates):
    """Return one field from the issue template, or None when template or field is absent."""
    template = resolve_issue_template(issue_name, templates)
    if not template or field not in template:
        return None
    return template[field]
def resolve_issue_alias(issue_id, templates):
    """Follow template `id` fields until the issue id no longer resolves further."""
    resolved = template_field(issue_id, 'id', templates)
    return resolve_issue_alias(resolved, templates) if resolved else issue_id
def get_cache_filename(type_name):
    """Return the per-type cache file path under the system temp directory."""
    filename = 'redmein-{}'.format(type_name)
    return os.path.join(tempfile.gettempdir(), filename)
| [
"datetime.datetime.now",
"datetime.timedelta",
"tempfile.gettempdir",
"datetime.date.today"
] | [((1888, 1902), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1900, 1902), False, 'from datetime import date, datetime, timedelta\n'), ((1905, 1952), 'datetime.timedelta', 'timedelta', ([], {'days': 'days_ahead_of_weekday_last_week'}), '(days=days_ahead_of_weekday_last_week)\n', (1914, 1952), False, 'from datetime import date, datetime, timedelta\n'), ((4095, 4116), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4114, 4116), False, 'import tempfile\n'), ((1595, 1607), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1605, 1607), False, 'from datetime import date, datetime, timedelta\n'), ((1809, 1821), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1819, 1821), False, 'from datetime import date, datetime, timedelta\n')] |
import os
import re
import statistics
def find_all_key_files_path(directory, keyfile_name):
    """Recursively collect paths of files whose name contains `keyfile_name`.

    The old implementation compiled ".*" + keyfile_name + ".*" as a regex
    without escaping, so a key like ".csv" treated '.' as a wildcard and
    matched names such as "xcsv". A literal substring test is the intent.
    """
    matches = []
    for root, _dirs, files in os.walk(directory):
        for filename in files:
            if keyfile_name in filename:
                matches.append(os.path.join(root, filename))
    return matches
if __name__ == '__main__':
    # experiment setup params.
    root_path = "./"
    epoch = 0
    # find result files by recursively opening directories.
    result_files = find_all_key_files_path(root_path, ".csv")
    print(result_files)
    print()
    # For each result CSV, find the policy with the best mean score over
    # each 5-line evaluation block and persist its id next to the run.
    results = []
    for result_file in result_files:
        # find the best one
        f_learning_log = open(result_file, "r")
        i = 0
        bestpol_id = -1
        bestpol_score = -99999.0
        scores=[]
        for line in f_learning_log.readlines():
            # Every 5th non-first line closes a 5-line evaluation block:
            # append its last column, compare the block's mean against the
            # best seen so far, then reset the score buffer.
            # NOTE(review): assumes rows of "<policy id>,...,<score>" with
            # one header/initial line at i == 0 — TODO confirm format.
            if (i % 5) == 0 and (i!=0):
                scores.append(float(line.split(",")[-1]))
                id = int(line.split(",")[0])
                print(scores)
                print(id)
                if bestpol_score < statistics.mean(scores):
                    bestpol_id = id
                    bestpol_score = statistics.mean(scores)
                scores = []
            elif i != 0:
                # Lines 1..4 of a block: accumulate scores (line 0 is skipped).
                scores.append(float(line.split(",")[-1]))
            i += 1
        print(bestpol_id)
        print(bestpol_score)
        # Write the winning policy id into the run's "saves" directory,
        # derived from the result file's path prefix before "_results".
        dirname = result_file.split("_results")[0] + "saves/"
        print(dirname)
        f_saves = open(dirname+"bestpol-cvar.txt", "w")
        f_saves.write(str(bestpol_id))
        f_saves.close()
        f_learning_log.close()
"statistics.mean",
"os.path.join",
"os.walk",
"re.compile"
] | [((102, 140), 're.compile', 're.compile', (["('.*' + keyfile_name + '.*')"], {}), "('.*' + keyfile_name + '.*')\n", (112, 140), False, 'import re\n'), ((180, 198), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (187, 198), False, 'import os\n'), ((327, 351), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (339, 351), False, 'import os\n'), ((1144, 1167), 'statistics.mean', 'statistics.mean', (['scores'], {}), '(scores)\n', (1159, 1167), False, 'import statistics\n'), ((1241, 1264), 'statistics.mean', 'statistics.mean', (['scores'], {}), '(scores)\n', (1256, 1264), False, 'import statistics\n')] |
# -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from . import _
from plone.app.vocabularies.catalog import CatalogSource
from plone.namedfile.field import NamedBlobImage
from plone.supermodel import model
from z3c.relationfield.schema import RelationChoice
from zope import schema
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
# Catalog-backed vocabulary limiting the slide "link" field to Documents
# and News Items.
launches = CatalogSource(portal_type=("Document", "News Item"))
class IParrucFlexsliderLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer."""
class ISlide(model.Schema):
    """Schema for a single flexslider slide: an image, a black-and-white
    toggle and an optional link to site content."""
    # Slide image; the UI description suggests 1200x300 px.
    image = NamedBlobImage(
        title=_("Immagine slide"),
        description=_(u"Dimensione consigliata 1200x300"),
        required=True,
    )
    # Whether the image should be converted to black and white (default on).
    bw = schema.Bool(
        title=_(u"Convertire in bianco e nero?"),
        default=True,
    )
    # Optional relation to the content the slide links to; the vocabulary
    # is restricted by the ``launches`` catalog source above.
    link = RelationChoice(
        title=_(u"Contenuto dal linkare nella slide"),
        source=launches,
        required=False,
    )
    # Mark the image as the schema's primary field.
    model.primary('image')
| [
"plone.app.vocabularies.catalog.CatalogSource",
"plone.supermodel.model.primary"
] | [((401, 453), 'plone.app.vocabularies.catalog.CatalogSource', 'CatalogSource', ([], {'portal_type': "('Document', 'News Item')"}), "(portal_type=('Document', 'News Item'))\n", (414, 453), False, 'from plone.app.vocabularies.catalog import CatalogSource\n'), ((990, 1012), 'plone.supermodel.model.primary', 'model.primary', (['"""image"""'], {}), "('image')\n", (1003, 1012), False, 'from plone.supermodel import model\n')] |
#!/usr/bin/env python
# encoding: utf-8
__author__ = 'hasee'
import json
from datetime import datetime
class BlockInfo(object):
    """Summary of an Ethereum block as returned by a JSON-RPC node.

    Numeric fields arrive as 0x-prefixed hex strings and are converted to
    ints; the timestamp is rendered as a local-time string.
    """
    def __init__(self):
        # block hash
        self.block_id = ''
        # block height
        self.block_num = 0
        # block size in bytes
        self.block_size = 0
        # hash of the parent block
        self.previous = ''
        # digest of the transactions in the block (transactionsRoot)
        self.trx_digest = ''
        # block producer
        self.miner = ''
        # block timestamp, formatted "%Y-%m-%d %H:%M:%S"
        self.block_time = ''
        # transactions contained in the block
        self.transactions = []
        # number of transactions in the block
        self.trx_count = 0
        # block reward
        self.block_bonus = 0
        # total transferred amount
        self.trx_amount = 0
        # total fees
        self.trx_fee = 0

    def from_block_resp(self, block_result):
        """Populate this instance from an ``eth_getBlockBy*`` JSON-RPC result.

        :param block_result: dict with hex-string fields ("number", "size",
            "timestamp", ...) as returned by the node.
        """
        self.block_id = block_result.get("hash")
        self.block_num = int(block_result.get("number"), 16)
        self.block_size = int(block_result.get("size"), 16)
        self.previous = block_result.get("parentHash")
        self.trx_digest = block_result.get("transactionsRoot")
        # fromtimestamp() renders the block time in the local timezone.
        self.block_time = datetime.fromtimestamp(
            int(block_result.get("timestamp"), 16)
        ).strftime("%Y-%m-%d %H:%M:%S")
        self.transactions = block_result.get("transactions")
        self.block_bonus = 5.0  # fixed reward; TODO confirm for the target chain
        self.trx_count = len(self.transactions)
        # Bug fix: the original assigned self.amount / self.fee here, leaving
        # the trx_amount / trx_fee fields that get_json_data() serializes at
        # their __init__ defaults.  Populate both (legacy names kept for any
        # external readers).
        self.amount = 0.0
        self.fee = 0.0
        self.trx_amount = 0.0
        self.trx_fee = 0.0

    def get_json_data(self):
        """Return a JSON-serializable dict describing this block."""
        return {
            "blockHash": self.block_id,
            "chainId": "eth",
            "blockNumber": self.block_num,
            "blockSize": self.block_size,
            "previous": self.previous,
            "trxDigest": self.trx_digest,
            "transactionsCount": self.trx_count,
            "trxamount": self.trx_amount,
            "trxfee": self.trx_fee,
            "createtime": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }
| [
"datetime.datetime.now"
] | [((1648, 1662), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1660, 1662), False, 'from datetime import datetime\n')] |
"""
Command line options tests
"""
import os
import re
from six import iteritems, StringIO
try:
# Python 3
from unittest.mock import patch
except ImportError:
from mock import patch
from dirsync.options import ArgParser
from dirsync.run import sync
from ._base import DirSyncTestCase
from . import trees
class CmdLineTests(DirSyncTestCase):
    def dirsync(self, *args, **kwargs):
        """Run dirsync as if invoked from the command line with ``args``."""
        parsed = vars(ArgParser().parse_args(args))
        kwargs.update(parsed)
        sync(**kwargs)
class SyncTests(CmdLineTests):
    # Source tree fixture created by the base class before each test.
    init_trees = (('src', trees.simple),)
    def test_sync(self):
        """--sync with --create copies the whole tree into a fresh dst."""
        self.dirsync('src', 'dst', '--sync', '-c')
        self.assertIsFile('dst/file1.txt')
        self.assertIsDir('dst/dir')
        self.assertListDir('dst/dir', ['file4.txt'])
        self.assertIsDir('dst/empty_dir')
        self.assertListDir('dst/empty_dir', [])
    def test_no_action(self):
        """Omitting the action option entirely is rejected."""
        with self.assertRaises(ValueError):
            self.dirsync('src', 'dst')
    def test_no_create(self):
        """--sync without --create fails when dst does not exist."""
        with self.assertRaises(ValueError):
            self.dirsync('src', 'dst')
    def test_no_create(self):
        with self.assertRaises(ValueError):
            self.dirsync('src', 'dst', '--sync')
    @patch('sys.stdout', new_callable=StringIO)
    def test_output(self, stdout):
        """Two consecutive syncs report 4 then 0 files copied on stdout."""
        self.dirsync('src', 'dst', '--sync', '-c')
        self.dirsync('src', 'dst', '--sync', '-c')
        # Timings vary run to run, so they are normalized to 'X' before
        # comparing against the expected report.
        self.assertEqual(
            re.sub('\d\.\d{2}', 'X', stdout.getvalue().strip()),
            'dirsync finished in X seconds.\n'
            '3 directories parsed, 4 files copied\n'
            '3 directories were created.\n\n'
            'dirsync finished in X seconds.\n'
            '3 directories parsed, 0 files copied'
        )
class CfgFiles(CmdLineTests):
    """Tests for picking up defaults from a ``.dirsync`` config file."""
    # Source tree fixture created by the base class before each test.
    init_trees = (('src', trees.simple),)

    def mk_cfg_file(self, **options):
        """Write a ``.dirsync`` file under ``src`` with the given options
        in a ``[defaults]`` section.

        Fixes: the handle is now closed via a context manager even if a
        write raises (the original leaked it on error), and plain
        ``dict.items()`` replaces ``six.iteritems``.
        """
        with open(os.path.join('src', '.dirsync'), 'w') as cfg_file:
            cfg_file.write('[defaults]\n')
            for opt, val in options.items():
                cfg_file.write('%s = %s\n' % (opt, str(val)))

    def test_sync_default(self):
        """A config file supplying action/create makes bare ``dirsync src dst`` work."""
        self.mk_cfg_file(action='sync', create=True)
        self.dirsync('src', 'dst')
        self.assertIsFile('dst/file1.txt')
        self.assertIsDir('dst/dir')
        self.assertListDir('dst/dir', ['file4.txt'])
        self.assertIsDir('dst/empty_dir')
        self.assertListDir('dst/empty_dir', [])
        # The config file itself must not be copied over.
        self.assertNotExists('dst/.dirsync')
| [
"mock.patch",
"os.path.join",
"dirsync.run.sync",
"dirsync.options.ArgParser",
"six.iteritems"
] | [((1150, 1192), 'mock.patch', 'patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (1155, 1192), False, 'from mock import patch\n'), ((491, 505), 'dirsync.run.sync', 'sync', ([], {}), '(**kwargs)\n', (495, 505), False, 'from dirsync.run import sync\n'), ((1937, 1955), 'six.iteritems', 'iteritems', (['options'], {}), '(options)\n', (1946, 1955), False, 'from six import iteritems, StringIO\n'), ((1834, 1865), 'os.path.join', 'os.path.join', (['"""src"""', '""".dirsync"""'], {}), "('src', '.dirsync')\n", (1846, 1865), False, 'import os\n'), ((451, 462), 'dirsync.options.ArgParser', 'ArgParser', ([], {}), '()\n', (460, 462), False, 'from dirsync.options import ArgParser\n')] |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import logging
import lzma
from hashlib import sha256
from typing import Optional, Tuple
from eth_typing.encoding import HexStr
from flask import Response, request
from flask_sieve import validate
from ocean_provider.requests_session import get_requests_session
from ocean_provider.user_nonce import update_nonce
from ocean_provider.utils.basics import (
get_config,
get_provider_wallet,
get_web3,
)
from ocean_provider.utils.data_nft import (
MetadataState,
get_metadata,
get_metadata_logs_from_tx_receipt,
)
from ocean_provider.utils.data_nft_factory import is_nft_deployed_from_factory
from ocean_provider.utils.encryption import do_decrypt
from ocean_provider.utils.error_responses import error_response
from ocean_provider.utils.util import get_request_data
from ocean_provider.validation.provider_requests import DecryptRequest
from web3.main import Web3
from . import services
provider_wallet = get_provider_wallet()
requests_session = get_requests_session()
logger = logging.getLogger(__name__)
@services.route("/decrypt", methods=["POST"])
@validate(DecryptRequest)
def decrypt():
"""Decrypts an encrypted document based on transaction Id or dataNftAddress.
---
consumes:
- application/json
parameters:
- name: decrypterAddress
description: address of agent requesting decrypt
type: string
required: true
- name: chainId
description: chainId of the chain on which the encrypted document is stored
type: int
required: true
- name: transactionId
description: transaction Id where the document was created or last updated,
required if dataNftAddress, encryptedDocument and flags parameters missing
required: false
type: string
- name: dataNftAddress
description: NFT address of the document,
required if the transactionId parameter is missing
required: false
type: string
- name: encryptedDocument
description: encrypted document contents,
required if the transactionId parameter is missing
required: false
type: string
- name: flags
description: encryption and compression flags,
required if the transactionId parameter is missing
required: false
type: int
- name: documentHash
description: hash of the original document used for integrity check,
required if the transactionId parameter is missing
required: false
type: int
- name: nonce
description: user nonce (timestamp)
required: true
type: decimal
- name: signature
description: user signature based on
transactionId+dataNftAddress+decrypterAddress+chainId+nonce
required: true
type: string
responses:
201:
description: decrypted document
400:
description: One or more of the required attributes are missing or invalid.
503:
description: Service Unavailable
"""
data = get_request_data(request)
logger.info(f"decrypt called. arguments = {data}")
return _decrypt(
decrypter_address=data.get("decrypterAddress"),
chain_id=data.get("chainId"),
transaction_id=data.get("transactionId"),
data_nft_address=data.get("dataNftAddress"),
encrypted_document=data.get("encryptedDocument"),
flags=data.get("flags"),
document_hash=data.get("documentHash"),
nonce=data.get("nonce"),
)
def _decrypt(
decrypter_address: HexStr,
chain_id: int,
transaction_id: Optional[HexStr],
data_nft_address: HexStr,
encrypted_document: Optional[HexStr],
flags: Optional[int],
document_hash: Optional[HexStr],
nonce: str,
) -> Response:
update_nonce(decrypter_address, nonce)
# Check if given chain_id matches Provider's chain_id
web3 = get_web3()
if web3.chain_id != chain_id:
return error_response(f"Unsupported chain ID {chain_id}", 400, logger)
# Check if decrypter is authorized
authorized_decrypters = get_config().authorized_decrypters
logger.info(f"authorized_decrypters = {authorized_decrypters}")
if authorized_decrypters and decrypter_address not in authorized_decrypters:
return error_response("Decrypter not authorized", 403, logger)
if not is_nft_deployed_from_factory(web3, data_nft_address):
return error_response(
"Asset not deployed by the data NFT factory.", 400, logger
)
if not transaction_id:
try:
(encrypted_document, flags, document_hash) = _convert_args_to_bytes(
encrypted_document, flags, document_hash
)
except Exception:
return error_response("Failed to convert input args to bytes.", 400, logger)
else:
try:
(
encrypted_document,
flags,
document_hash,
) = _get_args_from_transaction_id(web3, transaction_id, data_nft_address)
except Exception:
return error_response("Failed to process transaction id.", 400, logger)
logger.info(
f"data_nft_address = {data_nft_address}, "
f"encrypted_document as bytes = {encrypted_document}, "
f"flags as bytes = {flags}, "
f"document_hash as bytes = {document_hash}"
)
# Check if DDO metadata state is ACTIVE
(_, _, metadata_state, _) = get_metadata(web3, data_nft_address)
logger.info(f"metadata_state = {metadata_state}")
if metadata_state in [MetadataState.ACTIVE, MetadataState.TEMPORARILY_DISABLED]:
pass
elif metadata_state == MetadataState.END_OF_LIFE:
return error_response("Asset end of life", 403, logger)
elif metadata_state == MetadataState.DEPRECATED:
return error_response("Asset deprecated", 403, logger)
elif metadata_state == MetadataState.REVOKED:
return error_response("Asset revoked", 403, logger)
else:
return error_response("Invalid MetadataState", 400, logger)
working_document = encrypted_document
# bit 2: check if DDO is ecies encrypted
if flags[0] & 2:
try:
working_document = do_decrypt(working_document, get_provider_wallet())
logger.info("Successfully decrypted document.")
except Exception:
return error_response("Failed to decrypt.", 400, logger)
else:
logger.warning(
"Document not encrypted (flags bit 2 not set). Skipping decryption."
)
# bit 1: check if DDO is lzma compressed
if flags[0] & 1:
try:
working_document = lzma.decompress(working_document)
logger.info("Successfully decompressed document.")
except Exception:
return error_response("Failed to decompress", 400, logger)
document = working_document
logger.info(f"document = {document}")
# Verify checksum matches
if sha256(document).hexdigest() != document_hash.hex():
return error_response("Checksum doesn't match.", 400, logger)
logger.info("Checksum matches.")
response = Response(document, 201, {"Content-type": "text/plain"})
logger.info(f"decrypt response = {response}")
return response
def _convert_args_to_bytes(
encrypted_document: HexStr, flags: int, document_hash: HexStr
) -> Tuple[bytes, bytes, bytes]:
"""Return the encrypted_document, flags, and document_hash as bytes."""
return (
Web3.toBytes(hexstr=encrypted_document),
flags.to_bytes(1, "big"),
Web3.toBytes(hexstr=document_hash),
)
def _get_args_from_transaction_id(
web3: Web3, transaction_id: HexStr, data_nft_address: HexStr
) -> Tuple[bytes, bytes, bytes]:
"""Get the MetadataCreated and MetadataUpdated logs from the transaction id.
Parse logs and return the data_nft_address, encrypted_document, flags, and
document_hash.
"""
tx_receipt = web3.eth.get_transaction_receipt(transaction_id)
logs = get_metadata_logs_from_tx_receipt(web3, tx_receipt, data_nft_address)
logger.info(f"transaction_id = {transaction_id}, logs = {logs}")
if len(logs) > 1:
logger.warning(
"More than 1 MetadataCreated/MetadataUpdated event detected. "
"Using the event at index 0."
)
log = logs[0]
return (log.args["data"], log.args["flags"], log.args["metaDataHash"])
| [
"logging.getLogger",
"ocean_provider.utils.basics.get_web3",
"ocean_provider.utils.util.get_request_data",
"flask_sieve.validate",
"ocean_provider.utils.data_nft_factory.is_nft_deployed_from_factory",
"hashlib.sha256",
"ocean_provider.utils.data_nft.get_metadata",
"ocean_provider.requests_session.get_... | [((1014, 1035), 'ocean_provider.utils.basics.get_provider_wallet', 'get_provider_wallet', ([], {}), '()\n', (1033, 1035), False, 'from ocean_provider.utils.basics import get_config, get_provider_wallet, get_web3\n'), ((1055, 1077), 'ocean_provider.requests_session.get_requests_session', 'get_requests_session', ([], {}), '()\n', (1075, 1077), False, 'from ocean_provider.requests_session import get_requests_session\n'), ((1088, 1115), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1105, 1115), False, 'import logging\n'), ((1165, 1189), 'flask_sieve.validate', 'validate', (['DecryptRequest'], {}), '(DecryptRequest)\n', (1173, 1189), False, 'from flask_sieve import validate\n'), ((3165, 3190), 'ocean_provider.utils.util.get_request_data', 'get_request_data', (['request'], {}), '(request)\n', (3181, 3190), False, 'from ocean_provider.utils.util import get_request_data\n'), ((3917, 3955), 'ocean_provider.user_nonce.update_nonce', 'update_nonce', (['decrypter_address', 'nonce'], {}), '(decrypter_address, nonce)\n', (3929, 3955), False, 'from ocean_provider.user_nonce import update_nonce\n'), ((4026, 4036), 'ocean_provider.utils.basics.get_web3', 'get_web3', ([], {}), '()\n', (4034, 4036), False, 'from ocean_provider.utils.basics import get_config, get_provider_wallet, get_web3\n'), ((5587, 5623), 'ocean_provider.utils.data_nft.get_metadata', 'get_metadata', (['web3', 'data_nft_address'], {}), '(web3, data_nft_address)\n', (5599, 5623), False, 'from ocean_provider.utils.data_nft import MetadataState, get_metadata, get_metadata_logs_from_tx_receipt\n'), ((7280, 7335), 'flask.Response', 'Response', (['document', '(201)', "{'Content-type': 'text/plain'}"], {}), "(document, 201, {'Content-type': 'text/plain'})\n", (7288, 7335), False, 'from flask import Response, request\n'), ((8156, 8225), 'ocean_provider.utils.data_nft.get_metadata_logs_from_tx_receipt', 'get_metadata_logs_from_tx_receipt', (['web3', 
'tx_receipt', 'data_nft_address'], {}), '(web3, tx_receipt, data_nft_address)\n', (8189, 8225), False, 'from ocean_provider.utils.data_nft import MetadataState, get_metadata, get_metadata_logs_from_tx_receipt\n'), ((4086, 4149), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['f"""Unsupported chain ID {chain_id}"""', '(400)', 'logger'], {}), "(f'Unsupported chain ID {chain_id}', 400, logger)\n", (4100, 4149), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((4218, 4230), 'ocean_provider.utils.basics.get_config', 'get_config', ([], {}), '()\n', (4228, 4230), False, 'from ocean_provider.utils.basics import get_config, get_provider_wallet, get_web3\n'), ((4417, 4472), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Decrypter not authorized"""', '(403)', 'logger'], {}), "('Decrypter not authorized', 403, logger)\n", (4431, 4472), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((4485, 4537), 'ocean_provider.utils.data_nft_factory.is_nft_deployed_from_factory', 'is_nft_deployed_from_factory', (['web3', 'data_nft_address'], {}), '(web3, data_nft_address)\n', (4513, 4537), False, 'from ocean_provider.utils.data_nft_factory import is_nft_deployed_from_factory\n'), ((4554, 4628), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Asset not deployed by the data NFT factory."""', '(400)', 'logger'], {}), "('Asset not deployed by the data NFT factory.', 400, logger)\n", (4568, 4628), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((7172, 7226), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Checksum doesn\'t match."""', '(400)', 'logger'], {}), '("Checksum doesn\'t match.", 400, logger)\n', (7186, 7226), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((7632, 7671), 'web3.main.Web3.toBytes', 'Web3.toBytes', ([], {'hexstr': 'encrypted_document'}), 
'(hexstr=encrypted_document)\n', (7644, 7671), False, 'from web3.main import Web3\n'), ((7715, 7749), 'web3.main.Web3.toBytes', 'Web3.toBytes', ([], {'hexstr': 'document_hash'}), '(hexstr=document_hash)\n', (7727, 7749), False, 'from web3.main import Web3\n'), ((5845, 5893), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Asset end of life"""', '(403)', 'logger'], {}), "('Asset end of life', 403, logger)\n", (5859, 5893), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((6797, 6830), 'lzma.decompress', 'lzma.decompress', (['working_document'], {}), '(working_document)\n', (6812, 6830), False, 'import lzma\n'), ((4889, 4958), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Failed to convert input args to bytes."""', '(400)', 'logger'], {}), "('Failed to convert input args to bytes.', 400, logger)\n", (4903, 4958), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((5217, 5281), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Failed to process transaction id."""', '(400)', 'logger'], {}), "('Failed to process transaction id.', 400, logger)\n", (5231, 5281), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((5962, 6009), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Asset deprecated"""', '(403)', 'logger'], {}), "('Asset deprecated', 403, logger)\n", (5976, 6009), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((6382, 6403), 'ocean_provider.utils.basics.get_provider_wallet', 'get_provider_wallet', ([], {}), '()\n', (6401, 6403), False, 'from ocean_provider.utils.basics import get_config, get_provider_wallet, get_web3\n'), ((6510, 6559), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Failed to decrypt."""', '(400)', 'logger'], {}), "('Failed to decrypt.', 400, logger)\n", (6524, 6559), False, 'from 
ocean_provider.utils.error_responses import error_response\n'), ((6939, 6990), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Failed to decompress"""', '(400)', 'logger'], {}), "('Failed to decompress', 400, logger)\n", (6953, 6990), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((7104, 7120), 'hashlib.sha256', 'sha256', (['document'], {}), '(document)\n', (7110, 7120), False, 'from hashlib import sha256\n'), ((6075, 6119), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Asset revoked"""', '(403)', 'logger'], {}), "('Asset revoked', 403, logger)\n", (6089, 6119), False, 'from ocean_provider.utils.error_responses import error_response\n'), ((6145, 6197), 'ocean_provider.utils.error_responses.error_response', 'error_response', (['"""Invalid MetadataState"""', '(400)', 'logger'], {}), "('Invalid MetadataState', 400, logger)\n", (6159, 6197), False, 'from ocean_provider.utils.error_responses import error_response\n')] |
from __future__ import annotations
import os
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
# Read the CSIRTG API token at import time; refuse to load without one
# because csirtgsdk (imported below) reads CSIRTG_TOKEN on import.
token = CowrieConfig.get("output_csirtg", "token", fallback="<PASSWORD>")
if token == "<PASSWORD>":
    log.msg("output_csirtg: token not found in configuration file")
    exit(1)
os.environ["CSIRTG_TOKEN"] = token
import csirtgsdk  # noqa: E402
class Output(cowrie.core.output.Output):
    """
    CSIRTG output

    Submits the source IP of every new honeypot connection to a CSIRTG
    feed, at most once per (IP, transport) pair per day.
    """
    def start(self):
        """
        Start the output module.
        Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
        Cowrie sets this environment variable.
        """
        self.user = CowrieConfig.get("output_csirtg", "username")
        self.feed = CowrieConfig.get("output_csirtg", "feed")
        self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
        self.description = CowrieConfig.get("output_csirtg", "description")
        # Per-day cache of already-submitted "ip,system" keys; see submitIp().
        self.context = {}
        # self.client = csirtgsdk.client.Client()
    def stop(self):
        pass
    def write(self, e):
        """
        Only pass on connection events
        """
        if e["eventid"] == "cowrie.session.connect":
            self.submitIp(e)
    def submitIp(self, e):
        """
        Build a CSIRTG indicator from a connection event and submit it.

        Each (source IP, honeypot system) pair is submitted at most once
        per calendar day.
        """
        peerIP = e["src_ip"]
        ts = e["timestamp"]
        system = e.get("system", None)
        # Only the SSH and Telnet transports are reported.
        if system not in [
            "cowrie.ssh.factory.CowrieSSHFactory",
            "cowrie.telnet.transport.HoneyPotTelnetFactory",
        ]:
            return
        today = str(datetime.now().date())
        if not self.context.get(today):
            # New day: drop yesterday's cache and start a fresh set.
            self.context = {}
            self.context[today] = set()
        key = ",".join([peerIP, system])
        if key in self.context[today]:
            # Already reported this IP/system combination today.
            return
        self.context[today].add(key)
        tags = "scanner,ssh"
        port = 22
        if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
            tags = "scanner,telnet"
            port = 23
        # Indicator payload in the shape expected by the CSIRTG SDK.
        i = {
            "user": self.user,
            "feed": self.feed,
            "indicator": peerIP,
            "portlist": port,
            "protocol": "tcp",
            "tags": tags,
            "firsttime": ts,
            "lasttime": ts,
            "description": self.description,
        }
        if self.debug is True:
            log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
        ind = csirtgsdk.indicator.Indicator(i).submit()
        if self.debug is True:
            log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
        log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
| [
"twisted.python.log.msg",
"cowrie.core.config.CowrieConfig.get",
"cowrie.core.config.CowrieConfig.getboolean",
"datetime.datetime.now",
"csirtgsdk.indicator.Indicator"
] | [((187, 252), 'cowrie.core.config.CowrieConfig.get', 'CowrieConfig.get', (['"""output_csirtg"""', '"""token"""'], {'fallback': '"""<PASSWORD>"""'}), "('output_csirtg', 'token', fallback='<PASSWORD>')\n", (203, 252), False, 'from cowrie.core.config import CowrieConfig\n'), ((283, 346), 'twisted.python.log.msg', 'log.msg', (['"""output_csirtg: token not found in configuration file"""'], {}), "('output_csirtg: token not found in configuration file')\n", (290, 346), False, 'from twisted.python import log\n'), ((733, 778), 'cowrie.core.config.CowrieConfig.get', 'CowrieConfig.get', (['"""output_csirtg"""', '"""username"""'], {}), "('output_csirtg', 'username')\n", (749, 778), False, 'from cowrie.core.config import CowrieConfig\n'), ((799, 840), 'cowrie.core.config.CowrieConfig.get', 'CowrieConfig.get', (['"""output_csirtg"""', '"""feed"""'], {}), "('output_csirtg', 'feed')\n", (815, 840), False, 'from cowrie.core.config import CowrieConfig\n'), ((862, 927), 'cowrie.core.config.CowrieConfig.getboolean', 'CowrieConfig.getboolean', (['"""output_csirtg"""', '"""debug"""'], {'fallback': '(False)'}), "('output_csirtg', 'debug', fallback=False)\n", (885, 927), False, 'from cowrie.core.config import CowrieConfig\n'), ((955, 1003), 'cowrie.core.config.CowrieConfig.get', 'CowrieConfig.get', (['"""output_csirtg"""', '"""description"""'], {}), "('output_csirtg', 'description')\n", (971, 1003), False, 'from cowrie.core.config import CowrieConfig\n'), ((2407, 2460), 'twisted.python.log.msg', 'log.msg', (['f"""output_csirtg: Submitting {i!r} to CSIRTG"""'], {}), "(f'output_csirtg: Submitting {i!r} to CSIRTG')\n", (2414, 2460), False, 'from twisted.python import log\n'), ((2562, 2616), 'twisted.python.log.msg', 'log.msg', (['f"""output_csirtg: Submitted {ind!r} to CSIRTG"""'], {}), "(f'output_csirtg: Submitted {ind!r} to CSIRTG')\n", (2569, 2616), False, 'from twisted.python import log\n'), ((2476, 2508), 'csirtgsdk.indicator.Indicator', 'csirtgsdk.indicator.Indicator', (['i'], {}), 
'(i)\n', (2505, 2508), False, 'import csirtgsdk\n'), ((1600, 1614), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1612, 1614), False, 'from datetime import datetime\n')] |
import sys
import matplotlib
# Select a non-interactive backend before pyplot is imported, so the
# script can run headless and only write PNGs.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# Input file name is the first CLI argument.
fn = sys.argv[1]
pal = sns.color_palette()
with open(fn) as f:
    # Per-bin series parsed from the input file.
    toPlot = []
    names = []
    goodness = []
    xs = []
    ys = []
    ps = []
    sns.set()
    for line in f:
        tokens = line.split(' ')
        if len(tokens) == 1:
            # A single-token line declares the number of bins and
            # allocates one empty series per bin.
            numBins = int(tokens[0])
            for i in range(0, numBins):
                toPlot.append([])
                xs.append([])
                ys.append([])
                ps.append([])
                names.append('')
                goodness.append(0)
        else:
            # Data line: "<binId> <plotNum> <val> <goodness> <x> <y> <p>"
            # NOTE(review): column meanings inferred from usage — confirm
            # against the producer of this file.
            binId = int(tokens[0])
            plotNum = int(tokens[1])
            val = int(tokens[2])
            xs[plotNum].append(int(tokens[4]))
            ys[plotNum].append(int(tokens[5]))
            ps[plotNum].append(int(tokens[6]))
            toPlot[plotNum].append(val)
            names[plotNum] = str(binId)
            goodness[plotNum] = int(tokens[3])
    # Emit one line plot per bin into figures/binScatter<i>.png.
    for i in range(0, len(toPlot)):
        clr = pal[2]
        #sns.distplot(toPlot[i], kde=False, bins = 50, color=clr)
        #plt.title('bin ' + names[i])
        #plt.savefig('figures/binHist' + str(i+1) + '.png')
        #plt.cla()
        #plt.clf()
        #plt.close()
        sns.lineplot(x=xs[i], y=ys[i], color = pal[0])
        sns.lineplot(x=xs[i], y=ps[i], color = clr)
        plt.title('bin ' + names[i])
        plt.savefig('figures/binScatter' + str(i+1) + '.png')
        # Clear axes/figure so the next bin starts from a clean canvas.
        plt.cla()
        plt.clf()
        plt.close()
| [
"seaborn.set",
"seaborn.color_palette",
"matplotlib.use",
"matplotlib.pyplot.clf",
"seaborn.lineplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla"
] | [((29, 50), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (43, 50), False, 'import matplotlib\n'), ((128, 147), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (145, 147), True, 'import seaborn as sns\n'), ((243, 252), 'seaborn.set', 'sns.set', ([], {}), '()\n', (250, 252), True, 'import seaborn as sns\n'), ((1116, 1160), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'xs[i]', 'y': 'ys[i]', 'color': 'pal[0]'}), '(x=xs[i], y=ys[i], color=pal[0])\n', (1128, 1160), True, 'import seaborn as sns\n'), ((1167, 1208), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'xs[i]', 'y': 'ps[i]', 'color': 'clr'}), '(x=xs[i], y=ps[i], color=clr)\n', (1179, 1208), True, 'import seaborn as sns\n'), ((1215, 1243), 'matplotlib.pyplot.title', 'plt.title', (["('bin ' + names[i])"], {}), "('bin ' + names[i])\n", (1224, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1315), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1313, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1329), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1327, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1345), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1343, 1345), True, 'import matplotlib.pyplot as plt\n')] |
from unittest import TestCase
from apps.profiles.models import Profile
from apps.recipes.models import Recipe, Ingredient
from apps.recommendations.utils import ComparableInventory
# TODO: written for manual testing with preloaded db; for general use should create resources in setUp()
class ComparableInventoryTest(TestCase):
    """Tests for ComparableInventory against a preloaded dev database.

    NOTE(review): relies on fixtures already present in the DB — a Profile
    with pk=3 owning a place whose inventory contains product 329 (pepino)
    with quantity 3, and recipes 291/313 — confirm before running elsewhere.
    """
    def setUp(self):
        prof = Profile.objects.get(pk=3)
        place = prof.places.first()
        inv = place.inventory.all().prefetch_related('product', 'unit')
        # Debug aid: on the first run self.inv does not exist yet, so the
        # AttributeError is swallowed; later runs print the previous state.
        try:
            print('creating')
            print(f'---- {self.inv}')
            self.inv.print_inventory(product_id=329)
        except AttributeError:
            pass
        self.inv = ComparableInventory(inv)
        self.inv.print_inventory(product_id=329)
    def tearDown(self):
        print('destroying')
        self.inv.destroy()
        self.inv.print_inventory()
        self.inv = None
        print('destroyed')
    def test_print(self):
        """Smoke test: printing a single product's inventory does not raise."""
        self.inv.print_inventory(product_id=329)
    def test_substract_ingredient(self):
        """Subtracting an ingredient updates the matching inventory entry."""
        # Product ID 329: pepino
        ing = Ingredient.objects.filter(product_id=329)[0]
        print(ing)
        self.inv.print_inventory(product_id=329)
        self.inv.substract(ing)
        print(self.inv.inventory.get(329))
        self.inv.print_inventory(product_id=329)
    def test_reset(self):
        """reset() restores quantities consumed by substract()."""
        ing = Ingredient.objects.filter(product_id=329)[0]
        self.assertEqual(self.inv.get(329).quantity, 3)
        self.inv.substract(ing)
        self.assertEqual(self.inv.get(329).quantity, 2)
        self.inv.substract(ing)
        self.assertEqual(self.inv.get(329).quantity, 1)
        self.inv.reset()
        self.assertEqual(self.inv.get(329).quantity, 3)
    def test_can_make_recipe(self):
        """can_make() distinguishes feasible from infeasible recipes."""
        # Shouldn't be able to do this
        recipe1 = Recipe.objects.get(pk=313)
        self.assertFalse(self.inv.can_make(recipe1))
        # Should be able to make this one
        recipe2 = Recipe.objects.get(pk=291)
        self.assertTrue(self.inv.can_make(recipe2))
    def test_can_make_multiple_times(self):
        """can_make() is a pure check and does not consume inventory."""
        recipe = Recipe.objects.get(pk=291)
        self.assertTrue(self.inv.can_make(recipe))
        self.assertTrue(self.inv.can_make(recipe))
        self.assertTrue(self.inv.can_make(recipe))
| [
"apps.recipes.models.Recipe.objects.get",
"apps.profiles.models.Profile.objects.get",
"apps.recipes.models.Ingredient.objects.filter",
"apps.recommendations.utils.ComparableInventory"
] | [((366, 391), 'apps.profiles.models.Profile.objects.get', 'Profile.objects.get', ([], {'pk': '(3)'}), '(pk=3)\n', (385, 391), False, 'from apps.profiles.models import Profile\n'), ((702, 726), 'apps.recommendations.utils.ComparableInventory', 'ComparableInventory', (['inv'], {}), '(inv)\n', (721, 726), False, 'from apps.recommendations.utils import ComparableInventory\n'), ((1842, 1868), 'apps.recipes.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'pk': '(313)'}), '(pk=313)\n', (1860, 1868), False, 'from apps.recipes.models import Recipe, Ingredient\n'), ((1983, 2009), 'apps.recipes.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'pk': '(291)'}), '(pk=291)\n', (2001, 2009), False, 'from apps.recipes.models import Recipe, Ingredient\n'), ((2124, 2150), 'apps.recipes.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'pk': '(291)'}), '(pk=291)\n', (2142, 2150), False, 'from apps.recipes.models import Recipe, Ingredient\n'), ((1107, 1148), 'apps.recipes.models.Ingredient.objects.filter', 'Ingredient.objects.filter', ([], {'product_id': '(329)'}), '(product_id=329)\n', (1132, 1148), False, 'from apps.recipes.models import Recipe, Ingredient\n'), ((1388, 1429), 'apps.recipes.models.Ingredient.objects.filter', 'Ingredient.objects.filter', ([], {'product_id': '(329)'}), '(product_id=329)\n', (1413, 1429), False, 'from apps.recipes.models import Recipe, Ingredient\n')] |
# Generated by Django 2.2.13 on 2020-07-28 15:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0075_profile_last_frontend_login'),
]
operations = [
migrations.RemoveField(
model_name='fieldreport',
name='cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_num_dead',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='health_min_suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='other_suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='suspected_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_confirmed_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_num_dead',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_probable_cases',
),
migrations.RemoveField(
model_name='fieldreport',
name='who_suspected_cases',
),
]
| [
"django.db.migrations.RemoveField"
] | [((233, 295), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""cases"""'}), "(model_name='fieldreport', name='cases')\n", (255, 295), False, 'from django.db import migrations\n'), ((340, 412), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""confirmed_cases"""'}), "(model_name='fieldreport', name='confirmed_cases')\n", (362, 412), False, 'from django.db import migrations\n'), ((457, 530), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""health_min_cases"""'}), "(model_name='fieldreport', name='health_min_cases')\n", (479, 530), False, 'from django.db import migrations\n'), ((575, 663), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""health_min_confirmed_cases"""'}), "(model_name='fieldreport', name=\n 'health_min_confirmed_cases')\n", (597, 663), False, 'from django.db import migrations\n'), ((703, 779), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""health_min_num_dead"""'}), "(model_name='fieldreport', name='health_min_num_dead')\n", (725, 779), False, 'from django.db import migrations\n'), ((824, 911), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""health_min_probable_cases"""'}), "(model_name='fieldreport', name=\n 'health_min_probable_cases')\n", (846, 911), False, 'from django.db import migrations\n'), ((951, 1039), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""health_min_suspected_cases"""'}), "(model_name='fieldreport', name=\n 'health_min_suspected_cases')\n", (973, 1039), False, 'from django.db import migrations\n'), ((1079, 1147), 'django.db.migrations.RemoveField', 
'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""other_cases"""'}), "(model_name='fieldreport', name='other_cases')\n", (1101, 1147), False, 'from django.db import migrations\n'), ((1192, 1270), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""other_confirmed_cases"""'}), "(model_name='fieldreport', name='other_confirmed_cases')\n", (1214, 1270), False, 'from django.db import migrations\n'), ((1315, 1392), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""other_probable_cases"""'}), "(model_name='fieldreport', name='other_probable_cases')\n", (1337, 1392), False, 'from django.db import migrations\n'), ((1437, 1515), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""other_suspected_cases"""'}), "(model_name='fieldreport', name='other_suspected_cases')\n", (1459, 1515), False, 'from django.db import migrations\n'), ((1560, 1631), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""probable_cases"""'}), "(model_name='fieldreport', name='probable_cases')\n", (1582, 1631), False, 'from django.db import migrations\n'), ((1676, 1748), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""suspected_cases"""'}), "(model_name='fieldreport', name='suspected_cases')\n", (1698, 1748), False, 'from django.db import migrations\n'), ((1793, 1859), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""who_cases"""'}), "(model_name='fieldreport', name='who_cases')\n", (1815, 1859), False, 'from django.db import migrations\n'), ((1904, 1980), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': 
'"""who_confirmed_cases"""'}), "(model_name='fieldreport', name='who_confirmed_cases')\n", (1926, 1980), False, 'from django.db import migrations\n'), ((2025, 2094), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""who_num_dead"""'}), "(model_name='fieldreport', name='who_num_dead')\n", (2047, 2094), False, 'from django.db import migrations\n'), ((2139, 2214), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""who_probable_cases"""'}), "(model_name='fieldreport', name='who_probable_cases')\n", (2161, 2214), False, 'from django.db import migrations\n'), ((2259, 2335), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""who_suspected_cases"""'}), "(model_name='fieldreport', name='who_suspected_cases')\n", (2281, 2335), False, 'from django.db import migrations\n')] |
import serial
import json
import io
import time
ser = serial.Serial("COM24" , 9600, timeout=2)
topic = "hydro/light1"
payload = 1
#data = json.dumps({"topic":topic,"payload":payload})
data = "{\"topic\":\"hydro/light1\",\"payload\":1}"
data = data.encode()
print(data)
ser.write(b'A')
hello = ser.readline()#.decode("ascii")
print(hello) | [
"serial.Serial"
] | [((56, 95), 'serial.Serial', 'serial.Serial', (['"""COM24"""', '(9600)'], {'timeout': '(2)'}), "('COM24', 9600, timeout=2)\n", (69, 95), False, 'import serial\n')] |
import random
aluno1 = input('Nome aluno 1: ')
aluno2 = input('Nome aluno 2: ')
aluno3 = input('Nome aluno 3: ')
aluno4 = input('Nome aluno 4: ')
sorteado = random.choice([aluno1, aluno2, aluno3, aluno4])
print('O sorteado para apagar o quadro foi: {}'.format(sorteado))
| [
"random.choice"
] | [((159, 206), 'random.choice', 'random.choice', (['[aluno1, aluno2, aluno3, aluno4]'], {}), '([aluno1, aluno2, aluno3, aluno4])\n', (172, 206), False, 'import random\n')] |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.http import HttpResponse
from django.urls import reverse
from django.views.generic import ListView,DetailView
from .models import Poll, Choice
class IndexView(ListView):
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Poll.objects.order_by('-pub_date')[:5]
class DetailView(DetailView):
model = Poll
class ResultsView(DetailView):
model = Poll
def vote(request, question_id):
question = get_object_or_404(Poll, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.urls.reverse"
] | [((633, 672), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Poll'], {'pk': 'question_id'}), '(Poll, pk=question_id)\n', (650, 672), False, 'from django.shortcuts import get_object_or_404, render\n'), ((864, 976), 'django.shortcuts.render', 'render', (['request', '"""polls/detail.html"""', '{\'question\': question, \'error_message\': "You didn\'t select a choice."}'], {}), '(request, \'polls/detail.html\', {\'question\': question, \'error_message\':\n "You didn\'t select a choice."})\n', (870, 976), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1306, 1351), 'django.urls.reverse', 'reverse', (['"""polls:results"""'], {'args': '(question.id,)'}), "('polls:results', args=(question.id,))\n", (1313, 1351), False, 'from django.urls import reverse\n')] |
import os
import unittest
import invirtualenv.contextmanager
class TestContextmanager(unittest.TestCase):
def test__revert_file(self):
with invirtualenv.contextmanager.InTemporaryDirectory():
with open('testfile', 'w') as fh:
fh.write('original')
self.assertEqual('original', open('testfile').read())
with invirtualenv.contextmanager.revert_file('testfile'):
with open('testfile', 'w') as fh:
fh.write('changed')
self.assertEqual('changed', open('testfile').read())
self.assertEqual('original', open('testfile').read())
def test__InTemporaryDir(self):
with invirtualenv.contextmanager.InTemporaryDirectory() as tempdir:
self.assertIsInstance(tempdir, str)
self.assertTrue(os.path.exists(tempdir))
| [
"os.path.exists"
] | [((839, 862), 'os.path.exists', 'os.path.exists', (['tempdir'], {}), '(tempdir)\n', (853, 862), False, 'import os\n')] |