text stringlengths 0 1.05M | meta dict |
|---|---|
"""Application body"""
import os
from urllib.error import HTTPError
from urllib.request import urlopen
from app.auth import HubAuth
from flask import Flask, redirect
from webargs import Arg
from webargs.flaskparser import use_args
def create_app(config='production'):
    """Create and configure the Flask application.

    :param config: config name ('production', 'development', ...); resolved
        to the class app.config.<Name>Config via capitalize()
    :return: the configured Flask application
    """
    app = Flask(__name__, static_url_path='/static')
    print(' * Running in {} mode'.format(config))
    app.config.from_object('app.config.%sConfig' % config.capitalize())
    index_args = {
        'file': Arg(str, required=True),
        # must include filename /path/to/file.ipynb relative to the directory
        # specified in config
        'destination': Arg(str, required=True)
    }
    @app.route(app.config['URL'])
    @use_args(index_args)
    def view(args):
        """URL to access"""
        try:
            redirection = username = authenticate()
            if isinstance(username, str):
                file_contents = get_remote_file(app.config, args['file'])
                destination = args['destination']
                # NOTE(review): construct_path interpolates *local variable
                # names* via locals() -- renaming any local in this function
                # (username, destination, ...) silently changes the paths.
                path = construct_path(app.config['COPY_PATH'], locals())
                write_to_destination(file_contents, path, app.config)
                redirect_url = construct_path(app.config['REDIRECT_PATH'], locals())
                redirection = redirect(redirect_url)
        except HTTPError:
            return 'Source file "{}" does not exist or is not accessible.'.\
                format(args['file'])
        # When authenticate() did not return a username string, `redirection`
        # still holds whatever authenticate() returned (presumably a redirect
        # to the Hub login -- TODO confirm against HubAuth.authenticate()).
        return redirection
    return app
def authenticate():
    """Authenticates the user with the local JupyterHub installation

    Returns the username on success; otherwise whatever HubAuth returns
    (presumably a redirect response -- TODO confirm).
    """
    return HubAuth().authenticate()
def get_remote_file(config, source):
    """Fetch a remote file and return its contents as text.

    :param config: mapping with key 'ALLOWED_DOMAIN' -- the URL prefix every
        remote source must start with
    :param source: full URL of the file to download
    :return: the UTF-8 decoded file contents
    :raises ValueError: if the source is outside the allowed domain
    :raises urllib.error.HTTPError: if the file cannot be fetched
    """
    # Use a real exception for this security check: `assert` is stripped
    # when Python runs with -O, which would silently disable it.
    if not source.startswith(config['ALLOWED_DOMAIN']):
        raise ValueError(
            'Source "{}" is outside the allowed domain.'.format(source))
    # Close the connection deterministically instead of leaking it.
    with urlopen(source) as response:
        return response.read().decode('utf-8')
def write_to_destination(file_contents, destination, config):
    """Write text contents to a destination path on the server.

    :param file_contents: text to write
    :param destination: target file path; its extension must appear in
        config['ALLOWED_FILETYPES']
    :param config: mapping with key 'ALLOWED_FILETYPES'
    :raises ValueError: if the destination filetype is not allowed
    """
    # Check that this filetype is allowed (ideally, not an executable).
    # Use a real exception: `assert` is stripped under `python -O`, which
    # would silently disable the check.
    if '.' not in destination or \
            destination.split('.')[-1] not in config['ALLOWED_FILETYPES']:
        raise ValueError(
            'Destination "{}" has a disallowed filetype.'.format(destination))
    # Make the user directory if it doesn't exist; guard against a bare
    # filename, where dirname is '' and makedirs would raise.
    directory = os.path.dirname(destination)
    if directory:
        os.makedirs(directory, exist_ok=True)
    # Write the file, closing the handle even on error.
    with open(destination, 'w') as handle:
        handle.write(file_contents)
def construct_path(path, format, *args):
    """Construct a filesystem path from a format template and a mapping.

    `path` is a str.format() template whose keys are looked up in `format`
    (callers pass locals(), so template keys follow the caller's variable
    names); any extra `args` are joined on as additional path segments.
    Note: the `format` parameter name shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    base = path.format(**format)
    return os.path.join(base, *args)
"repo_name": "alvinwan/DS8-Interact",
"path": "app/__init__.py",
"copies": "1",
"size": "2192",
"license": "apache-2.0",
"hash": -4437046985736566000,
"line_mean": 29.0410958904,
"line_max": 72,
"alpha_frac": 0.7062043796,
"autogenerated": false,
"ratio": 3.552674230145867,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4758878609745867,
"avg_score": null,
"num_lines": null
} |
# Application condition
# (Evaluated by the visual interpreter to decide whether this reaction
# fires. `func`, `max_used_id`, `cur_node_is_processed`, `port_values`,
# `code`, `init_code`, `id_to_pos_in_code` and `variables_code` are all
# supplied by the interpreter environment -- this file is a fragment, not
# a standalone module.)
func.id == max_used_id and not cur_node_is_processed
# Reaction
# Substitute the generic SensorN placeholders in the generated C body with
# the concrete per-port expressions chosen for this robot program.
function_code = func.Body
function_code = function_code.replace("Sensor1", port_values[0] + "1)");
function_code = function_code.replace("Sensor2", port_values[1] + "2)");
function_code = function_code.replace("Sensor3", port_values[2] + "3)");
function_code = function_code.replace("Sensor4", port_values[3] + "4)");
variables = set()
# Split the generated C code into statements and register each one either
# as regular code or as initialization code.
for left_part in function_code.split(';'):
    if left_part != "":
        part_code = left_part.strip() + ";\n"
        if not func.Init:
            code.append([part_code])
            # NOTE(review): the rest of this file works with `func`, but
            # the bookkeeping below reads `waitFor` -- this looks like a
            # copy/paste from a WaitFor* generator; confirm which node id
            # is intended here.
            if waitFor.id not in id_to_pos_in_code:
                id_to_pos_in_code[waitFor.id] = len(code) - 1
        else:
            init_code.append(part_code)
        # Extract the assigned variable name: take the text left of '=' and
        # strip a trailing compound-assignment/comparison operator char.
        if left_part.find('=') != -1:
            left_part = left_part[: left_part.find('=')]
            if left_part[len(left_part) - 1] in '+-=*/><':
                left_part = left_part[: len(left_part) - 1]
        left_part = left_part.strip()
        variables.add(left_part)
# Emit a static int declaration for every variable used by the body.
for variable in variables:
    variables_code.append("static int " + variable + ";\n")
cur_node_is_processed = True
| {
"repo_name": "tara-sova/qreal",
"path": "plugins/tools/visualInterpreter/examples/robotsCodeGeneration/reactionsStorage/FunctionGenerator.py",
"copies": "12",
"size": "1093",
"license": "apache-2.0",
"hash": 8923680166499430000,
"line_mean": 32.1212121212,
"line_max": 72,
"alpha_frac": 0.6349496798,
"autogenerated": false,
"ratio": 2.9381720430107525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9573121722810752,
"avg_score": null,
"num_lines": null
} |
# Application condition
# (Evaluated by the visual interpreter: fire when the current node is this
# wait-for-color block and has not been processed yet. `waitFor`,
# `max_used_id`, `code`, `init_code`, `terminate_code` and
# `id_to_pos_in_code` come from the interpreter environment -- this file is
# a fragment, not a standalone module.)
waitFor.id == max_used_id and not cur_node_is_processed
# Reaction
# Map the block's colour property (localized strings) onto the ECRobot
# colour enum. Unmatched colours leave color_nxt_type empty.
port = "NXT_PORT_S" + waitFor.Port
color_nxt_type = ""
color_str = waitFor.Color
if color_str == "Красный":
    color_nxt_type = "NXT_COLOR_RED"
elif color_str == "Зелёный":
    color_nxt_type = "NXT_COLOR_GREEN"
elif color_str == "Синий":
    color_nxt_type = "NXT_COLOR_BLUE"
elif color_str == "Чёрный":
    color_nxt_type = "NXT_COLOR_BLACK"
elif color_str == "Жёлтый":
    color_nxt_type = "NXT_COLOR_YELLOW"
elif color_str == "Белый":
    color_nxt_type = "NXT_COLOR_WHITE"
# Busy-wait until the sensor reports the requested colour.
wait_for_color_block_code = "while (ecrobot_get_nxtcolorsensor_id(" + port + ") != " + color_nxt_type + ") {}\n"
wait_init_code = "ecrobot_init_nxtcolorsensor(" + port + ", " + color_nxt_type + ");\n"
# NOTE(review): the terminate snippet calls the *init* function again;
# presumably it should call the matching terminate/term routine -- verify
# against the ECRobot API.
wait_terminate_code = "ecrobot_init_nxtcolorsensor(" + port + ", " + color_nxt_type + ");\n"
# Only register the init/terminate pair once per identical snippet.
if wait_init_code not in init_code:
    init_code.append(wait_init_code)
    terminate_code.append(wait_terminate_code)
code.append([wait_for_color_block_code])
id_to_pos_in_code[waitFor.id] = len(code) - 1
cur_node_is_processed = True
| {
"repo_name": "Ashatta/qreal",
"path": "plugins/tools/visualInterpreter/examples/robotsCodeGeneration/reactionsStorage/WaitForColorBlockGenerator.py",
"copies": "12",
"size": "1127",
"license": "apache-2.0",
"hash": -9194273822989110000,
"line_mean": 31.0882352941,
"line_max": 112,
"alpha_frac": 0.6718606783,
"autogenerated": false,
"ratio": 2.3614718614718613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9033332539771861,
"avg_score": null,
"num_lines": null
} |
""" application config
"""
from types import SimpleNamespace
# Process-wide mutable state describing the Juju bootstrap; attached to the
# `app` namespace below.
bootstrap = SimpleNamespace(
    # Is bootstrap running
    running=False,
    # Attached output
    output=None
)

# Global application state: a single mutable namespace shared across the
# application at runtime.
app = SimpleNamespace(
    # Juju bootstrap details
    bootstrap=bootstrap,
    # The conjure-up UI framework
    ui=None,
    # Contains metadata and spell name
    config=None,
    # Contains conjure-up global settings
    global_config=None,
    # List of multiple bundles, usually from a charmstore search
    bundles=None,
    # Selected bundle from a Variant view
    current_bundle=None,
    # cli opts
    argv=None,
    # Current Juju model being used
    current_model=None,
    # Current Juju controller selected
    current_controller=None,
    # Session ID for current deployment
    session_id=None,
    # Application logger
    log=None,
    # Charm store metadata API client
    metadata_controller=None,
    # Application environment passed to processing steps
    env=None,
    # Did deployment complete
    complete=False,
    # Run in non interactive mode
    headless=False,
    # Remote endpoint type (An enum, see download.py)
    endpoint_type=None)
| {
"repo_name": "battlemidget/conjure-up",
"path": "conjureup/app_config.py",
"copies": "1",
"size": "1169",
"license": "mit",
"hash": -1215708651759780400,
"line_mean": 18.4833333333,
"line_max": 64,
"alpha_frac": 0.6826347305,
"autogenerated": false,
"ratio": 4.282051282051282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5464686012551282,
"avg_score": null,
"num_lines": null
} |
""" application config
"""
import json
from types import SimpleNamespace
# Mutable state describing the Juju bootstrap process.
bootstrap = SimpleNamespace(
    # Is bootstrap running
    running=False,
    # Attached output
    output=None
)

# MAAS provider connection details.
maas = SimpleNamespace(
    # Client
    client=None,
    # API key
    api_key=None,
    # API Endpoint
    endpoint=None
)

# Juju client/session details.
juju = SimpleNamespace(
    # Client
    client=None,
    # Is authenticated?
    authenticated=False,
    # Path to juju binary
    bin_path=None,
    # Path to juju-wait binary
    wait_path=None,
    # Charmstore
    charmstore=None
)
class AppConfig:
    """ Application config storage

    Every attribute below is a class-level default; __setattr__ rejects any
    attribute name that is not already declared here.
    """
    # MAAS client
    # TODO: move this into MAAS provider
    maas = maas
    # Juju bootstrap details
    bootstrap = bootstrap
    # Juju Provider
    provider = None
    # Juju Client
    juju = juju
    # The conjure-up UI framework
    ui = None
    # Contains spell name
    config = None
    # Conjurefile
    conjurefile = None
    # Spell metadata
    metadata = None
    # List of multiple bundles, usually from a charmstore search
    bundles = None
    # Selected bundle from a Variant view
    current_bundle = None
    # Is JAAS supported by the current spell
    jaas_ok = True
    # Which controller, if any, is the JAAS controller
    jaas_controller = None
    # Whether the JAAS controller is selected
    is_jaas = False
    # Current UI View rendered
    current_view = None
    # Session ID for current deployment
    session_id = None
    # Application logger
    log = None
    # Charm store metadata API client
    metadata_controller = None
    # disable telemetry tracking
    no_track = False
    # disable automatic error reporting
    no_report = False
    # Application environment passed to processing steps
    env = None
    # Did deployment complete
    complete = False
    # Run in non interactive mode
    headless = False
    # Remote endpoint type (An enum, see download.py)
    endpoint_type = None
    # Reference to asyncio loop so that it can be accessed from other threads
    loop = None
    # State storage
    state = None
    # Sentry endpoint
    sentry = None
    # Spells index
    spells_index = None
    # Password for sudo, if needed
    sudo_pass = None
    # Step descriptions
    steps = None
    # Step user data
    steps_data = {}
    # exit code for conjure-up to terminate with
    exit_code = 0
    # All available addons by name
    addons = {}
    # Addon aliases for required spells
    addons_aliases = {}
    # Selected addons
    selected_addons = []
    spell_given = False
    alias_given = False

    def __setattr__(self, name, value):
        """ Guards against setting attributes that don't already exist
        """
        try:
            getattr(AppConfig, name)
        except AttributeError:
            # NOTE(review): raises a bare Exception (an AttributeError
            # would be more conventional); left as-is since callers may
            # rely on catching Exception.
            raise Exception(
                "Attempted to set an unknown attribute for application config")
        super().__setattr__(name, value)

    @property
    def _internal_state_key(self):
        """ Internal, formatted namespace key

        Requires self.provider and self.config to be populated.
        """
        return "conjure-up.{}.{}".format(self.provider.cloud_type,
                                         self.config['spell'])

    @property
    def all_steps(self):
        """
        All steps, including those from selected addons.
        """
        # Imported lazily to avoid a circular import at module load time.
        from conjureup.models.addon import AddonModel
        return app.steps + AddonModel.selected_addons_steps()

    @property
    def has_bundle_modifications(self):
        """
        Whether or not any step modifies the bundle.
        """
        return any(step.bundle_add or step.bundle_remove
                   for step in self.all_steps)

    async def save(self):
        """Persist the Conjurefile: into Juju model config when
        authenticated, otherwise into local state storage."""
        if not self.provider:
            # don't bother saving if they haven't even picked a cloud yet
            return
        if not self.conjurefile:
            return
        self.log.info('Storing conjure-up state')
        if self.juju.authenticated:
            await self.juju.client.set_config(
                {'extra-info': json.dumps(self.conjurefile)})
            self.log.info('State saved in model config')
            # Check for existing key and clear it
            self.state.pop(self._internal_state_key, None)
        else:
            # sanitize: conf-file entries may be Path objects, which JSON
            # cannot serialize.
            self.conjurefile['conf-file'] = [
                str(conf_path)
                for conf_path in self.conjurefile['conf-file']
            ]
            self.state[self._internal_state_key] = json.dumps(
                self.conjurefile)
            self.log.info('State saved')

    async def restore(self):
        """Reload previously saved state, preferring the Juju model config
        over local state storage."""
        self.log.info('Attempting to load conjure-up cached state.')
        try:
            if self.juju.authenticated:
                result = await self.juju.client.get_config()
                if 'extra-info' in result:
                    self.log.info(
                        "Found cached state from Juju model, reloading.")
                    # NOTE(review): from_json is not defined on this class
                    # in this file -- confirm it exists elsewhere.
                    self.from_json(result['extra-info'].value)
                    return
            result = self.state.get(self._internal_state_key)
            if result:
                self.log.info("Found cached state, reloading.")
                self.conjurefile = json.loads(result)
        except json.JSONDecodeError as e:
            # Dont fail fatally if state information is incorrect. Just log it
            # and move on
            self.log.debug(
                "State information possibly corrupt "
                "or malformed: {}".format(e))


# Module-level singleton application config instance.
app = AppConfig()
| {
"repo_name": "conjure-up/conjure-up",
"path": "conjureup/app_config.py",
"copies": "3",
"size": "5539",
"license": "mit",
"hash": 5308395890383095000,
"line_mean": 22.9783549784,
"line_max": 79,
"alpha_frac": 0.5903592706,
"autogenerated": false,
"ratio": 4.317225253312548,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 231
} |
## Application configuration
from luggage import crypto
## Public configuration -- these pose no security risk
## if published.
PARSE_APPLICATION_ID = "sYIULJPxHZCryGbqxbNfsXF8PPU4Blf4Ow5b8SFc"
STATE_APPLICATION_ID = "jcMhpH2gLjin9TK3oQH25Nn4kOG1XQHE8K0WmySL"

## Secret configuration -- these API keys would be a problem
## to publish. They are encrypted here with the public key
## defined. Note that the signing key here is a *private*
## key: this means that anyone could put other values
## and get us to trust them. In practice this is not a problem,
## since anyone who manages to gain *write-access* to the
## dictionary can do far worse.
## NOTE(review): decryption presumably requires key material supplied via
## the SECRET_KEY environment variable (see envVar below); confirm that
## publishing the signingKey is acceptable per the threat model above.
SECRETS = crypto.Secrets(
    envVar='SECRET_KEY',
    publicKey="1Wy7uKOcaa5p/5BiMJ82M7v+1c3+SB0DesMTWIj5AxU=",
    signingKey="p14AmTX3SAR7wdN3+6HHWXdTiYhkMG9XzsztBoxnhr4=",
    encSecrets=
    dict(
        PARSE_REST_API_KEY="a9uodFprEyR4K/xKP7I4lCmafObPac/67+pHGy2So3TzVTZJuLU2oZqt+kBr3pGFvyb/ZMWANaMPkFadXmkv/RlYWeQBO9gBo+/ziTNNjy0=",
        STATE_REST_API_KEY="wSRKdcA5JbXjXV5/4BIIoiVxrwgbp0DHAiT1O1Qrqa9ACRuytA2yOmo6Je05p/5pRHiKi2VQ/BMXxCRBWrBTOqE+TqG/EWkaRj8vSxQYj4I=",
    ),
)
| {
"repo_name": "moshez/boredbot",
"path": "boredbot_deploy/config.py",
"copies": "1",
"size": "1153",
"license": "mit",
"hash": -3227251536802526700,
"line_mean": 45.12,
"line_max": 142,
"alpha_frac": 0.7658282741,
"autogenerated": false,
"ratio": 2.3822314049586777,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3648059679058678,
"avg_score": null,
"num_lines": null
} |
"""Application configuration."""
import logging
from contextlib import suppress
from importlib import import_module
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from resolwe.storage.connectors import connectors
from resolwe.storage.connectors.baseconnector import BaseStorageConnector
from resolwe.storage.settings import STORAGE_CONNECTORS
logger = logging.getLogger(__name__)
class StorageConfig(AppConfig):
    """Application configuration."""

    name = "resolwe.storage"
    verbose_name = _("Resolwe Storage")

    def _check_connector_settings(self):
        """Validate the storage connector settings in the django config.

        When there exists a section that does not match any known storage
        connector then error is logged.
        """
        for connector_name, connector_settings in STORAGE_CONNECTORS.items():
            if connector_name not in connectors:
                full_class_name = connector_settings.get("connector")
                class_exists = False
                is_subclass = False
                # Resolution failures (missing 'connector' key, bad module
                # path, import error, ...) are deliberately swallowed: the
                # flags stay False so the warning below states the reason.
                with suppress(Exception):
                    module_name, class_name = full_class_name.rsplit(".", 1)
                    module = import_module(module_name)
                    class_exists = hasattr(module, class_name)
                    if class_exists:
                        is_subclass = issubclass(
                            getattr(module, class_name), BaseStorageConnector
                        )
                message = "Connector named {} using class {} is not registered.".format(
                    connector_name, full_class_name
                )
                if not class_exists:
                    message += " Class does not exist."
                elif not is_subclass:
                    message += " Class is not a subclass of BaseStorageConnector."
                logger.warning(message)

    def ready(self):
        """Application initialization."""
        self._check_connector_settings()
        # Register signals handlers
        from . import signals  # noqa: F401

        return super().ready()
| {
"repo_name": "genialis/resolwe",
"path": "resolwe/storage/apps.py",
"copies": "1",
"size": "2138",
"license": "apache-2.0",
"hash": -2122979579628690000,
"line_mean": 37.1785714286,
"line_max": 88,
"alpha_frac": 0.6071094481,
"autogenerated": false,
"ratio": 5.078384798099762,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6185494246199763,
"avg_score": null,
"num_lines": null
} |
"""Application configuration."""
import sys
from django.apps import AppConfig
class ElasticConfig(AppConfig):
    """Application configuration."""

    name = 'resolwe.elastic'

    def ready(self):
        """Perform application initialization."""
        # Initialize the type extension composer.
        from . composer import composer
        composer.discover_extensions()

        # NOTE(review): this only detects `<script> migrate` as the first
        # CLI argument; other invocation shapes would bypass the check --
        # confirm this matches how the project runs migrations.
        is_migrating = sys.argv[1:2] == ['migrate']
        if is_migrating:
            # Do not register signals and ES indices when:
            # * migrating - model instances used during migrations do
            #   not contain the full functionality of models and things
            #   like content types don't work correctly and signals are
            #   not versioned so they are guaranteed to work only with
            #   the last version of the model
            return

        # Connect all signals
        from . import signals  # pylint: disable=unused-variable
        # Register ES indices
        from . builder import index_builder  # pylint: disable=unused-variable
| {
"repo_name": "jberci/resolwe",
"path": "resolwe/elastic/apps.py",
"copies": "1",
"size": "1074",
"license": "apache-2.0",
"hash": 5637985595168791000,
"line_mean": 32.5625,
"line_max": 78,
"alpha_frac": 0.6368715084,
"autogenerated": false,
"ratio": 4.995348837209303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6132220345609303,
"avg_score": null,
"num_lines": null
} |
""" Application configuration """
import os
class Config:
    """Shared base settings for every runtime configuration."""

    # Directory containing this module, and the project root above it.
    APP_DIR = os.path.abspath(os.path.dirname(__file__))
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))

    # Pull the secret from the environment, falling back to a fixed
    # development default.
    SECRET_KEY = os.environ.get('SECRET_KEY', 'alsdkjfoqw90eurjojasvnqu9ehrotj')

    SSL_DISABLE = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    @staticmethod
    def init_app(app):
        """Hook for configuration-specific app initialization (no-op here)."""
        pass
class DevelopmentConfig(Config):
    """ Development Configuration """
    DEBUG = True
    # Local SQLite database stored at the project root.
    DB_NAME = 'data-dev.db'
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DB_PATH)
class TestConfig(Config):
    """ Test Configuration """
    TESTING = True
    # Separate SQLite database so tests never touch dev/prod data.
    DB_NAME = 'data-test.db'
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DB_PATH)
class ProductionConfig(Config):
    """Production configuration: prefers DATABASE_URL from the environment,
    falling back to a local SQLite file at the project root."""

    DEBUG = False

    if 'DATABASE_URL' in os.environ:
        SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    else:
        DB_NAME = 'db.sqlite'
        DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
        SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DB_PATH)
# Registry of named configurations consumed by the application factory;
# 'default' points at the development settings.
config = {
    'development': DevelopmentConfig,
    'testing': TestConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| {
"repo_name": "cfflymolo/personal-website",
"path": "app/config.py",
"copies": "1",
"size": "1441",
"license": "mit",
"hash": 1914178985617479700,
"line_mean": 23.4237288136,
"line_max": 76,
"alpha_frac": 0.6391394865,
"autogenerated": false,
"ratio": 3.3985849056603774,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537724392160377,
"avg_score": 0,
"num_lines": 59
} |
"""Application core."""
from flask.ext.mail import Mail
from flask.ext.security import Security
from flask.ext.sqlalchemy import SQLAlchemy
__all__ = 'db',
db = SQLAlchemy() # NOQA
mail = Mail() # NOQA
security = Security() # NOQA
class Server(object):
"""A wrapper around common SQLAlchemy functionality."""
def _isinstance(self, instance, raise_error=True):
"""Check if the specified instance matches the service's model.
By default this method will raise :class:`ValueError` if the
instance is not of the correct type.
:param instance: the instance to check.
:param raise_error: whether or not to raise an error on
type mismatch.
:return bool: whether or not the instance is of the expected
type.
:raises: ValueError
"""
if isinstance(instance, self.__model__):
return True
elif raise_error:
raise ValueError('{} is not of type {}.'.format(
instance, self.__model__,
))
else:
return False
def all(self):
"""Return a generator containing all instances of the model."""
return self.__model__.query.all()
def save(self, instance, commit=True):
"""Commit the instance to the database and return it.
:param instance: the instance to save.
:param commit: whether or not to commit the current session.
"""
self._isinstance(instance)
db.session.add(instance)
if commit:
db.session.commit()
return instance
| {
"repo_name": "dirn/Secret-Santa",
"path": "xmas/core.py",
"copies": "1",
"size": "1632",
"license": "bsd-3-clause",
"hash": 6010419583069413000,
"line_mean": 26.2,
"line_max": 71,
"alpha_frac": 0.5949754902,
"autogenerated": false,
"ratio": 4.785923753665689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 60
} |
'''Application Data (bpy.app)
This module contains application values that remain unchanged during runtime.
'''
# All of the following are placeholders initialized to None: this module
# only documents the bpy.app API, with real values presumably injected by
# Blender at runtime (see module docstring).
debug = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
debug_events = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
debug_ffmpeg = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
debug_handlers = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
debug_python = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
debug_value = None
'''Int, number which can be set to non-zero values for testing purposes
'''
debug_wm = None
'''Boolean, for debug info (started with --debug / --debug_* matching this attribute name)
'''
driver_namespace = None
'''Dictionary for drivers namespace, editable in-place, reset on file load (read-only)
'''
tempdir = None
'''String, the temp directory used by blender (read-only)
'''
background = None
'''Boolean, True when blender is running without a user interface (started with -b)
'''
binary_path = None
'''The location of blenders executable, useful for utilities that spawn new instances
'''
build_cflags = None
'''C compiler flags
'''
build_cxxflags = None
'''C++ compiler flags
'''
build_date = None
'''The date this blender instance was built
'''
build_linkflags = None
'''Binary linking flags
'''
build_options = None
'''A set containing most important enabled optional build features
'''
build_platform = None
'''The platform this blender instance was built for
'''
build_revision = None
'''The subversion revision this blender instance was built with
'''
build_system = None
'''Build system used
'''
build_time = None
'''The time this blender instance was built
'''
build_type = None
'''The type of build (Release, Debug)
'''
ffmpeg = None
'''FFmpeg library information backend
'''
handlers = None
'''Application handler callbacks
'''
translations = None
'''Application and addons internationalization API
'''
version = None
'''The Blender version as a tuple of 3 numbers. eg. (2, 50, 11)
'''
version_char = None
'''The Blender version character (for minor releases)
'''
version_cycle = None
'''The release status of this build alpha/beta/rc/release
'''
version_string = None
'''The Blender version formatted as a string
'''
def count(*argv):
    '''T.count(value) -> integer -- return number of occurrences of value
    '''
    # Stub: no implementation here; presumably supplied by Blender at
    # runtime, as with the placeholder values above.
    pass
def index(*argv):
    '''T.index(value, [start, [stop]]) -> integer -- return first index of value.
    Raises ValueError if the value is not present.
    '''
    # Stub: no implementation here; presumably supplied by Blender at
    # runtime, as with the placeholder values above.
    pass
| {
"repo_name": "kabuku/blender-python",
"path": "blenderlib/bpy.app.py",
"copies": "1",
"size": "3091",
"license": "mit",
"hash": 5422606904759285000,
"line_mean": 14.3544973545,
"line_max": 90,
"alpha_frac": 0.6211582012,
"autogenerated": false,
"ratio": 3.81134401972873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747983896405961,
"avg_score": 0.036903664904553775,
"num_lines": 189
} |
# Static catalogue of supported Steam applications:
# (app id, long display name, short name).
APPLICATION_DATA = (
    (730, 'Counter-Strike: Global Offensive', 'csgo'),
    (620, 'Portal 2', 'portal2'),
    (723, 'Steam', 'steam'),
    (570, 'Dota 2', 'dota2'),
    (440, 'Team Fortress 2', 'tf2')
)

# Lookup tables derived from APPLICATION_DATA; populated by __init_module()
# at import time (called at the bottom of this module).
APP_IDS = []
APP_NAMES = {}
APP_SHORT_NAMES = {}
APP_SHORT_NAME_TO_ID = {}
def __init_module():
    """Populate the module-level lookup tables from APPLICATION_DATA."""
    for entry_id, long_name, short_name in APPLICATION_DATA:
        APP_IDS.append(entry_id)
        APP_NAMES[entry_id] = long_name
        APP_SHORT_NAMES[entry_id] = short_name
        APP_SHORT_NAME_TO_ID[short_name] = entry_id
# Fallback values returned for unrecognized application ids.
UNKNOWN_APP_NAME = 'Unknown Application Name'
UNKNOWN_SHORT_APP_NAME = 'unknown_app'
def __get_name(app_id, name_dict, default_value):
    """Look up ``app_id`` in ``name_dict`` after coercing it to an int.

    :param app_id: application id (int, or anything int() accepts)
    :param name_dict: mapping of int app ids to names
    :param default_value: value returned for an unknown (but valid) id
    :return: the mapped name, ``default_value`` for unknown ids, or the
        string 'INVALID_APP_ID' when the id cannot be coerced to int
    """
    try:
        app_id = int(app_id)
    except ValueError:
        return 'INVALID_APP_ID'
    # Single dict lookup instead of a contains-check followed by indexing.
    return name_dict.get(app_id, default_value)
def app_id(short_name):
    """Return the numeric app id for a short name.

    Raises KeyError for an unknown short name.
    """
    return APP_SHORT_NAME_TO_ID[short_name]
def application_name(app_id):
    """Return the long display name for an app id, or a fallback string."""
    return __get_name(app_id, APP_NAMES, UNKNOWN_APP_NAME)
def application_short_name(app_id):
    """Return the short name for an app id, or a fallback string."""
    return __get_name(app_id, APP_SHORT_NAMES, UNKNOWN_SHORT_APP_NAME)

# Build the lookup tables as a module-import side effect.
__init_module()
| {
"repo_name": "ClifHouck/desperado",
"path": "desperado/data.py",
"copies": "1",
"size": "1175",
"license": "bsd-3-clause",
"hash": 6404979557817983000,
"line_mean": 26.3255813953,
"line_max": 70,
"alpha_frac": 0.6153191489,
"autogenerated": false,
"ratio": 2.8381642512077296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39534834001077296,
"avg_score": null,
"num_lines": null
} |
""" Application entrypoint
"""
import argparse
import os
import os.path as path
import sys
import textwrap
import uuid
import yaml
from prettytable import PrettyTable
from termcolor import colored
from conjureup import __version__ as VERSION
from conjureup import async, consts, controllers, utils
from conjureup.app_config import app
from conjureup.controllers.steps.common import get_step_metadata_filenames
from conjureup.download import (
EndpointType,
detect_endpoint,
download,
download_local,
download_or_sync_registry,
get_remote_url
)
from conjureup.log import setup_logging
from conjureup.ui import ConjureUI
from ubuntui.ev import EventLoop
from ubuntui.palette import STYLES
def parse_options(argv):
    """Parse conjure-up command line arguments.

    :param argv: list of CLI arguments (typically sys.argv[1:])
    :return: argparse.Namespace of parsed options
    """
    parser = argparse.ArgumentParser(prog="conjure-up")
    parser.add_argument('spell', nargs='?',
                        default=consts.UNSPECIFIED_SPELL,
                        help="""The name ('openstack-nclxd') or location
                        ('githubusername/spellrepo') of a conjure-up
                        spell, or a keyword matching multiple spells
                        ('openstack')""")
    parser.add_argument('-d', '--debug', action='store_true',
                        dest='debug',
                        help='Enable debug logging.')
    parser.add_argument('--show-env', action='store_true',
                        dest='show_env',
                        help='Shows what environment variables are used '
                        'during post deployment actions. This is useful '
                        'for headless installs allowing you to set those '
                        'variables to further customize your deployment.')
    parser.add_argument('-c', dest='global_config_file',
                        help='Location of conjure-up.conf',
                        default='/etc/conjure-up.conf')
    parser.add_argument('--cache-dir', dest='cache_dir',
                        help='Download directory for spells',
                        default=os.path.expanduser("~/.cache/conjure-up"))
    parser.add_argument('--spells-dir', dest='spells_dir',
                        help='Location of conjure-up managed spells directory',
                        default=os.path.expanduser(
                            "~/.cache/conjure-up-spells"))
    parser.add_argument('--apt-proxy', dest='apt_http_proxy',
                        help='Specify APT proxy')
    parser.add_argument('--apt-https-proxy', dest='apt_https_proxy',
                        help='Specify APT HTTPS proxy')
    parser.add_argument('--http-proxy', dest='http_proxy',
                        help='Specify HTTP proxy')
    parser.add_argument('--https-proxy', dest='https_proxy',
                        help='Specify HTTPS proxy')
    # BUGFIX: this option was registered only as '--proxy-proxy', which
    # matched neither its destination nor its help text. '--no-proxy' is
    # the intended spelling; the old flag is kept as an alias so existing
    # invocations keep working.
    parser.add_argument('--no-proxy', '--proxy-proxy', dest='no_proxy',
                        help='Comma separated list of IPs to not '
                        'filter through a proxy')
    parser.add_argument('--bootstrap-timeout', dest='bootstrap_timeout',
                        help='Amount of time to wait for initial controller '
                        'creation. Useful for slower network connections.')
    parser.add_argument('--bootstrap-to', dest='bootstrap_to',
                        help='The MAAS node hostname to deploy to. Useful '
                        'for using lower end hardware as the Juju admin '
                        'controller.', metavar='<host>.maas')
    parser.add_argument(
        '--version', action='version', version='%(prog)s {}'.format(VERSION))
    parser.add_argument('cloud', nargs='?',
                        help="Name of a Juju controller type to "
                        "target, such as ['aws', 'localhost' ...]")

    return parser.parse_args(argv)
def unhandled_input(key):
    """Event-loop fallback key handler (see main()): 'q'/'Q' exits cleanly."""
    if key in ['q', 'Q']:
        # Stop pending async work before tearing down the event loop.
        async.shutdown()
        EventLoop.exit(0)
def _start(*args, **kwargs):
    """Dispatch to the first controller screen.

    Accepts and ignores extra arguments because it is also registered as an
    event-loop alarm callback (see main()).
    """
    # No concrete spell endpoint yet: let the user pick a spell first.
    if app.endpoint_type in [None, EndpointType.LOCAL_SEARCH]:
        controllers.use('spellpicker').render()
        return

    utils.setup_metadata_controller()

    if os.getenv('CONJUREUP_STATUS_ONLY'):
        controllers.use('deploystatus').render()
        return

    # A cloud on the CLI skips controller selection.
    if app.argv.cloud:
        controllers.use('clouds').render()
        return

    controllers.use('controllerpicker').render()
def apply_proxy():
    """ Sets up proxy information.
    """
    # Mirror each CLI-provided proxy value into both the upper- and
    # lower-case environment variable spellings.
    for option, env_keys in (('http_proxy', ('HTTP_PROXY', 'http_proxy')),
                             ('https_proxy', ('HTTPS_PROXY', 'https_proxy'))):
        value = getattr(app.argv, option)
        if value:
            for env_key in env_keys:
                os.environ[env_key] = value
def show_env():
    """ Shows environment variables from post deploy actions

    Prints a table of the additional-input variables declared by the
    spell's step metadata files, then exits the process with status 0.
    """
    step_scripts = os.path.join(
        app.config['spell-dir'], 'steps'
    )
    step_metas = get_step_metadata_filenames(step_scripts)
    print("Available environment variables: \n")
    table = PrettyTable()
    table.field_names = ["ENV", "DEFAULT",
                         ""]
    table.align = 'l'
    for step_meta_path in step_metas:
        with open(step_meta_path) as fp:
            # BUGFIX: use safe_load -- yaml.load without an explicit Loader
            # can instantiate arbitrary Python objects from a crafted
            # metadata file, and is inconsistent with the yaml.safe_load
            # calls used in main().
            step_metadata = yaml.safe_load(fp.read())
        if 'additional-input' in step_metadata:
            for x in step_metadata['additional-input']:
                default = colored(x['default'], 'green', attrs=['bold'])
                key = colored(x['key'], 'blue', attrs=['bold'])
                table.add_row([key, default,
                               textwrap.fill(step_metadata['description'],
                                             width=55)])
    print(table)
    print("")
    print(
        textwrap.fill(
            "See http://conjure-up.io/docs/en/users/#running-in-headless-mode "
            "for more information on using these variables to further "
            "customize your deployment.", width=79))
    sys.exit(0)
def main():
    """conjure-up entry point: parse options, load global config and the
    spell registry, resolve the requested spell, then run either headless
    or under the interactive UI."""
    opts = parse_options(sys.argv[1:])
    spell = os.path.basename(os.path.abspath(opts.spell))

    if not os.path.isdir(opts.cache_dir):
        os.makedirs(opts.cache_dir)

    # Refuse to run as root.
    if os.geteuid() == 0:
        utils.info("")
        utils.info("This should _not_ be run as root or with sudo.")
        utils.info("")
        sys.exit(1)

    # Application Config
    app.config = {'metadata': None}
    app.argv = opts
    app.log = setup_logging("conjure-up/{}".format(spell),
                            os.path.join(opts.cache_dir, 'conjure-up.log'),
                            opts.debug)

    # Setup proxy
    apply_proxy()

    app.session_id = os.getenv('CONJURE_TEST_SESSION_ID',
                               '{}/{}'.format(
                                   spell,
                                   str(uuid.uuid4())))

    # Locate /etc/conjure-up.conf (or the -c override), falling back to the
    # source tree copy when not installed system-wide.
    global_config_filename = app.argv.global_config_file
    if not os.path.exists(global_config_filename):
        # fallback to source tree location
        global_config_filename = os.path.join(os.path.dirname(__file__),
                                              "../etc/conjure-up.conf")
        if not os.path.exists(global_config_filename):
            utils.error("Could not find {}.".format(global_config_filename))
            sys.exit(1)

    with open(global_config_filename) as fp:
        global_conf = yaml.safe_load(fp.read())

    app.global_config = global_conf

    # Sync the spell registry (initial clone or refresh).
    spells_dir = app.argv.spells_dir
    app.config['spells-dir'] = spells_dir
    if not os.path.exists(spells_dir):
        utils.info("No spells found, syncing from registry, please wait.")
        download_or_sync_registry(app.global_config['registry']['repo'],
                                  spells_dir)
    else:
        app.log.debug("Refreshing spell registry")
        download_or_sync_registry(app.global_config['registry']['repo'],
                                  spells_dir, True)

    spells_index_path = os.path.join(app.config['spells-dir'],
                                     'spells-index.yaml')
    with open(spells_index_path) as fp:
        app.spells_index = yaml.safe_load(fp.read())

    spell_name = spell
    app.endpoint_type = detect_endpoint(opts.spell)

    if app.endpoint_type == EndpointType.LOCAL_SEARCH:
        spells = utils.find_spells_matching(opts.spell)

        if len(spells) == 0:
            utils.error("Can't find a spell matching '{}'".format(opts.spell))
            sys.exit(1)

        # One result means it was a direct match and we can copy it
        # now. Changing the endpoint type then stops us from showing
        # the picker UI. More than one result means we need to show
        # the picker UI and will defer the copy to
        # SpellPickerController.finish(), so nothing to do here.
        if len(spells) == 1:
            app.log.debug("found spell {}".format(spells[0]))
            spell = spells[0]
            utils.set_chosen_spell(spell_name,
                                   os.path.join(opts.cache_dir,
                                                spell['key']))
            download_local(os.path.join(app.config['spells-dir'],
                                        spell['key']),
                           app.config['spell-dir'])
            utils.set_spell_metadata()
            app.endpoint_type = EndpointType.LOCAL_DIR

    # download spell if necessary
    elif app.endpoint_type == EndpointType.LOCAL_DIR:
        if not os.path.isdir(opts.spell):
            utils.warning("Could not find spell {}".format(opts.spell))
            sys.exit(1)

        if not os.path.exists(os.path.join(opts.spell,
                                           "metadata.yaml")):
            utils.warning("'{}' does not appear to be a spell. "
                          "{}/metadata.yaml was not found.".format(
                              opts.spell, opts.spell))
            sys.exit(1)

        spell_name = os.path.basename(os.path.abspath(spell))
        utils.set_chosen_spell(spell_name,
                               path.join(opts.cache_dir, spell_name))
        download_local(opts.spell, app.config['spell-dir'])
        utils.set_spell_metadata()

    elif app.endpoint_type in [EndpointType.VCS, EndpointType.HTTP]:
        utils.set_chosen_spell(spell, path.join(opts.cache_dir, spell))
        remote = get_remote_url(opts.spell)

        if remote is None:
            utils.warning("Can't guess URL matching '{}'".format(opts.spell))
            sys.exit(1)

        download(remote, app.config['spell-dir'], True)
        utils.set_spell_metadata()

    # Environment passed down to post-deploy step scripts.
    app.env = os.environ.copy()
    app.env['CONJURE_UP_CACHEDIR'] = app.argv.cache_dir
    app.env['CONJURE_UP_SPELL'] = spell_name

    if app.argv.show_env:
        # --show-env requires a fully specified headless invocation.
        if not app.argv.cloud:
            utils.error("You must specify a cloud for headless mode.")
            sys.exit(1)
        if app.endpoint_type in [None, EndpointType.LOCAL_SEARCH]:
            utils.error("Please specify a spell for headless mode.")
            sys.exit(1)

        show_env()

    if app.argv.cloud:
        # A cloud argument selects headless (non-UI) mode.
        if app.endpoint_type in [None, EndpointType.LOCAL_SEARCH]:
            utils.error("Please specify a spell for headless mode.")
            sys.exit(1)
        app.headless = True
        app.ui = None
        app.env['CONJURE_UP_HEADLESS'] = "1"
        _start()
    else:
        app.ui = ConjureUI()

        EventLoop.build_loop(app.ui, STYLES,
                             unhandled_input=unhandled_input)
        EventLoop.set_alarm_in(0.05, _start)
        EventLoop.run()
| {
"repo_name": "battlemidget/conjure-up",
"path": "conjureup/app.py",
"copies": "1",
"size": "11491",
"license": "mit",
"hash": 8045641459085238000,
"line_mean": 37.3033333333,
"line_max": 79,
"alpha_frac": 0.5642676878,
"autogenerated": false,
"ratio": 4.027690150718542,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019285714285714286,
"num_lines": 300
} |
"""Application environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import logging
import os
import six
from treadmill import watchdog
_LOGGER = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AppEnvironment(object):
    """Layout of a Treadmill application environment on disk.

    :param root:
        Path to the root directory of the Treadmill environment
    :type root:
        `str`
    """

    __slots__ = (
        'apps_dir',
        'app_events_dir',
        'app_types',
        'archives_dir',
        'bin_dir',
        'cache_dir',
        'cleaning_dir',
        'cleanup_apps_dir',
        'cleanup_dir',
        'cleanup_tombstone_dir',
        'configs_dir',
        'images_dir',
        'init1_dir',
        'init_dir',
        'init_tombstone_dir',
        'root',
        'running_dir',
        'running_tombstone_dir',
        'tombstones_dir',
        'watchdogs',
        'watchdog_dir',
    )

    # Names of the sub-directories created under ``root``.
    APPS_DIR = 'apps'
    BIN_DIR = 'bin'
    ARCHIVES_DIR = 'archives'
    CACHE_DIR = 'cache'
    CLEANING_DIR = 'cleaning'
    CLEANUP_DIR = 'cleanup'
    CLEANUP_APPS_DIR = 'cleanup_apps'
    CONFIG_DIR = 'configs'
    INIT_DIR = 'init'
    INIT1_DIR = 'init1'
    RUNNING_DIR = 'running'
    WATCHDOG_DIR = 'watchdogs'
    APP_EVENTS_DIR = 'appevents'
    IMAGES_DIR = 'images'
    TOMBSTONES_DIR = 'tombstones'

    def __init__(self, root):
        self.root = root
        # Top-level directories, each rooted directly at ``root``.
        for attr, leaf in (
                ('apps_dir', self.APPS_DIR),
                ('bin_dir', self.BIN_DIR),
                ('watchdog_dir', self.WATCHDOG_DIR),
                ('running_dir', self.RUNNING_DIR),
                ('cache_dir', self.CACHE_DIR),
                ('cleaning_dir', self.CLEANING_DIR),
                ('cleanup_dir', self.CLEANUP_DIR),
                ('cleanup_apps_dir', self.CLEANUP_APPS_DIR),
                ('configs_dir', self.CONFIG_DIR),
                ('app_events_dir', self.APP_EVENTS_DIR),
                ('archives_dir', self.ARCHIVES_DIR),
                ('images_dir', self.IMAGES_DIR),
                ('init_dir', self.INIT_DIR),
                ('init1_dir', self.INIT1_DIR),
                ('tombstones_dir', self.TOMBSTONES_DIR)):
            setattr(self, attr, os.path.join(root, leaf))
        # Tombstone sub-directories live under the tombstones directory.
        self.cleanup_tombstone_dir = os.path.join(
            self.tombstones_dir, self.CLEANUP_DIR)
        self.running_tombstone_dir = os.path.join(
            self.tombstones_dir, self.RUNNING_DIR)
        self.init_tombstone_dir = os.path.join(
            self.tombstones_dir, self.INIT_DIR)
        # Watchdog manager bound to this environment's watchdog directory.
        self.watchdogs = watchdog.Watchdog(self.watchdog_dir)

    @abc.abstractmethod
    def initialize(self, params):
        """One time initialization of the Treadmill environment.

        :params ``dict`` params:
            dictionary of parameters passed to the OS specific
            `meth:initialize` implementation.
        """
        pass
| {
"repo_name": "bretttegart/treadmill",
"path": "lib/python/treadmill/appenv/appenv.py",
"copies": "1",
"size": "3322",
"license": "apache-2.0",
"hash": 4484385793310961000,
"line_mean": 30.3396226415,
"line_max": 78,
"alpha_frac": 0.5927152318,
"autogenerated": false,
"ratio": 3.298907646474677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4391622878274677,
"avg_score": null,
"num_lines": null
} |
"""Application environments, which determine the servers, database and other
conditions for deployment.
"""
from fabric.api import require, env
import os
from buedafab import aws
def _not_localhost():
    """Shared setup for every remote (non-localhost) environment.

    Remote deploys install the "production" pip requirements, which
    typically include the Python database bindings.
    """
    required = ('pip_requirements', 'pip_requirements_production')
    if all(hasattr(env, attr) for attr in required):
        env.pip_requirements += env.pip_requirements_production
def development():
    """[Env] Configure deployment against the development server.

    Targets dev.bueda.com on the default ssh port (unless hosts were
    already set), marks the environment "DEV", allows untagged deploys,
    and appends the dev-only pip requirements when defined.
    """
    _not_localhost()
    if not env.hosts:
        env.hosts = ['dev.bueda.com:%(ssh_port)d' % env]
    env.allow_no_tag = True
    env.deployment_type = "DEV"
    has_dev_reqs = (hasattr(env, 'pip_requirements')
                    and hasattr(env, 'pip_requirements_dev'))
    if has_dev_reqs:
        env.pip_requirements += env.pip_requirements_dev
def staging():
    """[Env] Configure deployment against the staging server.

    Same host as development (dev.bueda.com, default ssh port), app
    environment "STAGING", untagged deploys allowed.  Appends "-staging"
    to the target path so development and staging can share one machine.
    """
    _not_localhost()
    if not env.hosts:
        env.hosts = ['dev.bueda.com:%(ssh_port)d' % env]
    env.allow_no_tag = True
    env.deployment_type = "STAGING"
    env.path += '-staging'
def production():
    """[Env] Configure deployment against the production fleet.

    When ``env.load_balancer`` is set and no hosts were given, hosts are
    collected from that Elastic Load Balancer.  Deploys must come from a
    tagged commit, and the default revision is master on the remote.
    """
    _not_localhost()
    env.allow_no_tag = False
    env.deployment_type = "PRODUCTION"
    if hasattr(env, 'load_balancer') and not env.hosts:
        env.hosts = aws.collect_load_balanced_instances()
    env.default_revision = '%(master_remote)s/master' % env
def localhost(deployment_type=None):
    """[Env] Bootstrap the localhost - can be either dev, production or staging.

    We don't really use this anymore except for 'fab setup', and even there it
    may not be neccessary. It was originally intended for deploying
    automatically with Chef, but we moved away from that approach.

    :param deployment_type: optional environment name; defaults to "SOLO".
    """
    require('root_dir')
    if not env.hosts:
        env.hosts = ['localhost']
    env.allow_no_tag = True
    env.virtualenv = os.environ.get('VIRTUAL_ENV', 'env')
    # Fix: deployment_type was previously written to env twice -- once with
    # the raw (possibly None) argument and again after defaulting.  Assign
    # it exactly once, after the default is applied.
    if deployment_type is None:
        deployment_type = "SOLO"
    env.deployment_type = deployment_type
    if env.deployment_type == "STAGING":
        env.path += '-staging'
    if (hasattr(env, 'pip_requirements')
            and hasattr(env, 'pip_requirements_dev')):
        env.pip_requirements += env.pip_requirements_dev
def django_development():
    """[Env] Django flavor of the development environment.

    Everything from development(), plus the "dev" database fixtures and
    the development crontab from the scripts directory (crontab use is
    deprecated at Bueda).
    """
    development()
    env.extra_fixtures += ["dev"]
    env.crontab = os.path.join('scripts', 'crontab', 'development')
def django_staging():
    """[Env] Django flavor of the staging environment.

    Everything from staging(), plus the production crontab from the
    scripts directory (crontab use is deprecated at Bueda).
    """
    staging()
    env.crontab = os.path.join('scripts', 'crontab', 'production')
def django_production():
    """[Env] Django flavor of the production environment.

    Everything from production(), plus the production crontab from the
    scripts directory (crontab use is deprecated at Bueda).
    """
    production()
    env.crontab = os.path.join('scripts', 'crontab', 'production')
| {
"repo_name": "alexmerser/ops",
"path": "buedafab/environments.py",
"copies": "3",
"size": "4285",
"license": "mit",
"hash": -7307526362049874000,
"line_mean": 34.7083333333,
"line_max": 80,
"alpha_frac": 0.6665110852,
"autogenerated": false,
"ratio": 4.057765151515151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003196642804829939,
"num_lines": 120
} |
"""Application factory."""
from flask import Flask
from flask.ext.security import SQLAlchemyUserDatastore
from xmas.core import db, mail, security
from xmas.models import Role, User
from xmas.utils import register_blueprints
__all__ = 'create_app',
def create_app(package_name, package_path, settings_override=None,
               register_security_blueprints=True):
    """Build and return the :class:`~flask.Flask` application.

    :param package_name: application package name.
    :param package_path: application package path.
    :param settings_override: a ``dict`` of settings to override.
    :param register_security_blueprints: whether or not to register the
        Flask-Security blueprints.
    """
    app = Flask(package_name, instance_relative_config=True)

    # Layered configuration: package defaults, then the instance file
    # (optional), then explicit overrides.
    app.config.from_object('xmas.settings')
    app.config.from_pyfile('settings.cfg', silent=True)
    app.config.from_object(settings_override)

    # Wire up the extensions.
    db.init_app(app)
    mail.init_app(app)
    user_datastore = SQLAlchemyUserDatastore(db, User, Role)
    security.init_app(
        app,
        user_datastore,
        register_blueprint=register_security_blueprints,
    )

    register_blueprints(app, package_name, package_path)
    return app
| {
"repo_name": "dirn/Secret-Santa",
"path": "xmas/factory.py",
"copies": "1",
"size": "1217",
"license": "bsd-3-clause",
"hash": 8908976718662456000,
"line_mean": 28.6829268293,
"line_max": 71,
"alpha_frac": 0.687756779,
"autogenerated": false,
"ratio": 3.964169381107492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5151926160107492,
"avg_score": null,
"num_lines": null
} |
"""Application factory."""
from flask import Flask
from flask_security import SQLAlchemyUserDatastore
from pygotham.core import db, mail, migrate, security
from pygotham.models import Role, User
from pygotham.utils import check_required_settings, register_blueprints
__all__ = ('create_app',)
def create_app(package_name, package_path, settings_override=None,
               register_security_blueprints=True):
    """Build and return the :class:`~flask.Flask` application.

    :param package_name: application package name.
    :param package_path: application package path.
    :param settings_override: a ``dict`` of settings to override.
    :param register_security_blueprints: whether or not to register the
        Flask-Security blueprints.
    """
    app = Flask(package_name, instance_relative_config=True)

    # Layered configuration; fail fast when required settings are absent.
    app.config.from_object('pygotham.settings')
    app.config.from_pyfile('settings.cfg', silent=True)
    app.config.from_object(settings_override)
    check_required_settings(app.config)

    # Wire up the extensions.
    db.init_app(app)
    mail.init_app(app)
    migrate.init_app(app, db)
    user_datastore = SQLAlchemyUserDatastore(db, User, Role)
    security.init_app(
        app,
        user_datastore,
        register_blueprint=register_security_blueprints,
    )

    register_blueprints(app, package_name, package_path)
    return app
| {
"repo_name": "djds23/pygotham-1",
"path": "pygotham/factory.py",
"copies": "1",
"size": "1335",
"license": "bsd-3-clause",
"hash": 8824202032778256000,
"line_mean": 30.0465116279,
"line_max": 71,
"alpha_frac": 0.6943820225,
"autogenerated": false,
"ratio": 3.892128279883382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086510302383381,
"avg_score": null,
"num_lines": null
} |
# Application file
#
# Simple application to add a business and list businesses
from flask import Flask, url_for
from flask import request, render_template, flash, redirect
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import Security, utils
import forms
from forms import BizForm, AddBiz
# Create the application and load the configuration object.
app = Flask(__name__)
app.config.from_object('config')
# connect to the database
db = SQLAlchemy(app)
# the "models" import SHOULD be after the above "db" assignment
import models
# 'from models import *' works but 'from models import Business' does NOT work
# (reason unverified -- presumably an import-order/circular-import effect;
# TODO confirm)
from models import *
# run db.create_all before running the app to create DB Tables
@app.route('/')
@app.route('/index')
def index():
    """Render the home page."""
    return render_template('index.html', title='Home')
@app.route('/add/', methods=['GET', 'POST'])
def new_biz():
    """Show the add-business form; on a valid POST, persist and redirect.

    GET renders the empty form; a valid POST creates a ``Business`` row
    and redirects to the business listing.
    """
    form = AddBiz(csrf_enabled=True)
    if form.validate_on_submit():
        # Fix: the local was named ``new_biz``, shadowing this view function.
        business = Business(
            form.name.data,
            form.description.data,
            form.added_date.data
        )
        db.session.add(business)
        db.session.commit()
        return redirect(url_for('biz_list'))
    return render_template('biz.html', form=form)
@app.route('/bizlist', methods=['GET', 'POST'])
def biz_list():
    """Query all Business rows and render the listing page."""
    # Fix: the local was named ``biz_list``, shadowing this view function.
    businesses = Business.query.all()
    return render_template('biz_list.html', bizs=businesses)
if __name__ == '__main__':
    # Development entry point; DEBUG comes from the loaded config object.
    app.run(
        host='127.0.0.1',
        port=8080,  # was int('8080') -- pointless string round-trip
        debug=app.config['DEBUG']
    )
| {
"repo_name": "akaak/flask-mega-tutorial",
"path": "part-iii-forms/app.py",
"copies": "1",
"size": "1658",
"license": "bsd-3-clause",
"hash": 36544880659954710,
"line_mean": 23.0289855072,
"line_max": 78,
"alpha_frac": 0.6314837153,
"autogenerated": false,
"ratio": 3.5809935205183585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47124772358183586,
"avg_score": null,
"num_lines": null
} |
"""Application framework.
This framework is somewhat opinionated; it assumes that applications
need four pieces being initialized before start:
* contextlib.ExitStack
* garage.parameters
* garage.parts
* logging
(At least in my use cases, they are almost ubiquitous.)
"""
# Public API of this module.
__all__ = [
    'App',
    'ensure_app',
    'run',
    'with_apps',
    'with_argument',
    'with_decorators',
    'with_defaults',
    'with_description',
    'with_help',
    'with_logging_level',
    'with_part_names',
    'with_prog',
    'with_selected_makers',
]
import argparse
import contextlib
import logging
import os
import sys
import threading
from garage import parameters
from garage import parts
from garage.assertions import ASSERT
# Part registry for this module; the exit stack is marked AUTO and is
# supplied as an input part when the app is assembled (see assemble_parts).
PARTS = parts.Parts(__name__)
PARTS.exit_stack = parts.AUTO
def ensure_app(main):
    """Wrap ``main`` in an :class:`App` unless it already is one."""
    return main if isinstance(main, App) else App(main)
def with_decorators(*decorators):
    """Return one decorator equivalent to stacking ``decorators`` in order."""
    def combined(main):
        # Apply right-to-left so the argument order reads like @-stacking.
        result = main
        for deco in reversed(decorators):
            result = deco(result)
        return result
    return combined
def with_prog(prog):
    """Set application name of argparse.ArgumentParser."""
    def decorate(main):
        return ensure_app(main).with_prog(prog)
    return decorate
def with_description(description):
    """Set description of argparse.ArgumentParser."""
    def decorate(main):
        return ensure_app(main).with_description(description)
    return decorate
def with_help(help):
    """Set help message of argparse.ArgumentParser."""
    def decorate(main):
        return ensure_app(main).with_help(help)
    return decorate
def with_argument(*args, **kwargs):
    """Add argument to argparse.ArgumentParser."""
    def decorate(main):
        return ensure_app(main)._with_argument_for_decorator(*args, **kwargs)
    return decorate
def with_defaults(**defaults):
    """Update defaults of argparse.ArgumentParser."""
    def decorate(main):
        return ensure_app(main).with_defaults(**defaults)
    return decorate
def with_apps(dest, help, *apps):
    """Set a group of applications under this one."""
    def decorate(main):
        return ensure_app(main).with_apps(dest, help, *apps)
    return decorate
def with_logging_level(logging_level):
    """Set default logging level."""
    def decorate(main):
        return ensure_app(main).with_logging_level(logging_level)
    return decorate
def with_part_names(*part_names):
    """Add part names for garage.parts.assemble.

    Call this when you want to assemble these parts but do not want
    them to be passed to main.
    """
    def decorate(main):
        return ensure_app(main).with_part_names(*part_names)
    return decorate
def with_selected_makers(selected_makers):
    """Update selected maker for garage.parts.assemble."""
    def decorate(main):
        return ensure_app(main).with_selected_makers(selected_makers)
    return decorate
class App:
    """Represent an application.

    Wraps a main callable with argparse configuration, an optional group of
    sub-applications, a logging level, and the garage.parts wiring needed to
    inject assembled parts into the main function.
    """

    class Group:
        """Represent a group of applications."""

        def __init__(self, dest, help, apps):
            # argparse "dest" under which the chosen sub-app is stored.
            self.dest = dest
            self.help = help
            self.apps = apps

    def __init__(self, main):
        self._main = main
        # For argparse.ArgumentParser.
        self._prog = None
        self._description = None
        self._help = None
        self._arguments = []
        self._defaults = {}
        # For other applications.
        self._app_group = None
        # For logging.
        self._logging_level = logging.INFO
        # For garage.parts.
        self._part_names = set()
        self._selected_makers = {}
        # Inject these parts when calling the main function.
        self._using_part_specs = parts.parse_maker_spec(self._main).input_specs
        self._using_parts = None

    def __repr__(self):
        return '<%s.%s 0x%x %r>' % (
            self.__module__, self.__class__.__qualname__,
            id(self),
            self._main,
        )

    # Provide both fluent interface and decorator chain interface.
    # Each setter below returns self so calls can be chained.

    def with_prog(self, prog):
        self._prog = prog
        return self

    def with_description(self, description):
        self._description = description
        return self

    def with_help(self, help):
        self._help = help
        return self

    # Decorator chain style of with_argument.
    def _with_argument_for_decorator(self, *args, **kwargs):
        # This is intended to be used in decorator chains; thus the
        # order is usually reversed (so prepend here, not append).
        self._arguments.insert(0, (args, kwargs))
        return self

    # Fluent style of with_argument.
    def with_argument(self, *args, **kwargs):
        self._arguments.append((args, kwargs))
        return self

    def with_defaults(self, **defaults):
        self._defaults.update(defaults)
        return self

    def with_apps(self, dest, help, *apps):
        # Normalize all sub-apps to App instances before grouping.
        apps = [ensure_app(app) for app in apps]
        ASSERT(apps, 'expect at least one app: %r', apps)
        self._app_group = self.Group(dest, help, apps)
        return self

    def with_logging_level(self, logging_level):
        self._logging_level = logging_level
        return self

    def with_part_names(self, *part_names):
        self._part_names.update(part_names)
        return self

    def with_selected_makers(self, selected_makers):
        self._selected_makers.update(selected_makers)
        return self

    def get_prog(self, argv0=None):
        # Fall back to argv[0] and then the main function's name.
        return self._prog or argv0 or self._main.__name__

    def get_description(self):
        # Fall back to the main function's docstring, then its module's.
        return (self._description or
                self._main.__doc__ or
                sys.modules[self._main.__module__].__doc__)

    def get_help(self):
        return self._help or self.get_description()

    def prepare(self, argv, exit_stack):
        """Prepare context for running application.main.

        Builds the parser, parses ``argv``, configures logging, reads
        parameter values, and assembles/injects parts.  Returns the
        parsed argparse namespace.
        """
        # Firstly, configure command-line parser.
        parser = argparse.ArgumentParser(
            prog=self.get_prog(os.path.basename(argv[0])),
            description=self.get_description(),
        )
        parser.add_argument(
            '-v', '--verbose',
            action='count', default=0,
            help='increase log level',
        )
        self.configure_parser(parser)
        # Add parameter's command-line arguments at last.
        parameter_list = parameters.add_arguments_to(parser)
        # Secondly, parse command-line arguments.
        args = parser.parse_args(argv[1:])
        # Thirdly, set up the "global" stuff.
        # Configure logging as soon as possible.
        configure_logging(self._logging_level, args.verbose)
        # Then read parameter values.
        parameters.read_parameters_from(args, parameter_list)
        # Assemble parts for applications.
        values = self.assemble_parts(exit_stack)
        self.provide_parts(values)
        return args

    def configure_parser(self, parser):
        """Configure argparse.ArgumentParser recursively."""
        parser.set_defaults(**self._defaults)
        for add_argument_args, add_argument_kwargs in self._arguments:
            parser.add_argument(*add_argument_args, **add_argument_kwargs)
        if self._app_group:
            subparsers = parser.add_subparsers(help=self._app_group.help)
            subparsers.dest = self._app_group.dest
            # TODO: We need to explicitly set `required` (see [1] for
            # more). This bug is fixed in Python 3.7 (see [2]). We may
            # remove this once we upgrade everywhere to Python 3.7.
            # [1] http://bugs.python.org/issue9253
            # [2] https://bugs.python.org/issue26510
            subparsers.required = True
            for app in self._app_group.apps:
                subparser = subparsers.add_parser(
                    app.get_prog(),
                    description=app.get_description(),
                    help=app.get_help(),
                )
                # Selecting this sub-command stores the App object itself
                # under the group's dest.
                subparser.set_defaults(**{self._app_group.dest: app})
                app.configure_parser(subparser)

    def assemble_parts(self, exit_stack):
        """Assemble parts and fill up self._using_parts."""
        part_names = []
        selected_makers = {}
        self.collect_for_assemble(part_names, selected_makers)
        return parts.assemble(
            part_names=part_names,
            input_parts={PARTS.exit_stack: exit_stack},
            selected_makers=selected_makers,
        )

    def collect_for_assemble(self, part_names, selected_makers):
        """Collect stuff for assemble() recursively.

        Unfortunately there is no way for me to know which app is going
        to be called, and thus we collect stuff from all sub-apps.
        """
        part_names.extend(self._part_names)
        part_names.extend(self._using_part_specs)
        selected_makers.update(self._selected_makers)
        if self._app_group:
            for app in self._app_group.apps:
                app.collect_for_assemble(part_names, selected_makers)

    def provide_parts(self, values):
        """Provide parts to using_parts of this and all sub-apps."""
        ASSERT.none(self._using_parts)
        # Map each main-function parameter to its assembled part value.
        self._using_parts = {
            spec.parameter: values[spec.part_name]
            for spec in self._using_part_specs
        }
        if self._app_group:
            for app in self._app_group.apps:
                app.provide_parts(values)

    def __call__(self, args, **kwargs):
        """Run the main function."""
        ASSERT(
            self._using_parts is not None,
            'expect context being set up before calling app: %r', self,
        )
        return self._main(args, **kwargs, **self._using_parts)
def run(main, argv=None):
    """Run the application.

    An application can be merely a callable that takes `args` as its
    sole argument and returns and integral status code.
    """
    app = ensure_app(main)
    with contextlib.ExitStack() as exit_stack:
        if argv is None:
            argv = sys.argv
        parsed = app.prepare(argv=argv, exit_stack=exit_stack)
        status = app(parsed)
    sys.exit(status)
def configure_logging(level, verbose):
    """Configure the root logger, bumping verbosity by ``verbose`` steps."""
    log_format = '%(asctime)s %(threadName)s %(levelname)s %(name)s: %(message)s'
    levels = (logging.WARNING, logging.INFO, logging.DEBUG, TRACE)
    # Start at the requested level and step finer per -v, capped at TRACE.
    chosen = levels[min(levels.index(level) + verbose, len(levels) - 1)]
    logging.basicConfig(level=chosen, format=log_format)
# Add a new, finer logging level (one notch below DEBUG).
TRACE = logging.DEBUG - 1
logging.addLevelName(TRACE, 'TRACE')
# For prettier logging messages.
threading.main_thread().name = 'main'
# Check if debug logging is enabled via the DEBUG environment variable
# ("", "0" and "false" all mean disabled).
if os.environ.get('DEBUG', '').lower() not in ('', '0', 'false'):
    configure_logging(logging.DEBUG, 0)
    logging.getLogger(__name__).debug('start at DEBUG level')
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/apps.py",
"copies": "1",
"size": "10651",
"license": "mit",
"hash": -1834513047060682500,
"line_mean": 29.6945244957,
"line_max": 79,
"alpha_frac": 0.6170312647,
"autogenerated": false,
"ratio": 3.9801943198804186,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097225584580418,
"avg_score": null,
"num_lines": null
} |
"""Application helper utilities."""
import importlib
import pkgutil
from flask import Blueprint
__all__ = ('check_required_settings', 'register_blueprints',)
DOES_NOT_EXIST = '!@DNE@!' # Placeholder value to use for missing settings.
REQUIRED_SETTINGS = 'SECRET_KEY', 'SECURITY_PASSWORD_SALT'
def check_required_settings(config, keys=REQUIRED_SETTINGS):
"""Validate the presence of required settings."""
for key in keys:
if config.get(key, DOES_NOT_EXIST) == DOES_NOT_EXIST:
message = 'The {} configuration settings is required.'.format(key)
raise RuntimeError(message)
def register_blueprints(app, package_name, package_path):
    """Register all :class:`~flask.Blueprint` instances on the app."""
    for _, name, _ in pkgutil.iter_modules(package_path):
        module = importlib.import_module('{}.{}'.format(package_name, name))
        # Any module-level attribute that is a Blueprint gets registered.
        for attr_name in dir(module):
            candidate = getattr(module, attr_name)
            if isinstance(candidate, Blueprint):
                app.register_blueprint(candidate)
| {
"repo_name": "djds23/pygotham-1",
"path": "pygotham/utils.py",
"copies": "3",
"size": "1025",
"license": "bsd-3-clause",
"hash": 7247918413007204000,
"line_mean": 34.3448275862,
"line_max": 78,
"alpha_frac": 0.6595121951,
"autogenerated": false,
"ratio": 3.8679245283018866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 29
} |
# application import
from content.models import Image
from models import Season
class GamePhotoHelper:
    """Helper class to prepare Game photo view data."""

    def __init__(self):
        pass

    @classmethod
    def get_photos(cls, albumn_slug):
        """Return active photos of the album with this slug, ordered by weight.

        Fix: the classmethod previously declared its first parameter as
        ``self``; it is the class, so it is now ``cls``.
        """
        photos = Image.objects.filter(
            active=True, albumn__slug=albumn_slug).order_by('weight')
        return photos

    @classmethod
    def get_games(cls, games):
        """Map each game to a small dict for the photo templates.

        Returns a dict keyed by a 1-based counter (templates index with
        ``forloop.counter``) holding the game id, display name and album slug.
        """
        game_dict = {}
        for i, game in enumerate(games, start=1):
            game_dict[i] = {
                'id': game.id,
                'name': game.name(),
                # albumn with slug: /albumn/albumn-slug
                'albumn_slug': game.albumn.slug,
            }
        return game_dict
class GameHelper:
    """Helper to prepare Game data for views."""

    def __init__(self):
        pass

    @classmethod
    def find_most_recent_season_year(cls):
        """Return the most recent Season row, or ``None`` if the table is empty.

        Fix: the classmethod previously declared its first parameter as
        ``self``; it is the class, so it is now ``cls``.
        """
        return Season.objects.order_by('pk').last()

    def get_games_for_view(self, games):
        """Convert games to a dict keyed 1..n for template forloop.counter.

        Each entry carries id, team names, address, date/time strings, and
        either the final score (finished games) or a "future game" label.
        """
        game_dict = {}
        for i, game in enumerate(games, start=1):
            g = {}
            g['id'] = game.id
            g['teams'] = '{0} - {1}'.format(game.host.name, game.guest.name)
            g['address'] = game.address
            g['date'] = game.start_time.strftime('%Y-%m-%d')
            g['time'] = (game.start_time.strftime('%H:%M') + ' - ' +
                         game.end_time.strftime('%H:%M'))
            if game.finished:
                g['status'] = '{0} - {1}'.format(game.host_score,
                                                 game.guest_score)
            else:
                # NOTE(review): `_` (gettext) is not in this module's visible
                # imports -- this branch would raise NameError unless a
                # translation helper is installed elsewhere. TODO confirm.
                g['status'] = _('future_game')
            g['recorder'] = game.recorder
            g['timer'] = game.timer
            game_dict[i] = g
        return game_dict
| {
"repo_name": "vollov/lotad",
"path": "game/service.py",
"copies": "1",
"size": "2228",
"license": "mit",
"hash": 3605851955243056600,
"line_mean": 27.2025316456,
"line_max": 99,
"alpha_frac": 0.4842908438,
"autogenerated": false,
"ratio": 3.9087719298245616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750470838749083,
"avg_score": 0.028518386975095456,
"num_lines": 79
} |
"""Application initialization and running."""
# Copyright © 2014 Mikko Ronkainen <firstname@mikkoronkainen.com>
# License: MIT, see the LICENSE file.
import configparser as cp
import distutils.util as du
import sfml as sf
from pymazing import framebuffer, game_state_simple_cube, game_state_loaded_level, game_engine
def run():
    """Read settings from a file, initialize the components and run the game."""
    config = cp.ConfigParser()
    config.read("data/settings.ini")

    # Window setup from the [window] section.
    width = int(config["window"]["width"])
    height = int(config["window"]["height"])
    style_flags = sf.Style.DEFAULT
    if du.strtobool(config["window"]["fullscreen"]):
        style_flags |= sf.Style.FULLSCREEN
    window = sf.RenderWindow(sf.VideoMode(width, height), "Pymazing", style_flags)
    window.vertical_synchronization = du.strtobool(config["window"]["vsync"])
    window.mouse_cursor_visible = not du.strtobool(config["window"]["hide_mouse"])
    window.key_repeat_enabled = False

    # Framebuffer scaled relative to the window size.
    scale = float(config["window"]["framebuffer_scale"])
    framebuffer_ = framebuffer.FrameBuffer()
    framebuffer_.resize(int(scale * width), int(scale * height))

    # Game states and engine wiring; the loaded-level state starts active.
    state_simple_cube = game_state_simple_cube.GameStateSimpleCube(config)
    state_loaded_level = game_state_loaded_level.GameStateLoadedLevel(config)
    engine = game_engine.GameEngine(window, framebuffer_, config)
    engine.game_states.append(state_simple_cube)
    engine.game_states.append(state_loaded_level)
    engine.active_game_state = state_loaded_level
    engine.run()
| {
"repo_name": "mikoro/pymazing",
"path": "pymazing/application.py",
"copies": "1",
"size": "1899",
"license": "mit",
"hash": -6815901433268958000,
"line_mean": 36.7346938776,
"line_max": 94,
"alpha_frac": 0.7012644889,
"autogenerated": false,
"ratio": 3.65,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48512644889,
"avg_score": null,
"num_lines": null
} |
APPLICATION_LABEL = 'cog'

# Purposes a project may declare.
PURPOSE_TYPES = (
    'Overall Project Coordination',
    'Steering Committee',
    'Design',
    'Design and Implementation Review',
    'Task Prioritization',
    'Requirements Identification',
    'Strategic Direction',
    'External Review',
    'Implementation',
    'Meeting Planning',
    'Testing',
    'Knowledge Transfer',
    'Grant Writing',
    'Other',
)
PURPOSE_CV = [(x, x) for x in PURPOSE_TYPES]

COMMUNICATION_TYPES = (
    'Telco',
    'Face-to-face',
    'Webinar',
    'Video Conference',
    'Internet Chat',
    'Wiki',
    'Mailing List'
)
COMMUNICATION_CV = [(x, x) for x in COMMUNICATION_TYPES]

# list of tuples containing (role value, role display order)
LEAD_ORGANIZATIONAL_ROLES = (
    ('Principal Investigator', 1),
    ('Co-Investigator', 2),
    ('Program Manager', 3),
    ('Project Manager', 4),
    ('Software Architect', 5),
    ('Lead', 6),
    ('Other Lead', 7),
)
# list of tuples containing (role value, role display order)
MEMBER_ORGANIZATIONAL_ROLES = (
    ('Administrative Assistant', 1),
    ('Data Manager', 2),
    ('Outreach Coordinator', 3),
    ('Researcher', 4),
    ('Software Developer', 5),
    ('Webmaster', 6),
    ('Other Member', 7),
)
ORGANIZATIONAL_ROLE_CV = (
    [(x[0], " %s (Lead Role)" % x[0]) for x in LEAD_ORGANIZATIONAL_ROLES]
    + [('', '--------------')]
    + [(x[0], " %s (Member Role)" % x[0]) for x in MEMBER_ORGANIZATIONAL_ROLES]
)

# create and merge a combined dictionary of organizational roles
LEAD_ORGANIZATIONAL_ROLES_DICT = dict(LEAD_ORGANIZATIONAL_ROLES)
MEMBER_ORGANIZATIONAL_ROLES_DICT = dict(MEMBER_ORGANIZATIONAL_ROLES)
# Fix: dict(a.items() + b.items()) is Python-2-only -- dict views do not
# support "+" in Python 3.  dict(base, **overrides) works on both and keeps
# the original "second dict wins" merge semantics.
ORGANIZATIONAL_ROLES_DICT = dict(LEAD_ORGANIZATIONAL_ROLES_DICT,
                                 **MEMBER_ORGANIZATIONAL_ROLES_DICT)
ROLE_CATEGORY_LEAD = 'Lead'
ROLE_CATEGORY_MEMBER = 'Member'
ORGANIZATIONAL_ROLE_CATEGORIES = (ROLE_CATEGORY_LEAD, ROLE_CATEGORY_MEMBER)
ORGANIZATIONAL_ROLE_CATEGORIES_CV = [(x, x) for x in ORGANIZATIONAL_ROLE_CATEGORIES]

MANAGEMENT_BODY_CATEGORY_STRATEGIC = 'Strategic'
MANAGEMENT_BODY_CATEGORY_OPERATIONAL = 'Operational'
MANAGEMENT_BODY_CATEGORIES = (MANAGEMENT_BODY_CATEGORY_STRATEGIC,
                              MANAGEMENT_BODY_CATEGORY_OPERATIONAL)
MANAGEMENT_BODY_CATEGORIES_CV = [(x, x) for x in MANAGEMENT_BODY_CATEGORIES]

# list of tuples containing (management body value, management body display order)
STRATEGIC_MANAGEMENT_BODIES = (
    ('Strategic Direction', 1),
    ('Advice or Guidance', 2),
    ('Program Direction', 3),
    ('Review', 4),
)
# list of tuples containing (role value, role display order)
OPERATIONAL_MANAGEMENT_BODIES = (
    ('Research', 1),
    ('Development', 2),
    ('Requirements Identification', 3),
    ('Task Prioritization', 4),
    ('Testing', 5),
    ('Review', 6),
    ('Meeting and Event Planning', 7),
    ('Administration', 8),
)
MANAGEMENT_BODY_CV = (
    [(x[0], " %s (Strategic)" % x[0]) for x in STRATEGIC_MANAGEMENT_BODIES]
    + [(x[0], " %s (Operational)" % x[0]) for x in OPERATIONAL_MANAGEMENT_BODIES]
)

# create and merge a combined dictionary of management bodies
STRATEGIC_MANAGEMENT_BODY_DICT = dict(STRATEGIC_MANAGEMENT_BODIES)
OPERATIONAL_MANAGEMENT_BODY_DICT = dict(OPERATIONAL_MANAGEMENT_BODIES)
# Fix: Py2-only ".items() + .items()" replaced with a portable merge; the
# operational dict still overrides on key collisions ('Review').
MANAGEMENT_BODY_DICT = dict(STRATEGIC_MANAGEMENT_BODY_DICT,
                            **OPERATIONAL_MANAGEMENT_BODY_DICT)
MEMBERSHIP_TYPES = ('Open', 'Closed', 'By Invitation')
MEMBERSHIP_CV = [(x, x) for x in MEMBERSHIP_TYPES]

# Project role identifiers.
ROLE_ADMIN = 'admin'
ROLE_CONTRIBUTOR = 'contributor'
ROLE_USER = 'user'
ROLES = [ROLE_ADMIN, ROLE_CONTRIBUTOR, ROLE_USER]

# Document categories and the file extensions accepted for each.
DOCUMENT_TYPE_ALL = 'All'
DOCUMENT_TYPE_IMAGE = 'Image'
DOCUMENT_TYPE_TEXT = 'Text'
DOCUMENT_TYPE_PRESENTATION = 'Presentation'
DOCUMENT_TYPE_PROGRAM = 'Program'
DOCUMENT_TYPES = {
    # Fix: '.jpg' was corrupted to 'jpg,' (missing dot, stray comma), which
    # made .jpg uploads unrecognized as images.
    DOCUMENT_TYPE_IMAGE: ['.gif', '.png', '.jpg', '.jpeg'],
    DOCUMENT_TYPE_TEXT: ['.txt', '.pdf', '.doc', '.docx'],
    DOCUMENT_TYPE_PRESENTATION: ['.ppt', '.pptx', '.key'],
    DOCUMENT_TYPE_PROGRAM: ['.java', '.py', '.sh']
}

# path of default logo relative to MEDIA_ROOT
# use a location outside of "logos/" so that the default logo can
# be distinguished from uploaded logos
DEFAULT_LOGO = "cog/img/cog_web_beta.png"
FOOTER_LOGO = "cog/img/logo_1310_cogfootershrunk.PNG"
UPLOAD_DIR_PHOTOS = "photos/"
UPLOAD_DIR_LOGOS = "logos/"

# DEFAULT_IMAGES are located under static/cog/img/...
DEFAULT_IMAGES = {'User': 'cog/img/unknown.jpeg',
                  'Collaborator': 'cog/img/unknown.jpeg',
                  'Organization': 'cog/img/notfound.jpeg',
                  'FundingSource': 'cog/img/notfound.jpeg'}

# legacy media sub-directories of 'projects/'
SYSTEM_DOCS = 'system_docs'
SYSTEM_IMAGES = 'system_images'

# Upload size reference (bytes): 1MB=1048576, 2.5MB=2621440, 5MB=5242880,
# 10MB=10485760, 20MB=20971520, 50MB=52428800, 100MB=104857600,
# 250MB=214958080, 500MB=429916160.

RESEARCH_KEYWORDS_MAX_CHARS = 60
RESEARCH_INTERESTS_MAX_CHARS = 1000

# signals
SIGNAL_OBJECT_CREATED = 'object_created'
SIGNAL_OBJECT_UPDATED = 'object_updated'
SIGNAL_OBJECT_DELETED = 'object_deleted'

DEFAULT_SEARCH_FACETS = {'project': 'Project',
                         'variable': 'Variable'}
| {
"repo_name": "sashakames/COG",
"path": "cog/models/constants.py",
"copies": "2",
"size": "6453",
"license": "bsd-3-clause",
"hash": 2739642855856173600,
"line_mean": 38.8333333333,
"line_max": 198,
"alpha_frac": 0.5409886874,
"autogenerated": false,
"ratio": 3.7582993593476997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52992880467477,
"avg_score": null,
"num_lines": null
} |
'''Application logic and muxing'''
from collections import defaultdict
import json
import logging
import re
import sys
import json
import urllib
from flask import url_for, session, request
import github
import github_comments
class PullRequest(object):
    """Aggregated view of one GitHub pull request.

    Collects the PR itself, its commits (including the base commit), changed
    files, and both published and draft review comments, then cross-links
    comments onto commits and files for rendering.

    NOTE(review): this module appears to target Python 2 (urllib.quote);
    confirm before running under Python 3.
    """
    @staticmethod
    def from_github(db, token, login, owner, repo, number):
        """Build a fully-populated PullRequest by querying the GitHub API."""
        pr = PullRequest()
        pr._db = db
        pr._token = token
        pr._login = login
        pr._owner = owner
        pr._repo = repo
        pr._number = number
        pr._get_pr_info()
        return pr
    def __init__(self):
        # Instances are constructed via from_github(); nothing to set here.
        pass
    def _api(self, fn, *args):
        '''Helper to pass token, owner, and repo to github.py'''
        all_args = [self._token, self._owner, self._repo] + list(args)
        return fn(*all_args)
    def _get_outdated_commit_shas(self, commit_shas, comments):
        '''Outdated commit SHAs are known only from comments on them.'''
        # Any commented-on SHA not in the PR's current commit list must come
        # from an outdated (force-pushed-away) revision.
        known_shas = set(commit_shas)
        outdated_shas = set()
        for comment in comments['diff_level']:
            sha = comment['original_commit_id']
            if sha not in known_shas:
                outdated_shas.add(sha)
        return list(outdated_shas)
    def _attach_comments(self):
        '''Adds a 'comments' field to each commit, file and file/commit pair.'''
        # Index commits by SHA and (sha, filename) pairs for O(1) attachment.
        sha_to_commit = {}
        for commit in self.commits:
            sha_to_commit[commit['sha']] = commit
        sha_file_map = {}
        for commit in self.commits:
            commit['comments'] = []
            for f in commit['files']:
                f['comments'] = []
                sha_file_map[(commit['sha'], f['filename'])] = f
        path_to_file = {}
        for f in self.files:
            f['comments'] = []
            path_to_file[f['filename']] = f
        for comment in self.comments['diff_level']:
            sha = comment['original_commit_id']
            if sha in sha_to_commit:
                sha_to_commit[sha]['comments'].append(comment)
            pair = (sha, comment['path'])
            if pair in sha_file_map:
                sha_file_map[pair]['comments'].append(comment)
            path_to_file[comment['path']]['comments'].append(comment)
        def add_counts(obj):
            # Summarize published vs. draft counts; drafts carry an
            # 'is_draft' key (added by the local DB layer).
            cs = obj['comments']
            obj.update({
                'total_comment_count': len(cs),
                'comment_count': len([c for c in cs if 'is_draft' not in c]),
                'draft_comment_count': len([c for c in cs if 'is_draft' in c])
            })
        for commit in self.commits:
            add_counts(commit)
            for f in commit['files']:
                add_counts(f)
        for f in self.files:
            add_counts(f)
    def _get_pr_info(self):
        '''Fill in basic information about a pull request.'''
        pr = self._api(github.get_pull_request, self._number)
        # get a list of files which have been affected by this PR, base to
        # head.
        sha1 = pr['base']['sha']
        sha2 = pr['head']['sha']
        diff_info = self._api(github.get_diff_info, sha1, sha2)
        files = diff_info['files']
        # get a list of commits in the pull request. The API does not return
        # "outdated" commits or the base commit. We add these using auxiliary
        # data.
        commit_shas = [c['sha'] for c in self._api(github.get_pull_request_commits, self._number)]
        comments = self._api(github.get_pull_request_comments, self._number)
        # NOTE: need to do some more thinking about outdated commits.
        # Since the PR's base sha may have changed since the commit, it
        # could be hard to show a meaningful diff.
        # outdated_commit_shas = self._get_outdated_commit_shas(commit_shas, comments)
        # commit_shas.extend(outdated_commit_shas)
        commit_shas.append(pr['base']['sha'])
        # Get "thick" commit data.
        # This includes a list of modified files, whereas
        # get_pull_request_commits does not. This gives us information about
        # reverted files.
        commits = []
        for sha in commit_shas:
            commits.append(self._api(github.get_commit_info, sha))
        # Newest commit first.
        commits.sort(key=lambda c: c['commit']['committer']['date'])
        commits.reverse()
        # Merge draft and published comments.
        draft_comments = self._db.get_draft_comments(
            self._login, self._owner, self._repo, self._number)
        for comment in draft_comments:
            comments['diff_level'].append(self._db.githubify_comment(comment))
        github_comments.add_line_numbers_to_comments(
            self._token, self._owner, self._repo,
            pr['base']['sha'], comments['diff_level'])
        github_comments.add_in_response_to(pr, comments['diff_level'])
        self.pull_request = pr
        self.commits = commits
        self.comments = comments
        self.files = files
        self._attach_comments()
        self.reverted_files = self._find_reverted_files()
        self._augment_commits()
        self._augment_files()
    def _find_reverted_files(self):
        '''Look for files appearing only in intermediate commits.'''
        files = set([f['filename'] for f in self.files])
        reverted_files = set()
        for commit in self.commits[:-1]:
            if len(commit['parents']) >= 2:
                # ignore merge commits.
                # See http://stackoverflow.com/questions/6713652/git-diff-unique-to-merge-commit
                continue
            for f in commit['files']:
                path = f['filename']
                if path not in files:
                    reverted_files.add(path)
        return list(reverted_files)
    def _augment_commits(self):
        """Add a one-line 'short_message' to each commit; label and reposition the base commit."""
        base_index = -1
        for idx, commit in enumerate(self.commits):
            commit.update({
                'short_message':
                    re.sub(r'[\n\r].*', '', commit['commit']['message']),
            })
            if commit['sha'] == self.pull_request['base']['sha']:
                commit['short_message'] = '(base)'
                base_index = idx
        # move the base commit to the bottom.
        # Even if that's not where it belongs chronologically, it is where
        # it belongs logically.
        if base_index >= 0:
            base_commit = self.commits[base_index]
            del self.commits[base_index]
            self.commits.append(base_commit)
    def _augment_files(self):
        # Placeholder; file dicts currently need no extra derived fields.
        pass
    def add_file_diff_links(self, sha1, sha2):
        """Attach a 'link' URL (sha1..sha2 diff view) to each changed file."""
        # NOTE(review): urllib.quote exists only in Python 2
        # (urllib.parse.quote in Python 3).
        for f in self.files:
            f.update({
                'link': url_for('file_diff', owner=self._owner, repo=self._repo, number=self._number) + '?path=' + urllib.quote(f['filename']) + '&sha1=' + urllib.quote(sha1) + '&sha2=' + urllib.quote(sha2) + '#diff'
            })
def _add_urls_to_pull_requests(prs):
    """Attach a 'url' key (link to this app's review page) to each PR dict."""
    for pull in prs:
        base_repo = pull['base']['repo']
        pull['url'] = url_for(
            'pull',
            owner=base_repo['owner']['login'],
            repo=base_repo['name'],
            number=pull['number'])
def handle_get_pull_requests(owner, repo):
    '''Returns template vars for open pull requests for a repo.'''
    # Always bypass the cache so the PR list reflects the latest state.
    prs = github.get_pull_requests(
        session['token'], owner, repo, bust_cache=True)
    _add_urls_to_pull_requests(prs)
    return {'logged_in_user': session['login'], 'pull_requests': prs}
def count_open_pull_requests(owner, repo):
    """Count open PRs for owner/repo and collect the logged-in user's own PRs.

    Returns:
        dict: {'count': total number of open PRs,
               'own': list of PRs opened by the logged-in user}
    """
    token = session['token']
    login = session['login']
    pull_requests = github.get_pull_requests(token, owner, repo,
                                             bust_cache=True)
    _add_urls_to_pull_requests(pull_requests)
    # List comprehension instead of filter(): under Python 3 filter() returns
    # a lazy iterator, which breaks consumers that expect a list (len(),
    # repeated iteration in templates).
    own_prs = [pr for pr in pull_requests if pr['user']['login'] == login]
    return {
        'count': len(pull_requests),
        'own': own_prs
    }
| {
"repo_name": "danvk/better-pull-requests",
"path": "gitcritic.py",
"copies": "1",
"size": "7875",
"license": "apache-2.0",
"hash": -6767284965684197000,
"line_mean": 34.6334841629,
"line_max": 216,
"alpha_frac": 0.5594920635,
"autogenerated": false,
"ratio": 3.9081885856079404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.996441705337278,
"avg_score": 0.0006527191470319439,
"num_lines": 221
} |
"""Application management CLI"""
import os
from pathlib import Path
import sys
from sys import stderr
# noinspection PyPackageRequirements
from dotenv import load_dotenv
from typing import Dict
from flask_script import Manager, Shell
from psycopg2 import DatabaseError
from sqlalchemy.exc import StatementError
from pma_api import create_app
from pma_api.config import PROJECT_ROOT_PATH
from pma_api.manage.server_mgmt import store_pid
from pma_api.manage.db_mgmt import get_api_data, get_ui_data, \
make_shell_context, connection_error, backup_db, \
restore_db, list_backups as listbackups, \
list_ui_data as listuidata, list_datasets as listdatasets, \
backup_source_files as backupsourcefiles
from pma_api.manage.initdb_from_wb import InitDbFromWb
from pma_api.models import db, Cache, ApiMetadata, Translation
from pma_api.utils import dict_to_pretty_json
# Load environment variables from the project's .env file before the app is
# created, so create_app() and the selected config can read them.
load_dotenv(dotenv_path=Path(PROJECT_ROOT_PATH) / '.env')
app = create_app(os.getenv('ENV_NAME', 'default'))
manager = Manager(app)
@manager.option('-a', '--api_file_path', help='Custom path for api file')
@manager.option('-u', '--ui_file_path', help='Custom path for ui file')
def initdb(api_file_path: str, ui_file_path: str):
    """Initialize a fresh database instance.
    WARNING: If DB already exists, will drop it.
    Side effects:
        - Drops database
        - Creates database
        - Prints results
    Args:
        api_file_path (str): Path to API spec file; if not present, gets
            from default path
        ui_file_path (str): Path to UI spec file; if not present, gets
            from default path
    """
    # Fall back to the packaged spec files when no explicit path was given
    # (empty string and None are both treated as "not given").
    api_fp = api_file_path if api_file_path else get_api_data()
    ui_fp = ui_file_path if ui_file_path else get_ui_data()
    results: Dict = InitDbFromWb(
        _app=app,
        api_file_path=api_fp,
        ui_file_path=ui_fp)\
        .run()
    # Fold any non-fatal warnings into a single printable report.
    warning_str = ''
    if results['warnings']:
        warning_str += '\nWarnings:'
        warnings: dict = results['warnings']
        for k, v in warnings.items():
            warning_str += '\n{}: {}'.format(k, v)
    result = 'Successfully initialized dataset.' if results['success'] \
        else 'Failed to initialize dataset.'
    print('\n' + result + '\n' + warning_str)
@manager.command
def translations():
    """Import all translations into the database.

    Wipes existing API metadata and translations, re-imports them from the
    source workbooks, then rebuilds the cached responses.
    """
    with app.app_context():
        try:
            # TODO 2017.09.28-jkp make one transaction instead of many
            db.session.query(ApiMetadata).delete()
            db.session.query(Translation).delete()
            db.session.commit()
            db_initializer = InitDbFromWb()
            db_initializer.init_api_worksheet('translation')
            db_initializer.init_client_ui_data()
            cache_responses()
        except (StatementError, DatabaseError) as e:
            print(connection_error.format(str(e)), file=stderr)
        except RuntimeError as e:
            # cache_responses needs a running server; give a friendlier hint
            # while still reporting the original error.
            print('Error trying to execute caching. Is the server running?\n\n'
                  + '- Original error:\n'
                  + type(e).__name__ + ': ' + str(e))
@manager.command
def cache_responses():
    """Cache responses in the 'cache' table of DB."""
    with app.app_context():
        try:
            Cache.cache_datalab_init(app)
        except (StatementError, DatabaseError) as e:
            # DB connectivity problems are printed, not raised, so the CLI
            # exits cleanly with a readable message.
            print(connection_error.format(str(e)), file=stderr)
@manager.option('--path', help='Custom path for backup file')
def backup(path: str = ''):
    """Back up the database.

    Args:
        path (str): Optional destination for the backup file; when empty,
            backup_db's own default location is used.
    """
    backup_args = [path] if path else []
    backup_db(*backup_args)
@manager.option('--path', help='Path of backup file to restore, or the '
                'filename to fetch from AWS S3')
def restore(path: str):
    """Restore the database from a backup.

    Args:
        path (str): Path to backup file
    """
    # Guard clause: with a path we can restore immediately.
    if path:
        restore_db(path)
        return
    # No path given: print usage (derived from this function's own name)
    # plus the list of available backups to pick from.
    import inspect
    syntax = ' '.join([__file__,
                       inspect.currentframe().f_code.co_name,
                       '--path=PATH/TO/BACKUP'])
    print('\nMust specify path: ' + syntax, file=stderr)
    print('\nHere is a list of backups to choose from: \n',
          dict_to_pretty_json(listbackups()))
@manager.command
def list_backups():
    """List available backups"""
    print(dict_to_pretty_json(listbackups()))
@manager.command
def list_ui_data():
    """List available ui data"""
    print(dict_to_pretty_json(listuidata()))
@manager.command
def list_datasets():
    """List available datasets"""
    print(dict_to_pretty_json(listdatasets()))
@manager.command
def list_source_files():
    """List available source files: ui data and datasets"""
    # Print each category header, then delegate to its listing command.
    for header, lister in (('Datasets: ', list_datasets),
                           ('UI data files: ', list_ui_data)):
        print(header)
        lister()
@manager.command
def backup_source_files():
    """Backup available source files: ui data and datasets"""
    # Thin CLI wrapper; the actual copy logic lives in db_mgmt.
    backupsourcefiles()
@manager.command
def release():
    """Perform steps necessary for a deployment"""
    print('Deployment release task: Beginning')
    # Empty paths make initdb fall back to its default spec-file locations.
    initdb(api_file_path='', ui_file_path='')
    print('Deployment release task: Complete')
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
    args = ' '.join(sys.argv)
    # Record the server PID before Flask-Script's native 'runserver' starts,
    # so other management commands can locate/stop the process.
    if 'runserver' in args:  # native Manager command
        store_pid()
    manager.run()
| {
"repo_name": "joeflack4/pma-api",
"path": "manage.py",
"copies": "1",
"size": "5506",
"license": "mit",
"hash": -1511081847432253200,
"line_mean": 28.7621621622,
"line_max": 79,
"alpha_frac": 0.6304031965,
"autogenerated": false,
"ratio": 3.766073871409029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9896477067909029,
"avg_score": 0,
"num_lines": 185
} |
"""Application management command utility functions"""
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.db import transaction
from applications.api import derive_application_state
from applications.constants import APPROVED_APP_STATES
from applications.models import (
BootcampApplication,
ApplicationStepSubmission,
ApplicationStep,
)
from klasses.models import BootcampRun
from main.utils import is_empty_file
def fetch_bootcamp_run(run_property):
    """
    Fetches a bootcamp run based on a given property, which could refer to a few different fields
    Args:
        run_property (str): A string indicating an id, title, etc.
    Returns:
        BootcampRun: The bootcamp run that matches the given property
    """
    # All-digit strings are treated as primary keys; anything else as a title.
    lookup = (
        {"id": run_property} if run_property.isdigit()
        else {"title": run_property}
    )
    return BootcampRun.objects.get(**lookup)
def has_same_application_steps(bootcamp_id1, bootcamp_id2, ignore_order=True):
    """
    Returns True if the application steps are the same for the bootcamps indicated by the given ids
    Args:
        bootcamp_id1 (int): A bootcamp id
        bootcamp_id2 (int): Another bootcamp id
        ignore_order (bool): If set to True, the function will still return True if the two bootcamps have the same
            steps in a different order.
    Returns:
        bool: True if the application steps are the same for the bootcamps indicated by the given ids
    """
    if bootcamp_id1 == bootcamp_id2:
        return True
    # Sorting both lists by submission type makes the comparison
    # order-insensitive; otherwise each bootcamp's declared step order is kept.
    sort_field = "submission_type" if ignore_order else "step_order"

    def step_types(bootcamp_id):
        # One bootcamp's submission types, in the chosen order.
        return list(
            ApplicationStep.objects.filter(bootcamp_id=bootcamp_id)
            .order_by(sort_field)
            .values_list("submission_type", flat=True)
        )

    return step_types(bootcamp_id1) == step_types(bootcamp_id2)
def migrate_application(from_run_application, to_run):
    """
    Given an existing application, creates a new application in a different bootcamp run and "migrates" over all of
    the data from the existing application. Assumes that the 'from' run and 'to' run have the same application steps.
    Args:
        from_run_application (BootcampApplication): The bootcamp application to copy
        to_run (BootcampRun): The bootcamp run for which a new application will be created
    Returns:
        BootcampApplication: The newly-created bootcamp application that was created based on the existing one.
    Raises:
        ValidationError: If the user already has an approved/completed
            application for the target run.
    """
    has_completed_app = BootcampApplication.objects.filter(
        bootcamp_run=to_run,
        user=from_run_application.user,
        state__in=APPROVED_APP_STATES,
    ).exists()
    if has_completed_app:
        raise ValidationError(
            "An approved/completed application already exists for this user and run ({}, {})".format(
                from_run_application.user.email, to_run.title
            )
        )
    with transaction.atomic():
        # Row-locked to prevent concurrent migrations creating duplicates.
        (
            to_run_application,
            _,
        ) = BootcampApplication.objects.select_for_update().get_or_create(
            bootcamp_run=to_run, user=from_run_application.user
        )
        # Copy work history data
        # Only copy the resume when the target has none, so an existing
        # upload is never clobbered.
        if is_empty_file(to_run_application.resume_file) and not is_empty_file(
            from_run_application.resume_file
        ):
            to_run_application.resume_file.name = from_run_application.resume_file.name
        if (
            to_run_application.linkedin_url is None
            and from_run_application.linkedin_url is not None
        ):
            to_run_application.linkedin_url = from_run_application.linkedin_url
        to_run_application.resume_upload_date = from_run_application.resume_upload_date
        to_run_application.save()
        # Copy application submissions (video interview, etc.)
        from_app_step_submissions = ApplicationStepSubmission.objects.filter(
            bootcamp_application=from_run_application
        ).order_by("run_application_step__application_step__step_order")
        # Build a dict of each submission type mapped to a list of the bootcamp run application step ids that require
        # that submission type (e.g.: {"videointerviewsubmission": [1, 2], "quizsubmission": [3]}).
        to_run_step_qset = to_run.application_steps.order_by(
            "application_step__step_order"
        ).values("id", "application_step__submission_type")
        to_run_steps = defaultdict(list)
        for to_run_step in to_run_step_qset:
            submission_type = to_run_step["application_step__submission_type"]
            to_run_steps[submission_type].append(to_run_step["id"])
        # In order to make this work even if the 'from' and 'to' runs have possibly-repeated application steps in a
        # possibly-different order, keep track of the run step ids for which a submission has already been created.
        used_run_step_ids = set()
        for from_app_step_submission in from_app_step_submissions:
            submission_type = (
                from_app_step_submission.run_application_step.application_step.submission_type
            )
            # First not-yet-used target step of the same submission type.
            to_run_step_id = next(
                step_id
                for step_id in to_run_steps[submission_type]
                if step_id not in used_run_step_ids
            )
            ApplicationStepSubmission.objects.update_or_create(
                bootcamp_application=to_run_application,
                run_application_step_id=to_run_step_id,
                defaults=dict(
                    review_status=from_app_step_submission.review_status,
                    review_status_date=from_app_step_submission.review_status_date,
                    submitted_date=from_app_step_submission.submitted_date,
                    submission_status=from_app_step_submission.submission_status,
                    content_type=from_app_step_submission.content_type,
                    object_id=from_app_step_submission.object_id,
                ),
            )
            used_run_step_ids.add(to_run_step_id)
        # Set state
        to_run_application.state = derive_application_state(to_run_application)
        to_run_application.save()
    return to_run_application
| {
"repo_name": "mitodl/bootcamp-ecommerce",
"path": "applications/management/utils.py",
"copies": "1",
"size": "6610",
"license": "bsd-3-clause",
"hash": 9168199669001570000,
"line_mean": 42.2026143791,
"line_max": 117,
"alpha_frac": 0.6593040847,
"autogenerated": false,
"ratio": 4.020681265206813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5179985349906813,
"avg_score": null,
"num_lines": null
} |
"""Application manager."""
import csv
import glob
import logging
import os
from flask_script import Manager, Shell
import xlrd
from pma_api import create_app, db
from pma_api.models import (Cache, Characteristic, CharacteristicGroup,
Country, Data, EnglishString, Geography, Indicator,
SourceData, Survey, Translation)
import pma_api.api_1_0.caching as caching
# FLASK_CONFIG selects the config class by name; falls back to 'default'.
app = create_app(os.getenv('FLASK_CONFIG', 'default'))
manager = Manager(app)
def get_file_by_glob(pattern):
    """Get the first file matching a glob pattern.

    Args:
        pattern (str): A glob pattern.

    Returns:
        str: Path/to/first_file_found

    Raises:
        FileNotFoundError: If no file matches the pattern. (Previously this
            crashed with an opaque IndexError on found[0].)
    """
    found = glob.glob(pattern)
    if not found:
        raise FileNotFoundError('No file found matching pattern: %s' % pattern)
    return found[0]
# Source workbooks are located by glob so versioned filenames
# (e.g. api_data-v2.xlsx) are picked up automatically. Resolved at import
# time, so a missing file fails fast on startup.
SRC_DATA = get_file_by_glob('./data/api_data*.xlsx')
UI_DATA = get_file_by_glob('./data/ui_data*.xlsx')
# Load order matters: parent models (geography, country, ...) must be
# inserted before rows that reference them; 'data' sheets load last.
ORDERED_MODEL_MAP = (
    ('geography', Geography),
    ('country', Country),
    ('survey', Survey),
    ('char_grp', CharacteristicGroup),
    ('char', Characteristic),
    ('indicator', Indicator),
    ('translation', Translation),
    ('data', Data)
)
# Subset used when only translations are (re)imported.
TRANSLATION_MODEL_MAP = (
    ('translation', Translation),
)
def make_shell_context():
    """Make shell context.

    Returns:
        dict: Context for application manager shell.
    """
    # Expose every model under its own class name, plus the app and db.
    models = (Country, EnglishString, Translation, Survey, Indicator, Data,
              Characteristic, Cache, CharacteristicGroup, SourceData)
    context = {cls.__name__: cls for cls in models}
    context.update(app=app, db=db)
    return context
def init_from_source(path, model):
    """Initialize DB table data from csv file.

    Initialize table data from csv source data files associated with the
    corresponding data model.

    Args:
        path (str): Path to csv data file.
        model (class): SqlAlchemy model class.
    """
    with open(path, newline='', encoding='utf-8') as csvfile:
        # Each csv row maps header names to values, matching the model's
        # constructor keyword arguments.
        for row in csv.DictReader(csvfile):
            db.session.add(model(**row))
        db.session.commit()
def init_from_sheet(ws, model):
    """Initialize DB table data from XLRD Worksheet.

    The first row is treated as the header; each subsequent row becomes one
    model instance keyed by those header names.

    Args:
        ws (xlrd.sheet.Sheet): XLRD worksheet object.
        model (class): SqlAlchemy model class.

    Raises:
        Exception: Re-raises whatever the model constructor raised, after
            logging the offending row for diagnosis.
    """
    header = None
    for i, row in enumerate(ws.get_rows()):
        row = [r.value for r in row]
        if i == 0:
            header = row
        else:
            row_dict = dict(zip(header, row))
            try:
                record = model(**row_dict)
            except Exception:
                # Narrowed from a bare 'except:' (PEP 8 / E722) so
                # KeyboardInterrupt and SystemExit are not intercepted;
                # the row is logged for context, then the error re-raised.
                msg = 'Error when processing row {} of "{}". Cell values: {}'
                msg = msg.format(i + 1, ws.name, row)
                logging.error(msg)
                raise
            db.session.add(record)
    db.session.commit()
def init_from_workbook(wb, queue):
    """Init from workbook.

    Args:
        wb (xlrd.Workbook): Workbook object.
        queue (tuple): Order in which to load models.
    """
    with xlrd.open_workbook(wb) as book:
        for sheetname, model in queue:
            if sheetname == 'data':  # actually done last
                # There can be several data sheets (data, data2, ...);
                # load every sheet whose name starts with 'data'.
                for sheet in book.sheets():
                    if sheet.name.startswith('data'):
                        init_from_sheet(sheet, model)
                continue
            init_from_sheet(book.sheet_by_name(sheetname), model)
    create_wb_metadata(wb)
def create_wb_metadata(wb_path):
    """Create metadata for Excel Workbook files imported into the DB.

    Records the workbook's path in the SourceData table so imports are
    traceable.

    Args:
        wb_path (str) Path to Excel Workbook.
    """
    record = SourceData(wb_path)
    db.session.add(record)
    db.session.commit()
@manager.option('--overwrite', help='Drop tables first?', action='store_true')
def initdb(overwrite=False):
    """Create the database.

    Args:
        overwrite (bool): Overwrite database if True, else update.
    """
    with app.app_context():
        if overwrite:
            db.drop_all()
        # create_all is a no-op for tables that already exist.
        db.create_all()
        if overwrite:
            # Reload all source data and rebuild the datalab response cache.
            init_from_workbook(wb=SRC_DATA, queue=ORDERED_MODEL_MAP)
            init_from_workbook(wb=UI_DATA, queue=TRANSLATION_MODEL_MAP)
            caching.cache_datalab_init(app)
@manager.command
def translations():
    """Import anew all translations into the database.

    Deletes existing source-data records and translations, then re-imports
    translations from both workbooks.
    """
    with app.app_context():
        # TODO (jkp 2017-09-28) make this ONE transaction instead of many.
        db.session.query(SourceData).delete()
        db.session.query(Translation).delete()
        db.session.commit()
        init_from_workbook(wb=SRC_DATA, queue=TRANSLATION_MODEL_MAP)
        init_from_workbook(wb=UI_DATA, queue=TRANSLATION_MODEL_MAP)
@manager.command
def cache_responses():
    """Cache responses in the 'cache' table of DB."""
    with app.app_context():
        caching.cache_datalab_init(app)
# Register the interactive shell with the pre-populated model context.
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
    manager.run()
| {
"repo_name": "jkpr/pma-api",
"path": "manage.py",
"copies": "1",
"size": "5082",
"license": "mit",
"hash": -4368485461093442600,
"line_mean": 26.7704918033,
"line_max": 79,
"alpha_frac": 0.6082251082,
"autogenerated": false,
"ratio": 3.7953696788648243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49035947870648244,
"avg_score": null,
"num_lines": null
} |
""" Application menu setup and callable actions
"""
import os
import sys
import collections
# local imports
import nginx
import php
import mysql
import config_loader
import package_installer
import system
import ssh
import vim
import wordpress
import letsencrypt
import postfix
from teminal_colors import echo
def menu():
    """Application menu"""
    _actions = collections.OrderedDict([
        ('1', 'Basic System Setup (Hostname, Timezone, '
              'SSH, Fail2ban, Nginx, PHP, MySQL, Firewall)'),
        ('2', 'New HTTP Virtual Host Setup'),
        ('3', 'New HTTPS Virtual Host with Letsencrypt SSL setup'),
        ('4', 'New MySQL database and user'),
        ('5', 'New WordPress installation'),
        ('6', 'Quit'),
    ])
    echo('Hi, please select an action to perform, available actions:',
         'n', 'n')
    for key, label in _actions.items():
        echo(key + ': ' + label, 'i', 'n')
    # Prompt until the user enters one of the listed action keys.
    while True:
        _selection = input('Please select one action: ')
        if _selection in _actions:
            break
        echo('Please select one of the available actions: ', 'w', 'n')
    # Dispatch table mapping the chosen key to its handler.
    handlers = {
        '1': _do_system_setup,
        '2': lambda: nginx.new_host(False),
        '3': letsencrypt.create_certificate,
        '4': mysql.create_user_and_database,
        '5': wordpress.install_wordpress,
        '6': sys.exit,
    }
    handlers[_selection]()
def _do_system_setup():
    """Check and run action as stated in config.json

    Reads basic_system.json, validates that every 'data' entry is filled in,
    then runs each enabled action in the order listed in the config. Any
    failure is echoed as an error rather than raised.
    """
    try:
        cfg = config_loader.parse_config('basic_system.json')
        # run some checks for crucial data in configuration
        for data in cfg['data']:
            if cfg['data'][data] == '':
                echo(data + ' is data missing', 'e')
                sys.exit()
        # since we reached here start calling actions
        for action in cfg['actions']:
            for action_entry in action:
                # first fix locale
                if action_entry == 'fix_locale' and action[action_entry]:
                    system.fix_locale(cfg['data']['locale'])
                # install all packages
                if action_entry == 'install_packages' and action[action_entry]:
                    # do an update - upgrade first
                    system.upgrade_system()
                    package_installer.install_packages(cfg['packages'])
                # create a sudo user to remote login after disabling root
                if action_entry == 'create_user' and action[action_entry] and \
                        cfg['data']['username'] != '':
                    system.new_sudoer(cfg['data']['username'])
                # set machine's hostname
                if action_entry == 'set_hostname' and action[action_entry]:
                    system.setup_hostname(cfg['data']['hostname'])
                # update /etc/hosts file
                if action_entry == 'update_hosts' and action[action_entry]:
                    system.update_hosts(cfg['data']['hostname'],
                                        cfg['data']['domain'],
                                        cfg['data']['public_ip'])
                # setup timezone
                if action_entry == 'set_timezone' and action[action_entry]:
                    system.setup_timezone(cfg['data']['timezone'])
                # setup basic firewall rules
                if action_entry == 'config_firewall' and action[action_entry]:
                    system.setup_firewall()
                # configure Vim
                if action_entry == 'config_vim' and action[action_entry]:
                    vim.setup_vim(os.path.join('templates', 'dot-vimrc'),
                                  os.path.join('templates', 'molokai.vim'))
                # configure sshd
                if action_entry == 'config_sshd' and action[action_entry]:
                    ssh.setup_ssh(os.path.join('templates', 'sshd-config'))
                # configure Nginx
                if action_entry == 'config_nginx' and action[action_entry]:
                    nginx.setup_nginx(os.path.join('templates', 'nginx.conf'))
                # configure PHP-FPM
                if action_entry == 'config_php_fpm' and action[action_entry]:
                    php.setup_php(os.path.join('templates', 'php-fpm.ini'))
                # configure Postfix
                if action_entry == 'config_postfix' and action[action_entry]:
                    postfix.setup_postfix(
                        os.path.join('templates', 'main.cf'))
                # configure MySQL server
                if action_entry == 'config_mysql' and action[action_entry]:
                    mysql.setup_mysql()
    except Exception as e:
        # Some exception types carry .message (Python 2 style); fall back to
        # str() otherwise.
        if hasattr(e, 'message'):
            echo(e.message, 'e')
        else:
            echo(str(e), 'e')
| {
"repo_name": "stef-k/starter",
"path": "menu.py",
"copies": "1",
"size": "4989",
"license": "mit",
"hash": -5550848467688631000,
"line_mean": 40.2314049587,
"line_max": 79,
"alpha_frac": 0.5339747444,
"autogenerated": false,
"ratio": 4.418954827280779,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5452929571680779,
"avg_score": null,
"num_lines": null
} |
import os, os.path
from sqlalchemy import select
from sqlalchemy.orm import column_property
from flask import current_app as app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Modification tracking adds overhead and is unused here.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Database URL from the DB_URI env var; defaults to an on-disk SQLite file
# named after the app, one directory above the package.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_URI',
    'sqlite:///{}'.format(os.path.join(os.path.dirname(app.root_path), app.name + '.db')))
# or e.g. DB_URI=mysql+pymysql://user:pass@server/dbname
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login_manager = LoginManager()
login_manager.init_app(app)
# Endpoint flask-login redirects to when a view requires authentication.
login_manager.login_view = f'{app.name}.login'
__all__ = (
    'db',
    'UserAccount',
)
# Quickref:
# types: Integer, String(L), Text, DateTime, ForeignKey('table_name.col')
# keywords: primary_key, nullable, unique, default
# rels: db.relationship('OtherModel', backref=db.backref('mymodels'), lazy='dynamic'))
# other_id = db.Column(db.Integer, db.ForeignKey('othermodel.id'))
#class MyModel(db.Model):
# id = db.Column(db.Integer, primary_key=True)
#
#
# def __init__(self, ...):
# pass
#
#
# def __str__(self):
# return self.name
#
#
# def __repr__(self):
# return '<{} {!r}>'.format(self.__class__.__name__, self.id)
class UserAccount(db.Model):
    """Minimal user model implementing the flask-login user interface."""
    id = db.Column(db.Integer, primary_key=True)

    @property
    def is_active(self): return True

    @property
    def is_authenticated(self): return True

    def get_id(self):
        # flask-login calls get_id() with NO arguments and expects a string.
        # The previous signature, get_id(self, user_id), made every call from
        # flask-login raise TypeError, and returned a raw int besides.
        return str(self.id)
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: look up the user for the id stored in the session.

    Returns None (handled by flask-login as "not logged in") when no matching
    row exists.
    """
    return UserAccount.query.get(user_id)
| {
"repo_name": "0xquad/flask-app-template",
"path": "app.tmpl/models.py",
"copies": "1",
"size": "1705",
"license": "mit",
"hash": 4369501270483260400,
"line_mean": 24.0735294118,
"line_max": 90,
"alpha_frac": 0.682111437,
"autogenerated": false,
"ratio": 3.077617328519856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42597287655198557,
"avg_score": null,
"num_lines": null
} |
"""Application Models"""
import os
class PEP(object):
    """A single PEP document and its parsed header metadata."""

    # Number of leading lines scanned for RFC-2822-style header fields.
    LINE_READ_COUNT = 20

    def __init__(self, pep_text):
        self.pep_text = pep_text

    @classmethod
    def load_pep(cls, pep_number, directory):
        """Load pep-NNNN.txt from *directory* and return a PEP instance.

        Raises:
            ValueError: If *directory* is not an existing directory.
        """
        if not os.path.isdir(directory):
            raise ValueError('Unexpected directory value: %s' % directory)
        path = os.path.join(directory, 'pep-%04d.txt' % pep_number)
        with open(path, 'rb') as f:
            data = f.read()
        # Decode so parse_metadata operates on text. Previously the raw
        # bytes were stored, which breaks str-based parsing on Python 3.
        return cls(data.decode('utf-8'))

    def parse_metadata(self):
        """Parse 'Key: value' headers from the first LINE_READ_COUNT lines.

        Lines ending with a comma are treated as continued on the next line
        (multi-line Author fields). Known fields default to None/[] so
        to_dict() never raises AttributeError when a header is missing.
        """
        lines = self.pep_text.splitlines()
        self.metadata_dict = {}
        self.title = None
        self.number = None
        self.authors = []
        self.type = None
        self.status = None
        caches = []
        # Was a hard-coded 20; use the named class constant instead.
        for line in lines[:self.LINE_READ_COUNT]:
            caches.append(line)
            if len(line) > 1 and line[-1] == ',':
                # Continuation line; keep accumulating.
                continue
            combined = ''.join(caches)
            caches = []
            parts = combined.split(':')
            # Only unambiguous single-colon lines are headers.
            if len(parts) == 2:
                self.metadata_dict[parts[0]] = parts[1].strip()
        if 'Title' in self.metadata_dict:
            self.title = self.metadata_dict['Title']
        if 'PEP' in self.metadata_dict:
            self.number = self.metadata_dict['PEP']
        if 'Author' in self.metadata_dict:
            author_string = self.metadata_dict['Author']
            self.authors = [x.strip() for x in author_string.split(',')]
        if 'Type' in self.metadata_dict:
            self.type = self.metadata_dict['Type']
        if 'Status' in self.metadata_dict:
            self.status = self.metadata_dict['Status']

    def to_dict(self):
        """Return parsed metadata as a plain dict (call parse_metadata first)."""
        return {
            'title': self.title,
            'number': self.number,
            'authors': self.authors,
            'type': self.type,
            'status': self.status
        }
| {
"repo_name": "kennethzfeng/pep-visualize",
"path": "app/models.py",
"copies": "1",
"size": "1805",
"license": "mit",
"hash": -2785282434247313000,
"line_mean": 29.0833333333,
"line_max": 74,
"alpha_frac": 0.5152354571,
"autogenerated": false,
"ratio": 3.993362831858407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5008598288958407,
"avg_score": null,
"num_lines": null
} |
# Application Name: medvid.io Demo App
# Description: Basic medvid.io web app implemented in Python 2.7. Requires a medvid.io developer account.
# THIS APPLICATION IS FOR DEMO PURPOSES ONLY. IT HAS LIMITED SECURITY CAPABILITIES AND LIMITED ERROR HANDLING. DO NOT RUN THIS
# ON A PUBLIC WEB SERVER OR PRODUCTION ENVIRONMENT.
# Author: Andrew Richards <andrew@reeldx.com>
# Version: 1.0
# Author URI: https://github.com/ReelDx
# License: MIT
# Copyright (c) 2016 ReelDx, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import web, os
import jwt_tools, api_tools, api_tools_user, api_tools_video, api_tools_group, api_tools_policy, render_tools
# URL routing table (web.py convention): pairs of request path pattern and
# the name of the handler class, resolved from this module's globals.
urls = (
    '/', 'Index',
    '/status', 'status',
    '/user_new', 'user_new',
    '/user_list', 'user_list',
    '/video_new', 'video_new',
    '/video_upload', 'video_upload',
    '/video_list', 'video_list',
    '/video_group_list', 'video_group_list',
    '/video_play', 'video_play',
    '/video_delete', 'video_delete',
    '/video_update', 'video_update',
    '/group_list','group_list',
    '/group_new','group_new',
    '/group_delete','group_delete',
    '/policy_list', 'policy_list',
    '/policy_manage', 'policy_manage',
    '/policy_delete', 'policy_delete'
)
# The web.py application object serving the routes above.
app = web.application(urls, globals())
# Template renderer rooted at the local templates/ directory.
render = web.template.render('templates/')
# Local cache of temporary file names
# Each entry is a (uuid, local_path) tuple appended by video_upload and
# consumed by video_new when the staged file is pushed to medvid.io.
file_list = []
class Index:
    """Default landing page handler."""
    def GET(self):
        # Serve the static index template; no API interaction here.
        landing_page = render.Index()
        return landing_page
class status:
    """Generic status reporting page."""
    def GET(self):
        # Hard-coded healthy response; no API call is made.
        ok = True
        note = "Everything is ok!"
        return render.status(op_status = ok, op_message = note, op_API = None)
class user_new:
    """Load the profile of an existing user or create a new one."""
    def GET(self):
        return render.user_new()
    def POST(self):
        # Collect the submitted GUID and call the user-profile API.
        form = web.input(GUID="")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, app_user_id, user_id, response_msg, api_msg = api_tools_user.user_profile(token)
        # Render an error body on failure, otherwise the user details.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_user_new(app_user_id, user_id))
        return render.status(op_status = api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class user_list:
    """List all users in the current application."""
    def GET(self):
        # Listing users is an account-level operation, so use the account JWT.
        token = jwt_tools.build_account_jwt()
        api_status, user_data, response_msg, api_msg = api_tools_user.user_list(token)
        # Render an error body on failure, otherwise the user table.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_user_list(user_data))
        return render.user_list(op_status = api_status, op_data = final_output, op_API = render_tools.render_api_msg(api_msg))
class video_new:
    """Second step in uploading a video.

    Pushes the file previously staged by video_upload from this server to
    medvid.io, then deletes the local temporary copy and its cache entry.
    """
    def GET(self):
        return render.video_new()
    def POST(self):
        form = web.input(GUID="", MVID="", videoName="", videoDesc="", videoLoc = "", file_id ="", videoLocation="", videoSubject="", videoViewers="", videoGroups="")
        # Resolve the local temp-file path from the UUID handed out by video_upload.
        file_path = ""
        for entry in file_list:
            if entry[0] == form.file_id:
                file_path = str(entry[1])
        # Build a user-scoped JWT and push the staged file to medvid.io.
        new_jwt = jwt_tools.build_jwt(str(form.GUID))
        api_status, return_data, response_msg, api_msg = api_tools_video.video_post(new_jwt, form.MVID, form.videoName, form.videoDesc, form.videoLoc, file_path, form.videoSubject, form.videoViewers, form.videoGroups)
        # Construct render output: error text on failure, video details on success.
        if api_status == False:
            final_output = render_tools.render_error(response_msg)
        else:
            final_output = render_tools.render_video_new(return_data)
        # Remove the temp file and its cache entry. Guard against an unknown
        # file_id (file_path == "") and an already-removed file, either of
        # which previously made os.remove() raise and mask the API result;
        # the cache entry was also never purged, leaking stale paths.
        if file_path and os.path.exists(file_path):
            os.remove(file_path)
        file_list[:] = [entry for entry in file_list if entry[0] != form.file_id]
        return render.status(op_status = api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class video_upload:
    """First step in uploading a video: stage the client's file on this server."""
    def GET(self):
        return render.video_upload()
    def POST(self):
        # Receive the raw upload and hand it to the API helper for staging.
        form = web.input(videoFile={})
        upload = form['videoFile']
        save_uuid, save_path = api_tools_video.video_upload(upload.value, upload.filename)
        # Remember the UUID -> path mapping for the second step (video_new).
        file_list.append((save_uuid, save_path))
        return render.video_new(op_uuid = save_uuid)
class video_list:
    """List videos for a specific user."""
    def GET(self):
        # Render the empty search form.
        return render.video_list(op_status = True, op_mvid = "", op_guid="", op_data="", op_API = None)
    def POST(self):
        # Collect form fields and issue the API request.
        form = web.input(GUID="", MVID="")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, video_data, response_msg, api_msg = api_tools_video.video_list(token, form.GUID, form.MVID)
        # Render an error body on failure, otherwise the video table.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_video_list(video_data))
        return render.video_list(op_status = api_status, op_guid = form.GUID, op_mvid = form.MVID, op_data=final_output, op_API = render_tools.render_api_msg(api_msg))
class video_group_list:
    """List videos for a specific user within a specific group."""
    def GET(self):
        # Render the empty search form.
        return render.video_group_list(op_status = True, op_mvid = "", op_guid="", op_group_id = "", op_data="", op_API = None)
    def POST(self):
        # Collect form fields and issue the API request.
        form = web.input(GUID="", MVID="", group_id = "")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, video_data, response_msg, api_msg = api_tools_video.video_group_list(token, form.GUID, form.MVID, form.group_id)
        # Render an error body on failure, otherwise the filtered video table.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_video_group_list(video_data))
        return render.video_group_list(op_status = api_status, op_guid = form.GUID, op_mvid = form.MVID, op_group_id = form.group_id, op_data=final_output, op_API = render_tools.render_api_msg(api_msg))
class video_play:
    """Play a specific video."""
    def POST(self):
        # Resolve the playback (SMIL) URL for the requested video.
        form = web.input(file_id="", GUID="")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, smil_url, response_msg, api_msg = api_tools_video.video_play(token, form.file_id)
        # Render an error body on failure, otherwise the player.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_video_play(smil_url))
        return render.video_play(op_status=api_status, op_data = final_output, op_API = render_tools.render_api_msg(api_msg))
class video_update:
    # Update a specified video.
    def GET(self):
        # When all three query parameters are present, pre-load the edit form
        # with the video's current attributes; otherwise show a prompt.
        input_data = web.input(op_mvid = "", op_guid = "", op_vid = "")
        status_msg = "Please specifiy a user and video to update!"
        if input_data.op_mvid and input_data.op_guid and input_data.op_vid:
            # Loaded to edit a specific video: fetch it with a user-scoped JWT.
            new_jwt = jwt_tools.build_jwt(str(input_data.op_guid))
            api_status, video_data, response_msg, api_msg = api_tools_video.video_get(new_jwt, input_data.op_vid)
            if api_status:
                # Able to load and edit video: populate every form field from
                # the fetched record (missing keys render as None).
                return render.video_update(op_status = True, op_data = None, op_mvid = input_data.op_mvid, op_guid = input_data.op_guid, op_vid = input_data.op_vid, \
                op_vname = video_data.get('video_title'), op_vdesc = video_data.get('video_desc'), op_vloc = video_data.get('video_location'), \
                op_vown = video_data.get('video_owner_id'), op_vsub = video_data.get('video_subject_id'), op_vvids = video_data.get('video_user_viewer_ids'), op_vgids = video_data.get('video_group_viewer_ids'), \
                op_API = render_tools.render_api_msg(api_msg))
            else:
                # Fetch failed: show the API's error message with empty fields.
                return render.video_update(op_status = False, op_data = response_msg, op_mvid = "", op_guid = "", op_vid = "", op_vname = "", op_vdesc = "", op_vloc = "", op_vown = "", op_vsub = "", op_vvids = "", op_vgids = "", op_API = render_tools.render_api_msg(api_msg))
        # No (or incomplete) selection: prompt for user and video.
        return render.video_update(op_status = False, op_data = status_msg, op_mvid = "", op_guid = "", op_vid = "", op_vname = "", op_vdesc = "", op_vloc = "", op_vown = "", op_vsub = "", op_vvids = "", op_vgids = "", op_API = None)
    def POST(self):
        # Submit the edited attributes back to medvid.io.
        form = web.input(GUID="", MVID="", video_id = "", videoName="", videoDesc="", videoLocation="", videoOwner="", videoSubject="", videoViewers="", videoGroups="")
        # build JWT
        new_jwt = jwt_tools.build_jwt(str(form.GUID))
        # update video on medvid.io
        api_status, return_data, response_msg, api_msg = api_tools_video.video_update(new_jwt, form.video_id, form.videoName, form.videoDesc, form.videoLocation, form.videoOwner, form.videoSubject, form.videoViewers, form.videoGroups)
        # Construct Render Output: error text on failure, updated details on success.
        if api_status == False:
            final_output = render_tools.render_error(response_msg)
        else:
            final_output = render_tools.render_video_update(return_data)
        return render.status(op_status = api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class video_delete:
    """Delete a specific video."""
    def POST(self):
        # Issue the delete request with a user-scoped JWT.
        form = web.input(file_id="", GUID="")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, delete_video_id, response_msg, api_msg = api_tools_video.video_delete(token, form.file_id)
        # Render an error body on failure, otherwise a deletion confirmation.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_video_delete(delete_video_id))
        return render.status(op_status=api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class group_list:
    """List all groups in the current account."""
    def GET(self):
        # Render the empty search form.
        return render.group_list(op_status = True, op_mvid = "", op_guid="", op_data="", op_API = None)
    def POST(self):
        form = web.input(GUID="", MVID="")
        # With both a user GUID and an app MVID, run as an application user
        # (no delete controls); otherwise run as the account user.
        run_as_app_user = bool(form.GUID and form.MVID)
        token = (jwt_tools.build_jwt(str(form.GUID))
                 if run_as_app_user
                 else jwt_tools.build_account_jwt())
        group_delete = not run_as_app_user
        api_status, group_data, response_msg, api_msg = api_tools_group.group_list(token, group_delete, form.GUID, form.MVID)
        # Render an error body on failure, otherwise the group table.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_group_list(group_data))
        return render.group_list(op_status = api_status, op_guid = form.GUID, op_mvid = form.MVID, op_data = final_output, op_API = render_tools.render_api_msg(api_msg))
class group_new:
    """Create a new group for the account."""
    def GET(self):
        return render.group_new()
    def POST(self):
        form = web.input(name="")
        # Group creation is an account-level operation.
        account_jwt = jwt_tools.build_account_jwt()
        api_status, new_group_id, new_group_name, response_msg, api_msg = api_tools_group.group_new(account_jwt, form.name)
        # Render an error body on failure, otherwise the new group's details.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_group_new(new_group_id, new_group_name))
        return render.status(op_status = api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class group_delete:
    """Delete a specific group."""
    def POST(self):
        form = web.input(group_id="")
        # Group deletion is an account-level operation.
        account_jwt = jwt_tools.build_account_jwt()
        api_status, delete_group_id, response_msg, api_msg = api_tools_group.group_delete(account_jwt, form.group_id)
        # Render an error body on failure, otherwise a deletion confirmation.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_group_delete(delete_group_id))
        return render.status(op_status=api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class policy_list:
    """List access policies for a specific user."""
    def GET(self):
        # Render the empty search form.
        return render.policy_list(op_status = True, op_mvid = "", op_guid="", op_data="", op_API = None)
    def POST(self):
        # Collect form fields and issue the API request.
        form = web.input(GUID="", MVID="")
        token = jwt_tools.build_jwt(str(form.GUID))
        api_status, policy_data, response_msg, api_msg = api_tools_policy.policy_list(token, form.GUID, form.MVID)
        # Render an error body on failure, otherwise the policy table.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_policy_list(policy_data))
        return render.policy_list(op_status = api_status, op_guid = form.GUID, op_mvid = form.MVID, op_data=final_output, op_API = render_tools.render_api_msg(api_msg))
class policy_manage:
    """Create or update access policies."""
    def GET(self):
        # Echo any pre-filled query values straight back into the form.
        input_data = web.input(op_mvid = "", op_p_id = "", op_group_id = "", op_create = "", op_read = "", op_update = "", op_delete = "", op_list = "")
        passthrough = {
            name: getattr(input_data, name)
            for name in ('op_mvid', 'op_p_id', 'op_group_id', 'op_create',
                         'op_read', 'op_update', 'op_delete', 'op_list')
        }
        return render.policy_manage(op_status = True, op_data = "", op_API = None, **passthrough)
    def POST(self):
        # Submit the policy (CRUD + list flags) with the account JWT.
        form = web.input(MVID="", p_id = "", g_id = "", _create = "", _read = "", _update = "", _delete = "", _list = "")
        account_jwt = jwt_tools.build_account_jwt()
        api_status, policy_data, response_msg, api_msg = api_tools_policy.policy_update(account_jwt, form.MVID, form.p_id, form.g_id, form._create, form._read, form._update, form._delete, form._list)
        if api_status == False:
            final_output = render_tools.render_error(response_msg)
        else:
            # A present policy id means an existing policy was edited; an
            # empty id means a new one was created.
            final_output = render_tools.render_policy_manage(policy_data, not form.p_id)
        return render.status(op_status=api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
class policy_delete:
    """Delete a specific policy."""
    def POST(self):
        form = web.input(policy_id="")
        # Policy deletion is an account-level operation.
        account_jwt = jwt_tools.build_account_jwt()
        api_status, delete_policy_id, response_msg, api_msg = api_tools_policy.policy_delete(account_jwt, form.policy_id)
        # Render an error body on failure, otherwise a deletion confirmation.
        final_output = (render_tools.render_error(response_msg)
                        if api_status == False
                        else render_tools.render_policy_delete(delete_policy_id))
        return render.status(op_status=api_status, op_message = final_output, op_API = render_tools.render_api_msg(api_msg))
# Start the built-in web.py development server when run as a script.
if __name__ == "__main__":
    app.run()
| {
"repo_name": "ReelDx/medvidio-webapp-demo",
"path": "bin/app.py",
"copies": "1",
"size": "15586",
"license": "mit",
"hash": -568817257387280960,
"line_mean": 40.2328042328,
"line_max": 336,
"alpha_frac": 0.6969074811,
"autogenerated": false,
"ratio": 2.9835375191424194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.895394848319117,
"avg_score": 0.045299303410250145,
"num_lines": 378
} |
"""Application
"""
from snipping import engine
from snipping import key_bindings
from snipping import layout
from snipping import prompt_toolkit
from snipping.prompt_toolkit import buffers
from snipping.prompt_toolkit import style
from snipping.utils import fileutil
class Application(prompt_toolkit.Application):
    """prompt_toolkit application wrapping a snippet buffer.

    Extra keyword arguments (popped before delegating to the base class):
        engine: optional execution engine for the snippet (default ``None``).
        snippet_file: path of the snippet being edited; a ``None`` value
            falls back to ``'snippet.py'``.
    """
    def __init__(self, **kwargs):
        self.engine = kwargs.pop('engine', None)
        snippet_file = kwargs.pop('snippet_file')
        if snippet_file is None:
            snippet_file = 'snippet.py'
        self.snippet_file = snippet_file
        super(Application, self).__init__(**kwargs)

    @property
    def snippet(self):
        """Display name of the snippet (base name of the snippet file)."""
        return fileutil.base_name(self.snippet_file)

    def buffer_display(self, name):
        """Return the label to display for the buffer called `name`.

        The default buffer is shown under the snippet's name; any other
        buffer is shown under its own name. Fixed to compare buffer names
        with ``==`` instead of ``is``: identity comparison of strings only
        worked when the name happened to be the interned constant object.
        """
        if name == buffers.DEFAULT_BUFFER:
            return self.snippet
        return name
def get_application(init_file=None):
    """Build a fully wired Application, optionally seeded from `init_file`."""
    # Pre-load the default buffer with the file's contents, when available.
    init_contents = {}
    if init_file is not None:
        initial_text = fileutil.read_from_file(init_file)
        if initial_text is not None:
            init_contents[buffers.DEFAULT_BUFFER] = initial_text
    # Wire up engine, key bindings, layout and buffers in that order.
    app_engine = engine.Engine(from_file=init_file)
    contents = app_engine.contents()
    manager = key_bindings.registry()
    screen = layout.create_layout(contents, manager)
    buffer_mapping = buffers.get_buffer_mapping(contents, init_contents=init_contents)
    return Application(
        engine=app_engine,
        snippet_file=init_file,
        layout=screen,
        buffers=buffer_mapping,
        style=style.default_style,
        key_bindings_registry=manager.registry,
        use_alternate_screen=True,
    )
| {
"repo_name": "yittg/Snipping",
"path": "snipping/application.py",
"copies": "1",
"size": "1711",
"license": "mit",
"hash": 6018094522869136000,
"line_mean": 31.2830188679,
"line_max": 74,
"alpha_frac": 0.6440677966,
"autogenerated": false,
"ratio": 4.103117505995204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5247185302595204,
"avg_score": null,
"num_lines": null
} |
""" application page """
from PyQt5.QtWidgets import QWidget
from PyQt5 import QtCore, QtGui, QtWidgets
class Application(QWidget):
    """Application form page: user-type selector, basic info, and a
    money-deposit (card) section, followed by a confirm button.

    The widget construction follows the Qt Designer generated-code style
    (setupUi/retranslateUi split).
    """
    def __init__(self, parent=None):
        super(Application, self).__init__(parent)
        self.setupUi(self)
    def setupUi(self, QWidget):
        # NOTE(review): the parameter name `QWidget` shadows the imported
        # QWidget class within this method; kept as-is for compatibility.
        QWidget.setObjectName("QWidget")
        QWidget.setEnabled(True)
        QWidget.resize(400, 560)
        # Top-level vertical layout holding the three group boxes and button.
        self.verticalLayout = QtWidgets.QVBoxLayout(QWidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # --- Group 1: user-type radio buttons (Client / Developer) ---
        self.groupBox = QtWidgets.QGroupBox(QWidget)
        self.groupBox.setObjectName("groupBox")
        self.formLayout = QtWidgets.QFormLayout(self.groupBox)
        self.formLayout.setObjectName("formLayout")
        self.radioButton = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton.setObjectName("radioButton")
        # "Client" is pre-selected by default.
        self.radioButton.setChecked(True)
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.radioButton)
        self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_2.setObjectName("radioButton_2")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.radioButton_2)
        self.verticalLayout.addWidget(self.groupBox)
        # --- Group 2: basic information (email, address) ---
        self.groupBox_2 = QtWidgets.QGroupBox(QWidget)
        self.groupBox_2.setObjectName("groupBox_2")
        self.formLayout_2 = QtWidgets.QFormLayout(self.groupBox_2)
        self.formLayout_2.setObjectName("formLayout_2")
        self.label = QtWidgets.QLabel(self.groupBox_2)
        self.label.setObjectName("label")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
        self.lineEdit = QtWidgets.QLineEdit(self.groupBox_2)
        self.lineEdit.setObjectName("lineEdit")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
        self.label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.label_2.setObjectName("label_2")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox_2)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
        self.verticalLayout.addWidget(self.groupBox_2)
        # --- Group 3: money deposit (card number, code, expiry, amount) ---
        self.groupBox_3 = QtWidgets.QGroupBox(QWidget)
        self.groupBox_3.setObjectName("groupBox_3")
        self.formLayout_3 = QtWidgets.QFormLayout(self.groupBox_3)
        self.formLayout_3.setObjectName("formLayout_3")
        self.label_3 = QtWidgets.QLabel(self.groupBox_3)
        self.label_3.setObjectName("label_3")
        self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_3)
        self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit_3)
        self.label_4 = QtWidgets.QLabel(self.groupBox_3)
        self.label_4.setObjectName("label_4")
        self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)
        self.lineEdit_4 = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_4)
        self.label_5 = QtWidgets.QLabel(self.groupBox_3)
        self.label_5.setObjectName("label_5")
        self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.lineEdit_5 = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEdit_5)
        self.label_6 = QtWidgets.QLabel(self.groupBox_3)
        self.label_6.setObjectName("label_6")
        self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_6)
        self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_6.setObjectName("lineEdit_6")
        self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_6)
        self.verticalLayout.addWidget(self.groupBox_3)
        # --- Confirm button, horizontally centered ---
        self.pushButton = QtWidgets.QPushButton(QWidget)
        self.pushButton.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton, 0, QtCore.Qt.AlignHCenter)
        self.retranslateUi(QWidget)
        QtCore.QMetaObject.connectSlotsByName(QWidget)
    def retranslateUi(self, QWidget):
        # Assign all user-visible (translatable) strings in one place.
        _translate = QtCore.QCoreApplication.translate
        QWidget.setWindowTitle(_translate("QWidget", "Application"))
        self.groupBox.setTitle(_translate("QWidget", "Chose one of user type:"))
        self.radioButton.setText(_translate("QWidget", "Client"))
        self.radioButton_2.setText(_translate("QWidget", "Developer"))
        self.groupBox_2.setTitle(_translate("QWidget", "Basis information"))
        self.label.setText(_translate("QWidget", "Email: "))
        self.label_2.setText(_translate("QWidget", "Adress:"))
        self.groupBox_3.setTitle(_translate("QWidget", "Money Deposit:"))
        self.label_3.setText(_translate("QWidget", "Card Number:"))
        self.label_4.setText(_translate("QWidget", "Security Code:"))
        self.label_5.setText(_translate("QWidget", "Valid Thru:"))
        self.label_6.setText(_translate("QWidget", "Deposit Amount:"))
        self.pushButton.setText(_translate("QWidget", "Confirm"))
| {
"repo_name": "whuang001/cts",
"path": "gui/Application.py",
"copies": "1",
"size": "5436",
"license": "mit",
"hash": -7157054241950525000,
"line_mean": 55.0412371134,
"line_max": 89,
"alpha_frac": 0.6994113319,
"autogenerated": false,
"ratio": 3.7567380787836906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49561494106836906,
"avg_score": null,
"num_lines": null
} |
"""Application Programming Interface (API)"""
import re
import logging
from xml.etree.ElementTree import (
Element,
fromstring
)
from collections import OrderedDict
from io import BytesIO
from http import HTTPStatus
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Set,
Sequence,
Union,
Tuple,
)
from urllib.parse import quote_plus, urlparse
from warnings import warn
import requests
import retrying
import pydicom
logger = logging.getLogger(__name__)
def load_json_dataset(dataset: Dict[str, dict]) -> pydicom.dataset.Dataset:
    """Loads a DICOM Data Set given in DICOM JSON format.

    Deprecated: delegates to :meth:`pydicom.dataset.Dataset.from_json` and
    emits a ``DeprecationWarning``.

    Parameters
    ----------
    dataset: Dict[str, dict]
        mapping where keys are DICOM *Tags* and values are mappings of DICOM
        *VR* and *Value* key-value pairs

    Returns
    -------
    pydicom.dataset.Dataset
        data set
    """
    deprecation_note = (
        'Function "load_json_dataset()" will be deprecated in the next major '
        'version. Use "pydicom.dataset.Dataset.from_json()" instead.'
    )
    warn(deprecation_note, category=DeprecationWarning)
    return pydicom.dataset.Dataset.from_json(dataset)
def _load_xml_dataset(dataset: Element) -> pydicom.dataset.Dataset:
    """Loads DICOM Data Set in DICOM XML format.

    Parameters
    ----------
    dataset: xml.etree.ElementTree.Element
        parsed element tree

    Returns
    -------
    pydicom.dataset.Dataset
        data set
    """
    ds = pydicom.Dataset()
    for element in dataset:
        keyword = element.attrib['keyword']
        vr = element.attrib['vr']
        value: Optional[Union[List[Any], str]]
        if vr == 'SQ':
            # Sequence items are nested data sets.
            value = [
                _load_xml_dataset(item)
                for item in element
            ]
        else:
            children = list(element)
            if len(children) == 1:
                # Element.text is None for an empty XML element; the
                # previous unconditional .strip() raised AttributeError.
                text = children[0].text
                value = text.strip() if text is not None else None
            elif len(children) > 1:
                value = [
                    (v.text.strip() if v.text is not None else None)
                    for v in children
                ]
            else:
                value = None
        setattr(ds, keyword, value)
    return ds
class DICOMwebClient(object):
"""Class for connecting to and interacting with a DICOMweb RESTful service.
Attributes
----------
base_url: str
unique resource locator of the DICOMweb service
protocol: str
name of the protocol, e.g. ``"https"``
host: str
IP address or DNS name of the machine that hosts the server
port: int
number of the port to which the server listens
url_prefix: str
URL path prefix for DICOMweb services (part of `base_url`)
qido_url_prefix: Union[str, None]
URL path prefix for QIDO-RS (not part of `base_url`)
wado_url_prefix: Union[str, None]
URL path prefix for WADO-RS (not part of `base_url`)
stow_url_prefix: Union[str, None]
URL path prefix for STOW-RS (not part of `base_url`)
delete_url_prefix: Union[str, None]
URL path prefix for DELETE (not part of `base_url`)
chunk_size: int
maximum number of bytes that should be transferred per data chunk
when streaming data from the server using chunked transfer encoding
(used by ``iter_*()`` methods as well as the ``store_instances()``
method)
"""
def set_chunk_size(self, chunk_size: int) -> None:
"""Sets value of `chunk_size` attribute.
Parameters
----------
chunk_size: int
maximum number of bytes that should be transferred per data chunk
when streaming data from the server using chunked transfer encoding
(used by ``iter_*()`` methods as well as the ``store_instances()``
method)
"""
self._chunk_size = chunk_size
def set_http_retry_params(
self,
retry: bool = True,
max_attempts: int = 5,
wait_exponential_multiplier: int = 1000,
retriable_error_codes: Tuple[HTTPStatus, ...] = (
HTTPStatus.TOO_MANY_REQUESTS,
HTTPStatus.REQUEST_TIMEOUT,
HTTPStatus.SERVICE_UNAVAILABLE,
HTTPStatus.GATEWAY_TIMEOUT,
)
) -> None:
"""Sets parameters for HTTP retrying logic. These parameters are passed
to @retrying.retry which wraps the HTTP requests and retries all
responses that return an error code defined in |retriable_error_codes|.
The retrying method uses exponential back off using the multiplier
|wait_exponential_multiplier| for a max attempts defined by
|max_attempts|.
Parameters
----------
retry: bool, optional
whether HTTP retrying should be performed, if it is set to
``False``, the rest of the parameters are ignored.
max_attempts: int, optional
the maximum number of request attempts.
wait_exponential_multiplier: float, optional
exponential multiplier applied to delay between attempts in ms.
retriable_error_codes: tuple, optional
tuple of HTTP error codes to retry if raised.
"""
self._http_retry = retry
if retry:
self._max_attempts = max_attempts
self._wait_exponential_multiplier = wait_exponential_multiplier
self._http_retrable_errors = retriable_error_codes
else:
self._max_attempts = 1
self._wait_exponential_multiplier = 1
self._http_retrable_errors = ()
def _is_retriable_http_error(
self,
response: requests.models.Response
) -> bool:
"""Determines whether the given response's status code is retriable.
Parameters
----------
response: requests.models.Response
HTTP response object returned by the request method.
Returns
-------
bool
Whether the HTTP request should be retried.
"""
return response.status_code in self._http_retrable_errors
    def __init__(
        self,
        url: str,
        session: Optional[requests.Session] = None,
        qido_url_prefix: Optional[str] = None,
        wado_url_prefix: Optional[str] = None,
        stow_url_prefix: Optional[str] = None,
        delete_url_prefix: Optional[str] = None,
        proxies: Optional[Dict[str, str]] = None,
        headers: Optional[Dict[str, str]] = None,
        callback: Optional[Callable] = None,
        chunk_size: int = 10**6
    ) -> None:
        """
        Parameters
        ----------
        url: str
            base unique resource locator consisting of protocol, hostname
            (IP address or DNS name) of the machine that hosts the server and
            optionally port number and path prefix
        session: requests.Session, optional
            session required to make connection to the DICOMweb service
            (see session_utils.py to create a valid session if necessary)
        qido_url_prefix: str, optional
            URL path prefix for QIDO RESTful services
        wado_url_prefix: str, optional
            URL path prefix for WADO RESTful services
        stow_url_prefix: str, optional
            URL path prefix for STOW RESTful services
        delete_url_prefix: str, optional
            URL path prefix for DELETE RESTful services
        proxies: Dict[str, str], optional
            mapping of protocol or protocol + host to the URL of a proxy server
        headers: Dict[str, str], optional
            custom headers that should be included in request messages,
            e.g., authentication tokens
        callback: Callable, optional
            callback function to manipulate responses generated from requests
            (see `requests event hooks <http://docs.python-requests.org/en/master/user/advanced/#event-hooks>`_)
        chunk_size: int, optional
            maximum number of bytes that should be transferred per data chunk
            when streaming data from the server using chunked transfer encoding
            (used by ``iter_*()`` methods as well as the ``store_instances()``
            method); defaults to ``10**6`` bytes (10MB)

        Raises
        ------
        ValueError
            when `url` is malformed or uses an unsupported scheme

        Warning
        -------
        Modifies the passed `session` (in particular header fields),
        so be careful when reusing the session outside the scope of an instance.

        Warning
        -------
        Choose the value of `chunk_size` carefully. A small value may cause
        significant network communication and message parsing overhead.
        """  # noqa
        # Create a fresh session when the caller did not supply one.
        if session is None:
            logger.debug('initialize HTTP session')
            session = requests.session()
        self._session = session
        self.base_url = url
        self.qido_url_prefix = qido_url_prefix
        self.wado_url_prefix = wado_url_prefix
        self.stow_url_prefix = stow_url_prefix
        self.delete_url_prefix = delete_url_prefix
        # This regular expression extracts the scheme and host name from the URL
        # and optionally the port number and prefix:
        # <scheme>://<host>(:<port>)(/<prefix>)
        # For example: "https://mydomain.com:443/wado-rs", where
        # scheme="https", host="mydomain.com", port=443, prefix="wado-rs"
        # NOTE(review): the character class [https]+ also matches strings such
        # as "shttp"; an invalid scheme is only rejected below when no explicit
        # port is given — confirm whether stricter validation is wanted.
        pattern = re.compile(
            r'(?P<scheme>[https]+)://(?P<host>[^/:]+)'
            r'(?::(?P<port>\d+))?(?:(?P<prefix>/[\w/]+))?'
        )
        match = re.match(pattern, self.base_url)
        if match is None:
            raise ValueError(f'Malformed URL: {self.base_url}')
        try:
            self.protocol = match.group('scheme')
            self.host = match.group('host')
            port = match.group('port')
        except AttributeError:
            raise ValueError(f'Malformed URL: {self.base_url}')
        # Fall back to the scheme's default port when none is given.
        if port:
            self.port = int(port)
        else:
            if self.protocol == 'http':
                self.port = 80
            elif self.protocol == 'https':
                self.port = 443
            else:
                raise ValueError(
                    f'URL scheme "{self.protocol}" is not supported.'
                )
        # Path component of the URL (includes any service prefix).
        url_components = urlparse(url)
        self.url_prefix = url_components.path
        if headers is not None:
            self._session.headers.update(headers)
        if proxies is not None:
            self._session.proxies = proxies
        if callback is not None:
            self._session.hooks = {'response': [callback, ]}
        self._chunk_size = chunk_size
        # Initialize the retry machinery with its documented defaults.
        self.set_http_retry_params()
def _parse_qido_query_parameters(
self,
fuzzymatching: Optional[bool] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
fields: Optional[Sequence[str]] = None,
search_filters: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Parses query parameters for inclusion into a HTTP query string
of a QIDO-RS request message.
Parameters
----------
fuzzymatching: bool, optional
whether fuzzy semantic matching should be performed
limit: int, optional
maximum number of results that should be returned
offset: int, optional
number of results that should be skipped
fields: Sequence[str], optional
names of fields (attributes) that should be included in results
search_filters: Dict[str, Any], optional
search filter criteria as key-value pairs, where *key* is a keyword
or a tag of the attribute and *value* is the expected value that
should match
Returns
-------
collections.OrderedDict
sanitized and sorted query parameters
"""
params: Dict[str, Union[int, str, List[str]]] = {}
if limit is not None:
if not(isinstance(limit, int)):
raise TypeError('Parameter "limit" must be an integer.')
if limit < 0:
raise ValueError('Parameter "limit" must not be negative.')
params['limit'] = limit
if offset is not None:
if not(isinstance(offset, int)):
raise TypeError('Parameter "offset" must be an integer.')
if offset < 0:
raise ValueError('Parameter "offset" must not be negative.')
params['offset'] = offset
if fuzzymatching is not None:
if not(isinstance(fuzzymatching, bool)):
raise TypeError('Parameter "fuzzymatching" must be boolean.')
if fuzzymatching:
params['fuzzymatching'] = 'true'
else:
params['fuzzymatching'] = 'false'
if fields is not None:
includefields = []
for field in set(fields):
if not(isinstance(field, str)):
raise TypeError('Elements of "fields" must be a string.')
includefields.append(field)
params['includefield'] = includefields
if search_filters is not None:
for field, criterion in search_filters.items():
if not(isinstance(field, str)):
raise TypeError(
'Keys of "search_filters" must be strings.'
)
# TODO: datetime?
params[field] = criterion
# Sort query parameters to facilitate unit testing
return OrderedDict(sorted(params.items()))
def _get_service_url(self, service_name: str) -> str:
"""Constructes the URL of a DICOMweb RESTful service.
Parameters
----------
service_name: str
name of the RESTful service
(choices: ``"qido"``, ``"wado"``, or ``"stow"``)
Returns
-------
str
full URL for the given service
"""
service_url = self.base_url
if service_name == 'qido':
if self.qido_url_prefix is not None:
service_url += f'/{self.qido_url_prefix}'
elif service_name == 'wado':
if self.wado_url_prefix is not None:
service_url += f'/{self.wado_url_prefix}'
elif service_name == 'stow':
if self.stow_url_prefix is not None:
service_url += f'/{self.stow_url_prefix}'
elif service_name == 'delete':
if self.delete_url_prefix is not None:
service_url += f'/{self.delete_url_prefix}'
else:
raise ValueError(
f'Unsupported DICOMweb service "{service_name}".'
)
return service_url
def _get_studies_url(
self,
service_name: str,
study_instance_uid: Optional[str] = None
) -> str:
"""Constructes the URL for study-level requests.
Parameters
----------
service_name: str
name of the RESTful service
(choices: ``"qido"``, ``"wado"``, or ``"stow"``)
study_instance_uid: str, optional
unique study identifier
Returns
-------
str
URL
"""
if study_instance_uid is not None:
url = '{service_url}/studies/{study_instance_uid}'
else:
url = '{service_url}/studies'
service_url = self._get_service_url(service_name)
return url.format(
service_url=service_url, study_instance_uid=study_instance_uid
)
def _get_series_url(
self,
service_name: str,
study_instance_uid: Optional[str] = None,
series_instance_uid: Optional[str] = None
) -> str:
"""Constructes the URL for series-level requests.
Parameters
----------
service_name: str
name of the RESTful service
(choices: ``"qido"``, ``"wado"``, or ``"stow"``)
study_instance_uid: str, optional
unique study identifier
series_instance_uid: str, optional
unique series identifier
Returns
-------
str
URL
"""
if study_instance_uid is not None:
url = self._get_studies_url(service_name, study_instance_uid)
if series_instance_uid is not None:
url += '/series/{series_instance_uid}'
else:
url += '/series'
else:
if series_instance_uid is not None:
logger.warning(
'series UID is ignored because study UID is undefined'
)
url = '{service_url}/series'
service_url = self._get_service_url(service_name)
return url.format(
service_url=service_url, series_instance_uid=series_instance_uid
)
def _get_instances_url(
self,
service_name: str,
study_instance_uid: Optional[str] = None,
series_instance_uid: Optional[str] = None,
sop_instance_uid: Optional[str] = None
) -> str:
"""Constructes the URL for instance-level requests.
Parameters
----------
service_name: str
name of the RESTful service
(choices: ``"qido"``, ``"wado"``, or ``"stow"``)
study_instance_uid: str, optional
unique study identifier
series_instance_uid: str, optional
unique series identifier
sop_instance_uid: str, optional
unique instance identifier
Returns
-------
str
URL
"""
if study_instance_uid is not None and series_instance_uid is not None:
url = self._get_series_url(
service_name,
study_instance_uid,
series_instance_uid
)
url += '/instances'
if sop_instance_uid is not None:
url += '/{sop_instance_uid}'
else:
if sop_instance_uid is not None:
logger.warning(
'SOP Instance UID is ignored because Study/Series '
'Instance UID are undefined'
)
url = '{service_url}/instances'
service_url = self._get_service_url(service_name)
return url.format(
service_url=service_url, sop_instance_uid=sop_instance_uid
)
def _build_query_string(self, params: Dict[str, Any]) -> str:
"""Builds a HTTP query string for a GET request message.
Parameters
----------
params: dict
query parameters as mapping of key-value pairs;
in case a key should be included more than once with different
values, values need to be provided in form of an iterable (e.g.,
``{"key": ["value1", "value2"]}`` will result in
``"?key=value1&key=value2"``)
Returns
-------
str
query string
"""
components = []
for key, value in params.items():
if isinstance(value, (list, tuple, set)):
for v in value:
c = '='.join([key, quote_plus(str(v))])
components.append(c)
else:
c = '='.join([key, quote_plus(str(value))])
components.append(c)
if components:
return '?{}'.format('&'.join(components))
else:
return ''
def _http_get(
self,
url: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
stream: bool = False
) -> requests.models.Response:
"""Performs a HTTP GET request.
Parameters
----------
url: str
unique resource locator
params: Dict[str, Any], optional
query parameters
headers: Dict[str, str], optional
HTTP request message headers
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
requests.models.Response
HTTP response message
"""
@retrying.retry(
retry_on_result=self._is_retriable_http_error,
wait_exponential_multiplier=self._wait_exponential_multiplier,
stop_max_attempt_number=self._max_attempts
)
def _invoke_get_request(
url: str,
headers: Optional[Dict[str, str]] = None
) -> requests.models.Response:
logger.debug(f'GET: {url} {headers}')
# Setting stream allows for retrieval of data using chunked transer
# encoding. The iter_content() method can be used to iterate over
# chunks. If stream is not set, iter_content() will return the
# full payload at once.
return self._session.get(url=url, headers=headers, stream=stream)
if headers is None:
headers = {}
if not stream:
headers['Host'] = self.host
if params is None:
params = {}
url += self._build_query_string(params)
if stream:
logger.debug('use chunked transfer encoding')
response = _invoke_get_request(url, headers)
logger.debug(f'request status code: {response.status_code}')
response.raise_for_status()
if response.status_code == 204:
logger.warning('empty response')
# The server may not return all results, but rather include a warning
# header to notify that client that there are remaining results.
# (see DICOM Part 3.18 Section 6.7.1.2)
if 'Warning' in response.headers:
logger.warning(response.headers['Warning'])
return response
def _http_get_application_json(
self,
url: str,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> List[Dict[str, dict]]:
"""Performs a HTTP GET request that accepts "applicaton/dicom+json"
or "application/json" media type.
Parameters
----------
url: str
unique resource locator
params: Dict[str], optional
query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
List[str, dict]
content of HTTP message body in DICOM JSON format
"""
content_type = 'application/dicom+json, application/json'
response = self._http_get(
url,
params=params,
headers={'Accept': content_type},
stream=stream
)
if response.content:
decoded_response = response.json()
# All metadata resources are expected to be sent as a JSON array of
# DICOM data sets. However, some origin servers may incorrectly
# sent an individual data set.
if isinstance(decoded_response, dict):
return [decoded_response]
return decoded_response
return []
@classmethod
def _extract_part_content(cls, part: bytes) -> Union[bytes, None]:
"""Extracts the content of a single part of a multipart message
by stripping the headers.
Parameters
----------
part: bytes
an individual part of a multipart message
Returns
-------
Union[bytes, None]
content of the message part or ``None`` in case the message
part is empty
Raises
------
ValueError
when the message part is not CRLF CRLF terminated
"""
if part in (b'', b'--', b'\r\n') or part.startswith(b'--\r\n'):
return None
idx = part.index(b'\r\n\r\n')
if idx > -1:
return part[idx + 4:]
raise ValueError('Message part does not contain CRLF CRLF')
    def _decode_multipart_message(
        self,
        response: requests.Response,
        stream: bool
    ) -> Iterator[bytes]:
        """Decodes extracted parts of a HTTP multipart response message.

        The message content is consumed incrementally (in chunks when
        `stream` is set) and split on the part delimiter derived from the
        "boundary" parameter of the Content-Type header field.

        Parameters
        ----------
        response: requests.Response
            HTTP response message
        stream: bool
            whether data should be streamed (i.e., requested using chunked
            transfer encoding)

        Returns
        -------
        Iterator[bytes]
            message parts

        Raises
        ------
        ValueError
            when the response does not have "multipart/related" media type

        """
        logger.debug('decode multipart message')
        logger.debug('decode message header')
        # NOTE(review): header lookup relies on case-insensitive access
        # ('content-type'); requests provides this - confirm for other
        # response objects passed in.
        content_type = response.headers['content-type']
        media_type, *ct_info = [ct.strip() for ct in content_type.split(';')]
        if media_type.lower() != 'multipart/related':
            raise ValueError(
                f'Unexpected media type: "{media_type}". '
                'Expected "multipart/related".'
            )
        # Find the boundary parameter among the Content-Type parameters.
        for item in ct_info:
            attr, _, value = item.partition('=')
            if attr.lower() == 'boundary':
                boundary = value.strip('"').encode('utf-8')
                break
        else:
            # Some servers set the media type to multipart but don't provide a
            # boundary and just send a single frame in the body - return as is.
            yield response.content
            return
        # Per RFC 2046, each part is preceded by CRLF "--" boundary.
        marker = b''.join((b'--', boundary))
        delimiter = b''.join((b'\r\n', marker))
        data = b''
        j = 0
        # The context manager ensures the underlying connection is released
        # once the body has been consumed.
        with response:
            logger.debug('decode message content')
            if stream:
                iterator = response.iter_content(chunk_size=self._chunk_size)
            else:
                iterator = iter([response.content])
            for i, chunk in enumerate(iterator):
                if stream:
                    logger.debug(f'decode message content chunk #{i}')
                # Accumulate chunks in a buffer, since a delimiter may span
                # a chunk boundary; emit every complete part in the buffer.
                data += chunk
                while delimiter in data:
                    logger.debug(f'decode message part #{j}')
                    part, data = data.split(delimiter, maxsplit=1)
                    content = self._extract_part_content(part)
                    j += 1
                    if content is not None:
                        logger.debug(
                            f'extracted {len(content)} bytes from part #{j}'
                        )
                        yield content
        # Whatever remains after the last delimiter is the final part
        # (or closing-boundary residue, which extraction maps to None).
        content = self._extract_part_content(data)
        if content is not None:
            yield content
@classmethod
def _encode_multipart_message(
cls,
content: Sequence[bytes],
content_type: str
) -> bytes:
"""Encodes the payload of a HTTP multipart response message.
Parameters
----------
content: Sequence[bytes]
content of each part
content_type: str
content type of the multipart HTTP request message
Returns
-------
bytes
HTTP request message body
"""
media_type, *ct_info = [ct.strip() for ct in content_type.split(';')]
if media_type != 'multipart/related':
raise ValueError(
'No "multipart/related" usage found in content type field'
)
parameters = {}
for item in ct_info:
name, value = item.split('=')
parameters[name.lower()] = value.strip('"')
try:
content_type = parameters['type']
except KeyError:
raise ValueError(
'No "type" parameter in found in content-type field'
)
try:
boundary = parameters['boundary']
except KeyError:
raise ValueError(
'No "boundary" parameter in found in content-type field'
)
body = b''
for part in content:
body += f'\r\n--{boundary}'.encode('utf-8')
body += f'\r\nContent-Type: {content_type}\r\n\r\n'.encode('utf-8')
body += part
body += f'\r\n--{boundary}--'.encode('utf-8')
return body
@classmethod
def _assert_media_type_is_valid(cls, media_type: str):
"""Asserts that a given media type is valid.
Parameters
----------
media_type: str
media type
Raises
------
ValueError
when `media_type` is invalid
"""
error_message = f'Not a valid media type: "{media_type}"'
sep_index = media_type.find('/')
if sep_index == -1:
raise ValueError(error_message)
media_type_type = media_type[:sep_index]
if media_type_type not in {'application', 'image', 'text', 'video'}:
raise ValueError(error_message)
if media_type.find('/', sep_index + 1) > 0:
raise ValueError(error_message)
@classmethod
def _build_range_header_field_value(
cls,
byte_range: Optional[Tuple[int, int]]
) -> str:
"""Builds a range header field value for HTTP GET request messages.
Parameters
----------
byte_range: Tuple[int], optional
start and end of byte range
Returns
-------
str
range header field value
"""
if byte_range is not None:
start = str(byte_range[0])
try:
end = str(byte_range[1])
except IndexError:
end = ''
range_header_field_value = f'bytes={start}-{end}'
else:
range_header_field_value = 'bytes=0-'
return range_header_field_value
@classmethod
def _build_accept_header_field_value(
cls,
media_types: Union[Tuple[Union[str, Tuple[str, str]]], None],
supported_media_types: Set[str]
) -> str:
"""Builds an accept header field value for HTTP GET request messages.
Parameters
----------
media_types: Union[Tuple[str], None]
acceptable media types
supported_media_types: Set[str]
supported media types
Returns
-------
str
accept header field value
"""
if not isinstance(media_types, (list, tuple, set)):
raise TypeError(
'Acceptable media types must be provided as a sequence.'
)
field_value_parts = []
for media_type in media_types:
if not isinstance(media_type, str):
raise TypeError(
f'Media type "{media_type}" is not supported for '
'requested resource'
)
cls._assert_media_type_is_valid(media_type)
if media_type not in supported_media_types:
raise ValueError(
f'Media type "{media_type}" is not supported for '
'requested resource'
)
field_value_parts.append(media_type)
return ', '.join(field_value_parts)
    @classmethod
    def _build_multipart_accept_header_field_value(
        cls,
        media_types: Union[Tuple[Union[str, Tuple[str, str]]], None],
        supported_media_types: Union[Dict[str, str], Set[str]]
    ) -> str:
        """Builds an accept header field value for HTTP GET multipart request
        messages.

        Parameters
        ----------
        media_types: Union[Tuple[Union[str, Tuple[str, str]]], None]
            acceptable media types and optionally the UIDs of the corresponding
            transfer syntaxes
        supported_media_types: Union[Dict[str, str], Set[str]]
            set of supported media types or mapping of transfer syntaxes
            to their corresponding media types

        Returns
        -------
        str
            accept header field value

        Raises
        ------
        TypeError
            when `media_types` is not a sequence
        ValueError
            when a media type or transfer syntax is invalid or unsupported

        """
        if not isinstance(media_types, (list, tuple, set)):
            raise TypeError(
                'Acceptable media types must be provided as a sequence.'
            )
        field_value_parts = []
        for item in media_types:
            # Each item is either a plain media type string or a
            # (media type, transfer syntax UID) pair.
            if isinstance(item, str):
                media_type = item
                transfer_syntax_uid = None
            else:
                media_type = item[0]
                try:
                    transfer_syntax_uid = item[1]
                except IndexError:
                    transfer_syntax_uid = None
            cls._assert_media_type_is_valid(media_type)
            field_value = f'multipart/related; type="{media_type}"'
            if isinstance(supported_media_types, dict):
                # Mapping form: keys are transfer syntax UIDs, values are
                # the media types they may be delivered as.
                if media_type not in supported_media_types.values():
                    # Unknown concrete media types are rejected; wildcards
                    # such as "image/*" or "image/" are allowed through.
                    if not (media_type.endswith('/*') or
                            media_type.endswith('/')):
                        raise ValueError(
                            f'Media type "{media_type}" is not supported for '
                            'requested resource.'
                        )
                if transfer_syntax_uid is not None:
                    # "*" means any transfer syntax and needs no check.
                    if transfer_syntax_uid != '*':
                        if transfer_syntax_uid not in supported_media_types:
                            raise ValueError(
                                f'Transfer syntax "{transfer_syntax_uid}" '
                                'is not supported for requested resource.'
                            )
                        expected_media_type = supported_media_types[
                            transfer_syntax_uid
                        ]
                        if expected_media_type != media_type:
                            # The requested media type disagrees with the one
                            # mapped to the transfer syntax. Tolerate a
                            # type-level wildcard of the same type family...
                            have_same_type = (
                                cls._parse_media_type(media_type)[0] ==
                                cls._parse_media_type(expected_media_type)[0]
                            )
                            if (have_same_type and
                                    (media_type.endswith('/*') or
                                     media_type.endswith('/'))):
                                # NOTE(review): this skips appending the field
                                # value for the wildcard item entirely -
                                # confirm this omission is intended.
                                continue
                            # ...otherwise the combination is inconsistent.
                            raise ValueError(
                                f'Transfer syntax "{transfer_syntax_uid}" '
                                'is not supported for media '
                                f'type "{media_type}".'
                            )
                    field_value += f'; transfer-syntax={transfer_syntax_uid}'
            else:
                # Set form: plain membership check.
                if media_type not in supported_media_types:
                    raise ValueError(
                        f'Media type "{media_type}" is not supported for '
                        'requested resource.'
                    )
            field_value_parts.append(field_value)
        return ', '.join(field_value_parts)
def _http_get_multipart_application_dicom(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> Iterator[pydicom.dataset.Dataset]:
"""Performs a HTTP GET request that accepts a multipart message with
"applicaton/dicom" media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes, (defaults to
``("application/dicom", "1.2.840.10008.1.2.1")``)
params: Dict[str, Any], optional
additional HTTP GET query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[pydicom.dataset.Dataset]
DICOM data sets
"""
default_media_type = 'application/dicom'
supported_media_types = {
'1.2.840.10008.1.2.1': default_media_type,
'1.2.840.10008.1.2.5': default_media_type,
'1.2.840.10008.1.2.4.50': default_media_type,
'1.2.840.10008.1.2.4.51': default_media_type,
'1.2.840.10008.1.2.4.57': default_media_type,
'1.2.840.10008.1.2.4.70': default_media_type,
'1.2.840.10008.1.2.4.80': default_media_type,
'1.2.840.10008.1.2.4.81': default_media_type,
'1.2.840.10008.1.2.4.90': default_media_type,
'1.2.840.10008.1.2.4.91': default_media_type,
'1.2.840.10008.1.2.4.92': default_media_type,
'1.2.840.10008.1.2.4.93': default_media_type,
'1.2.840.10008.1.2.4.100': default_media_type,
'1.2.840.10008.1.2.4.101': default_media_type,
'1.2.840.10008.1.2.4.102': default_media_type,
'1.2.840.10008.1.2.4.103': default_media_type,
'1.2.840.10008.1.2.4.104': default_media_type,
'1.2.840.10008.1.2.4.105': default_media_type,
'1.2.840.10008.1.2.4.106': default_media_type,
}
if media_types is None:
media_types = (default_media_type, )
headers = {
'Accept': self._build_multipart_accept_header_field_value(
media_types, supported_media_types
),
}
response = self._http_get(
url,
params=params,
headers=headers,
stream=stream
)
# The response of the Retrieve Instance transaction is supposed to
# contain a message body with Content-Type "multipart/related", even
# if it only contains a single part. However, some origin servers
# violate the standard and send the part non-encapsulated.
# Unfortunately, an error was introduced into the standard via
# Supplement 183 as part of re-documentation efforts, which stated that
# this behavior was allowed. We will support this behavior at least
# until the standard is fixed via a Correction Proposal 2040.
if response.headers['Content-Type'].startswith('application/dicom'):
warning_message = (
'message sent by origin server in response to GET request '
'of Retrieve Instance transaction was not compliant with the '
'DICOM standard, message body shall have Content-Type '
'\'multipart/related; type="application/dicom"\' rather than '
'"application/dicom"'
)
warn(warning_message, category=UserWarning)
part = pydicom.dcmread(BytesIO(response.content))
return iter([part])
return (
pydicom.dcmread(BytesIO(part))
for part in self._decode_multipart_message(response, stream=stream)
)
def _http_get_multipart_application_octet_stream(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> Iterator[bytes]:
"""Performs a HTTP GET request that accepts a multipart message with
"applicaton/octet-stream" media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes (defaults to
``("application/octet-stream", "1.2.840.10008.1.2.1")``)
byte_range: Tuple[int, int], optional
start and end of byte range
params: Dict[str, Any], optional
additional HTTP GET query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[bytes]
content of HTTP message body parts
"""
default_media_type = 'application/octet-stream'
supported_media_types = {
'1.2.840.10008.1.2.1': default_media_type,
}
if media_types is None:
media_types = (default_media_type, )
headers = {
'Accept': self._build_multipart_accept_header_field_value(
media_types,
supported_media_types
),
}
if byte_range is not None:
headers['Range'] = self._build_range_header_field_value(byte_range)
response = self._http_get(
url,
params=params,
headers=headers,
stream=stream
)
return self._decode_multipart_message(response, stream=stream)
def _http_get_multipart_image(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None,
params: Optional[Dict[str, Any]] = None,
rendered: bool = False,
stream: bool = False
) -> Iterator[bytes]:
"""Performs a HTTP GET request that accepts a multipart message with
an image media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Tuple[int, int], optional
start and end of byte range
params: Dict[str, Any], optional
additional HTTP GET query parameters
rendered: bool, optional
whether resource should be requested using rendered media types
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[bytes]
content of HTTP message body parts
"""
headers = {}
supported_media_types: Union[set, dict]
if rendered:
supported_media_types = {
'image/jpeg',
'image/gif',
'image/png',
'image/jp2',
}
else:
supported_media_types = {
'1.2.840.10008.1.2.5': 'image/x-dicom-rle',
'1.2.840.10008.1.2.4.50': 'image/jpeg',
'1.2.840.10008.1.2.4.51': 'image/jpeg',
'1.2.840.10008.1.2.4.57': 'image/jpeg',
'1.2.840.10008.1.2.4.70': 'image/jpeg',
'1.2.840.10008.1.2.4.80': 'image/x-jls',
'1.2.840.10008.1.2.4.81': 'image/x-jls',
'1.2.840.10008.1.2.4.90': 'image/jp2',
'1.2.840.10008.1.2.4.91': 'image/jp2',
'1.2.840.10008.1.2.4.92': 'image/jpx',
'1.2.840.10008.1.2.4.93': 'image/jpx',
}
if byte_range is not None:
headers['Range'] = self._build_range_header_field_value(
byte_range
)
headers['Accept'] = self._build_multipart_accept_header_field_value(
media_types,
supported_media_types
)
response = self._http_get(
url,
params=params,
headers=headers,
stream=stream
)
return self._decode_multipart_message(response, stream=stream)
def _http_get_multipart_video(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None,
params: Optional[Dict[str, Any]] = None,
rendered: bool = False,
stream: bool = False
) -> Iterator[bytes]:
"""Performs a HTTP GET request that accepts a multipart message with
a video media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]]
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Tuple[int, int], optional
start and end of byte range
params: Dict[str, Any], optional
additional HTTP GET query parameters
rendered: bool, optional
whether resource should be requested using rendered media types
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[bytes]
content of HTTP message body parts
"""
headers = {}
supported_media_types: Union[set, dict]
if rendered:
supported_media_types = {
'video/',
'video/*',
'video/mpeg2',
'video/mp4',
'video/H265',
}
else:
supported_media_types = {
'1.2.840.10008.1.2.4.100': 'video/mpeg2',
'1.2.840.10008.1.2.4.101': 'video/mpeg2',
'1.2.840.10008.1.2.4.102': 'video/mp4',
'1.2.840.10008.1.2.4.103': 'video/mp4',
'1.2.840.10008.1.2.4.104': 'video/mp4',
'1.2.840.10008.1.2.4.105': 'video/mp4',
'1.2.840.10008.1.2.4.106': 'video/mp4',
}
if byte_range is not None:
headers['Range'] = self._build_range_header_field_value(
byte_range
)
headers['Accept'] = self._build_multipart_accept_header_field_value(
media_types,
supported_media_types
)
response = self._http_get(
url,
params=params,
headers=headers,
stream=stream
)
return self._decode_multipart_message(response, stream=stream)
def _http_get_application_pdf(
self,
url: str,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> bytes:
"""Performs a HTTP GET request that accepts a message with
"applicaton/pdf" media type.
Parameters
----------
url: str
unique resource locator
params: Dict[str], optional
additional HTTP GET query parameters
rendered: bool, optional
whether resource should be requested using rendered media types
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
bytes
content of HTTP message body
"""
response = self._http_get(
url,
params=params,
headers={'Accept': 'application/pdf'},
stream=stream
)
return response.content
def _http_get_image(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> bytes:
"""Performs a HTTP GET request that accepts a message with an image
media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
image media type (choices: ``"image/jpeg"``, ``"image/gif"``,
``"image/jp2"``, ``"image/png"``)
params: Dict[str, Any], optional
additional HTTP GET query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
bytes
content of HTTP message body
"""
supported_media_types = {
'image/',
'image/*',
'image/jpeg',
'image/jp2',
'image/gif',
'image/png',
}
accept_header_field_value = self._build_accept_header_field_value(
media_types,
supported_media_types
)
response = self._http_get(
url,
params=params,
headers={'Accept': accept_header_field_value},
stream=stream
)
return response.content
def _http_get_video(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> bytes:
"""Performs a HTTP GET request that accepts a message with an video
media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
video media type (choices: ``"video/mpeg"``, ``"video/mp4"``,
``"video/H265"``)
params: Dict[str, Any], optional
additional HTTP GET query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
bytes
content of HTTP message body
"""
supported_media_types = {
'video/',
'video/*',
'video/mpeg',
'video/mp4',
'video/H265',
}
accept_header_field_value = self._build_accept_header_field_value(
media_types,
supported_media_types
)
response = self._http_get(
url,
params=params,
headers={'Accept': accept_header_field_value},
stream=stream
)
return response.content
def _http_get_text(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None,
stream: bool = False
) -> bytes:
"""Performs a HTTP GET request that accepts a message with an text
media type.
Parameters
----------
url: str
unique resource locator
media_types: Tuple[Union[str, Tuple[str, str]]], optional
text media type (choices: ``"text/html"``, ``"text/plain"``,
``"text/xml"``, ``"text/rtf"``)
params: Dict[str, Any], optional
additional HTTP GET query parameters
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
bytes
content of HTTP message body
"""
supported_media_types = {
'text/',
'text/*',
'text/html',
'text/plain',
'text/rtf',
'text/xml',
}
accept_header_field_value = self._build_accept_header_field_value(
media_types, supported_media_types
)
response = self._http_get(
url,
params=params,
headers={'Accept': accept_header_field_value},
stream=stream
)
return response.content
    def _http_post(
        self,
        url: str,
        data: bytes,
        headers: Dict[str, str]
    ) -> requests.models.Response:
        """Performs a HTTP POST request.

        Payloads larger than the configured chunk size are sent using
        chunked transfer encoding. The response body is parsed (JSON or
        XML DICOM data set) to log any instances whose storage failed.

        Parameters
        ----------
        url: str
            unique resource locator
        data: bytes
            HTTP request message payload
        headers: Dict[str, str]
            HTTP request message headers

        Returns
        -------
        requests.models.Response
            HTTP response message

        Raises
        ------
        ValueError
            when the response message has an unexpected media type

        """
        def serve_data_chunks(data):
            # Generator that slices the payload into chunks for chunked
            # transfer encoding.
            for i, offset in enumerate(range(0, len(data), self._chunk_size)):
                logger.debug(f'serve data chunk #{i}')
                end = offset + self._chunk_size
                yield data[offset:end]

        @retrying.retry(
            retry_on_result=self._is_retriable_http_error,
            wait_exponential_multiplier=self._wait_exponential_multiplier,
            stop_max_attempt_number=self._max_attempts
        )
        def _invoke_post_request(
            url: str,
            data: bytes,
            headers: Optional[Dict[str, str]] = None
        ) -> requests.models.Response:
            logger.debug(f'POST: {url} {headers}')
            return self._session.post(url, data=data, headers=headers)

        if len(data) > self._chunk_size:
            logger.info('store data in chunks using chunked transfer encoding')
            chunked_headers = dict(headers)
            chunked_headers['Transfer-Encoding'] = 'chunked'
            chunked_headers['Cache-Control'] = 'no-cache'
            chunked_headers['Connection'] = 'Keep-Alive'
            data_chunks = serve_data_chunks(data)
            response = _invoke_post_request(url, data_chunks, chunked_headers)
        else:
            # There is a bug in the requests library that sets the Host header
            # again when using chunked transer encoding. Apparently this is
            # tricky to fix (see https://github.com/psf/requests/issues/4392).
            # As a temporary workaround we are only setting the header field,
            # if we don't use chunked transfer encoding.
            headers['Host'] = self.host
            response = _invoke_post_request(url, data, headers)
        logger.debug(f'request status code: {response.status_code}')
        response.raise_for_status()
        # NOTE(review): raise_for_status() raises for all status codes for
        # which response.ok is False, so this branch looks unreachable -
        # confirm whether a warning on partial success was intended instead.
        if not response.ok:
            logger.warning('storage was not successful for all instances')
        payload = response.content
        content_type = response.headers['Content-Type']
        if content_type in ('application/dicom+json', 'application/json', ):
            dataset = load_json_dataset(payload)
        elif content_type in ('application/dicom+xml', 'application/xml', ):
            tree = fromstring(payload)
            dataset = _load_xml_dataset(tree)
        else:
            raise ValueError('Response message has unexpected media type.')
        # Report per-instance storage failures listed by the origin server.
        failed_sop_sequence = getattr(dataset, 'FailedSOPSequence', [])
        for failed_sop_item in failed_sop_sequence:
            logger.error(
                'storage of instance {} failed: "{}"'.format(
                    failed_sop_item.ReferencedSOPInstanceUID,
                    failed_sop_item.FailureReason
                )
            )
        return response
def _http_post_multipart_application_dicom(
self,
url: str,
data: Sequence[bytes]
) -> pydicom.Dataset:
"""Performs a HTTP POST request with a multipart payload with
"application/dicom" media type.
Parameters
----------
url: str
unique resource locator
data: Sequence[bytes]
DICOM data sets that should be posted
Returns
-------
pydicom.dataset.Dataset
information about stored instances
"""
content_type = (
'multipart/related; '
'type="application/dicom"; '
'boundary="0f3cf5c0-70e0-41ef-baef-c6f9f65ec3e1"'
)
content = self._encode_multipart_message(data, content_type)
response = self._http_post(
url,
content,
headers={'Content-Type': content_type}
)
if response.content:
content_type = response.headers['Content-Type']
if content_type in ('application/dicom+json', 'application/json', ):
return pydicom.Dataset.from_json(response.json())
elif content_type in ('application/dicom+xml', 'application/xml', ):
tree = fromstring(response.content)
return _load_xml_dataset(tree)
return pydicom.Dataset()
def _http_delete(self, url: str):
"""Performs a HTTP DELETE request to the specified URL.
Parameters
----------
url: str
unique resource locator
Returns
-------
requests.models.Response
HTTP response message
"""
@retrying.retry(
retry_on_result=self._is_retriable_http_error,
wait_exponential_multiplier=self._wait_exponential_multiplier,
stop_max_attempt_number=self._max_attempts
)
def _invoke_delete_request(url: str) -> requests.models.Response:
return self._session.delete(url)
response = _invoke_delete_request(url)
if response.status_code == HTTPStatus.METHOD_NOT_ALLOWED:
logger.error(
'Resource could not be deleted. The origin server may not support'
'deletion or you may not have the necessary permissions.')
response.raise_for_status()
return response
def search_for_studies(
self,
fuzzymatching: Optional[bool] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
fields: Optional[Sequence[str]] = None,
search_filters: Optional[Dict[str, Any]] = None
) -> List[Dict[str, dict]]:
"""Searches for DICOM studies.
Parameters
----------
fuzzymatching: bool, optional
whether fuzzy semantic matching should be performed
limit: int, optional
maximum number of results that should be returned
offset: int, optional
number of results that should be skipped
fields: Sequence[str], optional
names of fields (attributes) that should be included in results
search_filters: dict, optional
search filter criteria as key-value pairs, where *key* is a keyword
or a tag of the attribute and *value* is the expected value that
should match
Returns
-------
List[Dict[str, dict]]
study representations
(see `Study Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2>`_)
Note
----
The server may only return a subset of search results. In this case,
a warning will notify the client that there are remaining results.
Remaining results can be requested via repeated calls using the
`offset` parameter.
""" # noqa
logger.info('search for studies')
url = self._get_studies_url('qido')
params = self._parse_qido_query_parameters(
fuzzymatching, limit, offset, fields, search_filters
)
return self._http_get_application_json(url, params)
@classmethod
def _parse_media_type(cls, media_type: str) -> Tuple[str, str]:
"""Parses media type and extracts its type and subtype.
Parameters
----------
media_type: str
media type, e.g., ``"image/jpeg"``
Returns
-------
Tuple[str, str]
type and subtype of media type (``("image", "jpeg")``)
Raises
------
ValueError
when `media_type` is invalid
"""
cls._assert_media_type_is_valid(media_type)
media_type_type, media_type_subtype = media_type.split('/')
return media_type_type, media_type_subtype
@classmethod
def _get_common_media_type(
cls,
media_types: Tuple[Union[str, Tuple[str, str]]]
) -> str:
"""Gets common type of acceptable media types and asserts that only
one type of a given category of DICOM data (``"application/dicom"``),
compressed bulkdata (``"image/"``, ``"video/"``) or uncompressed
bulkdata (``"application/octet-stream"``).
For example, ``("image/jpeg", "image/jp2")`` or
``("application/dicom", "application/dicom")`` will pass and
return ``"image/"`` or ``"application/dicom"``, respectively. However,
``("image/jpeg", "video/mpeg2")`` or
``("application/dicom", "application/octet-stream")``
will raise an exception.
Parameters
----------
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
Returns
-------
str
common type of media type category
Raises
------
ValueError
when no media types are provided or more than one type is provided
"""
if media_types is None:
raise ValueError('No acceptable media types provided.')
common_media_types = []
for item in media_types:
if isinstance(item, str):
media_type = item
else:
media_type = item[0]
if media_type.startswith('application'):
common_media_types.append(media_type)
else:
mtype, msubtype = cls._parse_media_type(media_type)
common_media_types.append(f'{mtype}/')
if len(set(common_media_types)) == 0:
raise ValueError(
'No common acceptable media type could be identified.'
)
elif len(set(common_media_types)) > 1:
raise ValueError('Acceptable media types must have the same type.')
return common_media_types[0]
def _get_bulkdata(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None,
stream: bool = False,
) -> Iterator[bytes]:
"""Gets bulk data items from a given location.
Parameters
----------
url: str
location of the bulk data
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Tuple[int], optional
start and end of byte range
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[bytes]
bulk data items
"""
if media_types is None:
return self._http_get_multipart_application_octet_stream(
url, media_types, byte_range=byte_range, stream=stream
)
common_media_type = self._get_common_media_type(media_types)
if common_media_type == 'application/octet-stream':
return self._http_get_multipart_application_octet_stream(
url, media_types, byte_range=byte_range, stream=stream
)
elif common_media_type.startswith('image'):
return self._http_get_multipart_image(
url, media_types, byte_range=byte_range, stream=stream
)
elif common_media_type.startswith('video'):
return self._http_get_multipart_video(
url, media_types, byte_range=byte_range, stream=stream
)
else:
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of bulkdata.'
)
def retrieve_bulkdata(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None
) -> List[bytes]:
"""Retrieves bulk data from a given location.
Parameters
----------
url: str
location of the bulk data
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Tuple[int], optional
start and end of byte range
Returns
-------
Iterator[bytes]
bulk data items
"""
return list(
self._get_bulkdata(
url=url,
media_types=media_types,
byte_range=byte_range,
stream=False
)
)
def iter_bulkdata(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
byte_range: Optional[Tuple[int, int]] = None
) -> Iterator[bytes]:
"""Iterates over bulk data items from a given location.
Parameters
----------
url: str
location of the bulk data
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Tuple[int], optional
start and end of byte range
Returns
-------
Iterator[bytes]
bulk data items
Note
----
Data is streamed from the DICOMweb server.
"""
return self._get_bulkdata(
url=url,
media_types=media_types,
byte_range=byte_range,
stream=True
)
def _get_study(
self,
study_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
stream: bool = False
) -> Iterator[pydicom.dataset.Dataset]:
"""Gets instances of a given DICOM study.
Parameters
----------
study_instance_uid: str
unique study identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxescceptable transfer syntax UIDs
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[pydicom.dataset.Dataset]
data sets
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of study.'
)
url = self._get_studies_url('wado', study_instance_uid)
if media_types is None:
return self._http_get_multipart_application_dicom(
url,
stream=stream
)
common_media_type = self._get_common_media_type(media_types)
if common_media_type != 'application/dicom':
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of a study. It must be "application/dicom".'
)
return self._http_get_multipart_application_dicom(
url,
media_types=media_types,
stream=stream
)
def retrieve_study(
self,
study_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
) -> List[pydicom.dataset.Dataset]:
"""Retrieves instances of a given DICOM study.
Parameters
----------
study_instance_uid: str
unique study identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
acceptable transfer syntaxes
Returns
-------
List[pydicom.dataset.Dataset]
data sets
Note
----
Instances are by default retrieved using Implicit VR Little Endian
transfer syntax (Transfer Syntax UID ``"1.2.840.10008.1.2"``). This
means that Pixel Data of Image instances will be retrieved
uncompressed. To retrieve instances in any available transfer syntax
(typically the one in which instances were originally stored), specify
acceptable transfer syntaxes using the wildcard
``("application/dicom", "*")``.
"""
return list(
self._get_study(
study_instance_uid=study_instance_uid,
media_types=media_types,
stream=False
)
)
def iter_study(
self,
study_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
) -> Iterator[pydicom.dataset.Dataset]:
"""Iterates over instances of a given DICOM study.
Parameters
----------
study_instance_uid: str
unique study identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
acceptable transfer syntaxes
Returns
-------
Iterator[pydicom.dataset.Dataset]
data sets
Note
----
Instances are by default retrieved using Implicit VR Little Endian
transfer syntax (Transfer Syntax UID ``"1.2.840.10008.1.2"``). This
means that Pixel Data of Image instances will be retrieved
uncompressed. To retrieve instances in any available transfer syntax
(typically the one in which instances were originally stored), specify
acceptable transfer syntaxes using the wildcard
``("application/dicom", "*")``.
Note
----
Data is streamed from the DICOMweb server.
"""
return self._get_study(
study_instance_uid=study_instance_uid,
media_types=media_types,
stream=True
)
def retrieve_study_metadata(
self,
study_instance_uid: str
) -> List[Dict[str, dict]]:
"""Retrieves metadata of instances of a given DICOM study.
Parameters
----------
study_instance_uid: str
unique study identifier
Returns
-------
List[Dict[str, dict]]
metadata in DICOM JSON format
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of '
'study metadata.'
)
url = self._get_studies_url('wado', study_instance_uid)
url += '/metadata'
return self._http_get_application_json(url)
    def delete_study(
        self,
        study_instance_uid: str
    ) -> requests.models.Response:
        """Deletes specified study and its respective instances.

        Parameters
        ----------
        study_instance_uid: str
            unique study identifier

        Returns
        -------
        requests.models.Response
            HTTP response object returned.

        Note
        ----
        The Delete Study resource is not part of the DICOM standard
        and may not be supported by all origin servers.

        Warning
        -------
        This method performs a DELETE and should be used with caution.

        """
        if study_instance_uid is None:
            raise ValueError(
                'Study Instance UID is required for deletion of a study.'
            )
        url = self._get_studies_url('delete', study_instance_uid)
        # Response is returned so callers can inspect the status code.
        return self._http_delete(url)
def _assert_uid_format(self, uid: str) -> None:
"""Checks whether a DICOM UID has the correct format.
Parameters
----------
uid: str
DICOM UID
Raises
------
TypeError
when `uid` is not a string
ValueError
when `uid` doesn't match the regular expression pattern
``"^[.0-9]+$"``
"""
if not isinstance(uid, str):
raise TypeError('DICOM UID must be a string.')
pattern = re.compile('^[.0-9]+$')
if not pattern.search(uid):
raise ValueError('DICOM UID has invalid format.')
def search_for_series(
self,
study_instance_uid: Optional[str] = None,
fuzzymatching: Optional[bool] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
fields: Optional[Sequence[str]] = None,
search_filters: Optional[Dict[str, Any]] = None
) -> List[Dict[str, dict]]:
"""Searches for DICOM series.
Parameters
----------
study_instance_uid: str, optional
unique study identifier
fuzzymatching: bool, optional
whether fuzzy semantic matching should be performed
limit: int, optional
maximum number of results that should be returned
offset: int, optional
number of results that should be skipped
fields: Union[list, tuple, set], optional
names of fields (attributes) that should be included in results
search_filters: Dict[str, Union[str, int, float]], optional
search filter criteria as key-value pairs, where *key* is a keyword
or a tag of the attribute and *value* is the expected value that
should match
Returns
-------
List[Dict[str, dict]]
series representations
(see `Series Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2a>`_)
Note
----
The server may only return a subset of search results. In this case,
a warning will notify the client that there are remaining results.
Remaining results can be requested via repeated calls using the
`offset` parameter.
""" # noqa
if study_instance_uid is not None:
self._assert_uid_format(study_instance_uid)
logger.info(f'search for series of study "{study_instance_uid}"')
url = self._get_series_url('qido', study_instance_uid)
params = self._parse_qido_query_parameters(
fuzzymatching, limit, offset, fields, search_filters
)
return self._http_get_application_json(url, params)
def _get_series(
self,
study_instance_uid: str,
series_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
stream: bool = False
) -> Iterator[pydicom.dataset.Dataset]:
"""Gets instances of a given DICOM series.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[pydicom.dataset.Dataset]
data sets
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of series.'
)
logger.info(
f'retrieve series "{series_instance_uid}" '
f'of study "{study_instance_uid}"'
)
self._assert_uid_format(study_instance_uid)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of series.'
)
self._assert_uid_format(series_instance_uid)
url = self._get_series_url(
'wado', study_instance_uid, series_instance_uid
)
if media_types is None:
return self._http_get_multipart_application_dicom(
url,
stream=stream
)
common_media_type = self._get_common_media_type(media_types)
if common_media_type != 'application/dicom':
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of a series. It must be "application/dicom".'
)
return self._http_get_multipart_application_dicom(
url,
media_types=media_types,
stream=stream
)
def retrieve_series(
self,
study_instance_uid: str,
series_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None
) -> List[pydicom.dataset.Dataset]:
"""Retrieves instances of a given DICOM series.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
acceptable transfer syntaxes
Returns
-------
List[pydicom.dataset.Dataset]
data sets
Note
----
Instances are by default retrieved using Implicit VR Little Endian
transfer syntax (Transfer Syntax UID ``"1.2.840.10008.1.2"``). This
means that Pixel Data of Image instances will be retrieved
uncompressed. To retrieve instances in any available transfer syntax
(typically the one in which instances were originally stored), specify
acceptable transfer syntaxes using the wildcard
``("application/dicom", "*")``.
"""
return list(
self._get_series(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
media_types=media_types,
stream=False
)
)
def iter_series(
self,
study_instance_uid: str,
series_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None
) -> Iterator[pydicom.dataset.Dataset]:
"""Iterates over retrieved instances of a given DICOM series.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
acceptable transfer syntaxes
Returns
-------
Iterator[pydicom.dataset.Dataset]
data sets
Note
----
Instances are by default retrieved using Implicit VR Little Endian
transfer syntax (Transfer Syntax UID ``"1.2.840.10008.1.2"``). This
means that Pixel Data of Image instances will be retrieved
uncompressed. To retrieve instances in any available transfer syntax
(typically the one in which instances were originally stored), specify
acceptable transfer syntaxes using the wildcard
``("application/dicom", "*")``.
Note
----
Data is streamed from the DICOMweb server.
"""
return self._get_series(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
media_types=media_types,
stream=True
)
    def retrieve_series_metadata(
        self,
        study_instance_uid: str,
        series_instance_uid: str,
    ) -> List[Dict[str, dict]]:
        """Retrieves metadata for instances of a given DICOM series.

        Parameters
        ----------
        study_instance_uid: str
            unique study identifier
        series_instance_uid: str
            unique series identifier

        Returns
        -------
        List[Dict[str, dict]]
            metadata in DICOM JSON format (one item per instance)

        """
        if study_instance_uid is None:
            raise ValueError(
                'Study Instance UID is required for retrieval of '
                'series metadata.'
            )
        self._assert_uid_format(study_instance_uid)
        if series_instance_uid is None:
            raise ValueError(
                'Series Instance UID is required for retrieval of '
                'series metadata.'
            )
        logger.info(
            f'retrieve metadata of series "{series_instance_uid}" '
            f'of study "{study_instance_uid}"'
        )
        self._assert_uid_format(series_instance_uid)
        url = self._get_series_url(
            'wado', study_instance_uid, series_instance_uid
        )
        url += '/metadata'
        return self._http_get_application_json(url)
    def retrieve_series_rendered(
        self,
        study_instance_uid: str,
        series_instance_uid: str,
        media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
        params: Optional[Dict[str, Any]] = None
    ) -> bytes:
        """Retrieves an individual, server-side rendered DICOM series.

        Parameters
        ----------
        study_instance_uid: str
            unique study identifier
        series_instance_uid: str
            unique series identifier
        media_types: Tuple[Union[str, Tuple[str, str]]], optional
            acceptable media types (choices: ``"image/jpeg"``, ``"image/jp2"``,
            ``"image/gif"``, ``"image/png"``, ``"video/gif"``, ``"video/mp4"``,
            ``"video/h265"``, ``"text/html"``, ``"text/plain"``,
            ``"text/xml"``, ``"text/rtf"``, ``"application/pdf"``)
        params: Dict[str, Any], optional
            additional parameters relevant for given `media_type`,
            e.g., ``{"quality": 95}`` for ``"image/jpeg"``

        Returns
        -------
        bytes
            rendered series

        """
        if study_instance_uid is None:
            raise ValueError(
                'Study Instance UID is required for retrieval of '
                'rendered series.'
            )
        if series_instance_uid is None:
            raise ValueError(
                'Series Instance UID is required for retrieval of '
                'rendered series.'
            )
        logger.info(
            f'retrieve rendered series "{series_instance_uid}" '
            f'of study "{study_instance_uid}"'
        )
        url = self._get_series_url(
            'wado', study_instance_uid, series_instance_uid
        )
        url += '/rendered'
        if media_types is None:
            # Without acceptable media types, let the server pick the format.
            response = self._http_get(url, params)
            return response.content
        common_media_type = self._get_common_media_type(media_types)
        if common_media_type.startswith('image'):
            return self._http_get_image(url, media_types, params)
        elif common_media_type.startswith('video'):
            return self._http_get_video(url, media_types, params)
        elif common_media_type.startswith('text'):
            return self._http_get_text(url, media_types, params)
        elif common_media_type == 'application/pdf':
            return self._http_get_application_pdf(url, params)
        else:
            raise ValueError(
                f'Media type "{common_media_type}" is not supported for '
                'retrieval of rendered series.'
            )
    def delete_series(
        self,
        study_instance_uid: str,
        series_instance_uid: str
    ) -> requests.models.Response:
        """Deletes specified series and its respective instances.

        Parameters
        ----------
        study_instance_uid: str
            unique study identifier
        series_instance_uid: str
            unique series identifier

        Returns
        -------
        requests.models.Response
            HTTP response object returned.

        Note
        ----
        The Delete Series resource is not part of the DICOM standard
        and may not be supported by all origin servers.

        Warning
        -------
        This method performs a DELETE and should be used with caution.

        """
        if study_instance_uid is None:
            raise ValueError(
                'Study Instance UID is required for deletion of a series.'
            )
        if series_instance_uid is None:
            raise ValueError(
                'Series Instance UID is required for deletion of a series.'
            )
        logger.info(
            f'delete series "{series_instance_uid}" '
            f'of study "{study_instance_uid}"'
        )
        url = self._get_series_url('delete', study_instance_uid,
                                   series_instance_uid)
        # Response is returned so callers can inspect the status code.
        return self._http_delete(url)
def search_for_instances(
self,
study_instance_uid: Optional[str] = None,
series_instance_uid: Optional[str] = None,
fuzzymatching: Optional[bool] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
fields: Optional[Sequence[str]] = None,
search_filters: Optional[Dict[str, Any]] = None
) -> List[Dict[str, dict]]:
"""Searches for DICOM instances.
Parameters
----------
study_instance_uid: str, optional
unique study identifier
series_instance_uid: str, optional
unique series identifier
fuzzymatching: bool, optional
whether fuzzy semantic matching should be performed
limit: int, optional
maximum number of results that should be returned
offset: int, optional
number of results that should be skipped
fields: Union[list, tuple, set], optional
names of fields (attributes) that should be included in results
search_filters: Dict[str, Union[str, int, float]], optional
search filter criteria as key-value pairs, where *key* is a keyword
or a tag of the attribute and *value* is the expected value that
should match
Returns
-------
List[Dict[str, dict]]
instance representations
(see `Instance Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2b>`_)
Note
----
The server may only return a subset of search results. In this case,
a warning will notify the client that there are remaining results.
Remaining results can be requested via repeated calls using the
`offset` parameter.
""" # noqa
message = 'search for instances'
if series_instance_uid is not None:
message += f' of series "{series_instance_uid}"'
if study_instance_uid is not None:
self._assert_uid_format(study_instance_uid)
message += f' of study "{study_instance_uid}"'
logger.info(message)
url = self._get_instances_url(
'qido', study_instance_uid, series_instance_uid
)
params = self._parse_qido_query_parameters(
fuzzymatching, limit, offset, fields, search_filters
)
return self._http_get_application_json(url, params)
def retrieve_instance(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
) -> pydicom.dataset.Dataset:
"""Retrieves an individual DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
acceptable transfer syntaxes
Returns
-------
pydicom.dataset.Dataset
data set
Note
----
Instances are by default retrieved using Implicit VR Little Endian
transfer syntax (Transfer Syntax UID ``"1.2.840.10008.1.2"``). This
means that Pixel Data of Image instances will be retrieved
uncompressed. To retrieve instances in any available transfer syntax
(typically the one in which instances were originally stored), specify
acceptable transfer syntaxes using the wildcard
``("application/dicom", "*")``.
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of instance.'
)
self._assert_uid_format(study_instance_uid)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of instance.'
)
self._assert_uid_format(series_instance_uid)
if sop_instance_uid is None:
raise ValueError(
'SOP Instance UID is required for retrieval of instance.'
)
logger.info(
f'retrieve instance "{sop_instance_uid}" '
f'of series "{series_instance_uid}" '
f'of study "{study_instance_uid}"'
)
self._assert_uid_format(sop_instance_uid)
url = self._get_instances_url(
'wado', study_instance_uid, series_instance_uid, sop_instance_uid
)
if media_types is not None:
common_media_type = self._get_common_media_type(media_types)
if common_media_type != 'application/dicom':
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of an instance. It must be "application/dicom".'
)
iterator = self._http_get_multipart_application_dicom(url, media_types)
instances = list(iterator)
if len(instances) > 1:
# This should not occur, but safety first.
raise ValueError('More than one instance returned by origin server')
return instances[0]
def store_instances(
self,
datasets: Sequence[pydicom.dataset.Dataset],
study_instance_uid: Optional[str] = None
) -> pydicom.dataset.Dataset:
"""Stores DICOM instances.
Parameters
----------
datasets: Sequence[pydicom.dataset.Dataset]
instances that should be stored
study_instance_uid: str, optional
unique study identifier
Returns
-------
pydicom.dataset.Dataset
information about status of stored instances
"""
message = 'store instances'
if study_instance_uid is not None:
message += f' of study "{study_instance_uid}"'
logger.info(message)
url = self._get_studies_url('stow', study_instance_uid)
encoded_datasets = list()
for ds in datasets:
with BytesIO() as b:
pydicom.dcmwrite(b, ds)
encoded_ds = b.getvalue()
encoded_datasets.append(encoded_ds)
return self._http_post_multipart_application_dicom(
url,
encoded_datasets
)
    def delete_instance(
        self,
        study_instance_uid: str,
        series_instance_uid: str,
        sop_instance_uid: str
    ) -> requests.models.Response:
        """Deletes specified instance.

        Parameters
        ----------
        study_instance_uid: str
            unique study identifier
        series_instance_uid: str
            unique series identifier
        sop_instance_uid: str
            unique instance identifier

        Returns
        -------
        requests.models.Response
            HTTP response object returned.

        Note
        ----
        The Delete Instance resource is not part of the DICOM standard
        and may not be supported by all origin servers.

        Warning
        -------
        This method performs a DELETE and should be used with caution.

        """
        if study_instance_uid is None:
            raise ValueError(
                'Study Instance UID is required for deletion of an instance.'
            )
        if series_instance_uid is None:
            raise ValueError(
                'Series Instance UID is required for deletion of an instance.'
            )
        if sop_instance_uid is None:
            raise ValueError(
                'SOP Instance UID is required for deletion of an instance.'
            )
        url = self._get_instances_url('delete', study_instance_uid,
                                      series_instance_uid, sop_instance_uid)
        # Response is returned so callers can inspect the status code.
        return self._http_delete(url)
def retrieve_instance_metadata(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str
) -> Dict[str, dict]:
"""Retrieves metadata of an individual DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
Returns
-------
Dict[str, dict]
metadata in DICOM JSON format
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of '
'instance metadata.'
)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of '
'instance metadata.'
)
if sop_instance_uid is None:
raise ValueError(
'SOP Instance UID is required for retrieval of '
'instance metadata.'
)
url = self._get_instances_url(
'wado', study_instance_uid, series_instance_uid, sop_instance_uid
)
url += '/metadata'
return self._http_get_application_json(url)[0]
def retrieve_instance_rendered(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None
) -> bytes:
"""Retrieves an individual, server-side rendered DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types (choices: ``"image/jpeg"``, ``"image/jp2"``,
``"image/gif"``, ``"image/png"``, ``"video/gif"``, ``"video/mp4"``,
``"video/h265"``, ``"text/html"``, ``"text/plain"``,
``"text/xml"``, ``"text/rtf"``, ``"application/pdf"``)
params: Dict[str], optional
additional parameters relevant for given `media_type`,
e.g., ``{"quality": 95}`` for ``"image/jpeg"``
Returns
-------
bytes
rendered representation of instance
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of '
'rendered instance.'
)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of '
'rendered instance.'
)
if sop_instance_uid is None:
raise ValueError(
'SOP Instance UID is required for retrieval of '
'rendered instance.'
)
url = self._get_instances_url(
'wado', study_instance_uid, series_instance_uid, sop_instance_uid
)
url += '/rendered'
if media_types is None:
response = self._http_get(url, params)
return response.content
common_media_type = self._get_common_media_type(media_types)
if common_media_type.startswith('image'):
return self._http_get_image(url, media_types, params)
elif common_media_type.startswith('video'):
return self._http_get_video(url, media_types, params)
elif common_media_type.startswith('text'):
return self._http_get_text(url, media_types, params)
elif common_media_type == 'application/pdf':
return self._http_get_application_pdf(url, params)
else:
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of rendered instance.'
)
def _get_instance_frames(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
frame_numbers: Sequence[int],
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
stream: bool = False
) -> Iterator[bytes]:
"""Gets frames of a DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
frame_numbers: Sequence[int]
one-based positional indices of the frames within the instance
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
stream: bool, optional
whether data should be streamed (i.e., requested using chunked
transfer encoding)
Returns
-------
Iterator[bytes]
pixel data for each frame
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of frames.'
)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of frames.'
)
if sop_instance_uid is None:
raise ValueError(
'SOP Instance UID is required for retrieval of frames.'
)
url = self._get_instances_url(
'wado', study_instance_uid, series_instance_uid, sop_instance_uid
)
frame_list = ','.join([str(n) for n in frame_numbers])
url += f'/frames/{frame_list}'
if media_types is None:
return self._http_get_multipart_application_octet_stream(
url,
stream=stream
)
common_media_type = self._get_common_media_type(media_types)
if common_media_type == 'application/octet-stream':
return self._http_get_multipart_application_octet_stream(
url,
media_types=media_types,
stream=stream
)
elif common_media_type.startswith('image'):
return self._http_get_multipart_image(
url,
media_types=media_types,
stream=stream
)
elif common_media_type.startswith('video'):
return self._http_get_multipart_video(
url,
media_types=media_types,
stream=stream
)
else:
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of frames.'
)
def retrieve_instance_frames(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
frame_numbers: Sequence[int],
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None
) -> List[bytes]:
"""Retrieves one or more frames of a DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
frame_numbers: Sequence[int]
one-based positional indices of the frames within the instance
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
Returns
-------
List[bytes]
pixel data for each frame
"""
return list(
self._get_instance_frames(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
sop_instance_uid=sop_instance_uid,
frame_numbers=frame_numbers,
media_types=media_types,
stream=False
)
)
def iter_instance_frames(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
frame_numbers: Sequence[int],
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None
) -> Iterator[bytes]:
"""Iterates over frames of a DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
frame_numbers: Sequence[int]
one-based positional indices of the frames within the instance
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
Returns
-------
Iterator[bytes]
pixel data for each frame
Note
----
Data is streamed from the DICOMweb server.
"""
return self._get_instance_frames(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
sop_instance_uid=sop_instance_uid,
frame_numbers=frame_numbers,
media_types=media_types,
stream=True
)
def retrieve_instance_frames_rendered(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
frame_numbers: Sequence[int],
media_types: Optional[Tuple[Union[str, Tuple[str, str]]]] = None,
params: Optional[Dict[str, Any]] = None
) -> bytes:
"""Retrieves one or more server-side rendered frames of a
DICOM instance.
Parameters
----------
study_instance_uid: str
unique study identifier
series_instance_uid: str
unique series identifier
sop_instance_uid: str
unique instance identifier
frame_numbers: Sequence[int]
one-based positional index of the frame within the instance
media_types: Tuple[Union[str, Tuple[str, str]]], optional
acceptable media type (choices: ``"image/jpeg"``, ``"image/jp2"``,
``"image/gif"``, ``"image/png"``)
params: Dict[str], optional
additional parameters relevant for given `media_type`,
e.g., ``{"quality": 95}`` for ``"image/jpeg"`` media type
Returns
-------
bytes
rendered frames
Note
----
Not all media types are compatible with all SOP classes.
"""
if study_instance_uid is None:
raise ValueError(
'Study Instance UID is required for retrieval of '
'rendered frame.'
)
if series_instance_uid is None:
raise ValueError(
'Series Instance UID is required for retrieval of '
'rendered frame.'
)
if sop_instance_uid is None:
raise ValueError(
'SOP Instance UID is required for retrieval of rendered frame.'
)
url = self._get_instances_url(
'wado', study_instance_uid, series_instance_uid, sop_instance_uid
)
url += '/frames/{frame_numbers}/rendered'.format(
frame_numbers=','.join([str(n) for n in frame_numbers])
)
if media_types is None:
# Try and hope for the best...
response = self._http_get(url, params)
return response.content
common_media_type = self._get_common_media_type(media_types)
if common_media_type.startswith('image'):
return self._http_get_image(url, media_types, params)
elif common_media_type.startswith('video'):
return self._http_get_video(url, media_types, params)
else:
raise ValueError(
f'Media type "{common_media_type}" is not supported for '
'retrieval of rendered frame.'
)
@staticmethod
def lookup_keyword(
tag: Union[str, int, Tuple[str, str], pydicom.tag.BaseTag]
) -> str:
"""Looks up the keyword of a DICOM attribute.
Parameters
----------
tag: Union[str, int, Tuple[str, str], pydicom.tag.BaseTag]
attribute tag (e.g. ``"00080018"``)
Returns
-------
str
attribute keyword (e.g. ``"SOPInstanceUID"``)
"""
return pydicom.datadict.keyword_for_tag(tag)
@staticmethod
def lookup_tag(keyword: str) -> str:
"""Looks up the tag of a DICOM attribute.
Parameters
----------
keyword: str
attribute keyword (e.g. ``"SOPInstanceUID"``)
Returns
-------
str
attribute tag as HEX string (e.g. ``"00080018"``)
"""
tag = pydicom.datadict.tag_for_keyword(keyword)
tag = pydicom.tag.Tag(tag)
return '{0:04x}{1:04x}'.format(tag.group, tag.element).upper()
| {
"repo_name": "MGHComputationalPathology/dicomweb-client",
"path": "src/dicomweb_client/api.py",
"copies": "1",
"size": "106071",
"license": "mit",
"hash": 3129006970129366000,
"line_mean": 34.0763888889,
"line_max": 142,
"alpha_frac": 0.5489153492,
"autogenerated": false,
"ratio": 4.515581098339719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000807918286800678,
"num_lines": 3024
} |
# /application/project.py
from portfolio.models import Project, needs_db
from portfolio.admin import needs_logged_in
from portfolio.application import application
from flask import abort, jsonify, request
@application.route("/api/projects/count")
@needs_db
def project_count(db_session):
    """Return the total number of portfolio projects as JSON."""
    total = db_session.query(Project).count()
    return jsonify(count=total)
@application.route("/api/projects/<int:key>")
@needs_db
def project_read(db_session, key):
    """Return the JSON representation of the project with primary key `key`,
    or an error object when no such project exists."""
    query = db_session.query(
        Project
    ).filter(
        Project.key == key
    )
    if query.count() == 1:
        # NAMING FIX: the matched row is a Project, not a user.
        project = query.one()
        return jsonify(**project.to_json())
    else:
        return jsonify(**{"error": "project not found"})
@application.route("/admin/api/projects/new", methods=["POST"])
@needs_db
@needs_logged_in
def project_new(db_session):
    """Create a new project from a JSON payload.

    Expects `name`, `url`, `show` and `description`; responds 400 when the
    payload is missing or incomplete, 204 on success.
    """
    data = request.get_json()
    if data is None:
        abort(400)
    # All four fields are mandatory; reject the request otherwise
    # (replaces four copy-pasted membership checks).
    for field in ("name", "url", "show", "description"):
        if field not in data:
            abort(400)
    project = Project(
        name=data["name"],
        url=data["url"],
        show=data["show"],
        description=data["description"]
    )
    try:
        db_session.add(project)
        db_session.commit()
        return ("", 204)
    # NOTE(review): IntegrityError is not imported in this module (it lives
    # in sqlalchemy.exc), so this handler would itself raise NameError if it
    # ever fired -- confirm and add the import at file level.
    except IntegrityError:
        return jsonify(**{"error": "project could not be added"})
@application.route("/admin/api/projects/<int:key>", methods=["POST"])
@needs_db
@needs_logged_in
def project_write(db_session, key):
    """Update fields of an existing project from a JSON payload.

    Only the fields present in the payload are changed; responds 400 for a
    missing payload, 204 on success, or an error object otherwise.
    """
    data = request.get_json()
    if data is None:
        abort(400)
    query = db_session.query(
        Project
    ).filter(
        Project.key == key
    )
    if query.count() == 1:
        project = query.one()
        # Copy each optional field if supplied (replaces four copy-pasted ifs).
        for field in ("name", "url", "show", "description"):
            if field in data:
                setattr(project, field, data[field])
        try:
            db_session.commit()
            return ("", 204)
        # NOTE(review): IntegrityError is not imported in this module (it is
        # sqlalchemy.exc.IntegrityError); this handler would raise NameError
        # if it ever fired -- confirm and add the import at file level.
        except IntegrityError:
            return jsonify(**{"error": "project could not be updated"})
    else:
        return jsonify(**{"error": "project not found"})
| {
"repo_name": "TumblrCommunity/PowerPortfolio",
"path": "portfolio/application/project.py",
"copies": "1",
"size": "2605",
"license": "mit",
"hash": 8730913199126216000,
"line_mean": 26.4210526316,
"line_max": 71,
"alpha_frac": 0.5765834933,
"autogenerated": false,
"ratio": 3.9831804281345566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059763921434557,
"avg_score": null,
"num_lines": null
} |
# ./application.py
from flask import Flask, jsonify, make_response, request, session, redirect
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import isodate
import string
import random
import requests
import base64
import os
import urllib
import base64
import datetime
import configparser
import youtube_dl
import eyed3
KEYS = {}  # API credentials, populated from keys.ini by read_keys()
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
RANDOM_STRING_LENGTH = 16  # length of generated state tokens / filenames
YOUTUBE_URL = 'https://www.youtube.com/watch?v='
SPOTIFY_BASE_URL = 'https://api.spotify.com'
# SPOTIFY_REDIRECT = 'http://www.youtunes-downloader.com/spotifyCallback'
SPOTIFY_REDIRECT = 'http://localhost:5000/spotifyCallback'
SPOTIFY_STATE_KEY = 'spotify_auth_state'
SPOTIFY_EXPIRATION = 3600  # seconds -- Spotify access tokens last one hour
MP3_FILES = 'mp3-files/'  # directory downloaded MP3s are written to
FRONT_COVER = 3  # eyed3 image-type code used when embedding album art
MILLISECONDS_PER_SECOND = 1000
application = Flask(__name__, static_url_path='', static_folder='')
# SECURITY NOTE(review): the session secret key is hard-coded in source;
# it should be loaded from configuration or the environment instead.
application.secret_key = 'Y\x16++D\xdf\xbeww\x9a\x01(\xe9\xd6\xc6\xa2\xaa\x97wDp\xa6\xd2\xd1n<\xafO\x93\xf8H\x82'
@application.route("/")
def index():
    """Serve the single-page app by redirecting to its entry point."""
    return redirect('/music-app.html')
def generate_random_string(size=RANDOM_STRING_LENGTH, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Return a random string of `size` characters drawn from `chars`.

    SECURITY FIX: callers use this for the OAuth `state` CSRF token, so the
    characters are drawn with the `secrets` module (CSPRNG) instead of the
    non-cryptographic `random` module.
    """
    import secrets  # local import: keeps this fix self-contained
    return ''.join(secrets.choice(chars) for _ in range(size))
@application.route('/spotifyLogin', methods=['POST'])
def spotify_login():
    """Start the Spotify OAuth authorization-code flow.

    Returns JSON containing the Spotify authorize URL and sets a `state`
    cookie that the callback checks for CSRF protection.
    """
    # BUG FIX: urllib.urlencode is Python 2 only; this module otherwise
    # targets Python 3 (it imports configparser), so use urllib.parse.
    from urllib.parse import urlencode
    # nothing to do if this session is already authenticated
    if 'spotify_name' in session:
        return make_response('Already logged in!')
    state = generate_random_string()
    scope = 'user-read-private playlist-read-private user-follow-read user-read-currently-playing user-library-read user-top-read user-read-recently-played'
    info_obj = {'response_type': 'code', 'client_id': KEYS['SPOTIFY_CLIENT_ID'], 'scope': scope, 'redirect_uri': SPOTIFY_REDIRECT,
                'state': state
    }
    query_string = urlencode(info_obj)
    response = jsonify({'login_url' : 'https://accounts.spotify.com/authorize/?' + query_string})
    response.set_cookie(SPOTIFY_STATE_KEY, state)
    return response
@application.route('/spotifyCallback', methods=['GET'])
def spotify_callback():
    """Complete the OAuth flow: verify `state`, exchange the authorization
    code for tokens, fetch the user's profile and store it in the session.

    On any failure the user is redirected back to the home view with an
    `error` query parameter.
    """
    # BUG FIX: urllib.urlencode is Python 2 only -- use urllib.parse.
    from urllib.parse import urlencode
    if 'spotify_name' in session:
        return make_response('Already logged in!')
    code = request.args.get('code')
    state = request.args.get('state')
    cookies = request.cookies
    storedState = cookies[SPOTIFY_STATE_KEY] if cookies else None
    if not state or state != storedState:
        # state mismatch -> possible CSRF; bounce back with an error message
        return redirect('/music-app.html#!/home?' + urlencode({'error': 'Spotify failed to authenticate user. Please try again.'}))
    # BUG FIX: base64.b64encode requires bytes on Python 3 and returns bytes;
    # encode the credentials and decode the digest for the header value.
    credentials = KEYS['SPOTIFY_CLIENT_ID'] + ':' + KEYS['SPOTIFY_CLIENT_SECRET']
    headers = {'Authorization': 'Basic ' + base64.b64encode(credentials.encode('utf-8')).decode('ascii')}
    data = {
        'code': code,
        'redirect_uri': SPOTIFY_REDIRECT,
        'grant_type': 'authorization_code'
    }
    r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
    if r.status_code != 200:
        # token exchange failed
        return redirect('/music-app.html#!/home?' + urlencode({'error': 'Spotify failed to authenticate user. Please try again.'}))
    content = r.json()
    now = datetime.datetime.now()
    access_token = content['access_token']
    refresh_token = content['refresh_token']
    expires_in = content['expires_in']
    # fetch the user's profile for display information
    headers = {'Authorization': 'Bearer ' + access_token}
    r = requests.get(SPOTIFY_BASE_URL + '/v1/me', headers=headers)
    if r.status_code != 200:
        return redirect('/music-app.html#!/home?' + urlencode({'error': 'Spotify credentials were valid, but failed to fetch user information. Please try again.'}))
    content = r.json()
    images = content['images']
    if len(images) != 0:
        session['spotify_img_url'] = images[0]['url']
    # persist everything needed for subsequent API calls in the session
    session['spotify_id'] = content['id']
    session['spotify_name'] = content['display_name']
    session['spotify_access_token'] = access_token
    session['spotify_refresh_token'] = refresh_token
    session['spotify_expiration'] = now + datetime.timedelta(seconds=expires_in)
    session['country'] = content['country']
    return redirect('/music-app.html#!/browse')
def spotify_refresh():
    """Exchange the session's refresh token for a new access token.

    Returns True and updates the session's token/expiry on success,
    False otherwise.
    """
    refresh_token = session['spotify_refresh_token']
    # BUG FIX: base64.b64encode requires bytes on Python 3 and returns bytes;
    # encode the credentials and decode the digest for the header value.
    credentials = KEYS['SPOTIFY_CLIENT_ID'] + ':' + KEYS['SPOTIFY_CLIENT_SECRET']
    headers = {'Authorization': 'Basic ' + base64.b64encode(credentials.encode('utf-8')).decode('ascii')}
    data = {'grant_type': 'refresh_token', 'refresh_token': refresh_token}
    now = datetime.datetime.now()
    r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
    if r.status_code == 200:
        # replace the stored token and push the expiry forward
        session['spotify_access_token'] = r.json()['access_token']
        session['spotify_expiration'] = now + datetime.timedelta(seconds=SPOTIFY_EXPIRATION)
        return True
    else:
        return False
@application.route('/spotifyLogout', methods=['POST'])
def spotify_logout():
    """Log the user out by discarding all session state."""
    session.clear()
    response = make_response('Logged out successfully.')
    return response
@application.route('/getSpotifyInfo', methods=['GET'])
def get_spotify_info():
    """Return the logged-in user's display name and avatar URL as JSON.

    Responds 401 when no Spotify session exists, 400 when the expired
    access token cannot be refreshed.
    """
    if 'spotify_name' not in session:
        return make_response('No Spotify user information.', 401)
    # refresh the access token if it has expired
    if (datetime.datetime.now() > session['spotify_expiration']):
        success = spotify_refresh()
        if not success:
            return make_response('Failed to refresh token.', 400)
    # BUG FIX: 'spotify_img_url' is only stored when the profile has images,
    # so indexing it directly raised KeyError for users without an avatar;
    # .get() yields null in the JSON instead.
    return jsonify({'name': session['spotify_name'], 'img_url': session.get('spotify_img_url')})
def make_spotify_get_request(endpoint, params=None):
    """Perform an authenticated GET against the Spotify Web API.

    Returns (requests response, True) on success, or
    (flask error response, False) when no session exists or the token
    refresh fails.
    """
    # BUG FIX: a mutable dict default argument is shared across calls;
    # use None and substitute a fresh dict per call instead.
    if params is None:
        params = {}
    if 'spotify_name' not in session:
        return make_response('No Spotify user information.', 401), False
    # refresh the access token if it has expired
    if (datetime.datetime.now() > session['spotify_expiration']):
        success = spotify_refresh()
        if not success:
            return make_response('Failed to refresh token.', 400), False
    headers = {'Authorization': 'Bearer ' + session['spotify_access_token']}
    response = requests.get(SPOTIFY_BASE_URL + endpoint, headers=headers, params=params)
    return response, True
def filter_spotify_info(item):
    """Reduce a Spotify track object to the fields the front end needs.

    Returns a dict with song name, artist names, URI, a formatted duration
    and, when album data is present, the album name/artists/art URL.
    """
    track = {
        'song_name': item['name'],
        'artists': [artist['name'] for artist in item['artists']],
        'uri': item['uri'],
    }
    # Format the duration as H:MM:SS, dropping the hour field when zero.
    total_seconds = item['duration_ms'] / MILLISECONDS_PER_SECOND
    minutes, secs = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours != 0:
        track['duration'] = "%d:%02d:%02d" % (hours, minutes, secs)
    else:
        track['duration'] = "%d:%02d" % (minutes, secs)
    if 'album' in item:
        album = item['album']
        if 'artists' in album:
            track['album_artists'] = [entry['name'] for entry in album['artists']]
        if 'name' in album:
            track['album_name'] = album['name']
        if 'images' in album and len(album['images']) != 0:
            track['album_art_url'] = album['images'][0]['url']
    return track
@application.route('/getSpotifyRecentlyPlayed', methods=['GET'])
def get_spotify_recently_played():
    """Return the user's recently played tracks, de-duplicated, as JSON."""
    response, success = make_spotify_get_request('/v1/me/player/recently-played')
    if not success:
        # not logged in / token refresh failed
        return response
    if response.status_code == 200:
        content = response.json()
        items = content['items']
        filtered_items = []
        for item in items:
            filtered_info = filter_spotify_info(item['track'])
            # FIX: the original filtered each track a second time inside
            # append(); reuse the already-computed dict.
            if filtered_info not in filtered_items:
                filtered_items.append(filtered_info)
        return jsonify({'tracks': filtered_items, 'type': 'Recently Played Tracks'})
    else:
        return make_response('Failed to get recently-played tracks.', response.status_code)
@application.route('/getSpotifyTopTracks', methods=['GET'])
def get_top_tracks():
    """Return the user's top tracks as JSON."""
    response, success = make_spotify_get_request('/v1/me/top/tracks')
    if not success:
        # not logged in / token refresh failed
        return response
    if response.status_code == 200:
        tracks = [filter_spotify_info(entry) for entry in response.json()['items']]
        return jsonify({'tracks': tracks, 'type': 'Top Tracks'})
    return make_response('Failed to get top tracks.', response.status_code)
@application.route('/getSpotifySaved', methods=['GET'])
def get_spotify_saved():
    """Return the user's saved (library) tracks as JSON."""
    # TODO: also fetch saved albums via /v1/me/albums
    response, success = make_spotify_get_request('/v1/me/tracks')
    if not success:
        # not logged in / token refresh failed
        return response
    if response.status_code == 200:
        content = response.json()
        items = content['items']
        return jsonify({'tracks': [filter_spotify_info(item['track']) for item in items], 'type': 'Saved Tracks'})
    else:
        # BUG FIX: the error message said "top tracks" (copy/paste from
        # get_top_tracks); report the actual failing operation.
        return make_response('Failed to get saved tracks.', response.status_code)
@application.route('/getSpotifyNew', methods=['GET'])
def get_new_releases():
    """Return the tracks of Spotify's five newest album releases as JSON."""
    response, success = make_spotify_get_request('/v1/browse/new-releases', params={'limit': 5})
    if not success:
        # not logged in / token refresh failed
        return response
    if response.status_code == 200:
        content = response.json()
        albums = content['albums']['items']
        new_releases = []
        for album in albums:
            album_id = album['id']
            album_name = album['name']
            album_artists = [album_artist['name'] for album_artist in album['artists']]
            # BUG FIX: album_art_url was only bound when the album had
            # images, leaving a NameError (or a stale value from a previous
            # album) otherwise; default to None explicitly.
            album_art_url = album['images'][0]['url'] if len(album['images']) != 0 else None
            # BUG FIX: use a distinct variable so the outer `response`
            # (whose status code is reported on failure) is not clobbered.
            tracks_response, tracks_ok = make_spotify_get_request('/v1/albums/{}/tracks'.format(album_id))
            if tracks_ok and tracks_response.status_code == 200:
                tracks = tracks_response.json()['items']
                for track in tracks:
                    track_info = filter_spotify_info(track)
                    track_info.update({
                        'album_name': album_name,
                        'album_artists': album_artists,
                        'album_art_url': album_art_url
                    })
                    new_releases.append(track_info)
        return jsonify({'tracks': new_releases, 'type': 'New Releases'})
    return make_response('Failed to get new releases.', response.status_code)
def get_spotify_playlists():
    """Placeholder for playlist support (not implemented yet).

    Relevant endpoints:
    """
    #/v1/users/{user_id}/playlists
    #/v1/users/{user_id}/playlists/{playlist_id}
    #/v1/users/{user_id}/playlists/{playlist_id}/tracks
    return
@application.route("/searchSpotify", methods=['GET'])
def search_spotify():
    """Search Spotify for tracks matching the `query` request argument."""
    query = request.args['query']
    response, success = make_spotify_get_request('/v1/search', params={'q': query, 'type': 'track'})
    if not success:
        # not logged in / token refresh failed
        return response
    if response.status_code == 200:
        content = response.json()
        items = content['tracks']['items']
        return jsonify({'tracks': [filter_spotify_info(item) for item in items]})
    else:
        # BUG FIX: the error message said "top tracks" (copy/paste);
        # report the actual failing operation.
        return make_response('Failed to search tracks.', response.status_code)
def youtube_search(results, query):
    """Search YouTube for `query` and append one dict per video to `results`.

    Each dict carries title/channel/description/date/thumbnail/id, a
    human-readable duration, and view/like/dislike counts when present.
    Mutates `results` in place; returns None.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=KEYS['YOUTUBE_DEVELOPER_KEY'])
    # Call the search.list method to retrieve results matching the specified
    # query term.
    search_response = youtube.search().list(
        q=query,
        part="id",
        type='video'
    ).execute()
    # video IDs
    video_ids = [search_result['id']['videoId'] for search_result in search_response.get("items", [])]
    # look up videos for more specific information
    video_response = youtube.videos().list(
        id=','.join(video_ids),
        part='snippet,contentDetails,statistics'
    ).execute()
    for video_result in video_response.get("items", []):
        obj = {
            'title': video_result['snippet']['title'],
            'channel': video_result['snippet']['channelTitle'],
            'channelId': video_result['snippet']['channelId'],
            'description': video_result['snippet']['description'],
            'date': video_result['snippet']['publishedAt'],
            'thumbnail': video_result['snippet']['thumbnails']['default']['url'],
            'id': video_result['id']
        }
        seconds = int(isodate.parse_duration(video_result['contentDetails']['duration']).total_seconds())
        # format the ISO-8601 duration as H:MM:SS (or M:SS under an hour)
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        if h != 0:
            obj['duration'] = "%d:%02d:%02d" % (h, m, s)
        else:
            obj['duration'] = "%d:%02d" % (m, s)
        # statistics may be partially hidden per video; copy what exists
        if 'viewCount' in video_result['statistics']:
            obj['views'] = video_result['statistics']['viewCount']
        if 'likeCount' in video_result['statistics']:
            obj['likes'] = video_result['statistics']['likeCount']
        if 'dislikeCount' in video_result['statistics']:
            obj['dislikes'] = video_result['statistics']['dislikeCount']
        results.append(obj)
@application.route("/searchYoutube", methods=['GET'])
def search_youtube():
    """Search YouTube for the `query` request argument and return results."""
    matches = []
    youtube_search(matches, request.args['query'])
    return jsonify({'results': matches})
def update_song_info(filename, info):
    """Write ID3 tags (album, artists, cover art) from `info` onto the MP3
    at `filename` using eyed3.

    `info` is the dict produced by filter_spotify_info(); every field is
    optional.
    """
    f = eyed3.load(filename)
    if 'album_name' in info:
        f.tag.album = info['album_name']
    # BUG FIX: 'artists' and 'album_artists' were indexed unconditionally
    # and raised KeyError when absent from the payload; .get() treats a
    # missing key like an empty list.
    if len(info.get('artists', [])) != 0:
        f.tag.artist = ', '.join(info['artists'])
    if len(info.get('album_artists', [])) != 0:
        f.tag.album_artist = ', '.join(info['album_artists'])
    if 'album_art_url' in info:
        response = requests.get(info['album_art_url'])
        if response.status_code == 200:
            f.tag.images.set(FRONT_COVER, response.content, 'image/jpeg')
    f.tag.save()
@application.route("/download", methods=['POST'])
def download():
    """Download a YouTube video's audio as MP3 and tag it with Spotify info.

    Expects JSON with `id` (video id) and `info` (filter_spotify_info dict);
    returns the path of the written MP3, or 400 for an invalid video id.
    """
    video_id = request.json['id']
    video_info = request.json['info']
    ydl_opts = {
        'quiet': 'True',
        'no_warnings': 'True',
        'format': 'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    # pick a random output name that does not collide with an existing file
    while True:
        filename = MP3_FILES + generate_random_string()
        if not os.path.exists(filename + '.mp3'):
            break
    ydl_opts['outtmpl'] = '{}.%(ext)s'.format(filename)
    filename += '.mp3'
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([YOUTUBE_URL + video_id])
    # BUG FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # catch Exception, which covers youtube_dl's download errors.
    except Exception:
        return make_response('Invalid video id.', 400)
    update_song_info(filename, video_info)
    return jsonify({'download': filename})
def read_keys():
    """Load API credentials from keys.ini's [youtunes] section into the
    module-level KEYS dict (keys uppercased)."""
    config = configparser.ConfigParser()
    config.read('keys.ini')
    # BUG FIX: dict.iteritems() is Python 2 only and this module targets
    # Python 3 (configparser import); .items() works on both.
    for key, value in config['youtunes'].items():
        # read into global keys
        KEYS[key.upper()] = value
if __name__ == "__main__":
    # load API keys before serving; the debug server is for local use only
    read_keys()
    application.run(debug=True)
| {
"repo_name": "achang97/YouTunes",
"path": "application.py",
"copies": "1",
"size": "15630",
"license": "mit",
"hash": 2341502402560118300,
"line_mean": 30.3855421687,
"line_max": 179,
"alpha_frac": 0.6142034549,
"autogenerated": false,
"ratio": 3.7781000725163163,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4892303527416316,
"avg_score": null,
"num_lines": null
} |
# application.py
import wx
import sys
#from waxconfig import WaxConfig
#import font
class Application(wx.App):
    """Thin wx.App wrapper that instantiates a frame class on startup.

    Accepts a frame *class* plus arbitrary options; the options are passed
    through to the frame constructor in OnInit. The special keyword
    `use_stdout_window` (default off) is consumed here and controls wx's
    stdout/stderr redirection window.
    """
    def __init__(self, frameklass, *args, **kwargs):
        # takes a frame *class* plus arbitrary options. these options will
        # be passed to the frame constructor.
        self.frameklass = frameklass
        self.args = args
        self.kwargs = kwargs
        # when set, the app uses the stdout/stderr window; off by default
        use_stdout_window = 0
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # operator is equivalent and also works on Python 2.
        if 'use_stdout_window' in kwargs:
            use_stdout_window = kwargs['use_stdout_window']
            del kwargs['use_stdout_window']
        wx.App.__init__(self, use_stdout_window)

    def OnInit(self):
        """Create the main frame, install its excepthook if defined, show it."""
        self.mainframe = self.frameklass(*self.args, **self.kwargs)
        if hasattr(self.mainframe.__class__, "__ExceptHook__"):
            sys.excepthook = self.mainframe.__ExceptHook__
        self.mainframe.Show(True)
        self.SetTopWindow(self.mainframe)
        return True

    def Run(self):
        """Enter the wx main event loop (blocks until the app exits)."""
        self.MainLoop()
| {
"repo_name": "bblais/plasticity",
"path": "plasticity/dialogs/waxy/application.py",
"copies": "1",
"size": "1039",
"license": "mit",
"hash": -1217205376491620400,
"line_mean": 31.46875,
"line_max": 75,
"alpha_frac": 0.624639076,
"autogenerated": false,
"ratio": 3.8058608058608057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4930499881860806,
"avg_score": null,
"num_lines": null
} |
# Released under the MIT licence.
# Copyright (C) Peter Hinch 2018
# The App class emulates a user application intended to service a single
# client. In this case we have four instances of the application servicing
# clients with ID's 1-4.
import uasyncio as asyncio
loop = asyncio.get_event_loop(runq_len=32, waitq_len=32)
import ujson
import server
class App():
    """Emulated user application servicing a single remote client.

    A reader task and a writer task share ``self.data`` (a 2-element list
    exchanged with the remote as newline-terminated JSON).
    """
    def __init__(self, client_id):
        self.client_id = client_id
        self.data = [0, 0] # Exchange a 2-list with remote
        loop = asyncio.get_event_loop()
        loop.create_task(self.start(loop))
    async def start(self, loop):
        """Poll until the server has a connection for this client id, then
        spawn the reader and writer tasks."""
        print('Client {} Awaiting connection.'.format(self.client_id))
        conn = None
        while conn is None:
            await asyncio.sleep_ms(100)
            conn = server.client_conn(self.client_id)
        loop.create_task(self.reader(conn))
        loop.create_task(self.writer(conn))
    async def reader(self, conn):
        """Continuously read JSON lines from the connection into self.data."""
        print('Started reader')
        while True:
            # Attempt to read data: server times out if none arrives in timeout
            # period closing the Connection. .readline() pauses until the
            # connection is re-established.
            line = await conn.readline()
            self.data = ujson.loads(line)
            # Receives [restart count, uptime in secs]
            print('Got', self.data, 'from remote', self.client_id)
    # Send [approx application uptime in secs, received client uptime]
    async def writer(self, conn):
        """Every 5 s, bump the counter in self.data[0] and send the list."""
        print('Started writer')
        count = 0
        while True:
            self.data[0] = count
            count += 1
            print('Sent', self.data, 'to remote', self.client_id)
            print()
            # .write() behaves as per .readline()
            await conn.write('{}\n'.format(ujson.dumps(self.data)))
            await asyncio.sleep(5)
clients = [App(n) for n in range(1, 5)] # Accept 4 clients with ID's 1-4
try:
    # run the server until interrupted; 1500 ms is the keepalive timeout
    loop.run_until_complete(server.run(timeout=1500))
except KeyboardInterrupt:
    print('Interrupted')
finally:
    # always release the listening sockets on the way out
    print('Closing sockets')
    for s in server.socks:
        s.close()
| {
"repo_name": "peterhinch/micropython-samples",
"path": "resilient/application.py",
"copies": "1",
"size": "2171",
"license": "mit",
"hash": -129621345335257170,
"line_mean": 32.921875,
"line_max": 79,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 3.8021015761821366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49174861915821366,
"avg_score": null,
"num_lines": null
} |
import hashlib
import os
class chayi():
    """Diff two source trees by MD5: report files that are new or changed
    in the new tree relative to the old one ("chayi" means "difference").
    """

    def __init__(self, old_path, new_path):
        self.old_path = old_path  # directory holding the old code
        self.new_path = new_path  # directory holding the new code

    def md5(self, data):
        """Return the hex MD5 digest of `data` (bytes), salted with the
        fixed prefix "007"."""
        key = hashlib.md5(bytes("007", encoding="utf-8"))
        key.update(data)
        result = key.hexdigest()
        return result

    def _scan(self, base_path):
        """Walk `base_path` and return the set of "<relative path>:<md5>"
        strings for every file below it.

        Deduplicates the formerly copy-pasted bodies of old_code/new_code.
        Keeps the original Windows-style "\\" separator when joining paths.
        """
        file_list = []
        for root, dirs, files in os.walk(base_path):
            for name in files:
                file_list.append(root + "\\" + name)
        entries = []
        for path in file_list:
            with open(path, "rb") as fh:
                digest = self.md5(fh.read())
            entries.append(str(path).replace(base_path, "") + ":" + digest)
        return set(entries)

    def old_code(self):
        """Hash the old tree into self.old_code_set; True on success."""
        try:
            self.old_code_set = self._scan(self.old_path)
            return True
        except Exception as ex:
            # preserved behavior: report the error and abort the process
            print(ex)
            exit()

    def new_code(self):
        """Hash the new tree into self.new_code_set; True on success."""
        try:
            self.new_code_set = self._scan(self.new_path)
            return True
        except Exception as ex:
            # preserved behavior: report the error and abort the process
            print(ex)
            exit()

    def duibi(self):
        """Compare the trees and print which files changed ("duibi" =
        "compare"). Entries present in the new tree but not the old are
        added or modified files."""
        if self.old_code() and self.new_code():
            a = self.old_code_set  # digests of the old tree
            b = self.new_code_set  # digests of the new tree
            c = b.difference(a)
            c = str(c).replace("{'", "").replace("'}", "")
            msg = "最新code下面的{p}{c}发生了更新".format(p=self.new_path, c=c)
            print(msg)
obj=chayi("E:\\test\\","E:\\test2\\") # first argument: old-code path, second: new-code path
obj.duibi() # run the comparison
"repo_name": "xiaoyongaa/ALL",
"path": "python最新函数/差异.py",
"copies": "1",
"size": "2479",
"license": "apache-2.0",
"hash": 7087643319016135000,
"line_mean": 29.8888888889,
"line_max": 78,
"alpha_frac": 0.4822312191,
"autogenerated": false,
"ratio": 2.8758085381630014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38580397572630015,
"avg_score": null,
"num_lines": null
} |
import socket,os,pickle
ip_port=("127.0.0.1",9999)  # address the command server listens on
server=socket.socket()
server.bind(ip_port)
server.listen() # start listening for client connections
def connect():
    """Accept clients and execute received shell commands, streaming the
    command output back as a pickled size header plus raw bytes.

    SECURITY NOTE(review): this runs arbitrary network-supplied strings
    through os.system/os.popen -- remote command execution by design; never
    expose it beyond a fully trusted network.
    """
    while True:
        conn,addr=server.accept()
        while True:
            try:
                print("等待")
                # wait for an incoming request from the client
                print("电话来了")
                data=conn.recv(1024) # receive the command text
                print("接收到了")
                cmd=str(data,encoding="utf-8")
                stat=os.system(cmd)  # exit status 0 means the command is valid
                print(stat)
                print(cmd)
                if stat==0:
                    print("命令正确")
                    s="ok"
                    conn.send(bytes(s,encoding="utf-8"))
                    stat=str(conn.recv(1024),encoding="utf-8")
                    if stat=="ok":
                        result=bytes(os.popen(cmd).read(),encoding="utf-8") # the command's output payload
                        infor={"stat":"ready","size":len(result)}
                        infor=pickle.dumps(infor)
                        conn.sendall(infor) # announce transfer start and total payload size
                        stat=str(conn.recv(1024),encoding="utf-8")
                        if stat=="go":
                            conn.sendall(result)
                else:
                    print("命令不正确,请重新输入")
                    s="no"
                    conn.send(bytes(s,encoding="utf-8"))
                    continue
            except Exception as ex:
                # client error/disconnect: go back to accepting connections
                print(addr,ex)
                break
    server.close()  # unreachable: the accept loop above never exits
connect()
| {
"repo_name": "xiaoyongaa/ALL",
"path": "最新socket网络编程/简单socket例子(server).py",
"copies": "1",
"size": "1690",
"license": "apache-2.0",
"hash": 8120792307286840000,
"line_mean": 29.0769230769,
"line_max": 86,
"alpha_frac": 0.429028133,
"autogenerated": false,
"ratio": 3.4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43290281329999997,
"avg_score": null,
"num_lines": null
} |
import socket,pickle
# Interactive client: sends shell commands to the server and receives the
# command output as a pickled size header followed by raw bytes.
ip_port=("192.168.1.10",9999)
client=socket.socket() # TCP socket to the command server
client.connect(ip_port)
while True:
    inp=input("请输入你要发送的文字>>>:").strip()
    if len(inp)==0:continue
    client.send(bytes(inp,encoding="utf-8")) # send the command text
    stat=str(client.recv(1024),encoding="utf-8")
    print(stat)
    if stat=="ok":
        print("命令输入正确")
        s=bytes("ok",encoding="utf-8")
        client.send(s)
        infor=client.recv(1024)
        # SECURITY NOTE(review): pickle.loads on data from the network can
        # execute arbitrary code if the peer is untrusted.
        infor=pickle.loads(infor)
        size=int(infor.get("size"))
        stat=infor.get("stat")
        c=0
        b=bytes() # empty buffer; accumulates the payload
        if stat=="ready":
            print("开始循环接收")
            stat=bytes("go",encoding="utf-8")
            client.sendall(stat)
            # receive until exactly `size` bytes have arrived
            while True:
                if c==size:
                    break
                else:
                    shou=client.recv(1024)
                    c+=len(shou)
                    b+=shou
            print(str(b,encoding="utf-8"))
            print(len(b),size)
            # while c<size:
            #     shou=client.recv(1024)
            #     c=c+len(shou)
            #     b=b+shou # grow the buffer by each received chunk
            #     print(str(b,encoding="utf-8"))
    elif stat=="no":
        print("命令输入错误,,重新输入!")
        continue
client.close()
| {
"repo_name": "xiaoyongaa/ALL",
"path": "最新socket网络编程/简单socket例子(client).py",
"copies": "1",
"size": "1480",
"license": "apache-2.0",
"hash": -5820689534308806000,
"line_mean": 26.7142857143,
"line_max": 55,
"alpha_frac": 0.5014727541,
"autogenerated": false,
"ratio": 2.8710359408033828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8649948225977917,
"avg_score": 0.04451209378509328,
"num_lines": 49
} |
import paramiko
import sys
import os
import socket
def run():
    """Prompt for a host from the hard-coded inventory, authenticate via
    password or RSA key, open an interactive channel and hand it off.

    NOTE(review): linux_windows_choose() is not defined or imported in this
    module -- reaching that call raises NameError (the Exception handler
    below will print it). Presumably it should be imported from the sibling
    module that defines it; confirm.
    SECURITY NOTE(review): the inventory embeds plaintext credentials.
    """
    host_list=[{"host":"192.168.1.10","username":"root","port":"22","password":"1","auth_id":"p"},
    {"host":"192.168.1.11","username":"root","port":"22","password":"1","auth_id":"r"},
    {"host":"192.168.1.12","username":"root","port":"22","password":"103","auth_id":"p"},
    ]
    # show the numbered inventory and let the operator pick an entry
    for key,i in enumerate(host_list,1):
        print(key,i)
    mun=int(input("num:"))
    try:
        infor=host_list[mun-1]
        print(infor)
        username=infor.get("username")
        hostname=infor.get("host")
        auth_id=infor.get("auth_id")
        print(auth_id)
        password=infor.get("password")
        port=int(infor.get("port"))
        print(username,hostname,password,port)
        tran=paramiko.Transport((hostname,port)) # connect to host:port
        tran.start_client() # begin SSH client negotiation
        if auth_id=="p":
            tran.auth_password(username,password) # password authentication
        else:
            # key-based login
            print("ras login")
            default_path=os.path.join(os.environ["HOME"],".ssh","id_rsa")
            print(default_path)
            new_path=input("please input rsa path:").strip()
            if len(new_path)==0:
                print("default rsa login")
                key_path=default_path
                key=paramiko.RSAKey.from_private_key_file(key_path)
            else:
                key_path=new_path
                key=paramiko.RSAKey.from_private_key_file(key_path)
            tran.auth_publickey(username, key)
        chan=tran.open_session() # open a channel
        chan.get_pty() # request a pseudo-terminal
        chan.invoke_shell() # start an interactive shell
        print(username)
        linux_windows_choose(chan)
        chan.close()
        tran.close()
    except Exception as ex:
        print(ex)
run()
| {
"repo_name": "xiaoyongaa/ALL",
"path": "paramiko目录/终极版本适应框架.py",
"copies": "1",
"size": "1958",
"license": "apache-2.0",
"hash": -5628242467332687000,
"line_mean": 30.4333333333,
"line_max": 98,
"alpha_frac": 0.5455991516,
"autogenerated": false,
"ratio": 3.1804384485666106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42260376001666106,
"avg_score": null,
"num_lines": null
} |
#!/application/python3.5/bin/python3.5
import paramiko
import sys
import os
import select
import tty
import termios
from paramiko.py3compat import u
# Minimal interactive SSH session: puts the local terminal into raw mode and
# shuttles bytes between stdin and the remote shell channel.
# SECURITY NOTE(review): credentials are hard-coded in source.
ip="192.168.1.11"
port=22
username="x"
password="1"
tran=paramiko.Transport((ip,port))
tran.start_client()
tran.auth_password(username,password)
chan=tran.open_session() # open a channel
chan.get_pty() # request a pseudo-terminal
chan.invoke_shell() # start an interactive shell
oldtty=termios.tcgetattr(sys.stdin) # save the original tty attributes
try:
    # Put the local terminal into raw mode so that every keystroke is
    # forwarded to the remote server unmodified (no local special-character
    # processing).
    tty.setraw(sys.stdin.fileno())
    chan.settimeout(0.0)
    while True:
        # multiplex between remote output and local keyboard input
        readable, writeable, error = select.select([chan, sys.stdin, ],[],[],1)
        if chan in readable:
            try:
                result=str(chan.recv(1024),encoding="utf-8")
                if len(result)==0:break
                sys.stdout.write(result)
                sys.stdout.flush()
            except Exception as ex:
                print(ex)
        elif sys.stdin in readable:
            inp=sys.stdin.read(1)
            if len(inp)==0:break
            chan.send(inp)
finally:
    termios.tcsetattr(sys.stdin,termios.TCSADRAIN, oldtty) # restore the saved tty attributes
chan.close()
tran.close()
"repo_name": "xiaoyongaa/ALL",
"path": "paramiko目录/paramiko模块之肆意妄为.py",
"copies": "1",
"size": "1342",
"license": "apache-2.0",
"hash": -3598382750280346600,
"line_mean": 25.2,
"line_max": 79,
"alpha_frac": 0.6358234295,
"autogenerated": false,
"ratio": 2.5063829787234044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3642206408223404,
"avg_score": null,
"num_lines": null
} |
#!/application/python3.5/bin/python3.5
import paramiko
import sys
import os
import socket
import getpass
from paramiko.py3compat import u
# windows does not have termios...
# Detect the platform: termios/tty exist only on POSIX systems.
try:
    import termios # POSIX-only terminal-control module
    import tty
    has_termios=True
    print("this is linux")
except Exception as ex:
    has_termios=False
    print("this is windows")
# has_termios selects between the POSIX and Windows shell handlers below
def linux_windows_choose(chan):
    """Dispatch `chan` to the POSIX or Windows interactive-shell handler."""
    handler = linux_shell if has_termios else windows_shell
    handler(chan)
def linux_shell(chan):
    """Run an interactive session over `chan` on a POSIX terminal.

    Puts stdin into raw mode and shuttles bytes between the local keyboard
    and the remote channel until the channel closes; always restores the
    saved tty attributes. The triple-quoted string blocks below are
    disabled logging code, kept verbatim.
    """
    print("linux shell")
    import select
    oldtty=termios.tcgetattr(sys.stdin) # save the original tty attributes
    tab_falg=False
    try:
        # Put the terminal into raw mode so all user input is forwarded to
        # the remote server unmodified.
        tty.setraw(sys.stdin.fileno())  # raw mode: no local special-character handling
        tty.setcbreak(sys.stdin.fileno())
        chan.settimeout(0.0)
        # logging (disabled)
        #with open("/root/baoleiji.log","a+") as log:
        # logging (disabled)
        while True:
            # multiplex between remote output and local keyboard input
            readable, writeable, error = select.select([chan, sys.stdin, ],[],[],1)
            if chan in readable:
                try:
                    result=str(chan.recv(1024),encoding="utf-8")
                    if len(result)==0:break
                    '''
                    if tab_falg==True: #如果输入的tab按键的话,把结果写入日志
                        if result.startswith("\r\n"):
                            pass
                        else:
                            log.write(result)
                            log.flush()
                        tab_falg=False
                    '''
                    sys.stdout.write(result)
                    sys.stdout.flush()
                except Exception as ex:
                    print(ex,"linu_shell mode ")
            elif sys.stdin in readable:
                inp=sys.stdin.read(1)
                '''
                if inp=="\t": #判断用户输入的是否是tab符号
                    tab_falg=True
                else:
                    if ord(inp)==13:
                        log.write("\n") #写入记录
                        log.flush() #刷新
                    else:
                        log.write(inp) #写入记录
                        log.flush() #刷新
                '''
                chan.send(inp)
    finally:
        termios.tcsetattr(sys.stdin,termios.TCSADRAIN, oldtty) # restore the saved tty attributes
def windows_shell(chan):
    """Stub handler for Windows hosts (termios is unavailable there)."""
    # Interactive channel support is not implemented on Windows;
    # only announce which branch was taken.
    print("windows shell")
def run():
    """Prompt the user to pick a host and open an interactive SSH session.

    Hosts come from a hard-coded list; auth_id "p" selects password
    authentication, anything else falls back to RSA key authentication.
    """
    host_list=[{"host":"192.168.1.10","username":"root","port":"22","password":"1","auth_id":"p"},
               {"host":"192.168.1.11","username":"root","port":"22","password":"1","auth_id":"r"},
               {"host":"192.168.1.12","username":"root","port":"22","password":"103","auth_id":"p"},
               ]
    # Show a 1-based menu of hosts and read the user's choice.
    for key,i in enumerate(host_list,1):
        print(key,i)
    mun=int(input("num:"))
    try:
        infor=host_list[mun-1]
        print(infor)
        username=infor.get("username")
        hostname=infor.get("host")
        auth_id=infor.get("auth_id")
        print(auth_id)
        password=infor.get("password")
        port=int(infor.get("port"))
        print(username,hostname,password,port)
        tran=paramiko.Transport((hostname,port)) # connect the transport to ip+port
        tran.start_client() # begin SSH client negotiation
        if auth_id=="p":
            tran.auth_password(username,password) # password authentication
        else:
            # RSA private-key authentication
            print("ras login")
            default_path=os.path.join(os.environ["HOME"],".ssh","id_rsa")
            print(default_path)
            new_path=input("please input rsa path:").strip()
            if len(new_path)==0:
                print("default rsa login")
                key_path=default_path
                key=paramiko.RSAKey.from_private_key_file(key_path)
            else:
                key_path=new_path
                key=paramiko.RSAKey.from_private_key_file(key_path)
            tran.auth_publickey(username, key)
        chan=tran.open_session() # open a channel on the transport
        chan.get_pty() # request a pseudo-terminal
        chan.invoke_shell() # activate the interactive shell
        print(username)
        linux_windows_choose(chan)
        chan.close()
        tran.close()
    except Exception as ex:
        print(ex)
        # NOTE(review): retry-by-recursion; repeated failures deepen the call stack
        run()
if __name__=="__main__":
    run()
"repo_name": "xiaoyongaa/ALL",
"path": "paramiko目录/终极版本.py",
"copies": "1",
"size": "4515",
"license": "apache-2.0",
"hash": 1318886414157745700,
"line_mean": 29.6470588235,
"line_max": 98,
"alpha_frac": 0.5063594912,
"autogenerated": false,
"ratio": 3.1882172915072684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41945767827072683,
"avg_score": null,
"num_lines": null
} |
"""Application registry."""
import collections
import collections.abc
import inspect

from .common import ResourceID
from .typings import ResourceIdentifier
class Registry(collections.UserDict):
    """
    JSON API application registry.

    This is a dictionary created on JSON API application set up.
    It contains a mapping between types, resource classes and schemas.
    """
    __slots__ = ('data',)

    def __getitem__(self, key):
        """
        Get schema for type or resource class type.

        :param key: Type string or resource class.
        :return: Schema instance
        """
        # Strings and classes index the mapping directly; any other object
        # is looked up via its class.
        return super(Registry, self).__getitem__(
            key if isinstance(key, str) or inspect.isclass(key) else type(key)
        )

    def ensure_identifier(self, obj, asdict=False) -> ResourceIdentifier:
        """
        Return the identifier object for the *resource*.
        (:class:`ResourceID <.common.ResourceID>`)

        .. code-block:: python3

            >>> registry.ensure_identifier({'type': 'something', 'id': 123})
            ResourceID(type='something', id='123')

        :arg obj:
            A two tuple ``(typename, id)``, a resource object or a resource
            document, which contains the *id* and *type* key
            ``{"type": ..., "id": ...}``.
        :arg bool asdict:
            Return ResourceID as dictionary if true
        :raises RuntimeError: if no schema is registered for ``type(obj)``.
        """
        # Use collections.abc explicitly: the bare ``collections.Sequence`` /
        # ``collections.Mapping`` aliases were deprecated since 3.3 and
        # removed in Python 3.10.
        if isinstance(obj, collections.abc.Sequence) and len(obj) == 2:
            # NOTE(review): a 2-character str is also a Sequence of length 2
            # and would be treated as a (typename, id) pair — confirm callers
            # never pass bare strings here.
            result = ResourceID(str(obj[0]), str(obj[1]))
        elif isinstance(obj, collections.abc.Mapping):
            result = ResourceID(str(obj['type']), str(obj['id']))
        else:
            try:
                schema_cls, _ = self.data[type(obj)]
                result = ResourceID(schema_cls.opts.resource_type,
                                    schema_cls.get_object_id(obj))
            except KeyError:
                raise RuntimeError(
                    'Schema for %s is not found.' % obj.__class__.__name__
                )
        return result._asdict() if asdict and result else result
| {
"repo_name": "vovanbo/aiohttp_json_api",
"path": "aiohttp_json_api/registry.py",
"copies": "1",
"size": "2054",
"license": "mit",
"hash": 959521360515591600,
"line_mean": 31.6031746032,
"line_max": 78,
"alpha_frac": 0.5666991237,
"autogenerated": false,
"ratio": 4.514285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5580984837985714,
"avg_score": null,
"num_lines": null
} |
#Application Requirements:
# Python 3.X (Latest is recommended)
# Pillow Image Library
# Tkinter GUI Library (comes by default with Python Build, just make sure though
import sys, os;
import time;
from multiprocessing import Process;
import _thread as thread;
#from tkinter import Tk, Button, Menu, Canvas, NW;
from tkinter import *;
from tkinter.filedialog import askopenfilename;
from PIL import Image, ImageTk;
from shutil import copyfile;
from functools import partial;
from tkinter.messagebox import askquestion;
import dbm;
#global variables
canvas = "";
set = False;
points = [];
display_image = "";
#FRONT END
def display_alert (title, string):
    """Open a small blocking Tk window titled *title* showing *string*."""
    window = Tk ();
    window.title (title);
    note = Message (window, text = string);
    note.config (width = "700", pady = "30", padx = "50");
    note.pack ();
    window.mainloop ();
def display_version ():
    """Show release and bug-reporting information in an alert window."""
    message = "LILY 1.0, the first release. For reporting bugs, contact duaraghav8@gmail.com";
    display_alert ("Version Information", message);
def display_credits ():
    """Show the application credits in an alert window."""
    # Local renamed from 'credits' to avoid shadowing the builtin.
    text = '''
    Raghav Dua (duaraghav8@gmail.com) B.Tech, UPES, Dehradun''';
    display_alert ("Credits", text);
def upload_img (x):
    # Button/menu event handler: run the blocking upload dialog off the UI thread.
    thread.start_new_thread (upload_image, ());
def upload_image ():
    """Worker-thread body: ask for an image, copy it into image_db/.

    Images larger than the preview area are resized before saving; smaller
    ones are copied verbatim.  Runs on a _thread worker, so thread.exit()
    ends only this helper thread.
    """
    Tk ().withdraw ();  # hide the implicit root window of the file dialog
    global win_width;
    global win_height;
    try:
        file_path = askopenfilename ();
        if (file_path == ""):
            thread.exit ();  # user cancelled the dialog
    except Exception as fnf:
        #print ("File was not uploaded");
        #print ("Verbose: " + str (fnf));
        thread.exit ();
    else:
        file_name = file_path.split ("/") [-1];
        print (file_name);
        test = Image.open (file_path);
        width, height = test.size;
        # Maximum size = preview-area fraction of the main window.
        max_width = int ( (80.7446 / 100) * win_width );
        max_height = int ( (98.6651 / 100) * win_height );
        if (width > max_width or height > max_height):
            # Oversized: store a resized copy instead of the original,
            # then end this worker thread early.
            final = test.resize ( (max_width, max_height) );
            final.save ("image_db/" + file_name);
            test.close ();
            final.close ();
            print ("Verbose: " + file_path + " has been added to the local DataBase");
            successfully_uploaded ();
            thread.exit ();
        try:
            copyfile (file_path, "image_db/" + file_name);
        except Exception as e:
            print ("The application has encountered an un-expected error. Please contact the application developer at duaraghav8@gmail.com");
            print ("Verbose: \n" + str (e));
        else:
            print ("Verbose: " + file_path + " has been added to the local DataBase");
            successfully_uploaded ();
def successfully_uploaded ():
    # Confirmation dialog shown after a successful copy into image_db/.
    display_alert ("Upload Successful", "Image has been uploaded to local database successfully.");
def clear_image_db (x):
    # Button event handler: run the destructive clear operation off the UI thread.
    thread.start_new_thread (empty_database, ());
def empty_database ():
    """Ask for confirmation, then delete every file in image_db/ (including
    the crops/ sub-directory) and every file in profiles/.

    Runs on a _thread worker; thread.exit() ends the worker when done.
    """
    confirm = askquestion ("Clear Local Database", "This operation effectively removes all the images and their corresponding profiles from the database. Would you like to proceed?", icon = "warning");
    if (confirm == "yes"):
        try:
            os.chdir ("image_db");
            files = os.listdir ();
        except FileNotFoundError:
            display_alert ("Error", "The directory 'image_db' was not found.");
        except Exception as e:
            pass;  # best-effort: other failures are deliberately ignored
        else:
            for f in files:
                if (f == "crops"):
                    # crops/ is a sub-directory: empty it instead of unlinking it.
                    cropped_files = os.listdir ("crops");
                    for g in cropped_files:
                        os.remove ("crops/" + g);
                    continue;
                os.remove (f);
            print ("Database has successfully been cleared");
        os.chdir ("..");
        try:
            os.chdir ("profiles");
            files = os.listdir ();
        except FileNotFoundError:
            # Fixed copy-paste bug: this branch concerns 'profiles', not 'image_db'.
            display_alert ("Error", "The directory 'profiles' was not found.");
        except Exception as e:
            pass;
        else:
            for f in files:
                os.remove (f);
            display_alert ('Update', 'Database was successfully cleared');
    thread.exit ();
def initialize_menu (window):
    """Attach the File/Help menu bar to *window*."""
    top = Menu (window);
    window.config (menu = top);
    upload_command = partial (upload_img, 0);
    # Menu commands are invoked with NO arguments, so all three parameters of
    # spawn_process (cropped, side, x) must be pre-bound.  The original code
    # bound only ('RIGHT', 0) / ('LEFT', 0), which raised a TypeError on
    # click; bind cropped=False to match the equivalent buttons.
    edit_command_right = partial (spawn_process, False, 'RIGHT', 0);
    edit_command_left = partial (spawn_process, False, 'LEFT', 0);
    file_menu = Menu (top);
    file_menu.add_command (label = 'Upload New Side Face Image', command = upload_command, underline = 0);
    file_menu.add_command (label = 'Create Right Profile', command = edit_command_right, underline = 0);
    file_menu.add_command (label = 'Create Left Profile', command = edit_command_left, underline = 0);
    file_menu.add_command (label = 'Quit', command = window.quit, underline = 0);
    help_menu = Menu (top);
    help_menu.add_command (label = 'Credits', command = display_credits, underline = 0);
    help_menu.add_command (label = 'Version', command = display_version, underline = 0);
    top.add_cascade (label = 'File', menu = file_menu, underline = 0);
    top.add_cascade (label = 'Help', menu = help_menu, underline = 0);
def initialize_buttons (root):
    """Create and place the left-hand column of action buttons."""
    global win_width;
    button_width = int ((2.221 / 100) * win_width);
    # Pre-bind spawn_process (cropped, side, x); the click event fills x.
    edit_command_right = partial (spawn_process, False, 'RIGHT');
    edit_command_left = partial (spawn_process, False, 'LEFT');
    edit_cropped_command_left = partial (spawn_process, True, 'LEFT');
    edit_cropped_command_right = partial (spawn_process, True, 'RIGHT');
    upload_image_button = Button (root, text = "Upload Side Face Image", width = button_width);
    upload_image_button.pack ();
    upload_image_button.place (relx=.1, rely=.12, anchor="c");
    upload_image_button.bind ("<Button-1>", upload_img); #have to use upload_image () here
    clear_imagedb_button = Button (root, text = "Clear Local Database", width = button_width);
    clear_imagedb_button.pack ();
    clear_imagedb_button.place (relx=.1, rely=.18, anchor="c");
    clear_imagedb_button.bind ("<Button-1>", clear_image_db);
    edit_image_button_r = Button (root, text = "Create Right Profile", width = button_width);
    edit_image_button_r.pack ();
    edit_image_button_r.place (relx=.1, rely=.24, anchor="c");
    edit_image_button_r.bind ("<Button-1>", edit_command_right);
    edit_image_button_l = Button (root, text = "Create Left Profile", width = button_width);
    edit_image_button_l.pack ();
    edit_image_button_l.place (relx=.1, rely=.30, anchor="c");
    edit_image_button_l.bind ("<Button-1>", edit_command_left);
    temp = Button (root, text = "Display new Image", width = button_width);
    temp.pack ();
    temp.place (relx=.1, rely=.36, anchor="c");
    temp.bind ("<Button-1>", temp_func_launcher);
    clear = Button (root, text = "Clear Preview", width = button_width);
    clear.pack ();
    clear.place (relx=.1, rely=.42, anchor="c");
    clear.bind ("<Button-1>", clear_preview);
    crop = Button (root, text = "Crop Selected Region", width = button_width);
    crop.pack ();
    crop.place (relx=.1, rely=.48, anchor="c");
    crop.bind ("<Button-1>", crop_region);
    # NOTE(review): 'edit_cropped' is reused for both buttons below; harmless
    # since each widget stays alive via its parent, but confusing to read.
    edit_cropped = Button (root, text = "Edit Cropped Region as Right Profile", width = button_width);
    edit_cropped.pack ();
    edit_cropped.place (relx=.1, rely=.54, anchor="c");
    edit_cropped.bind ("<Button-1>", edit_cropped_command_right);
    edit_cropped = Button (root, text = "Edit Cropped Region as Left Profile", width = button_width);
    edit_cropped.pack ();
    edit_cropped.place (relx=.1, rely=.60, anchor="c");
    edit_cropped.bind ("<Button-1>", edit_cropped_command_left);
#BACK END
def crop_region (x):
    """Crop the rectangle spanned by the four marked points and save it
    under image_db/crops/ with the same file name."""
    global points, display_image;
    image = Image.open (display_image);
    xlist = [i [0] for i in points];
    xmin = min (xlist);
    xmax = max (xlist);
    ylist = [i [1] for i in points]
    ymax = max (ylist);
    ymin = min (ylist);
    #print (xmin, xmax, ymin, ymax);
    # NOTE(review): marks are canvas coordinates; the hard-coded 240/40 look
    # like the preview area's canvas offset — confirm against the layout math
    # in the main block (which computes offsets from win_width/win_height).
    box = (xmin - 240, ymin - 40, xmax - 240, ymax - 40);
    print (image.size);
    print (ymax, ymin);
    new = image.crop (box);
    new.save ('image_db/crops/' + (display_image.split ('/')[-1]));
    new.close ();
    image.close ();
def callback (event):
    """Canvas click handler: record up to four crop marks."""
    radius = 2;
    global points;
    #print (event.x, " ", event.y);
    if (len (points) < 4):
        # Draw a small red dot where the user clicked and remember the spot.
        canvas.create_oval (event.x - radius, event.y - radius, event.x + radius, event.y + radius, fill = "red", outline = "#DDD", width = 0);
        points.append ( (event.x, event.y) );
    else:
        display_alert ('Error', 'Crop tool does not allow more than 4 marks');
def spawn_process (cropped, side, x):
    """Run the spot-marking editor in a child process and block until it exits.

    cropped selects whether the last cropped region or a full image is edited;
    side is 'LEFT' or 'RIGHT'; x is the (unused) event/placeholder argument.
    """
    target = mark_spots_cropped if (cropped) else mark_spots;
    worker = Process (target = target, args = (side,));
    worker.start ();
    worker.join ();
def mark_spots_cropped (side):
    """Edit the most recently cropped image: look up its name and delegate.

    The crop tool stores the cropped file's path in a small dbm database;
    the file name is handed to the child editor via the environment.
    """
    # Use a with-statement so the dbm handle is closed (the original
    # leaked it).  dbm objects support the context-manager protocol.
    with dbm.open ('application_data/cropped_image_name', 'r') as db:
        file_name = db ['image'].decode ();
    file_name = file_name.split ('/') [-1];
    print (file_name);
    os.environ ['CROPPED'] = file_name;
    mark_spots (side);
def mark_spots (side):
    """Launch the external editor for the given profile side ('LEFT'/'RIGHT').

    The side is passed to the child script through the environment.
    """
    os.environ ['LION_PROFILE_SIDE'] = side;
    os.system ('python edit_image.py');
def mark_image_point (event):
    # Draw a tiny red marker at the clicked canvas position.
    spot_radius = 1;
    canvas.create_oval (event.x - spot_radius, event.y - spot_radius, event.x + spot_radius, event.y + spot_radius, fill = "red", outline = "#DDD", width = 0);
def clear_preview (x):
    """Wipe the preview canvas and forget any crop marks."""
    global points;
    canvas.delete (ALL);
    # Redraw the white preview rectangle (same geometry as at start-up).
    canvas.create_rectangle (int ( (18.1347 / 100) * win_width), int ( (4.491 / 100) * win_height), win_width - int (2.5906 / 100 * win_width), win_height + int ( (4.491 / 100) * win_height), fill = "white");
    points = [];
def temp_func_launcher (x):
    # Button event handler: load/display the chosen image on a background thread.
    thread.start_new_thread (temp_func, (0, ));
def browse ():
    """Ask the user to pick an image from image_db/ and persist the choice.

    The selected path is stored in a dbm database so the separate editor
    process can find it later.  Returns the selected path.
    """
    global display_image, buffer;  # NOTE(review): 'buffer' is never assigned here — verify it is still needed
    crop_file = dbm.open ('application_data/cropped_image_name', 'c');
    display_image = askopenfilename (initialdir = 'image_db');
    crop_file ['image'] = display_image;
    crop_file.close ();
    return (display_image);
def temp_func (x):
    """Load the image chosen via browse() and draw it on the preview canvas."""
    global set;
    global win_height;
    global win_width;
    # photo stays global so the PhotoImage keeps a live reference — tkinter
    # stops displaying images once they are garbage-collected.
    global photo;
    img = Image.open (browse ());
    photo = ImageTk.PhotoImage (img);
    # Anchor the image at the top-left corner of the white preview area.
    lion = canvas.create_image (int ( (18.1347 / 100) * win_width) + 4, int ((5.2395 / 100) * win_height), image = photo, anchor = NW);
    canvas.bind ("<Button-1>", callback);
def clean_up ():
    """Window-close handler: tear down the Tk root and terminate hard."""
    #print ("Closing and wrapping up...");
    root.destroy ();
    # os._exit skips normal interpreter shutdown; it also kills worker threads.
    os._exit (0);
if (__name__ == '__main__'):
    # Refuse to start unless the login screen set the LILY_LOGIN marker.
    if (not os.getenv ('LILY_LOGIN')):
        display_alert ('Access Denied', "You are not authorized to use the application. Kindly Log in via the home screen.");
        os._exit (0);
    # Drop any stale crop selection left over from a previous session.
    if (os.path.exists ('application_data/cropped_image_name')):
        os.remove ('application_data/cropped_image_name');
    root = Tk ();
    root.wm_protocol ("WM_DELETE_WINDOW", clean_up);
    root.title ("Lion Identification Lossless Yield - Main Menu");
    # Window size: screen size minus small hand-tuned percentage margins.
    win_width = root.winfo_screenwidth () - int ( (1.0981 / 100) * root.winfo_screenwidth ()); #15
    win_height = root.winfo_screenheight () - int ( (13.0208 / 100) * root.winfo_screenheight ()); #100
    #print (root.winfo_screenwidth ());
    #print (root.winfo_screenheight ());
    #print ("-----");
    #print (win_width, win_height);
    #print (win_width - int (2.5906 / 100 * win_width), win_height + int ( (4.491 / 100) * win_height));
    root.title ("WII");
    root.state ('zoomed');
    root.wm_resizable (False, False);
    initialize_menu (root);
    canvas = Canvas (root, width = win_width - int ((2.2206 / 100) * win_width), height = win_height + int ( (4.491 / 100) * win_height));
    canvas.pack ();
    # White rectangle marks the image preview area to the right of the buttons.
    canvas.create_rectangle (int ( (18.1347 / 100) * win_width), int ( (4.491 / 100) * win_height), win_width - int (2.5906 / 100 * win_width), win_height + int ( (4.491 / 100) * win_height), fill = "white");
    #photo = ImageTk.PhotoImage (file = 'image_db/lion.jpg');
    #photo = ImageTk.PhotoImage (file = 'lion_big.jpg');
    initialize_buttons (root);
    root.mainloop ();
| {
"repo_name": "duaraghav8/LILY",
"path": "engine.py",
"copies": "1",
"size": "11523",
"license": "mit",
"hash": -1958456998744605400,
"line_mean": 32.2946428571,
"line_max": 205,
"alpha_frac": 0.6355983685,
"autogenerated": false,
"ratio": 3.0172820109976435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4152880379497643,
"avg_score": null,
"num_lines": null
} |
#!/Applications/Anaconda3/anaconda/bin/python
# https://www.thecodeship.com/web-development/proxypy-cross-domain-javascript-requests-python/
from flask import Flask, request, Response, render_template, send_from_directory
import json
import proxypy
import urllib.parse
import os
app = Flask(__name__,static_folder='public')
@app.route("/")
def index():
    """Serve the single-page app entry point from the static folder."""
    return send_from_directory(app.static_folder, "index.html")
@app.route('/js/<path:filename>')
def serve_static_js(filename):
    """Serve bundled JavaScript assets from public/js/."""
    return send_from_directory(app.static_folder + '/js/', filename)
@app.route('/css/<path:filename>')
def serve_static_css(filename):
    """Serve stylesheets from public/css/."""
    return send_from_directory(app.static_folder + '/css/', filename)
@app.route("/abfahrten_for_station",methods=['GET', 'POST'])
def abfahrten_for_station():
    """Proxy endpoint: forward the query string to proxypy and relay the
    upstream JSON body with its original HTTP status code."""
    arguments=dict(urllib.parse.parse_qsl(request.query_string))
    reply = proxypy.abfahrten_for_station(arguments)
    http_code=reply["status"]["http_code"]
    data=reply["content"] # upstream body, already a JSON string
    return Response(data,status=http_code,mimetype='application/json')
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # never expose this setting in production.
    app.run(debug=True)
# http://127.0.0.1:5000/crossdomain?url=https://haltestellenmonitor.vrr.de/backend/app.php/api/stations/table
# https://stackoverflow.com/questions/11945523/forcing-application-json-mime-type-in-a-view-flask
# http://blog.luisrei.com/articles/flaskrest.html
"repo_name": "zmijunkie/elm_haltestellen_monitor_bootstrap4",
"path": "server.py",
"copies": "1",
"size": "1405",
"license": "bsd-2-clause",
"hash": 2903474493433083400,
"line_mean": 33.2926829268,
"line_max": 109,
"alpha_frac": 0.7231316726,
"autogenerated": false,
"ratio": 3.1859410430839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.929408787906264,
"avg_score": 0.022996967324251896,
"num_lines": 41
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 11
# http://www.pythonchallenge.com/pc/return/5808.html
# Username: huge; Password: file
# Keyword: evil
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_11.py`
'''
from PIL import Image
import numpy as np
def main():
    '''
    Hint: odd even

    Splits cave.jpg into two half-width images — one from the "even"
    checkerboard pixels, one from the "odd" ones — revealing the hidden
    picture.
    '''
    photo_path = './odd-even_chall_11/cave.jpg'
    photo = Image.open(photo_path)
    width, height = photo.size # 640, 480
    # Load image into numpy array
    pixel_data = np.asarray(photo)
    # pixel_data.shape: 480 rows, 640 pixels/row, 3 points with RGB info
    even_pixels = np.zeros((height, width // 2, 3), dtype=np.uint8)
    odd_pixels = np.zeros((height, width // 2, 3), dtype=np.uint8)
    # Split image into two by (row + column) parity; each output row keeps
    # every second pixel, with the starting offset alternating per row.
    for row in range(height):
        even_pixels[row] = (pixel_data[row, ::2, :] if row % 2 == 0
                            else pixel_data[row, 1::2, :])
        odd_pixels[row] = (pixel_data[row, ::2, :] if row % 2 == 1
                           else pixel_data[row, 1::2, :])
    # Draw new images and save
    even_photo = Image.fromarray(even_pixels, 'RGB')
    even_photo.save('./odd-even_chall_11/even.jpg')
    odd_photo = Image.fromarray(odd_pixels, 'RGB')
    odd_photo.save('./odd-even_chall_11/odd.jpg')
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_11.py",
"copies": "1",
"size": "1455",
"license": "mit",
"hash": 9140503849155127000,
"line_mean": 28.693877551,
"line_max": 72,
"alpha_frac": 0.606185567,
"autogenerated": false,
"ratio": 3.1699346405228757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42761202075228755,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 14
# http://www.pythonchallenge.com/pc/return/italy.html
# Username: huge; Password: file
# Keyword: cat
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_14.py`
'''
from PIL import Image
import numpy as np
def main():
    '''
    Hint: walk around. remember: 100*100 = (100+99+99+98) + (...
    http://www.pythonchallenge.com/pc/return/wire.png

    Unwinds a 1x10000 pixel strip into a 100x100 image by laying the
    pixels along an inward clockwise spiral.
    '''
    photo_path = './spiral_chall_14/wire.png'
    photo = Image.open(photo_path)
    wire_pixels = np.asarray(photo)
    print(wire_pixels.shape) # (1, 10000, 3)
    # Create numpy array to hold pixels
    height, width = 100, 100
    pic_array = np.zeros((height, width, 3), dtype=np.uint8)
    # Copy wire_pixels in spiral order to pic_array.  Run lengths shrink as
    # 100, 99, 99, 98, 98, ... matching the hint: n is decremented only after
    # the RIGHT and LEFT legs.
    wire_index = 0
    n = width
    top, bottom = 0, height
    left, right = 0, width
    direction = 0 # 0: right, 1: down, 2: left, 3: up
    while n > 0:
        if direction == 0: # RIGHT
            # Copying into row from left to right
            pic_array[top, left:right, :] = \
                wire_pixels[0, wire_index:wire_index + n, :]
            wire_index += n
            n -= 1
            top += 1
        elif direction == 1: # DOWN
            # Copying into right-most column in order
            pic_array[top:bottom, right - 1, :] = \
                wire_pixels[0, wire_index:wire_index + n, :]
            wire_index += n
            right -= 1
        elif direction == 2: # LEFT
            # Copying into bottom-most row in reverse order
            pic_array[bottom - 1, left:right, :] = \
                wire_pixels[0, wire_index:wire_index + n, :][::-1]
            wire_index += n
            n -= 1
            bottom -= 1
        elif direction == 3: # UP
            # Copying into left-most column in reverse order
            pic_array[top:bottom, left, :] = \
                wire_pixels[0, wire_index:wire_index + n, :][::-1]
            wire_index += n
            left += 1
        direction = (direction + 1) % 4
    # Create image from array
    spiral_photo = Image.fromarray(pic_array, 'RGB')
    spiral_photo.save('./spiral_chall_14/spiral.png')
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_14.py",
"copies": "1",
"size": "2330",
"license": "mit",
"hash": -1914488259990793500,
"line_mean": 29.2597402597,
"line_max": 66,
"alpha_frac": 0.5497854077,
"autogenerated": false,
"ratio": 3.4416543574593796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44914397651593796,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 16
# www.pythonchallenge.com/pc/return/mozart.html
# Username: huge; Password: file
# Keyword: romance
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_16.py`
'''
from PIL import Image
import numpy as np
def main():
    '''
    Hint: let me get this straight
    Image has bars of 5 pink pixels with two white ones on ends,
    seemingly one/row
    Need to align the pink bars

    Rotates each row so that its pink marker bar starts the row, which
    straightens the scrambled image.
    '''
    photo_path = './mozart-static_chall_16/mozart.gif'
    photo = Image.open(photo_path)
    width, height = photo.size # 640, 480
    mode = photo.mode # P
    # 5th row (index 4), 106th pixel (index 105) is pink
    # print('Pink pixel: {}'.format(photo.getpixel((106, 4)))) # 195
    pink = 195
    photo_array = np.asarray(photo)
    shifted_array = np.zeros((height, width), dtype=np.uint8)
    for y in range(height):
        for x in range(width - 4):
            # A run that is pink at both ends (5 apart) marks the bar start.
            if photo.getpixel((x, y)) == pink and \
               photo.getpixel((x + 4, y)) == pink:
                # Number of pixels from pink to end is width - x
                shifted_array[y, 0:width - x] = photo_array[y, x:]
                shifted_array[y, width - x:] = photo_array[y, 0:x]
    # Create new photo from re-aligned rows
    new_img = Image.fromarray(shifted_array, mode)
    new_img.save('./mozart-static_chall_16/mozart_16_solution.gif')
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_16.py",
"copies": "1",
"size": "1552",
"license": "mit",
"hash": 2186771597907513900,
"line_mean": 28.8461538462,
"line_max": 68,
"alpha_frac": 0.612757732,
"autogenerated": false,
"ratio": 3.2605042016806722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4373261933680672,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 22
# http://www.pythonchallenge.com/pc/hex/copper.html
# http://www.pythonchallenge.com/pc/hex/white.gif
# Username: butter; Password: fly
# Keyword: bonus
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_22.py`
'''
from PIL import Image, ImageDraw
def main():
    '''
    Hint: emulate (picture of joystick)
    <!-- or maybe white.gif would be more bright -->
    http://www.pythonchallenge.com/pc/hex/white.gif shows a 200x200 black
    square, download has 133 pages in preview (frames?)

    Each GIF frame holds a single lit pixel; treating its offset from the
    frame centre (100, 100) as a joystick delta traces out letters.
    '''
    img_path = './joystick_chall_22/white.gif'
    with Image.open(img_path) as gif:
        # print('Format: {}, Mode: {}'.format(gif.format, gif.mode)) # GIF, P
        indices = []
        while True:
            hist = gif.histogram() # 1 pixel in hist bin 8 (0-255)
            color = hist.index(1) # bin 8 out of 256
            # print('Color bin: {}'.format(color))
            data = list(gif.getdata())
            pixel_index = data.index(color) # 20100 in 1st frame
            indices.append(pixel_index)
            # print('Pixel index: {}'.format(pixel_index))
            try:
                # Note: gif.n_frames gives total frames (133)
                gif.seek(gif.tell() + 1)
                # print('Frame: {}'.format(gif.tell() + 1))
            except EOFError:
                break # end of sequence
    # Convert flattened indices to get position on 200x200 square
    coords = list(map(lambda x: divmod(x, 200), indices))
    # print(sum([1 for r, c in coords if r == 100 == c])) # 5
    new = Image.new('RGB', (500, 200))
    draw = ImageDraw.Draw(new)
    x, y = (0, 100)
    for coord in coords:
        # divmod gives (row, col), so dy comes from coord[0], dx from coord[1].
        dx = coord[1] - 100
        dy = coord[0] - 100
        if coord == (100, 100):
            # New letter, move right and reset height
            x += 50
            y = 100
        x += dx
        y += dy
        draw.point((x, y))
    new.save('./joystick_chall_22/final.jpg')
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_22.py",
"copies": "1",
"size": "2224",
"license": "mit",
"hash": -3107083838477155300,
"line_mean": 31.231884058,
"line_max": 78,
"alpha_frac": 0.5431654676,
"autogenerated": false,
"ratio": 3.598705501618123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46418709692181226,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 25
# http://www.pythonchallenge.com/pc/hex/lake.html
# Username: butter; Password: fly
# Keyword: decent
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_25.py`
'''
import wave
from PIL import Image
def main():
    '''
    Hint: imagine how they sound; picture of lake with puzzle pieces overlaid
    can you see the waves; name of image is lake1.jpg
    Download wave files 1-25: http://www.pythonchallenge.com/pc/hex/lake[N].wav
    Bash command (-o option writes to file vs. stdout, #1 refers to seq val
    to name the file, -U is proxy-user login info):
    curl -U butter:fly -o "./waves_chall_25/lake#1.wav"
        "http://www.pythonchallenge.com/pc/hex/lake[1-25].wav"

    Each WAV's raw frames are really 60x60 RGB tiles; pasting them into a
    5x5 grid reassembles the lake picture.
    '''
    waves = ['./waves_chall_25/lake{}.wav'.format(i) for i in range(1, 26)]
    w = 60  # Challenge image is 640x480, these values by trial and error
    h = 60
    # Look at wave file properties - all files are same
    '''
    with wave.open(wave_files[0], 'rb') as tmp:
        print('Frames: {}'.format(tmp.getnframes()))  # 10800
        print('Channels: {}'.format(tmp.getnchannels()))  # 1
        print('Samp width: {}'.format(tmp.getsampwidth()))  # 1
        print('Frame rate: {}'.format(tmp.getframerate()))  # 9600
        bytes_datum = tmp.readframes(tmp.getnframes())
        tmpimg = Image.frombytes('RGB', (w, h), bytes_datum)
        tmpimg.save('./waves_chall_25/test.jpg')
    '''
    # Convert each wave file to an image and combine into one
    img = Image.new('RGB', (w * 5, h * 5), color=(255, 255, 255))
    for i, filename in enumerate(waves):
        with wave.open(filename, 'rb') as wavfile:
            bytes_data = wavfile.readframes(wavfile.getnframes())
            wave_img = Image.frombytes('RGB', (w, h), bytes_data)
            # Tile i goes to column i % 5, row i // 5.
            img.paste(wave_img, ((i % 5) * w, (i // 5) * h))
    img.save('./waves_chall_25/final.jpg')
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_25.py",
"copies": "1",
"size": "2083",
"license": "mit",
"hash": 8976935247152338000,
"line_mean": 33.7166666667,
"line_max": 79,
"alpha_frac": 0.618819011,
"autogenerated": false,
"ratio": 3.1898928024502298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.430871181345023,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 27
# http://www.pythonchallenge.com/pc/hex/speedboat.html
# http://www.pythonchallenge.com/pc/hex/zigzag.gif
# Username: butter; Password: fly
# Keyword: ../ring/bell.html, repeat, switch
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_27.py`
'''
import bz2
import keyword
from PIL import Image
def main():
    '''
    Hint: between the tables, picture of rowboat on lake with zigzag drawn on
    did you say gif?
    oh, and this is NOT a repeat of 14 (spiral matrix)

    Pixels whose value differs from the palette-predicted chain carry a
    hidden bz2 payload; its non-keyword words point to the next page.
    '''
    with Image.open('./zigzag_chall_27/zigzag.gif') as img:
        w, h = img.size # (320, 270). Format: GIF; Mode: P (8-bit pixels)
        # print(img.getextrema()) # Pixel range: 0-255
        palette = img.getpalette()[::3]
        # Palette to data
        data = img.getdata() # Seq of flattened pixel values
        oddballs = []
        expected = -1
        i_s = []
        # Each pixel "predicts" the next via the palette; collect the
        # pixels that break the prediction — they are the payload bytes.
        for i, p in enumerate(data):
            if expected >= 0 and p != expected:
                oddballs.append(p)
                i_s.append(i)
            expected = palette[p]
        buzz_data = bz2.decompress(bytearray(oddballs))
        # print(buzz_data)
        # Create image with oddball indices
        '''
        new = Image.new('RGB', (w, h))
        new_data = [(0, 0, 0)] * len(data)
        for i in i_s:
            new_data[i] = (255, 0, 0)
        new.putdata(new_data)
        new.save('./zigzag_chall_27/unexpected.jpg')
        '''
        not_kws = []
        for word in (buzz_data.decode('utf-8')).split():
            if word not in not_kws and not keyword.iskeyword(word):
                not_kws.append(word)
        # ['../ring/bell.html', 'repeat', 'exec', 'print', 'switch']
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_27.py",
"copies": "1",
"size": "1899",
"license": "mit",
"hash": 8282176164191294000,
"line_mean": 29.6290322581,
"line_max": 77,
"alpha_frac": 0.5681937862,
"autogenerated": false,
"ratio": 3.246153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9314347632353847,
"avg_score": 0,
"num_lines": 62
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 28
# http://www.pythonchallenge.com/pc/ring/bell.html
# http://www.pythonchallenge.com/pc/ring/green.html
# Username: repeat; Password: switch
# Keyword: guido
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_28.py`
'''
from PIL import Image
def main():
    '''
    Hint: many pairs ring-ring, picture of waterfall
    RING-RING-RING say it out loud
    yes! green!

    The hidden text is encoded in the differences of consecutive green-
    channel value pairs; 42 ('*') bytes are padding to be dropped.
    '''
    with Image.open('./ring_chall_28/bell.png') as bell:
        w, h = bell.size # (640, 480). Format: PNG; Mode: RGB
        # histo = bell.histogram()
        # print(histo[256:513]) # green pixels
        data = bell.getdata()
        green = [g for r, g, b in data] # 307200 pixels
        # Get difference between green values next to each other
        tmp = [abs(green[i] - green[i + 1]) for i in range(0, len(green), 2)]
        # print(bytes(tmp).decode())
        # Remove the *'s (42)
        print(bytes(filter(lambda x: x != 42, tmp)).decode())
        # whodunnit().split()[0] ?
    return 0
if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_28.py",
"copies": "1",
"size": "1199",
"license": "mit",
"hash": -1396793311569825000,
"line_mean": 27.5476190476,
"line_max": 77,
"alpha_frac": 0.6055045872,
"autogenerated": false,
"ratio": 3.214477211796247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43199817989962463,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 30
# http://www.pythonchallenge.com/pc/ring/yankeedoodle.html
# http://www.pythonchallenge.com/pc/ring/yankeedoodle.csv
# Username: repeat; Password: switch
# Keyword: grandpa
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_30.py`
'''
from PIL import Image
def main():
    '''
    Hint: relax you are on 30; picture of beach scene
    The picture is only meant to help you relax
    while you look at the csv file

    Decodes the CSV twice: once as pixel data to rebuild a hidden image,
    and once by picking specific digits out of each float's text form.
    '''
    with open('./relax_chall_30/yankeedoodle.csv') as csvfile:
        # The file is one long comma-separated list of float strings.
        nums = [n.strip() for n in csvfile.read().split(',')]
        len_n = len(nums)  # 7367
        # float_nums = [float(s) for s in nums]
        # print('Max: {}, Min: {}'.format(max(float_nums), min(float_nums)))
        # Factor the value count to get candidate image dimensions.
        w, h = [n for n in range(2, int(len_n / 2) + 1) if
                len_n % n == 0]  # [53, 139]
        img = Image.new('P', (w, h))
        # Scale each float into a palette index and rebuild the image.
        pixels = [int(float(n) * 256) for n in nums]
        img.putdata(pixels)
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
        img = img.transpose(Image.ROTATE_90)
        img.save('./relax_chall_30/result.png')
        # The real message: chosen digits of three consecutive floats
        # concatenate into one ASCII character code each.
        # n = str(x[i])[5] + str(x[i+1])[5] + str(x[i+2])[6]
        msg = [int(nums[i][5] + nums[i + 1][5] + nums[i + 2][6])
               for i in range(0, len_n - 3, 3)]
        print(bytes(msg).decode())
    '''
    So, you found the hidden message.
    There is lots of room here for a long message, but we only need very
    little space to say "look at grandpa", so the rest is just garbage.
    '''
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_30.py",
"copies": "1",
"size": "1730",
"license": "mit",
"hash": 4410425752049177000,
"line_mean": 31.641509434,
"line_max": 76,
"alpha_frac": 0.576300578,
"autogenerated": false,
"ratio": 3.0948121645796065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4171112742579607,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 31
# http://www.pythonchallenge.com/pc/ring/grandpa.html
# http://www.pythonchallenge.com/pc/rock/grandpa.html
# Username: repeat; Password: switch
# Keyword: kohsamui, thailand; arecibo
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_31.py`
'''
from PIL import Image
def main():
    '''
    Hint: where am I? Picture of rock near a beach
    short break, this ***REALLY*** has nothing to do with Python
    Login required: island: country -> search for Grandpa rock
    Next page:
    UFO's ?
    That was too easy. You are still on 31...
    Window element with iterations attribute of 128

    Re-renders the Mandelbrot set over the viewport encoded in the page,
    then diffs it against the provided image; the differing pixels encode
    a second, hidden image.
    '''
    # Viewport of the complex plane (taken from the page's parameters).
    left = 0.34
    top = 0.57
    width = 0.036   # x-axis = reals
    height = 0.027  # y-axis = imaginaries
    max_iter = 128
    with Image.open('./mandelbrot_chall_31/mandelbrot_copy.gif') as mb:
        w, h = mb.size
        print('W: {}, H: {}'.format(w, h))
        r_step = width / w
        i_step = height / h
        new_data = []
        # Scan rows bottom-up so image rows line up with the plane's y-axis.
        for y in range(h - 1, -1, -1):
            for x in range(w):
                c = complex(left + x * r_step, top + y * i_step)
                z = complex(0, 0)
                # f_c(z) = z^2 + c for f_c(0), f_c(f_c(0)), etc.
                for n_iter in range(max_iter):
                    z = z**2 + c
                    if abs(z) > 2:
                        break
                # Pixel value = iteration count reached before escape.
                new_data.append(n_iter)
        new_mb = mb.copy()
        new_mb.putdata(new_data)
        new_mb.save('./mandelbrot_chall_31/new_mandelbrot.gif')
        # Find differences between the original and re-rendered images
        changes = [(a - b)for a, b in zip(mb.getdata(), new_mb.getdata())
                   if a != b]  # Length: 1679
        print(changes)
        factors = [n for n in range(2, int(len(changes) / 2) + 1) if
                   len(changes) % n == 0]  # [23, 73]
        # Create a 1-bit image from the differences (threshold at 16)
        new_new_mb = Image.new('1', tuple(factors))
        new_new_data = [1 if p >= 16 else 0 for p in changes]
        new_new_mb.putdata(new_new_data)
        new_new_mb.save('./mandelbrot_chall_31/message.gif')
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_31.py",
"copies": "1",
"size": "2272",
"license": "mit",
"hash": -2450235680595348000,
"line_mean": 30.5555555556,
"line_max": 73,
"alpha_frac": 0.542693662,
"autogenerated": false,
"ratio": 3.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9242693662,
"avg_score": 0,
"num_lines": 72
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 33
# http://www.pythonchallenge.com/pc/rock/beer.html
# http://www.pythonchallenge.com/pc/rock/beer2.png
# Username: kohsamui; Password: thailand
# Keyword: gremlins
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_33.py`
'''
import math
import numpy as np
from PIL import Image
def main():
    '''
    Hint: 33 bottles of beer on the wall
    If you are blinded by the light,
    remove its power, with its might.
    Then from the ashes, fair and square,
    another truth at you will glare.
    Image is beer1.jpg -> beer2.jpg says "no, png"
    New page has greyscale image

    Repeatedly strips the two brightest remaining pixel values; each time
    the remaining pixel count is a perfect square, reshaping into a square
    image reveals another letter of the keyword.
    '''
    path = './light_chall_33/beer2.png'
    with Image.open(path) as beer:
        w, h = beer.size  # 138, 138; mode: L; format: PNG
        mode = beer.mode
        # (pixel value, count) pairs for every gray value actually present.
        histo = np.array([(pix, count) for pix, count in
                          enumerate(beer.histogram()) if count != 0])
        # print(histo)  # Length is 66, pixels come in consecutive pairs
        # Find where pixel counts still make a square -> every other index
        # sqrts = [np.sqrt(p) for p in np.cumsum(histo[:, 1])]
        # print(sqrts)
        data = list(beer.getdata())
        for i in range(len(histo) - 2, 0, -2):
            # Start at index 64, jump back by 2's, remove 2 brightest pixels,
            # reshape image, save as new image
            data = [x for x in data if x < histo[i][0]]
            side = int(math.sqrt(len(data)))
            next_img = Image.new(mode, (side, side))
            next_img.putdata(data)
            next_img.save('./light_chall_33/res_{}.png'
                          .format(int((64 - i) / 2)))
    # Result:
    # xxxxxxxxxxxvrnegarwinemoldinosl_
    # 'Squared' letters: gremlins
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_33.py",
"copies": "1",
"size": "1955",
"license": "mit",
"hash": -2832526241788979000,
"line_mean": 31.5833333333,
"line_max": 77,
"alpha_frac": 0.5887468031,
"autogenerated": false,
"ratio": 3.33617747440273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.942492427750273,
"avg_score": 0,
"num_lines": 60
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 7
# http://www.pythonchallenge.com/pc/def/hockey.html
# http://www.pythonchallenge.com/pc/def/oxygen.html
# Keyword: integrity
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_07.py`
'''
import PIL
import PIL.Image
def main():
    '''
    Hint: photo with a stripe of grayscale boxes
    http://www.pythonchallenge.com/pc/def/oxygen.png

    Reads the gray value of each box along the image's middle row; those
    values are ASCII codes spelling a message, which itself contains a
    bracketed list of codes spelling the final keyword.
    '''
    # Context manager so the image file handle is always released
    # (the original opened the file and never closed it).
    with PIL.Image.open('./Resources/oxygen.png') as photo:
        pixel_access = photo.load()
        # print(photo.size)  # (629, 95)
        width = photo.size[0]
        height = photo.size[1]
        # See how wide in pixels each gray box is - 1st is 5, rest 7
        # for i in range(50):
        #     print(pixel_access[i, height/2])
        # Gray values along the stripe: first box, then one every 7 pixels.
        first = [pixel_access[0, height / 2][0]]
        rest = [pixel_access[
            5 + 7 * i, height / 2][0] for i in range((width - 5) // 7)
        ]
    grays = first + rest
    message = ''.join([chr(item) for item in grays])
    print(message)
    # The message embeds "[...]" - decode that list of codes too.
    start_index = message.find('[')
    end_index = message.find(']')
    new_set = message[start_index + 1: end_index].split(',')
    final_message = ''.join([chr(int(item)) for item in new_set])
    print(final_message)
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_07.py",
"copies": "1",
"size": "1417",
"license": "mit",
"hash": -4766237816565861000,
"line_mean": 26.25,
"line_max": 66,
"alpha_frac": 0.6224417784,
"autogenerated": false,
"ratio": 3.2058823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9328324131341177,
"avg_score": 0,
"num_lines": 52
} |
#!/Applications/anaconda/envs/imgPIL/bin
# Python Challenge - 9
# http://www.pythonchallenge.com/pc/return/good.html
# Username: huge; Password: file
# Keywords: cow, bull
'''
Uses Anaconda environment with Pillow for image processing
- Python 3.7, numpy, and Pillow (PIL)
- Run `source activate imgPIL`, `python chall_09.py`
'''
# import PIL
from PIL import Image, ImageDraw
def main():
    '''
    Hint: Connect the dots; first+second=?

    The page supplies two long sequences of alternating x, y coordinates
    ("first" and "second" below; the original docstring duplicated them
    verbatim). Drawing each sequence as a polyline on a blank canvas the
    size of good.jpg reveals the answer.
    '''
    first = (
        146, 399, 163, 403, 170, 393, 169, 391, 166, 386, 170, 381, 170, 371,
        170, 355, 169, 346, 167, 335, 170, 329, 170, 320, 170, 310, 171, 301,
        173, 290, 178, 289, 182, 287, 188, 286, 190, 286, 192, 291, 194, 296,
        195, 305, 194, 307, 191, 312, 190, 316, 190, 321, 192, 331, 193, 338,
        196, 341, 197, 346, 199, 352, 198, 360, 197, 366, 197, 373, 196, 380,
        197, 383, 196, 387, 192, 389, 191, 392, 190, 396, 189, 400, 194, 401,
        201, 402, 208, 403, 213, 402, 216, 401, 219, 397, 219, 393, 216, 390,
        215, 385, 215, 379, 213, 373, 213, 365, 212, 360, 210, 353, 210, 347,
        212, 338, 213, 329, 214, 319, 215, 311, 215, 306, 216, 296, 218, 290,
        221, 283, 225, 282, 233, 284, 238, 287, 243, 290, 250, 291, 255, 294,
        261, 293, 265, 291, 271, 291, 273, 289, 278, 287, 279, 285, 281, 280,
        284, 278, 284, 276, 287, 277, 289, 283, 291, 286, 294, 291, 296, 295,
        299, 300, 301, 304, 304, 320, 305, 327, 306, 332, 307, 341, 306, 349,
        303, 354, 301, 364, 301, 371, 297, 375, 292, 384, 291, 386, 302, 393,
        324, 391, 333, 387, 328, 375, 329, 367, 329, 353, 330, 341, 331, 328,
        336, 319, 338, 310, 341, 304, 341, 285, 341, 278, 343, 269, 344, 262,
        346, 259, 346, 251, 349, 259, 349, 264, 349, 273, 349, 280, 349, 288,
        349, 295, 349, 298, 354, 293, 356, 286, 354, 279, 352, 268, 352, 257,
        351, 249, 350, 234, 351, 211, 352, 197, 354, 185, 353, 171, 351, 154,
        348, 147, 342, 137, 339, 132, 330, 122, 327, 120, 314, 116, 304, 117,
        293, 118, 284, 118, 281, 122, 275, 128, 265, 129, 257, 131, 244, 133,
        239, 134, 228, 136, 221, 137, 214, 138, 209, 135, 201, 132, 192, 130,
        184, 131, 175, 129, 170, 131, 159, 134, 157, 134, 160, 130, 170, 125,
        176, 114, 176, 102, 173, 103, 172, 108, 171, 111, 163, 115, 156, 116,
        149, 117, 142, 116, 136, 115, 129, 115, 124, 115, 120, 115, 115, 117,
        113, 120, 109, 122, 102, 122, 100, 121, 95, 121, 89, 115, 87, 110, 82,
        109, 84, 118, 89, 123, 93, 129, 100, 130, 108, 132, 110, 133, 110,
        136, 107, 138, 105, 140, 95, 138, 86, 141, 79, 149, 77, 155, 81, 162,
        90, 165, 97, 167, 99, 171, 109, 171, 107, 161, 111, 156, 113, 170, 115,
        185, 118, 208, 117, 223, 121, 239, 128, 251, 133, 259, 136, 266, 139,
        276, 143, 290, 148, 310, 151, 332, 155, 348, 156, 353, 153, 366, 149,
        379, 147, 394, 146, 399
    )
    second = (
        156, 141, 165, 135, 169, 131, 176, 130, 187, 134, 191, 140, 191, 146,
        186, 150, 179, 155, 175, 157, 168, 157, 163, 157, 159, 157, 158, 164,
        159, 175, 159, 181, 157, 191, 154, 197, 153, 205, 153, 210, 152, 212,
        147, 215, 146, 218, 143, 220, 132, 220, 125, 217, 119, 209, 116, 196,
        115, 185, 114, 172, 114, 167, 112, 161, 109, 165, 107, 170, 99, 171,
        97, 167, 89, 164, 81, 162, 77, 155, 81, 148, 87, 140, 96, 138, 105,
        141, 110, 136, 111, 126, 113, 129, 118, 117, 128, 114, 137, 115, 146,
        114, 155, 115, 158, 121, 157, 128, 156, 134, 157, 136, 156, 136
    )
    photo_path = './connect-dots_chall_09/good.jpg'
    # Only the size of the source photo is needed; a context manager
    # releases the file handle (the original leaked it).
    with Image.open(photo_path) as pic:
        size = pic.size
    img = Image.new('RGB', size)
    draw = ImageDraw.Draw(img)
    draw.line(first)
    draw.line(second)
    del draw  # release the drawing interface before saving
    img.save('./connect-dots_chall_09/final.jpg')
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/PythonChallenge",
"path": "Challenges/chall_09.py",
"copies": "1",
"size": "6844",
"license": "mit",
"hash": -6436411643647133000,
"line_mean": 50.8484848485,
"line_max": 79,
"alpha_frac": 0.5562536528,
"autogenerated": false,
"ratio": 2.2395287958115184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32957824486115184,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/Python3/bin
def main():
    '''Examples Using Exceptions in Python

    Interactive demo: runs through try/except/else/finally, an input
    retry loop, raising library and custom exceptions, and assert.
    Deliberately ends by raising CustomException.
    '''
    # Python exceptions: http://docs.python.org/library/exceptions.html
    # Catch exceptions with try
    try:
        f = open('noFile.txt')
    except IOError as e:
        print('Oh no, IOError:', e)
    except ValueError as e:
        print('Oh no, ValueError:', e)
    else:
        # Can put the else code in the try part, too
        # Runs when try body completes with no exceptions
        for line in f:
            print(line, end='')
    finally:
        # Always executed after try, except, and else even if exceptions raised
        # or hit break/continue/return statement. Good for clean-up
        # f.close()
        pass
    # Exceptions in a while loop: keep prompting until int() succeeds.
    while True:
        try:
            n = input('Please enter an integer: ')
            n = int(n)
            break
        except ValueError:
            print('Input not an integer, please try again: ')
    print('Correct input!')
    # Raise own exceptions
    try:
        for line in readDocFile('noFile.txt'):
            print(line.strip())
    except ValueError as e:
        print('Bad filename:', e)
    testBool = True
    # NOTE: deliberately raises to demo the custom exception class;
    # everything after this point is unreachable while testBool is True.
    testBool = True if testBool else testBool  # no-op kept out; see below
    if testBool:
        raise CustomException('NOOOOOO!')
    # Assert that input is correct
    grades = [79, 92, 84]
    assert not len(grades) == 0, 'no grades data'
    return 0
def readDocFile(filename):
    '''Return the lines of *filename* as a list of strings.

    Raises:
        ValueError: if *filename* does not end with '.doc'.
    '''
    # Guard clause first; the original nested the happy path in an else.
    if not filename.endswith('.doc'):
        raise ValueError('Filename must end with .doc')
    # Context manager guarantees the handle is closed (the original
    # returned without ever closing the file it opened).
    with open(filename) as f:
        return f.readlines()
class CustomException(Exception):
    '''Demo custom exception that echoes its message when constructed.'''

    def __init__(self, error):
        # Zero-argument super() is the modern spelling; the original's
        # super(Exception, self) skipped Exception in the MRO and called
        # BaseException.__init__ directly (same net effect, less clear).
        super().__init__(error)
        print(error)


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/Test_Code",
"path": "exceptions.py",
"copies": "1",
"size": "1762",
"license": "mit",
"hash": 1331444594956361700,
"line_mean": 24.1714285714,
"line_max": 79,
"alpha_frac": 0.5828603859,
"autogenerated": false,
"ratio": 4.16548463356974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.524834501946974,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/Python3/bin
import re
def main():
    '''Examples Using Regular Expressions in Python

    Both helpers read sonnets.txt from the current working directory.
    '''
    printRomanNumerals('sonnets.txt')
    replaceWord('sonnets.txt', 'beauty', 'vomit')
    # replaceWord('sonnets.txt', 'love', 'shart')
    return 0
def printRomanNumerals(filename):
    '''Print every Roman-numeral heading (e.g. "XIV.") found in *filename*.

    A heading is a run of Roman-numeral characters followed by a period;
    at most the first match per line is printed. Returns 0.
    '''
    # Use raw string for backslash, other special characters
    # match.group(0)/group() = full match; match.group(1) = first captured
    # group, etc. Compile once, outside the line loop.
    pattern = re.compile(r'[IVXLCDM]+\.')
    # Context manager replaces the original's manual open/close pair.
    with open(filename, 'r') as fh:
        for line in fh:
            match = re.search(pattern, line)
            if match:
                print(match.group())
    return 0
def replaceWord(filename, original, new):
    '''Print each line of *filename* containing *original*, with every
    occurrence replaced by *new*. Returns 0.

    NOTE: *original* is compiled as a regular expression, so regex
    metacharacters in it are interpreted, not matched literally.
    '''
    print('\nTime to replace the word {} with {} and print the '
          'improved line.\n'.format(original, new))
    pattern = re.compile(original)
    # Context manager replaces the original's manual open/close pair.
    with open(filename, 'r') as fh:
        for line in fh:
            if re.search(pattern, line):
                print(pattern.sub(new, line), end='')
                # print(re.sub(pattern, new, line), end='')
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/Test_Code",
"path": "Regex/regex.py",
"copies": "1",
"size": "1298",
"license": "mit",
"hash": -6238065345010001000,
"line_mean": 24.4509803922,
"line_max": 73,
"alpha_frac": 0.6171032357,
"autogenerated": false,
"ratio": 3.615598885793872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732702121493872,
"avg_score": null,
"num_lines": null
} |
#!/Applications/anaconda/envs/Python3/bin
import sys
def main():
    '''Python 3 Quick Start Code Examples

    Tour of core syntax: data types, conditionals, loops, comprehensions,
    iterables, and generator expressions. Prints each section to stdout
    and returns 0.
    '''
    # Get input from user and display it
    # feels = input("On a scale of 1-10, how do you feel? ")
    # print("You selected: {}".format(feels))
    # Python Data Types (assigned only to illustrate literal syntax)
    integer = 42
    floater = 3.14
    stringer = 'Hello, World!'
    noner = None  # singleton value, check: if var is None
    tupler = (1, 2, 3)
    lister = [1, 2, 3]
    dicter = dict(
        one = 1,
        two = 2,
        three = 3
    )
    boolTrue = True
    boolFalse = False
    # Conditionals
    print("=========== Conditionals ==========")
    num1, num2 = 0, 1
    if (num1 > num2):
        # print("{} is greater than {}".format(num1, num2))
        pass
    elif (num1 < num2):
        # print("{} is less than {}".format(num1, num2))
        pass
    else:
        # print("{} is equal to {}".format(num1, num2))
        pass
    # Python version of ternary operator
    bigger = num1 if num1 >= num2 else num2
    smaller = num1 if num1 < num2 else num2
    # print("Conditional statment says {} is greater than or equal to {}".format(bigger, smaller))
    # Python version of a switch statement: dict lookup with a default
    choices = dict(
        a = 'First',
        b = 'Second',
        c = 'Third',
        d = 'Fourth',
        e = 'Fifth'
    )
    opt1 = 'c'
    opt2 = 'f'
    default = 'Option not found'
    # print("Python 'switch' statment using a dict: {}".format(choices))
    # print("Option 1 was {} and returned: {}".format(opt1, choices.get(opt1, default)))
    # print("Option 2 was {} and returned: {}".format(opt2, choices.get(opt2, default)))
    print("==============================")
    # Loops
    print("=========== Loops ==========")
    print("Fibonacci series up to 100:")
    a, b = 0, 1
    while b < 100:
        print(b, end=" ")
        a, b = b, a + b
    print()
    # print("For loop printing parts of {}".format(stringer))
    for letter in stringer:
        # Don't print the vowels
        if letter in 'aeiouAEIOU':
            continue
        # Stop looping at punctuation
        if letter in '!@#$%^&*.,?;:-_+=|':
            break
        # print(letter, end=" ")
    # print()
    print("==============================")
    # Get an index using a for loop with enumerate()
    # for index, letter in enumerate(stringer):
    #     print("Index: {} is letter: {}".format(index, letter))
    # List comprehensions
    print("=========== List Comprehensions ==========")
    # Create a new list - [expression for variable in list]
    listOne = [0, 1, 2, 3, 4, 5]
    listSquares = [x*x for x in listOne]
    print("List comprehension: {}".format(listSquares))
    # Filter a list - [expression for variable in list if condition]
    listOdd = [x for x in listSquares if x % 2 == 1]
    print("Filtered list comprehension: {}".format(listOdd))
    # Dictionary comprehensions
    print("=========== Dict Comprehensions ==========")
    dictComp = {chr(64+x): x for x in range(1, 27)}
    print("Dict comprehension: {}".format(dictComp))
    # Set comprehension
    print("=========== Set Comprehensions ==========")
    setComp = {x**5 for x in range(2,8)}
    print("Set comprehension: {}".format(setComp))
    print("==============================")
    # Check if a type is an iterable (i.e. defines __iter__)
    print("=========== Is X Type Interable? ==========")
    print("Is a string an iterable? {}".format(hasattr(str, '__iter__')))
    print("Is a Boolean an iterable? {}".format(hasattr(bool, '__iter__')))
    print("Is a list an iterable? {}".format(hasattr(list, '__iter__')))
    print("Is a set an iterable? {}".format(hasattr(set, '__iter__')))
    print("Is an int an iterable? {}".format(hasattr(int, '__iter__')))
    print("==============================")
    # Generator Expressions
    # Similar to list comprehension, less space in memory
    print("=========== Generator Expressions ==========")
    genExp = (x**5 for x in range(2,8))
    listComp = [x**5 for x in range(2,8)]
    print("Type of a generator expression: {}".format(type(genExp)))
    print("Actual generator expression: {}".format(genExp))
    print("Size of generator expression: {}".format(sys.getsizeof(genExp)))
    print("Size of same list comprehension: {}".format(sys.getsizeof(listComp)))
    print("==============================")
    return 0


if __name__ == '__main__':
    main()
| {
"repo_name": "HKuz/Test_Code",
"path": "setup.py",
"copies": "1",
"size": "4412",
"license": "mit",
"hash": 3006655741803645000,
"line_mean": 32.1729323308,
"line_max": 98,
"alpha_frac": 0.5410244787,
"autogenerated": false,
"ratio": 3.75809199318569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.479911647188569,
"avg_score": null,
"num_lines": null
} |
# Application Scope Bucket
from datetime import datetime
import json
from kii import exceptions as exc, results as rs
from kii.data.clauses import (
Clause,
AllClause,
AndClause,
)
from kii.helpers import BucketsHelper
# Manage Buckets
class ManageBuckets(BucketsHelper):
    """Base helper for per-bucket management endpoints.

    Binds an explicit bucket id to the scope and targets
    /apps/{appID}/buckets/{bucketID}.
    """

    def __init__(self, scope, bucket_id):
        super().__init__(scope)
        self.bucket_id = bucket_id

    @property
    def bucket_id(self):
        # Plain pass-through accessor for the bound bucket id.
        return self._bucket_id

    @bucket_id.setter
    def bucket_id(self, bucket_id):
        self._bucket_id = bucket_id

    @property
    def api_path(self):
        # REST path of the bucket itself.
        return '/apps/{appID}/buckets/{bucketID}'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id)

    @property
    def headers(self):
        headers = super().headers
        # Only send Authorization when an access token is available.
        if self.access_token:
            headers['Authorization'] = self.authorization
        return headers
class RetrieveABucket(ManageBuckets):
    """GET the bucket; the response is wrapped in a BucketResult."""

    method = 'GET'
    result_container = rs.BucketResult
class DeleteABucket(ManageBuckets):
    """DELETE the bucket; the response is wrapped in a BaseResult."""

    method = 'DELETE'
    result_container = rs.BaseResult
# Manage Objects
class CreateAnObject(BucketsHelper):
    """POST a new object (JSON body) into the scope's bucket."""

    method = 'POST'
    result_container = rs.CreateResult

    def __init__(self, scope, data):
        super().__init__(scope)
        # JSON-serializable document to create.
        self.data = data

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = 'application/json'
        if self.access_token:
            headers['Authorization'] = self.authorization
        return headers

    def request(self):
        # Send the document as the JSON payload of the POST.
        return super().request(json=self.data)
class RetrieveAnObject(BucketsHelper):
    """GET a single object by id from the scope's bucket."""

    method = 'GET'
    result_container = rs.ObjectResult

    def __init__(self, scope, object_id):
        super().__init__(scope)
        self.object_id = object_id

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = 'application/json'
        return headers
class FullyUpdateAnObject(BucketsHelper):
    """PUT a complete replacement of an object's JSON document.

    if_match / if_none_match become the If-Match / If-None-Match headers
    (ETag-based optimistic concurrency guards).
    """

    method = 'PUT'
    result_container = rs.UpdateResult

    def __init__(self, scope, object_id, data, *, if_match=None, if_none_match=None):
        super().__init__(scope)
        self.object_id = object_id
        # Full JSON-serializable replacement document.
        self.data = data
        self.if_match = if_match
        self.if_none_match = if_none_match

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = 'application/json'
        if self.access_token:
            headers['Authorization'] = self.authorization
        # ETag guards are only sent when supplied.
        if self.if_match:
            headers['If-Match'] = self.if_match
        if self.if_none_match:
            headers['If-None-Match'] = self.if_none_match
        return headers

    def request(self):
        return super().request(json=self.data)
class CreateANewObjectWithAnID(FullyUpdateAnObject):
    """
    synonym of FullyUpdateAnObject

    A PUT to a not-yet-existing object id creates the object, so the
    request shape is identical to a full update.
    """
class PartiallyUpdateAnObject(FullyUpdateAnObject):
    """Partial (PATCH-style) update of an object.

    Kii tunnels PATCH through POST with the X-HTTP-Method-Override
    header; everything else (JSON body, Content-Type, Authorization and
    the If-Match / If-None-Match guards) matches FullyUpdateAnObject.
    """

    method = 'POST'

    @property
    def headers(self):
        # The parent property already sets Content-Type, Authorization,
        # If-Match and If-None-Match; the original re-set all of them
        # redundantly here. Only the PATCH override is added.
        headers = super().headers
        headers['X-HTTP-Method-Override'] = 'PATCH'
        return headers
class DeleteAnObject(BucketsHelper):
    """DELETE an object, optionally guarded by If-Match/If-None-Match."""

    method = 'DELETE'
    result_container = rs.DeleteResult

    def __init__(self, scope, object_id, *, if_match=None, if_none_match=None):
        super().__init__(scope)
        self.object_id = object_id
        self.if_match = if_match
        self.if_none_match = if_none_match

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = 'application/json'
        if self.access_token:
            headers['Authorization'] = self.authorization
        # ETag guards for optimistic concurrency control.
        if self.if_match:
            headers['If-Match'] = self.if_match
        if self.if_none_match:
            headers['If-None-Match'] = self.if_none_match
        return headers
class QueryForObjects(BucketsHelper):
    """Chainable query builder for the bucket /query endpoint.

    Configuration methods (`filter`, `order_by`, `limit`, `offset`,
    `best_effort_limit`, `pagination_key`, `step`) set up the request;
    `all`, `first`, `one` and `count` execute it. Note that only
    `filter` returns a clone -- the other configuration methods mutate
    and return this same instance.
    """

    method = 'POST'
    result_container = rs.QueryResult

    def __init__(self, scope,
                 clause=None,
                 *,
                 order_by=None,
                 descending=None,
                 pagination_key=None,
                 best_effort_limit=None,
                 limit=None):
        super().__init__(scope)
        self.internal = False
        # No clause means "match every object in the bucket".
        if clause is None:
            clause = AllClause()
        self.clause = clause
        self._order_by = order_by
        self._descending = descending
        self._pagination_key = pagination_key
        self._best_effort_limit = best_effort_limit
        self._limit = limit
        self._offset = 0
        self._aggregations = []

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/query'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = 'application/vnd.kii.QueryRequest+json'
        return headers

    @property
    def clause(self):
        return self._clause

    @clause.setter
    def clause(self, clause):
        # Only Clause instances are accepted; anything else is a caller bug.
        if not isinstance(clause, Clause):
            raise exc.KiiInvalidClauseError
        self._clause = clause

    def clone(self):
        # Copy of the query configuration.
        # NOTE(review): _aggregations (and _step, if ever set) are not
        # copied -- confirm this is intentional.
        instance = self.__class__(self.scope, self.clause)
        instance._order_by = self._order_by
        instance._descending = self._descending
        instance._pagination_key = self._pagination_key
        instance._best_effort_limit = self._best_effort_limit
        instance._limit = self._limit
        instance._offset = self._offset
        return instance

    def filter(self, *clauses):
        # AND the new clauses onto a clone, leaving this instance intact.
        instance = self.clone()
        instance.clause = AndClause(instance.clause, *clauses)
        return instance

    def request(self):
        return super().request(json=self._assemble())

    def bucket_query(self):
        # Build the 'bucketQuery' portion of the request payload.
        query = {}
        query['clause'] = self.clause.query()
        if self._order_by is not None:
            query['orderBy'] = self._order_by
        if self._descending is not None:
            query['descending'] = self._descending
        if self._aggregations:
            query['aggregations'] = self._aggregations
        return query

    def _assemble(self):
        # Build the complete JSON payload for the query request.
        params = {}
        query = self.bucket_query()
        if query:
            params['bucketQuery'] = query
        if self._pagination_key:
            params['paginationKey'] = self._pagination_key
        # limit acts as a fallback value for best_effort_limit.
        if self._limit and self._best_effort_limit is None:
            self._best_effort_limit = self._limit
        if self._best_effort_limit:
            # The offset is folded into the requested size.
            params['bestEffortLimit'] = self._best_effort_limit + self._offset
        return params

    def all(self):
        """Execute the query and return all matching results."""
        return self.request()

    def count(self):
        """Execute a COUNT aggregation and return the number of matches.

        NOTE(review): mutates result_container and _aggregations on this
        instance, so later calls on the same instance stay in count mode.
        """
        self.result_container = rs.QueryCountResult
        self._aggregations = [
            {
                "type": "COUNT",
                "putAggregationInto": "count_field"
            }
        ]
        result = self.request()
        return result.count

    def first(self):
        """Return the first result, or None when there are no matches."""
        results = self.request()
        try:
            return results[0]
        except IndexError:
            return None

    def one(self):
        """Return exactly one result.

        Raises KiiMultipleResultsFoundError for more than one match and
        KiiObjectNotFoundError for zero matches.
        """
        results = self.request()
        if len(results) > 1:
            raise exc.KiiMultipleResultsFoundError
        try:
            return results[0]
        except IndexError as e:
            raise exc.KiiObjectNotFoundError from e

    def offset(self, offset):
        self._offset = offset
        return self

    def step(self, step):
        # NOTE(review): _step is never read in this class and is not
        # copied by clone() -- looks like dead configuration; verify.
        self._step = step
        return self

    def best_effort_limit(self, best_effort_limit):
        self._best_effort_limit = best_effort_limit
        return self

    def limit(self, limit):
        self._limit = limit
        return self

    def order_by(self, key, descending=True):
        self._order_by = key
        self._descending = descending
        return self

    def pagination_key(self, pagination_key):
        self._pagination_key = pagination_key
        return self

    def __str__(self):
        # Human-readable dump of the request that would be sent.
        headers = json.dumps(self.headers, ensure_ascii=False, indent=4, sort_keys=True)
        query = json.dumps(self._assemble(), ensure_ascii=False, indent=4, sort_keys=True)
        return '''\
[{method}] {url}
Headers:
{headers}
Query Request:
{query}'''.format(method=self.method,
                  url=self.url,
                  headers=headers,
                  query=query)
class RetrieveAnObjectBody(BucketsHelper):
    """GET an object's binary body, optionally just a byte range of it."""

    method = 'GET'
    result_container = rs.BodyResult

    def __init__(self, scope, object_id, *,
                 if_match=None,
                 range=None):
        super().__init__(scope)
        self.object_id = object_id
        self.if_match = if_match
        # range is tuple or list. e.g.) [begin, end]
        # (the parameter name shadows the builtin but is part of the API)
        if range is not None and not isinstance(range, (list, tuple)):
            raise exc.KiiInvalidTypeError
        self.range = range

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Accept'] = '*/*'
        if self.access_token:
            headers['Authorization'] = self.authorization
        if self.if_match:
            headers['If-Match'] = self.if_match
        if self.range:
            # HTTP Range header: "bytes=begin-end".
            headers['Range'] = 'bytes={0}-{1}'.format(*self.range)
        return headers
class AddOrReplaceAnObjectBody(BucketsHelper):
    """PUT raw binary content as an object's body.

    `content_type` is sent verbatim as the upload's Content-Type;
    if_match / if_none_match provide ETag guards.
    """

    method = 'PUT'
    result_container = rs.BaseResult

    def __init__(self, scope, object_id, body, content_type, *,
                 if_match=None, if_none_match=None):
        super().__init__(scope)
        self.object_id = object_id
        self.body = body
        self.content_type = content_type
        self.if_match = if_match
        self.if_none_match = if_none_match

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        headers['Content-Type'] = self.content_type
        if self.access_token:
            headers['Authorization'] = self.authorization
        if self.if_match:
            headers['If-Match'] = self.if_match
        if self.if_none_match:
            headers['If-None-Match'] = self.if_none_match
        return headers

    def request(self):
        # The body is sent as-is (raw data), not JSON-encoded.
        return super().request(data=self.body)
class VerifyTheObjectBodyExistence(BucketsHelper):
    """HEAD request that checks whether an object has a stored body."""

    method = 'HEAD'
    result_container = rs.ObjectResult

    def __init__(self, scope, object_id):
        super().__init__(scope)
        self.object_id = object_id

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        if self.access_token:
            headers['Authorization'] = self.authorization
        return headers
class DeleteAnObjectBody(BucketsHelper):
    """DELETE an object's stored binary body (the object itself remains)."""

    method = 'DELETE'
    result_container = rs.ObjectResult

    def __init__(self, scope, object_id):
        super().__init__(scope)
        self.object_id = object_id

    @property
    def api_path(self):
        return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        headers = super().headers
        if self.access_token:
            headers['Authorization'] = self.authorization
        return headers
class PublishAnObjectBody(BucketsHelper):
    """POST request that publishes an object body at a public URL."""

    method = 'POST'
    result_container = rs.PublishBodyResult

    def __init__(self, scope, object_id, *,
                 expires_at=None, expires_in=None):
        """
        expires_at: a ``datetime`` at which the publication URL should
        expire; it is sent to the server as Unix epoch milliseconds
        expires_in: The period in seconds the publication URL
        has to be available, after that it will expire
        """
        super().__init__(scope)
        self.object_id = object_id
        self.expires_at = expires_at
        self.expires_in = expires_in

    @property
    def api_path(self):
        """Endpoint path for publishing this object's body."""
        template = '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body/publish'
        return template.format(
            appID=self.api.app_id,
            bucketID=self.bucket_id,
            objectID=self.object_id)

    @property
    def headers(self):
        """Base headers plus the publication content type and auth."""
        merged = super().headers
        merged['Content-Type'] = 'application/vnd.kii.ObjectBodyPublicationRequest+json'
        if self.access_token:
            merged['Authorization'] = self.authorization
        return merged

    def request(self):
        """Build the expiration payload and issue the POST."""
        payload = {}
        if self.expires_at is not None:
            # Only datetime instances are accepted; converted to epoch ms.
            if not isinstance(self.expires_at, datetime):
                raise exc.KiiInvalidExpirationError
            payload['expiresAt'] = int(self.expires_at.timestamp() * 1000)
        if self.expires_in is not None:
            payload['expiresIn'] = self.expires_in
        return super().request(json=payload)
from .startuploadinganobjectbody import StartUploadingAnObjectBody # NOQA
from .gettheuploadmetadata import GetTheUploadMetadata # NOQA
from .uploadthegivenobjectdata import UploadTheGivenObjectData # NOQA
from .settheobjectbodyuploadstatustocommitted import SetTheObjectBodyUploadStatusToCommitted # NOQA
from .settheobjectbodyuploadstatustocancelled import SetTheObjectBodyUploadStatusToCancelled # NOQA
| {
"repo_name": "ta2xeo/python3-kii",
"path": "kii/data/application/__init__.py",
"copies": "1",
"size": "15123",
"license": "mit",
"hash": -5617599474132138000,
"line_mean": 25.8614564831,
"line_max": 100,
"alpha_frac": 0.5939959003,
"autogenerated": false,
"ratio": 4.043582887700535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137578788000535,
"avg_score": null,
"num_lines": null
} |
"""Application settings."""
from os import environ as env
from pygotham.utils import DOES_NOT_EXIST
def bool_(key, default):
    """Return an environment setting represented as a boolean."""
    raw = env.get(key, str(default))
    return raw.lower() == 'true'
DEBUG = bool_('DEBUG', False)
# DOES_NOT_EXIST is a sentinel imported from pygotham.utils; presumably it
# makes a missing required secret fail loudly -- confirm its semantics there.
SECRET_KEY = env.get('SECRET_KEY', DOES_NOT_EXIST)
GOOGLE_ANALYTICS_PROFILE_ID = env.get('GOOGLE_ANALYTICS_PROFILE_ID')
# Flask-Assets
ASSETS_DEBUG = bool_('ASSETS_DEBUG', False)
# Flask-Foundation
FOUNDATION_HTML5_SHIM = bool_('FOUNDATION_HTML5_SHIM', True)
FOUNDATION_USE_CDN = bool_('FOUNDATION_USE_CDN', True)
FOUNDATION_USE_MINIFIED = bool_('FOUNDATION_USE_MINIFIED', True)
# Flask-Mail
MAIL_SERVER = env.get('MAIL_SERVER', 'localhost')
MAIL_PORT = int(env.get('MAIL_PORT', 25))
MAIL_USERNAME = env.get('MAIL_USERNAME')
MAIL_PASSWORD = env.get('MAIL_PASSWORD')
# NOTE(review): SSL and TLS both default to True here; mail servers usually
# accept only one of the two -- confirm the intended transport.
MAIL_USE_SSL = bool_('MAIL_USE_SSL', True)
MAIL_USE_TLS = bool_('MAIL_USE_TLS', True)
MAIL_DEBUG = bool_('MAIL_DEBUG', False)
MAIL_DEFAULT_SENDER = env.get('MAIL_DEFAULT_SENDER')
# Flask-Security
SECURITY_CHANGEABLE = True
SECURITY_DEFAULT_REMEMBER_ME = True
SECURITY_EMAIL_SENDER = MAIL_DEFAULT_SENDER
SECURITY_EMAIL_SUBJECT_REGISTER = 'Welcome to PyGotham'
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_PASSWORD_SALT = env.get('SECURITY_PASSWORD_SALT', DOES_NOT_EXIST)
SECURITY_POST_REGISTER_VIEW = 'profile.settings'
SECURITY_RECOVERABLE = True
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = bool_('SECURITY_SEND_REGISTER_EMAIL', True)
SECURITY_TRACKABLE = True
# Flask-SQLAlchemy
SQLALCHEMY_DATABASE_URI = env.get('DATABASE_URL')
# Strip the helper names so only actual settings remain in this module's
# namespace.
del bool_
del env
del DOES_NOT_EXIST
| {
"repo_name": "djds23/pygotham-1",
"path": "pygotham/settings.py",
"copies": "1",
"size": "1644",
"license": "bsd-3-clause",
"hash": 405854103931126500,
"line_mean": 29.4444444444,
"line_max": 74,
"alpha_frac": 0.7378345499,
"autogenerated": false,
"ratio": 3.010989010989011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.924882356088901,
"avg_score": 0,
"num_lines": 54
} |
"""Applications filters"""
import pytest
from applications.constants import (
ALL_REVIEW_STATUSES,
REVIEW_STATUS_REJECTED,
REVIEW_STATUS_APPROVED,
REVIEW_STATUS_PENDING,
)
from applications.factories import ApplicationStepSubmissionFactory
from applications.filters import ApplicationStepSubmissionFilterSet
from applications.models import ApplicationStepSubmission
pytestmark = pytest.mark.django_db
def test_application_step_submission_filterset_bootcamp_id():
    """Verify that ApplicationStepSubmissionFilterSet's bootcamp_id filter works"""
    included = ApplicationStepSubmissionFactory.create()
    excluded = ApplicationStepSubmissionFactory.create()
    run_id = included.bootcamp_application.bootcamp_run.id
    filtered = ApplicationStepSubmissionFilterSet(
        {"bootcamp_run_id": run_id},
        queryset=ApplicationStepSubmission.objects.all(),
    ).qs
    assert included in filtered
    assert excluded not in filtered
@pytest.mark.parametrize("review_status", ALL_REVIEW_STATUSES)
def test_application_step_submission_filterset_review_status_exact(review_status):
    """Verify that ApplicationStepSubmissionFilterSet's review_status (exact) filter works"""
    # One submission per review state, created via factory flags.
    created = [
        ApplicationStepSubmissionFactory.create(**{flag: True})
        for flag in ("is_pending", "is_approved", "is_rejected", "is_waitlisted")
    ]
    filtered = ApplicationStepSubmissionFilterSet(
        {"review_status": review_status},
        queryset=ApplicationStepSubmission.objects.all(),
    ).qs
    assert len(filtered) == 1
    for sub in created:
        assert (sub in filtered) == (sub.review_status == review_status)
@pytest.mark.parametrize(
    "review_statuses",
    [
        [REVIEW_STATUS_PENDING],
        [REVIEW_STATUS_APPROVED],
        [REVIEW_STATUS_REJECTED],
        [REVIEW_STATUS_PENDING, REVIEW_STATUS_APPROVED],
        [REVIEW_STATUS_APPROVED, REVIEW_STATUS_REJECTED],
        [REVIEW_STATUS_REJECTED, REVIEW_STATUS_PENDING],
        [REVIEW_STATUS_REJECTED, REVIEW_STATUS_APPROVED, REVIEW_STATUS_PENDING],
    ],
)
def test_application_step_submission_filterset_review_status_in(review_statuses):
    """Verify that ApplicationStepSubmissionFilterSet's review_status__in filter works"""
    # One submission per review state covered by the parametrized sets.
    created = [
        ApplicationStepSubmissionFactory.create(**{flag: True})
        for flag in ("is_pending", "is_approved", "is_rejected")
    ]
    filtered = ApplicationStepSubmissionFilterSet(
        {"review_status__in": ",".join(review_statuses)},
        queryset=ApplicationStepSubmission.objects.all(),
    ).qs
    assert len(filtered) == len(review_statuses)
    for sub in created:
        assert (sub in filtered) == (sub.review_status in review_statuses)
| {
"repo_name": "mitodl/bootcamp-ecommerce",
"path": "applications/filters_test.py",
"copies": "1",
"size": "3114",
"license": "bsd-3-clause",
"hash": 1195086406913279000,
"line_mean": 33.9887640449,
"line_max": 93,
"alpha_frac": 0.7292870906,
"autogenerated": false,
"ratio": 4.146471371504661,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007892490407063255,
"num_lines": 89
} |
# NOTE(review): everything below looks like scratch analysis code pasted from
# an editor -- the bare numbers ("51", "52", ...) are embedded editor line
# numbers and make this file invalid Python.  ``df``, ``np``,
# ``break_component`` and ``df_team`` are never defined in this file; confirm
# where this content was meant to live before trying to run it.
applications= ['jevans', 'jhoule', 'mpuello', 'kyang']
development= ['rdw', 'ysaxena', 'scarnes']
infrastructure= ['smeltzer', 'tpenn', 'awestfall']
df_app= df.loc[df.person.isin(applications)]
51 df_app_break= df_app.loc[df_app.component.isin(break_component)]
52 df_dev= df.loc[df.person.isin(development)]
53 df_inf= df.loc[df.person.isin(infrastructure)]
54
55
56 df_app_component=df_app.groupby(df_app.component)
57 df_app_component_time= df_app_component.aggregate({'time_spent':np.sum})
58 #print (df_app_component_time)
59 app_time=df_app_component_time.nlargest(10,'time_spent')
60 app_time.plot(kind='barh')
61 #plt.show()
62 #plt.bar(df_app_component_time.time_spent, df_app_component_time.component, align='center', alpha=0.5)
63 #df_app_component_time[:10].sort(ascending=0).plot(kind='barh')
64
65 df_dev_component= df_dev.groupby(df_dev.component)
66 df_dev_component_time= df_dev_component.aggregate({'time_spent':np.sum})
67 #print (df_dev_component_time)
68 dev_time= df_dev_component_time= df_dev_component_time.nlargest(10, 'time_spent')
69 dev_time.plot(kind='barh')
70 #plt.show()
71
72 df_inf_component=df_inf.groupby(df_inf.component)
73 df_inf_component_time= df_inf_component.aggregate({'time_spent': np.sum})
# NOTE(review): the triple quote below is never closed within this file, so
# the draft methods that follow are syntactically part of a string literal.
'''
    def component(self, component):
        """getting team component break down"""
        self.component=component
        df_component= df_team.loc(df_team.component==self.component)
        return df_component
    def component_time(self):
        df_team_component_time= df_team_component.aggregate({'time_spent':np.sum})
        #plot
        df_team_component_time=df_team_component_time.nlargest(10,'time_spent')
        df_team_component_time.plot(kind='barh')
    def aggregate(self):
        """aggregating data by team and component"""
        #suming the amount of hours spent on break/fixes by teams this year
        df_team= df.loc[(df.team==self.team)
        df_team_component= df_team.loc[(df_team.component==self.component)]
        df_team_component_time= df_team_component.aggregate({'time_spent':np.sum})
        df_team_component_time=df_team_component_time.nlargest(10,'time_spent')
        df_team_component_time.plot(kind='barh')
df_component= df.groupby(df.component)
df_component_time= df_component.aggregate({'time_spent':np.sum})
| {
"repo_name": "artopping/nyu-python",
"path": "course2/data_project/test_data.py",
"copies": "1",
"size": "2474",
"license": "mit",
"hash": 5220973949052489000,
"line_mean": 43.1785714286,
"line_max": 110,
"alpha_frac": 0.6576394503,
"autogenerated": false,
"ratio": 3.0771144278606966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42347538781606964,
"avg_score": null,
"num_lines": null
} |
"""Applications List Dialog"""
import logging
import sys
from PyQt5 import QtWidgets
import ui.functions
from database.db import Database
from helpers.functions import ham
from ui.window.ui_applications_list import (
Ui_AppListDialog,
)
logger = logging.getLogger(__name__)
class AppListDialog(QtWidgets.QDialog, Ui_AppListDialog):
    """Applications-list dialog backed by the default database."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.db = Database(use_default_db=True)
        ui.functions.set_window_icon(self)
        self._setup_ui_buttons()

    def _setup_ui_buttons(self):
        """Wire every action button to the placeholder handler."""
        for button in (self.btnPushAdd, self.btnPushRemove, self.btnPushClear):
            button.clicked.connect(ham)
def show_dialog():
    """Run the applications-list dialog modally; log when it is dismissed."""
    if not AppListDialog().exec_():
        logger.debug('Aborting Applications List...')
if __name__ == '__main__':
    # Allow running this dialog standalone for manual testing.
    app = QtWidgets.QApplication(sys.argv)
    window = AppListDialog()
    window.show()
    sys.exit(app.exec_())
| {
"repo_name": "hueyyeng/AssetsBrowser",
"path": "ui/dialog/applications_list.py",
"copies": "1",
"size": "1031",
"license": "mit",
"hash": -5430191292202712000,
"line_mean": 24.1463414634,
"line_max": 57,
"alpha_frac": 0.676042677,
"autogenerated": false,
"ratio": 3.6690391459074734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9845081822907473,
"avg_score": 0,
"num_lines": 41
} |
# application specific
from ..utils import flatten
from .grammar_parser import GrammarParser
from ..nodes import AST, ContainerNode, ListNode
from ..exceptions import TyrianSyntaxError, NoSuchGrammar
__all__ = ['Parser']
class Parser(object):
    """
    Simplifies parsing
    """

    def __init__(self, **kwargs):
        # All keyword arguments are forwarded to the grammar parser.
        self.grammar_parser = GrammarParser(**kwargs)

    def parse(self, lexed: list) -> AST:
        """
        given a list of tokens, returns a :py:class:`AST <tyrian.nodes.AST>`
        :param lexed: list of tokens to parse
        """
        # The grammar to start matching with comes from the settings.
        start_token = self.grammar_parser.settings['start_token'].upper()
        if start_token not in self.grammar_parser.grammars:
            raise NoSuchGrammar('No such grammar as "{}"'.format(start_token))
        grammar = self.grammar_parser.grammars[start_token]
        matches = []
        pos = 0
        # Repeatedly match the start grammar against the remaining tokens.
        while pos < len(lexed):
            outcome = grammar.check(
                lexed[pos:], '<{}>'.format(start_token)
            )
            if not outcome['result']:
                raise TyrianSyntaxError(
                    'error found near line {} in file {}'.format(
                        lexed[pos]['line_no'],
                        lexed[pos]['filename']
                    )
                )
            matches.append(outcome)
            pos += outcome['consumed']
        return AST(self._process(matches))

    def _process(self, parsed: list) -> ContainerNode:
        """
        :param parsed: list of Nodes to process
        """
        # Each match's parse tree is flattened and wrapped in a ListNode.
        return [
            ListNode(flatten(entry['parse_tree'], can_return_single=True))
            for entry in parsed
        ]
| {
"repo_name": "Mause/tyrian",
"path": "tyrian/typarser/typarser.py",
"copies": "1",
"size": "1925",
"license": "mit",
"hash": -3874144051819062300,
"line_mean": 27.7313432836,
"line_max": 78,
"alpha_frac": 0.558961039,
"autogenerated": false,
"ratio": 4.415137614678899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 67
} |
# application specific
from .utils import flatten
class AST(object):
    """
    Is the overruling object returned from the \
    :py:class:`Parser <tyrian.typarser.Parser>`
    """
    def __init__(self, content):
        # content: the list of top-level nodes produced by the parser
        self.content = content
    def __repr__(self):
        return '<AST len(content)=={}>'.format(len(self.content))
    def pprint(self):
        # Returns the rendered tree as a string (it does not print).
        return '\n'.join(self._pprint(self.content))
    def _pprint(self, node, indent=0):
        # Recursively render ``node`` as a list of tab-indented lines.
        if isinstance(node, list):
            name = '<list len={}>'.format(len(node))
        else:
            name = node.__repr__()
        cur_lines = []
        cur_lines.append('{}{}'.format('\t' * indent, name))
        # Only list-like nodes have their children rendered.
        if type(node) in [ListNode, ContainerNode, list]:
            try:
                # Node subclasses hold their children in .content;
                # a plain list is iterated directly.
                iterable = (
                    node.content if issubclass(type(node), Node)
                    else node
                )
                for sub_node in iterable:
                    # Skip a child equal to its parent (avoids recursing
                    # into a self-referencing node).
                    if sub_node == node:
                        continue
                    cur_lines += self._pprint(sub_node, indent + 1)
                # Closing tag mirrors ``name`` without its angle brackets.
                cur_lines.append('{}</{}>'.format('\t' * indent, name[1:-1]))
            except TypeError:
                # ``iterable`` was not iterable after all: render it as a
                # single child instead (no closing tag in this case).
                cur_lines += self._pprint(iterable, indent + 1)
        return cur_lines
class Node(object):
    """
    Base object for Node's
    """


class ListNode(Node):
    """
    Represents a () in LISP
    """
    # Single underscore (not ``__spec_name``): with a double underscore,
    # Python's name mangling pins __repr__'s lookup to ListNode's own
    # attribute, so ContainerNode's 'CN' override was never visible.
    _spec_name = 'LN'

    def __init__(self, content, strip=True):
        """
        :param content: token sequence, including the surrounding brackets
        :param strip: when True (default), drop the leading and trailing
            bracket tokens before flattening.  (Previously this parameter
            was accepted but ignored; the brackets were always stripped.)
        """
        if strip:
            # strip away the brackets
            content = content[1:-1]
        self.content = flatten(content)

    def __repr__(self):
        return '<{} len(content)=={}>'.format(
            self._spec_name,
            len(self.content))


class ContainerNode(ListNode):
    """
    Aside from being functionally identical to :py:class:`ListNode`,
    this Node does not represent anything in the AST,
    it simply serves as a container; hence the name
    """
    _spec_name = 'CN'


class IDNode(Node):
    "Represents an ID"

    def __init__(self, content):
        # ``content`` is a token with .line_no and .content attributes.
        self.line_no = content.line_no
        content = content.content
        self.content = content

    def __repr__(self):
        return '<IDNode content="{}">'.format(self.content)


class NumberNode(Node):
    "Represents a number"

    def __init__(self, content):
        self.line_no = content.line_no
        self.content = int(content.content)

    def __repr__(self):
        return '<NumberNode content={}>'.format(self.content)


class StringNode(Node):
    "Represents a string, per se"

    def __init__(self, content):
        # remove the quotes, grab the content token between them
        content = content[1:-1][0]
        self.content = content.content
        self.line_no = content.line_no

    def __repr__(self):
        return '<StringNode content="{}">'.format(self.content)


class SymbolNode(Node):
    "Represents a mathematical symbol"

    def __init__(self, content):
        self.line_no = content.line_no
        self.content = content.content

    def __repr__(self):
        return '<SymbolNode content="{}">'.format(self.content)


class QuotedNode(Node):
    """
    Represents a quoted token
    """

    def __init__(self, *args, **kwargs):
        raise NotImplementedError()


# and we define the mappings
grammar_mapping = {
    "list": ListNode,
    "string": StringNode,
    "number": NumberNode,
    "id": IDNode,
    "symbol": SymbolNode,
    "quoted_sexpr": QuotedNode
}
| {
"repo_name": "Mause/tyrian",
"path": "tyrian/nodes.py",
"copies": "1",
"size": "3450",
"license": "mit",
"hash": -5489936147959956000,
"line_mean": 23.2957746479,
"line_max": 77,
"alpha_frac": 0.5562318841,
"autogenerated": false,
"ratio": 3.9884393063583814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035211267605633805,
"num_lines": 142
} |
# application specific
from ..utils import logger
logger = logger.getChild('LispRegistry')
# Module-level registry mapping lisp function names to their Python
# implementations.  The globals() guard keeps the dict intact if this
# module's top level is executed more than once.
if 'lisp_registry' not in globals():
    lisp_registry = {}
def lisp_function(*args, **kwargs):
    """
    Registers decorated function in the lisp_registry

    if the decorator is being used like so;

    .. code-block:: python

        @lisp_function
        def func():
            pass

    then the __name__ attribute of the function is used

    if the decorator is used like so;

    .. code-block:: python

        @lisp_function(name="blardy")
        def randy():
            pass

    then we use the supplied name :)

    Previously any call without ``name=`` raised a KeyError, because
    ``'name' not in kwargs and not kwargs['name']`` evaluated
    ``kwargs['name']`` exactly when the key was absent; the bare-decorator
    form advertised by the docstring was also unsupported.
    """
    explicit_name = kwargs.get('name')

    def decorator(func):
        # Fall back to the function's own name when none was supplied.
        name = explicit_name or func.__name__
        logger.debug('Registering function with name: {}'.format(name))
        assert name not in lisp_registry, (
            'Function "{}" already exists'.format(name))
        lisp_registry[name] = func
        return func

    if args:
        # Bare decorator usage: @lisp_function
        return decorator(args[0])
    if 'func' in kwargs:
        # Direct call usage: lisp_function(func=..., name=...)
        return decorator(kwargs['func'])
    # Parameterised decorator usage: @lisp_function(name="...")
    return decorator
def main():
    """Dump the current registry contents to stdout."""
    import pprint
    pprint.pprint(lisp_registry)
if __name__ == '__main__':
    # Manual smoke test: print the registry when run directly.
    main()
| {
"repo_name": "Mause/tyrian",
"path": "tyrian/lisp_runtime/registry.py",
"copies": "1",
"size": "1169",
"license": "mit",
"hash": -2521925242921467000,
"line_mean": 19.1551724138,
"line_max": 71,
"alpha_frac": 0.5962360992,
"autogenerated": false,
"ratio": 4.189964157706093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 58
} |
"""Application-specific settings."""
import os
from django.conf import settings as _settings
from django.core.exceptions import ImproperlyConfigured
###############################################################################
# Single settings.
###############################################################################
# ``basestring`` was removed in Python 3; resolve the string type to test
# against once, at import time, so BoolSetting.parse_bool works on both
# Python 2 and Python 3.
try:
    _string_types = basestring  # noqa -- Python 2 only
except NameError:  # Python 3
    _string_types = str


class Setting(object):
    """Settings option helper class."""

    def __init__(self, **kwargs):
        """Initializer.
        :kwarg default: Override default for getting.
        :type default: ``object``
        :kwarg from_env: Allow variable from evironment.
        :type from_env: ``bool``
        :kwarg valid_set: Set of valid values for setting.
        :type valid_set: ``set``
        """
        self.from_env = kwargs.get('from_env', False)
        self.default = kwargs.get('default', None)
        self.valid_set = kwargs.get('valid_set', None)

    def validate(self, name, value):
        """Validate and return a value.
        :raises ImproperlyConfigured: if a ``valid_set`` was given and
            ``value`` is not a member of it.
        """
        if self.valid_set and value not in self.valid_set:
            raise ImproperlyConfigured(
                "%s: \"%s\" is not a valid setting (choose between %s)." %
                (name, value, ", ".join("\"%s\"" % x for x in self.valid_set)))
        return value

    def env_clean(self, value):  # pylint: disable=R0201
        """Clean / convert environment variable to proper type."""
        return value

    def get(self, name, default=None):
        """Get value.
        Resolution order: the Django settings object, then (when
        ``from_env`` is set) the process environment, then the default.
        """
        default = default if default is not None else self.default
        try:
            value = getattr(_settings, name)
        except AttributeError:
            value = os.environ.get(name, default) if self.from_env else default
        # Convert env variable.
        if value != default:
            value = self.env_clean(value)
        return self.validate(name, value)


class BoolSetting(Setting):
    """Boolean setting.."""

    def env_clean(self, value):
        """Clean / convert environment variable to proper type."""
        return self.parse_bool(value)

    @classmethod
    def parse_bool(cls, value, default=None):
        """Convert ``string`` or ``bool`` to ``bool``.
        :raises Exception: for any value other than ``None``, a ``bool``,
            or the exact strings ``"True"`` / ``"False"``.
        """
        if value is None:
            return default
        elif isinstance(value, bool):
            return value
        elif isinstance(value, _string_types):
            if value == 'True':
                return True
            elif value == 'False':
                return False
        raise Exception("Value %s is not boolean." % value)
###############################################################################
# Settings wrapper.
###############################################################################
class Settings(object):
    """Cloud Browser application settings.
    This class wraps the "real" Django settings object, so can be used instead.
    The additional cloud browser settings are as follows:
    .. note::
        **Environment Variables**: Certain credential settings can come from OS
        environment variables instead of from a settings file value to open up
        more options for secrets management. Values that can be set in the
        environment are designated with an "(*Env*)" notation.
        Setting a value this way could be done, e.g.::
            $ export CLOUD_BROWSER_AWS_ACCOUNT="my_account"
            $ export CLOUD_BROWSER_AWS_SECRET_KEY="my_secret"
            $ # ... start django application with environment variables.
    **Datastore Settings**:
    * ``CLOUD_BROWSER_DATASTORE``: Choice of datastore (see values below).
    **Amazon Web Services**: Configure AWS S3 as backing datastore.
    * ``CLOUD_BROWSER_DATASTORE = "AWS"``
    * ``CLOUD_BROWSER_AWS_ACCOUNT``: Account name. (*Env*)
    * ``CLOUD_BROWSER_AWS_SECRET_KEY``: Account API secret key. (*Env*)
    **Google Storage for Developers**: Configure Google Storage as backing
    datastore.
    * ``CLOUD_BROWSER_DATASTORE = "Google"``
    * ``CLOUD_BROWSER_GS_ACCOUNT``: Account name. (*Env*)
    * ``CLOUD_BROWSER_GS_SECRET_KEY``: Account API secret key. (*Env*)
    **Rackspace**: Configure Rackspace Cloud Files as backing datastore.
    * ``CLOUD_BROWSER_DATASTORE = "Rackspace"``
    * ``CLOUD_BROWSER_RACKSPACE_ACCOUNT``: Account name. (*Env*)
    * ``CLOUD_BROWSER_RACKSPACE_SECRET_KEY``: Account API secret key. (*Env*)
    * ``CLOUD_BROWSER_RACKSPACE_SERVICENET``: Boolean designating whether or
      not to use Rackspace's servicenet (i.e., the private interface on a
      Cloud Server). (*Env*)
    * ``CLOUD_BROWSER_RACKSPACE_AUTHURL``: Alternative authorization server,
      for use, e.g., with `OpenStack <http://www.openstack.org/>`_ instead of
      Rackspace. (*Env*)
    **Filesystem**: Configure simple filesystem mock datastore.
    * ``CLOUD_BROWSER_DATASTORE = "Filesystem"``
    * ``CLOUD_BROWSER_FILESYSTEM_ROOT``: Filesystem root to serve from.
    **View Permissions**: A standard Django view decorator object can be
    specified, which is wrapped for all browsing / viewing view -- for example,
    to limit views to logged in members, use ``login_required`` and for staff
    only, use ``staff_member_required``. Note that either a real decorator
    function or a fully-qualifid string path are acceptable, so you can use,
    e.g., "django.contrib.admin.views.decorators.staff_member_required" instead
    which might help with certain settings.py import-order-related issues.
    * ``CLOUD_BROWSER_VIEW_DECORATOR``: View decorator or fully-qualified
      string path.
    **Container Permissions**: Cloud browser allows a very rudimentary form
    of access control at the container level with white and black lists.
    If the white list is set, only container names in the white list are
    allowed. If the white list is unset, then any container name *not* in
    the black list is permitted. All name matching is exact (no regular
    expressions, etc.).
    * ``CLOUD_BROWSER_CONTAINER_WHITELIST``: White list of names. (Iterable)
    * ``CLOUD_BROWSER_CONTAINER_BLACKLIST``: Black list of names. (Iterable)
    **General**: Other settings.
    * ``CLOUD_BROWSER_DEFAULT_LIST_LIMIT``: Default number of objects to
      diplay per browser page.
    * ``CLOUD_BROWSER_STATIC_MEDIA_DIR``: If this applications static media
      (found in ``app_media``) is served up under the ``settings.MEDIA_ROOT``,
      then set a relative path from the root, and the static media will be used
      instead of a Django-based static view fallback.
    """
    #: Valid datastore types.
    DATASTORES = set((
        'CVMFilesystem',
    ))
    # Repository root; used below as the default parent for the CVMFS cache.
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    #: Settings dictionary of accessor callables.
    SETTINGS = {
        # Datastore choice.
        'CLOUD_BROWSER_DATASTORE': Setting(
            default='CVMFilesystem',
            valid_set=DATASTORES
        ),
        # Amazon Web Services S3 datastore settings.
        'CLOUD_BROWSER_AWS_ACCOUNT': Setting(from_env=True),
        'CLOUD_BROWSER_AWS_SECRET_KEY': Setting(from_env=True),
        # Google Storage for Developers datastore settings.
        'CLOUD_BROWSER_GS_ACCOUNT': Setting(from_env=True),
        'CLOUD_BROWSER_GS_SECRET_KEY': Setting(from_env=True),
        # Rackspace datastore settings.
        'CLOUD_BROWSER_RACKSPACE_ACCOUNT': Setting(from_env=True),
        'CLOUD_BROWSER_RACKSPACE_SECRET_KEY': Setting(from_env=True),
        'CLOUD_BROWSER_RACKSPACE_SERVICENET': BoolSetting(from_env=True),
        # NOTE(review): declared as BoolSetting although the class docstring
        # describes an auth *URL* -- likely should be Setting; confirm.
        'CLOUD_BROWSER_RACKSPACE_AUTHURL': BoolSetting(from_env=True),
        # Filesystem datastore settings.
        'CLOUD_BROWSER_FILESYSTEM_ROOT': Setting(),
        # Cvmfs datastore settings.
        'CLOUD_BROWSER_CVMFILESYSTEM_REPOSITORY_URL': Setting(from_env=True),
        'CLOUD_BROWSER_CVMFILESYSTEM_CACHE': Setting(from_env=True,
                                                     default=os.path.join(BASE_DIR, 'cvmfs_cache')),
        # View permissions.
        'CLOUD_BROWSER_VIEW_DECORATOR': Setting(),
        # Permissions lists for containers.
        'CLOUD_BROWSER_CONTAINER_WHITELIST': Setting(),
        'CLOUD_BROWSER_CONTAINER_BLACKLIST': Setting(),
        # Browser settings.
        'CLOUD_BROWSER_DEFAULT_LIST_LIMIT': Setting(default=20),
        # Static media root.
        'CLOUD_BROWSER_STATIC_MEDIA_DIR': Setting(),
    }
    def __init__(self):
        """Initializer."""
        # Lazily-built caches for the white/black list properties below.
        self.__container_whitelist = None
        self.__container_blacklist = None
    def __getattr__(self, name, default=None):
        """Get setting."""
        # Cloud-browser keys resolve through their Setting accessor;
        # anything else falls through to the real Django settings object.
        if name in self.SETTINGS:
            return self.SETTINGS[name].get(name, default)
        # Use real Django settings.
        return getattr(_settings, name, default)
    @property
    def _container_whitelist(self):
        """Container whitelist."""
        # Built once from the configured iterable (empty set when unset).
        if self.__container_whitelist is None:
            self.__container_whitelist = \
                set(self.CLOUD_BROWSER_CONTAINER_WHITELIST or [])
        return self.__container_whitelist
    @property
    def _container_blacklist(self):
        """Container blacklist."""
        # Built once from the configured iterable (empty set when unset).
        if self.__container_blacklist is None:
            self.__container_blacklist = \
                set(self.CLOUD_BROWSER_CONTAINER_BLACKLIST or [])
        return self.__container_blacklist
    def container_permitted(self, name):
        """Return whether or not a container is permitted.
        :param name: Container name.
        :return: ``True`` if container is permitted.
        :rtype: ``bool``
        """
        white = self._container_whitelist
        black = self._container_blacklist
        # Blacklist always wins; whitelist, when non-empty, must contain name.
        return name not in black and (not white or name in white)
    @property
    def app_media_url(self):
        """Get application static root from real static root URL."""
        # Reads the module-level ``settings`` wrapper (created below), which
        # resolves STATIC_URL via __getattr__ against the real Django settings.
        return settings.STATIC_URL
# Singleton wrapper instance used throughout the application.
settings = Settings()  # pylint: disable=C0103
| {
"repo_name": "Moliholy/cvmfs-browser",
"path": "cvmfs_browser/app_settings.py",
"copies": "2",
"size": "9904",
"license": "bsd-3-clause",
"hash": -3714563299199048000,
"line_mean": 37.0923076923,
"line_max": 100,
"alpha_frac": 0.6153069467,
"autogenerated": false,
"ratio": 4.311710927296474,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5927017873996474,
"avg_score": null,
"num_lines": null
} |
# URL prefix under which this plugin is registered (see Start()).
APPLICATIONS_PREFIX = "/applications/plexmanager"
# L() pulls the localized title from the Contents/Strings/ folder.
NAME = L('Title')
# make sure to replace artwork with what you want
# these filenames reference the example files in
# the Contents/Resources/ folder in the bundle
ART = 'art-default.jpg'
ICON = 'icon-default.png'
####################################################################################################
def Start():
    """Framework entry point: register the plugin and set object defaults.
    see also:
        http://dev.plexapp.com/docs/mod_Plugin.html
        http://dev.plexapp.com/docs/Bundle.html#the-strings-directory
    """
    # Make this plugin show up in the 'Applications' section in Plex.
    Plugin.AddPrefixHandler(APPLICATIONS_PREFIX, ApplicationsMainMenu, NAME, ICON, ART)
    for group in ("InfoList", "List"):
        Plugin.AddViewGroup(group, viewMode=group, mediaType="items")
    # Defaults for these object types, so the same parameters don't have
    # to be passed on every construction.
    # see also: http://dev.plexapp.com/docs/Objects.html
    MediaContainer.title1 = NAME
    MediaContainer.viewGroup = "List"
    MediaContainer.art = R(ART)
    DirectoryItem.thumb = R(ICON)
    VideoItem.thumb = R(ICON)
    HTTP.CacheTime = CACHE_1HOUR
# see:
# http://dev.plexapp.com/docs/Functions.html#ValidatePrefs
def ValidatePrefs():
    """Framework hook called after the user edits preferences.
    see: http://dev.plexapp.com/docs/Functions.html#ValidatePrefs
    """
    username = Prefs['username']
    password = Prefs['password']
    # Both credentials are required; report back via a message container.
    if not (username and password):
        return MessageContainer(
            "Error",
            "You need to provide both a user and password"
        )
    return MessageContainer(
        "Success",
        "User and password provided ok"
    )
#### the rest of these are user created functions and
#### are not reserved by the plugin framework.
#### see: http://dev.plexapp.com/docs/Functions.html for
#### a list of reserved functions above
#
# Example main menu referenced in the Start() method
# for the 'Applications' prefix handler
#
def ApplicationsMainMenu():
    """Build the top-level menu for the 'Applications' prefix handler.
    see:
        http://dev.plexapp.com/docs/Objects.html#MediaContainer
        http://dev.plexapp.com/docs/Objects.html#function-objects
    """
    # A MediaContainer acts like a folder holding sub-items.
    menu = MediaContainer(viewGroup="InfoList")
    demo_item = DirectoryItem(
        CallbackExample,
        "directory item title",
        subtitle="subtitle",
        summary="clicking on me will call CallbackExample",
        thumb=R(ICON),
        art=R(ART)
    )
    menu.Append(Function(demo_item))
    # Preferences entry; see:
    #   http://dev.plexapp.com/docs/Objects.html#PrefsItem
    #   http://dev.plexapp.com/docs/Functions.html#CreatePrefs
    #   http://dev.plexapp.com/docs/Functions.html#ValidatePrefs
    # NOTE(review): ``subtile`` looks like a typo for ``subtitle``; kept
    # as-is to preserve behavior -- confirm against the PrefsItem API.
    menu.Append(
        PrefsItem(
            title="Your preferences",
            subtile="So you can set preferences",
            summary="lets you set preferences",
            thumb=R(ICON)
        )
    )
    return menu
def CallbackExample(sender):
    """Placeholder callback for the demo directory item.
    (Try returning a MediaContainer of DirectoryItems to see what happens.)
    """
    body = ("In real life, you'll make more than one callback,\n"
            "and you'll do something useful.\n"
            "sender.itemTitle=%s" % sender.itemTitle)
    return MessageContainer("Not implemented", body)
| {
"repo_name": "rxsegrxup/PlexMan.bundle",
"path": "Contents/Code/__init__.py",
"copies": "3",
"size": "3741",
"license": "mit",
"hash": -746035014535664000,
"line_mean": 28.6904761905,
"line_max": 132,
"alpha_frac": 0.6233627372,
"autogenerated": false,
"ratio": 4.005353319057816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021610825027172823,
"num_lines": 126
} |
#!/Applications/RevAssign.app/Contents/Resources/miniconda/bin/python
# Find-and-replace inside a pickled .mpc file:
# 1. Treat the mpc file as a long string.
# 2. Find and replace a segment of that string with given args.
# 3. Save to a new mpc file.
#
# All prints use the single-argument function-call form so the script
# parses under both Python 2 and Python 3 with identical output (the
# original mixed print statements with print() calls).
import os
import sys
import pickle
import xlrd  # tools for reading spreadsheets
import xlwt  # tools for writing spreadsheets
import PyQt4
from PyQt4 import QtCore,QtGui
usage='usage: '+sys.argv[0] + ''' <input.mpc> <find> <replace>
Outputs a new mpc file named <input>_replaced.mpc
'''
if ('--help' in sys.argv) or ('-h' in sys.argv):
    print(usage)
    sys.exit(0)
if len(sys.argv) < 4:
    print(usage)
    sys.exit(0)
# Positional arguments: input path, text to find, replacement text.
infile = sys.argv[1]
path = os.path.dirname(infile)
outfile,_ = os.path.splitext(os.path.basename(infile))
outfile = os.path.join(path, outfile+'_replaced.mpc')
search = sys.argv[2]
replace = sys.argv[3]
with open(infile, 'rb') as f:
    p = pickle.load(f)
s = str(p)
print('\n')
print("The number of matching instances of \'{}\': {}".format(search, s.count(search)))
print("Here are the matches:")
for l in s.split():
    if l.count(search):
        # '\t ' reproduces the tab-plus-space separator that the old
        # two-item print statement emitted.
        print('\t ' + l)
print('\n')
r = s.replace(search, replace)
# NOTE(security): eval() executes the edited repr as Python code; only
# run this script on .mpc files from a trusted source.
d = eval(r,globals(), locals())
with open(outfile, 'wb') as o:
    print("writing output to: {}".format(outfile))
    pickle.dump(d,o)
| {
"repo_name": "nckz/RevAssign",
"path": "utils/findReplaceMPC.py",
"copies": "1",
"size": "1330",
"license": "bsd-2-clause",
"hash": -8789627225493460000,
"line_mean": 26.1428571429,
"line_max": 90,
"alpha_frac": 0.6466165414,
"autogenerated": false,
"ratio": 3.107476635514019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4254093176914019,
"avg_score": null,
"num_lines": null
} |
# Review outcomes an application can be given.
APPLICATION_STATUS_ACCEPTED = "accepted"
APPLICATION_STATUS_REJECTED = "rejected"
APPLICATION_STATUS_UPDATE_REQUEST = "update_request"
APPLICATION_STATUS_REVISIONS_REQUIRED = "revisions_required"
# Workflow states while an application moves through review.
APPLICATION_STATUS_PENDING = "pending"
APPLICATION_STATUS_IN_PROGRESS = "in progress"
APPLICATION_STATUS_COMPLETED = "completed"
APPLICATION_STATUS_ON_HOLD = "on hold"
APPLICATION_STATUS_READY = "ready"
# Every recognised application status, in one list.
APPLICATION_STATUSES_ALL = [
    APPLICATION_STATUS_ACCEPTED,
    APPLICATION_STATUS_REJECTED,
    APPLICATION_STATUS_UPDATE_REQUEST,
    APPLICATION_STATUS_REVISIONS_REQUIRED,
    APPLICATION_STATUS_PENDING,
    APPLICATION_STATUS_IN_PROGRESS,
    APPLICATION_STATUS_COMPLETED,
    APPLICATION_STATUS_ON_HOLD,
    APPLICATION_STATUS_READY
]
# Kinds of application records.
APPLICATION_TYPE_UPDATE_REQUEST = "update request"
APPLICATION_TYPE_FINISHED = "finished application/update"
APPLICATION_TYPE_NEW_APPLICATION = "new application"
# Provenance markers for recorded review decisions.
PROVENANCE_STATUS_REJECTED = "status:rejected"
PROVENANCE_STATUS_ACCEPTED = "status:accepted"
# Lock target names (note: applications lock under the literal "suggestion").
LOCK_APPLICATION = "suggestion"
LOCK_JOURNAL = "journal"
# Identifier and link type values.
IDENT_TYPE_DOI = "doi"
LINK_TYPE_FULLTEXT = "fulltext"
| {
"repo_name": "DOAJ/doaj",
"path": "portality/constants.py",
"copies": "1",
"size": "1115",
"license": "apache-2.0",
"hash": -148926764080994370,
"line_mean": 30.8571428571,
"line_max": 60,
"alpha_frac": 0.7775784753,
"autogenerated": false,
"ratio": 3.420245398773006,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9554966731215864,
"avg_score": 0.02857142857142857,
"num_lines": 35
} |
"""Application support library for using information exported by nameq."""
from __future__ import absolute_import
__all__ = [
"DEFAULT_STATEDIR",
"Feature",
"FeatureMonitor",
"log",
"remove_feature",
"set_feature",
]
import errno
import fcntl
import json
import logging
import os
import select
import tempfile
import threading
import inotify
# Directory where locally exported feature files are written.
DEFAULT_FEATUREDIR = "/etc/nameq/features"
# Directory where the nameq daemon publishes cluster-wide runtime state.
DEFAULT_STATEDIR = "/run/nameq/state"
log = logging.getLogger("nameq")
# Encoding used to decode file names from inotify events.
filename_encoding = "utf-8"
def set_feature(name, value, featuredir=DEFAULT_FEATUREDIR):
    """Publish feature *name* with the JSON-serialised *value*.

    Returns a context manager which removes the feature again on exit.
    """
    encoded = json.dumps(value)
    _create_config_file(featuredir, name, encoded)
    return _FeatureRemover(featuredir, name)
def remove_feature(name, featuredir=DEFAULT_FEATUREDIR):
    """Delete the exported feature file for *name* (no-op when absent)."""
    _remove_config_file(featuredir, name)
class _FeatureRemover(object):
def __init__(self, featuredir, name):
self.featuredir = featuredir
self.name = name
def __enter__(self):
pass
def __exit__(self, *exc):
remove_feature(self.name, self.featuredir)
def _create_config_file(dirpath, name, data=""):
try:
os.makedirs(dirpath, 0o755)
except OSError:
pass
tmpdirpath = os.path.join(dirpath, ".tmp")
try:
os.mkdir(tmpdirpath, 0o700)
except OSError:
pass
with tempfile.NamedTemporaryFile(mode="w", dir=tmpdirpath) as f:
f.write(data)
f.flush()
os.chmod(f.name, 0o664)
os.rename(f.name, os.path.join(dirpath, name))
if hasattr(f, "_closer"):
f._closer.delete = False
else:
f.delete = False
def _remove_config_file(dirpath, name):
try:
os.remove(os.path.join(dirpath, name))
except OSError as e:
if e.errno != errno.ENOENT:
raise
class Feature(object):
    """A momentary state of a feature on a host.

    Attributes:
        name:  name of the feature.
        host:  IPv4 or IPv6 address string of the host carrying the feature.
        value: decoded value, or None if the feature was removed.
    """

    __slots__ = ("name", "host", "value")

    def __init__(self, name, host, value=None):
        self.name = name
        self.host = host
        self.value = value

    def __repr__(self):
        return "Feature(name={!r}, host={!r}, value={!r})".format(
            self.name, self.host, self.value)
class _FeatureMonitor(object):
    """Base class: inotify plumbing watching the feature tree for updates.

    Directory layout watched: <statedir>/features/<feature name>/<host>.
    """

    def __init__(self, statedir=DEFAULT_STATEDIR):
        featuredir = os.path.join(statedir, "features")
        # Create <statedir>/features if missing; only EEXIST is tolerated.
        try:
            os.mkdir(featuredir, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        self._featuredir = os.path.realpath(featuredir)
        self._wd_featurepaths = {}  # inotify watch descriptor -> feature dir path
        self._featurename_wds = {}  # feature name -> inotify watch descriptor
        self._queued_features = []  # Feature instances pending delivery
        self._fd = inotify.init(inotify.CLOEXEC | inotify.NONBLOCK)
        ok = False
        try:
            flags = inotify.ONLYDIR | inotify.CREATE | inotify.DELETE | inotify.DELETE_SELF
            self._featuredir_wd = inotify.add_watch(self._fd, self._featuredir, flags)
            ok = True
        finally:
            # Don't leak the inotify descriptor if the initial watch failed.
            if not ok:
                os.close(self._fd)
        # Queue features that already existed before the watch was set up.
        try:
            featurenames = os.listdir(self._featuredir)
        except Exception:
            log.exception("listing %s", self._featuredir)
        else:
            for featurename in featurenames:
                self._add_feature(featurename)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.close()

    def close(self):
        # Overridden by FeatureMonitor; the base class has nothing to stop.
        log.debug("_FeatureMonitor.close method not implemented")

    def _handle(self, event):
        """Dispatch a single inotify event to the appropriate handler."""
        if event.mask & inotify.CREATE:
            # A new feature directory appeared under the feature root.
            self._add_feature(event.name.decode(filename_encoding))
        if event.mask & inotify.DELETE:
            if event.wd == self._featuredir_wd:
                self._remove_feature(event.name.decode(filename_encoding))
            else:
                self._remove_host(event.wd, event.name.decode(filename_encoding))
        if event.mask & inotify.DELETE_SELF:
            # The watched feature root itself disappeared.
            self.close()
        if event.mask & inotify.MOVED_TO:
            # Host files are renamed into place atomically (see _create_config_file).
            self._add_host(event.wd, event.name.decode(filename_encoding))

    def _add_feature(self, name):
        """Start watching one feature directory and queue its current hosts."""
        log.debug("adding feature %s", name)
        path = os.path.join(self._featuredir, name)
        try:
            flags = inotify.ONLYDIR | inotify.DELETE | inotify.MOVED_TO
            wd = inotify.add_watch(self._fd, path, flags)
        except Exception:
            log.exception("adding watch for %s", path)
            return
        self._wd_featurepaths[wd] = path
        self._featurename_wds[name] = wd
        try:
            hostnames = os.listdir(path)
        except Exception:
            log.exception("listing %s", path)
            return
        for hostname in hostnames:
            self._add_host(wd, hostname)

    def _remove_feature(self, name):
        """Drop the watch bookkeeping for a removed feature directory."""
        log.debug("removing feature %s", name)
        wd = self._featurename_wds[name]
        del self._wd_featurepaths[wd]
        del self._featurename_wds[name]

    def _add_host(self, wd, name):
        """Read a host's feature file and queue an update for it."""
        path = os.path.join(self._wd_featurepaths[wd], name)
        log.debug("adding host %s", path)
        try:
            f = open(path)
        except Exception:
            # The file may already be gone again; debug-level only.
            log.debug("opening %s", path, exc_info=True)
            return
        try:
            with f:
                data = f.read()
        except Exception:
            log.exception("reading %s", path)
            return
        self._enqueue_feature(path, data)

    def _remove_host(self, wd, name):
        """Queue a removal (value=None) for a host's feature file."""
        path = os.path.join(self._wd_featurepaths[wd], name)
        log.debug("removing host %s", path)
        # If the file reappeared already, a pending MOVED_TO event will
        # re-add it; skip queueing a stale removal.
        if os.access(path, os.F_OK):
            log.debug("%s exists, not removing", path)
            return
        self._enqueue_feature(path)

    def _enqueue_feature(self, path, data=None):
        """Append a Feature update parsed from *path*/*data* to the queue."""
        log.debug("enqueuing %s update", path)
        value = None
        if data is not None:
            try:
                value = json.loads(data)
            except Exception:
                log.exception("decoding JSON data %r", data)
                return
        # Path layout is <featuredir>/<feature name>/<host address>.
        self._queued_features.append(Feature(
            name=os.path.basename(os.path.dirname(path)),
            host=os.path.basename(path),
            value=value,
        ))
class FeatureMonitor(_FeatureMonitor):
    """Watches the nameq runtime state for changes. Either the 'changed' method
    must be implemented in a subclass, or a callable must be provided as the
    'changed' parameter. It will be invoked with a Feature instance, or the
    terminator when the monitor is closed. The 'booted' method/callable
    will be invoked without parameters after all pre-existing features have
    been delivered. The state directory must exist.
    """

    # Read buffer size for draining inotify events.
    _bufsize = 65536

    def __init__(self, changed=None, terminator=None, statedir=DEFAULT_STATEDIR, booted=None):
        super(FeatureMonitor, self).__init__(statedir)

        try:
            self._changed = changed or self.changed
        except AttributeError:
            self._changed = self._changed_not_implemented

        try:
            self._booted = booted or self.booted
        except AttributeError:
            self._booted = self._booted_not_implemented

        self.terminator = terminator

        # Self-pipe used to wake the select() loop up for shutdown.
        self._pipe = os.pipe()

        # BUGFIX: the close-on-exec flag used with F_SETFD is
        # fcntl.FD_CLOEXEC; the original OR'ed in os.O_CLOEXEC, which is an
        # open(2) flag with a different value, so the descriptors were
        # never actually marked close-on-exec.
        for fd in self._pipe:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

        # The read end must be non-blocking so draining it cannot stall.
        flags = fcntl.fcntl(self._pipe[0], fcntl.F_GETFL)
        fcntl.fcntl(self._pipe[0], fcntl.F_SETFL, flags | os.O_NONBLOCK)

        self._thread = threading.Thread(target=self._loop)
        self._thread.start()

    def close(self):
        """Stop watching and invoke the callback with the terminator."""
        # Closing the write end makes the read end readable (EOF), which
        # breaks the _iter() loop in the worker thread.
        os.close(self._pipe[1])
        self._thread.join()

    def _changed_not_implemented(self, feature):
        # Fallback when neither subclass method nor callable was supplied.
        log.debug("FeatureMonitor.changed method not implemented")

    def _booted_not_implemented(self):
        log.debug("FeatureMonitor.booted method not implemented")

    def _iter(self):
        """Yield inotify events until the shutdown pipe signals EOF."""
        while True:
            try:
                readable, _, _ = select.select([self._fd, self._pipe[0]], [], [])
            except select.error as e:
                # Retry on signal interruption.
                if e.args[0] == errno.EINTR:
                    continue
                raise

            if self._fd in readable:
                try:
                    buf = os.read(self._fd, self._bufsize)
                except OSError as e:
                    if e.errno != errno.EAGAIN:
                        raise
                else:
                    assert buf
                    for event in inotify.unpack_events(buf):
                        yield event

            if self._pipe[0] in readable:
                try:
                    os.read(self._pipe[0], 1)
                except OSError as e:
                    if e.errno != errno.EAGAIN:
                        raise
                else:
                    # EOF (or any data) on the pipe means close() was called.
                    break

    def _loop(self):
        """Worker thread: deliver the initial state, then follow updates."""
        try:
            self._deliver()
            try:
                self._booted()
            except Exception:
                log.exception("uncaught exception in FeatureMonitor.booted callback")
            for event in self._iter():
                self._handle(event)
                self._deliver()
        finally:
            # Always signal termination, even when the loop died.
            self._changed(self.terminator)

    def _deliver(self):
        """Flush queued features through the changed callback."""
        for feature in self._queued_features:
            try:
                self._changed(feature)
            except Exception:
                log.exception("uncaught exception in FeatureMonitor.changed callback")
        del self._queued_features[:]
| {
"repo_name": "ninchat/nameq",
"path": "python/nameq/__init__.py",
"copies": "1",
"size": "9921",
"license": "mit",
"hash": 2429355859801083000,
"line_mean": 26.7122905028,
"line_max": 94,
"alpha_frac": 0.5604273763,
"autogenerated": false,
"ratio": 4.008484848484849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00029010985406880286,
"num_lines": 358
} |
"""application_tags
Revision ID: 3dbf6db7f9eb
Revises: 7cf5787a089e
Create Date: 2019-01-08 20:51:11.845673
"""
# From: http://alembic.zzzcomputing.com/en/latest/cookbook.html#conditional-migration-elements
from alembic import op
import sqlalchemy as sa
import commandment.dbtypes
from alembic import context
# revision identifiers, used by Alembic.
revision = '3dbf6db7f9eb'       # this migration
down_revision = '7cf5787a089e'  # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Alembic entry point: apply this revision's schema changes."""
    schema_upgrades()
    # Data migrations are opt-in via "-x data=true" and currently disabled.
    # if context.get_x_argument(as_dictionary=True).get('data', None):
    #     data_upgrades()
def downgrade():
    """Alembic entry point: revert this revision's schema changes."""
    # Data migrations are opt-in via "-x data=true" and currently disabled.
    # if context.get_x_argument(as_dictionary=True).get('data', None):
    #     data_downgrades()
    schema_downgrades()
def schema_upgrades():
    """schema upgrade migrations go here."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Plain join table (no primary key): applications <-> tags, many-to-many.
    op.create_table('application_tags',
    sa.Column('application_id', sa.Integer(), nullable=True),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
    sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], )
    )
def schema_downgrades():
    """schema downgrade migrations go here."""
    op.drop_table('application_tags')
| {
"repo_name": "jessepeterson/commandment",
"path": "commandment/alembic/versions/3dbf6db7f9eb_application_tags.py",
"copies": "1",
"size": "1268",
"license": "mit",
"hash": -8172776586826064000,
"line_mean": 24.36,
"line_max": 94,
"alpha_frac": 0.6940063091,
"autogenerated": false,
"ratio": 3.363395225464191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45574015345641905,
"avg_score": null,
"num_lines": null
} |
# Application to pull streaming data from twitter and determine the sentiment of them.
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import json
import sys
import webbrowser
import codecs
import csv
from string import punctuation
import matplotlib.pyplot as plt
import time
class tweetlistener(StreamListener):
    """Stream listener: collects tweets until the per-word quota is reached,
    then runs sentiment analysis and moves on to the next search word."""

    def on_status(self,status):
        """Handle one incoming tweet from the stream."""
        global counter,Total_tweet_count,outfile,search_words_list,indiv,outfile
        counter += 1
        # When enough tweets were collected for the current search word,
        # analyse them and restart streaming on the remaining words.
        # NOTE(review): this recurses back into search_tweets() instead of
        # returning False to stop the stream - confirm stack depth stays
        # bounded for long search-word lists.
        if counter >= Total_tweet_count:
            search_words_list.pop(0)
            outfile.close()
            senti1 = Sentiment()
            senti1.sentiment_analysis()
            #time.sleep(15)
            search_tweets()
        try:
            print("----------NEW TWEET ARRIVED!-----------")
            print("Tweet Text : ", status.text)
            outfile.write(status.text)
            outfile.write(str("\n"))
            print("Author's Screen name : ", status.author.screen_name)
            print("Time of creation : ", status.created_at)
            print("Source of Tweet : ", status.source)
        except UnicodeEncodeError:
            # Console may not be able to encode some characters; drop tweet.
            print("Skipping a tweet")

    def on_error(self, status):
        """On stream error: draw whatever was collected and terminate."""
        drawing()
        print("Too soon reconnected . Will terminate the program")
        print(status)
        sys.exit()
class Sentiment():
    """Lexicon-based sentiment scoring of the tweets collected for the
    current search word (uses module globals: file2, indiv, outfile, all_figs)."""

    def sentiment_analysis(self):
        """Score each collected tweet as Positive/Neutral/Negative, print an
        overall verdict, write a per-tweet CSV and queue pie-chart counts.

        Reads the tweet file named by the global ``file2`` and the lexicon
        files ``positive_words.txt`` / ``negative_words.txt``.
        """
        global file2, indiv, outfile, labels, colors, all_figs

        # Load sentiment lexicons; sets make per-word membership O(1)
        # instead of scanning a list for every word of every tweet.
        with open("positive_words.txt") as f:
            positive_words = set(f.read().split('\n'))
        with open("negative_words.txt") as f:
            negative_words = set(f.read().split('\n'))

        outfile.close()

        positive_counts = []
        negative_counts = []
        conclusion = []
        tweet_rows = []
        tot_pos = 0
        tot_neu = 0
        tot_neg = 0

        tweets = codecs.open(file2, 'r', "utf-8").read()
        tweets_list = tweets.split('\n')

        # Single-pass punctuation stripping.
        punct_table = str.maketrans('', '', punctuation)
        for tweet in tweets_list:
            # BUGFIX: keep tweets as str.  The original encoded each tweet
            # to bytes and then called str methods / str.replace on the
            # bytes object, which raises TypeError on Python 3.
            tweet_rows.append(tweet)
            tweet_processed = tweet.lower().translate(punct_table)
            words = tweet_processed.split(' ')
            positive_counter = sum(1 for w in words if w in positive_words)
            negative_counter = sum(1 for w in words if w in negative_words)
            positive_counts.append(positive_counter)
            negative_counts.append(negative_counter)
            if positive_counter > negative_counter:
                conclusion.append("Positive")
                tot_pos += 1
            elif positive_counter == negative_counter:
                conclusion.append("Neutral")
                tot_neu += 0.5  # neutral tweets deliberately half-weighted
            else:
                conclusion.append("Negative")
                tot_neg += 1

        output = zip(tweet_rows, positive_counts, negative_counts, conclusion)

        print("******** Overall Analysis **************")
        if tot_pos > tot_neg and tot_pos > tot_neu:
            print("Overall Sentiment - Positive")
        elif tot_neg > tot_pos and tot_neg > tot_neu:
            print("Overall Sentiment - Negative")
        elif tot_neg == tot_neu and tot_neg > tot_pos:
            print("Overall Sentiment - Negative")
        elif tot_pos + tot_neg < tot_neu:
            print("Overall Sentiment - Semi Positive ")
        else:
            print("Overall Sentiment - Neutral")
        print("%%%%%%%%%%%% End of stream - " + indiv + " %%%%%%%%%%%%%%%%%%%%%")

        # BUGFIX: open the CSV in text mode with newline='' - the original
        # used mode 'wb', which csv.writer rejects on Python 3.
        file1 = 'tweet_sentiment_' + indiv + '.csv'
        with open(file1, 'w', newline='', encoding='utf-8') as csv_file:
            csv.writer(csv_file).writerows(output)

        # Queue [pos, neg, neu, word] for the pie chart drawn by drawing().
        all_figs.append([tot_pos, tot_neg, tot_neu, indiv])
def drawing():
    """Render one pie chart per analysed search word and save it as a PNG.

    Reads the module-global ``all_figs`` list of [pos, neg, neu, word]
    entries queued by Sentiment.sentiment_analysis(); also relies on the
    globals ``labels`` and ``colors`` initialised in main().
    """
    global all_figs
    for one_fig in all_figs:
        all_total = 0
        sentiments = {}
        sentiments["Positive"] = one_fig[0]
        sentiments["Negative"] = one_fig[1]
        sentiments["Neutral"] = one_fig[2]
        all_total = one_fig[0] + one_fig[1] + one_fig[2]
        sizes = []
        # Normalise counts to fractions for the pie slices.
        sizes = [sentiments['Positive']/float(all_total), sentiments['Negative']/float(all_total), sentiments['Neutral']/float(all_total)]
        plt.pie(sizes,labels=labels, colors=colors, autopct='%1.1f%%', shadow=True)
        plt.axis('equal')
        plt.title('sentiment for the word - ' + str(one_fig[3]))
        fig_name = "fig_" + str(one_fig[3]) + ".png"
        # Save the figures
        plt.savefig(fig_name)
        plt.close()
    # NOTE(review): every figure has already been closed above, so this
    # show() displays nothing - confirm whether it can be dropped.
    plt.show()
def main():
    """Collect configuration and kick off tweet streaming + analysis."""
    global Total_tweet_count,outfile,file,search_words_list,auth,labels,colors,all_figs
    # SECURITY: hard-coded OAuth credentials - these should be moved to
    # environment variables / a config file and the exposed ones revoked.
    consumer_key = 'O9KXKiFmfzTNgF0eevXXXX'
    consumer_secret = 'ozgNXFyi4A0rimGGPx8bGJHLGosJibGiFASZbXXXXX'
    access_token = '300198545-EsrLh8Xh9OzkRUkjwubPomH0M4GS3pXOfGgBLXXX'
    access_secret = 'NlqKNVpnbYK1T5WuOROjSdGrmSfxy8mluggN0w36uzxXXX'
    search_words = str(input("Enter Search words - separate them by comma: "))
    Total_tweet_count = int(input("Enter tweets to be pulled for each search word: "))
    search_words_list = search_words.split(",")
    # BUGFIX: removed leftover debug line "Total_tweet_count = 10" which
    # silently discarded the count the user just entered.
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    labels = ['Positive','Negative','Neutral']
    colors = ['yellowgreen','lightcoral','gold']
    all_figs= []
    search_tweets()
    # NOTE(review): search_tweets() ends with sys.exit(), so this line is
    # unreachable (it also was in the original); kept for reference.
    outfile = codecs.open("F:\\test_tweets1.txt", 'w', "utf-8")#iphone
def search_tweets():
    """Stream tweets for each remaining search word, one word at a time.

    Re-authenticates with a second set of credentials, then filters the
    Twitter stream on each entry of the global ``search_words_list``.
    """
    global search_words_list,counter,auth,indiv,outfile,file2,plt,access
    # SECURITY: hard-coded OAuth credentials; should come from config.
    consumer_key = 'ZkIxjbsPacixuhTg7aclkQ'
    consumer_secret = 'yme0jG3UDhG0CFgqlc50UQFSspo3EkUfPziUf2FFo'
    access_token = '1635433267-29ZpqtvpBIzVOQTnz1wgCsaotyEBTgs4V4jkUEM'
    access_secret = '33ZEGzs7pR1M0AYnD0mwOaZJ8JIF1Nc183VOFNkeug'
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    #auth.set_access_token(access_token, access_secret)
    print(search_words_list)
    for indiv in search_words_list:
        #indiv = indiv.split()
        print("Search Word - " + indiv + " - is being processed")
        counter = 0
        # NOTE(review): indiv[0] is only the FIRST CHARACTER of the search
        # word, so words sharing an initial letter reuse the same file -
        # confirm whether "test_" + indiv + ".txt" was intended.
        file2 = "test_" + str(indiv[0]) + ".txt"
        outfile = codecs.open(file2, 'w', "utf-8")
        twitterStream = Stream(auth, tweetlistener())
        one_list = []
        one_list.append(indiv)
        print(one_list)
        # Blocks until the listener's on_status quota logic takes over.
        twitterStream.filter(track=one_list,languages = ["en"])
    #for i in range(len(figs)):
    drawing()
    sys.exit()
main()
| {
"repo_name": "Swaraj1998/MyCode",
"path": "ML-Workshop/Data/Twitter_Sentiment.py",
"copies": "1",
"size": "7162",
"license": "mit",
"hash": -5909815142245055000,
"line_mean": 34.4554455446,
"line_max": 142,
"alpha_frac": 0.5886623848,
"autogenerated": false,
"ratio": 3.5142296368989205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46028920216989205,
"avg_score": null,
"num_lines": null
} |
# application to read CMAQ netCDF file and create
# desired CSV file, suitable for loading into DB
import numpy as np
import pandas as pd
import xarray as xr
import datetime
import yaml
# get file path configs
fd = open("./create_cmaq_csv_config.yml")
config = yaml.safe_load(fd)
fd.close()
cmaq2011 = config['cmaq2011']
# open CMAQ file into xarray Dataset
infile = cmaq2011['netcdf-path'] + cmaq2011['netcdf-file-name']
ds = xr.open_dataset(infile, decode_coords=True)
# drop all unused variables in the Dataset
# (only the variables listed under cmaq2011['data-vars'] are kept)
new_ds = ds
for var in ds.data_vars:
    if(var not in cmaq2011['data-vars']):
        new_ds = new_ds.drop(var)
# delete LAY dimension?
# not for now
# re-start col, row dimensions at 1
# (CMAQ grid indices are conventionally 1-based; xarray loads them 0-based)
new_ds.coords['COL'] = new_ds.coords['COL'] + 1
new_ds.coords['ROW'] = new_ds.coords['ROW'] + 1
# add date range coords to TSTEP dimension
# SDATE is in YYYYJJJ (year + day-of-year) form; freq='H' assumes hourly
# steps.  NOTE(review): STIME is read but never used - confirm the start
# time of day can safely be ignored.
sdate = str(getattr(new_ds, 'SDATE'))
stime = getattr(new_ds, 'STIME')
date_str = datetime.datetime.strptime(sdate, '%Y%j')
tstep_len = len(new_ds.coords['TSTEP'])
new_ds.coords['TSTEP'] = pd.date_range(date_str, freq='H', periods=tstep_len)
# save to CSV
outfile = cmaq2011['out-csv-path'] + cmaq2011['out-csv-file-name']
print("Writing to " + outfile + " - this might take awhile ...")
fd = open(outfile, 'w')
df1 = new_ds.to_dataframe()
df1.to_csv(path_or_buf=fd) #, sep=',', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='a', encoding=None, compression=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, decimal='.')
fd.close()
print("Done!")
| {
"repo_name": "mjstealey/exposures-api",
"path": "sample-data/cmaq-netcdf/create_cmaq_csv.py",
"copies": "2",
"size": "1654",
"license": "mit",
"hash": -6683876174513744000,
"line_mean": 34.1914893617,
"line_max": 322,
"alpha_frac": 0.6934703748,
"autogenerated": false,
"ratio": 2.8815331010452963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45750034758452957,
"avg_score": null,
"num_lines": null
} |
'''Application used to generate KV code for BorderImages.'''
import kivy
kivy.require('1.9.2')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import (
BooleanProperty,
ListProperty,
StringProperty,
NumericProperty
)
from kivy.clock import mainthread
from plyer import filechooser
import os.path
import threading
__version__ = 1.0
class BorderImageWidget(Widget):
    '''Widget used to help with setup of BorderImage on kivy widgets.'''

    source = StringProperty('data/logo/kivy-icon-512.png')
    '''Source of the image, used as the texture.'''

    auto_scale = BooleanProperty(False)
    '''Indicates if BorderImage should automatically scale when too small.'''

    border = ListProperty([0, 0, 0, 0])
    '''Border of the image taken from source, set by the user.'''

    display_border = ListProperty([0, 0, 0, 0])
    '''Size of the border on the display.'''

    fill_stretch_area = BooleanProperty(False)
    '''Defines if stretch area should be filled with semitransparent color.'''

    scale = NumericProperty(1.0)
    '''Scale of the borders.'''
class BorderImageTool(BoxLayout):
    '''Main app widget.'''

    # Template for the generated KV snippet; placeholders are filled with
    # border, display_border, auto_scale and source (in that order).
    # NOTE(review): the "source: {}" placeholder is not quoted, so a bare
    # file path would produce an invalid KV expression - confirm callers
    # include the quotes when formatting.
    KV_STRING = '''
<MyWidget>:
    canvas.before:
        Color:
            rgb: 1, 1, 1
        BorderImage:
            size: self.size
            pos: self.pos
            border: {}
            display_border: {}
            auto_scale: {}
            source: {}
'''.strip()

    kv_string = StringProperty(KV_STRING)
    '''Code string displayed in the TextInput widget to be copied.'''

    def open_image(self):
        """Launch the image picker on a worker thread (plyer blocks)."""
        threading.Thread(target=self._open_image).start()

    def _open_image(self):
        # Blocking native file chooser; runs off the UI thread.
        images = filechooser.open_file(preview=True, multiple=False)
        if images:
            self._image_opened(images[0])

    @mainthread
    def _image_opened(self, image):
        # Back on the UI thread: apply the chosen file if it exists.
        if image:
            if os.path.exists(image):
                self.ids.bw.source = image
class KivyBorderImageToolApp(App):
    """Application entry point: builds the root widget and window title."""

    def build(self):
        """Return the root widget; set the versioned window title."""
        self.title = 'Kivy BorderImage Tool v.{}'.format(__version__)
        return BorderImageTool()
if __name__ == '__main__':
KivyBorderImageToolApp().run()
| {
"repo_name": "rafalo1333/KivyBorderImageTool",
"path": "main.py",
"copies": "1",
"size": "2270",
"license": "mit",
"hash": 4236131175792662500,
"line_mean": 24.7954545455,
"line_max": 78,
"alpha_frac": 0.636123348,
"autogenerated": false,
"ratio": 3.8803418803418803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993405749561455,
"avg_score": 0.00461189575608514,
"num_lines": 88
} |
import json
from flask import Blueprint, request, url_for, jsonify, redirect, abort, current_app as app, session
from flask_genshi import Genshi, render_template
from flask_login import login_user, logout_user, login_required, current_user
from sqlalchemy import literal, text, and_, or_
from sqlalchemy.orm.exc import aliased, NoResultFound
from .models import *
# Blueprint named after the app; serves its own ./static folder.
app_bp = Blueprint(app.name, app.name, static_folder='static')
genshi = Genshi(app)
# Render .html templates with the HTML5 serializer.
genshi.extensions['html'] = 'html5'
# Keep a reference to Flask's url_for before shadowing it below.
_orig_url_for = url_for
def url_for(name, *args, **kwargs):
    """Blueprint-aware url_for: qualify *name* with the app/blueprint name."""
    qualified = '{}.{}'.format(app.name, name)
    return _orig_url_for(qualified, *args, **kwargs)
def render(template, **kwargs):
    """Render a Genshi template with some extra helpers."""
    helpers = {
        'static': lambda res: url_for('static', filename=res),
        'current_user': current_user,
    }
    kwargs.update(helpers)
    return render_template(template, kwargs)
@app_bp.route('/')
def home():
    """Display homepage"""
    return render('home.html')
def validate_user_login(user_id, passwd):
    """Check credentials against UserProfile; log the user in on success.

    Returns a truthy value when authentication succeeded.
    """
    profile = UserProfile.query.filter_by(email_addr=user_id).first()
    ok = profile and profile.check_password(passwd)
    if ok:
        login_user(profile)
    return ok
def check_safe_url(url):
    """Validate that *url* is safe to redirect to.

    NOTE(review): unimplemented stub - always returns None, so callers get
    no protection; see the commented-out call in login().
    """
    pass
@app_bp.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form and authenticate POSTed credentials."""
    email = None
    error = None
    if request.method == 'POST':
        email = request.form.get('email', None)
        password = request.form.get('password', None)
        if validate_user_login(email, password):
            # the 'next' parameter is automatically added to the URL
            # when the user accesses a route with @login_required while
            # not authenticated
            next_url = request.args.get('next', '')
            # Only accept site-relative targets.  SECURITY FIX: also reject
            # protocol-relative URLs ("//evil.example"), which pass a bare
            # startswith('/') check but redirect off-site (open redirect).
            if not next_url.startswith('/') or next_url.startswith('//'):
                next_url = None
            return redirect(next_url or url_for('home'))
        else:
            error = 'Invalid credentials.'
    return render('login.html', email=email, error=error)
@app_bp.route('/logout')
def logout():
    """End the user's session and return to the homepage."""
    logout_user()
    return redirect(url_for('home'))
| {
"repo_name": "0xquad/flask-app-template",
"path": "app.tmpl/views.py",
"copies": "1",
"size": "2237",
"license": "mit",
"hash": -515327245951839940,
"line_mean": 26.9625,
"line_max": 100,
"alpha_frac": 0.6477425123,
"autogenerated": false,
"ratio": 3.6672131147540985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9803811856728838,
"avg_score": 0.002228754065052151,
"num_lines": 80
} |
"""Application views."""
import csv
import itertools
import json
import math
import mimetypes
import os
import peewee
import secrets
import shutil
import traceback
from datetime import datetime
from io import BytesIO
import requests
import tablib
import yaml
from flask import (Response, abort, flash, jsonify, redirect, render_template, request, send_file,
send_from_directory, stream_with_context, url_for)
from flask_admin._compat import csv_encode
from flask_admin.actions import action
from flask_admin.babel import gettext
from flask_admin.base import expose
from flask_admin.contrib.peewee import ModelView, filters
from flask_admin.contrib.peewee.form import CustomModelConverter
from flask_admin.contrib.peewee.view import save_inline
from flask_admin.form import SecureForm, rules
from flask_admin.helpers import get_redirect_target
from flask_admin.model import BaseModelView, typefmt
from flask_login import current_user, login_required
from flask_rq2.job import FlaskJob
from jinja2 import Markup
from orcid_api_v3.rest import ApiException
from playhouse.shortcuts import model_to_dict
from peewee import SQL
from werkzeug.utils import secure_filename
from wtforms.fields import BooleanField
from urllib.parse import parse_qs, urlparse
from wtforms import validators
from . import SENTRY_DSN, admin, app, cache, limiter, models, orcid_client, rq, utils
from .apis import yamlfy
from .forms import (AddressForm, ApplicationFrom, BitmapMultipleValueField, CredentialForm, EmailTemplateForm,
ExternalIdentifierForm, FileUploadForm, FundingForm, GroupIdForm, LogoForm, OrgRegistrationForm,
OtherNameKeywordForm, PartialDateField, PeerReviewForm, ProfileSyncForm,
RecordForm, ResearcherUrlForm, UserInvitationForm, WebhookForm, WorkForm,
validate_orcid_id_field)
from .login_provider import roles_required
from .models import (JOIN, Affiliation, AffiliationRecord, AffiliationExternalId, CharField,
Client, Delegate, ExternalId, FixedCharField, File, FundingContributor,
FundingInvitee, FundingRecord, Grant, GroupIdRecord, Invitee, MessageRecord,
ModelException, NestedDict, OtherIdRecord, OrcidApiCall, OrcidToken,
Organisation, OrgInfo, OrgInvitation, PartialDate, PropertyRecord,
PeerReviewExternalId, PeerReviewInvitee, PeerReviewRecord, RecordInvitee, Role, Task,
TaskType, TextField, Token, Url, User, UserInvitation, UserOrg,
UserOrgAffiliation, WorkContributor, WorkExternalId, WorkInvitee, WorkRecord,
db)
# NB! Should be disabled in production
from .pyinfo import info
from .utils import get_next_url, read_uploaded_file, send_user_invitation
# Default headers for ORCID API requests.
HEADERS = {"Accept": "application/vnd.orcid+json", "Content-type": "application/vnd.orcid+json"}
ORCID_BASE_URL = app.config["ORCID_BASE_URL"]
@app.errorhandler(401)
def unauthorized(e):
    """Handle Unauthorized (401)."""
    # When we know where the user came from, bounce back with a flash
    # message; otherwise render the dedicated 401 page.
    _next = get_next_url()
    if _next:
        flash(
            "You have not been authenticated, or do not have the necessary permissions to access this page",
            "danger")
        return redirect(_next)
    return render_template("401.html"), 401
@app.errorhandler(403)
def forbidden(e):
    """Handle Forbidden (403)."""
    _next = get_next_url()
    if _next:
        # BUGFIX: this handler previously also flashed "Page Not Found",
        # copied from the 404 handler - the wrong message for a 403.
        flash(
            "You might not have the necessary permissions to access this page.",
            "danger")
        return redirect(_next)
    return render_template("403.html"), 403
@app.errorhandler(404)
def page_not_found(e):
    """Handle nonexistent pages (404)."""
    _next = get_next_url()
    if _next:
        flash("Page Not Found", "danger")
        return redirect(_next)
    return render_template("404.html"), 404
@app.errorhandler(500)
def internal_error(error):
    """Handle internal error (500), including the traceback in the page."""
    trace = traceback.format_exc()
    # When Sentry is configured, pass the event id so the template can show
    # a user-facing error reference.
    if SENTRY_DSN:
        from sentry_sdk import last_event_id
        return render_template(
            "500.html",
            trace=trace,
            error_message=str(error),
            sentry_event_id=last_event_id())
    else:
        return render_template("500.html", trace=trace, error_message=str(error))
@app.route("/unsubscribe/<token>")
def unsubscribe(token):
    """Show unsubscribe page.

    NOTE(review): *token* is accepted but never validated or used - the
    handler only flashes an informational message; confirm intentional.
    """
    flash("If you wish to unsubscribe from all notifications, please contact the Hub admin", "info")
    return redirect(url_for("index"))
@app.route("/favicon.ico")
def favicon():
    """Support for the "favicon" legacy: favicon location in the root directory."""
    return send_from_directory(
        os.path.join(app.root_path, "static", "images"),
        "favicon.ico",
        mimetype="image/vnd.microsoft.icon")
@app.route("/status")
@limiter.limit("30/minute")
def status():
    """Check the application health status attempting to connect to the DB.

    NB! This entry point should be protected and accessible
    only form the application monitoring servers.
    """
    try:
        # SQLite has no now(); use current_timestamp there instead.
        now = db.execute_sql(
            "SELECT current_timestamp" if isinstance(db, peewee.SqliteDatabase) else "SELECT now()").fetchone()[0]
        total, used, free = shutil.disk_usage(__file__)
        free = round(free * 100 / total)
        # 418 signals "unhealthy" to monitors once free disk drops to 10%.
        return jsonify({
            "status": "Connection successful.",
            "db-timestamp": now if isinstance(now, str) else now.isoformat(),
            "free-storage-percent": free
        }), 200 if free > 10 else 418
    except Exception as ex:
        return jsonify({
            "status": "Error",
            "message": str(ex),
        }), 503  # Service Unavailable
@app.route("/pyinfo/<message>")
@app.route("/pyinfo")
@roles_required(Role.SUPERUSER)
def pyinfo(message=None):
    """Show Python and runtime environment and settings or test exception handling."""
    # Passing a message deliberately raises, exercising the 500 handler.
    if message:
        raise Exception(message)
    return render_template("pyinfo.html", **info)
@app.route("/u/<short_id>")
def short_url(short_id):
    """Redirect to the full URL."""
    try:
        u = Url.get(short_id=short_id)
        # Forward any query-string parameters onto the target URL.
        if request.args:
            return redirect(utils.append_qs(u.url, **request.args))
        return redirect(u.url)
    except Url.DoesNotExist:
        abort(404)
def orcid_link_formatter(view, context, model, name):
    """Render the model's ORCID iD as an external link (ModelView formatter)."""
    orcid = model.orcid
    if not orcid:
        return ""
    return Markup('<a href="{0}{1}" target="_blank">{1}</a>'.format(ORCID_BASE_URL, orcid))
class AppCustomModelConverter(CustomModelConverter):
    """Customized field mapping to remove the extra validator.

    This is a workaround for https://github.com/coleifer/wtf-peewee/issues/48.
    TODO: remove it as soon as the issue gets resolved.
    """

    def convert(self, model, field, field_args):
        """Remove the 'Required' validator if the model field is optional."""
        fi = super().convert(model, field, field_args)
        # Optional choice fields get a spurious Required validator from
        # wtf-peewee; strip the first one found.
        if field.null and field.choices:
            for v in fi.field.kwargs.get("validators", []):
                if isinstance(v, validators.Required):
                    fi.field.kwargs["validators"].remove(v)
                    break
        return fi
class AppModelView(ModelView):
"""ModelView customization."""
# def get_column_name(self, field):
# """
# Return a human-readable column name.
# :param field:
# Model field name.
# """
# if self.column_labels and field in self.column_labels:
# return self.column_labels[field]
# else:
# model_field = self.model._meta.fields.get(field)
# return self._prettify_name(
# model_field.verbose_name if model_field and model_field.verbose_name else field)
roles = {1: "Superuser", 2: "Administrator", 4: "Researcher", 8: "Technical Contact"}
column_editable_list = ["name", "is_active", "email", "role", "city", "region", "value", "url", "display_index"]
roles_required = Role.SUPERUSER
export_types = [
"csv",
"xls",
"tsv",
"yaml",
"json",
"xlsx",
"ods",
"html",
]
form_args = dict(
roles=dict(choices=roles.items()),
email=dict(validators=[validators.email()]),
orcid=dict(validators=[validate_orcid_id_field]))
if app.config["ENV"] not in ["dev", "test", "dev0", ] and not app.debug:
form_base_class = SecureForm
column_formatters = dict(
roles=lambda v, c, m, p: ", ".join(n for r, n in v.roles.items() if r & m.roles),
orcid=orcid_link_formatter)
column_default_sort = ("id", True)
column_labels = dict(org="Organisation", orcid="ORCID iD", identifier="Local Identifier")
column_type_formatters = dict(typefmt.BASE_FORMATTERS)
column_type_formatters.update({datetime: lambda view, value: isodate(value)})
column_type_formatters_export = dict(typefmt.EXPORT_FORMATTERS)
column_type_formatters_export.update({PartialDate: lambda view, value: str(value)})
column_formatters_export = dict(orcid=lambda v, c, m, p: m.orcid)
column_exclude_list = (
"updated_at",
"updated_by",
)
form_overrides = dict(start_date=PartialDateField,
end_date=PartialDateField,
proposal_start_date=PartialDateField,
proposal_end_date=PartialDateField)
form_widget_args = {c: {"readonly": True} for c in column_exclude_list}
form_excluded_columns = ["created_at", "updated_at", "created_by", "updated_by"]
model_form_converter = AppCustomModelConverter
column_display_pk = False
def __init__(self, model=None, *args, **kwargs):
"""Pick the model based on the ModelView class name assuming it is ModelClass + "Admin"."""
if model is None:
if hasattr(self, "model"):
model = self.model
else:
model_class_name = self.__class__.__name__.replace("Admin", '')
model = globals().get(model_class_name)
if model is None:
if model_class_name not in dir(models):
raise Exception(f"Model class {model_class_name} doesn't exit.")
model = models.__dict__.get(model_class_name)
super().__init__(model, *args, **kwargs)
# TODO: remove when it gets merged into the upstream repo (it's a workaround to make
# joins LEFT OUTER)
def _handle_join(self, query, field, joins):
if field.model != self.model:
model_name = field.model.__name__
foreign_keys, _ = self.model._meta.get_rel_for_model(field.model)
if model_name not in joins:
# TODO: find a simple way of getting to the right joining forein key
if len(foreign_keys) > 1:
query = query.join(field.model, JOIN.LEFT_OUTER, on=foreign_keys[0])
else:
query = query.join(field.model, JOIN.LEFT_OUTER)
joins.add(model_name)
return query
def get_one(self, rec_id):
    """Handle missing data."""
    try:
        record = super().get_one(rec_id)
    except self.model.DoesNotExist:
        # The row was deleted (or never existed) — report and 404.
        flash(f"The record with given ID: {rec_id} doesn't exist or it has been deleted.", "danger")
        abort(404)
    else:
        return record
def init_search(self):
    """Include linked columns in the search if they are defined with 'linked_table.column'."""
    for entry in (self.column_searchable_list or []):
        field = entry
        if isinstance(field, str):
            if "." in field:
                # "related_table.column": resolve through the FK's related model.
                rel_name, col_name = field.split('.')
                rel_model = getattr(self.model, rel_name).rel_model
                field = getattr(rel_model, col_name)
            else:
                field = getattr(self.model, field)
        # Only text-like columns can be searched.
        if not isinstance(field, (CharField, TextField)):
            raise Exception(
                f'Can only search on text columns. Failed to setup search for "{field}"')
        self._search_fields.append(field)
    return bool(self._search_fields)
def is_accessible(self):
    """Verify if the view is accessible for the current user."""
    # The user must be active, logged in, and hold one of the required roles.
    if current_user.is_active and current_user.is_authenticated:
        return bool(current_user.has_role(self.roles_required))
    return False
def inaccessible_callback(self, name, **kwargs):
    """Handle access denial. Redirect to the login page if the user doesn't have access."""
    login_url = url_for("index", next=request.url)
    return redirect(login_url)
def get_query(self):
    """Add URL query to the data select for foreign key and select data that user has access to."""
    query = super().get_query()
    if current_user and not current_user.has_role(Role.SUPERUSER) and current_user.has_role(
            Role.ADMIN):
        # Show only rows related to the current organisation the user is admin for.
        # Skip this part for SUPERUSER.
        column_names = [c.column_name for c in self.model._meta.fields.values()]
        if "org_id" in column_names or "organisation_id" in column_names:
            if "org_id" in column_names:
                query = query.where(self.model.org_id == current_user.organisation.id)
            else:
                query = query.where(self.model.organisation_id == current_user.organisation.id)
    # Apply any "*_id" query-string parameter as an extra FK equality filter,
    # e.g. ?task_id=42 narrows the list to rows belonging to that task.
    if request.args and any(a.endswith("_id") for a in request.args):
        for f in self.model._meta.fields.values():
            if f.column_name.endswith("_id") and f.column_name in request.args:
                query = query.where(f == int(request.args[f.column_name]))
    return query
def _get_list_extra_args(self):
    """Workaround for https://github.com/flask-admin/flask-admin/issues/1512."""
    view_args = super()._get_list_extra_args()
    # Keep every query parameter that flask-admin itself does not consume,
    # so custom parameters survive pagination/sorting links.
    reserved = {"page", "page_size", "sort", "desc", "search"}
    extra_args = {}
    for key, value in request.args.items():
        if key in reserved or key.startswith("flt"):
            continue
        extra_args[key] = value
    view_args.extra_args = extra_args
    return view_args
class AuditLogModelView(AppModelView):
    """Audit Log model view."""

    # Audit entries are strictly read-only.
    can_edit = False
    can_delete = False
    can_create = False
    can_view_details = False
    column_default_sort = [("ts", True), ("id", True)]

    def __init__(self, model, *args, **kwargs):
        """Set up the search list."""
        # Every textual column of the audited model becomes searchable.
        self.column_searchable_list = [
            field for field in model._meta.fields.values()
            if isinstance(field, (CharField, FixedCharField, TextField))
        ]
        self.column_filters = [
            filters.DateBetweenFilter(column=model.ts, name="Time-stamp"),
            filters.FilterEqual(
                column=model.op,
                name="Operation",
                options=[("U", "Updated"), ("D", "Deleted")]),
        ]
        super().__init__(model, *args, **kwargs)
class MailLogAdmin(AppModelView):
    """Mail Log model view."""

    # Log entries can only be inspected, never modified.
    can_edit = False
    can_delete = False
    can_create = False
    can_view_details = True
    column_searchable_list = (
        "org.name",
        "error",
        "recipient",
        "subject",
    )
class UserAdmin(AppModelView):
    """User model view."""

    # Role bit values -> display names (consumed by the custom edit template).
    roles = {1: "Superuser", 2: "Administrator", 4: "Researcher", 8: "Technical Contact"}
    edit_template = "admin/user_edit.html"
    form_extra_fields = dict(is_superuser=BooleanField("Is Superuser"))
    form_excluded_columns = (
        "roles",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
    )
    # Keep credentials and name parts out of the list view.
    column_exclude_list = (
        "password",
        "username",
        "first_name",
        "last_name",
    )
    column_searchable_list = (
        "name",
        "orcid",
        "email",
        "eppn",
        "organisation.name",
    )
    # Edit the role bitmap via a multi-value field.
    form_overrides = dict(roles=BitmapMultipleValueField)
    # AJAX-backed organisation picker on the form.
    form_ajax_refs = {
        "organisation": {
            "fields": (Organisation.name, "name")
        },
    }
    can_export = True
class OrganisationAdmin(AppModelView):
    """Organisation model view."""

    # Render the organisation logo as a small thumbnail in list views.
    column_formatters = {
        "logo":
        lambda v, c, m, p: Markup(
            '<img style="max-height: 100px; max-width: 100px;" src="'
            f"""{url_for('logo_image', token=m.logo.token)}" alt="the logo of {m.name}">""") if m.logo else ''
    }
    # Secrets and audit columns hidden from list views.
    column_exclude_list = (
        "orcid_client_id",
        "orcid_secret",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
        "email_template",
        "email_template_enabled",
    )
    # Copy before appending so the base class list isn't mutated.
    form_excluded_columns = AppModelView.form_excluded_columns[:]
    form_excluded_columns.append("logo")
    column_searchable_list = (
        "name",
        "tuakiri_name",
        "city",
    )
    # AJAX-backed picker for the technical contact.
    form_ajax_refs = {
        "tech_contact": {
            "fields": (User.name, User.email),
            "page_size": 5
        },
    }
    edit_template = "admin/organisation_edit.html"
    # Fix: take a copy — assigning AppModelView.form_widget_args directly and
    # then mutating it would make these fields read-only in *every* admin view.
    form_widget_args = dict(AppModelView.form_widget_args)
    form_widget_args["api_credentials_requested_at"] = {"readonly": True}
    form_widget_args["api_credentials_entered_at"] = {"readonly": True}

    def update_model(self, form, model):
        """Handle change of the technical contact."""
        # Technical contact changed:
        if form.tech_contact.data and form.tech_contact.data.id != model.tech_contact_id:
            # Revoke the TECHNICAL role if there is no org the user is tech.contact for.
            if model.tech_contact and model.tech_contact.has_role(
                    Role.TECHNICAL) and not Organisation.select().where(
                        Organisation.tech_contact_id == model.tech_contact_id,
                        Organisation.id != model.id).exists():
                # Fix: this was an r"" literal, so the placeholder was logged verbatim.
                app.logger.info(f"Revoked TECHNICAL from {model.tech_contact}")
                model.tech_contact.roles &= ~Role.TECHNICAL
                model.tech_contact.save()
        return super().update_model(form, model)
class OrgInfoAdmin(AppModelView):
    """OrgInfo model view."""

    can_export = True
    column_searchable_list = (
        "name",
        "tuakiri_name",
        "city",
        "first_name",
        "last_name",
        "email",
    )
    # Group the form fields into labelled sections.
    form_rules = [
        rules.FieldSet(["name", "tuakiri_name"], "Naming"),
        rules.FieldSet(["title", "first_name", "last_name", "role", "email", "phone", "is_public"],
                       "Technical Contact"),
        rules.FieldSet(["country", "city"], "Address"),
        rules.FieldSet(["disambiguated_id", "disambiguation_source"], "Disambiguation Data"),
    ]

    @action("invite", "Register Organisation",
            "Are you sure you want to register selected organisations?")
    def action_invite(self, ids):
        """Batch registration of organisations."""
        sent_count = 0
        for org_info in OrgInfo.select().where(OrgInfo.id.in_(ids)):
            try:
                register_org(
                    org_name=org_info.name,
                    email=org_info.email,
                    tech_contact=True,
                    # Organisations without a Tuakiri name on-board via ORCID.
                    via_orcid=not org_info.tuakiri_name,
                    first_name=org_info.first_name,
                    last_name=org_info.last_name,
                    city=org_info.city,
                    country=org_info.country,
                    course_or_role=org_info.role,
                    disambiguated_id=org_info.disambiguated_id,
                    disambiguation_source=org_info.disambiguation_source)
            except Exception as ex:
                flash(f"Failed to send an invitation to {org_info.email}: {ex}")
                app.logger.exception(f"Failed to send registration invitation to {org_info.email}.")
            else:
                sent_count += 1
        flash("%d invitations were sent successfully." % sent_count)
class OrcidTokenAdmin(AppModelView):
    """ORCID token model view."""

    column_searchable_list = (
        "access_token",
        "user.name",
        "user.email",
        "org.name",
    )
    can_export = True
    can_create = True
    # AJAX-backed pickers for the foreign-key fields.
    form_ajax_refs = {
        "user": {
            "fields": (User.name, User.email),
            "page_size": 5
        },
        "org": {
            "fields": (Organisation.name, Organisation.tuakiri_name),
            "page_size": 5
        },
    }
    column_filters = ["scopes"]
class OrcidApiCallAmin(AppModelView):
    """ORCID API calls."""

    column_list = [
        "method", "called_at", "url", "query_params", "body", "status", "put_code",
        "response", "response_time_ms"
    ]
    column_default_sort = ("id", True)
    can_export = True
    # API call log entries are read-only.
    can_edit = False
    can_delete = False
    can_create = False
    can_view_details = True
    column_searchable_list = (
        "url",
        "body",
        "response",
        "user.name",
    )
    # Fix: take a copy — this dict gets update()'d (module level, below the class)
    # with the truncating formatters, and aliasing AppModelView.column_formatters
    # would leak those formatters into every other admin view.
    column_formatters = dict(AppModelView.column_formatters)
    # The details view shows full values — no truncation there.
    column_formatters_detail = dict()

    @staticmethod
    def truncate_value(v, c, m, p):
        """Truncate very long strings."""
        value = getattr(m, p)
        return value[:100] + " ..." if value and len(value) > 100 else value
# Truncate the long text columns in the list view (the details view keeps full values).
# NOTE(review): column_formatters aliases AppModelView.column_formatters, so this
# update() also mutates the shared base-class dict — confirm that is intended.
OrcidApiCallAmin.column_formatters.update(dict(
    body=OrcidApiCallAmin.truncate_value, response=OrcidApiCallAmin.truncate_value))
class UserInvitationAdmin(AppModelView):
    """User Invitations."""

    # Invitations are created by the application flow — admin view is read-only
    # apart from export.
    can_export = True
    can_edit = False
    can_delete = False
    can_create = False
    column_searchable_list = (
        "email",
        "organisation",
        "department",
        "first_name",
        "last_name",
        "token",
        "inviter.name",
    )
class OrgInvitationAdmin(AppModelView):
    """Organisation invitations."""

    # Read-only apart from export.
    can_export = True
    can_edit = False
    can_delete = False
    can_create = False
    column_searchable_list = (
        "email",
        "org.name",
        "token",
        "inviter.name",
    )
class UserOrgAmin(AppModelView):
    """User Organisations."""

    # NOTE: the class name misspells "Admin"; kept as-is because the name may be
    # referenced elsewhere (renaming would change the external interface).
    column_searchable_list = (
        "user.email",
        "org.name",
    )
class TaskAdmin(AppModelView):
    """Task model view."""

    roles_required = Role.SUPERUSER | Role.ADMIN
    list_template = "view_tasks.html"
    can_edit = False
    can_create = False
    can_delete = True
    column_searchable_list = [
        "filename", "created_by.email", "created_by.name", "created_by.first_name",
        "created_by.last_name", "org.name"
    ]
    column_list = [
        "task_type", "filename", "created_at", "org", "completed_at", "created_by", "expires_at",
        "expiry_email_sent_at", "completed_count"
    ]
    # form_excluded_columns = [
    #     "is_deleted", "completed_at", "expires_at", "expiry_email_sent_at", "organisation"
    # ]
    column_filters = (
        filters.DateBetweenFilter(column=Task.created_at, name="Uploaded Date"),
        filters.FilterEqual(column=Task.task_type, options=models.TaskType.options(), name="Task Type"),
    )
    # "AFFILIATION_RECORD" -> "Affiliation Record"; progress as "done / total (pct%)".
    column_formatters = dict(
        task_type=lambda v, c, m, p: m.task_type.name.replace('_', ' ').title() if m.task_type else "N/A",
        completed_count=lambda v, c, m, p: (
            '' if not m.record_count else f"{m.completed_count} / {m.record_count} ({m.completed_percent:.1f}%)"),
    )

    @action("activate", "Activate for processing",
            """Are you sure you want to activate the selected tasks for batch processing?
            NB! By clicking "OK" you are affirming that the all selected task records to be written are,
            to the best of your knowledge, correct.""")
    def activate(self, ids):
        """Activate or reset and enqueue all records of the selected tasks."""
        self.activate_or_reset(ids)

    # NOTE(review): "seleced" in the confirmation text below is a user-facing typo;
    # fixing it changes a runtime string, so it is only flagged here.
    @action("reset", "Reset for processing",
            """Are you sure you want to reset every record in selected task batch for processing?
            NB! By clicking "OK" you are affirming that all the records of seleced tasks to be written are,
            to the best of your knowledge, correct!""")
    def reset(self, ids):
        """Activate or reset and enqueue all records of the selected tasks."""
        self.activate_or_reset(ids)

    def activate_or_reset(self, ids):
        """Activate or reset and enqueue all records of the selected tasks."""
        count = 0
        for t in Task.select().where(Task.id.in_(ids)):
            try:
                # The submitted form's "action" field says which bulk action ran.
                count += utils.activate_all_records(t) if request.form.get(
                    "action") == "activate" else utils.reset_all_records(t)
            except Exception as ex:
                flash(f"Failed to activate the selected records: {ex}", "danger")
            else:
                # NOTE(review): flashes a running total once per task; a single
                # summary message after the loop was probably intended — confirm.
                flash(f"{count} records were activated for batch processing.", "info")
class RecordModelView(AppModelView):
    """Task record model view."""

    roles_required = Role.SUPERUSER | Role.ADMIN
    list_template = "record_list.html"
    column_exclude_list = (
        "task",
        "organisation",
    )
    form_excluded_columns = [
        "task",
        "organisation",
        "processed_at",
        "status",
    ]
    column_export_exclude_list = (
        "task",
    )
    can_edit = True
    can_create = False
    can_delete = True
    can_view_details = True
    can_export = True
    form_widget_args = {"external_id": {"readonly": True}, "task": {"readonly": True}}

    def render(self, template, **kwargs):
        """Pass the task to the render function as an added argument."""
        if template == self.list_template and "task" not in kwargs:
            task_id = request.args.get("task_id")
            if task_id:
                try:
                    kwargs["task"] = Task.get(id=task_id)
                except Task.DoesNotExist:
                    flash(f"The task with ID: {task_id} doesn't exist.", "danger")
                    abort(404)
            else:
                # Without a task the record list is meaningless — bounce back.
                return redirect(request.args.get("url") or url_for("task.index_view"))
        return super().render(template, **kwargs)

    def is_accessible(self):
        """Verify if the task view is accessible for the current user."""
        if not super().is_accessible():
            return False
        # A superuser can access tasks of all research organisations.
        if current_user.is_superuser:
            return True
        if request.method == "POST" and request.form.get("rowid"):
            # get the first ROWID:
            rowid = int(request.form.get("rowid"))
            task_id = self.model.get(id=rowid).task_id
        else:
            task_id = self.current_task_id
            if not task_id:
                _id = request.args.get("id")
                if not _id:
                    flash("Cannot invoke the task view without task ID", "danger")
                    flash("Missing or incorrect task ID value", "danger")
                    return False
                else:
                    task_id = self.model.get(id=_id).task_id
        try:
            task = Task.get(id=task_id)
            # Non-superusers may only see tasks of their own organisation.
            if task.org.id != current_user.organisation.id:
                flash("Access denied! You cannot access this task.", "danger")
                return False
        except Task.DoesNotExist:
            flash("The task doesn't exist.", "danger")
            abort(404)
        except ValueError as ex:
            flash(str(ex), "danger")
            return False
        return True

    def get_export_name(self, export_type='csv'):
        """Get export file name using the original imported file name.

        :return: The exported csv file name.
        """
        task_id = request.args.get("task_id")
        if task_id:
            try:
                task = Task.get(id=task_id)
                filename = os.path.splitext(task.filename)[0]
                return "%s_%s.%s" % (filename, datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"),
                                     export_type)
            except Task.DoesNotExist:
                flash("The batch task doesn't exist", "danger")
                abort(404)
        return super().get_export_name(export_type=export_type)

    @models.lazy_property
    def record_processing_func(self):
        """Record processing function."""
        # E.g. AffiliationRecord -> utils.process_affiliation_records.
        return getattr(utils, f"process_{self.model.underscore_name()}s")

    def enqueue_record(self, record_id):
        """Enqueue the specified record or all active and not yet processed ones."""
        self.record_processing_func.queue(record_id=record_id)

    @action("activate", "Activate for processing",
            """Are you sure you want to activate the selected records for batch processing?
            By clicking "OK" you are affirming that the selected records to be written are,
            to the best of your knowledge, correct!""")
    def action_activate(self, ids):
        """Activate the selected records and enqueue them for batch processing."""
        try:
            status = "The record was activated at " + datetime.now().isoformat(timespec="seconds")
            # Only records not already active get updated/counted.
            count = self.model.update(is_active=True, status=status).where(
                ((self.model.is_active.is_null()) | (self.model.is_active == False)),  # noqa: E712
                self.model.id.in_(ids)).execute()
            if self.model == AffiliationRecord:
                # Affiliation records sharing an (email, ORCID iD) pair are
                # enqueued together as one chunk.
                records = self.model.select().where(self.model.id.in_(ids)).order_by(
                    self.model.email, self.model.orcid)
                for _, chunk in itertools.groupby(records, lambda r: (r.email, r.orcid, )):
                    self.enqueue_record([r.id for r in chunk])
            else:
                for record_id in ids:
                    self.enqueue_record(record_id)
        except Exception as ex:
            flash(f"Failed to activate the selected records: {ex}")
            app.logger.exception("Failed to activate the selected records")
        else:
            flash(f"{count} records were activated for batch processing.")

    @action("reset", "Reset for processing",
            "Are you sure you want to reset the selected records for batch processing?")
    def action_reset(self, ids):
        """Reset batch task records."""
        status = "The record was reset at " + datetime.utcnow().isoformat(timespec="seconds")
        task_id = None
        with db.atomic() as transaction:
            try:
                if request.method == "POST" and request.form.get("rowid"):
                    # get the first ROWID:
                    rowid = int(request.form.get("rowid"))
                    task_id = self.model.get(id=rowid).task_id
                else:
                    task_id = request.form.get('task_id')
                task = Task.get(id=task_id)
                count = self.model.update(
                    processed_at=None, status=status).where(self.model.is_active,
                                                            self.model.id.in_(ids)).execute()
                if task.is_raw:
                    # Raw (message) records: reset their invitees via the M2M link.
                    invitee_ids = [i.id for i in Invitee.select().join(
                        RecordInvitee).join(MessageRecord).where(MessageRecord.id.in_(ids))]
                    count = Invitee.update(
                        processed_at=None, status=status).where(Invitee.id.in_(invitee_ids)).execute()
                    emails = Invitee.select(Invitee.email).where(Invitee.id.in_(invitee_ids))
                elif hasattr(self.model, "invitees"):
                    im = self.model.invitees.rel_model
                    count = im.update(
                        processed_at=None, status=status).where(im.record.in_(ids)).execute()
                    emails = im.select(im.email).where(im.record_id.in_(ids))
                else:
                    emails = self.model.select(self.model.email).where(self.model.id.in_(ids))
                # Delete the UserInvitation tokens for the reset items so fresh
                # invitations get generated.
                UserInvitation.delete().where(UserInvitation.email.in_(emails)).execute()
                for record_id in ids:
                    self.enqueue_record(record_id)
            except Exception as ex:
                transaction.rollback()
                # Message fixed: this action *resets* records (was "activate").
                flash(f"Failed to reset the selected records: {ex}")
                app.logger.exception("Failed to reset the selected records")
            else:
                task.expires_at = None
                task.expiry_email_sent_at = None
                task.completed_at = None
                task.save()
                flash(
                    f"{count} {task.task_type.name} records were reset and/or updated for batch processing."
                )

    def create_form(self):
        """Prefill form with organisation default values."""
        form = super().create_form()
        if request.method == "GET":
            org = current_user.organisation
            if hasattr(form, "org_name"):
                form.org_name.data = org.name
            if hasattr(form, "city"):
                form.city.data = org.city
            if hasattr(form, "region"):
                form.region.data = org.region
            if hasattr(form, "country"):
                form.country.data = org.country
            if hasattr(form, "disambiguated_id"):
                form.disambiguated_id.data = org.disambiguated_id
            if hasattr(form, "disambiguation_source"):
                form.disambiguation_source.data = org.disambiguation_source
        return form

    @property
    def current_task_id(self):
        """Get the task ID from the query parameter task_id or from the return URL."""
        try:
            task_id = request.args.get("task_id")
            if task_id:
                return int(task_id)
            url = request.args.get("url")
            if not url:
                flash("Missing return URL.", "danger")
                # Was "return False"; None for consistency (callers test truthiness).
                return None
            qs = parse_qs(urlparse(url).query)
            task_id = qs.get("task_id", [None])[0]
            if task_id:
                return int(task_id)
        except Exception:
            # Was a bare "except:"; a malformed task_id/url counts as missing.
            return None

    def create_model(self, form):
        """Link model to the current task."""
        task_id = self.current_task_id
        if not task_id:
            flash("Missing task ID.", "danger")
            return False
        try:
            model = self.model(task_id=task_id)
            form.populate_obj(model)
            self._on_model_change(form, model, True)
            model.save()
            # For peewee have to save inline forms after model was saved
            save_inline(form, model)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                flash(f"Failed to create record: {ex}", "danger")
                # Fixed: was "app.log.exception" (no such attribute on a Flask app).
                app.logger.exception("Failed to create record.")
            return False
        else:
            self.after_model_change(form, model, True)
            if model.is_active:
                self.enqueue_record(model.id)
        return model

    def update_model(self, form, model):
        """Handle change of the record. Enqueue the record if got activated."""
        is_active = model.is_active
        update_resp = super().update_model(form, model)
        # Enqueue only on the inactive -> active transition.
        if update_resp and not is_active and model.is_active:
            self.enqueue_record(model.id)
        return update_resp
class RecordChildAdmin(AppModelView):
    """Batch processing record child model common bits."""

    roles_required = Role.SUPERUSER | Role.ADMIN
    list_template = "record_child_list.html"
    can_edit = True
    can_create = True
    can_delete = True
    can_view_details = True
    column_exclude_list = ["record"]
    # Fixed: "record" was listed twice.
    form_excluded_columns = ["record", "status", "processed_at"]
    column_details_exclude_list = ["record"]

    def is_accessible(self):
        """Verify if the view is accessible for the current user."""
        if not super().is_accessible():
            flash("Access denied! You cannot access this record.", "danger")
            return False
        return True

    @property
    def current_record_id(self):
        """Get the record ID from the query parameter record_id or from the return URL."""
        try:
            record_id = request.args.get("record_id")
            if record_id:
                return int(record_id)
            url = request.values.get("url") or request.referrer
            if not url:
                flash("Missing return URL.", "danger")
                return None
            qs = parse_qs(urlparse(url).query)
            record_id = qs.get("record_id", [None])[0]
            if record_id:
                return int(record_id)
        except Exception:
            # Was a bare "except:"; a malformed record_id/url counts as missing.
            return None

    def create_model(self, form):
        """Link model to the current record."""
        record_id = self.current_record_id
        if not record_id:
            flash("Missing record ID.", "danger")
            return False
        try:
            model = self.model()
            form.populate_obj(model)
            # Invitee links to its record via a many-to-many set after save.
            if self.model != Invitee:
                model.record_id = record_id
            self._on_model_change(form, model, True)
            model.save()
            if self.model == Invitee:
                model.records.add(record_id)
            # For peewee have to save inline forms after model was saved
            save_inline(form, model)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                flash(f"Failed to create record: {ex}", "danger")
                # Fixed: was "app.log.exception" (no such attribute on a Flask app).
                app.logger.exception("Failed to create record.")
            return False
        else:
            self.after_model_change(form, model, True)
        return model
class ExternalIdAdmin(RecordChildAdmin):
    """Combine ExternalId model view."""

    can_edit = True
    can_create = True
    can_delete = True
    can_view_details = True
    form_widget_args = {"external_id": {"readonly": True}}

    def is_accessible(self):
        """Verify if the external id's view is accessible for the current user."""
        if super().is_accessible():
            return True
        flash("Access denied! You cannot access this task.", "danger")
        return False
class InviteeAdmin(RecordChildAdmin):
    """Combine Invitees record model view."""

    @action("reset", "Reset for processing",
            "Are you sure you want to reset the selected records for batch processing?")
    def action_reset(self, ids):
        """Batch reset of invitee records and their parent record."""
        rec_id = self.current_record_id
        with db.atomic() as transaction:
            try:
                status = " The record was reset at " + datetime.utcnow().isoformat(timespec="seconds")
                count = self.model.update(
                    processed_at=None, status=status).where(self.model.id.in_(ids)).execute()
                # Raw message records link invitees via the M2M through table.
                if self.model == Invitee and MessageRecord.select().where(MessageRecord.id == rec_id).exists():
                    rec_class = MessageRecord
                else:
                    rec_class = self.model.record.rel_model
                # Reset the parent record too and re-enqueue it for processing.
                rec_class.update(
                    processed_at=None, status=status).where(
                        rec_class.is_active, rec_class.id == rec_id).execute()
                getattr(utils, f"process_{rec_class.underscore_name()}s").queue(rec_id)
            except Exception as ex:
                transaction.rollback()
                # Message fixed: this action *resets* records (was "activate").
                flash(f"Failed to reset the selected records: {ex}")
                app.logger.exception("Failed to reset the selected records")
            else:
                flash(f"{count} invitee records were reset for batch processing.")

    def get_query(self):
        """Build the query for the record related invitee list."""
        query = super().get_query()
        if self.model == Invitee:
            # Narrow the invitee list to those linked (via the M2M through
            # table) to the current message record.
            record_id = self.current_record_id
            query = query.join(self.model.records.get_through_model()).join(MessageRecord).where(
                MessageRecord.id == record_id)
        return query
class CompositeRecordModelView(RecordModelView):
    """Common view for Funding, Work and Peer review model."""

    # Bookkeeping columns left out of exports.
    column_export_exclude_list = (
        "task",
        "status",
        "processed_at",
        "created_at",
        "updated_at",
    )
    export_types = [
        "tsv",
        "yaml",
        "json",
        "csv",
    ]

    def _export_tablib(self, export_type, return_url):
        """Override export functionality to integrate funding/work/peer review invitees with external ids."""
        if tablib is None:
            flash("Tablib dependency not installed.", "danger")
            return redirect(return_url)
        filename = self.get_export_name(export_type)
        disposition = 'attachment;filename=%s' % (secure_filename(filename), )
        mimetype, encoding = mimetypes.guess_type(filename)
        if not mimetype:
            mimetype = 'application/octet-stream'
        if encoding:
            mimetype = '%s; charset=%s' % (mimetype, encoding)
        # Per-model export column sets; headers use ORCID-style dashed names.
        if self.model == PeerReviewRecord:
            self._export_columns = [(v, v.replace('_', '-')) for v in
                                    ['invitees', 'review_group_id', 'review_url', 'reviewer_role', 'review_type',
                                     'review_completion_date', 'subject_external_identifier', 'subject_container_name',
                                     'subject_type', 'subject_name', 'subject_url', 'convening_organization',
                                     'review_identifiers', 'is_active']]
        elif self.model == FundingRecord:
            self._export_columns = [(v, v.replace('_', '-')) for v in
                                    ['invitees', 'title', 'type', 'organization_defined_type', 'short_description',
                                     'amount', 'url', 'start_date', 'end_date', 'organization', 'contributors',
                                     'external_ids', 'is_active']]
        elif self.model == WorkRecord:
            self._export_columns = [(v, v.replace('_', '-')) for v in
                                    ['invitees', 'title', 'journal_title', 'short_description', 'citation', 'type',
                                     'publication_date', 'url', 'language_code', 'country', 'contributors',
                                     'external_ids', 'is_active']]
        ds = tablib.Dataset(headers=[c[1] for c in self._export_columns])
        count, data = self._export_data()
        for row in data:
            vals = self.expected_format(row)
            ds.append(vals)
        try:
            try:
                if export_type == 'json':
                    response_data = json.dumps(json.loads(ds.json), ensure_ascii=False)
                elif export_type == 'yaml':
                    # Clean up artefacts in tablib's JSON rendering before YAML-dumping.
                    response_data = yaml.safe_dump(json.loads(ds.json.replace("]\\", "]").replace("\\n", " ")),
                                                   allow_unicode=True)
                else:
                    response_data = ds.export(format=export_type)
            except AttributeError:
                # Older tablib versions expose formats as attributes (e.g. ds.csv).
                response_data = getattr(ds, export_type)
        except (AttributeError, tablib.UnsupportedFormat):
            # NOTE(review): the message below is missing the closing quote after
            # %(type)s — fixing it would change a translated runtime string.
            flash(gettext('Export type "%(type)s not supported.', type=export_type), 'error')
            return redirect(return_url)
        return Response(
            response_data,
            headers={'Content-Disposition': disposition},
            mimetype=mimetype,
        )

    def expected_format(self, row):
        """Get expected export format for funding/work/peer_review records."""
        vals = []
        # Map each export column to the nested ORCID-message-style structure.
        for c in self._export_columns:
            if c[0] == "invitees":
                invitee_list = []
                for f in row.invitees:
                    invitees_rec = {}
                    invitees_rec['local-identifier'] = self.get_export_value(f, 'identifier')
                    invitees_rec['email'] = self.get_export_value(f, 'email')
                    invitees_rec['first-name'] = self.get_export_value(f, 'first_name')
                    invitees_rec['last-name'] = self.get_export_value(f, 'last_name')
                    invitees_rec['ORCID-iD'] = self.get_export_value(f, 'orcid')
                    invitees_rec['put-code'] = int(self.get_export_value(f, 'put_code')) if \
                        self.get_export_value(f, 'put_code') else None
                    invitees_rec['visibility'] = self.get_export_value(f, 'visibility')
                    invitee_list.append(invitees_rec)
                vals.append(invitee_list)
            elif c[0] in ['review_completion_date', 'start_date', 'end_date', 'publication_date']:
                # Dates are exported in the ORCID year/month/day dict form.
                vals.append(PartialDate.create(self.get_export_value(row, c[0])).as_orcid_dict()
                            if self.get_export_value(row, c[0]) else None)
            elif c[0] == "subject_external_identifier":
                subject_dict = {}
                subject_dict['external-id-type'] = self.get_export_value(row, 'subject_external_id_type')
                subject_dict['external-id-value'] = self.get_export_value(row, 'subject_external_id_value')
                subject_dict['external-id-url'] = dict(value=self.get_export_value(row, 'subject_external_id_url'))
                subject_dict['external-id-relationship'] = self.get_export_value(row,
                                                                                 'subject_external_id_relationship')
                vals.append(subject_dict)
            elif c[0] == "subject_name":
                subject_name_dict = dict()
                translated_title = dict()
                subject_name_dict['title'] = dict(value=self.get_export_value(row, 'subject_name_title'))
                subject_name_dict['subtitle'] = dict(value=self.get_export_value(row, 'subject_name_subtitle'))
                translated_title['language-code'] = self.get_export_value(row,
                                                                          'subject_name_translated_title_lang_code')
                translated_title['value'] = csv_encode(self.get_export_value(row, 'subject_name_translated_title'))
                subject_name_dict['translated-title'] = translated_title
                vals.append(subject_name_dict)
            elif c[0] in ["convening_organization", "organization"]:
                # Peer review uses "convening_org_*" fields; funding uses the
                # plain org fields — fall back from one to the other.
                convening_org_dict = dict()
                disambiguated_dict = dict()
                convening_org_dict['name'] = self.get_export_value(row, 'convening_org_name') or self.get_export_value(
                    row, 'org_name')
                convening_org_dict['address'] = dict(
                    city=self.get_export_value(row, 'convening_org_city') or self.get_export_value(row, 'city'),
                    region=self.get_export_value(row, 'convening_org_region') or self.get_export_value(row, 'region'),
                    country=self.get_export_value(row, 'convening_org_country') or self.get_export_value(row,
                                                                                                         'country'))
                disambiguated_dict['disambiguated-organization-identifier'] = \
                    self.get_export_value(row, 'convening_org_disambiguated_identifier') or \
                    self.get_export_value(row, 'disambiguated_id')
                disambiguated_dict['disambiguation-source'] = self.get_export_value(
                    row, 'convening_org_disambiguation_source') or self.get_export_value(row, 'disambiguation_source')
                convening_org_dict['disambiguated-organization'] = disambiguated_dict
                vals.append(convening_org_dict)
            elif c[0] in ["review_identifiers", "external_ids"]:
                external_ids_list = []
                external_dict = {}
                external_ids_data = row.external_ids
                for f in external_ids_data:
                    external_id_dict = {}
                    external_id_dict['external-id-type'] = self.get_export_value(f, 'type')
                    external_id_dict['external-id-value'] = self.get_export_value(f, 'value')
                    external_id_dict['external-id-relationship'] = self.get_export_value(f, 'relationship')
                    external_id_dict['external-id-url'] = dict(value=self.get_export_value(f, 'url'))
                    external_ids_list.append(external_id_dict)
                external_dict['external-id'] = external_ids_list
                vals.append(external_dict)
            elif c[0] == "title":
                title_dict = dict()
                translated_title = dict()
                title_dict['title'] = dict(value=self.get_export_value(row, 'title'))
                if self.model == WorkRecord:
                    title_dict['subtitle'] = dict(value=self.get_export_value(row, 'subtitle'))
                translated_title['language-code'] = self.get_export_value(row, 'translated_title_language_code')
                translated_title['value'] = csv_encode(self.get_export_value(row, 'translated_title'))
                title_dict['translated-title'] = translated_title
                vals.append(title_dict)
            elif c[0] == "amount":
                amount_dict = dict()
                amount_dict['currency-code'] = self.get_export_value(row, 'currency')
                amount_dict['value'] = csv_encode(self.get_export_value(row, 'amount'))
                vals.append(amount_dict)
            elif c[0] == "citation":
                citation_dict = dict()
                if self.get_export_value(row, 'citation_type'):
                    citation_dict['citation-type'] = self.get_export_value(row, 'citation_type')
                citation_dict['citation-value'] = csv_encode(self.get_export_value(row, 'citation_value'))
                vals.append(citation_dict)
            elif c[0] == "contributors":
                contributors_list = []
                contributors_dict = {}
                contributors_data = row.contributors
                for f in contributors_data:
                    contributor_dict = {}
                    contributor_dict['contributor-attributes'] = {'contributor-role': self.get_export_value(f, 'role')}
                    if self.model == WorkRecord:
                        # Works additionally carry the contributor sequence.
                        contributor_dict['contributor-attributes'].update(
                            {'contributor-sequence': self.get_export_value(f, 'contributor_sequence')})
                    contributor_dict['contributor-email'] = dict(value=self.get_export_value(f, 'email'))
                    contributor_dict['credit-name'] = dict(value=self.get_export_value(f, 'name'))
                    contributor_dict['contributor-orcid'] = dict(path=self.get_export_value(f, 'orcid'))
                    contributors_list.append(contributor_dict)
                contributors_dict['contributor'] = contributors_list
                vals.append(contributors_dict)
            else:
                # Some scalar fields still need the {"value": ...} wrapper.
                requires_nested_value = ['review_url', 'subject_container_name', 'subject_url', 'journal_title', 'url',
                                         'organization_defined_type', 'country']
                if c[0] in requires_nested_value:
                    vals.append(dict(value=self.get_export_value(row, c[0])))
                else:
                    vals.append(csv_encode(self.get_export_value(row, c[0])))
        return vals

    @expose('/export/<export_type>/')
    def export(self, export_type):
        """Check the export type whether it is csv, tsv or other format."""
        return_url = get_redirect_target() or self.get_url('.index_view')
        if not self.can_export or (export_type not in self.export_types):
            flash(gettext('Permission denied.'), 'error')
            return redirect(return_url)
        # CSV/TSV are streamed; other formats go through tablib.
        if export_type == 'csv' or export_type == 'tsv':
            return self._export_csv(return_url, export_type)
        else:
            return self._export_tablib(export_type, return_url)

    def _export_csv(self, return_url, export_type):
        """Export a CSV or tsv of records as a stream."""
        delimiter = ","
        if export_type == 'tsv':
            delimiter = "\t"
        # Grab parameters from URL
        view_args = self._get_list_extra_args()
        # Map column index to column name
        sort_column = self._get_column_by_idx(view_args.sort)
        if sort_column is not None:
            sort_column = sort_column[0]
        # Get count and data
        count, query = self.get_record_list(
            0,
            sort_column,
            view_args.sort_desc,
            view_args.search,
            view_args.filters,
            page_size=self.export_max_rows,
            execute=False)

        # https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
        class Echo(object):
            """An object that implements just the write method of the file-like interface."""

            def write(self, value):
                """Write the value by returning it, instead of storing in a buffer."""
                return value

        writer = csv.writer(Echo(), delimiter=delimiter)

        def generate():
            # Append the column titles at the beginning
            titles = [csv_encode(c[1]) for c in self._export_columns]
            yield writer.writerow(titles)
            for row in query:
                vals = [csv_encode(self.get_export_value(row, c[0]))
                        for c in self._export_columns]
                yield writer.writerow(vals)

        filename = self.get_export_name(export_type=export_type)
        disposition = 'attachment;filename=%s' % (secure_filename(filename), )
        return Response(
            stream_with_context(generate()),
            headers={'Content-Disposition': disposition},
            mimetype='text/' + export_type)
class FundingRecordAdmin(CompositeRecordModelView):
    """Funding record model view."""

    column_exclude_list = ("task", "translated_title_language_code", "short_description", "disambiguation_source")
    can_create = True
    column_searchable_list = ("title",)
    list_template = "funding_record_list.html"
    # Columns (and their order) in CSV/TSV exports; mixes record fields with
    # invitee and external-ID fields aliased by get_record_list below.
    column_export_list = (
        "funding_id",
        "local_identifier",
        "put_code",
        "title",
        "translated_title",
        "translated_title_language_code",
        "type",
        "organization_defined_type",
        "short_description",
        "amount",
        "url",
        "currency",
        "start_date",
        "end_date",
        "org_name",
        "city",
        "region",
        "country",
        "disambiguated_id",
        "disambiguation_source",
        "visibility",
        "orcid",
        "email",
        "first_name",
        "last_name",
        "external_id_type",
        "external_id_url",
        "external_id_relationship",
        "status",
        "is_active")

    def get_record_list(self, page, sort_column, sort_desc, search, filters, execute=True, page_size=None):
        """Return records joined with the related invitee and external-ID data."""
        count, query = self.get_list(
            0,
            sort_column,
            sort_desc,
            search,
            filters,
            page_size=page_size,
            execute=False)
        # Keep only one external ID per funding record (the one with the smallest
        # PK) so the LEFT JOIN below does not multiply rows per record.
        ext_ids = [r.id for r in
                   ExternalId.select(models.fn.min(ExternalId.id).alias("id")).join(FundingRecord).where(
                       FundingRecord.task == self.current_task_id).group_by(FundingRecord.id).objects()]
        return count, query.select(
            self.model,
            FundingInvitee.email,
            FundingInvitee.orcid,
            FundingInvitee.identifier.alias("local_identifier"),
            FundingInvitee.first_name,
            FundingInvitee.last_name,
            FundingInvitee.put_code,
            FundingInvitee.visibility,
            ExternalId.type.alias("external_id_type"),
            ExternalId.value.alias("funding_id"),
            ExternalId.url.alias("external_id_url"),
            ExternalId.relationship.alias("external_id_relationship"),
        ).join(
            ExternalId,
            JOIN.LEFT_OUTER,
            on=(ExternalId.record_id == self.model.id)).where(ExternalId.id << ext_ids).join(
                FundingInvitee,
                JOIN.LEFT_OUTER,
                on=(FundingInvitee.record_id == self.model.id)).objects()
class WorkRecordAdmin(CompositeRecordModelView):
    """Work record model view."""

    column_exclude_list = ("task", "translated_title_language_code", "short_description", "citation_value")
    can_create = True
    column_searchable_list = ("title",)
    list_template = "work_record_list.html"
    # Render the partial publication date with the dedicated widget.
    form_overrides = dict(publication_date=PartialDateField)
    # Columns (and their order) in CSV/TSV exports; mixes record fields with
    # invitee and external-ID fields aliased by get_record_list below.
    column_export_list = [
        "work_id",
        "put_code",
        "title",
        "subtitle",
        "translated_title",
        "translated_title_language_code",
        "journal_title",
        "short_description",
        "citation_type",
        "citation_value",
        "type",
        "publication_date",
        "url",
        "language_code",
        "country",
        "visibility",
        "orcid",
        "email",
        "local_identifier",
        "first_name",
        "last_name",
        "external_id_type",
        "external_id_url",
        "external_id_relationship",
        "status",
        "is_active"
    ]

    def get_record_list(self, page, sort_column, sort_desc, search, filters, execute=True, page_size=None):
        """Return records joined with the related invitee and external-ID data."""
        count, query = self.get_list(
            0,
            sort_column,
            sort_desc,
            search,
            filters,
            page_size=page_size,
            execute=False)
        # One external ID per work record (smallest PK) to avoid row fan-out.
        ext_ids = [r.id for r in
                   WorkExternalId.select(models.fn.min(WorkExternalId.id).alias("id")).join(WorkRecord).where(
                       WorkRecord.task == self.current_task_id).group_by(WorkRecord.id).objects()]
        return count, query.select(
            self.model,
            WorkInvitee.email,
            WorkInvitee.orcid,
            WorkInvitee.identifier.alias("local_identifier"),
            WorkInvitee.first_name,
            WorkInvitee.last_name,
            WorkInvitee.put_code,
            WorkInvitee.visibility,
            WorkExternalId.type.alias("external_id_type"),
            WorkExternalId.value.alias("work_id"),
            WorkExternalId.url.alias("external_id_url"),
            WorkExternalId.relationship.alias("external_id_relationship"),
        ).join(
            WorkExternalId,
            JOIN.LEFT_OUTER,
            on=(WorkExternalId.record_id == self.model.id)).where(WorkExternalId.id << ext_ids).join(
                WorkInvitee,
                JOIN.LEFT_OUTER,
                on=(WorkInvitee.record_id == self.model.id)).objects()
class PeerReviewRecordAdmin(CompositeRecordModelView):
    """Peer Review record model view."""

    column_exclude_list = (
        "task", "subject_external_id_type", "external_id_type", "convening_org_disambiguation_source")
    can_create = True
    column_searchable_list = ("review_group_id", )
    list_template = "peer_review_record_list.html"
    # Render the partial completion date with the dedicated widget.
    form_overrides = dict(review_completion_date=PartialDateField)
    # Edit-form layout grouped into review / subject / convening-org fieldsets.
    form_rules = [
        rules.FieldSet([
            "review_group_id", "reviewer_role", "review_url", "review_type",
            "review_completion_date"
        ], "Review Group"),
        rules.FieldSet([
            "subject_external_id_type", "subject_external_id_value", "subject_external_id_url",
            "subject_external_id_relationship", "subject_container_name", "subject_type",
            "subject_name_title", "subject_name_subtitle",
            "subject_name_translated_title_lang_code", "subject_name_translated_title",
            "subject_url"
        ], "Subject"),
        rules.FieldSet([
            "convening_org_name", "convening_org_city", "convening_org_region",
            "convening_org_country", "convening_org_disambiguated_identifier",
            "convening_org_disambiguation_source"
        ], "Convening Organisation"),
        "is_active",
    ]
    # Columns (and their order) in CSV/TSV exports; mixes record fields with
    # invitee and external-ID fields aliased by get_record_list below.
    column_export_list = [
        "review_group_id",
        "reviewer_role",
        "review_url",
        "review_type",
        "review_completion_date",
        "subject_external_id_type",
        "subject_external_id_value",
        "subject_external_id_url",
        "subject_external_id_relationship",
        "subject_container_name",
        "subject_type",
        "subject_name_title",
        "subject_name_subtitle",
        "subject_name_translated_title_lang_code",
        "subject_name_translated_title",
        "subject_url",
        "convening_org_name",
        "convening_org_city",
        "convening_org_region",
        "convening_org_country",
        "convening_org_disambiguated_identifier",
        "convening_org_disambiguation_source",
        "email",
        "orcid",
        "local_identifier",
        "first_name",
        "last_name",
        "put_code",
        "visibility",
        "external_id_type",
        "peer_review_id",
        "external_id_url",
        "external_id_relationship",
        "is_active"
    ]

    def get_record_list(self,
                        page,
                        sort_column,
                        sort_desc,
                        search,
                        filters,
                        execute=True,
                        page_size=None):
        """Return records joined with the related invitee and external-ID data."""
        count, query = self.get_list(
            0, sort_column, sort_desc, search, filters, page_size=page_size, execute=False)
        # One external ID per peer-review record (smallest PK) to avoid row fan-out.
        ext_ids = [r.id for r in
                   PeerReviewExternalId.select(models.fn.min(PeerReviewExternalId.id).alias("id")).join(
                       PeerReviewRecord).where(
                           PeerReviewRecord.task == self.current_task_id).group_by(PeerReviewRecord.id).objects()]
        return count, query.select(
            self.model,
            PeerReviewInvitee.email,
            PeerReviewInvitee.orcid,
            PeerReviewInvitee.identifier.alias("local_identifier"),
            PeerReviewInvitee.first_name,
            PeerReviewInvitee.last_name,
            PeerReviewInvitee.put_code,
            PeerReviewInvitee.visibility,
            PeerReviewExternalId.type.alias("external_id_type"),
            PeerReviewExternalId.value.alias("peer_review_id"),
            PeerReviewExternalId.url.alias("external_id_url"),
            PeerReviewExternalId.relationship.alias("external_id_relationship"),
        ).join(
            PeerReviewExternalId,
            JOIN.LEFT_OUTER,
            on=(PeerReviewExternalId.record_id == self.model.id)).where(PeerReviewExternalId.id << ext_ids).join(
                PeerReviewInvitee,
                JOIN.LEFT_OUTER,
                on=(PeerReviewInvitee.record_id == self.model.id)).objects()
class AffiliationRecordAdmin(CompositeRecordModelView):
    """Affiliation record model view."""

    can_create = True
    column_exclude_list = (
        "task",
        "organisation",
    )
    column_searchable_list = (
        "first_name",
        "last_name",
        "email",
        "role",
        "department",
        "region",
        "status",
    )
    # Columns (and their order) in CSV/TSV exports; mixes record fields with
    # external-ID fields aliased by get_record_list below.
    # FIX: "visibility" was listed twice (after "email" and again after
    # "delete_record"), duplicating the column in every export.
    column_export_list = [
        "is_active",
        "put_code",
        "local_id",
        "processed_at",
        "status",
        "first_name",
        "last_name",
        "email",
        "visibility",
        "orcid",
        "organisation",
        "affiliation_type",
        "role",
        "department",
        "start_date",
        "end_date",
        "city",
        "region",
        "country",
        "disambiguated_id",
        "disambiguation_source",
        "delete_record",
        "url",
        "display_index",
        "external_id_type",
        "external_id_value",
        "external_id_url",
        "external_id_relationship",
    ]
    form_widget_args = {"task": {"readonly": True}}

    def validate_form(self, form):
        """Validate the input: at least one of email, ORCID iD, or put-code is required."""
        if request.method == "POST" and hasattr(form, "orcid") and hasattr(
                form, "email") and hasattr(form, "put_code"):
            if not (form.orcid.data or form.email.data or form.put_code.data):
                flash(
                    "Either <b>email</b>, <b>ORCID iD</b>, or <b>put-code</b> should be provided.",
                    "danger")
                return False
        return super().validate_form(form)

    @expose("/export/<export_type>/")
    def export(self, export_type):
        """Export the task as JSON/YAML; delegate any other format to the base view."""
        if export_type not in ["json", "yaml", "yml"]:
            return super().export(export_type)
        return_url = get_redirect_target() or self.get_url(".index_view")
        task_id = self.current_task_id
        if not task_id:
            flash("Missing task ID.", "danger")
            return redirect(return_url)
        if not self.can_export or (export_type not in self.export_types):
            flash("Permission denied.", "danger")
            return redirect(return_url)
        data = Task.get(int(task_id)).to_dict()
        if export_type == "json":
            resp = jsonify(data)
        else:
            resp = yamlfy(data)
        resp.headers[
            "Content-Disposition"] = f"attachment;filename={secure_filename(self.get_export_name(export_type))}"
        return resp

    def get_record_list(self,
                        page,
                        sort_column,
                        sort_desc,
                        search,
                        filters,
                        execute=True,
                        page_size=None):
        """Return records joined with the related external-ID data."""
        count, query = self.get_list(
            0, sort_column, sort_desc, search, filters, page_size=page_size, execute=False)
        # One external ID per affiliation record (smallest PK) to avoid row fan-out.
        ext_ids = [r.id for r in
                   AffiliationExternalId.select(models.fn.min(AffiliationExternalId.id).alias("id")).join(
                       AffiliationRecord).where(
                           AffiliationRecord.task == self.current_task_id).group_by(AffiliationRecord.id).objects()]
        return count, query.select(
            self.model,
            AffiliationExternalId.type.alias("external_id_type"),
            AffiliationExternalId.value.alias("external_id_value"),
            AffiliationExternalId.url.alias("external_id_url"),
            AffiliationExternalId.relationship.alias("external_id_relationship"),
        ).join(
            AffiliationExternalId,
            JOIN.LEFT_OUTER,
            on=((AffiliationExternalId.record_id == self.model.id)
                & (AffiliationExternalId.id << ext_ids))).objects()
class ProfilePropertyRecordAdmin(RecordModelView):
    """Researcher Url, Other Name, and Keyword record model view."""

    can_create = True
    form_widget_args = {"task": {"readonly": True}}

    def __init__(self, model_class, *args, **kwargs):
        """Set up model specific attributes."""
        # Only expose search fields the concrete model actually defines.
        self.column_searchable_list = [
            f for f in ["content", "name", "value", "first_name", "last_name", "email"]
            if f in model_class._meta.fields
        ]
        super().__init__(model_class, *args, **kwargs)

    def validate_form(self, form):
        """Validate the input: at least one of email, ORCID iD, or put-code is required."""
        if request.method == "POST" and hasattr(form, "orcid") and hasattr(
                form, "email") and hasattr(form, "put_code"):
            if not (form.orcid.data or form.email.data or form.put_code.data):
                flash(
                    "Either <b>email</b>, <b>ORCID iD</b>, or <b>put-code</b> should be provided.",
                    "danger")
                return False
        return super().validate_form(form)

    @expose("/export/<export_type>/")
    def export(self, export_type):
        """Export the task as JSON/YAML; delegate any other format to the base view."""
        if export_type not in ["json", "yaml", "yml"]:
            return super().export(export_type)
        return_url = get_redirect_target() or self.get_url(".index_view")
        task_id = self.current_task_id
        if not task_id:
            flash("Missing task ID.", "danger")
            return redirect(return_url)
        if not self.can_export or (export_type not in self.export_types):
            flash("Permission denied.", "danger")
            return redirect(return_url)
        data = Task.get(int(task_id)).to_dict()
        if export_type == "json":
            resp = jsonify(data)
        else:
            resp = yamlfy(data)
        resp.headers[
            "Content-Disposition"] = f"attachment;filename={secure_filename(self.get_export_name(export_type))}"
        return resp
class ViewMembersAdmin(AppModelView):
    """Organisation member model (users belonging to the current org.admin organisation) view."""

    roles_required = Role.SUPERUSER | Role.ADMIN
    list_template = "viewMembers.html"
    edit_template = "admin/member_edit.html"
    form_columns = ["name", "orcid", "email", "eppn"]
    # Everything except the email address is read-only in the edit form.
    form_widget_args = {c: {"readonly": True} for c in form_columns if c != "email"}
    column_list = ["email", "orcid", "created_at", "updated_at", "orcid_updated_at"]
    column_formatters_export = dict(orcid=lambda v, c, m, p: m.orcid)
    column_exclude_list = None
    column_searchable_list = ["email", "orcid", "name", "first_name", "last_name"]
    column_export_list = ("email", "eppn", "orcid")
    model = User
    can_edit = True
    can_create = False
    can_delete = True
    can_view_details = False
    can_export = True
    column_filters = (
        filters.DateBetweenFilter(column=User.created_at, name="Registration Date"),
        filters.DateBetweenFilter(column=User.updated_at, name="Update Date"),
        filters.DateBetweenFilter(column=User.orcid_updated_at, name="ORCID Update Date"),
    )
    column_labels = {"created_at": "Registered At"}

    def get_query(self):
        """Get the query for the users belonging to the current user's organisation."""
        return current_user.organisation.users

    def _order_by(self, query, joins, order):
        """Add ID for deterministic order of rows if sorting is by a NULLable field."""
        query, joins = super()._order_by(query, joins, order)
        # add ID only if all fields are NULLable (exclude ones given by str):
        if all(not isinstance(f, str) and f.null for (f, _) in order):
            query = query.order_by(*query._order_by,
                                   self.model.id.desc() if order[0][1] else self.model.id)
        return query, joins

    def get_one(self, rec_id):
        """Limit access only to the users belonging to the current organisation."""
        try:
            user = User.get(id=rec_id)
            if not user.organisations.where(UserOrg.org == current_user.organisation).exists():
                flash("Access Denied!", "danger")
                abort(403)
            return user
        except User.DoesNotExist:
            flash(f"The user with given ID: {rec_id} doesn't exist or it was deleted.", "danger")
            abort(404)

    def delete_model(self, model):
        """Delete a row and revoke all access tokens issued for the organisation."""
        org = current_user.organisation
        token_revoke_url = app.config["ORCID_BASE_URL"] + "oauth/revoke"
        # Never delete organisation administrators through this view.
        if UserOrg.select().where(UserOrg.user_id == model.id, UserOrg.org_id == org.id,
                                  UserOrg.is_admin).exists():
            flash(
                f"Failed to delete record for {model}, As User appears to be one of the admins. "
                f"Please contact orcid@royalsociety.org.nz for support", "danger")
            return False
        # Revoke (and remove) every token this organisation holds for the user.
        for token in OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == model):
            try:
                resp = requests.post(token_revoke_url,
                                     headers={"Accepts": "application/json"},
                                     data=dict(client_id=org.orcid_client_id,
                                               client_secret=org.orcid_secret,
                                               token=token.access_token))
                if resp.status_code != 200:
                    # FIX: this message was a plain (non-f) string showing the literal
                    # text "{token.access_token}: {ex}", and "ex" is undefined in this
                    # branch; report the actual HTTP response body instead.
                    flash(f"Failed to revoke token {token.access_token}: {resp.text}", "danger")
                    return False
                token.delete_instance(recursive=True)
            except Exception as ex:
                flash(f"Failed to revoke token {token.access_token}: {ex}", "danger")
                app.logger.exception('Failed to delete record.')
                return False
        user_org = UserOrg.select().where(UserOrg.user == model, UserOrg.org == org).first()
        try:
            self.on_model_delete(model)
            if model.organisations.count() < 2:
                model.delete_instance()
            else:
                # The user belongs to other organisations: only detach from this one,
                # repointing the "home" organisation if it was the one being removed.
                if model.organisation == user_org.org:
                    model.organisation = model.organisations.first()
                    model.save()
                user_org.delete_instance()
        except Exception as ex:
            if not self.handle_view_exception(ex):
                flash(gettext('Failed to delete record. %(error)s', error=str(ex)), 'danger')
                app.logger.exception('Failed to delete record.')
            return False
        else:
            self.after_model_delete(model)
        return True

    @action("delete", "Delete", "Are you sure you want to delete selected records?")
    def action_delete(self, ids):
        """Delete a record for selected entries."""
        try:
            model_pk = getattr(self.model, self._primary_key)
            count = 0
            query = self.model.select().where(model_pk << ids)
            for m in query:
                if self.delete_model(m):
                    count += 1
            if count:
                # NOTE: the message text doubles as the gettext translation key,
                # so it is left unchanged.
                flash(
                    gettext(
                        'Record was successfully deleted.%(count)s records were successfully deleted.',
                        count=count), 'success')
        except Exception as ex:
            flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'danger')

    @action(
        "export_affiliations", "Export Affiliation Records",
        "Are you sure you want to retrieve and export selected records affiliation entries from ORCID?"
    )
    def action_export_affiliations(self, ids):
        """Export affiliation entries of the selected users as a streamed CSV."""
        tokens = OrcidToken.select(User, OrcidToken).join(
            User, on=(OrcidToken.user_id == User.id)).where(
                OrcidToken.user_id << ids,
                OrcidToken.scopes.contains(orcid_client.READ_LIMITED))
        if not current_user.is_superuser:
            tokens = tokens.where(OrcidToken.org_id == current_user.organisation.id)
        records = []
        for t in tokens:
            try:
                api = orcid_client.MemberAPIV3(user=t.user, access_token=t.access_token)
                profile = api.get_record()
                if not profile:
                    continue
            except ApiException as ex:
                if ex.status == 401:
                    flash(f"User {t.user} has revoked the permissions to update his/her records",
                          "warning")
                else:
                    # FIX: the ORCID error payload key is "user-message"; the original
                    # looked up a misspelled key and could concatenate None.
                    flash(
                        "Exception when calling ORCID API: \n"
                        + (json.loads(ex.body.replace("''", "\"")).get("user-message") or str(ex)),
                        "danger")
                # FIX: skip this user on failure; previously execution fell through and
                # read a stale (or undefined) profile from the prior iteration.
                continue
            except Exception as ex:
                abort(500, ex)
            # FIX: accumulate rows across *all* selected users; previously "records"
            # was rebound each iteration, discarding prior users' entries.
            records = itertools.chain(
                records,
                *[[(t.user, s.get(f"{rt}-summary")) for ag in
                   profile.get("activities-summary", f"{rt}s", "affiliation-group", default=[])
                   for s in ag.get("summaries")] for rt in ["employment", "education", "distinction", "membership",
                                                            "service", "qualification", "invited-position"]])

        # https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
        class Echo(object):
            """An object that implements just the write method of the file-like interface."""

            def write(self, value):
                """Write the value by returning it, instead of storing in a buffer."""
                return '' if value is None or value == "None" else value

        writer = csv.writer(Echo(), delimiter=",")

        def generate():
            titles = [
                csv_encode(c) for c in [
                    "Put Code", "First Name", "Last Name", "Email", "ORCID iD", "Affiliation Type",
                    "Role", "Department", "Start Date", "End Date", "City", "Region", "Country",
                    "Disambiguated Id", "Disambiguation Source"
                ]
            ]
            yield writer.writerow(titles)
            for row in records:
                u, r = row
                # "path" looks like "/<orcid>/<affiliation-type>/<put-code>".
                _, orcid, affiliation_type, put_code = r.get("path").split("/")
                yield writer.writerow(
                    csv_encode(v or '') for v in [
                        r.get("put-code"), u.first_name, u.last_name, u.email, orcid,
                        affiliation_type,
                        r.get("role-title"),
                        r.get("department-name"),
                        PartialDate.create(r.get("start-date")),
                        PartialDate.create(r.get("end-date")),
                        r.get("organization", "address", "city"),
                        r.get("organization", "address", "region"),
                        r.get("organization", "address", "country"),
                        r.get("disambiguated-organization",
                              "disambiguated-organization-identifier"),
                        r.get("disambiguated-organization", "disambiguation-source")
                    ])

        return Response(stream_with_context(generate()),
                        headers={"Content-Disposition": "attachment;filename=affiliations.csv"},
                        mimetype="text/csv")
class GroupIdRecordAdmin(AppModelView):
    """GroupIdRecord model view."""

    roles_required = Role.SUPERUSER | Role.ADMIN
    list_template = "viewGroupIdRecords.html"
    can_edit = True
    can_create = True
    can_delete = True
    form_widget_args = {"organisation": {"disabled": True}}
    column_searchable_list = (
        "name",
        "group_id",
    )
    form_excluded_columns = (
        "processed_at",
        "status",
    )

    def create_form(self, obj=None):
        """Preselect the organisation field with Admin's organisation."""
        form = super().create_form()
        form.organisation.data = current_user.organisation
        return form

    @action("Insert/Update Record", "Insert or Update record",
            "Are you sure you want add or update group id record?")
    def action_insert_update(self, ids):
        """Insert/Update GroupID records via the ORCID member API."""
        count = 0
        with db.atomic():
            for gid in self.model.select().where(self.model.id.in_(ids)):
                try:
                    org = gid.organisation
                    orcid_token = None
                    gid.status = None
                    try:
                        # Reuse a cached client-credentials token when available.
                        orcid_token = OrcidToken.get(org=org, scopes='/group-id-record/update')
                    except OrcidToken.DoesNotExist:
                        orcid_token = utils.get_client_credentials_token(org=org, scopes="/group-id-record/update")
                    except Exception as ex:
                        # NOTE(review): if this branch fires, orcid_token stays None and
                        # the API call below raises AttributeError, which is then caught
                        # by the outer generic handler — confirm this is intended.
                        flash("Something went wrong in ORCID call, "
                              "please contact orcid@royalsociety.org.nz for support", "warning")
                        app.logger.exception(f'Exception occured {ex}')
                    api = orcid_client.MemberAPIV3(org=org, access_token=orcid_token.access_token)
                    put_code, created = api.create_or_update_record_id_group(put_code=gid.put_code,
                                                                             org=org, group_name=gid.name,
                                                                             group_id=gid.group_id,
                                                                             description=gid.description, type=gid.type)
                    if created:
                        gid.add_status_line("The group id record was created.")
                    else:
                        gid.add_status_line("The group id record was updated.")
                    gid.put_code = put_code
                    count += 1
                except ApiException as ex:
                    if ex.status == 404:
                        # Stale put-code: clear it so a later attempt creates the record.
                        gid.put_code = None
                    elif ex.status == 401:
                        # Token rejected: drop it so a fresh one is requested next time.
                        orcid_token.delete_instance()
                    flash("Something went wrong in ORCID call, Please try again by making by making necessary changes, "
                          "In case you understand the 'user-message' present in the status field or else "
                          "please contact orcid@royalsociety.org.nz for support", "warning")
                    app.logger.exception(f'Exception occured {ex}')
                    gid.add_status_line(f"ApiException: {ex}")
                except Exception as ex:
                    flash("Something went wrong in ORCID call, "
                          "Please contact orcid@royalsociety.org.nz for support", "warning")
                    app.logger.exception(f'Exception occured {ex}')
                    gid.add_status_line(f"Exception: {ex}")
                finally:
                    # Record the processing timestamp and status regardless of outcome.
                    gid.processed_at = datetime.utcnow()
                    gid.save()
        flash("%d Record was processed." % count)
class ResourceRecordAdmin(RecordModelView):
    """Researcher resource administration view."""

    # Edit-form layout: record flags, the invitee block, then the proposal and
    # resource fieldsets (each with nested Host / External ID sub-sets).
    form_rules = [
        # rules.Header("Record"),
        "local_id",
        "is_active",
        "display_index",
        "visibility",
        "put_code",
        rules.FieldSet([
            "identifier",
            "email",
            "orcid",
            "first_name",
            "last_name"
        ], "Invitee"),
        rules.HTML("<hr>"),
        rules.FieldSet([
            "proposal_title",
            "proposal_start_date",
            "proposal_end_date",
            "proposal_url",
            rules.FieldSet([
                "proposal_host_name",
                "proposal_host_city",
                "proposal_host_region",
                "proposal_host_country",
                "proposal_host_disambiguated_id",
                "proposal_host_disambiguation_source",
            ], "Host"),
            rules.FieldSet([
                "proposal_external_id_type",
                "proposal_external_id_value",
                "proposal_external_id_url",
                "proposal_external_id_relationship",
            ], "External ID")
        ], "Proposal"),
        rules.HTML("<hr>"),
        rules.FieldSet([
            "name",
            "type",
            "start_date",
            "end_date",
            "url",
            rules.FieldSet([
                "host_name",
                "host_city",
                "host_region",
                "host_country",
                "host_disambiguated_id",
                "host_disambiguation_source",
            ], "Host"),
            rules.FieldSet([
                "external_id_type",
                "external_id_value",
                "external_id_url",
                "external_id_relationship",
            ], "External ID")
        ], "Resource"),
    ]
    # Friendlier human-readable headers for exported resource columns.
    export_column_headers = dict(
        name="Resource Name",
        type="Resource Type",
        start_date="Resource Start date",
        end_date="Resource End date",
        url="Resource URL",
        host_name="Resource Host Name",
        host_city="Resource Host City",
        host_region="Resource Host Region",
        host_country="Resource Host Country",
        host_disambiguated_id="Resource Host Disambiguated ID",
        host_disambiguation_source="Resource Host Disambiguation Source",
        external_id_type="Resource External ID Type",
        external_id_value="Resource External Id Value",
        external_id_url="Resource External ID URL",
        external_id_relationship="Resource External ID Relationship")

    def get_export_columns(self):
        """Create a list of exported columns, substituting the friendlier labels."""
        return [(c, self.export_column_headers.get(c, n)) for c, n in super().get_export_columns()]

    @expose("/export/<export_type>/")
    def export(self, export_type):
        """Export the task as JSON/YAML; delegate any other format to the base view."""
        if export_type not in ["json", "yaml", "yml"]:
            return super().export(export_type)
        return_url = get_redirect_target() or self.get_url(".index_view")
        task_id = self.current_task_id
        if not task_id:
            flash("Missing task ID.", "danger")
            return redirect(return_url)
        if not self.can_export or (export_type not in self.export_types):
            flash("Permission denied.", "danger")
            return redirect(return_url)
        data = Task.get(int(task_id)).to_export_dict()
        if export_type == "json":
            resp = jsonify(data)
        else:
            resp = yamlfy(data)
        resp.headers[
            "Content-Disposition"] = f"attachment;filename={secure_filename(self.get_export_name(export_type))}"
        return resp
class MessageRecordAdmin(RecordModelView):
    """Message record administration view (JSON/YAML task export)."""

    export_types = ["yaml", "json"]
    form_widget_args = dict(message=dict(rows=20))

    @expose("/export/<export_type>/")
    def export(self, export_type):
        """Serve the current task as JSON/YAML; delegate other formats to the base view."""
        if export_type not in ["json", "yaml", "yml"]:
            return super().export(export_type)
        back = get_redirect_target() or self.get_url(".index_view")
        task_id = self.current_task_id
        if not task_id:
            flash("Missing task ID.", "danger")
            return redirect(back)
        if not (self.can_export and export_type in self.export_types):
            flash("Permission denied.", "danger")
            return redirect(back)
        data = Task.get(int(task_id)).to_export_dict()
        resp = jsonify(data) if export_type == "json" else yamlfy(data)
        filename = secure_filename(self.get_export_name(export_type))
        resp.headers["Content-Disposition"] = f"attachment;filename={filename}"
        return resp
# Register all administrative views; the order here defines the admin menu order.
admin.add_view(UserAdmin(User))
admin.add_view(OrganisationAdmin(Organisation))
admin.add_view(OrcidTokenAdmin(OrcidToken))
admin.add_view(OrgInfoAdmin(OrgInfo))
admin.add_view(OrcidApiCallAmin(OrcidApiCall))
admin.add_view(UserInvitationAdmin())
admin.add_view(OrgInvitationAdmin())
admin.add_view(MailLogAdmin())
admin.add_view(TaskAdmin(Task))
admin.add_view(AffiliationRecordAdmin())
admin.add_view(RecordChildAdmin(AffiliationExternalId))
admin.add_view(FundingRecordAdmin())
admin.add_view(RecordChildAdmin(FundingContributor))
admin.add_view(InviteeAdmin(FundingInvitee))
admin.add_view(RecordChildAdmin(ExternalId))
admin.add_view(RecordChildAdmin(WorkContributor))
admin.add_view(RecordChildAdmin(WorkExternalId))
admin.add_view(InviteeAdmin(WorkInvitee))
admin.add_view(WorkRecordAdmin())
admin.add_view(PeerReviewRecordAdmin())
admin.add_view(InviteeAdmin(PeerReviewInvitee))
admin.add_view(RecordChildAdmin(PeerReviewExternalId))
admin.add_view(ProfilePropertyRecordAdmin(PropertyRecord))
admin.add_view(ProfilePropertyRecordAdmin(OtherIdRecord))
admin.add_view(ResourceRecordAdmin())
admin.add_view(MessageRecordAdmin())
admin.add_view(InviteeAdmin(Invitee))
admin.add_view(ViewMembersAdmin(name="viewmembers", endpoint="viewmembers"))
admin.add_view(UserOrgAmin(UserOrg))
admin.add_view(AppModelView(Client))
admin.add_view(AppModelView(Grant))
admin.add_view(AppModelView(Token))
admin.add_view(AppModelView(Delegate))
admin.add_view(GroupIdRecordAdmin(GroupIdRecord))

# One read-only audit-trail view per audited model.
for name, model in models.audit_models().items():
    admin.add_view(AuditLogModelView(model, endpoint=name + "_log"))
@app.template_filter("plural")
def plural(single):
    """Pluralize a noun."""
    # Delegates to utils.plural so templates can write {{ noun | plural }}.
    return utils.plural(single)
@app.template_filter("year_range")
def year_range(entry):
    """Render an employment interval as "<start-year>-<end-year>".

    Missing or empty years come out as "unknown" (start) and "present" (end).
    Accepts both snake_case and ORCID-style hyphenated keys.
    """
    start = entry.get("start_date") or entry.get("start-date")
    end = entry.get("end_date") or entry.get("end-date")
    first = start["year"]["value"] if start and start["year"]["value"] else "unknown"
    last = end["year"]["value"] if end and end["year"]["value"] else "present"
    return first + "-" + last
@app.template_filter("orcid")
def user_orcid_id_url(user):
    """Render user ORCID Id URL."""
    # Empty string (rather than the bare base URL) when the user has no ORCID iD.
    return ORCID_BASE_URL + user.orcid if user.orcid else ""
@app.template_filter("isodate")
def isodate(d, sep=" ", no_time=False):
    """Render a date/datetime as an HTML <time> element (ISO format, minute precision).

    Plain strings are wrapped in a bare <time> element; falsy values yield ''.
    """
    if d:
        if isinstance(d, datetime):
            # Client-side JS re-formats via the data-format attribute; the
            # non-breaking hyphens keep the rendered date on a single line.
            ts_format = '' if no_time else f"[{sep}]HH:mm"
            return Markup(
                f"""<time datetime="{d.isoformat(timespec='minutes')}" """
                f"""data-toggle="tooltip" title="{d.isoformat(timespec='minutes', sep=' ')} UTC" """
                f"""data-format="YYYY[‑]MM[‑]DD{ts_format}" />""")
        if isinstance(d, str):
            return Markup(f"""<time datetime="{d}" />""")
    return ''
@app.template_filter("shorturl")
def shorturl(url):
    """Create and render short url."""
    # Url.shorten persists (or reuses) a short-ID mapping for the given URL.
    u = Url.shorten(url)
    return url_for("short_url", short_id=u.short_id, _external=True)
@app.route("/activate_all", methods=["POST"])
@roles_required(Role.SUPERUSER, Role.ADMIN, Role.TECHNICAL)
def activate_all():
    """Mark every record of the submitted task active for batch processing."""
    # Return to the caller-supplied URL, or wherever the request came from.
    back = request.args.get("url") or request.referrer
    task = Task.get(request.form.get("task_id"))
    try:
        count = utils.activate_all_records(task)
    except Exception as ex:
        flash(f"Failed to activate the selected records: {ex}")
    else:
        flash(f"{count} records were activated for batch processing.")
    return redirect(back)
@app.route("/reset_all", methods=["POST"])
@roles_required(Role.SUPERUSER, Role.ADMIN, Role.TECHNICAL)
def reset_all():
    """Reset every record of the submitted task back to the unprocessed state."""
    # Return to the caller-supplied URL, or wherever the request came from.
    back = request.args.get("url") or request.referrer
    task = Task.get(request.form.get("task_id"))
    try:
        count = utils.reset_all_records(task)
    except Exception as ex:
        flash(f"Failed to reset the selected records: {ex}")
    else:
        flash(f"{count} {task.task_type.name} records were reset for batch processing.", "info")
    return redirect(back)
@app.route("/section/<int:user_id>/<string:section_type>/<int:put_code>/delete", methods=["POST"])
@roles_required(Role.ADMIN)
def delete_record(user_id, section_type, put_code):
    """Delete an employment, education, peer review, works or funding record.

    :param user_id: Hub user whose ORCID record is being modified.
    :param section_type: three-letter section code (e.g. EMP, EDU, RUR, ...).
    :param put_code: ORCID put-code of the item to delete.
    """
    _url = request.referrer or request.args.get("url") or url_for(
        "section", user_id=user_id, section_type=section_type)
    try:
        user = User.get(id=user_id)
    except Exception:
        flash("ORCID HUB doesn't have data related to this researcher", "warning")
        return redirect(url_for('viewmembers.index_view'))
    if not user.orcid:
        flash("The user hasn't yet linked their ORCID record", "danger")
        return redirect(_url)
    # Person sections need the PERSON/UPDATE scope; everything else needs
    # ACTIVITIES/UPDATE (the two branches were previously duplicated).
    is_person_update = section_type in ["RUR", "ONR", "KWR", "ADR", "EXR"]
    scope = orcid_client.PERSON_UPDATE if is_person_update else orcid_client.ACTIVITIES_UPDATE
    orcid_token = OrcidToken.select(OrcidToken.access_token).where(
        OrcidToken.user_id == user.id,
        OrcidToken.org_id == current_user.organisation_id,
        OrcidToken.scopes.contains(scope)).first()
    if not orcid_token:
        flash(
            f"""The user hasn't given '{"PERSON/UPDATE" if is_person_update else "ACTIVITIES/UPDATE"}' """
            "permission to delete this record", "warning")
        return redirect(_url)
    api = orcid_client.MemberAPIV3(user=user, org=current_user.organisation, access_token=orcid_token.access_token)
    try:
        api.delete_section(section_type, put_code)
        app.logger.info(f"For {user.orcid} '{section_type}' record was deleted by {current_user}")
        flash("The record was successfully deleted.", "success")
    except ApiException as e:
        # FIX: the ORCID error payload key is "user-message"; the original looked
        # up a misspelled key and concatenated None, raising TypeError.
        error = json.loads(e.body.replace("''", "\"")).get("user-message") or str(e)
        flash("Failed to delete the entry: " + error, "danger")
    except Exception as ex:
        app.logger.error("For %r encountered exception: %r", user, ex)
        abort(500, ex)
    return redirect(_url)
@app.route("/section/<int:user_id>/<string:section_type>/<int:put_code>/edit", methods=["GET", "POST"])
@app.route("/section/<int:user_id>/<string:section_type>/new", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def edit_record(user_id, section_type, put_code=None):
"""Create a new or edit an existing profile section record."""
section_type = section_type.upper()[:3]
_url = (request.args.get("url")
or url_for("section", user_id=user_id, section_type=section_type))
org = current_user.organisation
try:
user = User.get(id=user_id)
except User.DoesNotExist:
flash("ORCID HUB doent have data related to this researcher", "warning")
return redirect(_url)
if not user.orcid:
flash("The user hasn't yet linked their ORCID record", "danger")
return redirect(_url)
orcid_token = None
is_person_update = section_type in ["RUR", "ONR", "KWR", "ADR", "EXR"]
orcid_token = OrcidToken.select(OrcidToken.access_token).where(
OrcidToken.user_id == user.id, OrcidToken.org_id == org.id,
OrcidToken.scopes.contains(orcid_client.PERSON_UPDATE if is_person_update else orcid_client
.ACTIVITIES_UPDATE)).first()
if not orcid_token:
flash(
f"""The user hasn't given '{"PERSON/UPDATE" if is_person_update else "ACTIVITIES/UPDATE"}' """
"permission to you to Add/Update these records", "warning")
return redirect(_url)
api = orcid_client.MemberAPIV3(org=org, user=user, access_token=orcid_token.access_token)
if section_type == "FUN":
form = FundingForm(form_type=section_type)
elif section_type == "PRR":
form = PeerReviewForm(form_type=section_type)
elif section_type == "WOR":
form = WorkForm(form_type=section_type)
elif section_type == "RUR":
form = ResearcherUrlForm(form_type=section_type)
elif section_type == "ADR":
form = AddressForm(form_type=section_type)
elif section_type == "EXR":
form = ExternalIdentifierForm(form_type=section_type)
elif section_type in ["ONR", "KWR"]:
form = OtherNameKeywordForm(form_type=section_type)
else:
form = RecordForm(form_type=section_type)
grant_data_list = []
if request.method == "GET":
data = {}
if put_code:
try:
# Fetch an Employment
if section_type == "EMP":
api_response = api.view_employmentv3(user.orcid, put_code, _preload_content=False)
elif section_type == "EDU":
api_response = api.view_educationv3(user.orcid, put_code, _preload_content=False)
elif section_type == "DST":
api_response = api.view_distinctionv3(user.orcid, put_code, _preload_content=False)
elif section_type == "MEM":
api_response = api.view_membershipv3(user.orcid, put_code, _preload_content=False)
elif section_type == "SER":
api_response = api.view_servicev3(user.orcid, put_code, _preload_content=False)
elif section_type == "QUA":
api_response = api.view_qualificationv3(user.orcid, put_code, _preload_content=False)
elif section_type == "POS":
api_response = api.view_invited_positionv3(user.orcid, put_code, _preload_content=False)
elif section_type == "FUN":
api_response = api.view_fundingv3(user.orcid, put_code, _preload_content=False)
elif section_type == "WOR":
api_response = api.view_workv3(user.orcid, put_code, _preload_content=False)
elif section_type == "PRR":
api_response = api.view_peer_reviewv3(user.orcid, put_code, _preload_content=False)
elif section_type == "RUR":
api_response = api.view_researcher_urlv3(user.orcid, put_code, _preload_content=False)
elif section_type == "ONR":
api_response = api.view_other_namev3(user.orcid, put_code, _preload_content=False)
elif section_type == "ADR":
api_response = api.view_addressv3(user.orcid, put_code, _preload_content=False)
elif section_type == "EXR":
api_response = api.view_external_identifierv3(user.orcid, put_code, _preload_content=False)
elif section_type == "KWR":
api_response = api.view_keywordv3(user.orcid, put_code, _preload_content=False)
_data = json.loads(api_response.data, object_pairs_hook=NestedDict)
if section_type == "PRR" or section_type == "WOR":
if section_type == "PRR":
external_ids_list = _data.get("review-identifiers", "external-id", default=[])
else:
external_ids_list = _data.get("external-ids", "external-id", default=[])
for extid in external_ids_list:
external_id_value = (extid.get('external-id-value', default='') or '')
external_id_url = (extid.get('external-id-url', 'value', default='') or '')
external_id_relationship = (extid.get(
'external-id-relationship', default='') or '').lower()
external_id_type = (extid.get('external-id-type', default='') or '')
grant_data_list.append(dict(grant_number=external_id_value, grant_url=external_id_url,
grant_relationship=external_id_relationship,
grant_type=external_id_type))
if section_type == "WOR":
data = dict(work_type=(_data.get("type", default='') or '').lower(),
title=_data.get("title", "title", "value"),
subtitle=_data.get("title", "subtitle", "value"),
translated_title=_data.get("title", "translated-title", "value"),
translated_title_language_code=_data.get("title", "translated-title",
"language-code"),
journal_title=_data.get("journal-title", "value"),
short_description=_data.get("short-description"),
citation_type=(_data.get("citation", "citation-type", default='') or '').lower(),
citation=_data.get("citation", "citation-value"),
url=_data.get("url", "value"),
language_code=_data.get("language-code"),
publication_date=PartialDate.create(_data.get("publication-date")),
country=_data.get("country", "value"),
visibility=(_data.get("visibility", default='') or '').lower())
else:
data = dict(
org_name=_data.get("convening-organization", "name"),
disambiguated_id=_data.get("convening-organization", "disambiguated-organization",
"disambiguated-organization-identifier"),
disambiguation_source=_data.get("convening-organization", "disambiguated-organization",
"disambiguation-source"),
city=_data.get("convening-organization", "address", "city"),
region=_data.get("convening-organization", "address", "region"),
country=_data.get("convening-organization", "address", "country"),
reviewer_role=(_data.get("reviewer-role", default='') or '').lower(),
review_url=_data.get("review-url", "value"),
review_type=(_data.get("review-type", default='') or '').lower(),
review_group_id=_data.get("review-group-id", default=''),
subject_external_identifier_type=_data.get("subject-external-identifier",
"external-id-type"),
subject_external_identifier_value=_data.get("subject-external-identifier",
"external-id-value"),
subject_external_identifier_url=_data.get("subject-external-identifier", "external-id-url",
"value"),
subject_external_identifier_relationship=(_data.get(
"subject-external-identifier", "external-id-relationship", default='') or '').lower(),
subject_container_name=_data.get("subject-container-name", "value"),
subject_type=(_data.get("subject-type", default='') or '').lower(),
subject_title=_data.get("subject-name", "title", "value"),
subject_subtitle=_data.get("subject-name", "subtitle"),
subject_translated_title=_data.get("subject-name", "translated-title", "value"),
subject_translated_title_language_code=_data.get("subject-name", "translated-title",
"language-code"),
subject_url=_data.get("subject-url", "value"),
visibility=(_data.get("visibility", default='') or '').lower(),
review_completion_date=PartialDate.create(_data.get("review-completion-date")))
elif section_type in ["RUR", "ONR", "KWR", "ADR", "EXR"]:
data = dict(visibility=(_data.get("visibility", default='') or '').lower(),
display_index=_data.get("display-index"))
if section_type == "RUR":
data.update(dict(name=_data.get("url-name"), value=_data.get("url", "value")))
elif section_type == "ADR":
data.update(dict(country=_data.get("country", "value")))
elif section_type == "EXR":
data.update(dict(type=(_data.get("external-id-type", default='') or ''),
value=_data.get("external-id-value"),
url=_data.get("external-id-url", "value"),
relationship=(_data.get("external-id-relationship", default='')
or '').lower()))
else:
data.update(dict(content=_data.get("content")))
else:
data = dict(
org_name=_data.get("organization", "name"),
disambiguated_id=_data.get("organization", "disambiguated-organization",
"disambiguated-organization-identifier"),
disambiguation_source=_data.get("organization", "disambiguated-organization",
"disambiguation-source"),
city=_data.get("organization", "address", "city"),
region=_data.get("organization", "address", "region"),
country=_data.get("organization", "address", "country"),
department=_data.get("department-name"),
role=_data.get("role-title"),
url=_data.get("url", "value"),
visibility=(_data.get("visibility", default='') or '').lower(),
start_date=PartialDate.create(_data.get("start-date")),
end_date=PartialDate.create(_data.get("end-date")))
external_ids_list = _data.get("external-ids", "external-id", default=[])
for extid in external_ids_list:
external_id_value = extid.get('external-id-value', default='')
external_id_url = extid.get('external-id-url', 'value', default='')
external_id_relationship = (extid.get(
'external-id-relationship', default='') or '').lower()
external_id_type = extid.get('external-id-type', default='')
if external_id_type and external_id_type.lower() != "grant_number":
external_id_type = external_id_type.replace('_', '-')
grant_data_list.append(dict(grant_number=external_id_value, grant_url=external_id_url,
grant_relationship=external_id_relationship,
grant_type=external_id_type))
if section_type == "FUN":
data.update(dict(funding_title=_data.get("title", "title", "value"),
funding_translated_title=_data.get("title", "translated-title", "value"),
translated_title_language=_data.get("title", "translated-title",
"language-code"),
funding_type=(_data.get("type", default='') or '').lower(),
funding_subtype=_data.get("organization-defined-type", "value"),
funding_description=_data.get("short-description"),
total_funding_amount=_data.get("amount", "value"),
total_funding_amount_currency=_data.get("amount", "currency-code")))
else:
data.update(dict(display_index=_data.get("display-index")))
except ApiException as e:
message = json.loads(e.body.replace("''", "\"")).get('user-messsage')
app.logger.error(f"Exception when calling ORCID API: {message}")
except Exception as ex:
app.logger.exception(
"Unhandler error occured while creating or editing a profile record.")
abort(500, ex)
else:
data = dict(
org_name=org.name,
disambiguated_id=org.disambiguated_id,
disambiguation_source=org.disambiguation_source,
city=org.city,
country=org.country)
form.process(data=data)
if form.validate_on_submit():
try:
if section_type == "RUR":
put_code, orcid, created, visibility = api.create_or_update_researcher_url(
put_code=put_code,
**{f.name: f.data
for f in form})
elif section_type == "ONR":
put_code, orcid, created, visibility = api.create_or_update_other_name(
put_code=put_code,
**{f.name: f.data
for f in form})
elif section_type == "ADR":
put_code, orcid, created, visibility = api.create_or_update_address(
put_code=put_code,
**{f.name: f.data
for f in form})
elif section_type == "EXR":
put_code, orcid, created, visibility = api.create_or_update_person_external_id(
put_code=put_code,
**{f.name: f.data
for f in form})
elif section_type == "KWR":
put_code, orcid, created, visibility = api.create_or_update_keyword(
put_code=put_code,
**{f.name: f.data
for f in form})
else:
grant_type = request.form.getlist('grant_type')
grant_number = request.form.getlist('grant_number')
grant_url = request.form.getlist('grant_url')
grant_relationship = request.form.getlist('grant_relationship')
# Skip entries with no grant number:
grant_data_list = [{
'grant_number': gn,
'grant_type': gt,
'grant_url': gu,
'grant_relationship': gr
} for gn, gt, gu, gr in zip(grant_number, grant_type, grant_url,
grant_relationship) if gn]
if section_type == "FUN":
put_code, orcid, created = api.create_or_update_individual_funding(
put_code=put_code,
grant_data_list=grant_data_list,
**{f.name: f.data
for f in form})
elif section_type == "WOR":
put_code, orcid, created = api.create_or_update_individual_work(
put_code=put_code,
grant_data_list=grant_data_list,
**{f.name: f.data
for f in form})
elif section_type == "PRR":
put_code, orcid, created = api.create_or_update_individual_peer_review(
put_code=put_code,
grant_data_list=grant_data_list,
**{f.name: f.data
for f in form})
else:
put_code, orcid, created, visibility = api.create_or_update_affiliation(
put_code=put_code,
affiliation=Affiliation[section_type],
grant_data_list=grant_data_list,
**{f.name: f.data
for f in form})
affiliation, _ = UserOrgAffiliation.get_or_create(
user=user,
organisation=org,
put_code=put_code)
affiliation.department_name = form.department.data
affiliation.department_city = form.city.data
affiliation.role_title = form.role.data
form.populate_obj(affiliation)
affiliation.save()
if put_code and created:
flash("Record details has been added successfully!", "success")
else:
flash("Record details has been updated successfully!", "success")
return redirect(_url)
except ApiException as e:
body = json.loads(e.body)
message = body.get("user-message")
dev_message = body.get("developer-message")
more_info = body.get("more-info")
flash(f"Failed to update the entry: {message}; {dev_message}", "danger")
if more_info:
flash(f'You can find more information at <a href="{more_info}">{more_info}</a>', "info")
app.logger.exception(f"For {user} exception encountered; {dev_message}")
except Exception as ex:
app.logger.exception(
"Unhandler error occured while creating or editing a profile record.")
abort(500, ex)
if not grant_data_list:
grant_data_list.append(dict(grant_number='', grant_url='',
grant_relationship='',
grant_type=''))
return render_template("profile_entry.html", section_type=section_type, form=form, _url=_url,
grant_data_list=grant_data_list)
@app.route("/section/<int:user_id>/<string:section_type>/list", methods=["GET", "POST"])
@login_required
def section(user_id, section_type="EMP"):
"""Show all user profile section list (either 'Education' or 'Employment')."""
_url = request.args.get("url") or request.referrer or url_for("viewmembers.index_view")
section_type = section_type.upper()[:3] # normalize the section type
if section_type not in ["EDU", "EMP", "FUN", "PRR", "WOR", "RUR", "ONR", "KWR", "ADR", "EXR", "DST", "MEM", "SER",
"QUA", "POS"]:
flash("Incorrect user profile section", "danger")
return redirect(_url)
try:
user = User.get(id=user_id)
except Exception:
flash("ORCID HUB doent have data related to this researcher", "warning")
return redirect(_url)
if not user.orcid:
flash("The user hasn't yet linked their ORCID record", "danger")
return redirect(_url)
orcid_token = None
if request.method == "POST" and section_type in ["RUR", "ONR", "KWR", "ADR", "EXR"]:
try:
orcid_token = OrcidToken.select(OrcidToken.access_token).where(
OrcidToken.user_id == user.id, OrcidToken.org_id == current_user.organisation_id,
OrcidToken.scopes.contains(orcid_client.PERSON_UPDATE)).first()
if orcid_token:
flash(
"There is no need to send an invite as you already have the token with 'PERSON/UPDATE' permission",
"success")
else:
app.logger.info(f"Ready to send an ivitation to '{user.email}'.")
token = utils.new_invitation_token()
invitation_url = url_for("orcid_login", invitation_token=token, _external=True)
ui = UserInvitation.create(
is_person_update_invite=True,
invitee_id=user.id,
inviter_id=current_user.id,
org=current_user.organisation,
email=user.email,
first_name=user.first_name,
last_name=user.last_name,
organisation=current_user.organisation,
disambiguated_id=current_user.organisation.disambiguated_id,
disambiguation_source=current_user.organisation.disambiguation_source,
affiliations=0,
token=token)
utils.send_email(
"email/property_invitation.html",
invitation=ui,
invitation_url=invitation_url,
recipient=(current_user.organisation.name, user.email),
reply_to=(current_user.name, current_user.email),
cc_email=(current_user.name, current_user.email))
flash("Invitation requesting 'PERSON/UPDATE' as been sent.", "success")
except Exception as ex:
flash(f"Exception occured while sending mails {ex}", "danger")
app.logger.exception(f"For {user} encountered exception")
return redirect(_url)
try:
if not orcid_token:
orcid_token = OrcidToken.get(user=user, org=current_user.organisation)
except Exception:
flash("User didn't give permissions to update his/her records", "warning")
return redirect(_url)
api = orcid_client.MemberAPIV3(user=user, org=current_user.organisation, access_token=orcid_token.access_token)
try:
api_response = api.get_section(section_type)
except ApiException as ex:
if ex.status == 401:
flash("User has revoked the permissions to update his/her records", "warning")
else:
flash(
"Exception when calling ORCID API: \n" + json.loads(ex.body.replace(
"''", "\"")).get('user-messsage'), "danger")
return redirect(_url)
except Exception as ex:
abort(500, ex)
# TODO: Organisation has read token
# TODO: Organisation has access to the employment records
# TODO: retrieve and tranform for presentation (order, etc)
try:
data = json.loads(api_response.data, object_pairs_hook=NestedDict)
except Exception as ex:
flash("User didn't give permissions to update his/her records", "warning")
flash("Unhandled exception occured while retrieving ORCID data: %s" % ex, "danger")
app.logger.exception(f"For {user} encountered exception")
return redirect(_url)
if section_type in ["FUN", "WOR"]:
records = (fs for g in data.get("group") for fs in g.get({
"FUN": "funding-summary",
"WOR": "work-summary",
}[section_type])) if data.get("group") else None
elif section_type == "PRR":
records = (fs for g in data.get("group") for pg in g.get("peer-review-group") for fs in pg.get({
"PRR": "peer-review-summary",
}[section_type])) if data.get("group") else None
elif section_type in ["EDU", "EMP", "DST", "MEM", "SER", "QUA", "POS"]:
records = (ss.get({"EDU": "education-summary",
"EMP": "employment-summary",
"DST": "distinction-summary",
"MEM": "membership-summary",
"SER": "service-summary",
"QUA": "qualification-summary",
"POS": "invited-position-summary"}[section_type]) for ag in
data.get("affiliation-group", default=[]) for ss in ag.get("summaries", default=[]))
else:
records = data.get({
"RUR": "researcher-url",
"KWR": "keyword",
"ADR": "address",
"ONR": "other-name",
"EXR": "external-identifier",
}[section_type])
return render_template(
"section.html",
user=user,
url=_url,
Affiliation=Affiliation,
records=records,
section_type=section_type,
org_client_id=current_user.organisation.orcid_client_id)
@app.route("/search/group_id_record/list", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def search_group_id_record():
"""Search groupID record."""
_url = url_for("groupidrecord.index_view")
form = GroupIdForm()
records = []
if request.method == "GET":
data = dict(
page_size="100",
page="1")
form.process(data=data)
if request.method == "POST" and not form.search.data:
group_id = request.form.get('g_id')
name = request.form.get('g_name')
description = request.form.get('description')
id_type = request.form.get('type')
put_code = request.form.get('put_code')
with db.atomic() as transaction:
try:
gir, created = GroupIdRecord.get_or_create(organisation=current_user.organisation,
group_id=group_id, name=name, description=description,
type=id_type)
gir.put_code = put_code
if created:
gir.add_status_line(f"Successfully added {group_id} from ORCID.")
flash(f"Successfully added {group_id}.", "success")
else:
flash(f"The GroupID Record {group_id} is already existing in your list.", "success")
gir.save()
except Exception as ex:
transaction.rollback()
flash(f"Failed to save GroupID Record: {ex}", "warning")
app.logger.exception(f"Failed to save GroupID Record: {ex}")
return redirect(_url)
elif form.validate_on_submit():
try:
orcid_token = None
org = current_user.organisation
try:
orcid_token = OrcidToken.get(org=org, scopes='/group-id-record/read')
except OrcidToken.DoesNotExist:
orcid_token = utils.get_client_credentials_token(org=org, scopes="/group-id-record/read")
except Exception as ex:
flash("Something went wrong in ORCID call, "
"please contact orcid@royalsociety.org.nz for support", "warning")
app.logger.exception(f'Exception occured {ex}')
api = orcid_client.MemberAPIV3(org=org, access_token=orcid_token.access_token)
params = {f.name: f.data for f in form if f.data and f.name in ['group_id', 'name', 'page_size', 'page']}
api_response = api.view_group_id_recordsv3(**params, _preload_content=False)
if api_response:
data = json.loads(api_response.data)
# Currently the api only gives correct response for one entry otherwise it throws 500 exception.
records.append(data)
except ApiException as ex:
if ex.status == 401:
orcid_token.delete_instance()
flash("Old token was expired. Please search again so that next time we will fetch latest token",
"warning")
elif ex.status == 500:
flash(f"ORCID API Exception: {ex}", "warning")
else:
flash(f"Something went wrong in ORCID call, Please try again by making necessary changes, "
f"In case you understand this message: {ex} or"
f" else please contact orcid@royalsociety.org.nz for support", "warning")
app.logger.warning(f'Exception occured {ex}')
except Exception as ex:
app.logger.exception(f'Exception occured {ex}')
abort(500, ex)
return render_template("groupid_record_entries.html", form=form, _url=_url, records=records)
@app.route("/load/org", methods=["GET", "POST"])
@roles_required(Role.SUPERUSER)
def load_org():
"""Preload organisation data."""
form = FileUploadForm()
if form.validate_on_submit():
row_count = OrgInfo.load_from_csv(read_uploaded_file(form))
flash("Successfully loaded %d rows." % row_count, "success")
return redirect(url_for("orginfo.index_view"))
return render_template("fileUpload.html", form=form, title="Organisation")
@app.route("/load/task/<task_type>", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_task(task_type):
"""Preload organisation data."""
task_type = TaskType[task_type]
record_model = getattr(models, task_type.name.title().replace('_', '') + "Record")
form = FileUploadForm(extensions=["csv", "tsv", "json", "yaml", "yml"])
if form.validate_on_submit():
try:
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
content = read_uploaded_file(form)
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = record_model.load_from_csv(content, filename=filename)
else:
task = record_model.load(content, filename=filename)
flash(f"Successfully loaded {task.record_count} rows.")
task_view = ("message" if task.is_raw else task_type.name.lower()) + "record.index_view"
return redirect(url_for(task_view, task_id=task.id))
except (
ValueError,
ModelException,
) as ex:
flash(f"Failed to load record file: {ex}", "danger")
app.logger.exception("Failed to load records.")
return render_template(
"fileUpload.html", form=form, task_type=task_type,
title=f"Researcher {task_type.name.title().replace('_', ' ')} Upload")
@app.route("/load/researcher/funding", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_researcher_funding():
"""Preload organisation data."""
form = FileUploadForm(extensions=["json", "yaml", "csv", "tsv"])
if form.validate_on_submit():
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
try:
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = FundingRecord.load_from_csv(
read_uploaded_file(form), filename=filename)
else:
task = FundingRecord.load_from_json(read_uploaded_file(form), filename=filename)
flash(f"Successfully loaded {task.record_count} rows.")
return redirect(url_for("fundingrecord.index_view", task_id=task.id))
except Exception as ex:
flash(f"Failed to load funding record file: {ex.args}", "danger")
app.logger.exception("Failed to load funding records.")
return render_template("fileUpload.html", form=form, title="Funding Info Upload")
@app.route("/load/researcher/work", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_researcher_work():
"""Preload researcher's work data."""
form = FileUploadForm(extensions=["json", "yaml", "csv", "tsv"])
if form.validate_on_submit():
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
try:
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = WorkRecord.load_from_csv(
read_uploaded_file(form), filename=filename)
else:
task = WorkRecord.load_from_json(read_uploaded_file(form), filename=filename)
flash(f"Successfully loaded {task.record_count} rows.")
return redirect(url_for("workrecord.index_view", task_id=task.id))
except Exception as ex:
flash(f"Failed to load work record file: {ex.args}", "danger")
app.logger.exception("Failed to load work records.")
return render_template("fileUpload.html", form=form, title="Work Info Upload")
@app.route("/load/researcher/peer_review", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_researcher_peer_review():
"""Preload researcher's peer review data."""
form = FileUploadForm(extensions=["json", "yaml", "csv", "tsv"])
if form.validate_on_submit():
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
try:
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = PeerReviewRecord.load_from_csv(
read_uploaded_file(form), filename=filename)
else:
task = PeerReviewRecord.load_from_json(read_uploaded_file(form), filename=filename)
flash(f"Successfully loaded {task.record_count} rows.")
return redirect(url_for("peerreviewrecord.index_view", task_id=task.id))
except Exception as ex:
flash(f"Failed to load peer review record file: {ex.args}", "danger")
app.logger.exception("Failed to load peer review records.")
return render_template("fileUpload.html", form=form, title="Peer Review Info Upload")
@app.route("/load/researcher/properties/<string:property_type>", methods=["GET", "POST"])
@app.route("/load/researcher/properties", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_properties(property_type=None):
"""Preload researcher's property data."""
form = FileUploadForm(extensions=["json", "yaml", "csv", "tsv"])
if form.validate_on_submit():
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
try:
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = PropertyRecord.load_from_csv(read_uploaded_file(form),
filename=filename,
file_property_type=property_type)
else:
task = PropertyRecord.load_from_json(read_uploaded_file(form),
filename=filename,
file_property_type=property_type)
flash(f"Successfully loaded {task.record_count} rows.")
return redirect(url_for("propertyrecord.index_view", task_id=task.id))
except Exception as ex:
flash(f"Failed to load researcher property record file: {ex}", "danger")
app.logger.exception("Failed to load researcher property records.")
return render_template(
"fileUpload.html",
form=form,
title=f"Researcher {property_type.title() if property_type else 'Property'} Upload")
@app.route("/load/researcher/urls", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_researcher_urls():
"""Preload researcher's property data."""
return load_properties(property_type="URL")
@app.route("/load/other/names", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_other_names():
"""Preload Other Name data."""
return load_properties(property_type="NAME")
@app.route("/load/keyword", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_keyword():
"""Preload Keywords data."""
return load_properties(property_type="KEYWORD")
@app.route("/load/country", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_country():
"""Preload Country data."""
return load_properties(property_type="COUNTRY")
@app.route("/load/other/ids", methods=["GET", "POST"])
@roles_required(Role.ADMIN)
def load_other_ids():
"""Preload researcher's Other IDs data."""
form = FileUploadForm(extensions=["json", "yaml", "csv", "tsv"])
if form.validate_on_submit():
filename = secure_filename(form.file_.data.filename)
content_type = form.file_.data.content_type
try:
if content_type in ["text/tab-separated-values", "text/csv"] or (
filename and filename.lower().endswith(('.csv', '.tsv'))):
task = OtherIdRecord.load_from_csv(
read_uploaded_file(form), filename=filename)
else:
task = OtherIdRecord.load_from_json(read_uploaded_file(form), filename=filename)
flash(f"Successfully loaded {task.record_count} rows.")
return redirect(url_for("otheridrecord.index_view", task_id=task.id))
except Exception as ex:
flash(f"Failed to load Other IDs record file: {ex}", "danger")
app.logger.exception("Failed to load Other IDs records.")
return render_template("fileUpload.html", form=form, title="Other IDs Info Upload")
def register_org(org_name,
                 email=None,
                 org_email=None,
                 tech_contact=True,
                 via_orcid=False,
                 first_name=None,
                 last_name=None,
                 orcid_id=None,
                 city=None,
                 region=None,
                 country=None,
                 course_or_role=None,
                 disambiguated_id=None,
                 disambiguation_source=None,
                 **kwargs):
    """Register a research organisation and invite its contact person.

    Creates (or finds) the ``Organisation`` and the contact ``User``, grants
    the user the ADMIN role (plus TECHNICAL when ``tech_contact`` is set),
    links them via ``UserOrg``, then creates an ``OrgInvitation`` and emails
    an invitation link (the ORCID login flow when ``via_orcid`` is set,
    otherwise the Hub index page).

    :param org_name: name of the organisation to register.
    :param email: contact email address; falls back to ``org_email``.
    :param tech_contact: make the invitee the organisation's tech contact.
    :param via_orcid: organisation/user details come from ORCID and the
        invitation uses the ORCID login flow.
    """
    email = (email or org_email).lower()
    try:
        User.get(User.email == email)
    except User.DoesNotExist:
        pass
    finally:
        # The whole registration runs in this ``finally`` block, i.e. it
        # proceeds whether or not a user with this email already exists.
        # NOTE(review): an unexpected exception from ``User.get`` above
        # would still execute this block and then propagate.
        try:
            org = Organisation.get(name=org_name)
        except Organisation.DoesNotExist:
            org = Organisation(name=org_name)
            if via_orcid:
                org.region = region
                org.city = city
                org.country = country
                org.disambiguated_id = disambiguated_id
                org.disambiguation_source = disambiguation_source
            # For a newly created organisation, pick up its Tuakiri name
            # from the preloaded organisation info, if available:
            try:
                org_info = OrgInfo.get(name=org.name)
            except OrgInfo.DoesNotExist:
                pass
            else:
                org.tuakiri_name = org_info.tuakiri_name
        try:
            org.save()
        except Exception:
            app.logger.exception("Failed to save organisation data")
            raise
        # Find or create the contact user and attach them to the org:
        try:
            user = User.get(email=email)
            user.organisation = org
        except User.DoesNotExist:
            user = User.create(
                email=email,
                organisation=org)
        user.roles |= Role.ADMIN
        if tech_contact:
            user.roles |= Role.TECHNICAL
        if via_orcid:
            # Fill in ORCID-sourced details only where they are missing:
            if not user.orcid and orcid_id:
                user.orcid = orcid_id
            if not user.first_name and first_name:
                user.first_name = first_name
            if not user.last_name and last_name:
                user.last_name = last_name
        try:
            user.save()
        except Exception:
            app.logger.exception("Failed to save user data")
            raise
        # Ensure the user is linked to the org as an administrator:
        try:
            user_org = UserOrg.get(user=user, org=org)
            user_org.is_admin = True
            try:
                user_org.save()
            except Exception:
                app.logger.exception(
                    "Failed to assign the user as an administrator to the organisation")
                raise
        except UserOrg.DoesNotExist:
            user_org = UserOrg.create(user=user, org=org, is_admin=True)
        # NOTE(review): "ivitation" in the log message below looks like a
        # typo for "invitation" (runtime string, left unchanged here).
        app.logger.info(f"Ready to send an ivitation to '{org_name} <{email}>'.")
        token = utils.new_invitation_token()
        # TODO: for via_orcid constact direct link to ORCID with callback like to HUB
        if via_orcid:
            invitation_url = url_for("orcid_login", invitation_token=token, _external=True)
        else:
            invitation_url = url_for("index", _external=True)
        oi = OrgInvitation.create(
            inviter_id=current_user.id,
            invitee_id=user.id,
            email=user.email,
            org=org,
            token=token,
            tech_contact=tech_contact,
            url=invitation_url)
        utils.send_email(
            "email/org_invitation.html",
            invitation=oi,
            recipient=(org_name, email),
            reply_to=(current_user.name, current_user.email),
            cc_email=(current_user.name, current_user.email))
        org.is_email_sent = True
        if tech_contact:
            org.tech_contact = user
        try:
            org.save()
        except Exception:
            app.logger.exception("Failed to save organisation data")
            raise
    # TODO: user can be admin for multiple org and org can have multiple admins:
    # TODO: user shoud be assigned exclicitly organization
    # TODO: OrgAdmin ...
    # TODO: gracefully handle all exceptions (repeated infitation, user is
    # already an admin for the organization etc.)
@app.route("/invite/organisation", methods=["GET", "POST"])
@roles_required(Role.SUPERUSER)
def invite_organisation():
    """Invite an organisation to register.

    Flow:
    * Hub administrator (super user) invokes the page;
    * Fills in the form with the organisation and organisation technical contact data (including an email address);
    * Submits the form;
    * A secure registration token gets created;
    * An email message with confirmation link gets created and sent off to the technical contact.
    """
    form = OrgRegistrationForm()
    if form.validate_on_submit():
        # Collect every form field into keyword arguments for register_org().
        params = {f.name: f.data for f in form}
        try:
            org_name = params.get("org_name")
            email = params.get("org_email").lower()
            if params.get("tech_contact"):
                # Inviting a replacement technical contact: demote the old one first.
                try:
                    org = Organisation.get(name=org_name)
                    if org.tech_contact and org.tech_contact.email != email:
                        # If the current tech contact is technical contact of more than one organisation,
                        # then dont update the Roles in User table.
                        check_tech_contact_count = Organisation.select().where(
                            Organisation.tech_contact == org.tech_contact).count()
                        if check_tech_contact_count == 1:
                            org.tech_contact.roles &= ~Role.TECHNICAL
                            org.tech_contact.save()
                        flash(f"The current tech.contact {org.tech_contact.name} "
                              f"({org.tech_contact.email}) will be revoked.", "warning")
                except Organisation.DoesNotExist:
                    pass
                except User.DoesNotExist:
                    pass
            register_org(**params)
            org = Organisation.get(name=org_name)
            user = User.get(email=email)
            # Tailor the success message to what was actually invited.
            if org.confirmed:
                if user.is_tech_contact_of(org):
                    flash("New Technical contact has been Invited Successfully! "
                          "An email has been sent to the Technical contact", "success")
                    app.logger.info(
                        f"For Organisation '{org_name}', "
                        f"New Technical Contact '{email}' has been invited successfully.")
                else:
                    flash("New Organisation Admin has been Invited Successfully! "
                          "An email has been sent to the Organisation Admin", "success")
                    app.logger.info(
                        f"For Organisation '{org_name}', "
                        f"New Organisation Admin '{email}' has been invited successfully.")
            else:
                flash("Organisation Invited Successfully! "
                      "An email has been sent to the organisation contact", "success")
                app.logger.info(f"Organisation '{org_name}' successfully invited. "
                                f"Invitation sent to '{email}'.")
        except Exception as ex:
            app.logger.exception(f"Failed to send registration invitation with {params}.")
            flash(f"Failed to send registration invitation: {ex}.", "danger")
    # Cached pre-fill data for the registration page: union of stored OrgInfo
    # rows with organisations that have no OrgInfo record (NULL contact fields).
    org_info = cache.get("org_info")
    if not org_info:
        org_info = {
            r.name: r.to_dict(only=[
                OrgInfo.email, OrgInfo.first_name, OrgInfo.last_name, OrgInfo.country,
                OrgInfo.city, OrgInfo.disambiguated_id, OrgInfo.disambiguation_source
            ])
            for r in OrgInfo.select(OrgInfo.name, OrgInfo.email, OrgInfo.first_name,
                                    OrgInfo.last_name, OrgInfo.country, OrgInfo.city,
                                    OrgInfo.disambiguated_id, OrgInfo.disambiguation_source)
            | Organisation.select(
                Organisation.name,
                SQL('NULL').alias("email"),
                SQL('NULL').alias("first_name"),
                SQL('NULL').alias("last_name"), Organisation.country, Organisation.city,
                Organisation.disambiguated_id, Organisation.disambiguation_source).join(
                    OrgInfo, JOIN.LEFT_OUTER, on=(
                        OrgInfo.name == Organisation.name)).where(OrgInfo.name.is_null())
        }
        cache.set("org_info", org_info, timeout=60)
    return render_template("registration.html", form=form, org_info=org_info)
@app.route("/invite/user", methods=["GET", "POST"])
@roles_required(Role.SUPERUSER, Role.ADMIN)
def invite_user():
    """Invite a researcher to join the hub."""
    form = UserInvitationForm()
    org = current_user.organisation
    if request.method == "GET":
        # Pre-fill the form with the admin's organisation details.
        form.organisation.data = org.name
        form.disambiguated_id.data = org.disambiguated_id
        form.disambiguation_source.data = org.disambiguation_source
        form.city.data = org.city
        form.region.data = org.region
        form.country.data = org.country
    # NOTE: this "while" executes at most once (unconditional break at the
    # bottom); it exists only so "break" can abort submission handling early.
    while form.validate_on_submit():
        resend = form.resend.data
        email = form.email_address.data.lower()
        affiliations = 0
        invited_user = None
        if form.is_student.data:
            affiliations = Affiliation.EDU
        if form.is_employee.data:
            affiliations |= Affiliation.EMP
        invited_user = User.select().where(User.email == email).first()
        # If the user already granted this org a token with "/activities/update"
        # scope, write the affiliation directly instead of sending an invitation.
        if (invited_user and OrcidToken.select().where(
                (OrcidToken.user_id == invited_user.id) & (OrcidToken.org_id == org.id)
                & (OrcidToken.scopes.contains("/activities/update"))).exists()):
            try:
                if affiliations & (Affiliation.EMP | Affiliation.EDU):
                    api = orcid_client.MemberAPIV3(org, invited_user)
                    params = {f.name: f.data for f in form if f.data != ""}
                    # One ORCID record write per selected affiliation type.
                    for a in Affiliation:
                        if a & affiliations:
                            params["affiliation"] = a
                            params["initial"] = False
                            api.create_or_update_affiliation(**params)
                    flash(
                        f"The ORCID Hub was able to automatically write an affiliation with "
                        f"{invited_user.organisation}, as the nature of the affiliation with {invited_user} "
                        f"organisation does appear to include either Employment or Education.\n ",
                        "success")
                else:
                    flash(
                        f"The ORCID Hub was not able to automatically write an affiliation with "
                        f"{invited_user.organisation}, as the nature of the affiliation with {invited_user} "
                        f"organisation does not appear to include either Employment or Education.\n "
                        f"Please select 'staff' or 'student' checkbox present on this page.", "warning")
            except Exception as ex:
                flash(f"Something went wrong: {ex}", "danger")
                app.logger.exception("Failed to create affiliation record")
        else:
            try:
                # Warn about a duplicate invitation; stop unless resend requested.
                ui = UserInvitation.get(org=org, email=email)
                flash(
                    f"An invitation to affiliate with {org} had been already sent to {email} earlier "
                    f"at {isodate(ui.sent_at)}.", "warning" if resend else "danger")
                if not form.resend.data:
                    break
            except UserInvitation.DoesNotExist:
                pass
            inviter = current_user._get_current_object()
            # Queue the invitation email as a background task.
            job = send_user_invitation.queue(
                inviter.id,
                org.id,
                email=email,
                affiliations=affiliations,
                **{f.name: f.data
                   for f in form},
                cc_email=(current_user.name, current_user.email))
            flash(
                f"An invitation to {email} was {'resent' if resend else 'sent'} successfully (task id: {job.id}).",
                "success")
        break
    return render_template("user_invitation.html", form=form)
@app.route(
    "/settings/email_template", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.TECHNICAL, Role.ADMIN)
def manage_email_template():
    """Manage organisation invitation email template.

    Handles four submit buttons: prefill/reset (load the default template),
    cancel (no-op), send (send a test email to the current user), and
    save (persist the template after a placeholder sanity check).
    """
    org = current_user.organisation
    form = EmailTemplateForm(obj=org)
    default_template = app.config.get("DEFAULT_EMAIL_TEMPLATE")
    if form.validate_on_submit():
        if form.prefill.data or form.reset.data:
            form.email_template.data = default_template
        elif form.cancel.data:
            pass
        elif form.send.data:
            # Send a test message to the current user so they can preview it.
            logo = org.logo if form.email_template_enabled.data else None
            email = current_user.email
            recipient = (current_user.name, email)
            utils.send_email(
                "email/test.html",
                recipient=recipient,
                reply_to=recipient,
                cc_email=recipient,
                sender=recipient,
                subject="TEST EMAIL",
                org_name=org.name,
                logo=url_for("logo_image", token=logo.token, _external=True) if logo else None,
                base=form.email_template.data
                if form.email_template_enabled.data else default_template)
            flash(f"Test email sent to the address <b>{email}</b>", "info")
        elif form.save.data:
            # form.populate_obj(org)
            # Refuse to save a template with neither placeholder, since the
            # rendered invitation would carry no message or link.
            if any(x in form.email_template.data for x in ['{MESSAGE}', '{INCLUDED_URL}']):
                org.email_template = form.email_template.data
                org.email_template_enabled = form.email_template_enabled.data
                org.save()
                flash("Saved organisation email template", "info")
            else:
                flash("Are you sure? Without a {MESSAGE} or {INCLUDED_URL} "
                      "your users will be unable to respond to your invitations.", "danger")
    return render_template("email_template.html", BASE_URL=url_for("index", _external=True)[:-1], form=form)
@app.route("/logo/<string:token>")
@app.route("/logo")
def logo_image(token=None):
    """Serve the organisation logo identified by *token*, or the default banner."""
    if token:
        stored = File.select().where(File.token == token).first()
        if stored:
            return send_file(
                BytesIO(stored.data), mimetype=stored.mimetype, attachment_filename=stored.filename)
    # No token, or no matching file: fall back to the default banner image.
    return redirect(url_for("static", filename="images/banner-small.png", _external=True))
@app.route(
    "/settings/logo", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.TECHNICAL, Role.ADMIN)
def logo():
    """Manage organisation 'logo'.

    GET with an image Accept header serves the current logo; otherwise
    renders the management form.  POST either resets the logo or stores
    the uploaded file.
    """
    org = current_user.organisation
    # Content negotiation: serve the raw image when the client asks for one.
    best = request.accept_mimetypes.best_match(["text/html", "image/*"])
    if best == "image/*" and org.logo:
        return send_file(
            BytesIO(org.logo.data),
            mimetype=org.logo.mimetype,
            attachment_filename=org.logo.filename)
    form = LogoForm()
    if request.method == "POST" and form.reset.data:
        org.logo = None
        org.save()
    elif form.validate_on_submit():
        f = form.logo_file.data
        # FIX: the sanitised filename was computed but never used — the raw
        # client-supplied name was stored and the flash message was broken.
        filename = secure_filename(f.filename)
        logo = File.create(data=f.read(), mimetype=f.mimetype, filename=filename)
        org.logo = logo
        org.save()
        flash(f"Saved organisation logo '{filename}'", "info")
    return render_template("logo.html", form=form)
@app.route(
    "/settings/applications/<int:app_id>", methods=[
        "GET",
        "POST",
    ])
@app.route(
    "/settings/applications", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.SUPERUSER, Role.TECHNICAL)
def application(app_id=None):
    """Register an application client.

    If the current user already has a registered client, redirect to the
    existing credentials page instead of registering a second one.
    """
    form = ApplicationFrom()
    if app_id:
        client = Client.select().where(Client.id == app_id).first()
    else:
        client = Client.select().where(Client.user_id == current_user.id).first()
        if client:
            # One application per user: point at the existing credentials.
            # FIX: corrected "aready" -> "already" in the user-facing message.
            flash(
                f"You already have registered application '{client.name}' and issued API credentials.",
                "info")
            return redirect(url_for("api_credentials", app_id=client.id))
    if form.validate_on_submit():
        client = Client(org_id=current_user.organisation.id)
        form.populate_obj(client)
        # Generate fresh, cryptographically random API credentials.
        client.client_id = secrets.token_hex(10)
        client.client_secret = secrets.token_urlsafe(20)
        client.save()
        flash(f"Application '{client.name}' was successfully registered.", "success")
        return redirect(url_for("api_credentials", app_id=client.id))
    return render_template("application.html", form=form)
@app.route(
    "/settings/credentials/<int:app_id>", methods=[
        "GET",
        "POST",
    ])
@app.route(
    "/settings/credentials", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.SUPERUSER, Role.TECHNICAL)
def api_credentials(app_id=None):
    """Manage API credentials.

    Supports revoking issued tokens, resetting the client id/secret,
    updating the application record, and deleting it entirely.
    """
    if app_id:
        client = Client.select().where(Client.id == app_id).first()
        # Users may only manage credentials of their own application.
        if client and client.user_id != current_user.id:
            flash("Access denied!", "danger")
            return redirect(url_for("application"))
    else:
        client = Client.select().where(Client.user_id == current_user.id).first()
    # Nothing registered yet: go to the application registration page.
    if not client:
        return redirect(url_for("application"))
    form = CredentialForm(obj=client)
    if form.validate_on_submit():
        if form.revoke.data:
            # Invalidate all access tokens issued to this client.
            Token.delete().where(Token.client == client).execute()
        elif form.reset.data:
            # Issue new credentials and reflect them back into the form.
            form.client_id.data = client.client_id = secrets.token_hex(10)
            form.client_secret.data = client.client_secret = secrets.token_urlsafe(20)
            client.save()
        elif form.update_app.data:
            form.populate_obj(client)
            client.save()
        elif form.delete.data:
            # Remove tokens and the client atomically.
            with db.atomic():
                Token.delete().where(Token.client == client).execute()
                client.delete_instance(recursive=True)
            return redirect(url_for("application"))
    return render_template("api_credentials.html", form=form)
@app.route("/hub/api/v0.1/users/<int:user_id>/orgs/<int:org_id>")
@app.route("/hub/api/v0.1/users/<int:user_id>/orgs/")
@roles_required(Role.SUPERUSER, Role.ADMIN)
def user_orgs(user_id, org_id=None):
    """Return the organisations linked to the user — one (by id) or all of them."""
    try:
        user = User.get(id=user_id)
    except User.DoesNotExist:
        return jsonify({"error": f"Not Found user with ID: {user_id}"}), 404
    if not org_id:
        return jsonify({"user-orgs": list(user.organisations.dicts())})
    matched = user.organisations.where(Organisation.id == org_id).first()
    if matched is None:
        return jsonify({"error": f"Not Found Organisation with ID: {org_id}"}), 404
    return jsonify(model_to_dict(matched))
@app.route(
    "/hub/api/v0.1/users/<int:user_id>/orgs/<int:org_id>",
    methods=[
        "DELETE",
        "PATCH",
        "POST",
        "PUT",
    ])
@app.route(
    "/hub/api/v0.1/users/<int:user_id>/orgs/", methods=[
        "DELETE",
        "PATCH",
        "POST",
        "PUT",
    ])
@roles_required(Role.SUPERUSER, Role.ADMIN)
def user_orgs_org(user_id, org_id=None):
    """Add an organisation to the user.

    Receives:
    {"id": N, "is_admin": true/false, "is_tech_contact": true/false, ...}

    Where: id - the organisation ID.

    If the user is already linked to the organisation, the entry gets only updated.
    If another user is the tech.contact of the organisation, the existing user
    should be demoted.
    Returns: user_org entry
    """
    data = request.json
    # The organisation id must come from either the URL or the JSON body.
    if not org_id and not (data and data.get("id")):
        return jsonify({"error": "NOT DATA"}), 400
    if not org_id:
        org_id = data.get("id")
    if request.method == "DELETE":
        # Drop the user/org link, then clean up roles and the current org.
        UserOrg.delete().where(
            (UserOrg.user_id == user_id) & (UserOrg.org_id == org_id)).execute()
        user = User.get(id=user_id)
        # Revoke ADMIN only if no other admin link remains.
        if (user.roles & Role.ADMIN) and not user.admin_for.exists():
            user.roles &= ~Role.ADMIN
            user.save()
            app.logger.info(f"Revoked ADMIN role from user {user}")
        if user.organisation_id == org_id:
            user.organisation_id = None
            user.save()
        # NOTE(review): a 204 response normally carries no body — the JSON
        # payload here may be discarded by clients; confirm the intended code.
        return jsonify({
            "user-org": data,
            "status": "DELETED",
        }), 204
    else:
        org = Organisation.get(org_id)
        uo, created = UserOrg.get_or_create(user_id=user_id, org_id=org_id)
        if "is_admin" in data:
            uo.is_admin = data["is_admin"]
            uo.save()
        if "is_tech_contact" in data:
            user = User.get(id=user_id)
            if data["is_tech_contact"]:
                # Updating old Technical Contact's Role info.
                if org.tech_contact and org.tech_contact != user:
                    org.tech_contact.roles &= ~Role.TECHNICAL
                    org.tech_contact.save()
                # Assigning new tech contact to organisation.
                org.tech_contact = user
            elif org.tech_contact == user:
                org.tech_contact_id = None
            org.save()
        return jsonify({
            "org": model_to_dict(org, recurse=False),
            "user_org": model_to_dict(uo, recurse=False),
            "status": ("CREATED" if created else "UPDATED"),
        }), (201 if created else 200)
@app.route("/services/<int:user_id>/updated", methods=["POST"])
def update_webhook(user_id):
    """Handle ORCID webhook calls notifying that a user's profile was updated."""
    try:
        updated_at = datetime.utcnow()
        user = User.get(user_id)
        # Only users with a linked ORCID iD can receive updates.
        if not user.orcid:
            return '', 404
        user.orcid_updated_at = updated_at
        user.save()
        utils.notify_about_update(user)
    except Exception:
        # Swallow and log: any failure still yields 204 below, presumably so
        # the webhook sender does not keep retrying — confirm intended.
        app.logger.exception(f"Invalid user_id: {user_id}")
    return '', 204
@app.route(
    "/settings/webhook", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.TECHNICAL, Role.SUPERUSER)
def org_webhook():
    """Manage the organisation webhook settings (enable/disable notifications)."""
    # Return the user to the page they came from after submission.
    _url = request.values.get("url") or request.referrer
    org = current_user.organisation
    form = WebhookForm(obj=org)
    if form.validate_on_submit():
        form.populate_obj(org)
        org.save()
        if form.webhook_enabled.data or form.email_notifications_enabled.data:
            # Activation runs as a queued background task.
            job = utils.enable_org_webhook.queue(org)
            flash(f"Webhook activation was initiated (task id: {job.id})", "info")
        else:
            utils.disable_org_webhook.queue(org)
            flash("Webhook was disabled.", "info")
    return render_template("form.html", form=form, title="Organisation Webhook", url=_url)
@app.route("/sync_profiles/<int:task_id>", methods=["GET", "POST"])
@app.route(
    "/sync_profiles", methods=[
        "GET",
        "POST",
    ])
@roles_required(Role.TECHNICAL, Role.SUPERUSER)
def sync_profiles(task_id=None):
    """Start research profile synchronization.

    Shows the current (or the selected) SYNC task of the organisation and
    lets the technical contact or a superuser start, restart, or close it.
    """
    # Only the organisation's technical contact (or a superuser) may sync.
    if not current_user.is_tech_contact_of() and not current_user.is_superuser:
        # FIX: corrected "conatact" -> "contact" in the user-facing message.
        flash(
            f"Access Denied! You must be the technical contact of '{current_user.organisation}'",
            "danger")
        abort(403)
    if not task_id:
        task_id = request.args.get("task_id")
    if task_id:
        task = Task.get(task_id)
        org = task.org
    else:
        # No explicit task: pick the newest SYNC task of the current organisation.
        org = current_user.organisation
        task = Task.select().where(Task.task_type == TaskType.SYNC, Task.org == org).order_by(
            Task.created_at.desc()).limit(1).first()
    form = ProfileSyncForm()
    if form.is_submitted():
        if form.close.data:
            _next = get_next_url() or url_for("task.index_view")
            return redirect(_next)
        if task and not form.restart.data:
            flash("There is already an active profile synchronization task", "warning")
        else:
            # (Re)start: discard any previous SYNC tasks and queue a fresh one.
            Task.delete().where(Task.org == org, Task.task_type == TaskType.SYNC).execute()
            task = Task.create(org=org, task_type=TaskType.SYNC)
            job = utils.sync_profile.queue(task_id=task.id)
            flash(f"Profile synchronization task was initiated (job id: {job.id})", "info")
        return redirect(url_for("sync_profiles"))
    # Paginate the task log entries for display.
    page_size = 10
    page = int(request.args.get("page", 1))
    page_count = math.ceil(task.log_entries.count() / page_size) if task else 0
    return render_template(
        "profile_sync.html",
        form=form,
        title="Profile Synchronization",
        task=task,
        page=page,
        page_size=page_size,
        page_count=page_count)
@app.route("/remove/orcid/linkage", methods=["POST"])
@login_required
def remove_linkage():
    """Delete an ORCID Token and ORCiD iD.

    Revokes every access token this organisation holds for the current
    user (via the ORCID revoke endpoint), then clears the stored ORCID iD
    unless the user still has links or admin rights elsewhere.
    """
    _url = request.args.get("url") or request.referrer or url_for("link")
    org = current_user.organisation
    token_revoke_url = app.config["ORCID_BASE_URL"] + "oauth/revoke"
    # Organisation admins must keep their linkage; refuse and point at support.
    if UserOrg.select().where(
            (UserOrg.user_id == current_user.id) & (UserOrg.org_id == org.id) & UserOrg.is_admin).exists():
        flash(f"Failed to remove linkage for {current_user}, as this user appears to be one of the admins for {org}. "
              "Please contact orcid@royalsociety.org.nz for support", "danger")
        return redirect(_url)
    for token in OrcidToken.select().where(OrcidToken.org_id == org.id, OrcidToken.user_id == current_user.id):
        try:
            resp = requests.post(
                token_revoke_url,
                headers={"Accepts": "application/json"},
                data=dict(
                    client_id=org.orcid_client_id,
                    client_secret=org.orcid_secret,
                    token=token.access_token))
            if resp.status_code != 200:
                # FIX: this message was a plain string with un-substituted
                # "{...}" placeholders referencing an undefined name "ex".
                flash(f"Failed to revoke token {token.access_token}: {resp.text}", "danger")
                return redirect(_url)
            token.delete_instance()
        except Exception as ex:
            flash(f"Failed to revoke token {token.access_token}: {ex}", "danger")
            app.logger.exception('Failed to delete record.')
            return redirect(_url)
    # Check if the User is Admin for other organisation or has given permissions to other organisations.
    if UserOrg.select().where(
            (UserOrg.user_id == current_user.id) & UserOrg.is_admin).exists() or OrcidToken.select().where(
                OrcidToken.user_id == current_user.id).exists():
        flash(
            f"We have removed the Access token related to {org}, However we did not remove the stored ORCiD ID as "
            f"{current_user} is either an admin of other organisation or has given permission to other organisation.",
            "warning")
    else:
        current_user.orcid = None
        current_user.save()
        # FIX: corrected "storied" -> "stored" in the user-facing message.
        flash(
            f"We have removed the Access token and stored ORCiD ID for {current_user}. "
            f"If you logout now without giving permissions, you may not be able to login again. "
            f"Please press the below button to give permissions to {org}",
            "success")
    return redirect(_url)
class ScheduerView(BaseModelView):
    """Simple Flask-RQ2 scheduled task viewer (read-only Flask-Admin view)."""

    # The job list is read-only: viewing only, no create/edit/delete.
    can_edit = False
    can_delete = False
    can_create = False
    can_view_details = True

    # Render datetime values as ISO-formatted strings in the UI.
    column_type_formatters = {datetime: lambda view, value: isodate(value)}

    def __init__(self,
                 name=None,
                 category=None,
                 endpoint=None,
                 url=None,
                 static_folder=None,
                 menu_class_name=None,
                 menu_icon_type=None,
                 menu_icon_value=None):
        """Initialize the view."""
        self._search_fields = []
        # The "model" backing this view is the Flask-RQ2 job class, not a DB table.
        model = FlaskJob
        super().__init__(
            model,
            name,
            category,
            endpoint,
            url,
            static_folder,
            menu_class_name=menu_class_name,
            menu_icon_type=menu_icon_type,
            menu_icon_value=menu_icon_value)
        self._primary_key = self.scaffold_pk()

    def scaffold_pk(self):  # noqa: D102
        return "id"

    def get_pk_value(self, model):  # noqa: D102
        return model.id

    def scaffold_list_columns(self):
        """Scaffold list columns."""
        return [
            "description", "created_at", "origin", "enqueued_at", "timeout", "result_ttl",
            "status", "meta"
        ]

    def scaffold_sortable_columns(self):  # noqa: D102
        return self.scaffold_list_columns()

    def init_search(self):  # noqa: D102
        if self.column_searchable_list:
            for p in self.column_searchable_list:
                if isinstance(p, str):
                    p = getattr(self.model, p)
                # Check type
                if not isinstance(p, (CharField, TextField)):
                    raise Exception(
                        f'Can only search on text columns. Failed to setup search for "{p}"')
                self._search_fields.append(p)
        return bool(self._search_fields)

    def scaffold_filters(self, name):  # noqa: D102
        return None

    def is_valid_filter(self, filter_object):  # noqa: D102
        return isinstance(filter_object, filters.BasePeeweeFilter)

    def scaffold_form(self):  # noqa: D102
        from wtforms import Form
        return Form()

    def scaffold_inline_form_models(self, form_class):  # noqa: D102
        converter = self.model_form_converter(self)
        inline_converter = self.inline_model_form_converter(self)
        for m in self.inline_models:
            form_class = inline_converter.contribute(converter,
                                                     self.model,
                                                     form_class,
                                                     m)
        return form_class

    def get_query(self):  # noqa: D102
        # All jobs currently known to the RQ scheduler.
        return rq.get_scheduler().get_jobs()

    def get_list(self, page, sort_column, sort_desc, search, filters, execute=True,
                 page_size=None):
        """Return records from the database.

        :param page:
            Page number
        :param sort_column:
            Sort column name
        :param sort_desc:
            Descending or ascending sort
        :param search:
            Search query
        :param filters:
            List of filter tuples
        :param execute:
            Execute query immediately? Default is `True`
        :param page_size:
            Number of results. Defaults to ModelView's page_size. Can be
            overriden to change the page_size limit. Removing the page_size
            limit requires setting page_size to 0 or False.
        """
        jobs = list(self.get_query())
        # Get count
        count = len(jobs)
        # TODO: sort
        return count, jobs

    def get_one(self, job_id):
        """Get a single job."""
        try:
            scheduler = rq.get_scheduler()
            return scheduler.job_class.fetch(job_id, connection=scheduler.connection)
        except Exception as ex:
            # NOTE(review): "jeb" looks like a typo for "job" in this message.
            flash(f"The jeb with given ID: {job_id} doesn't exist or it was deleted: {ex}.",
                  "danger")
            abort(404)

    def is_accessible(self):
        """Verify if the view is accessible for the current user."""
        # Only active, authenticated superusers may inspect scheduled jobs.
        if not current_user.is_active or not current_user.is_authenticated:
            return False
        if current_user.has_role(Role.SUPERUSER):
            return True
        return False
# Register the scheduled-job viewer in the admin UI ("schedude" endpoint name).
admin.add_view(ScheduerView(name="schedude", endpoint="schedude"))
| {
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"path": "orcid_hub/views.py",
"copies": "1",
"size": "165778",
"license": "mit",
"hash": 4529615198530900000,
"line_mean": 40.3308401895,
"line_max": 120,
"alpha_frac": 0.5566661439,
"autogenerated": false,
"ratio": 4.084308556505457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013877207667986541,
"num_lines": 4011
} |
#Applies an algorithm to significantly decimate dense microlens data (eg from wfirst data challenge) without losing information.
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
#The idea here is that we successively pass over the data, decimating progressive factors of two
#In each pass, we perform LLSF over surrounding N points, then compare likelihood of that fit
#both using the full data and using a factor of two decimation for the target points.
#If the difference is smaller than some tolerance (weighted by the original data rms err)
#then we keep the decimated version
# data are in a np array of the form [t,x,sigma^2,tmin,tmax]
# line info is in the form [t0, x0, slope]
def chi2(data, line):
    """Chi-squared of the line model [t0, x0, slope] against the data rows [t, x, sigma^2, ...]."""
    residual = data[:, 1] - line[1] - (data[:, 0] - line[0]) * line[2]
    return np.sum(residual ** 2 / data[:, 2])
def DoLLSF(data, tref=None):
    """Weighted linear least-squares fit of data rows [t, x, sigma^2, ...].

    Returns [tref, x(tref), slope]; if tref is omitted, the weighted mean
    time of the data is used as the reference point.
    """
    sig2 = 1 / np.sum(data[:, 2] ** (-1))
    t0 = sig2 * np.sum(data[:, 0] / data[:, 2])
    x0 = sig2 * np.sum(data[:, 1] / data[:, 2])
    dt = data[:, 0] - t0
    dx = data[:, 1] - x0
    slope = np.sum(dt * dx / data[:, 2]) / np.sum(dt * dt / data[:, 2])
    if tref is None:
        tref = t0
    return np.array([tref, x0 + (tref - t0) * slope, slope])
def subData(data,line,dchitol):
    """Try to replace the rows of `data` with a single equivalent point.

    `data` rows are [t, x, sigma^2, tmin, tmax]; `line` is [t0, x0, slope]
    (the LLSF of the surrounding segment).  Returns `data` unchanged if a
    constraint fails, otherwise a 1-row array for the merged point.
    Updates the module-level diagnostic counters.
    """
    #We will replace the data with a single point, requiring that
    # 1. llsf fit for this data + other data is unchanged
    # -require slope and x0 variations of delta chi2 vanish
    # 2. the derivative of chi^2 wrt llsf intercept at mean time is preserved
    #
    # deltachi = sum[ (xi -x0 -(ti-t0)*s)^2 / sig2i ] - (xnew -x0 -(tnew-t0)*s)^2 / sig2new
    #
    # d^2deltachi/dx0^2 = 0 -> 1/sig2new = sum(1/sig2i)
    # and
    # d deltachi/dx0 = 0 -> xnew -x0 -s*(tnew-t0) = sig2new * sum((xi-x0-s*(ti-t0))/sig2i)
    # = xd-x0
    # where xd=sig2new*sum(xi/sig2i), and we write the line setting t0=t0d=sig2new*sum(ti/sig2i)
    # and
    # d deltachi/ds = 0 = -sum((ti-t0)*(xi-x0-s*(ti-t0))/sig2i) + (tnew-t0)*(xnew-x0-s*(tnew-t0))/sig2new
    # = -sum((ti-t0)*ri/sig2i) + (tnew-t0)*(xd-x0)/sig2new
    # where ri = xi-x0-s*(ti-t0)
    #
    # For the last equation, if xd!=x0, we can set tnew to solve, but we constrain tnew to be within the
    # time limits of the data.
    # We also constrain the size of the resulting deltachi to be below some limit, after solving as above
    global nsub, nsubtfail,nsubchi2fail
    nsub+=1
    # Inverse-variance weighted time/value of the candidate merged point.
    sig2new=1/np.sum(data[:,2]**(-1))
    t0d=sig2new*np.sum(data[:,0]/data[:,2])
    xd=sig2new*np.sum(data[:,1]/data[:,2])
    slope=line[2]
    x0=(t0d-line[0])*slope+line[1]
    #print("line0=",t0d,x0,slope)
    trel=data[:,0]-t0d;
    res=(data[:,1]-x0-trel*slope)
    #compute new t point to ensure that slope matches line
    trsum=np.sum(trel*res/data[:,2])
    #xsum=np.sum((data[:,1]-x0)/data[:,2])
    xsum=(xd-x0)/sig2new
    if(xsum==0):
        if(trsum==0):toff=0
        else: return data
    else: toff=trsum/xsum
    # Constrain the merged point's time to lie inside the original span.
    dataTmax=data[-1,4]
    dataTmin=data[0,3]
    if(dataTmax-t0d <= toff ):
        #print("fail tmax")
        nsubtfail+=1
        return data
    if(dataTmin-t0d >= toff ):
        #print("fail tmin")
        nsubtfail+=1
        return data
    tnew=t0d+toff
    #compute new xval
    xnew=xd+slope*(tnew-t0d)
    #print("xd,tnew,xnew",xd,tnew,xnew)
    # Reject the merge if it changes chi^2 by more than the tolerance.
    dchi=(np.sum(res*res/data[:,2])-(xd-x0)**2/sig2new)
    if(dchi>dchitol):
        #print("fail dchi=",dchi,">",dchitol)
        nsubchi2fail+=1
        return data
    return np.array([[tnew,xnew,sig2new,dataTmin,dataTmax]])
def reduceDataChunk(segment,target,tol):
    """Decimate `target` (a sub-slice of `segment`) if the segment is line-like.

    Fits a line over the whole `segment`; if the reduced chi^2 of the fit is
    within `tol` of unity, delegates to subData() to merge the target points.
    Otherwise returns `target` unchanged.  Updates the diagnostic counters.
    """
    line=DoLLSF(segment)
    n=len(segment)
    if( n - len(target) < 2):
        #in this case there is no real solution to the formal problem I pose
        #if there is just 1 remaining point, then a solution could be found, but it
        #will be set at the location of the remaining point and will not satisfy the
        #time-range condition
        return target
    redchi2=chi2(segment,line)/(n-2)
    global nchi2,nchi2fail
    nchi2+=1
    if(redchi2>1+tol):
        #print("fail redchi2-1=",redchi2-1)
        nchi2fail+=1
        return target
    return subData(target,line,tol*n)
def reduceDataPass(data,chunksize,tol,segwid=3):
    """One decimation pass: try to merge each chunk of `chunksize` points.

    Each target chunk is fit against a surrounding segment of
    `segwid*chunksize` points and replaced by reduceDataChunk() when safe.
    Returns the (possibly shorter) new data array.
    """
    ndata=len(data)
    nchunk=int(ndata/chunksize)+1
    segsize=int(segwid*chunksize)
    # Offset so the chunk grid is roughly centred on the data.
    noff=int((nchunk*chunksize-ndata)/2)
    #noff=int((nchunk*chunksize-ndata)*np.random.rand())
    nfirst=chunksize
    if(noff>0):nfirst-=noff
    for i in range(nchunk):
        #print("\n****\ni=",i)
        #set the range of the target chunk constraining within bounds
        itargleft=nfirst+(i-1)*chunksize
        if(itargleft<0):itargleft=0
        itargright=nfirst+i*chunksize
        if(itargright>ndata):itargright=ndata
        target=data[itargleft:itargright]
        #time grouping test:
        dtmax=0;dtmin=target[-1,0]-target[0,0]
        for k in range(len(target)-1):
            dt=target[k+1,0]-target[k,0]
            if(dt>dtmax):dtmax=dt
        #for the time grouping test dtmin we expand to the nearest neighbor points (if any)
        for k in range(max(0,itargleft-1),min(ndata-1,itargright+1)):
            dt=data[k+1,0]-data[k,0]
            if(dt<dtmin):dtmin=dt
        if(len(target)<2 or dtmax/dtmin > 30):
            #target too short or times not grouped
            replacement=target.copy()
        else: #passed test so continue
            #print(" target=",target)
            #set the range of the surrounding segment
            isegleft=int((itargleft+itargright-segsize)/2)
            if(isegleft<0):isegleft=0
            isegright=isegleft+segsize
            if(isegright>ndata):isegright=ndata
            #print(" ",isegleft,"--",itargleft,"++",itargright,"--",isegright)
            segment=data[isegleft:isegright]
            #print(" segment=",segment)
            replacement=reduceDataChunk(segment,target,tol).copy()
            #diagnostics:
            #newseg=np.concatenate((data[isegleft:itargleft],replacement,data[itargright:isegright]),axis=0)
            #llsf=DoLLSF(segment,tref=0)[1:3]
            #nllsf=DoLLSF(newseg,tref=0)[1:3]
            #print(" replacement=",replacement)
        # Accumulate the (possibly merged) chunk into the output array.
        if(i==0):newdata=replacement
        else: newdata=np.append(newdata,replacement,axis=0)
        #print(" newdata=",newdata)
        #print(" LLSF: ",llsf,"->",nllsf," delta=",llsf-nllsf)
    return newdata
def zeroCounters():
    """Reset the module-level decimation diagnostic counters to zero."""
    global nchi2, nchi2fail, nsub, nsubtfail, nsubchi2fail
    nchi2 = nchi2fail = 0
    nsub = nsubtfail = nsubchi2fail = 0
#Some controls
maxpass=1000      # hard cap on the number of decimation passes
ntemper=20        # tempering scale: how fast the chunk size shrinks with pass number

parser = argparse.ArgumentParser(description='Attempt to decimate data without losing information.')
parser.add_argument('fname', metavar='chain_file', type=str, help='input file path')
parser.add_argument('-lev', default="5",help='level of aggressiveness in data reduction')
parser.add_argument('-plot', action="store_true", help='whether to plot results')
args = parser.parse_args()

#Note that I find similar levels of concentration [and net num of samples] on the peak region for segw=csmin*nwid~75 with csmin varying from 4->10
#These tests are done with
#segw=75,tol=0.25 segw=150,tol=0.25 segw=150,tol=0.5 segw=75,tol=0.5
#2: n=523 nev=321 F=.61 764 / 1182 = .64 533 / 799 = .67 338 / 476 = .71
#3: n=736 nev=472 F=.64 704 / 1158 = .61 523 / 823 = .64 330 / 487 = .68
#4: n=783 nev=421 F=.54 747 / 1196 = .62 536 / 909 = .59 368 / 659 = .56
#5: n=900 nev=494 F=.55 784 / 1389 = .56 617 /1174 = .53 386 / 744 = .52
#6: n=796 nev=425 F=.53 728 / 1306 = .62 670 /1140 = .59 437 / 782 = .56
#7: n=877 nev=485 F=.55 812 / 1409 = .58
#8: n=917 nev=512 F=.56 797 / 1324 = .60 684 /1253 = .55 384 / 769 = .50
#9: n=908 nev=504 F=.55
#10:n=908 nev=493 F=.54 787 / 1283 = .61 695 /1167 = .60
#11:n=1022 nev=476 F=.46
#12:n=926 nev=398 F=.43 753 / 1317 = .57 666 /1137 = .59
#14:n=1109 nev=513 F=.46 819 / 1433 = .57 664 /1188 = .56
segw=150;tol=0.2;csmin=10
#here we set up some scalings for these params blending between the following guides
#lev=0:segw=1000,tol=0.05,csmin=25 #few % red of lens reg. but next 10x reduced overall
#lev=5:segw=150,tol=0.2,csmin=10 #reduction by factor of ~30 overall
#lev=10:segw=60,tol=0.5,csmin=2 #reduction by factor ~100 overall
#lev=15:segw=25,tol=1.0,csmin=2 #reduction by factor >200 overall
lev=int(args.lev)
tag="lev"+str(lev)
# Interpolate segw/tol geometrically (csmin linearly) between the guide levels above.
if(lev<=5):
    x=lev/5.0
    segw=int(np.exp(np.log(1000)*(1-x)+np.log(150)*x))
    tol=np.exp(np.log(0.05)*(1-x)+np.log(0.2)*x)
    csmin=int(25*(1-x)+10*x)
    #csmin=10
elif(lev<=10):
    x=(lev-5)/5.0
    segw=int(np.exp(np.log(150)*(1-x)+np.log(60)*x))
    tol=np.exp(np.log(0.2)*(1-x)+np.log(0.5)*x)
    csmin=int(10*(1-x)+2.0*x)
else:
    x=(lev-10)/5.0
    segw=int(np.exp(np.log(60)*(1-x)+np.log(25)*x))
    tol=np.exp(np.log(0.5)*(1-x)+np.log(1.0)*x)
    csmin=2
print("segw,csmin,tol:",segw,csmin,tol)
nwid=int(segw/csmin)

data=np.loadtxt(args.fname) #Assume reading in t,x,sigma
#data=np.array([[t,np.random.normal(),1] for t in range(300)])#fake data
#data=np.array([[t,0.1*(t%2)+t,1] for t in range(10)])#fake data
# Convert rows to the internal form [t, x, sigma^2, tmin, tmax].
data=np.array([[d[0],d[1],d[2]**2,d[0],d[0]] for d in data])
origdata=data.copy()
lastcs=0
doneAtSize=False
# Repeated passes with a tempered, shrinking chunk size until no further reduction.
for i in range(maxpass):
    zeroCounters()
    chunksize=int(csmin+1000/(i/ntemper*(1+i/ntemper)+1))
    if(chunksize==lastcs and doneAtSize):
        #already tried this case
        continue
    #print("\n",i)
    #print("Trying chunksize=",chunksize)
    newdata = reduceDataPass(data,chunksize,tol,nwid)
    #print("data size ",len(data),"->",len(newdata))
    #print("fail rate: chi2:",nchi2fail/nchi2,"sub t:",nsubtfail/(nsub+1e-18),"sub chi2:",nsubchi2fail/(nsub+1e-18))
    datallsf=DoLLSF(origdata,tref=0)
    newdatallsf=DoLLSF(newdata,tref=0)
    #print("llsf:",datallsf[1:3],"->",newdatallsf[1:3]," delta=",(newdatallsf-datallsf)[1:3])
    #for diagonistic define the event region as 2458750<t<2458785 for challenge 152 data
    if(False):
        nevent=0;
        for t in newdata[:,0]:
            if(t>2458750 and t<2458785):nevent+=1
        print("Fraction of samples in event region: ",nevent/len(newdata)," n=",nevent,"/",len(newdata), "cs=",chunksize)
    #termination condition
    if(len(newdata)==len(data) and lastcs==chunksize):
        if(chunksize<=csmin):
            break
        else: doneAtSize=True
    else:doneAtSize=False
    lastcs=chunksize
    data=newdata
print("nsamples:",len(origdata),"->",len(newdata))
if(args.plot):
    # Overlay the original data plus progressively truncated prefixes of the
    # result to visualise how the retained sample density varies.
    plt.errorbar(origdata[:,0],origdata[:,1],yerr=np.sqrt(origdata[:,2]),fmt="+")
    plt.errorbar(newdata[:,0],newdata[:,1],yerr=np.sqrt(newdata[:,2]),fmt=".")
    icut=int(len(newdata)*9/10)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
    icut=int(len(newdata)*4/5)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
    icut=int(len(newdata)*3/5)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
    icut=int(len(newdata)*2/5)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
    icut=int(len(newdata)/5)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
    icut=int(len(newdata)/10)
    plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
# Convert back to [t, x, sigma] for output.
newdata=np.array([[d[0],d[1],np.sqrt(d[2])] for d in newdata])
outfile=args.fname.replace(".txt","_"+tag+".txt")
print("outfile=",outfile)
np.savetxt(outfile,newdata)
if(args.plot):
    plt.show()
| {
"repo_name": "JohnGBaker/gleam",
"path": "script/decimate.py",
"copies": "1",
"size": "11865",
"license": "apache-2.0",
"hash": 7788371446339336000,
"line_mean": 39.9137931034,
"line_max": 146,
"alpha_frac": 0.6227560051,
"autogenerated": false,
"ratio": 2.66270197486535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.378545797996535,
"avg_score": null,
"num_lines": null
} |
"""Applies an OSMChange file to the database"""
import sys
import os
import time
import urllib2
import StringIO
import gzip
from datetime import datetime
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from pymongo import Connection
def convert_time(isotime):
    """Convert an ISO 8601 'YYYY-MM-DDTHH:MM:SSZ' string to a Unix timestamp.

    The value is interpreted through the local timezone, as time.mktime() does.
    """
    parsed = datetime.strptime(isotime, "%Y-%m-%dT%H:%M:%SZ")
    return time.mktime(parsed.timetuple())
class OsmChangeHandler(ContentHandler):
    """SAX ContentHandler for OSMChange XML: batches create/modify/delete
    primitives and applies them to the 'osm' MongoDB database through the
    supplied client.

    NOTE: Python 2 code (print statements, long(), dict.has_key()).
    """
    def __init__(self, client):
        """Initializes the OsmChange object"""
        self.action = ""        # current action element: 'create'/'modify'/'delete'
        self.record = {}        # the primitive currently being parsed
        self.nodes = []         # primitives batched per action, flushed in endElement
        self.ways = []
        self.relations = []
        self.client = client    # pymongo connection
    def fillDefault(self, attrs):
        """Fills in default attributes (id, timestamp, empty tags, plus any of
        user/uid/version/changeset that are present) for new records"""
        self.record['id'] = long(attrs['id'])
        self.record['timestamp'] = convert_time(attrs['timestamp'])
        self.record['tags'] = {}
        if attrs.has_key('user'):
            self.record['user'] = attrs['user']
        if attrs.has_key('uid'):
            self.record['uid'] = long(attrs['uid'])
        if attrs.has_key('version'):
            self.record['version'] = int(attrs['version'])
        if attrs.has_key('changeset'):
            self.record['changeset'] = long(attrs['changeset'])
    def startElement(self, name, attrs):
        """Parse the XML element at the start"""
        if name in ['create', 'modify', 'delete']:
            self.action = name
        elif name == 'node':
            self.record = {}
            self.fillDefault(attrs)
            self.record['loc'] = {'lat': float(attrs['lat']), 'lon': float(attrs['lon'])}
        elif name == 'tag':
            # MongoDB doesn't let us have dots in the key names.
            k = attrs['k']
            k = k.replace('.', ',,')
            self.record['tags'][k] = attrs['v']
        elif name == 'way':
            # NOTE(review): unlike the 'node' branch, self.record is not reset
            # here, so leftover keys from the previous element can carry over.
            self.fillDefault(attrs)
            self.record['nodes'] = []
        elif name == 'relation':
            self.fillDefault(attrs)
            self.record['members'] = []
        elif name == 'nd':
            # way member node: record the ref and back-link the node to this way
            ref = long(attrs['ref'])
            self.record['nodes'].append(ref)
            nodes2ways = self.client.osm.nodes.find_one({ 'id' : ref })
            if nodes2ways:
                if 'ways' not in nodes2ways:
                    nodes2ways['ways'] = []
                nodes2ways['ways'].append(self.record['id'])
                self.client.osm.nodes.save(nodes2ways)
            else:
                print "Node %d ref'd by way %d not in file." % \
                    (ref, self.record['id'])
        elif name == 'member':
            # relation member: back-link the referenced way/node to this relation
            ref = long(attrs['ref'])
            member = {'type': attrs['type'],
                      'ref': ref,
                      'role': attrs['role']}
            self.record['members'].append(member)
            if attrs['type'] == 'way':
                ways2relations = self.client.osm.ways.find_one({ 'id' : ref})
                if ways2relations:
                    if 'relations' not in ways2relations:
                        ways2relations['relations'] = []
                    ways2relations['relations'].append(self.record['id'])
                    self.client.osm.ways.save(ways2relations)
            elif attrs['type'] == 'node':
                nodes2relations = self.client.osm.nodes.find_one({ 'id' : ref})
                if nodes2relations:
                    if 'relations' not in nodes2relations:
                        nodes2relations['relations'] = []
                    nodes2relations['relations'].append(self.record['id'])
                    self.client.osm.nodes.save(nodes2relations)
        # NOTE(review): every branch below repeats an element name already
        # handled earlier in this same if/elif chain, so they are unreachable
        # dead code and could be removed.
        elif name == 'node':
            self.record['loc'] = {'lat': float(attrs['lat']),
                                  'lon': float(attrs['lon'])}
            self.fillDefault(attrs)
        elif name == 'tag':
            # MongoDB doesn't let us have dots in the key names.
            k = attrs['k']
            k = k.replace('.', ',,')
            self.record['tags'][k] = attrs['v']
        elif name == 'way':
            self.fillDefault(attrs)
            self.record['nodes'] = []
        elif name == 'relation':
            self.fillDefault(attrs)
            self.record['members'] = []
        elif name == 'nd':
            ref = long(attrs['ref'])
            self.record['nodes'].append(ref)
        elif name == 'member':
            ref = long(attrs['ref'])
            member = {'type': attrs['type'],
                      'ref': ref,
                      'role': attrs['role']}
            self.record['members'].append(member)
    def endElement(self, name):
        """Finish parsing osm objects or actions"""
        if name in ('node', 'way', 'relation'):
            self.type = name
            if self.action == 'delete':
                # deletes are soft: the record is kept but flagged invisible
                self.record['visible'] = False
            if self.type == 'way':
                # denormalize: copy the member nodes' locations onto the way
                nodes = self.client.osm.nodes.find({ 'id': { '$in': self.record['nodes'] } },
                                                   { 'loc': 1, '_id': 0 })
                self.record['loc'] = []
                for node in nodes:
                    self.record['loc'].append(node['loc'])
            getattr(self, name + 's').append(self.record)
        elif name in ('create', 'modify', 'delete'):
            # flush the batched primitives for this action to MongoDB
            # NOTE(review): the nodes/ways/relations batches are never cleared
            # after a flush -- confirm whether later actions re-write them.
            if name == 'create':
                for coll in ('nodes', 'ways', 'relations'):
                    if getattr(self, coll):
                        getattr(self.client.osm, coll).insert(getattr(self, coll))
            elif name == 'modify':
                for coll in ('nodes', 'ways', 'relations'):
                    if getattr(self, coll):
                        primitive_list = getattr(self, coll)
                        for prim in primitive_list:
                            getattr(self.client.osm, coll).update({'id': prim['id']},
                                                                  prim)
            elif name == 'delete':
                for coll in ('nodes', 'ways', 'relations'):
                    if getattr(self, coll):
                        primitive_list = getattr(self, coll)
                        for prim in primitive_list:
                            getattr(self.client.osm, coll).remove({'id': prim['id']})
            self.action = None
if __name__ == "__main__":
    client = Connection()
    parser = make_parser()
    keepGoing = True
    # Keep applying minute-replication diffs until the next state file does not
    # exist yet (i.e. we have caught up with the server).
    while keepGoing:
        # Read the state.txt
        sf = open('state.txt', 'r')
        state = {}
        for line in sf:
            if line[0] == '#':
                continue
            (k, v) = line.split('=')
            # the state file escapes ':' as '\:'
            state[k] = v.strip().replace("\\:", ":")
        # Grab the sequence number and build a URL out of it
        sqnStr = state['sequenceNumber'].zfill(9)
        url = "http://planet.openstreetmap.org/minute-replicate/%s/%s/%s.osc.gz" % (sqnStr[0:3], sqnStr[3:6], sqnStr[6:9])
        print "Downloading change file (%s)." % (url)
        content = urllib2.urlopen(url)
        content = StringIO.StringIO(content.read())
        gzipper = gzip.GzipFile(fileobj=content)
        print "Parsing change file."
        handler = OsmChangeHandler(client)
        parser.setContentHandler(handler)
        parser.parse(gzipper)
        # Download the next state file
        nextSqn = int(state['sequenceNumber']) + 1
        sqnStr = str(nextSqn).zfill(9)
        url = "http://planet.openstreetmap.org/minute-replicate/%s/%s/%s.state.txt" % (sqnStr[0:3], sqnStr[3:6], sqnStr[6:9])
        try:
            u = urllib2.urlopen(url)
            statefile = open('state.txt', 'w')
            statefile.write(u.read())
            statefile.close()
        except Exception, e:
            # typically a 404: the next diff isn't published yet, so stop
            keepGoing = False
            print e
    client.disconnect()
| {
"repo_name": "iandees/mongosm",
"path": "apply-osmchange.py",
"copies": "1",
"size": "7918",
"license": "mit",
"hash": -1486452090332511700,
"line_mean": 38.59,
"line_max": 125,
"alpha_frac": 0.4945693357,
"autogenerated": false,
"ratio": 4.187202538339503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5181771874039502,
"avg_score": null,
"num_lines": null
} |
"""Applies the APASS-based calibration to the resolved catalogues."""
import os
import glob
import numpy as np
from astropy import log
from astropy import table
from astropy.table import Table
from astropy.io import fits
from surveytools import SURVEYTOOLS_DATA
def get_calib_group(imagetbl="vphas-dr2-red-images.fits"):
    """Map each field offset name (e.g. '0149a') to the list of all offsets
    observed during the same night, according to the given image table."""
    images = table.Table.read(os.path.join(SURVEYTOOLS_DATA, imagetbl))
    # The first 9 characters of the image filename identify the night.
    images['night'] = [filename[0:9] for filename in images['image file']]
    groups = {}
    for night_group in images.group_by('night').groups:
        siblings = []
        for field in np.unique(night_group['Field_1']):
            field_id = field.split('_')[1]
            siblings.append(field_id + 'a')
            siblings.append(field_id + 'b')
        # every offset in the night maps to the full sibling list
        for offset in siblings:
            groups[offset] = siblings
    return groups
# Night-based calibration groups for the red- and blue-filter image sets.
RED_GROUPS = get_calib_group("vphas-dr2-red-images.fits")
BLUE_GROUPS = get_calib_group("vphas-dr2-blue-images.fits")
# Read the per-offset shifts into a dictionary keyed by field/offset name
SHIFTS_TBL = Table.read('shifts-mike.fits')
SHIFTS = dict(zip(SHIFTS_TBL['field'], SHIFTS_TBL))
INDEX_TBL = Table.read('/car-data/gb/vphas/psfcat/vphas-offsetcats.fits')
# Per-offset H-alpha zeropoint correction that fixes the r-Ha zeropoint offset to 3.01
HA_ZPT_CORR = dict(zip(INDEX_TBL['offset'], -(3.01 - (INDEX_TBL['rzpt'] - INDEX_TBL['hazpt']))))
# Vega-to-AB magnitude conversions, from Fukugita (1996)
VEGA2AB = {'u': 0.961, 'g': -0.123, 'r2': 0.136, 'r': 0.136, 'i': 0.373}
def get_median_shifts():
    """Return the survey-wide median shift per band, used as a fallback for
    offsets with insufficient APASS calibrators."""
    result = {}
    for band in ('u', 'g', 'r2', 'r', 'i'):
        median_shift = np.nanmedian(SHIFTS_TBL[band + 'shift'])
        result[band] = median_shift
        result[band + '_ab'] = median_shift + VEGA2AB[band]
    # H-alpha uses the r-band shift but requires a special ZPT correction to fix the offset to 3.01
    result['ha'] = result['r'] + np.median([x for x in HA_ZPT_CORR.values()])
    return result
def get_night_shifts(offset):
    """Return the mean APASS-based shifts (and their standard deviations)
    averaged over all offsets observed in the same night as `offset`.

    Returns a ``(shifts, shifts_std)`` tuple of dicts keyed by band. The
    blue-filter bands (u, g, r2) are set to NaN (with no std entry) when the
    offset has no blue-group siblings.
    """
    shifts = {}
    shifts_std = {}
    for band in ['r', 'i']:
        offset_shifts = []
        # BUG FIX: the module-level group dicts are RED_GROUPS/BLUE_GROUPS;
        # the original referenced undefined lowercase names (NameError).
        for sibling in RED_GROUPS[offset]:
            try:
                offset_shifts.append(SHIFTS[sibling][band + 'shift'])
            except KeyError:
                # sibling has no measured shift; skip it
                pass
        shifts[band] = np.nanmean(offset_shifts)
        shifts_std[band] = np.nanstd(offset_shifts)
    if offset not in BLUE_GROUPS:
        for band in ['u', 'g', 'r2']:
            shifts[band] = np.nan
    else:
        for band in ['u', 'g', 'r2']:
            offset_shifts = []
            for sibling in BLUE_GROUPS[offset]:
                try:
                    offset_shifts.append(SHIFTS[sibling][band + 'shift'])
                except KeyError:
                    pass
            shifts[band] = np.nanmean(offset_shifts)
            shifts_std[band] = np.nanstd(offset_shifts)
    return shifts, shifts_std
def get_shifts(offset):
    """Return the magnitude shifts to apply to each band column for `offset`.

    Builds the per-night shifts, derives the AB-system variants, and adds the
    H-alpha shift (r-band shift plus the per-offset zeropoint correction that
    fixes the r-Ha zeropoint offset to 3.01).
    """
    shifts, shifts_std = get_night_shifts(offset)
    # BUG FIX: iterate over a snapshot of the keys -- we add '<band>_ab'
    # entries inside the loop, and mutating a dict while iterating its view
    # raises RuntimeError on Python 3.
    for band in list(shifts.keys()):
        shifts[band + '_ab'] = shifts[band] + VEGA2AB[band]
    hazpcorr = HA_ZPT_CORR[offset]
    log.debug('Ha zeropoint correction: {:+.2f}'.format(hazpcorr))
    shifts['ha'] = shifts['r'] + hazpcorr
    return shifts
def apply_calibration(input_fn):
    """Apply the APASS-based per-band shifts to one resolved catalogue file.

    Writes the calibrated catalogue alongside the input, with
    'resolved.fits' replaced by 'calibrated.fits' in the filename.
    """
    # The offset name is the leading '-'-separated token of the filename.
    offset = input_fn.split('/')[-1].split('-')[0]
    shifts = get_shifts(offset)
    #log.info(shifts)
    log.info('Opening {}'.format(input_fn))
    cat = fits.open(input_fn, memmap=False)
    for band in ['u', 'g', 'r2', 'ha', 'r', 'i']:
        cat[1].data[band] += shifts[band]
        cat[1].data['aperMag_' + band] += shifts[band]
        cat[1].data['magLim_' + band] += shifts[band]
        if band != 'ha':
            # AB-system columns get the Vega->AB adjusted shift (no AB column for Ha)
            cat[1].data[band + '_ab'] += shifts[band + '_ab']
            cat[1].data['aperMag_' + band + '_ab'] += shifts[band + '_ab']
    # Also correct the colours
    cat[1].data['u_g'] += shifts['u'] - shifts['g']
    cat[1].data['g_r2'] += shifts['g'] - shifts['r2']
    cat[1].data['r_i'] += shifts['r'] - shifts['i']
    cat[1].data['r_ha'] += shifts['r'] - shifts['ha']
    output_fn = input_fn.replace('resolved.fits', 'calibrated.fits')
    log.info('Writing {}'.format(output_fn))
    cat.writeto(output_fn, clobber=True)
if __name__ == '__main__':
    log.setLevel('INFO')
    filenames = glob.glob('/car-data/gb/vphas/psfcat/resolved/*resolved.fits')
    # Calibrate all catalogues in parallel across 16 worker processes.
    import multiprocessing
    pool = multiprocessing.Pool(16)
    pool.map(apply_calibration, filenames)
    #apply_calibration('tmp/0005a-1-resolved.fits')
| {
"repo_name": "barentsen/surveytools",
"path": "scripts/apply-calibration/apply-calibration.py",
"copies": "1",
"size": "4912",
"license": "mit",
"hash": -1729786211372585500,
"line_mean": 36.2121212121,
"line_max": 99,
"alpha_frac": 0.5981270358,
"autogenerated": false,
"ratio": 3.052827843380982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9131331759731023,
"avg_score": 0.003924623889991775,
"num_lines": 132
} |
# Django settings for the django-contact-form example project.
# The app lives in a directory above our example project, so we need to make
# sure it is findable on our path.
import sys
from os.path import abspath, dirname, join

PROJECT_DIR = abspath(dirname(__file__))
grandparent = abspath(join(PROJECT_DIR, '..'))
# Prepend (not append) so the local packages shadow any installed copies.
for path in (grandparent, PROJECT_DIR):
    if path not in sys.path:
        sys.path.insert(0, path)

DEBUG = True

ADMINS = (
    ('Joe Smith', 'joe.smith@example.com'),
)

MANAGERS = ADMINS

# Single-file SQLite database stored inside the example project directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(PROJECT_DIR, 'example.db'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = '6kfuaep$0j)5*b-zodi+p)x*xl$=27@s@queywbp_$_l4f#3a+'

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)

ROOT_URLCONF = 'example.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': (
            join(PROJECT_DIR, 'templates'),
        ),
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ]
        },
    },
]

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'debug_toolbar',
    'contact_form',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# debug_toolbar: only show the toolbar to these client addresses
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}
| {
"repo_name": "madisona/django-contact-form",
"path": "example/settings.py",
"copies": "1",
"size": "4953",
"license": "bsd-3-clause",
"hash": -5600193003733385000,
"line_mean": 28.6586826347,
"line_max": 79,
"alpha_frac": 0.6743387846,
"autogenerated": false,
"ratio": 3.6259150805270863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4800253865127086,
"avg_score": null,
"num_lines": null
} |
# Django settings for the django-forms-ext example project.
# The app lives in a directory above our example project, so we need to make
# sure it is findable on our path.
import sys
from os.path import abspath, dirname, join

parent = abspath(dirname(__file__))
grandparent = abspath(join(parent, '..'))
# Prepend (not append) so the local packages shadow any installed copies.
for path in (grandparent, parent):
    if path not in sys.path:
        sys.path.insert(0, path)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Single-file SQLite database (relative to the working directory).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'example.db',
    }
}

SECRET_KEY = 'abcahfdkafdaksfdj93243'

STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (abspath(join(parent, 'templates')), )

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# Addresses treated as "internal" by debug tooling.
INTERNAL_IPS = ('127.0.0.1', )

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'django.contrib.messages',
    'django_nose',
    'sample',
    'forms_ext',
)

# Run the test suite through nose.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| {
"repo_name": "imtapps/django-forms-ext",
"path": "example/settings.py",
"copies": "1",
"size": "1220",
"license": "bsd-2-clause",
"hash": 1489276251031723,
"line_mean": 23.4,
"line_max": 62,
"alpha_frac": 0.6819672131,
"autogenerated": false,
"ratio": 3.3795013850415514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4561468598141551,
"avg_score": null,
"num_lines": null
} |
# Django settings for the django-admin-ext example project.
# The app lives in a directory above our example project, so we need to make
# sure it is findable on our path.
import sys
import os
from os.path import abspath, dirname, join

parent = abspath(dirname(__file__))
grandparent = abspath(join(parent, '..'))
# Prepend (not append) so the local packages shadow any installed copies.
for path in (grandparent, parent):
    if path not in sys.path:
        sys.path.insert(0, path)

DEBUG = True

# Repository root (two levels above this settings file).
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': abspath(join(parent, 'example.db')),
    }
}

SECRET_KEY = 'x'

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
ADMIN_MEDIA_PREFIX = '/static/admin/'

ROOT_URLCONF = 'urls'

# The apps under test; NOTE(review): PROJECT_APPS is presumably consumed by
# CI/test tooling that distinguishes project apps from Django ones -- confirm.
PROJECT_APPS = ('djadmin_ext', 'sample')

INSTALLED_APPS = (
    'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions',
    'django.contrib.messages', 'django.contrib.staticfiles', 'django_nose'
) + PROJECT_APPS

MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [abspath(join(parent, 'templates'))],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug':
            DEBUG,
        },
    },
]

# Run the test suite through nose; fixtures live in the sample app.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
FIXTURE_DIRS = [
    'example/sample/fixtures/',
]
| {
"repo_name": "imtapps/django-admin-ext",
"path": "example/settings.py",
"copies": "1",
"size": "2126",
"license": "bsd-2-clause",
"hash": 8436451740365314000,
"line_mean": 31.2121212121,
"line_max": 117,
"alpha_frac": 0.6636876764,
"autogenerated": false,
"ratio": 3.729824561403509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48935122378035084,
"avg_score": null,
"num_lines": null
} |
# Django settings for the zendesk_django_auth example project.
# The app lives in a directory above our example project, so we need to make
# sure it is findable on our path.
import sys
from os.path import abspath, dirname, join
from os import pardir

PROJECT_DIR = abspath(dirname(__file__))
grandparent = abspath(join(PROJECT_DIR, pardir))
# Prepend (not append) so the local packages shadow any installed copies.
for path in (grandparent, PROJECT_DIR):
    if path not in sys.path:
        sys.path.insert(0, path)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Single-file SQLite database kept inside the example project directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(PROJECT_DIR, 'local.db'),
    }
}

ALLOWED_HOSTS = []

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = ')m%q23e1*23pgcij6-44rpgpz)i63%z7=h7!!tr0v_@01e3)c5'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = ()

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'zendesk_auth',
)

# zendesk_auth configuration: the Zendesk instance URL and its shared token.
ZENDESK_URL = "https://domain.zendesk.com"
ZENDESK_TOKEN = "my-random-token-provided-by-zendesk"
LOGIN_URL = "/admin" | {
"repo_name": "madisona/zendesk_django_auth",
"path": "example/settings.py",
"copies": "1",
"size": "1945",
"license": "bsd-3-clause",
"hash": -2876088033469813000,
"line_mean": 23.6329113924,
"line_max": 65,
"alpha_frac": 0.7064267352,
"autogenerated": false,
"ratio": 3.2255389718076284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9428801150045603,
"avg_score": 0.0006329113924050633,
"num_lines": 79
} |
# App logic
from __future__ import print_function
import os
from . import keys
from . import readchar
from . import command
from . import renderer
from .filter import filter_commands
from sys import version_info, platform
# Uniform name for reading a line from stdin across Python 2 and 3.
if version_info[0] == 2:
    keyboard_input = raw_input
else:
    keyboard_input = input
def get_os():
    """Classify sys.platform as 'osx', 'linux', or 'unknown'."""
    name = platform.lower()
    if name == 'darwin':
        return 'osx'
    if name.startswith('linux'):
        return 'linux'
    # NOTE(review): raising for unsupported platforms might be preferable.
    return 'unknown'
def get_user_marks_path():
    """Path of the file holding the user's own bookmarked commands."""
    data_home = os.getenv('MARKER_DATA_HOME')
    return os.path.join(data_home, 'user_commands.txt')
def get_tldr_os_marks_path():
    """Path of the bundled tldr commands file specific to the current OS."""
    marker_home = os.getenv('MARKER_HOME')
    return os.path.join(marker_home, 'tldr', get_os() + '.txt')
def get_tldr_common_marks_path():
    """Path of the bundled tldr commands shared by all platforms."""
    marker_home = os.getenv('MARKER_HOME')
    return os.path.join(marker_home, 'tldr', 'common.txt')
def mark_command(cmd_string, alias):
    ''' Add a new Mark.

    Prompts on stdin for either of command/alias that was not supplied as an
    argument, then appends the pair to the user's marks file.
    '''
    if cmd_string:
        cmd_string = cmd_string.strip()
    if not cmd_string:
        cmd_string = keyboard_input("Command:")
    else:
        print("command: %s" % cmd_string)
    if not cmd_string:
        # the command is mandatory; the alias is optional
        print ("command field is required")
        return
    if not alias:
        alias = keyboard_input("Alias?:")
    else:
        print("alias: %s" % alias)
    if '##' in cmd_string or '##' in alias:
        # '##' isn't allowed since it's used as the command/alias separator in the marks file
        print ("command can't contain ##(it's used as command alias seperator)")
        return
    commands = command.load(get_user_marks_path())
    command.add(commands, command.Command(cmd_string, alias))
    command.save(commands, get_user_marks_path())
def get_selected_command_or_input(search):
    ''' Display an interactive UI interface where the user can type and select commands.

    Returns the selected command's text if a match was chosen, or the
    characters typed at the prompt when there is no selection.
    '''
    # Search user marks first, then the bundled tldr marks (OS-specific + common).
    commands = command.load(get_user_marks_path()) + command.load(get_tldr_os_marks_path()) + command.load(get_tldr_common_marks_path())
    state = State(commands, search)
    # draw the screen (prompt + matched marks)
    renderer.refresh(state)
    # wait for user input (returns the selected mark, or None on cancel)
    output = read_line(state)
    # clear the screen
    renderer.erase()
    if not output:
        return state.input
    return output.cmd
def remove_command(search):
    ''' Remove a command interactively.

    Shows the same picker UI as selection; the chosen mark is removed from
    the user's marks file. Returns the removed mark, or None if cancelled.
    '''
    commands = command.load(get_user_marks_path())
    state = State(commands, search)
    renderer.refresh(state)
    selected_mark = read_line(state)
    if selected_mark:
        command.remove(commands, selected_mark)
        command.save(commands, get_user_marks_path())
    # clear the screen
    renderer.erase()
    return selected_mark
def read_line(state):
    ''' Interactive key loop: update `state` from keystrokes until the user
    confirms (ENTER) or cancels (ESC / Ctrl-C).

    Returns the selected command, or None when cancelled or when ENTER is
    pressed with no matches.
    '''
    output = None
    while True:
        c = readchar.get_symbol()
        if c == keys.ENTER:
            if state.get_matches():
                output = state.get_selected_match()
            break
        elif c == keys.CTRL_C or c == keys.ESC:
            # cancel: restore the prompt to its initial contents
            state.reset_input()
            break
        elif c == keys.CTRL_U:
            state.clear_input()
        elif c == keys.BACKSPACE:
            state.set_input(state.input[0:-1])
        elif c == keys.UP:
            state.select_previous()
        elif c == keys.DOWN or c == keys.TAB:
            state.select_next()
        elif c <= 126 and c >= 32:
            # printable ASCII: append the character to the prompt
            state.set_input(state.input + chr(c))
        # redraw after every keystroke (cancel/confirm break before this)
        renderer.refresh(state)
    return output
class State(object):
    ''' The app state: the user's typed input, the commands matching it, and
    which match is currently selected. '''

    def __init__(self, bookmarks, default_input):
        self.bookmarks = bookmarks                # full list of candidate commands
        self._selected_command_index = 0          # index into self.matches
        self.matches = []
        self.default_input = default_input        # kept so cancel can restore it
        self.set_input(default_input)

    def get_matches(self):
        return self.matches

    def reset_input(self):
        """Restore the prompt to its initial contents (used on cancel)."""
        self.input = self.default_input

    def set_input(self, input):
        self.input = input if input else ""
        self._update()

    def clear_input(self):
        self.set_input("")

    def clear_selection(self):
        self._selected_command_index = 0

    def select_next(self):
        # wrap around; stay at 0 when there are no matches
        self._selected_command_index = (self._selected_command_index + 1) % len(self.matches) if len(self.matches) else 0

    def select_previous(self):
        self._selected_command_index = (self._selected_command_index - 1) % len(self.matches) if len(self.matches) else 0

    def _update(self):
        """Refilter the bookmarks against the current input; reset selection."""
        self.matches = filter_commands(self.bookmarks, self.input)
        self._selected_command_index = 0

    def get_selected_match(self):
        if len(self.matches):
            return self.matches[self._selected_command_index]
        else:
            # BUG FIX: the original did `raise 'No matches found'`; raising a
            # string is a TypeError on Python 3 (and long deprecated on 2).
            raise LookupError('No matches found')
| {
"repo_name": "pindexis/marker",
"path": "marker/core.py",
"copies": "1",
"size": "4866",
"license": "mit",
"hash": -8688739651781158000,
"line_mean": 31.0131578947,
"line_max": 141,
"alpha_frac": 0.6224825319,
"autogenerated": false,
"ratio": 3.777950310559006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4900432842459006,
"avg_score": null,
"num_lines": null
} |
# app/logs.py
from logging import ERROR, DEBUG, Filter, Formatter, StreamHandler
from logging.handlers import TimedRotatingFileHandler
from flask import request
class ContextualFilter(Filter):
    """Logging filter that annotates each record with the current Flask
    request's path, HTTP method, and client IP address."""

    def filter(self, log_record):
        environ = request.environ
        log_record.url = request.path
        log_record.method = request.method
        log_record.ip = environ.get("REMOTE_ADDR")
        return True  # never drop records; this filter only annotates them
def init_app(app, remove_existing_handlers=False):
    """Attach contextual request logging to a Flask app.

    Adds a ContextualFilter plus a stderr StreamHandler to ``app.logger``.
    When ``app.config['ERROR_LOG_PATH']`` is set, also adds a daily-rotating
    file handler (7 days retained) using a multi-line format.

    Set ``remove_existing_handlers`` to drop Flask's default debug handler.
    """
    # Create the filter and add it to the base application logger
    context_provider = ContextualFilter()
    app.logger.addFilter(context_provider)

    # Optionally, remove Flask's default debug handler
    if remove_existing_handlers:
        del app.logger.handlers[:]

    # Create a new handler for log messages
    # that will send them to standard error
    handler = StreamHandler()

    # Add a formatter that makes use of our new contextual information
    log_format = ("%(asctime)s\t%(levelname)s\t"
                  "%(ip)s\t%(method)s\t%(url)s\t%(message)s")
    formatter = Formatter(log_format)
    handler.setFormatter(formatter)

    # Finally, attach the handler to our logger
    app.logger.addHandler(handler)

    # Only set up a file handler if we know where to put the logs
    if app.config.get("ERROR_LOG_PATH"):
        # Create one file for each day. Delete logs over 7 days old.
        file_handler = TimedRotatingFileHandler(filename=app.config["ERROR_LOG_PATH"],
                                                when="D", backupCount=7)
        # Use a multi-line format for this logger, for easier scanning
        file_formatter = Formatter("""
Time: %(asctime)s
Level: %(levelname)s
Method: %(method)s
Path: %(url)s
IP: %(ip)s
Message: %(message)s
---------------------""")
        # Capture everything at DEBUG and above in the file.
        file_handler.setLevel(DEBUG)
        # BUG FIX: Handler has no addFormatter() -- the original line was
        # commented out, so file_formatter was never applied; setFormatter()
        # is the correct API.
        file_handler.setFormatter(file_formatter)
        app.logger.addHandler(file_handler)
"repo_name": "ChristopherGS/sensor_readings",
"path": "app/logs.py",
"copies": "2",
"size": "2009",
"license": "bsd-3-clause",
"hash": -3422077123448581000,
"line_mean": 32.5,
"line_max": 87,
"alpha_frac": 0.6455948233,
"autogenerated": false,
"ratio": 4.159420289855072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004479593546368512,
"num_lines": 60
} |
"""Apply a function passed as parameter.
Defaults to passing."""
import logging
import pypes.component
log = logging.getLogger(__name__)
class SingleFunction(pypes.component.Component):
    """
    Apply the callable given by the 'function' parameter to each packet's
    'data' attribute (defaults to the identity function).

    mandatory input packet attributes:
    - data: the input to the function

    optional input packet attributes:
    - None

    parameters:
    - function: [default: lambda x: x]

    output packet attributes:
    - data: function(data)
    """

    # defines the type of component we're creating.
    __metatype__ = 'TRANSFORMER'

    def __init__(self):
        # initialize parent class
        pypes.component.Component.__init__(self)

        # Setup any user parameters required by this component
        # 2nd arg is the default value, 3rd arg is optional list of choices
        self.set_parameter('function', lambda x: x)

        # log successful initialization message
        log.debug('Component Initialized: %s', self.__class__.__name__)

    def run(self):
        # Define our components entry point
        while True:
            # the parameter is looked up on every scheduling cycle
            function = self.get_parameter('function')

            # for each packet waiting on our input port
            for packet in self.receive_all('in'):
                try:
                    data = packet.get("data")
                    packet.set("data", function(data))
                    log.debug("%s calculated %s",
                              self.__class__.__name__,
                              function.__name__,
                              exc_info=True)
                except:
                    # NOTE(review): bare except keeps the pipeline alive on any
                    # failure; the packet is still forwarded below, unmodified.
                    log.error('Component Failed: %s',
                              self.__class__.__name__, exc_info=True)

                # send the packet to the next component
                self.send('out', packet)

            # yield the CPU, allowing another component to run
            self.yield_ctrl()
| {
"repo_name": "Enucatl/pypes",
"path": "pypes/plugins/single_function.py",
"copies": "1",
"size": "1863",
"license": "apache-2.0",
"hash": -2654871278815607000,
"line_mean": 28.109375,
"line_max": 75,
"alpha_frac": 0.5507246377,
"autogenerated": false,
"ratio": 4.864229765013055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006510416666666666,
"num_lines": 64
} |
""" Apply aggregate functions to data.
"""
from operator import itemgetter
from .buffer import _ReaderBuffer
from .buffer import _WriterBuffer
__all__ = "AggregateReader", "AggregateWriter"
class _Aggregator(object):
""" Base class for AggregateReader and AggregateWriter.
During aggregation, incoming records are grouped, reduction functions are
applied to each group, and a single record is output for each group.
Records are presumed to be sorted such that all records in a group are
contiguous.
"""
@classmethod
def reduction(cls, callback, field, alias=None):
""" Create a reduction function from a callback.
The callback should take a sequence of values as its argument and
return a value, e.g. the built-in function sum(). The field argument
specifies which values to pass to the callback from the sequence of
records being reduced. This is either a single name or sequence of
names. In the latter case, arguments are passed to the callback as a
sequence of tuples. By default the reduction field is named after the
input field, or specify an alias.
"""
def wrapper(records):
""" Execute the callback as a reduction function.
"""
return {alias: callback(list(map(get, records)))}
if not alias:
alias = field
if isinstance(field, str):
field = field,
get = itemgetter(*field)
return wrapper
def __init__(self, key):
""" Initialize this object.
The key argument is either a single field name, a sequence of names,
or a key function. A key function must return a dict-like object
specifying the name and value for each key field. Key functions are
free to create key fields that are not in the incoming data.
Because the key function is called for every record, optimization is
(probably) worthwhile. For multiple key fields, passing in a hard-coded
key function instead of relying on the automatically-generated function
may be give better performance, e.g.
lambda record: {"key1": record["key1"], ...}
"""
if not callable(key):
if isinstance(key, str):
# Define a single-value key function.
name = key # force static binding
key = lambda record: {name: record[name]}
else:
# Define a key function for multiple fields.
names = key # force static binding
key = lambda record: {key: record[key] for key in names}
self._keyfunc = key
self._keyval = None
self._buffer = []
self._reductions = []
self._output = None # initialized by derived classes
return
def reduce(self, *callbacks):
""" Add reductions to this object or clear all reductions (default).
A reduction is an callable object that takes a sequence of records and
aggregates them into a single dict-like result keyed by field name.
A reduction can return one or more fields. Reduction fields do not have
to match the incoming records. A reduction function is free to create
new fields, and, conversely, incoming fields that do not have a
reduction will not be in the aggregated data. The reduction() class
method can be used to generate reduction functions from basic functions
like sum() or max().
Reductions are applied in order to each group of records in the input
sequence, and the results are merged to create one record per group.
If multiple reductions return a field with the same name, the latter
value will overwrite the existing value.
"""
if not callbacks:
self._reductions = []
return
for callback in callbacks:
self._reductions.append(callback)
return
def _queue(self, record):
""" Process an incoming record.
"""
keyval = self._keyfunc(record)
if keyval != self._keyval:
# This is a new group, finalize the buffered group.
self._flush()
self._buffer.append(record)
self._keyval = keyval
return
def _flush(self):
""" Apply all reductions to the buffered data and flush to output.
"""
if not self._buffer:
return
record = self._keyval
for callback in self._reductions:
# If multiple callbacks return the same field values, the latter
# value will overwrite the existing value.
record.update(callback(self._buffer))
self._output.append(record)
self._buffer = []
return
class AggregateReader(_Aggregator, _ReaderBuffer):
    """ Reduce records read from another reader into aggregates. """
    def __init__(self, reader, key):
        """ Initialize this object. """
        _Aggregator.__init__(self, key)
        _ReaderBuffer.__init__(self, reader)

    def _uflow(self):
        """ Handle an underflow condition.

        Called when the wrapped reader is exhausted and the output queue
        is empty: flush any remaining buffered group, or signal the end
        of iteration when nothing is left.
        """
        if self._buffer:
            # Last chance to flush buffered data.
            self._flush()
        else:
            # All data has been output.
            raise StopIteration
class AggregateWriter(_Aggregator, _WriterBuffer):
    """ Reduce records into aggregates before handing them to a writer. """
    def __init__(self, writer, key):
        """ Initialize this object. """
        # Wire up both cooperating bases explicitly.
        _Aggregator.__init__(self, key)
        _WriterBuffer.__init__(self, writer)
| {
"repo_name": "mdklatt/serial-python",
"path": "src/serial/core/reduce.py",
"copies": "1",
"size": "6042",
"license": "mit",
"hash": -7115087637715033000,
"line_mean": 34.3333333333,
"line_max": 80,
"alpha_frac": 0.5958291956,
"autogenerated": false,
"ratio": 5.064543168482817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01690827213220035,
"num_lines": 171
} |
#Apply a mask to a fasta file
#The mask contains [0-9] and possibly other characters (counts as -1)
#Replaces everything in the original fasta with Ns if it is below the integer level specified
#If no mask file is specified, then it just produces an output that reads 0/1 according to
#whether the input is uncalled (0) or called (1)
from __future__ import division, print_function
import argparse, sys
from pyfaidx import Fasta
# Output fasta line width.
WRAP_LENGTH=50
# Unambiguous base calls.
IUPAC_ACGT=["A","C","G","T"]
# IUPAC ambiguity codes for heterozygous (two-base) calls.
IUPAC_HETS=["R", "Y", "S", "W", "K", "M"]
# Any character counted as a "called" base.
IUPAC_CALLS=IUPAC_ACGT+IUPAC_HETS
################################################################################
def parse_options():
    """
    Build and run the command-line argument parser.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fasta', type=str, default="",
                        help="input fasta file")
    parser.add_argument('-m', '--mask', type=str, default="",
                        help="Mask file (fasta)")
    parser.add_argument('-c', '--level', type=int, default=1,
                        help="Filter level")
    return parser.parse_args()
################################################################################
def main(options):
    """
    Print the masked fasta (with -m) or a 0/1 called/uncalled encoding.

    With a mask: every base whose mask character is a digit >= level is
    kept, everything else becomes N. Without a mask: each base is
    re-encoded as "1" if it is an IUPAC call (ACGT or het) else "0".
    """
    fa=Fasta(options.fasta)
    # BUG FIX: chroms was previously defined only inside the mask branch,
    # so running without --mask raised a NameError in the else branch.
    chroms=[x for x in fa.keys()]
    if options.mask:
        mask=Fasta(options.mask)
        if not set(fa.keys()).issubset(set(mask.keys())):
            raise Exception("Mask does not include all chromosomes in fasta")
        # Validate lengths up front so we fail before printing anything.
        for chrom in chroms:
            if len(fa[chrom][:]) != len(mask[chrom][:]):
                raise Exception("Chromosome "+chrom+" is different length in mask than fasta")
        for chrom in chroms:
            print("Masking chromosome "+chrom, file=sys.stderr)
            faseq=fa[chrom][:].seq
            maskseq=mask[chrom][:].seq
            # Keep bases whose mask digit clears the level; N otherwise.
            new_seq="".join([x if y.isdigit() and int(y)>=options.level else "N" for x,y in zip(faseq,maskseq)])
            wrap_seq="\n".join(new_seq[i:i+WRAP_LENGTH] for i in range(0,len(new_seq), WRAP_LENGTH))
            print(">"+chrom)
            print(wrap_seq)
    else:
        for chrom in chroms:
            print("Masking chromosome "+chrom, file=sys.stderr)
            faseq=fa[chrom][:].seq
            # Encode called (IUPAC ACGT/het) bases as 1, everything else 0.
            new_seq="".join(["1" if x in IUPAC_CALLS else "0" for x in faseq])
            wrap_seq="\n".join(new_seq[i:i+WRAP_LENGTH] for i in range(0,len(new_seq), WRAP_LENGTH))
            print(">"+chrom)
            print(wrap_seq)
################################################################################
# Script entry point: parse CLI arguments, then print the (masked) fasta.
if __name__=="__main__":
    options=parse_options()
    main(options)
| {
"repo_name": "mathii/gdc",
"path": "maskfa.py",
"copies": "1",
"size": "2666",
"license": "apache-2.0",
"hash": 6165873969583507000,
"line_mean": 35.027027027,
"line_max": 112,
"alpha_frac": 0.5266316579,
"autogenerated": false,
"ratio": 3.7286713286713287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9498025729890018,
"avg_score": 0.05145545133626202,
"num_lines": 74
} |
"""Apply arbitrary daily shifts within the climate file."""
import sys
import os
from datetime import date, timedelta
from pyiem.util import get_dbconn
from pandas.io.sql import read_sql
from tqdm import tqdm
# HUC12 watershed identifiers to process, one per line in myhucs.txt.
# FIX: use a context manager so the file handle is not leaked at import.
with open("myhucs.txt") as _huc_fh:
    MYHUCS = [x.strip() for x in _huc_fh]
# Climate files cover this window; shifted dates outside it are dropped.
FLOOR = date(2007, 1, 1)
CEILING = date(2020, 12, 31)
def do(origfn, scenario, dayshift):
    """Copy one breakpoint climate file, shifting all dates by dayshift days.

    The copy is written to the same path with the scenario directory
    swapped in for "/0/". Dates shifted outside [FLOOR, CEILING] are
    dropped; the first/last in-window day is replicated as needed so the
    window stays covered.

    Returns True when origfn exists and was processed, False otherwise.
    """
    if not os.path.isfile(origfn):
        return False
    newfn = origfn.replace("/0/", f"/{scenario}/")
    newdir = os.path.dirname(newfn)
    if not os.path.isdir(newdir):
        os.makedirs(newdir)
    # FIX: read via a context manager; the handle was previously leaked.
    with open(origfn) as src:
        lines = src.readlines()
    with open(newfn, "w") as fh:
        linenum = 0
        while linenum < len(lines):
            # First 15 lines are the file header; copy them verbatim.
            if linenum < 15:
                fh.write(lines[linenum])
                linenum += 1
                continue
            tokens = lines[linenum].strip().split("\t")
            # tokens: day, month, year, number-of-breakpoint lines following.
            breakpoints = int(tokens[3])
            valid = date(int(tokens[2]), int(tokens[1]), int(tokens[0]))
            newvalid = valid + timedelta(days=dayshift)
            if newvalid < FLOOR or newvalid > CEILING:
                # Shifted outside the window: skip this day's block entirely.
                linenum += 1
                linenum += breakpoints
                continue
            # If shifting forward, need to replicate 1 Jan
            if valid == FLOOR and dayshift > 0:
                for i in range(dayshift):
                    newvalid2 = FLOOR + timedelta(days=i)
                    tokens[0] = str(newvalid2.day)
                    tokens[1] = str(newvalid2.month)
                    tokens[2] = str(newvalid2.year)
                    fh.write("\t".join(tokens) + "\n")
                    for j in range(breakpoints):
                        fh.write(lines[linenum + j + 1].strip() + "\n")
            tokens[0] = str(newvalid.day)
            tokens[1] = str(newvalid.month)
            tokens[2] = str(newvalid.year)
            fh.write("\t".join(tokens) + "\n")
            for j in range(breakpoints):
                fh.write(lines[linenum + j + 1].strip() + "\n")
            # If shifting backward, need to replicate 31 Dec
            if valid == CEILING and dayshift < 0:
                for i in range(dayshift + 1, 1):
                    newvalid2 = CEILING + timedelta(days=i)
                    tokens[0] = str(newvalid2.day)
                    tokens[1] = str(newvalid2.month)
                    tokens[2] = str(newvalid2.year)
                    fh.write("\t".join(tokens) + "\n")
                    for j in range(breakpoints):
                        fh.write(lines[linenum + j + 1].strip() + "\n")
            linenum += 1
            linenum += breakpoints
    return True
def main(argv):
    """Go Main Go."""
    scenario = int(argv[1])
    dayshift = int(argv[2])
    # Pull the distinct baseline climate files for our HUC12s from the DB.
    df = read_sql(
        "SELECT distinct climate_file from flowpaths where scenario = 0 and "
        "huc_12 in %s",
        get_dbconn("idep"),
        params=(tuple(MYHUCS),),
        index_col=None,
    )
    failed = 0
    # Progress bar labelled with the file currently being shifted.
    progress = tqdm(df["climate_file"].values)
    for origfn in progress:
        progress.set_description(origfn.split("/")[-1])
        success = do(origfn, scenario, dayshift)
        if not success:
            failed += 1
    print(f"{failed}/{len(df.index)} runs failed.")
# Entry point: expects scenario and dayshift as command-line arguments.
if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/dep",
"path": "scripts/climatechange/shift_cli.py",
"copies": "2",
"size": "3360",
"license": "mit",
"hash": -8582318352226996000,
"line_mean": 35.1290322581,
"line_max": 77,
"alpha_frac": 0.524702381,
"autogenerated": false,
"ratio": 3.7086092715231787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5233311652523178,
"avg_score": null,
"num_lines": null
} |
"""Apply arbitrary multiplier to baseline precip.
see GH #39
"""
import os
import sys
from multiprocessing import Pool
from tqdm import tqdm
def editor(arg):
    """Scale the precipitation column of one climate file copy.

    arg is a (filename, scenario, multiplier) triple. The file is copied
    from the scenario-0 tree into the target scenario tree with every
    two-token data line's second value multiplied; other lines pass
    through unchanged.
    """
    fn, scenario, multiplier = arg
    newfn = fn.replace("/i/0/", "/i/%s/" % (scenario,))
    newdir = os.path.dirname(newfn)
    if not os.path.isdir(newdir):
        try:
            # subject to race conditions
            os.makedirs(newdir)
        except FileExistsError:
            pass
    # FIX: context managers close both files; previously the handles were
    # leaked (and never closed at all on the sys.exit error path).
    with open(fn) as src, open(newfn, "w") as fp:
        for line in src:
            tokens = line.split()
            if len(tokens) != 2:
                # Header or other non-data lines are copied verbatim.
                fp.write(line)
                continue
            try:
                fp.write("%s %.2f\n" % (tokens[0], float(tokens[1]) * multiplier))
            except Exception as exp:
                print("Editing %s hit exp: %s" % (fn, exp))
                sys.exit()
def finder(scenario, multiplier):
    """yield what we can find."""
    # Walk the baseline climate tree and build one job triple per file.
    return [
        ["%s/%s" % (dirname, fn), scenario, multiplier]
        for dirname, _dirpath, filenames in os.walk("/i/0/cli")
        for fn in filenames
    ]
def main(argv):
    """Go Main Go."""
    scenario = int(argv[1])
    # Refuse to overwrite the baseline scenario.
    if scenario == 0:
        print("NO!")
        return
    multiplier = float(argv[2])
    queue = finder(scenario, multiplier)
    print("Applying %.2f multiplier for scenario %s" % (multiplier, scenario))
    workers = Pool()
    # Drain the iterator under a progress bar; results are unused.
    for _ in tqdm(workers.imap_unordered(editor, queue), total=len(queue)):
        pass
# Entry point: expects scenario and multiplier as command-line arguments.
if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/dep",
"path": "scripts/cligen/arb_precip_delta.py",
"copies": "2",
"size": "1552",
"license": "mit",
"hash": -7547725567089156000,
"line_mean": 24.4426229508,
"line_max": 78,
"alpha_frac": 0.5592783505,
"autogenerated": false,
"ratio": 3.6261682242990654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5185446574799065,
"avg_score": null,
"num_lines": null
} |
"""Apply a set of rolling filter to a data cube
This version takes a single CSV file as input parameter with optional job_id
parameter. This interface is optimized for submitting jobs on a Slurm cluster.
"""
import argparse
import os
from datetime import datetime
from glob import glob
from multiprocessing import Pool, Array, current_process
import numpy as np
import xarray as xr
import pandas as pd
from opstats.foreground import apply_filter
def do_mask(i):
    """Filter one frequency bin and store its center channel.

    Reads filter_files[i] (module global set in __main__), applies it to
    the matching channels of the module-global data_array, and writes
    the filtered center channel into the shared filtered_data_array.
    Runs inside a worker process of the multiprocessing Pool.
    """
    if args.verbose:
        print('... P{:d}: applying filter {:s}'
              .format(current_process().pid, filter_files[i].split('/')[-1]))
    filter_da = xr.open_dataarray(filter_files[i])
    filter_array = filter_da.values
    # Channels of the data cube this filter applies to.
    data_channels = filter_da.attrs['frequency_channels']
    # The center channel of the filtered stack is kept as the output image.
    image_channel = int(np.floor(filter_da.shape[0] / 2))
    # Figure out FFT and filter normalization
    # FFT normalization factor: grid spacings in image (x, y, f) and
    # Fourier (u, v, e) coordinates stored in the filter's attributes.
    x = filter_da.attrs['x']
    y = filter_da.attrs['y']
    f = filter_da.attrs['f']
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    df = f[1] - f[0]
    u = filter_da.attrs['u']
    v = filter_da.attrs['v']
    e = filter_da.attrs['e']
    du = u[1] - u[0]
    dv = v[1] - v[0]
    de = e[1] - e[0]
    fft_norm = dx * dy * df
    ifft_norm = du * dv * de * filter_array.size
    # Filter normalization factor: scale by the filter's power so output
    # amplitudes are comparable between bins.
    filter_volume = np.sum(filter_array.size * du * dv * de)
    filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de)
    filter_norm = np.sqrt(filter_volume / filter_integral)
    # Apply filter
    filtered_data = apply_filter(
        data_array[data_channels], filter_array,
        fft_multiplier=fft_norm, ifft_multiplier=ifft_norm,
        output_multiplier=filter_norm, apply_window_func=args.apply_window_func,
        invert_filter=False
    ).real
    # Select and store the center channel of the filtered data array
    filtered_data_array[data_channels[image_channel]] = \
        filtered_data[image_channel]
# Command-line entry point: run one CSV-row job — apply rolling filters to a
# data cube across a multiprocessing pool, then save the stitched cube.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'param_file', type=str,
        help='CSV file contain job parameters.\n'
             'Column order should be: job_id (int), input_file (str), '
             'filter_directory (str), output_file (str). '
             'First row should be the column labels (header).'
    )
    parser.add_argument('--job_id', type=int, default=None,
                        help='job_id number in the job_id column to execute. '
                             'First data row will be execute if not given.')
    parser.add_argument('--apply_window_func', action='store_true',
                        help='apply Blackman-nuttal window function before FFT')
    parser.add_argument('--nprocs', type=int, default=4,
                        help='number of processes to spawn')
    parser.add_argument('--verbose', action='store_true',
                        help='print subprocess tasks at runtime')
    args = parser.parse_args()
    start_time = datetime.now()
    # Reading and selecting parameters to execute
    param = pd.read_csv(args.param_file, header=0, index_col=0)
    if args.job_id is not None:
        param_sel = param.loc[args.job_id]  # Select param by job_id number
    else:
        param_sel = param.iloc[0]  # Select the first data row.
    input_file = param_sel.iloc[0]
    filter_directory = param_sel.iloc[1]
    output_file = param_sel.iloc[2]
    # Read input data cube
    data_da = xr.open_dataarray(input_file)
    data_array = data_da.values
    # Create shared memory array to store filtered data cube
    # ('d' = C double; worker processes write into it via do_mask).
    filtered_data_array_base = Array('d', data_array.size)
    filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
    filtered_data_array.shape = data_array.shape
    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(filter_directory))
    filter_files.sort()
    nbins = len(filter_files)
    # Attributes for output files
    # Temporary read in the first filter to read filter information
    da0 = xr.open_dataarray(filter_files[0])
    extra_attrs = {'filter_type': 'wedge',
                   'filter_bandwidth': da0.attrs['filter_bandwidth'],
                   'image_bandwidth': da0.attrs['channel_bandwidth'],
                   'theta': da0.attrs['theta'],
                   'theta_unit': da0.attrs['theta_unit'],
                   'slope_shift_pix': da0.attrs['slope_shift_pix']}
    da0.close()
    output_attrs = data_da.attrs
    for key, value in extra_attrs.items():
        output_attrs[key] = value
    # Check if output directory exist. If not, make it
    output_dir = output_file.rsplit('/', 1)[0]
    os.makedirs(output_dir, exist_ok=True)
    # Start processing pool
    pool = Pool(args.nprocs)
    pool.map(do_mask, range(nbins))
    pool.close()
    pool.join()
    # Save output
    da = xr.DataArray(filtered_data_array, dims=['f', 'y', 'x'],
                      coords={'x': data_da.x.values, 'y': data_da.y.values,
                              'f': data_da.f.values},
                      attrs=output_attrs)
    da.to_netcdf(output_file)
    print('Finish applying rolling filters from {:s} to {:s}. '
          'Output is saved to {:s}. Time spent: {:.5f}'
          .format(filter_directory, input_file, output_file,
                  (datetime.now() - start_time).total_seconds()))
| {
"repo_name": "piyanatk/sim",
"path": "scripts/fg1p/apply_filters3.py",
"copies": "1",
"size": "5344",
"license": "mit",
"hash": 4169472958002339000,
"line_mean": 36.6338028169,
"line_max": 80,
"alpha_frac": 0.6122754491,
"autogenerated": false,
"ratio": 3.569806279225117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9681212314310164,
"avg_score": 0.0001738828029907842,
"num_lines": 142
} |
"""Apply a set of rolling filter to a data cubes and separately save output
without stitching to form an image cube"""
import argparse
import os
from glob import glob
from multiprocessing import Pool, current_process
import numpy as np
import xarray as xr
from opstats.foreground import apply_filter
def run(binnum):
    """Apply filter binnum to the data cube and save the filtered cube.

    Uses module globals prepared in __main__: args, filter_files,
    data_array and field_num. One NetCDF output file is written per bin
    (no stitching). Runs inside a multiprocessing Pool worker.
    """
    process = current_process().pid
    print('... P{:d}: applying filter {:s}'
          .format(process, filter_files[binnum].split('/')[-1]))
    filter_da = xr.open_dataarray(filter_files[binnum])
    filter_array = filter_da.values
    data_channels = filter_da.attrs['frequency_channels']
    filter_bandwidth = filter_da.attrs['filter_bandwidth']
    # Figure out FFT and filter normalization
    # FFT normalization factor: grid spacings in image (x, y, f) and
    # Fourier (u, v, e) coordinates from the filter's attributes.
    x = filter_da.attrs['x']
    y = filter_da.attrs['y']
    f = filter_da.attrs['f']
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    df = f[1] - f[0]
    u = filter_da.attrs['u']
    v = filter_da.attrs['v']
    e = filter_da.attrs['e']
    du = u[1] - u[0]
    dv = v[1] - v[0]
    de = e[1] - e[0]
    fft_norm = dx * dy * df
    ifft_norm = du * dv * de * filter_array.size
    # Filter normalization factor
    filter_volume = np.sum(filter_array.size * du * dv * de)
    filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de)
    filter_norm = np.sqrt(filter_volume / filter_integral)
    # Apply filter
    filtered_data = apply_filter(
        data_array[data_channels], filter_array,
        fft_multiplier=fft_norm, ifft_multiplier=ifft_norm,
        output_multiplier=filter_norm, apply_window_func=args.apply_window_func,
        invert_filter=False
    ).real
    # Carry filter metadata onto the output; drop the image-space coordinate
    # attrs and attach the Fourier-space (kx, ky, kz) ones instead.
    out_da_attrs = filter_da.attrs
    out_da_attrs.pop('x')
    out_da_attrs.pop('y')
    out_da_attrs.pop('f')
    out_da_attrs['kx'] = filter_da.kx.values
    out_da_attrs['ky'] = filter_da.ky.values
    out_da_attrs['kz'] = filter_da.kz.values
    out_da = xr.DataArray(
        filtered_data, dims=['f', 'y', 'x'], coords={'f': f, 'y': y, 'x': x},
        attrs=out_da_attrs
    )
    outfile = '{:s}/signal_cube_filtered_fbw{:.2f}MHz_{:03d}_bin{:03d}.nc'\
        .format(args.output_directory, filter_bandwidth / 1e6,
                field_num, binnum)
    out_da.to_netcdf(outfile)
# Command-line entry point: apply each filter in parallel and save each
# filtered cube separately (no stitching into one image cube).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('data_cube', type=str)
    parser.add_argument('filter_directory', type=str,
                        help='filter directory containing *.nc filter files.')
    parser.add_argument('output_directory', type=str)
    parser.add_argument('--apply_window_func', action='store_true')
    parser.add_argument('--nprocs', type=int, default=4)
    args = parser.parse_args()
    print('Data cube: {:s}'.format(args.data_cube))
    print('Filter directory: {:s}'.format(args.filter_directory))
    print('Output directory: {:s}'.format(args.output_directory))
    # Read input data cube
    data_da = xr.open_dataarray(args.data_cube)
    data_array = data_da.values
    # Field number is parsed from the end of the cube filename
    # (assumes "..._NNN.nc" naming — TODO confirm against producers).
    field_num = int(args.data_cube.split('_')[-1][0:-3])
    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(args.filter_directory))
    filter_files.sort()
    nbins = len(filter_files)
    # Check output directory
    os.makedirs(args.output_directory, exist_ok=True)
    # Start processing pool
    pool = Pool(args.nprocs)
    pool.map(run, range(nbins))
    pool.close()
    pool.join()
| {
"repo_name": "piyanatk/sim",
"path": "scripts/fg1p/apply_filters_nostitch.py",
"copies": "1",
"size": "3401",
"license": "mit",
"hash": -1063452672134457900,
"line_mean": 33.01,
"line_max": 80,
"alpha_frac": 0.6277565422,
"autogenerated": false,
"ratio": 3.2145557655954633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9341695023844845,
"avg_score": 0.0001234567901234568,
"num_lines": 100
} |
"""Apply a set of rolling filter to a data cubes"""
import argparse
import os
from datetime import datetime
from glob import glob
from multiprocessing import Pool, Array, current_process
import numpy as np
import xarray as xr
from opstats.foreground import apply_filter
def do_mask(i):
    """Apply the i-th rolling filter and keep its center channel.

    Operates on module globals prepared in __main__: args, filter_files,
    data_array (input cube) and filtered_data_array (shared output).
    """
    process = current_process().pid
    if args.verbose:
        print('... P{:d}: applying filter {:s}'
              .format(process, filter_files[i].split('/')[-1]))
    filter_da = xr.open_dataarray(filter_files[i])
    filter_array = filter_da.values
    data_channels = filter_da.attrs['frequency_channels']
    image_channel = int(np.floor(filter_da.shape[0] / 2))
    # Grid spacings in image (x, y, f) and Fourier (u, v, e) coordinates,
    # used to normalize the forward and inverse FFTs.
    attrs = filter_da.attrs
    dx, dy, df = (attrs[axis][1] - attrs[axis][0] for axis in ('x', 'y', 'f'))
    du, dv, de = (attrs[axis][1] - attrs[axis][0] for axis in ('u', 'v', 'e'))
    fft_norm = dx * dy * df
    ifft_norm = du * dv * de * filter_array.size
    # Normalize by the filter's power so bin amplitudes are comparable.
    filter_volume = np.sum(filter_array.size * du * dv * de)
    filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de)
    filter_norm = np.sqrt(filter_volume / filter_integral)
    filtered_data = apply_filter(
        data_array[data_channels], filter_array,
        fft_multiplier=fft_norm, ifft_multiplier=ifft_norm,
        output_multiplier=filter_norm,
        apply_window_func=args.apply_window_func,
        invert_filter=False
    ).real
    # Only the center channel of the filtered stack is kept.
    filtered_data_array[data_channels[image_channel]] = \
        filtered_data[image_channel]
# Command-line entry point: filter a data cube bin-by-bin in parallel and
# stitch the center channels into one output cube.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('data_cube', type=str)
    parser.add_argument('filter_directory', type=str,
                        help='filter directory containing *.nc filter files.')
    parser.add_argument('output_file', type=str)
    parser.add_argument('--apply_window_func', action='store_true',
                        help='apply Blackman-nuttal window function before fft')
    parser.add_argument('--nprocs', type=int, default=4)
    parser.add_argument('--verbose', action='store_true',
                        help='print subprocess tasks at runtime')
    args = parser.parse_args()
    start_time = datetime.now()
    # Read input data cube
    data_da = xr.open_dataarray(args.data_cube)
    data_array = data_da.values
    # Create shared memory array to store filtered data cube ('d' = double)
    filtered_data_array_base = Array('d', data_array.size)
    filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
    filtered_data_array.shape = data_array.shape
    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(args.filter_directory))
    filter_files.sort()
    nbins = len(filter_files)
    # Attributes for output files
    # Temporary read in the first filter to retrive filter information
    da0 = xr.open_dataarray(filter_files[0])
    filter_bandwidth = da0.attrs['filter_bandwidth']
    image_bandwidth = da0.attrs['channel_bandwidth']
    da0.close()
    output_attrs = data_da.attrs
    extra_attrs = {'filter_type': 'wedge',
                   'extra_filter_shift': 'None',
                   'filter_bandwidth': filter_bandwidth,
                   'image_bandwidth': image_bandwidth}
    for key, value in extra_attrs.items():
        output_attrs[key] = value
    # Check output directory
    output_dir = args.output_file.rsplit('/', 1)[0]
    os.makedirs(output_dir, exist_ok=True)
    # Start processing pool
    pool = Pool(args.nprocs)
    pool.map(do_mask, range(nbins))
    pool.close()
    pool.join()
    # Save output
    da = xr.DataArray(filtered_data_array, dims=['f', 'y', 'x'],
                      coords={'x': data_da.x.values, 'y': data_da.y.values,
                              'f': data_da.f.values},
                      attrs=output_attrs)
    da.to_netcdf(args.output_file)
    print('Finish applying rolling filters from {:s} to {:s}. '
          'Output is saved to {:s}. Time spent: {:.5f}'
          .format(args.filter_directory, args.data_cube, args.output_file,
                  (datetime.now() - start_time).total_seconds()))
| {
"repo_name": "piyanatk/sim",
"path": "scripts/fg1p/apply_filters.py",
"copies": "1",
"size": "4384",
"license": "mit",
"hash": 32313809912170416,
"line_mean": 35.2314049587,
"line_max": 80,
"alpha_frac": 0.6204379562,
"autogenerated": false,
"ratio": 3.5241157556270095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46445537118270097,
"avg_score": null,
"num_lines": null
} |
"""Apply a set of rolling filter to a data cubes
This copy read complex filter dataset.
"""
import argparse
import os
from glob import glob
from multiprocessing import Pool, Array, current_process
import numpy as np
import xarray as xr
from opstats.foreground import apply_filter
def do_mask(i):
    """Apply complex filter i to the data cube and keep its center channel.

    Unlike the sibling scripts, the filter is stored as a Dataset with
    separate 'real' and 'imag' variables, recombined here into one
    complex array. Operates on module globals set in __main__:
    filter_files, data_array and the shared filtered_data_array.
    """
    process = current_process().pid
    print('... P{:d}: applying filter {:s}'
          .format(process, filter_files[i].split('/')[-1]))
    filter_ds = xr.open_dataset(filter_files[i])
    # Recombine the complex filter from its stored real/imag parts.
    filter_array = filter_ds['real'].values + (1j * filter_ds['imag'].values)
    data_channels = filter_ds.attrs['frequency_channels']
    image_channel = int(np.floor(filter_ds['real'].shape[0] / 2))
    # Figure out FFT and filter normalization
    # FFT normalization factor: grid spacings in image (x, y, f) and
    # Fourier (u, v, e) coordinates from the filter's attributes.
    x = filter_ds.attrs['x']
    y = filter_ds.attrs['y']
    f = filter_ds.attrs['f']
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    df = f[1] - f[0]
    u = filter_ds.attrs['u']
    v = filter_ds.attrs['v']
    e = filter_ds.attrs['e']
    du = u[1] - u[0]
    dv = v[1] - v[0]
    de = e[1] - e[0]
    fft_norm = dx * dy * df
    ifft_norm = du * dv * de * filter_array.size
    # Filter normalization factor
    filter_volume = np.sum(filter_array.size * du * dv * de)
    filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de)
    filter_norm = np.sqrt(filter_volume / filter_integral)
    # Apply filter (the window function is always applied in this variant)
    filtered_data = apply_filter(
        data_array[data_channels], filter_array,
        fft_multiplier=fft_norm, ifft_multiplier=ifft_norm,
        output_multiplier=filter_norm, apply_window_func=True,
        invert_filter=False
    ).real
    # Select and store the center channel of the filtered data array
    filtered_data_array[data_channels[image_channel]] = \
        filtered_data[image_channel]
# Command-line entry point for the complex-filter variant: filter the cube
# bin-by-bin in parallel and stitch the center channels into one output.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('data_cube', type=str)
    parser.add_argument('filter_directory', type=str,
                        help='filter directory containing *.nc filter files.')
    parser.add_argument('output_file', type=str)
    parser.add_argument('--nprocs', type=int, default=4)
    args = parser.parse_args()
    print('Data cube: {:s}'.format(args.data_cube))
    print('Filter directory: {:s}'.format(args.filter_directory))
    # Read input data cube
    data_da = xr.open_dataarray(args.data_cube)
    data_array = data_da.values
    # Create shared memory array to store filtered data cube
    filtered_data_array_base = Array('d', data_array.size)
    filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
    filtered_data_array.shape = data_array.shape
    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(args.filter_directory))
    filter_files.sort()
    nbins = len(filter_files)
    # Attributes for output files
    # Temporary read in the first filter to retrive filter information
    ds0 = xr.open_dataset(filter_files[0])
    filter_bandwidth = ds0.attrs['filter_bandwidth']
    image_bandwidth = ds0.attrs['channel_bandwidth']
    print('Filter bandwidth: {:.1f} Hz'.format(filter_bandwidth))
    print('Image bandwidth: {:.1f} Hz'.format(image_bandwidth))
    ds0.close()
    output_attrs = data_da.attrs
    extra_attrs = {'filter_type': 'wedge',
                   'extra_filter_shift': 'None',
                   'filter_bandwidth': filter_bandwidth,
                   'image_bandwidth': image_bandwidth}
    for key, value in extra_attrs.items():
        output_attrs[key] = value
    # Check output directory
    output_dir = args.output_file.rsplit('/', 1)[0]
    os.makedirs(output_dir, exist_ok=True)
    # Start processing pool
    pool = Pool(args.nprocs)
    pool.map(do_mask, range(nbins))
    pool.close()
    pool.join()
    # Save output
    da = xr.DataArray(filtered_data_array, dims=['f', 'y', 'x'],
                      coords={'x': data_da.x.values, 'y': data_da.y.values,
                              'f': data_da.f.values},
                      attrs=output_attrs)
    da.to_netcdf(args.output_file)
    print('Saving out put to {:s}'.format(args.output_file))
| {
"repo_name": "piyanatk/sim",
"path": "scripts/fg1p/apply_filters2.py",
"copies": "1",
"size": "4136",
"license": "mit",
"hash": 7481226182816106000,
"line_mean": 34.3504273504,
"line_max": 78,
"alpha_frac": 0.6266924565,
"autogenerated": false,
"ratio": 3.4380714879467997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45647639444468,
"avg_score": null,
"num_lines": null
} |
# applyconfidence.py
# Applies models that were trained by logisticconfidence.py in order to estimate
# the likely accuracy of page-level predictions for volumes. Accuracy is being
# estimated at the volume level so that users can choose to filter out volumes
# of dubious reliability.
# We're generating two kinds of predictions. The estimate of "overall" accuracy
# is based on, and attempts to predict, the overall number of pages correctly predicted
# in any given volume. We also produce genre-specific predictions for drama, fiction,
# and poetry, which attempt to predict the proportion of pages predicted to be genre X
# that are correctly identified. (Actually, technically all of these predictions are based
# on models that attempt to predict the number of *words* rather than the sheer number
# of pages correctly identified.)
# The way we do it, technically, is odd. Instead of training a model that directly
# predicts accuracy, we train logistic models that estimate the probability
# of a binary threshold -- i.e., what's the *probability* that the pages in this volume are
# more than 95% correctly identified by genre?
# For reasons that I do not pretend to fully understand, this turned out more accurate than a lot of
# other possible modeling strategies. (I think the basic reason is that the function we're
# dealing with here is nonlinear.) Anyway, this approach worked well, even when cross-validated,
# and some others didn't.
# Of course, what users really want to know is, what threshold should I set if I want to
# ensure that the corpus I'm getting has a certain given level of precision? I've calculated
# that in an imperfect, ad-hoc way, by measuring the recall and precision stats for corpora
# created by thresholding my training corpus at different probability levels. This gives me
# predicted precision and recall curves, which I also smoothed with lowess regression to
# minimize the influence of arbitrary artefacts in the training set. Then I can use the
# predicted probability of accuracy in an individual volume to infer, What precision or recall
# would I likely get *if* I cut the whole corpus at this probability threshold, discarding
# all volumes predicted to be less reliable.
import json
import os, sys
import numpy as np
import pandas as pd
import SonicScrewdriver as utils
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from scipy.stats.stats import pearsonr
import pickle, csv
def intpart(afloat):
    ''' Given a float between 0 and 1, returns an index between 0 and 99.

    We use this to index into precision and recall curves that were
    calculated with thresholds varying through range(0, 1, 0.01).
    '''
    # Truncate to a percentage bucket and clamp into the curve's range.
    return max(0, min(99, int(afloat * 100)))
def calibrate(probability, curveset):
    ''' Translate one volume's predicted probability into corpus-level
    estimates: the thresholded probability itself (as a fraction), plus
    the precision and recall we'd expect for a corpus cut at that level.
    '''
    idx = intpart(probability)
    return idx / 100, curveset['precision'][idx], curveset['recall'][idx]
def sequence_to_counts(genresequence):
    '''Converts a sequence of page-level predictions to
    a dictionary of counts reflecting the number of pages
    assigned to each genre. Also reports the largest genre.

    Note that this function cannot return "bio" as the largest genre:
    if biography wins, it is reported as "non" (nonfiction). It still
    counts bio separately, but ensures that all votes for bio are also
    votes for non.
    '''
    genrecounts = dict()
    for page in genresequence:
        utils.addtodict(page, 1, genrecounts)
        # Biography pages double as nonfiction votes, so 'non' absorbs them.
        if page == 'bio':
            utils.addtodict('non', 1, genrecounts)
    # Convert the dictionary of counts into a sorted list, and take the max.
    # NOTE(review): assumes utils.sortkeysbyvalue returns (count, key)
    # tuples sorted descending, so [0][1] is the winning genre — confirm
    # against SonicScrewdriver.
    genretuples = utils.sortkeysbyvalue(genrecounts, whethertoreverse = True)
    maxgenre = genretuples[0][1]
    # Never report 'bio' as the top genre; fold it into 'non'.
    if maxgenre == 'bio':
        maxgenre = 'non'
    return genrecounts, maxgenre
def count_flips(sequence):
    ''' Count the genre transitions in a page sequence. Volumes that go
    back and forth a lot between genres are less reliable than those with
    a stable sequence, so flips are used as a feature.

    The comparison starts from an empty-string sentinel, so a nonempty
    sequence always registers at least one flip (its first page).
    '''
    shifted = [""] + list(sequence)
    return sum(1 for previous, current in zip(shifted, sequence)
               if current != previous)
def normalizearray(featurearray):
    '''Normalizes a 2-D numpy array in place, centering each column on its
    mean and scaling by its standard deviation. Returns the same array
    object it was given.

    The original implementation looped column by column and also
    accumulated `means` and `stdevs` lists that were never used; this
    version does the same arithmetic vectorized, dropping the dead code.

    Note: a zero-variance column divides by zero (NaN/inf), exactly as in
    the original loop — callers are expected to supply nondegenerate
    features.
    '''
    means = featurearray.mean(axis=0)
    stdevs = featurearray.std(axis=0)
    # Broadcast over rows: identical per-column arithmetic to the old loop.
    featurearray[:, :] = (featurearray - means) / stdevs
    return featurearray
def normalizeformodel(featurearray, modeldict):
    '''Center and scale a single volume's one-dimensional feature vector,
    using the means and standard deviations stored alongside a trained
    model, so features are on the scale the model was fitted on.
    Mutates and returns the array.
    '''
    means = modeldict['means']
    stdevs = modeldict['stdevs']
    for idx in range(len(featurearray)):
        featurearray[idx] = (featurearray[idx] - means[idx]) / stdevs[idx]
    return featurearray
class Prediction:
    ''' Holds information about a single volume, or technically about the
    page-level genre predictions we have made for the volume.

    Loads one .predict file (a single JSON object on its first line),
    derives summary statistics used as model features, and can merge in
    HathiTrust metadata for the volume.
    '''

    def __init__(self, filepath):
        # Each .predict file carries one JSON object on its first line.
        with open(filepath, encoding='utf-8') as f:
            filelines = f.readlines()
        jsonobject = json.loads(filelines[0])
        self.dirtyid = jsonobject['volID']
        self.rawPredictions = jsonobject['rawPredictions']
        self.smoothPredictions = jsonobject['smoothedPredictions']
        self.probabilities = jsonobject['smoothedProbabilities']
        self.avggap = jsonobject['avgGap']
        self.maxprob = jsonobject['avgMaxProb']
        # Page count is taken from the smoothed sequence.
        self.pagelen = len(self.smoothPredictions)
        self.genrecounts, self.maxgenre = sequence_to_counts(self.smoothPredictions)
        pagesinmax = self.genrecounts[self.maxgenre]
        # Fraction of pages belonging to the dominant genre.
        self.maxratio = pagesinmax / self.pagelen
        # Flip counts measure instability of the genre sequence;
        # raw flips are normalized by page count, smoothed flips are not.
        self.rawflipratio = (count_flips(self.rawPredictions) / self.pagelen)
        self.smoothflips = count_flips(self.smoothPredictions)
        # Flag volumes that are majority biography.
        if 'bio' in self.genrecounts and self.genrecounts['bio'] > (self.pagelen / 2):
            self.bioflag = True
            print("BIO: " + self.dirtyid)
        else:
            self.bioflag = False

    def addmetadata(self, row, table):
        '''Attach author/title/date/genre metadata for this volume,
        taken from row `row` of a metadata `table` (a dict of columns).
        Also sets nonmetaflag if any genre tag implies nonfiction.
        '''
        self.author = table['author'][row]
        self.title = table['title'][row]
        self.date = utils.simple_date(row, table)
        genrelist = table['genres'][row].split(';')
        self.genres = set(genrelist)
        # Genre tags that imply the volume is some variety of nonfiction.
        varietiesofnon = ['Bibliographies', 'Catalog', 'Dictionary', 'Encyclopedia', 'Handbooks', 'Indexes', 'Legislation', 'Directories', 'Statistics', 'Legal cases', 'Legal articles', 'Calendars', 'Autobiography', 'Biography', 'Letters', 'Essays', 'Speeches']
        self.nonmetaflag = False
        for genre in varietiesofnon:
            if genre in self.genres:
                self.nonmetaflag = True

    def missingmetadata(self):
        '''Fallback when no metadata row exists for this volume:
        initialize all metadata attributes to empty values.
        '''
        self.author = ''
        self.title = ''
        self.date = ''
        self.genres = set()
        self.nonmetaflag = False

    def getfeatures(self):
        ''' Returns features used for an overall accuracy prediction. There are more of
        these (13) than we use for genre-specific predictions. See logisticconfidence.py
        for more information about features.

        Features 0-7 encode agreement/disagreement between the predicted
        dominant genre and the metadata genre tags; 8-12 are sequence
        statistics. The ordering must match the pickled models exactly.
        '''
        features = np.zeros(13)
        # Metadata confirms (even index) or contradicts (odd index)
        # the predicted dominant genre.
        if self.maxgenre == 'fic':
            if 'Fiction' in self.genres or 'Novel' in self.genres or 'Short stories' in self.genres:
                features[0] = 1
            if 'Drama' in self.genres or 'Poetry' in self.genres or self.nonmetaflag:
                features[1] = 1
        if self.maxgenre == 'poe':
            if 'Poetry' in self.genres or 'poems' in self.title.lower():
                features[2] = 1
            if 'Drama' in self.genres or 'Fiction' in self.genres or self.nonmetaflag:
                features[3] = 1
        if self.maxgenre == 'dra':
            if 'Drama' in self.genres or 'plays' in self.title.lower():
                features[4] = 1
            if 'Fiction' in self.genres or 'Poetry' in self.genres or self.nonmetaflag:
                features[5] = 1
        if self.maxgenre == 'non':
            if self.nonmetaflag:
                features[6] = 1
            if 'Fiction' in self.genres or 'Poetry' in self.genres or 'Drama' in self.genres or 'Novel' in self.genres or 'Short stories' in self.genres:
                features[7] = 1
        features[8] = self.maxratio
        features[9] = self.rawflipratio
        features[10] = self.smoothflips
        features[11] = self.avggap
        features[12] = self.maxprob
        return features

    def genrefeatures(self, agenre):
        ''' Extracts features to characterize the likelihood of accuracy in a
        particular genre.

        Unlike getfeatures, the metadata agreement/disagreement signals
        share slots 0 and 1 regardless of genre, because each genre has
        its own model.
        '''
        if agenre in self.genrecounts:
            pagesingenre = self.genrecounts[agenre]
        else:
            pagesingenre = 0
        genreproportion = pagesingenre / self.pagelen
        features = np.zeros(8)
        if agenre == 'fic':
            if 'Fiction' in self.genres or 'Novel' in self.genres or 'Short stories' in self.genres:
                features[0] = 1
            if 'Drama' in self.genres or 'Poetry' in self.genres or self.nonmetaflag:
                features[1] = 1
        if agenre == 'poe':
            if 'Poetry' in self.genres or 'poems' in self.title.lower():
                features[0] = 1
            if 'Drama' in self.genres or 'Fiction' in self.genres or self.nonmetaflag:
                features[1] = 1
        if agenre == 'dra':
            if 'Drama' in self.genres or 'plays' in self.title.lower():
                features[0] = 1
            if 'Fiction' in self.genres or 'Poetry' in self.genres or self.nonmetaflag:
                features[1] = 1
        features[2] = genreproportion
        features[3] = self.rawflipratio
        features[4] = self.smoothflips
        features[5] = self.avggap
        features[6] = self.maxprob
        features[7] = self.maxratio
        return features

    def getpredictions(self):
        ''' A getter function that transforms a list of page predictions into a
        dictionary mapping page index -> genre. It's debatable whether we should
        represent pages as a dictionary, since our sequencing logic does not in
        fact allow skipped pages. But it's more flexible in the long run, in
        case something happens that I can't anticipate.
        '''
        pagedict = dict()
        for idx, genre in enumerate(self.smoothPredictions):
            pagedict[idx] = genre
        return pagedict

    def getmetadata(self):
        ''' Basically just a getter function for metadata in a Prediction object. Only thing
        interesting is that it filters out certain genre tags known to be unreliable or convey
        little information.
        '''
        metadict = dict()
        metadict['htid'] = self.dirtyid
        metadict['author'] = self.author
        metadict['title'] = self.title
        metadict['inferred_date'] = self.date
        genrelist = []
        for genre in self.genres:
            if genre == "NotFiction":
                continue
            if genre == "UnknownGenre":
                continue
            if genre == "ContainsBiogMaterial":
                continue
            # In my experience, none of those tags are informative in my Hathi dataset.
            genrelist.append(genre.lower())
        metadict['genre_tags'] = ", ".join(genrelist)
        return metadict
# Begin main script.
# Usage: applyconfidence.py <sourcedirfile> <modeldir> <outputdir>
#   sourcedirfile: text file listing directories of .predict files, one per line
#   modeldir: directory containing pickled logistic models and calibration.csv
args = sys.argv
sourcedirfile = args[1]
with open(sourcedirfile, encoding = 'utf-8') as f:
    filelines = f.readlines()
directorylist = [x.strip() for x in filelines]
modeldir = args[2]
# Three-letter codes for the genres we model individually.
genrestocheck = ['fic', 'poe', 'dra']
genrepath = dict()
genremodel = dict()
overallpath = os.path.join(modeldir, 'overallmodel.p')
genrepath['fic'] = os.path.join(modeldir, 'ficmodel.p')
genrepath['dra'] = os.path.join(modeldir, 'dramodel.p')
genrepath['poe'] = os.path.join(modeldir, 'poemodel.p')
# Each pickle is a dict holding a fitted model plus the means/stdevs
# used by normalizeformodel (see that function above).
with open(overallpath, mode='rb') as f:
    overallmodel = pickle.load(f)
for genre in genrestocheck:
    with open(genrepath[genre], mode='rb') as f:
        genremodel[genre] = pickle.load(f)
# Human-readable genre names used as keys in the output JSON.
fullnames = {'fic': 'fiction', 'poe': 'poetry', 'dra': 'drama'}
# The logistic models we train on volumes are technically
# predicting the probability that an individual volume will
# cross a particular accuracy threshold. For the overall model
# it's .95, for the genres it's .8.
# This doesn't tell us what we really want to know, which is,
# if we construct a corpus of volumes like this, what will our
# precision and recall be? To infer that, we calculate precision
# and recall in the test set using different probability-thresholds,
# smooth the curve, and then use it empirically to map a
# threshold-probability to a corpus level prediction for precision and recall.
genrestocalibrate = ['overall', 'fic', 'poe', 'dra']
calibration = dict()
for genre in genrestocalibrate:
    calibration[genre] = dict()
    calibration[genre]['precision'] = list()
    calibration[genre]['recall'] = list()
# calibration.csv: one row per threshold (100 rows, matching intpart's
# 0-99 index); columns alternate precision, recall for each genre in
# genrestocalibrate order.
calipath = os.path.join(modeldir, 'calibration.csv')
with open(calipath, encoding = 'utf-8') as f:
    reader = csv.reader(f)
    # Skip the header row.
    next(reader, None)
    for row in reader:
        for idx, genre in enumerate(genrestocalibrate):
            calibration[genre]['precision'].append(float(row[idx * 2]))
            calibration[genre]['recall'].append(float(row[(idx * 2) + 1]))
outputdir = args[3]
metadatapath = '/projects/ichass/usesofscale/hathimeta/MergedMonographs.tsv'
# metadatapath = '/Volumes/TARDIS/work/metadata/MergedMonographs.tsv'
# if you run it locally
rows, columns, table = utils.readtsv(metadatapath)
# Main loop: for every .predict file in every source directory, score the
# volume with the overall and genre-specific models, map the resulting
# probabilities to corpus-level precision/recall estimates, and write one
# JSON summary per volume into outputdir.
for sourcedir in directorylist:
    predicts = os.listdir(sourcedir)
    # Skip hidden files such as .DS_Store.
    predicts = [x for x in predicts if not x.startswith('.')]
    for filename in predicts:
        cleanid = utils.pairtreelabel(filename.replace('.predict', ''))
        fileid = filename.replace('.predict', '')
        filepath = os.path.join(sourcedir, filename)
        try:
            predicted = Prediction(filepath)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception so the
            # script can still be interrupted cleanly.
            print("Failure to load prediction from " + filepath)
            continue
        if cleanid in rows:
            predicted.addmetadata(cleanid, table)
        else:
            print('Missing metadata for ' + cleanid)
            predicted.missingmetadata()
        # Probability that this volume is more than 95% correctly identified.
        overallfeatures = predicted.getfeatures()
        featurearray = normalizeformodel(np.array(overallfeatures), overallmodel)
        featureframe = pd.DataFrame(featurearray)
        thismodel = overallmodel['model']
        # predict_proba returns [[P(class 0), P(class 1)]]; take P(class 1).
        overall95proba = thismodel.predict_proba(featureframe.T)[0][1]
        # Probability, per genre, of crossing the .8 accuracy threshold.
        genreprobs = dict()
        for genre in genrestocheck:
            features = predicted.genrefeatures(genre)
            featurearray = normalizeformodel(np.array(features), genremodel[genre])
            featureframe = pd.DataFrame(featurearray)
            thismodel = genremodel[genre]['model']
            genreprobs[genre] = thismodel.predict_proba(featureframe.T)[0][1]
        # Assemble the per-volume JSON summary.
        jsontemplate = dict()
        jsontemplate['page_genres'] = predicted.getpredictions()
        jsontemplate['hathi_metadata'] = predicted.getmetadata()
        jsontemplate['added_metadata'] = dict()
        jsontemplate['added_metadata']['totalpages'] = predicted.pagelen
        jsontemplate['added_metadata']['maxgenre'] = predicted.maxgenre
        jsontemplate['added_metadata']['genre_counts'] = predicted.genrecounts
        overallprob, overallprecision, overallrecall = calibrate(overall95proba, calibration['overall'])
        overallaccuracy = dict()
        overallaccuracy['prob>95acc'] = overallprob
        overallaccuracy['precision@prob'] = overallprecision
        overallaccuracy['recall@prob'] = overallrecall
        jsontemplate['volume_accuracy'] = overallaccuracy
        # One sub-dict per genre actually present in the volume.
        for genre in genrestocheck:
            if genre in predicted.genrecounts:
                gpages = predicted.genrecounts[genre]
                # Proportion of pages in this genre, rounded to two decimals.
                gpercent = round((gpages / predicted.pagelen) * 100) / 100
                gprob, gprecision, grecall = calibrate(genreprobs[genre], calibration[genre])
                name = fullnames[genre]
                newdict = dict()
                newdict['prob_' + genre + '>80precise'] = gprob
                newdict['pages_' + genre] = gpages
                newdict['pct_' + genre] = gpercent
                newdict[genre + '_precision@prob'] = gprecision
                newdict[genre + '_recall@prob'] = grecall
                jsontemplate[name] = newdict
        # Write the summary into a subdirectory named by the id prefix.
        prefix = filename.split('.')[0]
        subdirectory = os.path.join(outputdir, prefix)
        if not os.path.isdir(subdirectory):
            os.mkdir(subdirectory)
        outpath = os.path.join(subdirectory, fileid + ".json")
        with open(outpath, mode = 'w', encoding = 'utf-8') as f:
            f.write(json.dumps(jsontemplate, sort_keys = True))
| {
"repo_name": "tedunderwood/genre",
"path": "confidencefilter/applyconfidence.py",
"copies": "1",
"size": "17657",
"license": "mit",
"hash": -1804200273645564700,
"line_mean": 36.4883227176,
"line_max": 261,
"alpha_frac": 0.6541315059,
"autogenerated": false,
"ratio": 3.9150776053215077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069209111221508,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.