code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import json
import os
import dotenv
import redis
import requests
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
# Load configuration: secrets first, then shared defaults (python-dotenv does
# not override variables that are already set).
dotenv.load_dotenv(".env.secret")
dotenv.load_dotenv(".env.shared")


def _env_flag(name: str) -> bool:
    """Interpret an environment variable as a boolean flag.

    ``bool(os.environ.get(name, False))`` is True for ANY non-empty string —
    including "0" and "false" — so parse the common spellings explicitly.
    An unset variable still yields False, as before.
    """
    return os.environ.get(name, "").strip().lower() in ("1", "true", "yes", "on")


CLIENT_ID = os.environ["PUTIO_CLIENT_ID"]
CLIENT_SECRET = os.environ["PUTIO_CLIENT_SECRET"]
PORT = int(os.environ.get("PORT", 5500))
DEBUG = _env_flag("DEBUG")
DEV = _env_flag("DEV")
# In DEV mode the OAuth redirect points at the local server.
REDIRECT_BASE_URL = f"http://127.0.0.1:{PORT}" if DEV else os.environ["BASE_URL"]
REDIRECT_URL = f"{REDIRECT_BASE_URL}/oauth-callback"
HOME_URL = os.environ["HOME_URL"]
REDIS_URL = os.environ["REDIS_URL"]
# Token expiry in seconds; coerce here so callers can pass it straight to redis.
REDIS_TIMEOUT = int(os.environ.get("REDIS_TIMEOUT", 300))
db = redis.from_url(REDIS_URL, decode_responses=True)
app = Flask(__name__)
@app.route("/", methods=("GET",))
def home():
    """Redirect the bare root URL to the configured landing page (HOME_URL)."""
    return redirect(HOME_URL)
@app.route("/oauth-callback", methods=("GET",))
def oauth_callback():
    """
    Callback endpoint for the OAuth 2.0 Authorization Code flow.

    Exchanges the authorization ``code`` for an access token at put.io,
    stores the token in Redis under the caller-supplied ``state`` key for
    REDIS_TIMEOUT seconds (default: 300), then renders a page displaying it.
    """
    try:
        state = request.args["state"]
        code = request.args["code"]
    except KeyError:
        return "Request must include `state` and `code` as query parameters.", 400
    # Exchange the one-time authorization code for an access token.
    response = requests.get(
        "https://api.put.io/v2/oauth2/access_token",
        params={
            "client_id": CLIENT_ID,
            "client_secret": CLIENT_SECRET,
            "redirect_uri": REDIRECT_URL,
            "grant_type": "authorization_code",
            "code": code,
        },
    )
    try:
        access_token = response.json()["access_token"]
        # FIX: previously hard-coded ex=300, ignoring the configurable
        # REDIS_TIMEOUT that the module defines for exactly this purpose.
        db.set(state, access_token, ex=int(REDIS_TIMEOUT))
    except Exception:
        # Covers a non-JSON/error response from put.io as well as Redis failures.
        return "An unknown error occurred.", 500
    return render_template("show_access_token.html", access_token=access_token), 200
@app.route("/get-access-token", methods=("GET",))
def get_access_token():
    """
    Return the stored access token for a given ``state``.

    The Redis GETDEL is atomic, so each token can be collected exactly once;
    after retrieval (or expiry) the key no longer exists and 404 is returned.
    """
    try:
        state = request.args["state"]
    except KeyError:
        return "Request must include `state` as a query parameter.", 400
    try:
        access_token = db.getdel(state)
    except Exception:
        return "An unknown error occurred.", 500
    if not access_token:
        return "Access token couldn't be found", 404
    return access_token, 200
@app.route("/create-access-token", methods=("POST",))
def create_access_token():
    """Create a put.io access token from the user's username and password.

    Expects a JSON body with ``username`` and ``password`` fields and proxies
    them to put.io via HTTP Basic auth for this client.
    """
    # FIX: call get_json() once (the original parsed the body twice) and use
    # silent=True so a missing/invalid JSON body falls through to the 400
    # below instead of raising a TypeError the KeyError handler misses.
    payload = request.get_json(silent=True) or {}
    try:
        username = payload["username"]
        password = payload["password"]
    except KeyError:
        return "Request must include `username` and `password` fields in its body.", 400
    response = requests.put(
        f"https://api.put.io/v2/oauth2/authorizations/clients/{CLIENT_ID}/",
        data={"client_secret": CLIENT_SECRET},
        auth=(username, password),
    )
    try:
        access_token = response.json()["access_token"]
    except Exception:
        # Bad credentials or a non-JSON error body from put.io.
        return "An unknown error occurred.", 500
    return access_token, 200
if __name__ == "__main__":
    # Development entry point (Flask built-in server), reachable on all interfaces.
    app.run(host="0.0.0.0", port=PORT, debug=DEBUG)
import json
import os
import dotenv
import redis
import requests
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
# NOTE(review): everything below is a verbatim duplicate of the script above —
# the dump stores both the raw and the parsed copy of putio-auth/app.py.
dotenv.load_dotenv(".env.secret")
dotenv.load_dotenv(".env.shared")
CLIENT_ID = os.environ["PUTIO_CLIENT_ID"]
CLIENT_SECRET = os.environ["PUTIO_CLIENT_SECRET"]
PORT = int(os.environ.get("PORT", 5500))
# NOTE(review): bool() of any non-empty string is True, so DEBUG=0 / DEBUG=false
# still enable the flag — same remark for DEV.
DEBUG = bool(os.environ.get("DEBUG", False))
DEV = bool(os.environ.get("DEV", False))
REDIRECT_BASE_URL = f"http://127.0.0.1:{PORT}" if DEV else os.environ["BASE_URL"]
REDIRECT_URL = f"{REDIRECT_BASE_URL}/oauth-callback"
HOME_URL = os.environ["HOME_URL"]
REDIS_URL = os.environ["REDIS_URL"]
# NOTE(review): read but never used — db.set() below hard-codes ex=300.
REDIS_TIMEOUT = os.environ.get("REDIS_TIMEOUT", 300)
db = redis.from_url(REDIS_URL, decode_responses=True)
app = Flask(__name__)


@app.route("/", methods=("GET",))
def home():
    # Redirect the bare root to the configured landing page.
    return redirect(HOME_URL)


@app.route("/oauth-callback", methods=("GET",))
def oauth_callback():
    """
    Callback method for OAuth 2.0 Authorization Code flow.
    Uses `state` to distinguish and verify users.
    Keeps access tokens in Redis for a period of time (default: 300 seconds).
    Finally, renders a page to display access token.
    """
    try:
        state = request.args["state"]
        code = request.args["code"]
    except KeyError:
        return "Request must include `state` and `code` as query parameters.", 400
    # Exchange the authorization code for an access token at put.io.
    response = requests.get(
        "https://api.put.io/v2/oauth2/access_token",
        params={
            "client_id": CLIENT_ID,
            "client_secret": CLIENT_SECRET,
            "redirect_uri": REDIRECT_URL,
            "grant_type": "authorization_code",
            "code": code,
        },
    )
    try:
        access_token = json.loads(response.content.decode("utf-8"))["access_token"]
        db.set(state, access_token, ex=300)
    except Exception:
        return "An unknown error occured.", 500
    return render_template("show_access_token.html", access_token=access_token), 200


@app.route("/get-access-token", methods=("GET",))
def get_access_token():
    """
    Returns access token to the user.
    Uses `state` to distinguish and verify users.
    When an access token is returned, it is deleted from Redis.
    """
    try:
        state = request.args["state"]
    except KeyError:
        return "Request must include `state` as a query parameter.", 400
    try:
        # Atomic read-and-delete: a token can be collected only once.
        access_token = db.getdel(state)
    except Exception:
        return "An unknown error occured.", 500
    if not access_token:
        return "Access token couldn't be found", 404
    return access_token, 200


@app.route("/create-access-token", methods=("POST",))
def create_access_token():
    """Creates access token for the user with username and password."""
    try:
        username = request.get_json()["username"]
        password = request.get_json()["password"]
    except KeyError:
        return "Request must include `username` and `password` fields in its body.", 400
    # put.io issues a token for this client when authenticated with the
    # user's own credentials via HTTP Basic auth.
    response = requests.put(
        f"https://api.put.io/v2/oauth2/authorizations/clients/{CLIENT_ID}/",
        data={"client_secret": CLIENT_SECRET},
        auth=(username, password),
    )
    try:
        access_token = json.loads(response.content.decode("utf-8"))["access_token"]
    except Exception:
        return "An unknown error occured.", 500
    return access_token, 200


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=PORT, debug=DEBUG)
from homeassistant.components.switch import SwitchEntity
from .core.const import DOMAIN
from .core.entity import XEntity
from .core.ewelink import XRegistry, SIGNAL_ADD_ENTITIES
PARALLEL_UPDATES = 0  # fix entity_platform parallel_updates Semaphore


async def async_setup_entry(hass, config_entry, add_entities):
    """Hook this config entry's registry so discovered switches get added."""
    ewelink: XRegistry = hass.data[DOMAIN][config_entry.entry_id]
    ewelink.dispatcher_connect(
        SIGNAL_ADD_ENTITIES,
        # Each dispatched batch may mix platforms; keep only switch entities.
        lambda x: add_entities([e for e in x if isinstance(e, SwitchEntity)])
    )
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class XSwitch(XEntity, SwitchEntity):
    """Single-channel switch whose state rides on the "switch" param."""

    params = {"switch"}

    def set_state(self, params: dict):
        self._attr_is_on = params["switch"] == "on"

    async def async_turn_on(self, **kwargs):
        await self.ewelink.send(self.device, {"switch": "on"})

    # FIX: accept **kwargs like async_turn_on does — Home Assistant's
    # SwitchEntity contract passes keyword arguments to both turn methods.
    async def async_turn_off(self, **kwargs):
        await self.ewelink.send(self.device, {"switch": "off"})
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class XSwitches(XEntity, SwitchEntity):
    """One channel of a multi-channel switch ("switches" list param)."""

    params = {"switches"}
    channel: int = 0  # outlet index this entity controls

    def __init__(self, ewelink: XRegistry, device: dict):
        XEntity.__init__(self, ewelink, device)
        try:
            # Use the user's per-channel name from the eWeLink app if present.
            self._attr_name = \
                device["tags"]["ck_channel_name"][str(self.channel)]
        except KeyError:
            pass
        # backward compatibility
        self._attr_unique_id = f"{device['deviceid']}_{self.channel + 1}"

    def set_state(self, params: dict):
        try:
            # Pick this entity's outlet out of the reported channel list;
            # updates that omit our channel are ignored.
            params = next(
                i for i in params["switches"] if i["outlet"] == self.channel
            )
            self._attr_is_on = params["switch"] == "on"
        except StopIteration:
            pass

    async def async_turn_on(self, **kwargs):
        params = {"switches": [{"outlet": self.channel, "switch": "on"}]}
        await self.ewelink.send_bulk(self.device, params)

    # FIX: accept **kwargs for signature symmetry with async_turn_on and
    # with Home Assistant's SwitchEntity contract.
    async def async_turn_off(self, **kwargs):
        params = {"switches": [{"outlet": self.channel, "switch": "off"}]}
        await self.ewelink.send_bulk(self.device, params)
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class XSwitchTH(XSwitch):
    """TH10/TH16 variant: switching also forces deviceType back to manual."""

    # FIX: both overrides dropped **kwargs, narrowing the parent signature;
    # restore it so calls that forward keyword arguments don't TypeError.
    async def async_turn_on(self, **kwargs):
        params = {"switch": "on", "mainSwitch": "on", "deviceType": "normal"}
        await self.ewelink.send(self.device, params)

    async def async_turn_off(self, **kwargs):
        params = {"switch": "off", "mainSwitch": "off", "deviceType": "normal"}
        await self.ewelink.send(self.device, params)
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class XZigbeeSwitches(XSwitches):
    """Zigbee multi-channel switch: commands must carry every channel."""

    async def _async_send_all_channels(self, state: str):
        # zigbee switch should send all channels at once
        # https://github.com/AlexxIT/SonoffLAN/issues/714
        switches = [
            {"outlet": self.channel, "switch": state}
            if sw["outlet"] == self.channel else sw
            for sw in self.device["params"]["switches"]
        ]
        await self.ewelink.send(self.device, {"switches": switches})

    async def async_turn_on(self, **kwargs):
        await self._async_send_all_channels("on")

    # FIX: accept **kwargs like async_turn_on (Home Assistant contract).
    async def async_turn_off(self, **kwargs):
        await self._async_send_all_channels("off")
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class XToggle(XEntity, SwitchEntity):
    """Generic on/off toggle whose param name is supplied via self.param."""

    def set_state(self, params: dict):
        # Mirror the reported value into the cached device params as well.
        self.device["params"][self.param] = params[self.param]
        self._attr_is_on = params[self.param] == "on"

    # FIX: both turn methods accept **kwargs per the SwitchEntity contract.
    async def async_turn_on(self, **kwargs):
        await self.ewelink.send(self.device, {self.param: "on"})

    async def async_turn_off(self, **kwargs):
        await self.ewelink.send(self.device, {self.param: "off"})
from .core.const import DOMAIN
from .core.entity import XEntity
from .core.ewelink import XRegistry, SIGNAL_ADD_ENTITIES
PARALLEL_UPDATES = 0 # fix entity_platform parallel_updates Semaphore
# NOTE(review): everything below is a verbatim duplicate of the switch
# platform above — the dump stores both raw and parsed copies of switch.py.
async def async_setup_entry(hass, config_entry, add_entities):
    # Forward every discovered batch, keeping only the switch entities.
    ewelink: XRegistry = hass.data[DOMAIN][config_entry.entry_id]
    ewelink.dispatcher_connect(
        SIGNAL_ADD_ENTITIES,
        lambda x: add_entities([e for e in x if isinstance(e, SwitchEntity)])
    )


# noinspection PyAbstractClass
class XSwitch(XEntity, SwitchEntity):
    # Single-channel device: state rides on the "switch" param.
    params = {"switch"}

    def set_state(self, params: dict):
        self._attr_is_on = params["switch"] == "on"

    async def async_turn_on(self, **kwargs):
        await self.ewelink.send(self.device, {"switch": "on"})

    async def async_turn_off(self):
        await self.ewelink.send(self.device, {"switch": "off"})


# noinspection PyAbstractClass
class XSwitches(XEntity, SwitchEntity):
    # One channel of a multi-channel device ("switches" list param).
    params = {"switches"}
    channel: int = 0

    def __init__(self, ewelink: XRegistry, device: dict):
        XEntity.__init__(self, ewelink, device)
        try:
            self._attr_name = \
                device["tags"]["ck_channel_name"][str(self.channel)]
        except KeyError:
            pass
        # backward compatibility
        self._attr_unique_id = f"{device['deviceid']}_{self.channel + 1}"

    def set_state(self, params: dict):
        try:
            # Updates that omit this entity's channel are ignored.
            params = next(
                i for i in params["switches"] if i["outlet"] == self.channel
            )
            self._attr_is_on = params["switch"] == "on"
        except StopIteration:
            pass

    async def async_turn_on(self, **kwargs):
        params = {"switches": [{"outlet": self.channel, "switch": "on"}]}
        await self.ewelink.send_bulk(self.device, params)

    async def async_turn_off(self):
        params = {"switches": [{"outlet": self.channel, "switch": "off"}]}
        await self.ewelink.send_bulk(self.device, params)


# noinspection PyAbstractClass
class XSwitchTH(XSwitch):
    # TH10/TH16 variant: switching also resets deviceType to "normal".
    async def async_turn_on(self):
        params = {"switch": "on", "mainSwitch": "on", "deviceType": "normal"}
        await self.ewelink.send(self.device, params)

    async def async_turn_off(self):
        params = {"switch": "off", "mainSwitch": "off", "deviceType": "normal"}
        await self.ewelink.send(self.device, params)


# noinspection PyAbstractClass
class XZigbeeSwitches(XSwitches):
    async def async_turn_on(self, **kwargs):
        # zigbee switch should send all channels at once
        # https://github.com/AlexxIT/SonoffLAN/issues/714
        switches = [
            {"outlet": self.channel, "switch": "on"}
            if switch["outlet"] == self.channel else switch
            for switch in self.device["params"]["switches"]
        ]
        await self.ewelink.send(self.device, {"switches": switches})

    async def async_turn_off(self):
        switches = [
            {"outlet": self.channel, "switch": "off"}
            if switch["outlet"] == self.channel else switch
            for switch in self.device["params"]["switches"]
        ]
        await self.ewelink.send(self.device, {"switches": switches})


# noinspection PyAbstractClass
class XToggle(XEntity, SwitchEntity):
    # Generic toggle whose param name comes from self.param.
    def set_state(self, params: dict):
        self.device["params"][self.param] = params[self.param]
        self._attr_is_on = params[self.param] == "on"

    async def async_turn_on(self):
        await self.ewelink.send(self.device, {self.param: "on"})

    async def async_turn_off(self):
        await self.ewelink.send(self.device, {self.param: "off"})
from flask import (Flask, make_response)
from flask_restful import (reqparse, abort, Api, Resource)
import argparse
from storage_engine import storage_engine_nuage, storage_engine_pan, storage_engine_f5
import nuage_pgsync_configuration
import json
import threading
import uuid
from time import sleep
try:
# Try and import Nuage VSPK from the development release
from vspk import v5_0 as vsdk
except ImportError:
# If this fails, import the Nuage VSPK from the pip release
from vspk.vsdk import v5_0 as vsdk
# Option names expected in the .ini configuration file, grouped by section.
# imported parameters in .ini file :
# section
ini_general_section = "GENERAL"
# parameters in section
ini_nuage_enterprise = "Enterprise"
# section
ini_nuage_vsd_section = "NUAGE_VSD_CONNECTION"
# parameters in section
ini_nuage_deployment_mode = "DeploymentMode"
ini_nuage_port = "ApiPort"
ini_nuage_host1 = "IpAddr1"
ini_nuage_host2 = "IpAddr2"
ini_nuage_host3 = "IpAddr3"
# section
ini_nuage_api_section = "NUAGE_REST_API_DETAILS"
# parameters in section
ini_nuage_username = 'UserName'
ini_nuage_password = 'Password'
ini_nuage_organization = 'Organization'
ini_nuage_domain_filter = 'DomainFilter'
ini_nuage_pg_filter = 'PolicyGroupFilter'
# section
ini_state_engine_section = "STATE_ENGINE_CONNECTION"
# parameters in section
ini_pgsync_api_port = "StateEnginePort"
ini_pgsync_api_host = "StateEngineAddr"
# section
ini_api_section = "API"
# parameters in section
ini_api_bind_address = "BindAddr"
# section
ini_pan_section = "PAN"
# parameters in section
ini_panorama_deployment_mode = "DeploymentMode"
ini_panorama_host1 = "PanoramaIpAddr1"
ini_panorama_host2 = "PanoramaIpAddr2"
ini_panorama_port = "PanoramaPort"
ini_panorama_username = 'PanoramaUserName'
# FIX: this literal was redacted to '<PASSWORD>' in the dump; restored to
# match the "Panorama<Name>" option-naming pattern above — confirm against
# the actual .ini file.
ini_panorama_password = 'PanoramaPassword'
# section
ini_f5_section = 'F5'
# F5 options are numbered per device: IpAddrX / UserNameX / PasswordX
# IpAddrX
# UserNameX
# PasswordX
def main():
    """Boot the state engine: logging, config, database fetch/sync, then API."""
    # Handling arguments
    """
    args = get_args()
    debug = args.debug
    verbose = args.verbose
    log_file = args.logfile
    ini_file = args.inifile
    """
    # Hard-coded arguments (the argparse block above is currently stubbed out).
    debug = False
    verbose = True
    log_file = 'logs/state_engine.log'
    ini_file = 'nuage-pgsync.ini'
    # Logging settings
    global logger
    logger = setup_logging(debug, verbose, log_file)
    # Load configuration
    global config
    vault_config = nuage_pgsync_configuration.Enterprise(ini_file=ini_file,
                                                         logger=logger)
    vault_config.fetch()
    config = vault_config.config
    # Get parameters from config (.ini file)
    global se
    se = StateEngine()
    ### Init phase
    logger.info("Starting state_engine")
    # Step 1. Fetch Nuage storage engine = Master database
    logger.info("step 1. Fetch ip address / policy groups mapping table from Nuage")
    # Next Gen
    global nuage_db
    nuage_db = storage_engine_nuage.NuageDatabase(nuage_enterprise=se.nuage_enterprise,
                                                  nuage_domain_filter=se.nuage_domain_filter,
                                                  nuage_pg_filter=se.nuage_pg_filter,
                                                  logger=logger
                                                  )
    nuage_db.import_vsd_pool(name="non-PROD",
                             host_list=se.nuage_host_list,
                             username=se.nuage_username,
                             # FIX: argument was redacted in the dump; restored
                             # to mirror the username/organization pattern.
                             password=se.nuage_password,
                             organization=se.nuage_organization
                             )
    nuage_db.fetch()
    # Step 2. Fetch other storage engines = Slaves databases
    logger.info("step 2. Fetch storage engines")
    global storage_engine_list
    storage_engine_list = {}
    # PAN db
    global pan_db
    storage_engine_list['PAN'] = []
    pan_db = storage_engine_pan.PanDatabase(nuage_db=nuage_db,
                                            logger=logger)
    storage_engine_list['PAN'].append(pan_db)
    pan_db.import_panorama_pool(name="non-PROD",
                                host_list=se.panorama_host_list,
                                username=se.panorama_username,
                                # FIX: redacted in the dump; restored likewise.
                                password=se.panorama_password
                                )
    # load current configuration from devices managed by PANORAMA
    pan_db.fetch()
    # sync current configuration with Nuage
    pan_db.sync()
    # F5 db
    global f5_db
    f5_db = None
    storage_engine_list['F5'] = []
    """
    se.f5_host_list = ["10.5.26.110"]
    f5_db = storage_engine_f5.F5Database(nuage_db=nuage_db,
                                         logger=logger)
    storage_engine_list['F5'].append(f5_db)
    f5_db.import_devices(host_list=se.f5_host_list,
                         username_list=se.f5_username_list,
                         password_list=se.f5_password_list)
    f5_db.fetch()
    f5_db.sync()
    """
    # Step 3. Intialize the queue of syncing request
    global sync_queue
    sync_queue = []
    global sync_in_progress
    sync_in_progress = [0]
    # Step 4. Start API
    logger.info("step 3. Start API")
    logger.warning("state engine started")
    # NOTE(review): state_engine_listener is not defined in this chunk —
    # presumably created further down the module. Verify before refactoring.
    state_engine_listener.run(debug=debug,
                              host=se.state_engine_host,
                              port=se.state_engine_port,
                              use_reloader=False)
    # use_reloader - whether to reload and fork the process on exception
def get_args():
    """Parse and return the command-line arguments for the state engine."""
    parser = argparse.ArgumentParser(description="Run the state_engine.")
    # Verbosity toggles.
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        required=False, help='Enable debug output')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                        required=False, help='Enable verbose output')
    # File locations.
    parser.add_argument('-l', '--log-file', dest='logfile', type=str,
                        default="state_engine.log", required=False,
                        help='File to log to')
    parser.add_argument('-p', '--ini-file', dest='inifile', type=str,
                        default="nuage-pgsync.ini", required=False,
                        help='File that contain parameters')
    return parser.parse_args()
def setup_logging(debug, verbose, log_file):
    """Configure file logging and the Nuage VSPK log level; return a logger.

    debug wins over verbose; with neither set, only warnings are recorded.
    """
    import logging
    from vspk.utils import set_log_level

    level = logging.DEBUG if debug else logging.INFO if verbose else logging.WARNING
    set_log_level(level)
    logging.basicConfig(filename=log_file,
                        format='%(asctime)s %(levelname)s %(message)s',
                        level=level)
    return logging.getLogger(__name__)
class StateEngine(object):
    """Configuration holder for the state engine.

    Defaults are set in __init__ and then overridden by whatever the
    module-level `config` parser loaded from the .ini file provides.
    """

    def __init__(self):
        # Initialize Defaults (parse_file() below overrides from the .ini)
        self.nuage_organization = 'csp'
        self.nuage_port = '8443'
        # NOTE(review): this default was redacted to '<PASSWORD>' in the dump;
        # 'csproot' is the stock Nuage VSD credential paired with the username
        # below — confirm the intended default.
        self.nuage_password = 'csproot'
        self.nuage_username = 'csproot'
        self.nuage_deployment_mode = 'standalone'
        self.nuage_host_list = []
        self.nuage_enterprise = None
        self.nuage_domain_filter = None
        self.nuage_pg_filter = None
        self.state_engine_host = '127.0.0.1'
        self.state_engine_port = '80'
        self.panorama_deployment_mode = 'standalone'
        self.panorama_host_list = []
        self.panorama_port = None
        self.panorama_username = None
        self.panorama_password = None
        self.f5_host_list = []
        self.f5_port = '443'
        self.f5_username_list = []
        self.f5_password_list = []
        # Get attributes from .ini file
        self.parse_file()

    def parse_file(self):
        """Read every known section of the module-level `config` parser.

        Raises:
            SyntaxError: when a mandatory section or option is missing.
        """
        logger.info("INI file: get parameters")
        # GENERAL (mandatory)
        if config.has_section(ini_general_section):
            # Enterprise
            if config.has_option(ini_general_section, ini_nuage_enterprise):
                self.nuage_enterprise = config.get(ini_general_section, ini_nuage_enterprise)
            else:
                logger.error("No Enterprise in GENERAL Section")
                raise SyntaxError("No Enterprise in GENERAL Section")
        else:
            logger.error("No GENERAL Section")
            raise SyntaxError("No GENERAL Section")
        # NUAGE_VSD_CONNECTION (mandatory)
        if config.has_section(ini_nuage_vsd_section):
            # ApiPort
            if config.has_option(ini_nuage_vsd_section, ini_nuage_port):
                self.nuage_port = config.get(ini_nuage_vsd_section, ini_nuage_port)
            # DeploymentMode: cluster uses three VSD addresses, standalone one.
            if config.has_option(ini_nuage_vsd_section, ini_nuage_deployment_mode):
                self.nuage_deployment_mode = config.get(ini_nuage_vsd_section, ini_nuage_deployment_mode)
            if self.nuage_deployment_mode == 'cluster':
                logger.info("VSD in Cluster mode, adding all 3 IP's")
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host1))
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host2))
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host3))
            else:
                logger.info("VSD in Standalone mode, adding only one IP")
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host1))
        else:
            logger.error("No VSD's Connection Details Section")
            raise SyntaxError("No VSD's Connection Details Section")
        # NUAGE_REST_API_DETAILS (optional)
        if config.has_section(ini_nuage_api_section):
            # UserName
            if config.has_option(ini_nuage_api_section, ini_nuage_username):
                self.nuage_username = config.get(ini_nuage_api_section, ini_nuage_username)
            # Password
            if config.has_option(ini_nuage_api_section, ini_nuage_password):
                self.nuage_password = config.get(ini_nuage_api_section, ini_nuage_password)
            # Organization
            if config.has_option(ini_nuage_api_section, ini_nuage_organization):
                self.nuage_organization = config.get(ini_nuage_api_section, ini_nuage_organization)
            # DomainFilter ('none' is the explicit no-filter keyword)
            if config.has_option(ini_nuage_api_section, ini_nuage_domain_filter):
                self.nuage_domain_filter = config.get(ini_nuage_api_section, ini_nuage_domain_filter)
                if self.nuage_domain_filter == 'none':
                    self.nuage_domain_filter = None
            # PolicyGroupFilter ('none' is the explicit no-filter keyword)
            if config.has_option(ini_nuage_api_section, ini_nuage_pg_filter):
                self.nuage_pg_filter = config.get(ini_nuage_api_section, ini_nuage_pg_filter)
                if self.nuage_pg_filter == 'none':
                    self.nuage_pg_filter = None
        else:
            logger.warning("No VSD's REST API Details Section")
        # STATE_ENGINE_CONNECTION (optional)
        if config.has_section(ini_state_engine_section):
            # StateEnginePort
            if config.has_option(ini_state_engine_section, ini_pgsync_api_port):
                self.state_engine_port = config.get(ini_state_engine_section, ini_pgsync_api_port)
        else:
            logger.error("No State Engine's Connection Details Section")
        # API (optional)
        if config.has_section(ini_api_section):
            # BindAddr
            if config.has_option(ini_api_section, ini_api_bind_address):
                self.state_engine_host = config.get(ini_api_section, ini_api_bind_address)
        else:
            logger.error("No State Engine's Connection Details Section")
        # PAN (optional)
        if config.has_section(ini_pan_section):
            # PanoramaPort
            if config.has_option(ini_pan_section, ini_panorama_port):
                self.panorama_port = config.get(ini_pan_section, ini_panorama_port)
            # DeploymentMode: cluster uses two Panorama addresses.
            if config.has_option(ini_pan_section, ini_panorama_deployment_mode):
                self.panorama_deployment_mode = config.get(ini_pan_section, ini_panorama_deployment_mode)
            if self.panorama_deployment_mode == 'cluster':
                logger.info("PANORAMA in Cluster mode, adding all 2 IP's")
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host1))
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host2))
            else:
                logger.info("PANORAMA in Standalone mode, adding only one IP")
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host1))
            # UserName
            if config.has_option(ini_pan_section, ini_panorama_username):
                self.panorama_username = config.get(ini_pan_section, ini_panorama_username)
            # Password
            if config.has_option(ini_pan_section, ini_panorama_password):
                self.panorama_password = config.get(ini_pan_section, ini_panorama_password)
        else:
            logger.warning("No PAN's Connection Details Section")
        # F5 (optional): numbered options IpAddrN / UserNameN / PasswordN,
        # scanned from 1 upward until an IpAddrN is missing.
        if config.has_section(ini_f5_section):
            i = 0
            f5_current_device = True
            while f5_current_device:
                i += 1
                ini_f5_current_host = "IpAddr" + str(i)
                ini_f5_current_username = "UserName" + str(i)
                # FIX: this expression was mangled to '"Password" + <PASSWORD>)'
                # in the dump; it follows the same "<Name><index>" pattern as
                # the host and username options above.
                ini_f5_current_password = "Password" + str(i)
                # IpAddr
                if config.has_option(ini_f5_section, ini_f5_current_host):
                    self.f5_host_list.append(config.get(ini_f5_section, ini_f5_current_host))
                else:
                    # no more F5 device
                    f5_current_device = False
                    continue
                # UserName
                if config.has_option(ini_f5_section, ini_f5_current_username):
                    self.f5_username_list.append(config.get(ini_f5_section, ini_f5_current_username))
                # Password
                if config.has_option(ini_f5_section, ini_f5_current_password):
                    self.f5_password_list.append(config.get(ini_f5_section, ini_f5_current_password))
        else:
            logger.warning("No F5's Connection Details Section")

    def get_json_format(self):
        """Return the non-secret configuration as a JSON-serializable dict."""
        data = {}
        data['NUAGE'] = {}
        data['NUAGE']['enterprise'] = self.nuage_enterprise
        data['NUAGE']['organization'] = self.nuage_organization
        data['NUAGE']['port'] = self.nuage_port
        data['NUAGE']['username'] = self.nuage_username
        data['NUAGE']['hosts'] = self.nuage_host_list
        data['NUAGE']['domain_filter'] = self.nuage_domain_filter
        data['NUAGE']['pg_filter'] = self.nuage_pg_filter
        data['API'] = {}
        data['API']['bind_address'] = self.state_engine_host
        data['API']['port'] = self.state_engine_port
        data['PANORAMA'] = {}
        data['PANORAMA']['hosts'] = self.panorama_host_list
        data['PANORAMA']['port'] = self.panorama_port
        data['PANORAMA']['username'] = self.panorama_username
        data['F5'] = {}
        data['F5']['hosts'] = self.f5_host_list
        data['F5']['port'] = self.f5_port
        data['F5']['username'] = self.f5_username_list
        return data
def output_txt_response_format(data, code, headers=None):
    """Wrap *data* in a Flask response with *code* and optional extra headers."""
    response = make_response(data, code)
    if headers:
        response.headers.extend(headers)
    return response
def output_json_response_format(data, code, headers=None):
    """Serialize *data* to JSON and wrap it in a Flask response with *code*."""
    body = json.dumps(data)
    response = make_response(body, code)
    if headers:
        response.headers.extend(headers)
    return response
class ApiHealthcheck(Resource):
    """Liveness probe: always reports the API as up."""

    @staticmethod
    def get():
        return "OK", 200
class ApiConfig(Resource):
    """Expose the running (non-secret) configuration of the state engine."""

    @staticmethod
    def get():
        return se.get_json_format(), 200
class Generic:
    """Shared helpers for the API resources: scope checks, database reset
    and the single-flight sync queue (module globals: nuage_db, logger,
    sync_queue, sync_in_progress, storage_engine_list)."""

    # NOTE(review): the log format strings below interpolate `__name__`,
    # which resolves to the *module* name, not the method name — presumably
    # the method name was intended. `__class__` works inside these
    # staticmethods via the implicit class cell.

    @staticmethod
    def sanity_check_enterprise(vsd_id):
        # An event is in scope only when it belongs to our enterprise.
        if vsd_id != nuage_db.id:
            logger.info("%s::%s: object's enterprise is out of scope: enterprise_id=%s" %
                        (__class__.__name__, __name__, vsd_id))
            return False
        else:
            return True

    @staticmethod
    def sanity_check_domain(vsd_id):
        # Fetch the domain transiently just to inspect its name; the child
        # is deleted again before deciding.
        cur_domain = storage_engine_nuage.NuageGenericDomain(vsd_id=vsd_id,
                                                             logger=logger)
        nuage_db.create_child(cur_domain)
        cur_domain.fetch()
        cur_domain_name = cur_domain.name
        cur_domain.delete()
        # NOTE(review): this compares against ini_nuage_domain_filter, which
        # is the .ini OPTION NAME ('DomainFilter'), not the configured filter
        # value (se.nuage_domain_filter) — looks like a bug; confirm.
        if cur_domain_name is None or ini_nuage_domain_filter not in cur_domain_name:
            # Domain is out of scope
            logger.info("%s::%s: object's domain is out of scope: name=%s; id=%s" %
                        (__class__.__name__, __name__, cur_domain_name, vsd_id))
            return False
        else:
            # Domain should be in database
            logger.error("%s::%s: unknown policy group's domain, reset database: name=%s; id=%s" %
                         (__class__.__name__, __name__, cur_domain_name, vsd_id))
            return True

    @staticmethod
    def reset_nuage_storage_database(vsd_id):
        # Full flush-and-refetch of the master database.
        logger.info("%s::%s: reset database, expected object to load: %s" % (__class__.__name__, __name__, vsd_id))
        nuage_db.flush()
        nuage_db.fetch()

    @staticmethod
    def log_object_not_found_in_nuage(name, vsd_id):
        logger.warning("%s::%s: Object not found in Nuage: name=%s; id=%s" %
                       (__class__.__name__, __name__, name, vsd_id))

    @staticmethod
    def log_nuage_storage_engine_already_synchronized(name, vsd_id):
        logger.warning("%s::%s: Nuage storage database already synchronized: name=%s; id=%s" %
                       (__class__.__name__, __name__, name, vsd_id))

    @staticmethod
    def sync_storage_databases():
        # Single-flight with one waiter: at most two threads sit in
        # sync_queue — slot 0 is syncing, slot 1 waits and will pick up any
        # changes made meanwhile. Further requests are coalesced into slot 1.
        if len(sync_queue) <= 1:
            # value 0: no current sync in progress
            t = threading.Thread(target=Generic.thread_sync_storage_databases, name=str(uuid.uuid4()))
            sync_queue.append(t)
            logger.info("%s::%s: NEW THREAD, database changes will be sync by the new thread in sync_queue: id=%s" %
                        (__class__.__name__, __name__, t.name))
            t.start()
        else:
            # value 2+: the 2nd thread in queue will include changes for this sync_storage_databases request
            logger.info("%s::%s: PASS THREAD, sync_queue full: nb=%s" %
                        (__class__.__name__, __name__, len(sync_queue)))

    @staticmethod
    def thread_sync_storage_databases():
        """
        One sync at a time is possible.
        Only 2 threads are in sync_queue: #0 in current sync operation, #1 that will wait for its turn to sync
        :return:
        """
        try:
            # be in queue: poll once per second while another sync runs.
            while len(sync_queue) == 2 and sync_in_progress[0] == 1:
                sleep(1)
                logger.info("%s::%s: WAIT THREAD, current sync in progress, thread is waiting in queue" %
                            (__class__.__name__, __name__))
        except Exception as e:
            logger.error("%s::%s: ERROR THREAD, error raised by the thread in queue. Error: %s" %
                         (__class__.__name__, __name__, e))
            sync_queue.pop(0)
            return
        # Start sync
        logger.info("%s::%s: START THREAD, thread chose to start" %
                    (__class__.__name__, __name__))
        sync_in_progress[0] = 1
        try:
            # sync every registered slave storage engine against Nuage.
            logger.info("%s::%s: SYNC THREAD, thread start to sync all databases" %
                        (__class__.__name__, __name__))
            for storage_engine_type in storage_engine_list.values():
                for storage_engine in storage_engine_type:
                    storage_engine.sync()
        except Exception as e:
            logger.error("%s::%s: ERROR THREAD, error raised by the thread during sync. Error: %s" %
                         (__class__.__name__, __name__, e))
            sync_queue.pop(0)
            sync_in_progress[0] = 0
        else:
            # Ending normaly
            logger.info("%s::%s: STOP THREAD, thread ended to sync all databases" %
                        (__class__.__name__, __name__))
            # End sync
            # NOTE(review): original indentation was lost in the dump; the
            # cleanup below is placed in the else-branch so the except-branch
            # does not pop/reset twice — confirm against the original source.
            sync_queue.pop(0)
            sync_in_progress[0] = 0

    @staticmethod
    def sync_f5_storage_databases():
        logger.info("%s::%s: synchronize F5 databases" % (__class__.__name__, __name__))
        Generic.sync_storage_databases()
        # TODO change to not sync to PAN
        """
        for storage_engine_type in storage_engine_list.values():
            for storage_engine in storage_engine_type:
                storage_engine.sync()
        """
class ApiNuagePolicyGroupTemplateCreate(Resource):
    """Webhook: a policy-group template was created in Nuage."""

    @staticmethod
    def put():
        # parser_generic is presumably defined later in the module — verify.
        args = parser_generic.parse_args()
        pgt_vsd_id = args['ID']
        pgt_name = args['name']
        ent_vsd_id = args['sourceEnterpriseID']
        dt_vsd_id = args['parentID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load database
        db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id)
        if db_pgt is None:
            # unknown policy group template
            db_dt = nuage_db.get_domain_template(vsd_id=dt_vsd_id)
            if db_dt is None:
                # unknown domain template: our view is stale, rebuild it.
                Generic.reset_nuage_storage_database(dt_vsd_id)
                return "database updated", 201
            else:
                # Domain in db
                # new PolicyGroupTemplate: mirror it under its domain template.
                db_pgt = storage_engine_nuage.NuagePolicyGroupTemplate(vsd_id=pgt_vsd_id,
                                                                       logger=logger)
                db_pgt.name = pgt_name
                db_dt.create_child(db_pgt)
                return "nuage database updated", 201
        else:
            # policy group template already exist
            Generic.log_nuage_storage_engine_already_synchronized(name=pgt_name, vsd_id=pgt_vsd_id)
            return "database already synchronized", 200
class ApiNuagePolicyGroupTemplateUpdate(Resource):
    """Webhook: a policy-group template was renamed/updated in Nuage."""

    @staticmethod
    def put():
        args = parser_generic.parse_args()
        pgt_vsd_id = args['ID']
        pgt_name = args['name']
        dt_vsd_id = args['parentID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load domain in database
        db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id)
        if db_pgt is None:
            # unknown policy group template
            db_dt = nuage_db.get_domain_template(vsd_id=dt_vsd_id)
            if db_dt is None:
                # unknown domain template
                Generic.reset_nuage_storage_database(dt_vsd_id)
                return "database updated", 201
            else:
                # domain Template in db
                logger.info("%s: Unexpected state for policy group template '%s %s', fetch domain template '%s'" %
                            (__class__.__name__, pgt_vsd_id, pgt_name, dt_vsd_id))
                # update db from current config
                db_dt.fetch()
                # load policy_group from Nuage storage database
                db_pgt = storage_engine_nuage.NuagePolicyGroupTemplate(vsd_id=pgt_vsd_id,
                                                                       logger=logger)
                # NOTE(review): the constructor above always returns an object,
                # so this None check can never take the first branch — a
                # re-lookup via nuage_db.get_policy_group_template() after the
                # fetch was probably intended. Confirm before fixing.
                if db_pgt is None:
                    Generic.log_object_not_found_in_nuage(pgt_name, pgt_vsd_id)
                    return "no database update needed", 200
                else:
                    return "database updated", 201
        else:
            # check for name update
            if db_pgt.name != pgt_name:
                # Update Nuage storage database
                logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" %
                            (__class__.__name__, pgt_vsd_id, db_pgt.name, pgt_name))
                db_pgt.name = pgt_name
                return "database updated", 201
            else:
                return "no database update needed", 200
class ApiNuagePolicyGroupTemplateDelete(Resource):
    """Webhook: a policy-group template was deleted in Nuage."""

    @staticmethod
    def put():
        args = parser_generic.parse_args()
        pgt_vsd_id = args['ID']
        pgt_name = args['name']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy group template in database
        db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id)
        if db_pgt is None:
            # Already gone from our view — nothing to do.
            # FIX: return 200 here, matching the "already synchronized" /
            # "no update needed" responses of the sibling Create/Update
            # resources (previously 201 despite no change being made).
            Generic.log_nuage_storage_engine_already_synchronized(name=pgt_name, vsd_id=pgt_vsd_id)
            return "database already synchronized", 200
        else:
            # existing policy group template: drop it from the master database.
            db_pgt.delete()
            logger.info("%s::%s: database updated: name=%s; id=%s" %
                        (__class__.__name__, __name__, pgt_name, pgt_vsd_id))
            return "database updated", 201
class ApiNuagePolicyGroupCreate(Resource):
    """Sensor endpoint for a Nuage policy group CREATE event."""

    @staticmethod
    def put():
        """Register a new policy group and, when possible, link its template.

        Reads the event payload (parser_policygroup), filters on the
        monitored enterprise, then reconciles the Nuage storage database.
        Returns a short status string plus an HTTP status code.
        """
        # get parameter in payload
        args = parser_policygroup.parse_args()
        name = str(args['name'])
        policy_group_id = str(args['policyGroupID'])
        pg_vsd_id = args['ID']
        domain_vsd_id = args['parentID']
        pgt_vsd_id = args['templateID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy_group from Nuage storage database
        db_pg = nuage_db.get_policy_group(vsd_id=pg_vsd_id)
        if db_pg is None:
            # unknown policy group
            db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if db_domain is None:
                # unknown domain: rebuild its subtree when it passes the filter
                if not Generic.sanity_check_domain(domain_vsd_id):
                    return "no database update needed", 200
                else:
                    Generic.reset_nuage_storage_database(domain_vsd_id)
                    Generic.sync_f5_storage_databases()
                    return "database updated", 201
            else:
                # create policy group and fetch
                logger.info("%s::%s: create and fetch policy group: pg_id=%s; pg_name=%s; domain_id=%s" %
                            (__class__.__name__, __name__, policy_group_id, name, domain_vsd_id))
                cur_pg = storage_engine_nuage.NuagePolicyGroup(vsd_id=pg_vsd_id,
                                                               logger=logger
                                                               )
                cur_pg.name = name
                db_domain.create_child(cur_pg)
                # Associate policy_group_template ("null" means no template ref)
                if pgt_vsd_id != "null":
                    # NOTE(review): the else below runs once for EVERY domain
                    # template that does not hold the target template,
                    # re-fetching the whole database each time; a for/else
                    # (fetch only when no template matched) may have been
                    # intended — confirm.
                    for domain_template in nuage_db.domain_templates:
                        if pgt_vsd_id in domain_template.children['policy_group_template'].keys() and \
                                pgt_vsd_id not in cur_pg.associated_objects['policy_group_template'].keys():
                            # known policy_group_template
                            # Create a relation with policy_group_template
                            cur_pg.assign(domain_template.children['policy_group_template'][pgt_vsd_id])
                        else:
                            # Policy Group Template not found
                            # Fetch domain_template
                            nuage_db.fetch()
                # Sync
                Generic.sync_f5_storage_databases()
                return "database updated", 201
        else:
            Generic.log_nuage_storage_engine_already_synchronized(name, pg_vsd_id)
            return "database already synchronized", 200
class ApiNuagePolicyGroupUpdate(Resource):
    """Sensor endpoint for a Nuage policy group UPDATE event."""

    @staticmethod
    def put():
        """Reconcile one policy group (name and attached IP addresses).

        Reads the event payload (parser_policygroup), filters on the
        monitored enterprise, then compares the stored policy group with
        the current Nuage configuration and syncs downstream engines.
        Returns a short status string plus an HTTP status code.
        """
        # get parameter in payload
        args = parser_policygroup.parse_args()
        name = str(args['name'])
        vsd_id = args['ID']
        domain_vsd_id = args['parentID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy_group from Nuage storage database
        pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
        # NOTE(review): when pg_db IS found here, the whole block below is
        # skipped and the method implicitly returns None; the name/IP checks
        # further down only run on the flush+fetch retry path. Confirm the
        # intended indentation against version control.
        if pg_db is None:
            # unknown pg
            domain_db = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if domain_db is None:
                # unknown domain
                # NOTE(review): this passes the policy group's vsd_id, while
                # sibling handlers pass domain_vsd_id — confirm.
                if not Generic.sanity_check_domain(vsd_id):
                    return "no database update needed", 200
                else:
                    # fetch database
                    nuage_db.flush()
                    nuage_db.fetch()
                    # load policy_group from Nuage storage database
                    pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
                    if pg_db is None:
                        Generic.log_object_not_found_in_nuage(name, vsd_id)
                        return "no database update needed", 200
            else:
                # pg in db
                # update db from current config
                # NOTE(review): in this branch pg_db is None (the lookup above
                # failed), so pg_db.fetch() raises AttributeError; perhaps
                # domain_db.fetch() was intended — confirm.
                pg_db.fetch()
                # Sync
                Generic.sync_storage_databases()
                return "database updated", 201
            # check for name update
            if pg_db.name != name:
                # Update Nuage storage database
                logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" %
                            (__class__.__name__, vsd_id, pg_db.name, name))
                pg_db.name = name
                Generic.sync_storage_databases()
                return "database updated", 201
            else:
                # check for associated ip_address update
                # compare ip_address list in current config and database
                # load old ip_address list from database
                old_ip_address_list = set(pg_db.get_ip_address_list())
                # clear associated vPorts
                for vport in list(pg_db.vports):
                    pg_db.detach(vport)
                # fetch from current configuration
                logger.info("%s: fetch policy group: pg_id=%s; pg_name=%s" %
                            (__class__.__name__, vsd_id, name))
                pg_db.fetch()
                # load current ip_address list from database
                cur_ip_address_list = set(pg_db.get_ip_address_list())
                # compare new and current ip_address list
                if cur_ip_address_list == old_ip_address_list:
                    Generic.log_nuage_storage_engine_already_synchronized(name, vsd_id)
                    return "database already synchronized", 200
                else:
                    # log new ip address
                    ip_address_list_to_attach = list(cur_ip_address_list - old_ip_address_list)
                    if len(ip_address_list_to_attach) > 0:
                        logger.info("%s: pg_id=%s ; pg_name=%s ; added ip_address=%s" %
                                    (__class__.__name__, vsd_id, name, ip_address_list_to_attach))
                    # log deleted ip address
                    ip_address_list_to_detach = list(old_ip_address_list - cur_ip_address_list)
                    if len(ip_address_list_to_detach) > 0:
                        logger.info("%s: pg_id=%s ; pg_name=%s ; deleted ip_address=%s" %
                                    (__class__.__name__, vsd_id, name, ip_address_list_to_detach))
                    # Sync
                    Generic.sync_storage_databases()
                    return "database updated", 201
class ApiNuagePolicyGroupUpdateDirectAttach(Resource):
    @staticmethod
    def put():
        """
        Used for unit tests only.
        Same as ApiNuagePolicyGroupUpdate() but the vPort to associate is
        passed directly through the payload's 'vportID' field.
        :return:
        """
        # ToDo error unknown policy group
        payload = parser_policygroup_direct_attach.parse_args()
        pg_name = str(payload['name'])
        pg_id = payload['ID']
        parent_domain_id = payload['parentID']
        enterprise_id = payload['sourceEnterpriseID']
        vport_id = payload['vportID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        pg_db = nuage_db.get_policy_group(vsd_id=pg_id)
        if pg_db is None:
            # Unknown policy group: report whether the domain is known too.
            if nuage_db.get_domain(vsd_id=parent_domain_id) is None:
                return "error, unknown policy group and unknown domain", 404
            return "error, unknown policy group", 404
        # Policy group found: make sure the vPort ends up attached to it.
        if vport_id in pg_db.associated_objects['vport'].keys():
            # Already attached: nothing to change before the sync below.
            pass
        elif vport_id in pg_db.parent.children['vport'].keys():
            # vPort known on the parent domain: attach it to the policy group.
            pg_db.assign(pg_db.parent.children['vport'][vport_id])
        else:
            return "error, unknown vport", 404
        # Propagate the change to the downstream storage engines.
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuagePolicyGroupDelete(Resource):
    """Sensor endpoint: remove a policy group from the Nuage database."""

    @staticmethod
    def put():
        payload = parser_policygroup.parse_args()
        pg_name = str(payload['name'])
        pg_id = payload['ID']
        target = nuage_db.get_policy_group(vsd_id=pg_id)
        if target is None:
            # Already gone: database matches the current Nuage configuration.
            Generic.log_nuage_storage_engine_already_synchronized(name='unknown', vsd_id=pg_id)
            return "database already synchronized", 200
        # Known policy group: drop it and push the change to the F5 engines.
        logger.info("%s: delete policy group: pg_id=%s; pg_name=%s" %
                    (__class__.__name__, pg_id, pg_name))
        target.delete()
        Generic.sync_f5_storage_databases()
        return "database updated", 201
class ApiNuageVminterfaceCreate(Resource):
    """Sensor endpoint: record a new VM interface IP address on its vPort.

    Payload (parser_vminterface): IPAddress, VPortID, domainID.
    Returns a short status string plus an HTTP status code.
    """

    @staticmethod
    def put():
        args = parser_vminterface.parse_args()
        ip_address = args['IPAddress']
        vport_vsd_id = args['VPortID']
        domain_vsd_id = args['domainID']
        # load vport current configuration
        cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
        if cur_vport is not None:
            # known vPort: record the VM interface IP and propagate downstream
            cur_vport.ip_address_list.append(ip_address)
            Generic.sync_storage_databases()
            return "database updated", 201
        # unknown vport: try to resolve it through its parent domain
        db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
        if db_domain is None:
            # unknown domain: ignore it unless it passes the domain filter
            if not Generic.sanity_check_domain(domain_vsd_id):
                return "no database update needed", 200
            # rebuild the domain subtree, then retry the vPort lookup
            Generic.reset_nuage_storage_database(domain_vsd_id)
            cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
            if cur_vport is None:
                Generic.log_object_not_found_in_nuage(name=ip_address, vsd_id=vport_vsd_id)
                return "no database update needed", 200
            # BUGFIX: this path previously fell through and returned None
            # (no HTTP response body); mirror the sibling branch below.
            Generic.sync_storage_databases()
            return "database updated", 201
        # vport unknown but parent domain in db: refresh the domain and retry
        db_domain.fetch()
        cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
        if cur_vport is None:
            Generic.log_object_not_found_in_nuage(name=ip_address, vsd_id=vport_vsd_id)
            return "no database update needed", 200
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageVminterfaceDelete(Resource):
    """Sensor endpoint: drop a VM interface IP address from its vPort."""

    @staticmethod
    def put():
        payload = parser_vminterface.parse_args()
        interface_ip = payload['IPAddress']
        vport_id = payload['VPortID']
        target = nuage_db.get_vport(vsd_id=vport_id)
        if target is None:
            # Unknown vPort: nothing to remove.
            Generic.log_nuage_storage_engine_already_synchronized(name=interface_ip, vsd_id=vport_id)
            return "database already synchronized", 201
        # Re-read the vPort from the current configuration so the stale
        # address disappears, then propagate to the F5 engines.
        target.fetch()
        Generic.sync_f5_storage_databases()
        return "database updated", 201
class ApiNuageVportCreate(Resource):
    """Sensor endpoint: register a new vPort in the Nuage storage database.

    Payload (parser_vport): ID, domainID, name, type.
    Returns a short status string plus an HTTP status code.
    """

    @staticmethod
    def put():
        args = parser_vport.parse_args()
        vsd_id = args['ID']
        domain_vsd_id = args['domainID']
        name = args['name']
        vport_type = args['type']
        # load vport current configuration
        cur_vport = nuage_db.get_vport(vsd_id=vsd_id)
        if cur_vport is not None:
            # known vPort: a CREATE event for an existing object is an error
            return "error, object already exists", 404
        db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
        if db_domain is None:
            # unknown domain: ignore it unless it passes the domain filter
            if not Generic.sanity_check_domain(domain_vsd_id):
                return "no database update needed", 200
            # rebuild the domain subtree, then retry the vPort lookup
            Generic.reset_nuage_storage_database(domain_vsd_id)
            cur_vport = nuage_db.get_vport(vsd_id=vsd_id)
            if cur_vport is None:
                Generic.log_object_not_found_in_nuage(name=name, vsd_id=vsd_id)
                return "no database update needed", 200
            # BUGFIX: this path previously fell through and returned None
            # (no HTTP response body); the reset above did update the
            # database, so report that.
            return "database updated", 201
        # vport unknown but parent domain in db: attach a new vPort object
        db_vport = storage_engine_nuage.NuageVPort(vsd_id=vsd_id,
                                                   vport_type=vport_type,
                                                   logger=logger)
        db_domain.create_child(db_vport)
        return "no database update needed", 200
class ApiNuageVportDelete(Resource):
    """Sensor endpoint: remove a vPort from the Nuage storage database."""

    @staticmethod
    def put():
        payload = parser_vport.parse_args()
        vport_id = payload['ID']
        vport_name = payload['name']
        target = nuage_db.get_vport(vsd_id=vport_id)
        if target is None:
            # Unknown vPort: nothing to delete.
            Generic.log_nuage_storage_engine_already_synchronized(name=vport_name, vsd_id=vport_id)
            return "database already synchronized", 201
        target.delete()
        return "no database update needed", 200
class ApiNuageDomainTemplateCreate(Resource):
    """Sensor endpoint: register a new Nuage domain template."""

    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        if nuage_db.get_domain_template(vsd_id=template_id) is not None:
            # Template already tracked: nothing to create.
            Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
            return "Domain_template already exist in database", 200
        # Unknown template: build it and hook it under the database root.
        new_template = storage_engine_nuage.NuageDomainTemplate(vsd_id=template_id,
                                                                domain_type='domaintemplate',
                                                                logger=logger)
        new_template.name = template_name
        nuage_db.create_child(new_template)
        return "nuage database updated", 201
class ApiNuageDomainTemplateUpdate(Resource):
    """Sensor endpoint: refresh a Nuage domain template after an UPDATE event."""

    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        known = nuage_db.get_domain_template(vsd_id=template_id)
        if known is None:
            # Unknown template: rebuild that part of the database and sync.
            Generic.reset_nuage_storage_database(template_id)
            Generic.sync_storage_databases()
            return "database updated", 201
        # Known template: re-read it from the current configuration.
        known.fetch()
        logger.info("%s: database updated: name=%s; id=%s" %
                    (__class__.__name__, template_name, template_id))
        return "database updated", 201
class ApiNuageDomainTemplateDelete(Resource):
    """Sensor endpoint: remove a Nuage domain template."""

    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        known = nuage_db.get_domain_template(vsd_id=template_id)
        if known is None:
            # Unknown template: nothing to delete.
            Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
            return "database already synchronized", 201
        known.delete()
        logger.info("%s::%s: database updated: name=%s; id=%s" %
                    (__class__.__name__, __name__, template_name, template_id))
        return "database updated", 201
class ApiNuageDomainCreate(Resource):
    """Sensor endpoint: register a new Nuage domain.

    Payload (parser_domain): ID, name, templateID, sourceEnterpriseID.
    Returns a short status string plus an HTTP status code.
    """

    @staticmethod
    def put():
        args = parser_domain.parse_args()
        domain_vsd_id = args['ID']
        domain_name = args['name']
        domain_template_vsd_id = args['templateID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # Domain belong to db's enterprise
        # load Domain current configuration
        cur_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
        if cur_domain is None:
            # new Domain
            logger.info("%s::%s: create new domain: "
                        "id=%s; name=%s; enterprise_id=%s" %
                        (__class__.__name__, __name__, domain_vsd_id, domain_name, ent_vsd_id))
            db_domain = storage_engine_nuage.NuageDomain(vsd_id=domain_vsd_id,
                                                         domain_type='domain',
                                                         logger=logger
                                                         )
            db_domain.name = domain_name
            nuage_db.create_child(db_domain)
            # Assign the domain template when it is already known; otherwise
            # re-read the domain from the current configuration.
            # BUGFIX: direct indexing raised KeyError for an unknown template
            # id, which made the fetch() fallback unreachable — use .get().
            db_dt = nuage_db.children['domain_template'].get(domain_template_vsd_id)
            if db_dt is not None:
                db_domain.assign(db_dt)
            else:
                db_domain.fetch()
            return "no database update needed", 200
        else:
            # Domain already exist
            Generic.log_nuage_storage_engine_already_synchronized(name=domain_name, vsd_id=domain_vsd_id)
            return "database already synchronized", 200
class ApiNuageDomainUpdate(Resource):
    """Sensor endpoint: refresh a Nuage domain after an UPDATE event."""

    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        domain_id = payload['ID']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        known = nuage_db.get_domain(vsd_id=domain_id)
        if known is not None:
            # Known domain: re-read it and propagate downstream.
            known.fetch()
            Generic.sync_storage_databases()
            return "database updated", 201
        # Unknown domain: only rebuild when it matches the domain filter.
        if not Generic.sanity_check_domain(domain_id):
            return "no database update needed", 200
        Generic.reset_nuage_storage_database(domain_id)
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageDomainDelete(Resource):
    """Sensor endpoint: remove a Nuage domain."""

    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        domain_id = payload['ID']
        domain_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events outside the monitored enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        known = nuage_db.get_domain(vsd_id=domain_id)
        if known is None:
            # Unknown domain: nothing to delete.
            Generic.log_nuage_storage_engine_already_synchronized(domain_name, domain_id)
            return "database already synchronized", 201
        known.delete()
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageDbDump(Resource):
    """Dump the whole Nuage storage database as JSON."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        snapshot = nuage_db.dump_json_format()
        return snapshot, 200
class ApiNuageDbFetch(Resource):
    """Re-read the whole Nuage storage database from the current configuration."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        nuage_db.fetch()
        return "OK", 200
class ApiNuageDbFlush(Resource):
    """Empty the Nuage storage database."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        nuage_db.flush()
        return "OK", 200
class ApiNuageDbDomainDump(Resource):
    """Dump one Nuage domain (looked up by name) as JSON."""

    @staticmethod
    def get(domain_name):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        return target.dump_json_format(), 200
class ApiNuageDbDomainGet(Resource):
    """Return one Nuage domain (looked up by name) as JSON."""

    @staticmethod
    def get(domain_name):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        return target.get_json_format(), 200
class ApiNuageDbDomainFetch(Resource):
    """Re-read one Nuage domain (looked up by name) from the configuration."""

    @staticmethod
    def get(domain_name):
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        target.fetch()
        return "OK", 200
class ApiNuageDbPolicyGroupDump(Resource):
    """Dump one policy group (domain + policy group name) as JSON."""

    @staticmethod
    def get(domain_name, policy_group_name):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_policy_group(domain_name, policy_group_name)
        if target is None:
            return "unknown policy_group", 200
        return target.dump_json_format(), 200
class ApiNuageDbPolicyGroupGet(Resource):
    """Return one policy group (domain + policy group name) as JSON."""

    @staticmethod
    def get(domain_name, policy_group_name):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_policy_group(domain_name, policy_group_name)
        if target is None:
            return "unknown policy_group", 200
        return target.get_json_format(), 200
class ApiNuageDbPolicyGroupFetch(Resource):
    """Re-read one policy group (domain + policy group name)."""

    @staticmethod
    def get(domain_name, policy_group_name):
        target = nuage_db.get_policy_group(domain_name, policy_group_name)
        if target is None:
            return "unknown policy_group", 200
        target.fetch()
        return "OK", 200
class ApiNuageDbPolicyGroupTemplateIpAddresses(Resource):
    """Return the IP address list attached to one policy group template."""

    @staticmethod
    def get(domain_template_name, policy_group_template_name):
        # BUGFIX: look the template object up first (as ApiPanFeed/ApiF5Feed
        # do); the previous code fetched the address list and then called
        # get_ip_address_list() on that result again.
        policy_group_template = nuage_db.get_policy_group_template(
            domain_template_name=domain_template_name,
            policy_group_template_name=policy_group_template_name)
        if policy_group_template is None:
            return "unknown policy_group", 200
        return policy_group_template.get_ip_address_list(), 200
class ApiNuageDbIpPolicyGroupMappings(Resource):
    """Return the full IP address -> policy group mapping table."""

    @staticmethod
    def get():
        mapping = nuage_db.get_ip_policy_group_mapping()
        return mapping, 200
class ApiNuageDbIpPolicyGroupMapping(Resource):
    """Return the policy group mapping for one IP address."""

    @staticmethod
    def get(ip_address):
        mapping = nuage_db.get_ip_policy_group_mapping(ip_address_filter=ip_address)
        return mapping, 200
class ApiPanDbSync(Resource):
    """Synchronize the PAN storage database against the Nuage master."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        pan_db.sync()
        return "OK", 200
class ApiPanDbDump(Resource):
    """Dump the whole PAN storage database as JSON."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        snapshot = pan_db.dump_json_format()
        return snapshot, 200
class ApiPanDbFetch(Resource):
    """Re-read the PAN storage database from the managed devices."""

    @staticmethod
    def get():
        pan_db.fetch()
        return "OK", 200
class ApiPanDbDeviceDump(Resource):
    """Dump one PAN device from the storage database as JSON."""

    @staticmethod
    def get(host):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = pan_db.get_host(host)
        if target is None:
            return "unknown host", 200
        return target.dump_json_format(), 200
class ApiPanDbDeviceGet(Resource):
    """Return one PAN device from the storage database as JSON."""

    @staticmethod
    def get(host):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = pan_db.get_host(host)
        if target is None:
            return "unknown host", 200
        return target.get_json_format(), 200
class ApiPanDbDeviceFetch(Resource):
    """Re-read one PAN device from its live configuration."""

    @staticmethod
    def get(host):
        target = pan_db.get_host(host)
        if target is None:
            return "unknown host", 200
        target.fetch()
        return "OK", 200
class ApiPanDbVSysDump(Resource):
    """Dump one PAN vsys from the storage database as JSON."""

    @staticmethod
    def get(host, vsys_id):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = pan_db.get_vsys(host, vsys_id)
        if target is None:
            return "unknown vsys", 200
        return target.dump_json_format(), 200
class ApiPanDbVSysGet(Resource):
    """Return one PAN vsys from the storage database as JSON."""

    @staticmethod
    def get(host, vsys_id):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = pan_db.get_vsys(host, vsys_id)
        if target is None:
            return "unknown vsys", 200
        return target.get_json_format(), 200
class ApiPanDbVSysFetch(Resource):
    """Re-read one PAN vsys from its live configuration."""

    @staticmethod
    def get(host, vsys_id):
        target = pan_db.get_vsys(host, vsys_id)
        if target is None:
            return "unknown host or vsys", 200
        target.fetch()
        return "OK", 200
class ApiPanFeed(Resource):
    """Serve a policy group template's IP list as a PAN External Dynamic List.

    The feed list name encodes its source as
    <enterprise_name>--<domain_tpl_name>--<pg_tpl_name>.
    """

    @staticmethod
    def get(feed_list_name):
        # Render this endpoint as plain text (EDL format).
        api.representations.update({'application/json': output_txt_response_format})
        # Decode the feed list name; reject malformed names instead of
        # letting the tuple unpacking raise an unhandled 500.
        try:
            enterprise_name, domain_tpl_name, pg_tpl_name = feed_list_name.split("--")
        except ValueError:
            abort(404, message="feed list name {} is malformed".format(feed_list_name))
        pgt_db = nuage_db.get_policy_group_template(domain_template_name=domain_tpl_name,
                                                    policy_group_template_name=pg_tpl_name)
        if pgt_db is None:
            # BUGFIX: the message had one placeholder for two format() args,
            # silently dropping the policy group template name.
            abort(404, message="policy group template name {}--{} doesn't exist".format(domain_tpl_name,
                                                                                        pg_tpl_name))
        # get feed list in the storage database format
        return storage_engine_pan.StorageEnginePan.get_feedlist_format(pgt_db.get_ip_address_list()), 200
class ApiF5DbSync(Resource):
    """Synchronize the F5 storage database against the Nuage master."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        f5_db.sync()
        return "OK", 200
class ApiF5DbDump(Resource):
    """Dump the whole F5 storage database as JSON."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        snapshot = f5_db.dump_json_format()
        return snapshot, 200
class ApiF5DbFetch(Resource):
    """Re-read the F5 storage database from the managed devices."""

    @staticmethod
    def get():
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        f5_db.fetch()
        return "OK", 200
class ApiF5DbDeviceDump(Resource):
    """Dump one F5 device from the storage database as JSON."""

    @staticmethod
    def get(host):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = f5_db.get_host(host)
        if target is None:
            return "unknown host", 200
        return target.dump_json_format(), 200
class ApiF5DbDeviceGet(Resource):
    """Return one F5 device from the storage database as JSON."""

    @staticmethod
    def get(host):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = f5_db.get_host(host)
        if target is None:
            return "unknown host", 200
        return target.get_json_format(), 200
class ApiF5DbDeviceFetch(Resource):
    """Re-read one F5 device from its live configuration."""

    @staticmethod
    def get(host):
        target = f5_db.get_host(host)
        if target is None:
            return "unknown host", 200
        target.fetch()
        return "OK", 200
class ApiF5DbPartitionDump(Resource):
    """Dump one F5 partition from the storage database as JSON."""

    @staticmethod
    def get(host, partition_name):
        # Render this endpoint as JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = f5_db.get_partition(host, partition_name)
        if target is None:
            return "unknown partition", 200
        return target.dump_json_format(), 200
class ApiF5DbPartitionGet(Resource):
    """Return one F5 partition from the storage database as JSON."""

    @staticmethod
    def get(host, partition_name):
        # set output format
        api.representations.update({'application/json': output_json_response_format})
        # BUGFIX: query the F5 database (this endpoint previously queried
        # pan_db) and test the lookup result for None (it previously tested
        # the partition_name parameter, which is never None here).
        partition = f5_db.get_partition(host, partition_name)
        if partition is None:
            return "unknown partition", 200
        return partition.get_json_format(), 200
class ApiF5DbPartitionFetch(Resource):
    """Re-read one F5 partition from its live configuration."""

    @staticmethod
    def get(host, partition_name):
        target = f5_db.get_partition(host, partition_name)
        if target is None:
            return "unknown host or partition", 200
        target.fetch()
        return "OK", 200
class ApiF5Feed(Resource):
    """Serve a policy group template's IP list as an F5 External Dynamic List.

    The feed list name encodes its source as
    <enterprise_name>--<domain_tpl_name>--<pg_tpl_name>.
    """

    @staticmethod
    def get(feed_list_name):
        # Render this endpoint as plain text (EDL format).
        api.representations.update({'application/json': output_txt_response_format})
        # Decode the feed list name; reject malformed names instead of
        # letting the tuple unpacking raise an unhandled 500.
        try:
            enterprise_name, domain_tpl_name, pg_tpl_name = feed_list_name.split("--")
        except ValueError:
            abort(404, message="feed list name {} is malformed".format(feed_list_name))
        # load objects from db
        pgt_db = nuage_db.get_policy_group_template(domain_template_name=domain_tpl_name,
                                                    policy_group_template_name=pg_tpl_name)
        if pgt_db is None:
            # BUGFIX: the message had one placeholder for two format() args,
            # silently dropping the policy group template name.
            abort(404, message="policy group template name {}--{} doesn't exist".format(domain_tpl_name,
                                                                                        pg_tpl_name))
        # get feed list in the storage database format
        return storage_engine_f5.StorageEngineF5.get_feedlist_format(pgt_db.get_ip_address_list()), 200
class ApiPanFeedSocSimulation(Resource):
    """Return a fixed, simulated SOC feed in PAN EDL format (test helper)."""

    @staticmethod
    def get(feed_list_name):
        # Render this endpoint as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Static, simulated feed content.
        simulated_feed = ["1.1.1.1", "2.2.2.2", "3.3.3.3"]
        return storage_engine_pan.StorageEnginePan.get_feedlist_format(simulated_feed), 200
class ApiF5FeedSocSimulation(Resource):
    """Return a fixed, simulated SOC feed in F5 EDL format (test helper)."""

    @staticmethod
    def get(feed_list_name):
        # Render this endpoint as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Static, simulated feed content.
        simulated_feed = ["1.1.1.1", "2.2.2.2", "3.3.3.3"]
        return storage_engine_f5.StorageEngineF5.get_feedlist_format(simulated_feed), 200
# -------------- API --------------
# listener: Flask app + Flask-RESTful router for the whole state engine.
# Registration order below is kept as-is; each add_resource binds one of the
# Resource classes defined above to its URL rule.
state_engine_listener = Flask(__name__)
api = Api(state_engine_listener)
# resource routing
api.add_resource(ApiHealthcheck, '/healthcheck')
api.add_resource(ApiConfig, '/config')
# Nuage storage engine — sensor event endpoints (called on VSD push events)
api.add_resource(ApiNuagePolicyGroupTemplateCreate, '/sensor/nuage/policygrouptemplate/CREATE')
api.add_resource(ApiNuagePolicyGroupTemplateUpdate, '/sensor/nuage/policygrouptemplate/UPDATE')
api.add_resource(ApiNuagePolicyGroupTemplateDelete, '/sensor/nuage/policygrouptemplate/DELETE')
api.add_resource(ApiNuagePolicyGroupCreate, '/sensor/nuage/policygroup/CREATE')
api.add_resource(ApiNuagePolicyGroupUpdate, '/sensor/nuage/policygroup/UPDATE')
api.add_resource(ApiNuagePolicyGroupUpdateDirectAttach, '/sensor/nuage/policygroup/UPDATE/direct_attach')
api.add_resource(ApiNuagePolicyGroupDelete, '/sensor/nuage/policygroup/DELETE')
api.add_resource(ApiNuageVminterfaceCreate, '/sensor/nuage/vminterface/CREATE')
api.add_resource(ApiNuageVminterfaceDelete, '/sensor/nuage/vminterface/DELETE')
api.add_resource(ApiNuageVportCreate, '/sensor/nuage/vport/CREATE')
api.add_resource(ApiNuageVportDelete, '/sensor/nuage/vport/DELETE')
api.add_resource(ApiNuageDomainTemplateCreate, '/sensor/nuage/domaintemplate/CREATE')
api.add_resource(ApiNuageDomainTemplateUpdate, '/sensor/nuage/domaintemplate/UPDATE')
api.add_resource(ApiNuageDomainTemplateDelete, '/sensor/nuage/domaintemplate/DELETE')
api.add_resource(ApiNuageDomainCreate, '/sensor/nuage/domain/CREATE')
api.add_resource(ApiNuageDomainUpdate, '/sensor/nuage/domain/UPDATE')
api.add_resource(ApiNuageDomainDelete, '/sensor/nuage/domain/DELETE')
# Nuage storage engine — database inspection / maintenance endpoints
api.add_resource(ApiNuageDbDump, '/database/nuage/dump')
api.add_resource(ApiNuageDbFetch, '/database/nuage/fetch')
api.add_resource(ApiNuageDbFlush, '/database/nuage/flush')
api.add_resource(ApiNuageDbDomainDump, '/database/nuage/domain/<domain_name>/dump')
api.add_resource(ApiNuageDbDomainGet, '/database/nuage/domain/<domain_name>/get')
api.add_resource(ApiNuageDbDomainFetch, '/database/nuage/domain/<domain_name>/fetch')
api.add_resource(ApiNuageDbPolicyGroupDump, '/database/nuage/domain/<domain_name>'
                                            '/pg/<policy_group_name>/dump')
api.add_resource(ApiNuageDbPolicyGroupGet, '/database/nuage/domain/<domain_name>'
                                           '/pg/<policy_group_name>/get')
api.add_resource(ApiNuageDbPolicyGroupFetch, '/database/nuage/domain/<domain_name>'
                                             '/pg/<policy_group_name>/fetch')
api.add_resource(ApiNuageDbPolicyGroupTemplateIpAddresses, '/database/nuage/domain_tpl/<domain_template_name>'
                                                           '/pg_tpl/<policy_group_template_name>')
api.add_resource(ApiNuageDbIpPolicyGroupMappings, '/database/nuage/ip_pg_mapping/all')
api.add_resource(ApiNuageDbIpPolicyGroupMapping, '/database/nuage/ip_pg_mapping/<ip_address>')
# PAN storage engine
api.add_resource(ApiPanDbSync, '/database/pan/sync')
api.add_resource(ApiPanDbDump, '/database/pan/dump')
api.add_resource(ApiPanDbFetch, '/database/pan/fetch')
api.add_resource(ApiPanDbDeviceDump, '/database/pan/device/<host>/dump')
api.add_resource(ApiPanDbDeviceGet, '/database/pan/device/<host>/get')
api.add_resource(ApiPanDbDeviceFetch, '/database/pan/device/<host>/fetch')
api.add_resource(ApiPanDbVSysDump, '/database/pan/device/<host>/vsys/<vsys_id>/dump')
api.add_resource(ApiPanDbVSysGet, '/database/pan/device/<host>/vsys/<vsys_id>/get')
api.add_resource(ApiPanDbVSysFetch, '/database/pan/device/<host>/vsys/<vsys_id>/fetch')
api.add_resource(ApiPanFeed, '/database/pan/edl/<feed_list_name>')
api.add_resource(ApiPanFeedSocSimulation, '/database/pan/soc_feed/<feed_list_name>')
# F5 storage engine
api.add_resource(ApiF5DbSync, '/database/f5/sync')
api.add_resource(ApiF5DbDump, '/database/f5/dump')
api.add_resource(ApiF5DbFetch, '/database/f5/fetch')
api.add_resource(ApiF5DbDeviceDump, '/database/f5/device/<host>/dump')
api.add_resource(ApiF5DbDeviceGet, '/database/f5/device/<host>/get')
api.add_resource(ApiF5DbDeviceFetch, '/database/f5/device/<host>/fetch')
api.add_resource(ApiF5DbPartitionDump, '/database/f5/device/<host>/partition/<partition_name>/dump')
api.add_resource(ApiF5DbPartitionGet, '/database/f5/device/<host>/partition/<partition_name>/get')
api.add_resource(ApiF5DbPartitionFetch, '/database/f5/device/<host>/partition/<partition_name>/fetch')
api.add_resource(ApiF5Feed, '/database/f5/edl/<feed_list_name>')
api.add_resource(ApiF5FeedSocSimulation, '/database/f5/soc_feed/<feed_list_name>')
# ---------------------------------------------------------------------------
# Request parsers: one per sensor payload shape.
# ---------------------------------------------------------------------------
# Policy group events (CREATE/UPDATE/DELETE).
parser_policygroup = reqparse.RequestParser()
for _field in ('ID', 'name', 'sourceEnterpriseID', 'parentType', 'parentID',
               'policyGroupID', 'templateID'):
    parser_policygroup.add_argument(_field)
# Policy group direct-attach events (unit tests only).
parser_policygroup_direct_attach = reqparse.RequestParser()
for _field in ('ID', 'name', 'sourceEnterpriseID', 'parentType', 'parentID',
               'vportID'):
    parser_policygroup_direct_attach.add_argument(_field)
# VM interface events.
parser_vminterface = reqparse.RequestParser()
for _field in ('IPAddress', 'VPortID', 'domainID'):
    parser_vminterface.add_argument(_field)
# Generic events (domain / domain template).
parser_generic = reqparse.RequestParser()
for _field in ('ID', 'name', 'parentID', 'sourceEnterpriseID'):
    parser_generic.add_argument(_field)
# Domain events (also carry the template reference).
parser_domain = reqparse.RequestParser()
for _field in ('ID', 'name', 'parentID', 'templateID', 'sourceEnterpriseID'):
    parser_domain.add_argument(_field)
# vPort events.
parser_vport = reqparse.RequestParser()
for _field in ('ID', 'name', 'domainID', 'type'):
    parser_vport.add_argument(_field)
# Start program
if __name__ == "__main__":
    main()
# NOTE: dataset-extraction artifact — the original line fused the call above
# with a row separator ("| state_engine.py |") and the first line of the
# re-parsed copy of this module, which continues below.
from flask import (Flask, make_response)
from flask_restful import (reqparse, abort, Api, Resource)
import argparse
from storage_engine import storage_engine_nuage, storage_engine_pan, storage_engine_f5
import nuage_pgsync_configuration
import json
import threading
import uuid
from time import sleep
try:
# Try and import Nuage VSPK from the development release
from vspk import v5_0 as vsdk
except ImportError:
# If this fails, import the Nuage VSPK from the pip release
from vspk.vsdk import v5_0 as vsdk
# imported parameters in .ini file :
# Key names used to read the nuage-pgsync .ini configuration file.
# section
ini_general_section = "GENERAL"
# parameters in section
ini_nuage_enterprise = "Enterprise"
# section
ini_nuage_vsd_section = "NUAGE_VSD_CONNECTION"
# parameters in section
ini_nuage_deployment_mode = "DeploymentMode"
ini_nuage_port = "ApiPort"
ini_nuage_host1 = "IpAddr1"
ini_nuage_host2 = "IpAddr2"
ini_nuage_host3 = "IpAddr3"
# section
ini_nuage_api_section = "NUAGE_REST_API_DETAILS"
# parameters in section
ini_nuage_username = 'UserName'
ini_nuage_password = 'Password'
ini_nuage_organization = 'Organization'
ini_nuage_domain_filter = 'DomainFilter'
ini_nuage_pg_filter = 'PolicyGroupFilter'
# section
ini_state_engine_section = "STATE_ENGINE_CONNECTION"
# parameters in section
ini_pgsync_api_port = "StateEnginePort"
ini_pgsync_api_host = "StateEngineAddr"
# section
ini_api_section = "API"
# parameters in section
ini_api_bind_address = "BindAddr"
# section
ini_pan_section = "PAN"
# parameters in section
ini_panorama_deployment_mode = "DeploymentMode"
ini_panorama_host1 = "PanoramaIpAddr1"
ini_panorama_host2 = "PanoramaIpAddr2"
ini_panorama_port = "PanoramaPort"
ini_panorama_username = 'PanoramaUserName'
# NOTE(review): '<PASSWORD>' looks like a credential-scrubbing artifact, not a
# real .ini key name; by symmetry with 'PanoramaUserName' the original key was
# probably 'PanoramaPassword' — confirm against the deployed nuage-pgsync.ini.
ini_panorama_password = '<PASSWORD>'
# section
ini_f5_section = 'F5'
# Per-device numbered keys in this section (read elsewhere):
# IpAddrX
# UserNameX
# PasswordX
def main():
    """Entry point of the state engine.

    Loads the .ini configuration, builds the Nuage master database and the
    PAN/F5 slave databases, performs an initial synchronization and finally
    starts the REST API listener (blocking).
    """
    # Handling arguments (currently stubbed out below)
    """
    args = get_args()
    debug = args.debug
    verbose = args.verbose
    log_file = args.logfile
    ini_file = args.inifile
    """
    # Hard-coded arguments (stub)
    debug = False
    verbose = True
    log_file = 'logs/state_engine.log'
    ini_file = 'nuage-pgsync.ini'
    # Logging settings
    global logger
    logger = setup_logging(debug, verbose, log_file)
    # Load configuration from the .ini file via the vault helper
    global config
    vault_config = nuage_pgsync_configuration.Enterprise(ini_file=ini_file,
                                                         logger=logger)
    vault_config.fetch()
    config = vault_config.config
    # Get parameters from config (.ini file)
    global se
    se = StateEngine()
    # Init phase
    logger.info("Starting state_engine")
    # Step 1. Fetch Nuage storage engine = Master database
    logger.info("step 1. Fetch ip address / policy groups mapping table from Nuage")
    global nuage_db
    nuage_db = storage_engine_nuage.NuageDatabase(nuage_enterprise=se.nuage_enterprise,
                                                  nuage_domain_filter=se.nuage_domain_filter,
                                                  nuage_pg_filter=se.nuage_pg_filter,
                                                  logger=logger
                                                  )
    # FIX: the password argument was a redacted placeholder; use the
    # credential loaded from the configuration.
    nuage_db.import_vsd_pool(name="non-PROD",
                             host_list=se.nuage_host_list,
                             username=se.nuage_username,
                             password=se.nuage_password,
                             organization=se.nuage_organization
                             )
    nuage_db.fetch()
    # Step 2. Fetch other storage engines = Slaves databases
    logger.info("step 2. Fetch storage engines")
    global storage_engine_list
    storage_engine_list = {}
    # PAN db
    global pan_db
    storage_engine_list['PAN'] = []
    pan_db = storage_engine_pan.PanDatabase(nuage_db=nuage_db,
                                            logger=logger)
    storage_engine_list['PAN'].append(pan_db)
    # FIX: the password argument was a redacted placeholder; use the
    # credential loaded from the configuration.
    pan_db.import_panorama_pool(name="non-PROD",
                                host_list=se.panorama_host_list,
                                username=se.panorama_username,
                                password=se.panorama_password
                                )
    # load current configuration from devices managed by PANORAMA
    pan_db.fetch()
    # sync current configuration with Nuage
    pan_db.sync()
    # F5 db (disabled for now; block kept for reference)
    global f5_db
    f5_db = None
    storage_engine_list['F5'] = []
    """
    se.f5_host_list = ["10.5.26.110"]
    f5_db = storage_engine_f5.F5Database(nuage_db=nuage_db,
                                         logger=logger)
    storage_engine_list['F5'].append(f5_db)
    f5_db.import_devices(host_list=se.f5_host_list,
                         username_list=se.f5_username_list,
                         password_list=se.f5_password_list)
    f5_db.fetch()
    f5_db.sync()
    """
    # Step 3. Initialize the queue of syncing requests
    global sync_queue
    sync_queue = []
    global sync_in_progress
    sync_in_progress = [0]
    # Step 4. Start API (log message kept unchanged for log continuity)
    logger.info("step 3. Start API")
    logger.warning("state engine started")
    state_engine_listener.run(debug=debug,
                              host=se.state_engine_host,
                              port=se.state_engine_port,
                              use_reloader=False)
    # use_reloader - whether to reload and fork the process on exception
def get_args():
    """Parse and return the command-line arguments of the state engine.

    Supported flags: -d/--debug, -v/--verbose, -l/--log-file, -p/--ini-file.
    """
    arg_parser = argparse.ArgumentParser(description="Run the state_engine.")
    # Boolean switches (default to False when omitted).
    arg_parser.add_argument('-d', '--debug', dest='debug',
                            action='store_true', required=False,
                            help='Enable debug output')
    arg_parser.add_argument('-v', '--verbose', dest='verbose',
                            action='store_true', required=False,
                            help='Enable verbose output')
    # File options with sensible defaults.
    arg_parser.add_argument('-l', '--log-file', dest='logfile',
                            type=str, default="state_engine.log",
                            required=False, help='File to log to')
    arg_parser.add_argument('-p', '--ini-file', dest='inifile',
                            type=str, default="nuage-pgsync.ini",
                            required=False, help='File that contain parameters')
    return arg_parser.parse_args()
def setup_logging(debug, verbose, log_file):
    """Configure file logging (and the VSPK log level) and return a logger.

    debug wins over verbose; with neither set only warnings are recorded.
    """
    import logging
    from vspk.utils import set_log_level
    # Pick the most detailed level requested.
    level = logging.DEBUG if debug else (logging.INFO if verbose else logging.WARNING)
    set_log_level(level)
    logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=level)
    return logging.getLogger(__name__)
class StateEngine(object):
    """Runtime configuration of the state engine.

    Defaults are set in ``__init__`` and then overridden by the values read
    from the .ini file (already parsed into the module-level ``config``
    object by ``main``).
    """
    def __init__(self):
        # Initialize Defaults
        self.nuage_organization = 'csp'
        self.nuage_port = '8443'
        # FIX: the original default was redacted to an invalid placeholder;
        # 'csproot' mirrors the default username below -- confirm against the
        # deployment's actual VSD credentials.
        self.nuage_password = 'csproot'
        self.nuage_username = 'csproot'
        self.nuage_deployment_mode = 'standalone'
        self.nuage_host_list = []
        self.nuage_enterprise = None
        self.nuage_domain_filter = None
        self.nuage_pg_filter = None
        self.state_engine_host = '127.0.0.1'
        self.state_engine_port = '80'
        self.panorama_deployment_mode = 'standalone'
        self.panorama_host_list = []
        self.panorama_port = None
        self.panorama_username = None
        self.panorama_password = None
        self.f5_host_list = []
        self.f5_port = '443'
        self.f5_username_list = []
        self.f5_password_list = []
        # Get attributes from .ini file
        self.parse_file()

    def parse_file(self):
        """Override the defaults with the values found in the .ini file.

        Raises:
            SyntaxError: when a mandatory section or option is missing
                (GENERAL/Enterprise, NUAGE_VSD_CONNECTION).
        """
        logger.info("INI file: get parameters")
        # GENERAL (mandatory)
        if config.has_section(ini_general_section):
            # Enterprise
            if config.has_option(ini_general_section, ini_nuage_enterprise):
                self.nuage_enterprise = config.get(ini_general_section, ini_nuage_enterprise)
            else:
                logger.error("No Enterprise in GENERAL Section")
                raise SyntaxError("No Enterprise in GENERAL Section")
        else:
            logger.error("No GENERAL Section")
            raise SyntaxError("No GENERAL Section")
        # NUAGE_VSD_CONNECTION (mandatory)
        if config.has_section(ini_nuage_vsd_section):
            # ApiPort
            if config.has_option(ini_nuage_vsd_section, ini_nuage_port):
                self.nuage_port = config.get(ini_nuage_vsd_section, ini_nuage_port)
            # DeploymentMode: a cluster uses three VSD addresses, standalone one
            if config.has_option(ini_nuage_vsd_section, ini_nuage_deployment_mode):
                self.nuage_deployment_mode = config.get(ini_nuage_vsd_section, ini_nuage_deployment_mode)
            if self.nuage_deployment_mode == 'cluster':
                logger.info("VSD in Cluster mode, adding all 3 IP's")
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host1))
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host2))
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host3))
            else:
                logger.info("VSD in Standalone mode, adding only one IP")
                self.nuage_host_list.append(config.get(ini_nuage_vsd_section, ini_nuage_host1))
        else:
            logger.error("No VSD's Connection Details Section")
            raise SyntaxError("No VSD's Connection Details Section")
        # NUAGE_REST_API_DETAILS (optional)
        if config.has_section(ini_nuage_api_section):
            # UserName
            if config.has_option(ini_nuage_api_section, ini_nuage_username):
                self.nuage_username = config.get(ini_nuage_api_section, ini_nuage_username)
            # Password
            if config.has_option(ini_nuage_api_section, ini_nuage_password):
                self.nuage_password = config.get(ini_nuage_api_section, ini_nuage_password)
            # Organization
            if config.has_option(ini_nuage_api_section, ini_nuage_organization):
                self.nuage_organization = config.get(ini_nuage_api_section, ini_nuage_organization)
            # DomainFilter ('none' is a keyword meaning "no filter")
            if config.has_option(ini_nuage_api_section, ini_nuage_domain_filter):
                self.nuage_domain_filter = config.get(ini_nuage_api_section, ini_nuage_domain_filter)
                if self.nuage_domain_filter == 'none':
                    self.nuage_domain_filter = None
            # PolicyGroupFilter ('none' is a keyword meaning "no filter")
            if config.has_option(ini_nuage_api_section, ini_nuage_pg_filter):
                self.nuage_pg_filter = config.get(ini_nuage_api_section, ini_nuage_pg_filter)
                if self.nuage_pg_filter == 'none':
                    self.nuage_pg_filter = None
        else:
            logger.warning("No VSD's REST API Details Section")
        # STATE_ENGINE_CONNECTION (optional)
        if config.has_section(ini_state_engine_section):
            # StateEnginePort
            if config.has_option(ini_state_engine_section, ini_pgsync_api_port):
                self.state_engine_port = config.get(ini_state_engine_section, ini_pgsync_api_port)
        else:
            logger.error("No State Engine's Connection Details Section")
        # API (optional)
        if config.has_section(ini_api_section):
            # BindAddr
            if config.has_option(ini_api_section, ini_api_bind_address):
                self.state_engine_host = config.get(ini_api_section, ini_api_bind_address)
        else:
            logger.error("No State Engine's Connection Details Section")
        # PAN (optional)
        if config.has_section(ini_pan_section):
            # PanoramaPort
            if config.has_option(ini_pan_section, ini_panorama_port):
                self.panorama_port = config.get(ini_pan_section, ini_panorama_port)
            # DeploymentMode: a cluster uses two Panorama addresses
            if config.has_option(ini_pan_section, ini_panorama_deployment_mode):
                self.panorama_deployment_mode = config.get(ini_pan_section, ini_panorama_deployment_mode)
            if self.panorama_deployment_mode == 'cluster':
                logger.info("PANORAMA in Cluster mode, adding all 2 IP's")
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host1))
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host2))
            else:
                logger.info("PANORAMA in Standalone mode, adding only one IP")
                self.panorama_host_list.append(config.get(ini_pan_section, ini_panorama_host1))
            # UserName
            if config.has_option(ini_pan_section, ini_panorama_username):
                self.panorama_username = config.get(ini_pan_section, ini_panorama_username)
            # Password
            if config.has_option(ini_pan_section, ini_panorama_password):
                self.panorama_password = config.get(ini_pan_section, ini_panorama_password)
        else:
            logger.warning("No PAN's Connection Details Section")
        # F5 (optional): devices are numbered IpAddrX/UserNameX/PasswordX
        # options; stop at the first missing IpAddrX.
        if config.has_section(ini_f5_section):
            i = 0
            f5_current_device = True
            while f5_current_device:
                i += 1
                ini_f5_current_host = "IpAddr" + str(i)
                ini_f5_current_username = "UserName" + str(i)
                # FIX: this line was redacted to invalid syntax; restored to
                # follow the IpAddrX / UserNameX pattern above.
                ini_f5_current_password = "Password" + str(i)
                # IpAddr
                if config.has_option(ini_f5_section, ini_f5_current_host):
                    self.f5_host_list.append(config.get(ini_f5_section, ini_f5_current_host))
                else:
                    # no more F5 device
                    f5_current_device = False
                    continue
                # UserName
                if config.has_option(ini_f5_section, ini_f5_current_username):
                    self.f5_username_list.append(config.get(ini_f5_section, ini_f5_current_username))
                # Password
                if config.has_option(ini_f5_section, ini_f5_current_password):
                    self.f5_password_list.append(config.get(ini_f5_section, ini_f5_current_password))
        else:
            logger.warning("No F5's Connection Details Section")

    def get_json_format(self):
        """Return the configuration as a JSON-serializable dict.

        Passwords are deliberately excluded (this feeds the /config endpoint).
        """
        data = {}
        data['NUAGE'] = {}
        data['NUAGE']['enterprise'] = self.nuage_enterprise
        data['NUAGE']['organization'] = self.nuage_organization
        data['NUAGE']['port'] = self.nuage_port
        data['NUAGE']['username'] = self.nuage_username
        data['NUAGE']['hosts'] = self.nuage_host_list
        data['NUAGE']['domain_filter'] = self.nuage_domain_filter
        data['NUAGE']['pg_filter'] = self.nuage_pg_filter
        data['API'] = {}
        data['API']['bind_address'] = self.state_engine_host
        data['API']['port'] = self.state_engine_port
        data['PANORAMA'] = {}
        data['PANORAMA']['hosts'] = self.panorama_host_list
        data['PANORAMA']['port'] = self.panorama_port
        data['PANORAMA']['username'] = self.panorama_username
        data['F5'] = {}
        data['F5']['hosts'] = self.f5_host_list
        data['F5']['port'] = self.f5_port
        data['F5']['username'] = self.f5_username_list
        return data
def output_txt_response_format(data, code, headers=None):
    """Render a plain-text Flask response with the given body, status and
    optional extra headers."""
    response = make_response(data, code)
    if headers:
        response.headers.extend(headers)
    return response
def output_json_response_format(data, code, headers=None):
    """Render a JSON Flask response: serialize *data*, apply the status code
    and optional extra headers."""
    response = make_response(json.dumps(data), code)
    if headers:
        response.headers.extend(headers)
    return response
class ApiHealthcheck(Resource):
    """Liveness probe endpoint."""
    @staticmethod
    def get():
        # A static answer is enough: responding at all means the API is up.
        return "OK", 200
class ApiConfig(Resource):
    """Expose the running configuration of the state engine."""
    @staticmethod
    def get():
        # Delegate to the global StateEngine instance built in main().
        return se.get_json_format(), 200
class Generic:
    """Helpers shared by the REST resources: scope checks, Nuage database
    resets and threaded synchronization of the slave storage engines."""
    @staticmethod
    def sanity_check_enterprise(vsd_id):
        """Return True when the event belongs to the enterprise this engine
        manages (i.e. matches the Nuage master database's id)."""
        if vsd_id != nuage_db.id:
            logger.info("%s::%s: object's enterprise is out of scope: enterprise_id=%s" %
                        (__class__.__name__, __name__, vsd_id))
            return False
        else:
            return True
    @staticmethod
    def sanity_check_domain(vsd_id):
        """Return True when the (unknown) domain matches the configured domain
        filter, meaning it should have been in the database and a resync is
        warranted; False when the domain is out of scope."""
        # Fetch the domain's name from the VSD through a transient child that
        # is removed again right after.
        cur_domain = storage_engine_nuage.NuageGenericDomain(vsd_id=vsd_id,
                                                             logger=logger)
        nuage_db.create_child(cur_domain)
        cur_domain.fetch()
        cur_domain_name = cur_domain.name
        cur_domain.delete()
        # FIX: compare against the *configured* filter value. The original
        # code tested the ini option key literal 'DomainFilter', so nearly
        # every domain looked out of scope. A filter of None means "no
        # filter": every fetched domain is in scope.
        if cur_domain_name is None or (se.nuage_domain_filter is not None and
                                       se.nuage_domain_filter not in cur_domain_name):
            # Domain is out of scope
            logger.info("%s::%s: object's domain is out of scope: name=%s; id=%s" %
                        (__class__.__name__, __name__, cur_domain_name, vsd_id))
            return False
        else:
            # Domain should be in database
            logger.error("%s::%s: unknown policy group's domain, reset database: name=%s; id=%s" %
                         (__class__.__name__, __name__, cur_domain_name, vsd_id))
            return True
    @staticmethod
    def reset_nuage_storage_database(vsd_id):
        """Flush and re-fetch the whole Nuage master database (expensive)."""
        logger.info("%s::%s: reset database, expected object to load: %s" % (__class__.__name__, __name__, vsd_id))
        nuage_db.flush()
        nuage_db.fetch()
    @staticmethod
    def log_object_not_found_in_nuage(name, vsd_id):
        # An event announced an object that the VSD no longer reports.
        logger.warning("%s::%s: Object not found in Nuage: name=%s; id=%s" %
                       (__class__.__name__, __name__, name, vsd_id))
    @staticmethod
    def log_nuage_storage_engine_already_synchronized(name, vsd_id):
        # The event carried no new information for the database.
        logger.warning("%s::%s: Nuage storage database already synchronized: name=%s; id=%s" %
                       (__class__.__name__, __name__, name, vsd_id))
    @staticmethod
    def sync_storage_databases():
        """Schedule a sync of all slave storage engines on a worker thread.

        At most two threads live in sync_queue (one running, one waiting);
        further requests are coalesced into the waiting thread's run.
        """
        if len(sync_queue) <= 1:
            # 0 or 1 queued: a new worker thread may be enqueued
            t = threading.Thread(target=Generic.thread_sync_storage_databases, name=str(uuid.uuid4()))
            sync_queue.append(t)
            logger.info("%s::%s: NEW THREAD, database changes will be sync by the new thread in sync_queue: id=%s" %
                        (__class__.__name__, __name__, t.name))
            t.start()
        else:
            # 2 queued: the waiting thread will pick these changes up too
            logger.info("%s::%s: PASS THREAD, sync_queue full: nb=%s" %
                        (__class__.__name__, __name__, len(sync_queue)))
    @staticmethod
    def thread_sync_storage_databases():
        """
        One sync at a time is possible.
        Only 2 threads are in sync_queue: #0 in current sync operation, #1 that will wait for its turn to sync
        :return:
        """
        try:
            # wait in queue while another sync is running
            while len(sync_queue) == 2 and sync_in_progress[0] == 1:
                sleep(1)
                logger.info("%s::%s: WAIT THREAD, current sync in progress, thread is waiting in queue" %
                            (__class__.__name__, __name__))
        except Exception as e:
            logger.error("%s::%s: ERROR THREAD, error raised by the thread in queue. Error: %s" %
                         (__class__.__name__, __name__, e))
            sync_queue.pop(0)
            return
        # Start sync
        logger.info("%s::%s: START THREAD, thread chose to start" %
                    (__class__.__name__, __name__))
        sync_in_progress[0] = 1
        try:
            # sync every registered slave storage engine
            logger.info("%s::%s: SYNC THREAD, thread start to sync all databases" %
                        (__class__.__name__, __name__))
            for storage_engine_type in storage_engine_list.values():
                for storage_engine in storage_engine_type:
                    storage_engine.sync()
        except Exception as e:
            logger.error("%s::%s: ERROR THREAD, error raised by the thread during sync. Error: %s" %
                         (__class__.__name__, __name__, e))
            sync_queue.pop(0)
            sync_in_progress[0] = 0
        else:
            # Ending normally
            logger.info("%s::%s: STOP THREAD, thread ended to sync all databases" %
                        (__class__.__name__, __name__))
            # End sync
            sync_queue.pop(0)
            sync_in_progress[0] = 0
    @staticmethod
    def sync_f5_storage_databases():
        """Synchronize the F5 storage engines.

        Currently delegates to a full sync of every engine.
        """
        logger.info("%s::%s: synchronize F5 databases" % (__class__.__name__, __name__))
        Generic.sync_storage_databases()
        # TODO change to not sync to PAN
        """
        for storage_engine_type in storage_engine_list.values():
            for storage_engine in storage_engine_type:
                storage_engine.sync()
        """
class ApiNuagePolicyGroupTemplateCreate(Resource):
    """Handle VSD push events announcing a new policy group template."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        domain_template_id = payload['parentID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        # Template already present: nothing to do.
        if nuage_db.get_policy_group_template(vsd_id=template_id) is not None:
            Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
            return "database already synchronized", 200
        parent_template = nuage_db.get_domain_template(vsd_id=domain_template_id)
        if parent_template is None:
            # Unknown parent domain template: rebuild the whole database.
            Generic.reset_nuage_storage_database(domain_template_id)
            return "database updated", 201
        # Parent known: register the new policy group template under it.
        new_template = storage_engine_nuage.NuagePolicyGroupTemplate(vsd_id=template_id,
                                                                     logger=logger)
        new_template.name = template_name
        parent_template.create_child(new_template)
        return "nuage database updated", 201
class ApiNuagePolicyGroupTemplateUpdate(Resource):
    """Handle VSD push events updating a policy group template."""
    @staticmethod
    def put():
        args = parser_generic.parse_args()
        pgt_vsd_id = args['ID']
        pgt_name = args['name']
        dt_vsd_id = args['parentID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load the policy group template from the database
        db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id)
        if db_pgt is None:
            # unknown policy group template
            db_dt = nuage_db.get_domain_template(vsd_id=dt_vsd_id)
            if db_dt is None:
                # unknown domain template: full resync
                Generic.reset_nuage_storage_database(dt_vsd_id)
                return "database updated", 201
            else:
                # Domain template in db
                logger.info("%s: Unexpected state for policy group template '%s %s', fetch domain template '%s'" %
                            (__class__.__name__, pgt_vsd_id, pgt_name, dt_vsd_id))
                # update db from current config
                db_dt.fetch()
                # FIX: re-load the template from the refreshed database. The
                # original code constructed a fresh NuagePolicyGroupTemplate
                # here, which can never be None, so the check below was dead
                # and missing templates went undetected.
                db_pgt = nuage_db.get_policy_group_template(vsd_id=pgt_vsd_id)
                if db_pgt is None:
                    Generic.log_object_not_found_in_nuage(pgt_name, pgt_vsd_id)
                    return "no database update needed", 200
                else:
                    return "database updated", 201
        else:
            # check for name update
            if db_pgt.name != pgt_name:
                # Update Nuage storage database
                logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" %
                            (__class__.__name__, pgt_vsd_id, db_pgt.name, pgt_name))
                db_pgt.name = pgt_name
                return "database updated", 201
            else:
                return "no database update needed", 200
class ApiNuagePolicyGroupTemplateDelete(Resource):
    """Handle VSD push events deleting a policy group template."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        stored_template = nuage_db.get_policy_group_template(vsd_id=template_id)
        if stored_template is not None:
            # Known template: remove it from the database.
            stored_template.delete()
            logger.info("%s::%s: database updated: name=%s; id=%s" %
                        (__class__.__name__, __name__, template_name, template_id))
            return "database updated", 201
        # Unknown template: nothing to delete.
        Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
        return "database already synchronized", 201
class ApiNuagePolicyGroupCreate(Resource):
    @staticmethod
    def put():
        """Handle a VSD push event announcing a new policy group.

        Creates the policy group under its parent domain in the Nuage storage
        database (resetting the whole database when the parent is unknown but
        in scope) and synchronizes the slave storage engines.
        """
        # get parameter in payload
        args = parser_policygroup.parse_args()
        name = str(args['name'])
        policy_group_id = str(args['policyGroupID'])
        pg_vsd_id = args['ID']
        domain_vsd_id = args['parentID']
        pgt_vsd_id = args['templateID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise: drop events for other enterprises
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy_group from Nuage storage database
        db_pg = nuage_db.get_policy_group(vsd_id=pg_vsd_id)
        if db_pg is None:
            # unknown policy group
            db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if db_domain is None:
                # unknown domain: ignore when out of filter scope
                if not Generic.sanity_check_domain(domain_vsd_id):
                    return "no database update needed", 200
                else:
                    # in scope but missing: full reset, then propagate
                    Generic.reset_nuage_storage_database(domain_vsd_id)
                    Generic.sync_f5_storage_databases()
                    return "database updated", 201
            else:
                # create policy group and fetch
                logger.info("%s::%s: create and fetch policy group: pg_id=%s; pg_name=%s; domain_id=%s" %
                            (__class__.__name__, __name__, policy_group_id, name, domain_vsd_id))
                cur_pg = storage_engine_nuage.NuagePolicyGroup(vsd_id=pg_vsd_id,
                                                               logger=logger
                                                               )
                cur_pg.name = name
                db_domain.create_child(cur_pg)
                # Associate policy_group_template ("null" means template-less)
                if pgt_vsd_id != "null":
                    # NOTE(review): the else below belongs to the *if* inside
                    # the loop, so nuage_db.fetch() runs once per domain
                    # template that does NOT hold this template id. A for/else
                    # (fetch only when no template matched at all) may have
                    # been intended -- confirm before changing.
                    for domain_template in nuage_db.domain_templates:
                        if pgt_vsd_id in domain_template.children['policy_group_template'].keys() and \
                                pgt_vsd_id not in cur_pg.associated_objects['policy_group_template'].keys():
                            # known policy_group_template
                            # Create a relation with policy_group_template
                            cur_pg.assign(domain_template.children['policy_group_template'][pgt_vsd_id])
                        else:
                            # Policy Group Template not found
                            # Fetch domain_template
                            nuage_db.fetch()
                # Sync
                Generic.sync_f5_storage_databases()
                return "database updated", 201
        else:
            # policy group already known: nothing to create
            Generic.log_nuage_storage_engine_already_synchronized(name, pg_vsd_id)
            return "database already synchronized", 200
class ApiNuagePolicyGroupUpdate(Resource):
    """Handle VSD push events updating a policy group (rename or change of
    its attached vPorts / IP addresses)."""
    @staticmethod
    def put():
        # get parameter in payload
        args = parser_policygroup.parse_args()
        name = str(args['name'])
        vsd_id = args['ID']
        domain_vsd_id = args['parentID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy_group from Nuage storage database
        pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
        if pg_db is None:
            # unknown pg
            domain_db = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if domain_db is None:
                # unknown domain
                # FIX: check the *parent domain's* id against the filter; the
                # original passed the policy group id, unlike the sibling
                # handlers (see ApiNuagePolicyGroupCreate).
                if not Generic.sanity_check_domain(domain_vsd_id):
                    return "no database update needed", 200
                else:
                    # fetch database
                    nuage_db.flush()
                    nuage_db.fetch()
                    # load policy_group from Nuage storage database
                    pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
                    if pg_db is None:
                        Generic.log_object_not_found_in_nuage(name, vsd_id)
                        return "no database update needed", 200
            else:
                # pg unknown but its parent domain is in db
                # FIX: the original called pg_db.fetch() here although pg_db
                # is None (AttributeError). Refresh the parent domain instead
                # and re-load the policy group from the database.
                domain_db.fetch()
                pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
                if pg_db is None:
                    Generic.log_object_not_found_in_nuage(name, vsd_id)
                    return "no database update needed", 200
                # Sync
                Generic.sync_storage_databases()
                return "database updated", 201
        # check for name update
        if pg_db.name != name:
            # Update Nuage storage database
            logger.info("%s: update name: pg_id=%s; old_pg_name=%s; new_pg_name=%s" %
                        (__class__.__name__, vsd_id, pg_db.name, name))
            pg_db.name = name
            Generic.sync_storage_databases()
            return "database updated", 201
        else:
            # check for associated ip_address update
            # compare ip_address list in current config and database
            # load old ip_address list from database
            old_ip_address_list = set(pg_db.get_ip_address_list())
            # clear associated vPorts
            for vport in list(pg_db.vports):
                pg_db.detach(vport)
            # fetch from current configuration
            logger.info("%s: fetch policy group: pg_id=%s; pg_name=%s" %
                        (__class__.__name__, vsd_id, name))
            pg_db.fetch()
            # load current ip_address list from database
            cur_ip_address_list = set(pg_db.get_ip_address_list())
            # compare new and current ip_address list
            if cur_ip_address_list == old_ip_address_list:
                Generic.log_nuage_storage_engine_already_synchronized(name, vsd_id)
                return "database already synchronized", 200
            else:
                # log new ip address
                ip_address_list_to_attach = list(cur_ip_address_list - old_ip_address_list)
                if len(ip_address_list_to_attach) > 0:
                    logger.info("%s: pg_id=%s ; pg_name=%s ; added ip_address=%s" %
                                (__class__.__name__, vsd_id, name, ip_address_list_to_attach))
                # log deleted ip address
                ip_address_list_to_detach = list(old_ip_address_list - cur_ip_address_list)
                if len(ip_address_list_to_detach) > 0:
                    logger.info("%s: pg_id=%s ; pg_name=%s ; deleted ip_address=%s" %
                                (__class__.__name__, vsd_id, name, ip_address_list_to_detach))
                # Sync
                Generic.sync_storage_databases()
                return "database updated", 201
class ApiNuagePolicyGroupUpdateDirectAttach(Resource):
    @staticmethod
    def put():
        """
        Used for unit tests only
        Same as ApiNuagePolicyGroupUpdate() but the associated vPort is already in the 'vport_vsd_id' parameter
        :return:
        """
        # ToDo error unknown policy group
        # get parameter in payload
        args = parser_policygroup_direct_attach.parse_args()
        name = str(args['name'])
        vsd_id = args['ID']
        domain_vsd_id = args['parentID']
        ent_vsd_id = args['sourceEnterpriseID']
        vport_vsd_id = args['vportID']
        # Sanity check on enterprise: drop events for other enterprises
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # load policy_group from Nuage storage database
        pg_db = nuage_db.get_policy_group(vsd_id=vsd_id)
        if pg_db is None:
            # unknown pg: report whether the domain is unknown too
            domain_db = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if domain_db is None:
                # unknown domain
                return "error, unknown policy group and unknown domain", 404
            else:
                return "error, unknown policy group", 404
        else:
            # pg in db
            if vport_vsd_id in pg_db.associated_objects['vport'].keys():
                # already attached vport: nothing to change
                pass
            elif vport_vsd_id in pg_db.parent.children['vport'].keys():
                # existing vport in db and attached to the domain
                vport_db = pg_db.parent.children['vport'][vport_vsd_id]
                # attach vPort to policy group
                pg_db.assign(vport_db)
            else:
                # unknown vport in db
                return "error, unknown vport", 404
        # Sync the slave storage engines (also runs in the no-op case above)
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuagePolicyGroupDelete(Resource):
    """Handle VSD push events deleting a policy group."""
    @staticmethod
    def put():
        # Extract the payload fields.
        payload = parser_policygroup.parse_args()
        pg_name = str(payload['name'])
        pg_id = payload['ID']
        # Look the policy group up in the Nuage storage database.
        stored_pg = nuage_db.get_policy_group(vsd_id=pg_id)
        if stored_pg is None:
            # Nothing stored under this id: databases already agree.
            Generic.log_nuage_storage_engine_already_synchronized(name='unknown', vsd_id=pg_id)
            return "database already synchronized", 200
        # Remove the policy group and propagate the change to the F5 engines.
        logger.info("%s: delete policy group: pg_id=%s; pg_name=%s" %
                    (__class__.__name__, pg_id, pg_name))
        stored_pg.delete()
        Generic.sync_f5_storage_databases()
        return "database updated", 201
class ApiNuageVminterfaceCreate(Resource):
    @staticmethod
    def put():
        """Handle a VSD push event announcing a new VM interface.

        Records the interface's IP address on its vPort in the Nuage storage
        database (refreshing the domain or the whole database when the vPort
        is unknown), then synchronizes the slave storage engines.
        """
        args = parser_vminterface.parse_args()
        ip_address = args['IPAddress']
        vport_vsd_id = args['VPortID']
        domain_vsd_id = args['domainID']
        # load vport current configuration
        cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
        if cur_vport is None:
            # unknown vport
            db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if db_domain is None:
                # unknown domain: ignore when out of filter scope
                if not Generic.sanity_check_domain(domain_vsd_id):
                    return "no database update needed", 200
                else:
                    # in scope: full reset, then retry the lookup
                    Generic.reset_nuage_storage_database(domain_vsd_id)
                    cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
                    if cur_vport is None:
                        Generic.log_object_not_found_in_nuage(name=ip_address, vsd_id=vport_vsd_id)
                        return "no database update needed", 200
                    # NOTE(review): when the vPort IS found after the reset,
                    # control falls out of the handler and returns None (no
                    # explicit status) -- a sync + "database updated" return
                    # may be missing here; confirm.
            else:
                # vport unknown but parent domain in db:
                # refresh the domain, then retry the lookup
                db_domain.fetch()
                cur_vport = nuage_db.get_vport(vsd_id=vport_vsd_id)
                if cur_vport is None:
                    Generic.log_object_not_found_in_nuage(name=ip_address, vsd_id=vport_vsd_id)
                    return "no database update needed", 200
                else:
                    Generic.sync_storage_databases()
                    return "database updated", 201
        else:
            # known vPort
            # add VM interface IP
            cur_vport.ip_address_list.append(ip_address)
            Generic.sync_storage_databases()
            return "database updated", 201
class ApiNuageVminterfaceDelete(Resource):
    """Handle VSD push events deleting a VM interface."""
    @staticmethod
    def put():
        payload = parser_vminterface.parse_args()
        interface_ip = payload['IPAddress']
        vport_id = payload['VPortID']
        # Look the owning vPort up in the Nuage storage database.
        stored_vport = nuage_db.get_vport(vsd_id=vport_id)
        if stored_vport is None:
            # Unknown vPort: nothing to refresh.
            Generic.log_nuage_storage_engine_already_synchronized(name=interface_ip, vsd_id=vport_id)
            return "database already synchronized", 201
        # Refresh the vPort (drops the deleted interface) and propagate to F5.
        stored_vport.fetch()
        Generic.sync_f5_storage_databases()
        return "database updated", 201
class ApiNuageVportCreate(Resource):
    @staticmethod
    def put():
        """Handle a VSD push event announcing a new vPort.

        Registers the vPort under its parent domain in the Nuage storage
        database, resetting the database when the domain is unknown but in
        filter scope.
        """
        args = parser_vport.parse_args()
        vsd_id = args['ID']
        domain_vsd_id = args['domainID']
        name = args['name']
        vport_type = args['type']
        # load vport current configuration
        cur_vport = nuage_db.get_vport(vsd_id=vsd_id)
        if cur_vport is None:
            # unknown vport
            db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
            if db_domain is None:
                # unknown domain: ignore when out of filter scope
                if not Generic.sanity_check_domain(domain_vsd_id):
                    return "no database update needed", 200
                else:
                    # in scope: full reset, then retry the lookup
                    Generic.reset_nuage_storage_database(domain_vsd_id)
                    cur_vport = nuage_db.get_vport(vsd_id=vsd_id)
                    if cur_vport is None:
                        Generic.log_object_not_found_in_nuage(name=name, vsd_id=vsd_id)
                        return "no database update needed", 200
                    # NOTE(review): when the vPort IS found after the reset,
                    # control falls out of the handler and returns None (no
                    # explicit status) -- an explicit return seems missing
                    # here; confirm.
            else:
                # vport unknown but parent domain in db:
                # create the vPort and attach it to the domain
                db_vport = storage_engine_nuage.NuageVPort(vsd_id=vsd_id,
                                                           vport_type=vport_type,
                                                           logger=logger)
                db_domain.create_child(db_vport)
                return "no database update needed", 200
        else:
            # known vPort
            # NOTE(review): 404 for "already exists" is unusual (409 would be
            # conventional), but callers may depend on it -- kept as-is.
            return "error, object already exists", 404
class ApiNuageVportDelete(Resource):
    """Handle VSD push events deleting a vPort."""
    @staticmethod
    def put():
        payload = parser_vport.parse_args()
        vport_id = payload['ID']
        vport_name = payload['name']
        # Look the vPort up in the Nuage storage database.
        stored_vport = nuage_db.get_vport(vsd_id=vport_id)
        if stored_vport is None:
            # Already absent from the database.
            Generic.log_nuage_storage_engine_already_synchronized(name=vport_name, vsd_id=vport_id)
            return "database already synchronized", 201
        # Known vPort: remove it.
        stored_vport.delete()
        return "no database update needed", 200
class ApiNuageDomainTemplateCreate(Resource):
    """Handle VSD push events announcing a new domain template."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        # Template already present: report and leave the database untouched.
        if nuage_db.get_domain_template(vsd_id=template_id) is not None:
            Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
            return "Domain_template already exist in database", 200
        # Register the new domain template in the Nuage storage database.
        new_template = storage_engine_nuage.NuageDomainTemplate(vsd_id=template_id,
                                                                domain_type='domaintemplate',
                                                                logger=logger)
        new_template.name = template_name
        nuage_db.create_child(new_template)
        return "nuage database updated", 201
class ApiNuageDomainTemplateUpdate(Resource):
    """Handle VSD push events updating a domain template."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        stored_template = nuage_db.get_domain_template(vsd_id=template_id)
        if stored_template is not None:
            # Known template: refresh its content from the VSD.
            stored_template.fetch()
            logger.info("%s: database updated: name=%s; id=%s" %
                        (__class__.__name__, template_name, template_id))
            return "database updated", 201
        # Unknown template: rebuild the database and propagate the change.
        Generic.reset_nuage_storage_database(template_id)
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageDomainTemplateDelete(Resource):
    """Handle VSD push events deleting a domain template."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        template_id = payload['ID']
        template_name = payload['name']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        stored_template = nuage_db.get_domain_template(vsd_id=template_id)
        if stored_template is None:
            # Already absent from the database.
            Generic.log_nuage_storage_engine_already_synchronized(name=template_name, vsd_id=template_id)
            return "database already synchronized", 201
        # Known template: remove it.
        stored_template.delete()
        logger.info("%s::%s: database updated: name=%s; id=%s" %
                    (__class__.__name__, __name__, template_name, template_id))
        return "database updated", 201
class ApiNuageDomainCreate(Resource):
    """Handle VSD push events announcing a new domain."""
    @staticmethod
    def put():
        args = parser_domain.parse_args()
        domain_vsd_id = args['ID']
        domain_name = args['name']
        domain_template_vsd_id = args['templateID']
        ent_vsd_id = args['sourceEnterpriseID']
        # Sanity check on enterprise
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # Domain belongs to db's enterprise: look it up in the database
        cur_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
        if cur_domain is None:
            # new Domain
            logger.info("%s::%s: create new domain: "
                        "id=%s; name=%s; enterprise_id=%s" %
                        (__class__.__name__, __name__, domain_vsd_id, domain_name, ent_vsd_id))
            db_domain = storage_engine_nuage.NuageDomain(vsd_id=domain_vsd_id,
                                                         domain_type='domain',
                                                         logger=logger
                                                         )
            db_domain.name = domain_name
            nuage_db.create_child(db_domain)
            # Assign the domain template when it is already known.
            # FIX: use .get() -- the original indexed the dict directly, which
            # raised KeyError for an unknown template id instead of falling
            # back to a full fetch of the new domain.
            domain_template = nuage_db.children['domain_template'].get(domain_template_vsd_id)
            if domain_template is not None:
                db_domain.assign(domain_template)
            else:
                db_domain.fetch()
            return "no database update needed", 200
        else:
            # Domain already exists in the database
            Generic.log_nuage_storage_engine_already_synchronized(name=domain_name, vsd_id=domain_vsd_id)
            return "database already synchronized", 200
class ApiNuageDomainUpdate(Resource):
    """Handle VSD push events updating a domain."""
    @staticmethod
    def put():
        payload = parser_generic.parse_args()
        domain_id = payload['ID']
        enterprise_id = payload['sourceEnterpriseID']
        # Ignore events that belong to another enterprise.
        if not Generic.sanity_check_enterprise(enterprise_id):
            return "no database update needed", 200
        stored_domain = nuage_db.get_domain(vsd_id=domain_id)
        if stored_domain is not None:
            # Known domain: refresh it from the VSD and propagate.
            stored_domain.fetch()
            Generic.sync_storage_databases()
            return "database updated", 201
        # Unknown domain: skip it when out of filter scope, otherwise resync.
        if not Generic.sanity_check_domain(domain_id):
            return "no database update needed", 200
        Generic.reset_nuage_storage_database(domain_id)
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageDomainDelete(Resource):
    """REST endpoint: remove a deleted Nuage domain from the storage db."""
    @staticmethod
    def put():
        args = parser_generic.parse_args()
        domain_vsd_id = args['ID']
        domain_name = args['name']
        ent_vsd_id = args['sourceEnterpriseID']
        # Ignore events for enterprises this instance does not track.
        if not Generic.sanity_check_enterprise(ent_vsd_id):
            return "no database update needed", 200
        # Look the domain up in the local database.
        db_domain = nuage_db.get_domain(vsd_id=domain_vsd_id)
        if db_domain is None:
            # Unknown domain: nothing to delete.
            # BUG FIX: was 201; every other "database already synchronized"
            # reply in this API (Create/Update endpoints) uses 200.
            Generic.log_nuage_storage_engine_already_synchronized(domain_name, domain_vsd_id)
            return "database already synchronized", 200
        # Domain is in the db: delete it and propagate to the other engines.
        db_domain.delete()
        Generic.sync_storage_databases()
        return "database updated", 201
class ApiNuageDbDump(Resource):
    """REST endpoint: dump the entire Nuage storage database as JSON."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        payload = nuage_db.dump_json_format()
        return payload, 200
class ApiNuageDbFetch(Resource):
    """REST endpoint: re-read the whole Nuage storage database."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        nuage_db.fetch()
        return "OK", 200
class ApiNuageDbFlush(Resource):
    """REST endpoint: flush (empty) the Nuage storage database."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        nuage_db.flush()
        return "OK", 200
class ApiNuageDbDomainDump(Resource):
    """REST endpoint: dump one Nuage domain, looked up by name, as JSON."""
    @staticmethod
    def get(domain_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        return target.dump_json_format(), 200
class ApiNuageDbDomainGet(Resource):
    """REST endpoint: return one Nuage domain's state, looked up by name."""
    @staticmethod
    def get(domain_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        return target.get_json_format(), 200
class ApiNuageDbDomainFetch(Resource):
    """REST endpoint: re-read one Nuage domain, looked up by name."""
    @staticmethod
    def get(domain_name):
        target = nuage_db.get_domain(domain_name)
        if target is None:
            return "unknown domain", 200
        target.fetch()
        return "OK", 200
class ApiNuageDbPolicyGroupDump(Resource):
    """REST endpoint: dump one policy group of a Nuage domain as JSON."""
    @staticmethod
    def get(domain_name, policy_group_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        pg = nuage_db.get_policy_group(domain_name, policy_group_name)
        if pg is None:
            return "unknown policy_group", 200
        return pg.dump_json_format(), 200
class ApiNuageDbPolicyGroupGet(Resource):
    """REST endpoint: return one policy group's state for a Nuage domain."""
    @staticmethod
    def get(domain_name, policy_group_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        pg = nuage_db.get_policy_group(domain_name, policy_group_name)
        if pg is None:
            return "unknown policy_group", 200
        return pg.get_json_format(), 200
class ApiNuageDbPolicyGroupFetch(Resource):
    """REST endpoint: re-read one policy group of a Nuage domain."""
    @staticmethod
    def get(domain_name, policy_group_name):
        pg = nuage_db.get_policy_group(domain_name, policy_group_name)
        if pg is None:
            return "unknown policy_group", 200
        pg.fetch()
        return "OK", 200
class ApiNuageDbPolicyGroupTemplateIpAddresses(Resource):
    """REST endpoint: list the IP addresses of a policy group template."""
    @staticmethod
    def get(domain_template_name, policy_group_template_name):
        pg_template = nuage_db.get_policy_group_template_ip_address_list(domain_template_name,
                                                                         policy_group_template_name)
        if pg_template is None:
            return "unknown policy_group", 200
        return pg_template.get_ip_address_list(), 200
class ApiNuageDbIpPolicyGroupMappings(Resource):
    """REST endpoint: return every known IP -> policy-group mapping."""
    @staticmethod
    def get():
        mappings = nuage_db.get_ip_policy_group_mapping()
        return mappings, 200
class ApiNuageDbIpPolicyGroupMapping(Resource):
    """REST endpoint: return the policy-group mapping for a single IP address."""
    @staticmethod
    def get(ip_address):
        mapping = nuage_db.get_ip_policy_group_mapping(ip_address_filter=ip_address)
        return mapping, 200
class ApiPanDbSync(Resource):
    """REST endpoint: synchronize the PAN storage database."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        pan_db.sync()
        return "OK", 200
class ApiPanDbDump(Resource):
    """REST endpoint: dump the entire PAN storage database as JSON."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        payload = pan_db.dump_json_format()
        return payload, 200
class ApiPanDbFetch(Resource):
    """REST endpoint: re-read the whole PAN storage database."""
    @staticmethod
    def get():
        pan_db.fetch()
        return "OK", 200
class ApiPanDbDeviceDump(Resource):
    """REST endpoint: dump one PAN device, looked up by host, as JSON."""
    @staticmethod
    def get(host):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        device = pan_db.get_host(host)
        if device is None:
            return "unknown host", 200
        return device.dump_json_format(), 200
class ApiPanDbDeviceGet(Resource):
    """REST endpoint: return one PAN device's state, looked up by host."""
    @staticmethod
    def get(host):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        device = pan_db.get_host(host)
        if device is None:
            return "unknown host", 200
        return device.get_json_format(), 200
class ApiPanDbDeviceFetch(Resource):
    """REST endpoint: re-read one PAN device, looked up by host."""
    @staticmethod
    def get(host):
        device = pan_db.get_host(host)
        if device is None:
            return "unknown host", 200
        device.fetch()
        return "OK", 200
class ApiPanDbVSysDump(Resource):
    """REST endpoint: dump one PAN vsys (by host and vsys id) as JSON."""
    @staticmethod
    def get(host, vsys_id):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        vsys = pan_db.get_vsys(host, vsys_id)
        if vsys is None:
            return "unknown vsys", 200
        return vsys.dump_json_format(), 200
class ApiPanDbVSysGet(Resource):
    """REST endpoint: return one PAN vsys' state (by host and vsys id)."""
    @staticmethod
    def get(host, vsys_id):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        vsys = pan_db.get_vsys(host, vsys_id)
        if vsys is None:
            return "unknown vsys", 200
        return vsys.get_json_format(), 200
class ApiPanDbVSysFetch(Resource):
    """REST endpoint: re-read one PAN vsys (by host and vsys id)."""
    @staticmethod
    def get(host, vsys_id):
        vsys = pan_db.get_vsys(host, vsys_id)
        if vsys is None:
            return "unknown host or vsys", 200
        vsys.fetch()
        return "OK", 200
class ApiPanFeed(Resource):
    """REST endpoint: serve a PAN External Dynamic List (EDL).

    The feed list name encodes <enterprise_name>--<domain_tpl_name>--<pg_tpl_name>.
    """
    @staticmethod
    def get(feed_list_name):
        # EDLs are served as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Split the composite name; the enterprise part is not used in the lookup.
        enterprise_name, domain_tpl_name, pg_tpl_name = feed_list_name.split("--")
        pgt_db = nuage_db.get_policy_group_template(domain_template_name=domain_tpl_name,
                                                    policy_group_template_name=pg_tpl_name)
        if pgt_db is None:
            # BUG FIX: the original format string had a single placeholder for
            # two arguments, so the policy group template name was silently
            # dropped from the 404 message.
            abort(404, message="policy group template {}--{} doesn't exist".format(domain_tpl_name, pg_tpl_name))
        else:
            # get feed list in the storage database format
            return storage_engine_pan.StorageEnginePan.get_feedlist_format(pgt_db.get_ip_address_list()), 200
class ApiF5DbSync(Resource):
    """REST endpoint: synchronize the F5 storage database."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        f5_db.sync()
        return "OK", 200
class ApiF5DbDump(Resource):
    """REST endpoint: dump the entire F5 storage database as JSON."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        payload = f5_db.dump_json_format()
        return payload, 200
class ApiF5DbFetch(Resource):
    """REST endpoint: re-read the whole F5 storage database."""
    @staticmethod
    def get():
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        f5_db.fetch()
        return "OK", 200
class ApiF5DbDeviceDump(Resource):
    """REST endpoint: dump one F5 device, looked up by host, as JSON."""
    @staticmethod
    def get(host):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        device = f5_db.get_host(host)
        if device is None:
            return "unknown host", 200
        return device.dump_json_format(), 200
class ApiF5DbDeviceGet(Resource):
    """REST endpoint: return one F5 device's state, looked up by host."""
    @staticmethod
    def get(host):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        device = f5_db.get_host(host)
        if device is None:
            return "unknown host", 200
        return device.get_json_format(), 200
class ApiF5DbDeviceFetch(Resource):
    """REST endpoint: re-read one F5 device, looked up by host."""
    @staticmethod
    def get(host):
        device = f5_db.get_host(host)
        if device is None:
            return "unknown host", 200
        device.fetch()
        return "OK", 200
class ApiF5DbPartitionDump(Resource):
    """REST endpoint: dump one F5 partition (by host and name) as JSON."""
    @staticmethod
    def get(host, partition_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        partition = f5_db.get_partition(host, partition_name)
        if partition is None:
            return "unknown partition", 200
        return partition.dump_json_format(), 200
class ApiF5DbPartitionGet(Resource):
    """REST endpoint: return one F5 partition's state (by host and name)."""
    @staticmethod
    def get(host, partition_name):
        # Respond in JSON.
        api.representations.update({'application/json': output_json_response_format})
        # BUG FIX: the original looked the partition up in pan_db (copy-paste
        # from the PAN endpoints) and None-checked `partition_name` instead of
        # the lookup result, so a missing partition raised AttributeError.
        partition = f5_db.get_partition(host, partition_name)
        if partition is None:
            return "unknown partition", 200
        return partition.get_json_format(), 200
class ApiF5DbPartitionFetch(Resource):
    """REST endpoint: re-read one F5 partition (by host and name)."""
    @staticmethod
    def get(host, partition_name):
        partition = f5_db.get_partition(host, partition_name)
        if partition is None:
            return "unknown host or partition", 200
        partition.fetch()
        return "OK", 200
class ApiF5Feed(Resource):
    """REST endpoint: serve an F5 External Dynamic List (EDL).

    The feed list name encodes <enterprise_name>--<domain_tpl_name>--<pg_tpl_name>.
    """
    @staticmethod
    def get(feed_list_name):
        # EDLs are served as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Split the composite name; the enterprise part is not used in the lookup.
        enterprise_name, domain_tpl_name, pg_tpl_name = feed_list_name.split("--")
        # load objects from db
        pgt_db = nuage_db.get_policy_group_template(domain_template_name=domain_tpl_name,
                                                    policy_group_template_name=pg_tpl_name)
        if pgt_db is None:
            # BUG FIX: the original format string had a single placeholder for
            # two arguments, so the policy group template name was silently
            # dropped from the 404 message.
            abort(404, message="policy group template {}--{} doesn't exist".format(domain_tpl_name, pg_tpl_name))
        else:
            # get feed list in the storage database format
            return storage_engine_f5.StorageEngineF5.get_feedlist_format(pgt_db.get_ip_address_list()), 200
class ApiPanFeedSocSimulation(Resource):
    """REST endpoint: serve a fixed, simulated SOC feed in PAN EDL format."""
    @staticmethod
    def get(feed_list_name):
        # EDLs are served as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Static, simulated feed content.
        soc_feed_list = ["1.1.1.1", "2.2.2.2", "3.3.3.3"]
        return storage_engine_pan.StorageEnginePan.get_feedlist_format(soc_feed_list), 200
class ApiF5FeedSocSimulation(Resource):
    """REST endpoint: serve a fixed, simulated SOC feed in F5 EDL format."""
    @staticmethod
    def get(feed_list_name):
        # EDLs are served as plain text.
        api.representations.update({'application/json': output_txt_response_format})
        # Static, simulated feed content.
        soc_feed_list = ["1.1.1.1", "2.2.2.2", "3.3.3.3"]
        return storage_engine_f5.StorageEngineF5.get_feedlist_format(soc_feed_list), 200
# -------------- API --------------
# Flask listener hosting the flask-restful API.
state_engine_listener = Flask(__name__)
api = Api(state_engine_listener)
# Resource routing. NOTE: the reqparse parsers referenced by these resources
# are defined below the routing table; that is safe because they are only
# used at request time, after the whole module has been imported.
api.add_resource(ApiHealthcheck, '/healthcheck')
api.add_resource(ApiConfig, '/config')
# Nuage storage engine
api.add_resource(ApiNuagePolicyGroupTemplateCreate, '/sensor/nuage/policygrouptemplate/CREATE')
api.add_resource(ApiNuagePolicyGroupTemplateUpdate, '/sensor/nuage/policygrouptemplate/UPDATE')
api.add_resource(ApiNuagePolicyGroupTemplateDelete, '/sensor/nuage/policygrouptemplate/DELETE')
api.add_resource(ApiNuagePolicyGroupCreate, '/sensor/nuage/policygroup/CREATE')
api.add_resource(ApiNuagePolicyGroupUpdate, '/sensor/nuage/policygroup/UPDATE')
api.add_resource(ApiNuagePolicyGroupUpdateDirectAttach, '/sensor/nuage/policygroup/UPDATE/direct_attach')
api.add_resource(ApiNuagePolicyGroupDelete, '/sensor/nuage/policygroup/DELETE')
api.add_resource(ApiNuageVminterfaceCreate, '/sensor/nuage/vminterface/CREATE')
api.add_resource(ApiNuageVminterfaceDelete, '/sensor/nuage/vminterface/DELETE')
api.add_resource(ApiNuageVportCreate, '/sensor/nuage/vport/CREATE')
api.add_resource(ApiNuageVportDelete, '/sensor/nuage/vport/DELETE')
api.add_resource(ApiNuageDomainTemplateCreate, '/sensor/nuage/domaintemplate/CREATE')
api.add_resource(ApiNuageDomainTemplateUpdate, '/sensor/nuage/domaintemplate/UPDATE')
api.add_resource(ApiNuageDomainTemplateDelete, '/sensor/nuage/domaintemplate/DELETE')
api.add_resource(ApiNuageDomainCreate, '/sensor/nuage/domain/CREATE')
api.add_resource(ApiNuageDomainUpdate, '/sensor/nuage/domain/UPDATE')
api.add_resource(ApiNuageDomainDelete, '/sensor/nuage/domain/DELETE')
api.add_resource(ApiNuageDbDump, '/database/nuage/dump')
api.add_resource(ApiNuageDbFetch, '/database/nuage/fetch')
api.add_resource(ApiNuageDbFlush, '/database/nuage/flush')
api.add_resource(ApiNuageDbDomainDump, '/database/nuage/domain/<domain_name>/dump')
api.add_resource(ApiNuageDbDomainGet, '/database/nuage/domain/<domain_name>/get')
api.add_resource(ApiNuageDbDomainFetch, '/database/nuage/domain/<domain_name>/fetch')
api.add_resource(ApiNuageDbPolicyGroupDump, '/database/nuage/domain/<domain_name>'
                                            '/pg/<policy_group_name>/dump')
api.add_resource(ApiNuageDbPolicyGroupGet, '/database/nuage/domain/<domain_name>'
                                           '/pg/<policy_group_name>/get')
api.add_resource(ApiNuageDbPolicyGroupFetch, '/database/nuage/domain/<domain_name>'
                                             '/pg/<policy_group_name>/fetch')
api.add_resource(ApiNuageDbPolicyGroupTemplateIpAddresses, '/database/nuage/domain_tpl/<domain_template_name>'
                                                           '/pg_tpl/<policy_group_template_name>')
api.add_resource(ApiNuageDbIpPolicyGroupMappings, '/database/nuage/ip_pg_mapping/all')
api.add_resource(ApiNuageDbIpPolicyGroupMapping, '/database/nuage/ip_pg_mapping/<ip_address>')
# PAN storage engine
api.add_resource(ApiPanDbSync, '/database/pan/sync')
api.add_resource(ApiPanDbDump, '/database/pan/dump')
api.add_resource(ApiPanDbFetch, '/database/pan/fetch')
api.add_resource(ApiPanDbDeviceDump, '/database/pan/device/<host>/dump')
api.add_resource(ApiPanDbDeviceGet, '/database/pan/device/<host>/get')
api.add_resource(ApiPanDbDeviceFetch, '/database/pan/device/<host>/fetch')
api.add_resource(ApiPanDbVSysDump, '/database/pan/device/<host>/vsys/<vsys_id>/dump')
api.add_resource(ApiPanDbVSysGet, '/database/pan/device/<host>/vsys/<vsys_id>/get')
api.add_resource(ApiPanDbVSysFetch, '/database/pan/device/<host>/vsys/<vsys_id>/fetch')
api.add_resource(ApiPanFeed, '/database/pan/edl/<feed_list_name>')
api.add_resource(ApiPanFeedSocSimulation, '/database/pan/soc_feed/<feed_list_name>')
# F5 storage engine
api.add_resource(ApiF5DbSync, '/database/f5/sync')
api.add_resource(ApiF5DbDump, '/database/f5/dump')
api.add_resource(ApiF5DbFetch, '/database/f5/fetch')
api.add_resource(ApiF5DbDeviceDump, '/database/f5/device/<host>/dump')
api.add_resource(ApiF5DbDeviceGet, '/database/f5/device/<host>/get')
api.add_resource(ApiF5DbDeviceFetch, '/database/f5/device/<host>/fetch')
api.add_resource(ApiF5DbPartitionDump, '/database/f5/device/<host>/partition/<partition_name>/dump')
api.add_resource(ApiF5DbPartitionGet, '/database/f5/device/<host>/partition/<partition_name>/get')
api.add_resource(ApiF5DbPartitionFetch, '/database/f5/device/<host>/partition/<partition_name>/fetch')
api.add_resource(ApiF5Feed, '/database/f5/edl/<feed_list_name>')
api.add_resource(ApiF5FeedSocSimulation, '/database/f5/soc_feed/<feed_list_name>')
# parser_policygroup
parser_policygroup = reqparse.RequestParser()
parser_policygroup.add_argument('ID')
parser_policygroup.add_argument('name')
parser_policygroup.add_argument('sourceEnterpriseID')
parser_policygroup.add_argument('parentType')
parser_policygroup.add_argument('parentID')
parser_policygroup.add_argument('policyGroupID')
parser_policygroup.add_argument('templateID')
# parser_policygroup_direct_attach
parser_policygroup_direct_attach = reqparse.RequestParser()
parser_policygroup_direct_attach.add_argument('ID')
parser_policygroup_direct_attach.add_argument('name')
parser_policygroup_direct_attach.add_argument('sourceEnterpriseID')
parser_policygroup_direct_attach.add_argument('parentType')
parser_policygroup_direct_attach.add_argument('parentID')
parser_policygroup_direct_attach.add_argument('vportID')
# parser_vminterface
parser_vminterface = reqparse.RequestParser()
parser_vminterface.add_argument('IPAddress')
parser_vminterface.add_argument('VPortID')
parser_vminterface.add_argument('domainID')
# parser_generic / domain_template
parser_generic = reqparse.RequestParser()
parser_generic.add_argument('ID')
parser_generic.add_argument('name')
parser_generic.add_argument('parentID')
parser_generic.add_argument('sourceEnterpriseID')
# parser_domain
parser_domain = reqparse.RequestParser()
parser_domain.add_argument('ID')
parser_domain.add_argument('name')
parser_domain.add_argument('parentID')
parser_domain.add_argument('templateID')
parser_domain.add_argument('sourceEnterpriseID')
# parser_vport
parser_vport = reqparse.RequestParser()
parser_vport.add_argument('ID')
parser_vport.add_argument('name')
parser_vport.add_argument('domainID')
parser_vport.add_argument('type')
# Start program
if __name__ == "__main__":
    # NOTE(review): the trailing "| 0.515132 | 0.050424 |" on the next line
    # looks like table/export residue fused onto this line — it is not valid
    # Python and should be removed; verify against the original file.
    main() | 0.515132 | 0.050424 |
import argparse
from firebase import firebase
import hnapi
"""
searches the hackernews firebase API for 'whoishiring' submissions that match the given date.
if no date is supplied, it will output the comments from 'whoishiring's latest submission
"""
api = hnapi.HNAPI()
parser = argparse.ArgumentParser(description='Hacker News Job Search')
parser.add_argument('--date', dest='date', type=str, help='month year string, e.g. \'January 2015\'', required=True)
parser.add_argument('--hiring', action='store_true', dest='hiring', help='only search the \'who is hiring\' threads')
parser.add_argument('--hired', action='store_true', dest='hired', help='only search the \'who is looking to be hired\' threads')
parser.add_argument('--freelance', action='store_true', dest='freelance', help='only search the \'freelancer\' threads')
parser.add_argument('--all', action='store_true', dest='all', help='searches all submissions from \'whoishiring\'')
args = parser.parse_args()
def search_list(stories, date):
    """Return the first story whose title contains `date`; None if no match."""
    for story in stories:
        if date in story.title:
            # Python 2 print statement.
            print 'found a match {0}, for title {1}'.format(date, story.title)
            return story
# Buckets of 'whoishiring' submissions, by thread type.
# NOTE(review): hired_list and freelance_list are never populated below, so
# --hired and --freelance can never match anything — verify intent.
submitted_list = []
hiring_list = []
hired_list = []
freelance_list = []
search_story = None
# Walk every submission of the 'whoishiring' account and classify it.
stories = api.get_user('whoishiring').submitted
for story_id in stories:
    story = api.get_item(str(story_id))
    if hasattr(story, 'type') and story.type == 'story':
        submitted_list.append(story)
        if hasattr(story, 'title') and 'who is hiring' in story.title.lower():
            hiring_list.append(story)
# Pick the list to search based on the CLI flags.
if args.all:
    search_story = search_list(submitted_list, args.date)
    # NOTE(review): get_yc_jobs is not defined or imported anywhere in this
    # file, so this line raises NameError; yclist is also unused. Verify
    # against the original source.
    yclist = get_yc_jobs()
elif args.hiring:
    search_story = search_list(hiring_list, args.date)
elif args.hired:
    search_story = search_list(hired_list, args.date)
elif args.freelance:
    search_story = search_list(freelance_list, args.date)
else:
    search_story = search_list(submitted_list, args.date)
if not search_story:
    print "Couldn't find an item in 'whoishiring's submissions matching '{0}'".format(args.date)
    quit(1)
# Print every top-level comment of the matched submission.
comments = api.get_item(str(search_story.id))
print "showing comments in submission {0}".format(search_story.title)
utf_comment = u''
for comment_int in comments.kids:
    try:
        comment_item = api.get_item(str(comment_int))
    except Exception as e:
        # Best-effort: report the failure and keep going with the next comment.
        print e
        print "Got an error trying to get comment id {0}".format(comment_int)
        continue
    if hasattr(comment_item, 'text') and comment_item.text:
        utf_comment = comment_item.text
        print utf_comment.encode("utf-8") #this is to make sure we're still outputting utf-8, even if stdout is being piped | hn_job_search.py |
import argparse
from firebase import firebase
import hnapi
"""
searches the hackernews firebase API for 'whoishiring' submissions that match the given date.
if no date is supplied, it will output the comments from 'whoishiring's latest submission
"""
api = hnapi.HNAPI()
parser = argparse.ArgumentParser(description='Hacker News Job Search')
parser.add_argument('--date', dest='date', type=str, help='month year string, e.g. \'January 2015\'', required=True)
parser.add_argument('--hiring', action='store_true', dest='hiring', help='only search the \'who is hiring\' threads')
parser.add_argument('--hired', action='store_true', dest='hired', help='only search the \'who is looking to be hired\' threads')
parser.add_argument('--freelance', action='store_true', dest='freelance', help='only search the \'freelancer\' threads')
parser.add_argument('--all', action='store_true', dest='all', help='searches all submissions from \'whoishiring\'')
args = parser.parse_args()
def search_list(stories, date):
for story in stories:
if date in story.title:
print 'found a match {0}, for title {1}'.format(date, story.title)
return story
submitted_list = []
hiring_list = []
hired_list = []
freelance_list = []
search_story = None
stories = api.get_user('whoishiring').submitted
for story_id in stories:
story = api.get_item(str(story_id))
if hasattr(story, 'type') and story.type == 'story':
submitted_list.append(story)
if hasattr(story, 'title') and 'who is hiring' in story.title.lower():
hiring_list.append(story)
if args.all:
search_story = search_list(submitted_list, args.date)
yclist = get_yc_jobs()
elif args.hiring:
search_story = search_list(hiring_list, args.date)
elif args.hired:
search_story = search_list(hired_list, args.date)
elif args.freelance:
search_story = search_list(freelance_list, args.date)
else:
search_story = search_list(submitted_list, args.date)
if not search_story:
print "Couldn't find an item in 'whoishiring's submissions matching '{0}'".format(args.date)
quit(1)
comments = api.get_item(str(search_story.id))
print "showing comments in submission {0}".format(search_story.title)
utf_comment = u''
for comment_int in comments.kids:
try:
comment_item = api.get_item(str(comment_int))
except Exception as e:
print e
print "Got an error trying to get comment id {0}".format(comment_int)
continue
if hasattr(comment_item, 'text') and comment_item.text:
utf_comment = comment_item.text
print utf_comment.encode("utf-8") #this is to make sure we're still outputting utf-8, even if stdout is being piped | 0.293202 | 0.08207 |
from tests.helm_template_generator import render_chart
import pytest
from . import supported_k8s_versions
@pytest.mark.parametrize(
    "kube_version",
    supported_k8s_versions,
)
class TestPrometheusNodeExporterDaemonset:
    """Render-level tests for the prometheus-node-exporter DaemonSet template."""
    def test_prometheus_node_exporter_daemonset_default_resources(self, kube_version):
        """With no user values, the chart's built-in resources are rendered."""
        docs = render_chart(
            kube_version=kube_version,
            values={},
            show_only=["charts/prometheus-node-exporter/templates/daemonset.yaml"],
        )
        # show_only should yield exactly one manifest.
        assert len(docs) == 1
        doc = docs[0]
        assert doc["kind"] == "DaemonSet"
        assert doc["metadata"]["name"] == "RELEASE-NAME-prometheus-node-exporter"
        # Index containers by name for direct lookup.
        c_by_name = {
            c["name"]: c for c in doc["spec"]["template"]["spec"]["containers"]
        }
        assert c_by_name["node-exporter"]
        assert c_by_name["node-exporter"]["resources"] == {
            "limits": {"cpu": "100m", "memory": "128Mi"},
            "requests": {"cpu": "10m", "memory": "128Mi"},
        }
    def test_prometheus_node_exporter_daemonset_custom_resources(self, kube_version):
        """User-supplied resources override the chart defaults verbatim."""
        docs = render_chart(
            kube_version=kube_version,
            values={
                "prometheus-node-exporter": {
                    "resources": {
                        "limits": {"cpu": "777m", "memory": "999Mi"},
                        "requests": {"cpu": "666m", "memory": "888Mi"},
                    }
                },
            },
            show_only=["charts/prometheus-node-exporter/templates/daemonset.yaml"],
        )
        assert len(docs) == 1
        doc = docs[0]
        assert doc["kind"] == "DaemonSet"
        assert doc["metadata"]["name"] == "RELEASE-NAME-prometheus-node-exporter"
        c_by_name = {
            c["name"]: c for c in doc["spec"]["template"]["spec"]["containers"]
        }
        assert c_by_name["node-exporter"]
        # NOTE(review): the text after the closing brace on the last line below
        # ("| tests/... | from tests...") looks like export residue fused onto
        # this file and is not valid Python — verify against the original file.
        assert c_by_name["node-exporter"]["resources"] == {
            "limits": {"cpu": "777m", "memory": "999Mi"},
            "requests": {"cpu": "666m", "memory": "888Mi"},
        } | tests/test_prometheus_node_exporter.py | from tests.helm_template_generator import render_chart
import pytest
from . import supported_k8s_versions
@pytest.mark.parametrize(
    "kube_version",
    supported_k8s_versions,
)
class TestPrometheusNodeExporterDaemonset:
    """Render-level tests for the prometheus-node-exporter DaemonSet template.

    NOTE(review): this class is a duplicate of the one defined earlier in this
    file and shadows it — verify whether the duplication is intentional.
    """
    def test_prometheus_node_exporter_daemonset_default_resources(self, kube_version):
        """With no user values, the chart's built-in resources are rendered."""
        docs = render_chart(
            kube_version=kube_version,
            values={},
            show_only=["charts/prometheus-node-exporter/templates/daemonset.yaml"],
        )
        assert len(docs) == 1
        doc = docs[0]
        assert doc["kind"] == "DaemonSet"
        assert doc["metadata"]["name"] == "RELEASE-NAME-prometheus-node-exporter"
        # Index containers by name for direct lookup.
        c_by_name = {
            c["name"]: c for c in doc["spec"]["template"]["spec"]["containers"]
        }
        assert c_by_name["node-exporter"]
        assert c_by_name["node-exporter"]["resources"] == {
            "limits": {"cpu": "100m", "memory": "128Mi"},
            "requests": {"cpu": "10m", "memory": "128Mi"},
        }
    def test_prometheus_node_exporter_daemonset_custom_resources(self, kube_version):
        """User-supplied resources override the chart defaults verbatim."""
        docs = render_chart(
            kube_version=kube_version,
            values={
                "prometheus-node-exporter": {
                    "resources": {
                        "limits": {"cpu": "777m", "memory": "999Mi"},
                        "requests": {"cpu": "666m", "memory": "888Mi"},
                    }
                },
            },
            show_only=["charts/prometheus-node-exporter/templates/daemonset.yaml"],
        )
        assert len(docs) == 1
        doc = docs[0]
        assert doc["kind"] == "DaemonSet"
        assert doc["metadata"]["name"] == "RELEASE-NAME-prometheus-node-exporter"
        c_by_name = {
            c["name"]: c for c in doc["spec"]["template"]["spec"]["containers"]
        }
        assert c_by_name["node-exporter"]
        # NOTE(review): the "| 0.721056 | 0.391406 |" after the closing brace
        # below looks like export residue — not valid Python; verify.
        assert c_by_name["node-exporter"]["resources"] == {
            "limits": {"cpu": "777m", "memory": "999Mi"},
            "requests": {"cpu": "666m", "memory": "888Mi"},
        } | 0.721056 | 0.391406 |
import argparse
from utils.CurlReq import request
from utils.Utils import Utils
from amazon_parser import app_info_parser
from amazon_parser import app_review_parser
from crawler.app_info_crawler import crawler as info_crawler
from crawler.app_review_crawler import crawler as review_crawler
class Worker:
    """Command-line scraper for a single Amazon Appstore app, selected by ASIN.

    Python 2 script: crawls the app's info page and, on request, its reviews,
    and prints the results to stdout.
    """
    def __init__(self):
        """
        Class Constructor
        """
        self.asin = None                 # ASIN of the target app
        self.args = None                 # parsed argparse Namespace
        self.optional_info_args = None   # dict of the info flags the user set
        self.crawl_app_info = True       # scrape the info page by default
        self.crawl_app_reviews = False   # reviews only when --reviews is given
    def get_arguments_parser(self):
        """
        Creates parsing object using argparse module
        """
        parser = argparse.ArgumentParser(description='Scraper / Worker layer \
                                                      of the Amazon Appstore crawler')
        parser.add_argument('asin',
                            type=str,
                            help='ASIN of the app')
        parser.add_argument('--title',
                            action='store_true',
                            help='Get title of the app')
        parser.add_argument('--developer',
                            action='store_true',
                            help='Get developer of the app')
        parser.add_argument('--developer-url',
                            action='store_true',
                            help='Get developer URL of the app')
        parser.add_argument('--developer-info',
                            action='store_true',
                            help='Get developer info. of the app')
        parser.add_argument('--content-rating',
                            action='store_true',
                            help='Get content rating of the app')
        parser.add_argument('--price',
                            action='store_true',
                            help='Get price of the app')
        parser.add_argument('--iap',
                            action='store_true',
                            help='In App Purchase flag of the app')
        parser.add_argument('--release-date',
                            action='store_true',
                            help='Release date of the app')
        parser.add_argument('--overall-rank',
                            action='store_true',
                            help='Overall rank of the app')
        parser.add_argument('--version',
                            action='store_true',
                            help='Get current version of the app')
        parser.add_argument('--size',
                            action='store_true',
                            help='Get size of the app')
        parser.add_argument('--min-os-version',
                            action='store_true',
                            help='Get minimum supported os version of the app')
        parser.add_argument('--total-reviews',
                            action='store_true',
                            help='Get total reviews of the app')
        parser.add_argument('--avg-star-rating',
                            action='store_true',
                            help='Get average star rating of the app')
        parser.add_argument('--star-rating-hist',
                            action='store_true',
                            help='Get star rating histogram of the app')
        parser.add_argument('--category-rank',
                            action='store_true',
                            help='Get categorical rank of the app')
        parser.add_argument('--categories',
                            action='store_true',
                            help='Get all categories of the app')
        parser.add_argument('--icon-url',
                            action='store_true',
                            help='Get icon url of the app')
        parser.add_argument('--permissions',
                            action='store_true',
                            help='Get all permissions of the app')
        parser.add_argument('--description',
                            action='store_true',
                            help='Get description of the app')
        parser.add_argument('--similar-apps',
                            action='store_true',
                            help='Get similar apps of the app')
        parser.add_argument('--reviews',
                            action='store_true',
                            help='Get all reviews of the app')
        parser.add_argument('--app-info',
                            action='store_true',
                            help='Get all info of the app')
        parser.add_argument('--use-proxy',
                            action='store_true',
                            help='Use proxy')
        return parser
    def scrape(self):
        # Crawl the info page and print either every field (no flags given)
        # or only the requested fields; optionally crawl reviews as well.
        # (Python 2 print statements.)
        info_cr = info_crawler(self.asin, self.args.use_proxy)
        review_cr = review_crawler(self.asin, self.args.use_proxy)
        app_info = info_cr.crawl_info_page()
        if len(self.optional_info_args.keys()) == 0 and self.crawl_app_info:
            for item in app_info:
                print "{0} : {1}\n".format(item, app_info[item])
        else:
            for item in self.optional_info_args:
                print "{0} : {1}\n".format(item, app_info[item])
        if self.crawl_app_reviews == True:
            reviews = review_cr.crawl_reviews()
            for review in reviews:
                print review
    def start_worker(self):
        # Parse the CLI and derive which optional info fields were requested.
        args_parser = self.get_arguments_parser()
        args = args_parser.parse_args()
        self.asin = args.asin
        self.args = args
        # Keep only the boolean flags the user actually set to True.
        optional_args = dict(filter(lambda x : x[1] == True, vars(args).items()))
        # use_proxy and reviews are modes, not info fields — strip them out.
        if 'use_proxy' in optional_args.keys():
            del optional_args['use_proxy']
        if 'reviews' in optional_args.keys():
            del optional_args['reviews']
            self.crawl_app_reviews = True
            self.crawl_app_info = False
        self.optional_info_args = optional_args
        self.scrape()
if __name__ == "__main__":
worker = Worker()
worker.start_worker() | Worker.py |
import argparse
from utils.CurlReq import request
from utils.Utils import Utils
from amazon_parser import app_info_parser
from amazon_parser import app_review_parser
from crawler.app_info_crawler import crawler as info_crawler
from crawler.app_review_crawler import crawler as review_crawler
class Worker:
def __init__(self):
"""
Class Constructor
"""
self.asin = None
self.args = None
self.optional_info_args = None
self.crawl_app_info = True
self.crawl_app_reviews = False
def get_arguments_parser(self):
"""
Creates parsing object using argparse module
"""
parser = argparse.ArgumentParser(description='Scraper / Worker layer \
of the Amazon Appstore crawler')
parser.add_argument('asin',
type=str,
help='ASIN of the app')
parser.add_argument('--title',
action='store_true',
help='Get title of the app')
parser.add_argument('--developer',
action='store_true',
help='Get developer of the app')
parser.add_argument('--developer-url',
action='store_true',
help='Get developer URL of the app')
parser.add_argument('--developer-info',
action='store_true',
help='Get developer info. of the app')
parser.add_argument('--content-rating',
action='store_true',
help='Get content rating of the app')
parser.add_argument('--price',
action='store_true',
help='Get price of the app')
parser.add_argument('--iap',
action='store_true',
help='In App Purchase flag of the app')
parser.add_argument('--release-date',
action='store_true',
help='Release date of the app')
parser.add_argument('--overall-rank',
action='store_true',
help='Overall rank of the app')
parser.add_argument('--version',
action='store_true',
help='Get current version of the app')
parser.add_argument('--size',
action='store_true',
help='Get size of the app')
parser.add_argument('--min-os-version',
action='store_true',
help='Get minimum supported os version of the app')
parser.add_argument('--total-reviews',
action='store_true',
help='Get total reviews of the app')
parser.add_argument('--avg-star-rating',
action='store_true',
help='Get average star rating of the app')
parser.add_argument('--star-rating-hist',
action='store_true',
help='Get star rating histogram of the app')
parser.add_argument('--category-rank',
action='store_true',
help='Get categorical rank of the app')
parser.add_argument('--categories',
action='store_true',
help='Get all categories of the app')
parser.add_argument('--icon-url',
action='store_true',
help='Get icon url of the app')
parser.add_argument('--permissions',
action='store_true',
help='Get all permissions of the app')
parser.add_argument('--description',
action='store_true',
help='Get description of the app')
parser.add_argument('--similar-apps',
action='store_true',
help='Get similar apps of the app')
parser.add_argument('--reviews',
action='store_true',
help='Get all reviews of the app')
parser.add_argument('--app-info',
action='store_true',
help='Get all info of the app')
parser.add_argument('--use-proxy',
action='store_true',
help='Use proxy')
return parser
def scrape(self):
    """Crawl the app's info page and print the requested fields; optionally
    crawl and print reviews.

    Reads self.asin, self.args, self.optional_info_args, self.crawl_app_info
    and self.crawl_app_reviews, all of which are set by start_worker().
    """
    info_cr = info_crawler(self.asin, self.args.use_proxy)
    review_cr = review_crawler(self.asin, self.args.use_proxy)
    app_info = info_cr.crawl_info_page()
    # Parenthesized single-argument print works under both Python 2 and 3
    # (the original bare print statements were Python-2 only).
    if not self.optional_info_args and self.crawl_app_info:
        # No specific field flags given: dump everything that was crawled.
        for item in app_info:
            print("{0} : {1}\n".format(item, app_info[item]))
    else:
        # Print only the fields the user asked for on the command line.
        for item in self.optional_info_args:
            print("{0} : {1}\n".format(item, app_info[item]))
    if self.crawl_app_reviews:
        reviews = review_cr.crawl_reviews()
        for review in reviews:
            print(review)
def start_worker(self):
    """Parse the command line, derive crawl flags and start scraping.

    Sets self.asin, self.args, self.optional_info_args,
    self.crawl_app_reviews and self.crawl_app_info for scrape().
    """
    args_parser = self.get_arguments_parser()
    args = args_parser.parse_args()
    self.asin = args.asin
    self.args = args
    # Keep only the boolean flags the user actually switched on.
    optional_args = {k: v for k, v in vars(args).items() if v is True}
    # 'use_proxy' and 'reviews' steer the crawl itself; they are not info
    # fields to be printed, so drop them from the field set.
    if 'use_proxy' in optional_args:
        del optional_args['use_proxy']
    # Default added: scrape() reads this attribute unconditionally and
    # previously raised AttributeError whenever --reviews was absent.
    self.crawl_app_reviews = False
    if 'reviews' in optional_args:
        del optional_args['reviews']
        self.crawl_app_reviews = True
    self.crawl_app_info = False
    self.optional_info_args = optional_args
    self.scrape()
if __name__ == "__main__":
worker = Worker()
worker.start_worker() | 0.566139 | 0.076996 |
import json
import base64
import requests
import urllib.parse as urllib
import xml.etree.ElementTree as eTree
import traceback
import lib
from lib import DEBUG
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from sys import exit
class Service:
    """Base holder for connection settings of a remote HTTP service.

    Reads the ``user``, ``password`` and ``url`` attributes (required) and
    the optional ``ssl_verify`` flag from an XML <settings> element.
    """

    def __init__(self, settings):
        self.user = settings.attrib["user"]
        self.password = settings.attrib["password"]
        self.url = settings.attrib["url"]
        # Verification defaults to off; any value other than the literal
        # string 'false' enables it (idiomatic comparison replaces the
        # original `False if x == 'false' else True` ternary).
        self.ssl_verify = settings.attrib.get("ssl_verify", 'false') != 'false'
        # Kept for subclasses that need extra attributes (e.g. client_id).
        self.settings = settings
class Cherwell(Service):
    """Cherwell REST API client.

    Authenticates during construction (password grant) and keeps an
    access/refresh token pair for subsequent calls.
    """

    def __init__(self, settings):
        super().__init__(settings)
        # Server version at which Cherwell changed its paging semantics.
        self.updatedVersion = settings.attrib.get("updated_page_number_version", "9.7.0")
        form_headers = {
            'accept': "application/json",
            'content-type': "application/x-www-form-urlencoded",
        }
        # Fetch the server's API version (note: this response is not passed
        # through validate_response, matching the original behavior).
        info_response = requests.request(
            "GET", "%s/api/V1/serviceinfo" % (self.url,),
            headers=form_headers, verify=self.ssl_verify)
        info_data = deserialize_json(info_response.content.decode('utf-8'))
        self.currentVersion = info_data['apiVersion']
        credentials = (
            ('password', bytes(self.password, 'utf-8')),
            ('username', self.user),
            ('client_id', settings.attrib["client_id"]),
            ('grant_type', 'password'),
        )
        token_response = requests.request(
            "POST", "%s/token" % (self.url,),
            data=urllib.urlencode(credentials, encoding='latin'),
            headers=form_headers, verify=self.ssl_verify)
        validate_response(token_response)
        token_data = deserialize_json(token_response.content.decode('utf-8'))
        self.access_token = token_data['access_token']
        self.refresh_token = token_data['refresh_token']

    def refresh_access_token(self):
        """Exchange the stored refresh token for a fresh token pair."""
        form_headers = {
            'accept': "application/json",
            'content-type': "application/x-www-form-urlencoded",
        }
        form = (
            ('client_id', self.settings.attrib['client_id']),
            ('grant_type', 'refresh_token'),
            ('refresh_token', self.refresh_token),
        )
        token_response = requests.request(
            "POST", "%s/token" % (self.url,),
            data=urllib.urlencode(form, encoding='latin'),
            headers=form_headers, verify=self.ssl_verify)
        validate_response(token_response)
        token_data = deserialize_json(token_response.content.decode('utf-8'))
        self.access_token = token_data['access_token']
        self.refresh_token = token_data['refresh_token']

    def request(self, path, method, data=(), silent=False, return_serialized=True):
        """Perform an authenticated API call, refreshing the token once on 401.

        Returns the deserialized JSON body (or {} for an empty body) when
        return_serialized is true, otherwise the raw response object.
        """
        def perform_request(path, method, data=()):
            # Headers are rebuilt per attempt so a refreshed token is used.
            auth_headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
                "Authorization": "Bearer {}".format(self.access_token)
            }
            if method == 'GET':
                return requests.get(self.url + path, headers=auth_headers, verify=self.ssl_verify)
            if method == 'POST':
                return requests.post(self.url + path, json.dumps(data), headers=auth_headers, verify=self.ssl_verify)
            if method == 'DELETE':
                return requests.delete(self.url + path, headers=auth_headers, verify=self.ssl_verify)
            return None

        result = {}
        if method not in ('GET', 'POST', 'DELETE'):
            return result
        response = perform_request(path, method, data)
        if response.status_code == 401 and self.refresh_token:
            # Access token expired: reauthorize and retry the call once.
            self.refresh_access_token()
            response = perform_request(path, method, data)
        if not silent:
            validate_response(response)
        if return_serialized:
            if len(response.content):
                result = deserialize_json(response.content.decode())
        else:
            result = response
        return result

    def is_updated_page_number_version(self):
        """True when the server is at or past the newer-paging version."""
        current = [int(i) for i in self.currentVersion.split('.')]
        updated = [int(i) for i in self.updatedVersion.split('.')]
        return current >= updated
class Device42(Service):
    """Device42 REST/DOQL client using HTTP basic authentication."""

    def request(self, path, method, data=(), doql=None):
        """Run a GET (returns parsed JSON) or a DOQL POST (returns CSV text).

        Exits the process when a DOQL query reports an error in its
        first response line.
        """
        credentials = base64.b64encode(
            (self.user + ':' + self.password).encode()).decode()
        headers = {
            'Authorization': 'Basic ' + credentials,
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        result = None
        if method == 'GET':
            response = requests.get(self.url + path, headers=headers, verify=self.ssl_verify)
            validate_response(response)
            result = deserialize_json(response.content.decode())
        if method == 'POST' and doql is not None:
            response = requests.post(
                self.url + path,
                headers=headers,
                verify=self.ssl_verify,
                data={"query": doql, "header": "yes"}
            )
            validate_response(response)
            result = response.text
            # DOQL errors come back as CSV whose first line carries 'Error:'.
            first_line = result.split('\n')[0]
            if 'error:' in first_line.lower():
                print('Error in DOQL query:', first_line)
                exit(1)
        return result
def deserialize_json(s):
    """Deserialize a JSON document, printing diagnostics and re-raising on
    failure (verbose output only in DEBUG mode)."""
    try:
        parsed = json.loads(s)
    except Exception as err:
        if not DEBUG:
            print('Error upon deserialization JSON')
        else:
            print('Error upon deserialization JSON:', str(err))
            print('Source:', str(s))
            traceback.print_stack()
        raise err
    return parsed
def validate_response(response):
    """Abort the process (exit code 1) when the HTTP response is an error.

    Returns None for a successful response; in DEBUG mode dumps the
    request/response pair before exiting.
    """
    try:
        response.raise_for_status()
    except Exception as err:
        print(err)
        if not DEBUG:
            print(response.text)
        else:
            # Dump both sides of the exchange to ease troubleshooting.
            request_state = {
                attr: getattr(response.request, attr, None)
                for attr in ['url', 'method', 'headers', 'body']
            }
            print('Request:', request_state)
            print('Response:', response.__getstate__())
            traceback.print_stack()
        exit(1)
def init_services(settings):
    """Build the API clients for both endpoints from the <settings> element."""
    cherwell = Cherwell(settings.find('cherwell'))
    device42 = Device42(settings.find('device42'))
    return {'cherwell': cherwell, 'device42': device42}
def task_execute(task, services):
    """Execute one <task> from mapping.xml, syncing data between the
    resource service and the target service."""
    print('Execute task:', task.attrib['description'])
    _resource = task.find('api/resource')
    _target = task.find('api/target')
    # The resource side decides which client is the source vs the target.
    if _resource.attrib['target'] == 'cherwell':
        resource_api, target_api = services['cherwell'], services['device42']
    else:
        resource_api, target_api = services['device42'], services['cherwell']
    method = _resource.attrib['method']
    doql = _resource.attrib.get('doql')
    source_url = _resource.attrib['path']
    if _resource.attrib.get("extra-filter"):
        source_url += _resource.attrib.get("extra-filter") + "&"
    # Affinity-group tasks have their own flow and always require DOQL.
    if task.attrib.get('type') == 'affinity_group':
        if not doql:
            print("The 'doql' attribute in <resource> is required for this task")
            exit(1)
        configuration_items = task.findall('configuration-item')
        reset_connections = task.attrib.get('reset-connections') == 'true'
        source = resource_api.request(source_url, method, doql=doql)
        lib.affinity_group_from_d42(
            source,
            _target,
            _resource,
            target_api,
            resource_api,
            configuration_items,
            reset_connections
        )
        return True
    mapping = task.find('mapping')
    configuration_item = task.find('configuration-item').attrib['bus-ob-id']
    if _target.attrib.get('delete'):
        # Pure deletion task: no mapping pass needed.
        lib.delete_objects_from_server(_target, target_api, configuration_item)
        return
    use_doql = doql is not None
    if use_doql:
        source = resource_api.request(source_url, method, doql=doql)
    else:
        source = resource_api.request(source_url, method)
    lib.from_d42(
        source, mapping,
        _target, _resource,
        target_api, resource_api,
        configuration_item,
        doql=use_doql
    )
print('Running...')
# Load mapping
config = eTree.parse('mapping.xml')
meta = config.getroot()
# Init transports services
services = init_services(meta.find('settings'))
# Parse tasks
tasks = meta.find('tasks')
for task in tasks:
if task.attrib['enable'] == 'true':
task_execute(task, services) | sync.py | import json
import base64
import requests
import urllib.parse as urllib
import xml.etree.ElementTree as eTree
import traceback
import lib
from lib import DEBUG
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from sys import exit
class Service:
def __init__(self, settings):
self.user = settings.attrib["user"]
self.password = settings.attrib["password"]
self.url = settings.attrib["url"]
ssl_verify = settings.attrib.get("ssl_verify", 'false')
self.ssl_verify = False if ssl_verify == 'false' else True
self.settings = settings
class Cherwell(Service):
def __init__(self, settings):
super().__init__(settings)
self.updatedVersion = settings.attrib.get("updated_page_number_version", "9.7.0")
headers = {
'accept': "application/json",
'content-type': "application/x-www-form-urlencoded",
}
url = "%s/api/V1/serviceinfo" % (self.url,)
response = requests.request("GET", url, headers=headers, verify=self.ssl_verify)
response_data = deserialize_json(response.content.decode('utf-8'))
self.currentVersion = response_data['apiVersion']
data = (
('password', bytes(self.password, 'utf-8')),
('username', self.user),
('client_id', settings.attrib["client_id"]),
('grant_type', 'password'),
)
payload = urllib.urlencode(data, encoding='latin')
url = "%s/token" % (self.url,)
response = requests.request("POST", url, data=payload, headers=headers, verify=self.ssl_verify)
validate_response(response)
response_data = deserialize_json(response.content.decode('utf-8'))
self.access_token = response_data['access_token']
self.refresh_token = response_data['refresh_token']
def refresh_access_token(self):
headers = {
'accept': "application/json",
'content-type': "application/x-www-form-urlencoded",
}
data = (
('client_id', self.settings.attrib['client_id']),
('grant_type', 'refresh_token'),
('refresh_token', self.refresh_token),
)
payload = urllib.urlencode(data, encoding='latin')
url = "%s/token" % (self.url,)
response = requests.request("POST", url, data=payload, headers=headers, verify=self.ssl_verify)
validate_response(response)
response_data = deserialize_json(response.content.decode('utf-8'))
self.access_token = response_data['access_token']
self.refresh_token = response_data['refresh_token']
def request(self, path, method, data=(), silent=False, return_serialized=True):
def perform_request(path, method, data=()):
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer {}".format(self.access_token)
}
response = None
if method == 'GET':
response = requests.get(self.url + path, headers=headers, verify=self.ssl_verify)
elif method == 'POST':
response = requests.post(self.url + path, json.dumps(data), headers=headers, verify=self.ssl_verify)
elif method == 'DELETE':
response = requests.delete(self.url + path, headers=headers, verify=self.ssl_verify)
return response
result = {}
if method not in ('GET', 'POST', 'DELETE'):
return result
response = perform_request(path, method, data)
if response.status_code == 401 and self.refresh_token:
# reauthorize
self.refresh_access_token()
# run request again
response = perform_request(path, method, data)
if not silent:
validate_response(response)
if return_serialized:
if len(response.content):
result = deserialize_json(response.content.decode())
else:
result = response
return result
def is_updated_page_number_version(self):
currentVersion = [int(i) for i in self.currentVersion.split('.')]
updatedVersion = [int(i) for i in self.updatedVersion.split('.')]
if currentVersion >= updatedVersion:
return True
return False
class Device42(Service):
def request(self, path, method, data=(), doql=None):
headers = {
'Authorization': 'Basic ' + base64.b64encode((self.user + ':' + self.password).encode()).decode(),
'Content-Type': 'application/x-www-form-urlencoded'
}
result = None
if method == 'GET':
response = requests.get(self.url + path, headers=headers, verify=self.ssl_verify)
validate_response(response)
result = deserialize_json(response.content.decode())
if method == 'POST' and doql is not None:
payload = {
"query": doql,
"header": "yes"
}
response = requests.post(
self.url + path,
headers=headers,
verify=self.ssl_verify,
data=payload
)
validate_response(response)
result = response.text
# validate DOQL response
headers = result.split('\n')[0]
if 'error:' in headers.lower():
print('Error in DOQL query:', headers)
exit(1)
return result
def deserialize_json(s):
try:
return json.loads(s)
except Exception as err:
if DEBUG:
print('Error upon deserialization JSON:', str(err))
print('Source:', str(s))
traceback.print_stack()
else:
print('Error upon deserialization JSON')
raise err
def validate_response(response):
try:
response.raise_for_status()
except Exception as err:
print(err)
if DEBUG:
# show states of request and response
request_state = dict(
(attr, getattr(response.request, attr, None))
for attr in ['url', 'method', 'headers', 'body']
)
print('Request:', request_state)
print('Response:', response.__getstate__())
traceback.print_stack()
else:
print(response.text)
exit(1)
def init_services(settings):
return {
'cherwell': Cherwell(settings.find('cherwell')),
'device42': Device42(settings.find('device42'))
}
def task_execute(task, services):
print('Execute task:', task.attrib['description'])
_resource = task.find('api/resource')
_target = task.find('api/target')
if _resource.attrib['target'] == 'cherwell':
resource_api = services['cherwell']
target_api = services['device42']
else:
resource_api = services['device42']
target_api = services['cherwell']
method = _resource.attrib['method']
doql = _resource.attrib.get('doql')
source_url = _resource.attrib['path']
if _resource.attrib.get("extra-filter"):
source_url += _resource.attrib.get("extra-filter") + "&"
# source will contain the objects from the _resource endpoint
if task.attrib.get('type') == 'affinity_group':
configuration_items = task.findall('configuration-item')
if doql:
reset_connections = task.attrib.get('reset-connections') == 'true'
source = resource_api.request(source_url, method, doql=doql)
lib.affinity_group_from_d42(
source,
_target,
_resource,
target_api,
resource_api,
configuration_items,
reset_connections
)
return True
else:
print("The 'doql' attribute in <resource> is required for this task")
exit(1)
mapping = task.find('mapping')
configuration_item = task.find('configuration-item').attrib['bus-ob-id']
if _target.attrib.get('delete'):
lib.delete_objects_from_server(_target, target_api, configuration_item)
return
if doql is not None:
source = resource_api.request(source_url, method, doql=doql)
lib.from_d42(
source, mapping,
_target, _resource,
target_api, resource_api,
configuration_item,
doql=True
)
else:
source = resource_api.request(source_url, method)
lib.from_d42(
source, mapping,
_target, _resource,
target_api, resource_api,
configuration_item,
doql=False
)
print('Running...')
# Load mapping
config = eTree.parse('mapping.xml')
meta = config.getroot()
# Init transports services
services = init_services(meta.find('settings'))
# Parse tasks
tasks = meta.find('tasks')
for task in tasks:
if task.attrib['enable'] == 'true':
task_execute(task, services) | 0.217088 | 0.070528 |
import cv2
import time
import numpy as np
import logging
import logging.config
import yaml
import robot
import jsonobject
from shapely.geometry import Point, Polygon
import threading
from colors import *
import diff
"""
Improvements on control:
* average multiple frames (robot is slow) to filter out video noise, eg in wind
* track motion, filter out positions far away from expected/current position + speed circle
* estimate speed, and estimate when hitting the fence between frames - factor in estimated latency in frame
* watchdog on video frame analysis - when stops up/lags, then bail out / reset
* online control of parameters without having to restart - e.g. re-read config
* Can we monitor latency? Flashing LED on robot? Track time in camera OSD. Frame metadata in stream?
Known camera frame rate -> latency
* online dashboard to monitor video characteristics, etc
* monitor current (to cutter) and reset - regular reset!?
* missing heartbeat stops cutter too
"""
logger = logging.getLogger("main")
def empty_image(cap):
    """Return an all-black BGR frame sized to the capture's resolution."""
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    return np.zeros(shape=[height, width, 3], dtype=np.uint8)
def area_and_centroid(M):
    """Return (area, cx, cy) from an OpenCV moments dict.

    Raises ZeroDivisionError when m00 is zero; callers rely on catching
    that for degenerate contours.
    """
    area = M["m00"]
    centroid_x = int(M["m10"] / area)
    centroid_y = int(M["m01"] / area)
    return area, centroid_x, centroid_y
def draw_exterior(shape, image, color, width):
    """Draw the exterior ring of a shapely polygon onto an OpenCV image."""
    points = [tuple(map(int, coord)) for coord in shape.exterior.coords]
    # Connect each vertex to the next; the ring's coords already repeat
    # the first vertex at the end, closing the outline.
    for start, end in zip(points, points[1:]):
        cv2.line(image, start, end, color, width)
def onMouse(event, x, y, flags, param):
    """Mouse callback: left click prints the position; right click logs a
    fence vertex in YAML form.

    The right-click log lines are designed to be extracted with:
        grep FENCE video.log | cut -c47- > fence.yaml
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        print('x = %d, y = %d' % (x, y))
    elif event == cv2.EVENT_RBUTTONDOWN:
        logger.info("FENCE - x: %d", x)
        logger.info("FENCE y: %d", y)
def main():
"""Video-based fence control loop.

Reads frames from the configured camera, locates the robot by contour
analysis inside an area of interest (AOI) around the fence polygon, and
commands the robot (avoid / stop / heartbeat) to keep it inside the fence.
NOTE: indentation in this dump is stripped; structure follows the original.
"""
# logging.basicConfig()
with open("logging.yaml") as f:
logging.config.dictConfig(yaml.full_load(f))
logger.info("Starting video control")
config = jsonobject.fromJson(yaml.full_load(open("config.yaml")))
# Fence polygon comes from config; the AOI is the fence grown by a margin.
# fence = box(580, 350, 1200, 600)
# print(config.fence)
fence = Polygon(list(((p.x, p.y) for p in config.fence)))
aoi = fence.buffer(config.video.aoi_buffer, resolution=1, join_style=2)
if config.robot.control:
logger.info("Connecting to robot...")
robot.start(config.robot.url)
logger.info("Starting camera loop...")
# Loop state: recording handle, course overlay image, inside/outside flag.
recording = None
course = None
outside = True
last_time = time.time()
last_point = None
avoid_complete = threading.Event()
avoid_complete.set()
cap = cv2.VideoCapture(config.video.url)
show = True
# show = False
# Region-of-interest mask: white inside the AOI polygon, black elsewhere.
roi_mask = empty_image(cap)
cv2.fillPoly(roi_mask, [np.array([[int(x), int(y)] for x, y in aoi.exterior.coords])], White)
if show: cv2.imshow("roi", roi_mask)
no_frame = 0
# TODO: could add keyboard commands here beyond quit: record, stop, ...
while cv2.waitKey(1) & 0xFF != ord('q'):
ret, frame = cap.read()
print(ret)
# Bail out after more than 10 consecutive failed reads.
if not ret:
logger.warning("No frame %r", ret)
no_frame += 1
if no_frame > 10: break
continue
no_frame = 0
t = time.time()
logger.debug("Frame interval %.3f", t - last_time)
last_time = t
# Lazily open the recording file on the first frame when enabled.
if config.video.record and recording is None:
import datetime
timestamp = str(datetime.datetime.now())[:19].replace(":", "").replace(" ", "-")
filename = 'recording/%s.avi' % timestamp
logger.info("Recording video to %s", filename)
height, width, layers = frame.shape
recording = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*'DIVX'), 15, (width, height))
if recording:
recording.write(frame)
# Process frame before drawing in it
contours = diff.process2(frame, roi_mask, show=show)
draw_exterior(fence, frame, (0, 0, 255), 2)
draw_exterior(aoi, frame, (0, 0, 0), 2)
cv2.imshow('frame', frame)
cv2.setMouseCallback('frame', onMouse)
if course is None:
course = frame.copy()
cv2.imshow('course', course)
position = None
# process contours in the order of decreasing area
contours.sort(key=cv2.contourArea)
contours.reverse()
# Pick the first contour that passes the area and AOI filters as the robot.
for c in contours:
M = cv2.moments(c)
try:
A, cX, cY = area_and_centroid(M)
except:
# Degenerate contour (zero area, m00 == 0); skip it.
continue
# For shapely - has float x,y, cv2 expects ints for drawing
p = Point(cX, cY)
if A > config.video.area_max:
logger.debug("Area too large %d %d %.2f", cX, cY, A)
continue
if A < config.video.area_min:
logger.debug("Area too small %d %d %.2f", cX, cY, A)
# logger.debug("Invalid area %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
# cv2.circle(frame, (cX, cY), 15, Red, 2)
break
if not aoi.contains(p):
logger.debug("Invalid point %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
cv2.circle(frame, (cX, cY), 15, Blue, 2)
continue
logger.debug("Valid area %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
position = (cX, cY)
cv2.circle(frame, position, 25, Black, 2)
cv2.imshow("frame", frame)
if position is not None:
logger.info("Point %d %d %r %.2f %.2f", cX, cY, fence.contains(p), fence.exterior.distance(p), A)
if last_point:
cv2.line(course, last_point, position, (10, 10, 10), 2)
last_point = position
cv2.imshow("course", course)
t = time.time()
# Buffer zone outside the fence: will run avoidance manoeuvre.
# Get into an outside state, if staying outside too long, then abort
if not fence.contains(p):
if not outside:
logger.info("EXIT")
outside = True
if config.robot.control:
robot.avoid(config.robot.speed, config.robot.turnRate)
# Only tries it once! So don't have to wait for it to complete.
# Will only continue when if the avoid ends up inside
continue
# TODO: check if avoidance is finished...
continue
assert outside
if fence.exterior.distance(p) < config.robot.max_outside_distance:
if config.robot.control:
robot.avoid(config.robot.speed, config.robot.turnRate)
# Only tries it once! So don't have to wait for it to complete.
# Will only continue when if the avoid ends up inside
continue
continue
# inside fence
if outside:
logger.info("ENTER")
time_into_inside = t
# robot.send(robot.Speed(speed))
outside = False
# process below also when no position
if not outside and config.robot.inside_timeout and t > time_into_inside + config.robot.inside_timeout:
logger.warning("INSIDE timeout")
if config.robot.control:
robot.send(robot.Stop)
continue
if robot.battery_level is not None:
# TODO: only print this now and then - when received in robot
# logger.info("Battery level %.3f", robot.battery_level)
if robot.battery_level < config.robot.battery_cutoff:
logger.warning("Battery level %.3f low - stopping", robot.battery_level)
if config.robot.control:
robot.send(robot.Stop)
continue
# logger.debug("Avoidance %r", avoid_complete.isSet())
if avoid_complete.isSet():
# this is a problem if just reversing out of fence, immediately goes forward.
# but only from manual GUI control?
# robot.send(robot.Speed(config.robot.speed))
# else:
# don't mess up the avoidance
if config.robot.control:
robot.send(robot.Heartbeat)
else:
logger.info("In avoidance")
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main() | control/video/video.py | import cv2
import time
import numpy as np
import logging
import logging.config
import yaml
import robot
import jsonobject
from shapely.geometry import Point, Polygon
import threading
from colors import *
import diff
"""
Improvements on control:
* average multiple frames (robot is slow) to filter out video noise, eg in wind
* track motion, filter out positions far away from expected/current position + speed circle
* estimate speed, and estimate when hitting the fence between frames - factor in estimated latency in frame
* watchdog on video frame analysis - when stops up/lags, then bail out / reset
* online control of parameters without having to restart - e.g. re-read config
* Can we monitor latency? Flashing LED on robot? Track time in camera OSD. Frame metadata in stream?
Known camera frame rate -> latency
* online dashboard to monitor video characteristics, etc
* monitor current (to cutter) and reset - regular reset!?
* missing heartbeat stops cutter too
"""
logger = logging.getLogger("main")
def empty_image(cap):
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# fps = int(cap.get(cv2.CAP_PROP_FPS))
# n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(w, h, fps, n_frames)
return np.zeros(shape=[h, w, 3], dtype=np.uint8)
def area_and_centroid(M):
A = M["m00"]
cX = int(M["m10"] / A)
cY = int(M["m01"] / A)
return A, cX, cY
def draw_exterior(shape, image, color, width):
pts = shape.exterior.coords[:]
p0 = tuple(map(int, pts.pop(0)))
while pts:
p1 = tuple(map(int, pts.pop(0)))
cv2.line(image, p0, p1, color, width)
p0 = p1
def onMouse(event, x, y, flags, param):
# logger.info("onMouse %r %d %d", event, x, y)
# print(event, flags, param)
if event == cv2.EVENT_LBUTTONDOWN:
# draw circle here (etc...)
print('x = %d, y = %d' % (x, y))
# print("BRG", frame[y][x])
elif event == cv2.EVENT_RBUTTONDOWN:
# output YAML for fence
# grep FENCE video.log | cut -c47- > fence.yaml
logger.info("FENCE - x: %d", x)
logger.info("FENCE y: %d", y)
def main():
# logging.basicConfig()
with open("logging.yaml") as f:
logging.config.dictConfig(yaml.full_load(f))
logger.info("Starting video control")
config = jsonobject.fromJson(yaml.full_load(open("config.yaml")))
# fence = box(580, 350, 1200, 600)
# print(config.fence)
fence = Polygon(list(((p.x, p.y) for p in config.fence)))
aoi = fence.buffer(config.video.aoi_buffer, resolution=1, join_style=2)
if config.robot.control:
logger.info("Connecting to robot...")
robot.start(config.robot.url)
logger.info("Starting camera loop...")
recording = None
course = None
outside = True
last_time = time.time()
last_point = None
avoid_complete = threading.Event()
avoid_complete.set()
cap = cv2.VideoCapture(config.video.url)
show = True
# show = False
roi_mask = empty_image(cap)
cv2.fillPoly(roi_mask, [np.array([[int(x), int(y)] for x, y in aoi.exterior.coords])], White)
if show: cv2.imshow("roi", roi_mask)
no_frame = 0
# TODO: could add keyboard commands here beyond quit: record, stop, ...
while cv2.waitKey(1) & 0xFF != ord('q'):
ret, frame = cap.read()
print(ret)
if not ret:
logger.warning("No frame %r", ret)
no_frame += 1
if no_frame > 10: break
continue
no_frame = 0
t = time.time()
logger.debug("Frame interval %.3f", t - last_time)
last_time = t
if config.video.record and recording is None:
import datetime
timestamp = str(datetime.datetime.now())[:19].replace(":", "").replace(" ", "-")
filename = 'recording/%s.avi' % timestamp
logger.info("Recording video to %s", filename)
height, width, layers = frame.shape
recording = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*'DIVX'), 15, (width, height))
if recording:
recording.write(frame)
# Process frame before drawing in it
contours = diff.process2(frame, roi_mask, show=show)
draw_exterior(fence, frame, (0, 0, 255), 2)
draw_exterior(aoi, frame, (0, 0, 0), 2)
cv2.imshow('frame', frame)
cv2.setMouseCallback('frame', onMouse)
if course is None:
course = frame.copy()
cv2.imshow('course', course)
position = None
# process contours in the order of decreasing area
contours.sort(key=cv2.contourArea)
contours.reverse()
for c in contours:
M = cv2.moments(c)
try:
A, cX, cY = area_and_centroid(M)
except:
continue
# For shapely - has float x,y, cv2 expects ints for drawing
p = Point(cX, cY)
if A > config.video.area_max:
logger.debug("Area too large %d %d %.2f", cX, cY, A)
continue
if A < config.video.area_min:
logger.debug("Area too small %d %d %.2f", cX, cY, A)
# logger.debug("Invalid area %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
# cv2.circle(frame, (cX, cY), 15, Red, 2)
break
if not aoi.contains(p):
logger.debug("Invalid point %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
cv2.circle(frame, (cX, cY), 15, Blue, 2)
continue
logger.debug("Valid area %d %d %.2f %r %.2f", cX, cY, A, aoi.contains(p), fence.exterior.distance(p))
position = (cX, cY)
cv2.circle(frame, position, 25, Black, 2)
cv2.imshow("frame", frame)
if position is not None:
logger.info("Point %d %d %r %.2f %.2f", cX, cY, fence.contains(p), fence.exterior.distance(p), A)
if last_point:
cv2.line(course, last_point, position, (10, 10, 10), 2)
last_point = position
cv2.imshow("course", course)
t = time.time()
# Buffer zone outside the fence: will run avoidance maneouvre.
# Get into an outside state, if staying outside too long, then abort
if not fence.contains(p):
if not outside:
logger.info("EXIT")
outside = True
if config.robot.control:
robot.avoid(config.robot.speed, config.robot.turnRate)
# Only tries it once! So don't have to wait for it to complete.
# Will only continue when if the avoid ends up inside
continue
# TODO: check if avoidance is fininshed...
continue
assert outside
if fence.exterior.distance(p) < config.robot.max_outside_distance:
if config.robot.control:
robot.avoid(config.robot.speed, config.robot.turnRate)
# Only tries it once! So don't have to wait for it to complete.
# Will only continue when if the avoid ends up inside
continue
continue
# inside fence
if outside:
logger.info("ENTER")
time_into_inside = t
# robot.send(robot.Speed(speed))
outside = False
# process below also when no position
if not outside and config.robot.inside_timeout and t > time_into_inside + config.robot.inside_timeout:
logger.warning("INSIDE timeout")
if config.robot.control:
robot.send(robot.Stop)
continue
if robot.battery_level is not None:
# TODO: only print this now and then - when received in robot
# logger.info("Battery level %.3f", robot.battery_level)
if robot.battery_level < config.robot.battery_cutoff:
logger.warning("Battery level %.3f low - stopping", robot.battery_level)
if config.robot.control:
robot.send(robot.Stop)
continue
# logger.debug("Avoidance %r", avoid_complete.isSet())
if avoid_complete.isSet():
# this is a problem if just reversing out of fence, immediately goes forward.
# but only from manual GUI control?
# robot.send(robot.Speed(config.robot.speed))
# else:
# don't mess up the avoidance
if config.robot.control:
robot.send(robot.Heartbeat)
else:
logger.info("In avoidance")
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main() | 0.291182 | 0.39129 |
import collections
import json
import os
from typing import List, Set, Union
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.logging import log, warn, notice
# Reserved vocabulary entries; their order fixes the integer ids below.
PAD_TOKEN = "<pad>"
START_TOKEN = "<s>"
END_TOKEN = "</s>"
UNK_TOKEN = "<unk>"
SPECIAL_TOKENS = [PAD_TOKEN, START_TOKEN, END_TOKEN, UNK_TOKEN]
# Fixed indices of the special tokens in SPECIAL_TOKENS / every vocabulary.
PAD_TOKEN_INDEX = 0
START_TOKEN_INDEX = 1
END_TOKEN_INDEX = 2
UNK_TOKEN_INDEX = 3
def from_wordlist(path: str,
                  encoding: str = "utf-8",
                  contains_header: bool = True,
                  contains_frequencies: bool = True) -> "Vocabulary":
    """Load a vocabulary from a wordlist.

    The file can contain either list of words with no header.
    Or it can contain words and their counts separated
    by tab and a header on the first line.

    Arguments:
        path: The path to the wordlist file
        encoding: The encoding of the wordlist file (defaults to UTF-8)
        contains_header: if the file have a header on first line
        contains_frequencies: if the file contains a second column

    Returns:
        The new Vocabulary instance.
    """
    check_argument_types()
    words = []  # type: List[str]
    with open(path, encoding=encoding) as wordlist:
        line_number = 1
        if contains_header:
            # Skip the header line.
            line_number += 1
            next(wordlist)
        for line in wordlist:
            line = line.strip()
            if not line:
                warn("Vocabulary file {}:{}: line empty"
                     .format(path, line_number))
                line_number += 1
                continue
            if contains_frequencies:
                columns = line.split("\t")
                if len(columns) != 2:
                    raise ValueError(
                        "Vocabulary file {}:{}: line does not have two columns"
                        .format(path, line_number))
                word = columns[0]
            else:
                if "\t" in line:
                    warn("Vocabulary file {}:{}: line contains a tabulator"
                         .format(path, line_number))
                word = line
            # The first data lines are expected to hold the special tokens
            # in canonical order; warn when a mismatch is found.
            if line_number <= len(SPECIAL_TOKENS) + int(contains_header):
                should_be = SPECIAL_TOKENS[
                    line_number - 1 - int(contains_header)]
                if word != should_be:
                    notice("Expected special token {} but encountered a "
                           "different word: {}".format(should_be, word))
            words.append(word)
            line_number += 1
    log("Vocabulary from wordlist loaded, containing {} words"
        .format(len(words)))
    log_sample(words)
    return Vocabulary(words)
def from_t2t_vocabulary(path: str,
encoding: str = "utf-8") -> "Vocabulary":
"""Load a vocabulary generated during tensor2tensor training.
Arguments:
path: The path to the vocabulary file.
encoding: The encoding of the vocabulary file (defaults to UTF-8).
Returns:
The new Vocabulary instantce.
"""
check_argument_types()
vocabulary = [] # type: List[str]
with open(path, encoding=encoding) as wordlist:
for line in wordlist:
line = line.strip()
# T2T vocab tends to wrap words in single quotes
if ((line.startswith("'") and line.endswith("'"))
or (line.startswith('"') and line.endswith('"'))):
line = line[1:-1]
if line in ["<pad>", "<EOS>"]:
continue
vocabulary.append(line)
log("Vocabulary form wordlist loaded, containing {} words"
.format(len(vocabulary)))
log_sample(vocabulary)
return Vocabulary(vocabulary)
def from_nematus_json(path: str, max_size: int = None,
pad_to_max_size: bool = False) -> "Vocabulary":
"""Load vocabulary from Nematus JSON format.
The JSON format is a flat dictionary that maps words to their index in the
vocabulary.
Args:
path: Path to the file.
max_size: Maximum vocabulary size including 'unk' and 'eos' symbols,
but not including <pad> and <s> symbol.
pad_to_max_size: If specified, the vocabulary is padded with dummy
symbols up to the specified maximum size.
"""
check_argument_types()
with open(path, "r", encoding="utf-8") as f_json:
contents = json.load(f_json)
vocabulary = [] # type: List[str]
for word in sorted(contents.keys(), key=lambda x: contents[x]):
if contents[word] < 2:
continue
vocabulary.append(word)
if max_size is not None and len(vocabulary) == max_size:
break
if max_size is None:
max_size = len(vocabulary) - 2 # the "2" is ugly HACK
if pad_to_max_size and max_size is not None:
current_length = len(vocabulary)
for i in range(max_size - current_length + 2): # the "2" is ugly HACK
word = "<pad_{}>".format(i)
vocabulary.append(word)
return Vocabulary(vocabulary)
class Vocabulary(collections.Sized):
def __init__(self, words: List[str], num_oov_buckets: int = 0) -> None:
"""Create a new instance of a vocabulary.
Arguments:
words: The mapping of indices to words.
"""
self._vocabulary = SPECIAL_TOKENS + words
self._alphabet = {c for word in words for c in word}
self._index_to_string = (
tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=self._vocabulary,
default_value=UNK_TOKEN))
self._string_to_index = tf.contrib.lookup.index_table_from_tensor(
mapping=self._vocabulary,
num_oov_buckets=num_oov_buckets,
default_value=UNK_TOKEN_INDEX)
def __len__(self) -> int:
"""Get the size of the vocabulary.
Returns:
The number of distinct words in the vocabulary.
"""
return len(self._vocabulary)
def __contains__(self, word: str) -> bool:
"""Check if a word is in the vocabulary.
Arguments:
word: The word to look up.
Returns:
True if the word was added to the vocabulary, False otherwise.
"""
return word in self._vocabulary
@property
def alphabet(self) -> Set[str]:
return self._alphabet
@property
def index_to_word(self) -> List[str]:
return self._vocabulary
def strings_to_indices(self,
# add_start_symbol: bool = False,
# add_end_symbol: bool = False
sentences: tf.Tensor) -> tf.Tensor:
"""Generate the tensor representation for the provided sentences.
Arguments:
sentences: List of sentences as lists of tokens.
add_start_symbol: If True, the `<s>` token will be added to the
beginning of each sentence vector. Enabling this option extends
the maximum length by one.
add_end_symbol: If True, the `</s>` token will be added to the end
of each sentence vector, provided that the sentence is shorter
than `max_len`. If not, the end token is not added. Unlike
`add_start_symbol`, enabling this option **does not alter**
the maximum length.
Returns:
Tensor of indices of the words.
"""
return self._string_to_index.lookup(sentences)
def indices_to_strings(self, vectors: tf.Tensor) -> tf.Tensor:
"""Convert tensors of indexes of vocabulary items to lists of words.
Arguments:
vectors: An int Tensor with indices to the vocabulary.
Returns:
A string Tensor with the corresponding words.
"""
return self._index_to_string.lookup(vectors)
def vectors_to_sentences(
self,
vectors: Union[List[np.ndarray], np.ndarray]) -> List[List[str]]:
"""Convert vectors of indexes of vocabulary items to lists of words.
Arguments:
vectors: TIME-MAJOR List of vectors of vocabulary indices.
Returns:
List of lists of words.
"""
if isinstance(vectors, list):
if not vectors:
raise ValueError(
"Cannot infer batch size because decoder returned an "
"empty output.")
batch_size = vectors[0].shape[0]
elif isinstance(vectors, np.ndarray):
batch_size = vectors.shape[1]
else:
raise TypeError(
"Unexpected type of decoder output: {}".format(type(vectors)))
sentences = [[] for _ in range(batch_size)] # type: List[List[str]]
for vec in vectors:
for sentence, word_i in zip(sentences, vec):
if not sentence or sentence[-1] != END_TOKEN:
sentence.append(self.index_to_word[word_i])
return [s[:-1] if s and s[-1] == END_TOKEN else s for s in sentences]
def save_wordlist(self, path: str, overwrite: bool = False,
encoding: str = "utf-8") -> None:
"""Save the vocabulary as a wordlist.
The file is ordered by the ids of words.
This function is used mainly for embedding visualization.
Arguments:
path: The path to save the file to.
overwrite: Flag whether to overwrite existing file.
Defaults to False.
Raises:
FileExistsError if the file exists and overwrite flag is
disabled.
"""
if os.path.exists(path) and not overwrite:
raise FileExistsError("Cannot save vocabulary: File exists and "
"overwrite is disabled. {}".format(path))
with open(path, "w", encoding=encoding) as output_file:
log("Storing vocabulary without frequencies.")
for word in self._vocabulary:
output_file.write("{}\n".format(word))
def log_sample(vocabulary: List[str], size: int = 5) -> None:
"""Log a sample of the vocabulary.
Arguments:
size: How many sample words to log.
"""
if size > len(vocabulary):
log("Vocabulary: {}".format(vocabulary))
else:
sample_ids = np.random.permutation(np.arange(len(vocabulary)))[:size]
log("Sample of the vocabulary: {}".format(
[vocabulary[i] for i in sample_ids]))
def pad_batch(sentences: List[List[str]],
max_length: int = None,
add_start_symbol: bool = False,
add_end_symbol: bool = False) -> List[List[str]]:
max_len = max(len(s) for s in sentences)
if add_end_symbol:
max_len += 1
if max_length is not None:
max_len = min(max_length, max_len)
padded_sentences = []
for sent in sentences:
if add_end_symbol:
padded = (sent + [END_TOKEN] + [PAD_TOKEN] * max_len)[:max_len]
else:
padded = (sent + [PAD_TOKEN] * max_len)[:max_len]
if add_start_symbol:
padded.insert(0, START_TOKEN)
padded_sentences.append(padded)
return padded_sentences
def sentence_mask(sentences: tf.Tensor) -> tf.Tensor:
return tf.to_float(tf.not_equal(sentences, PAD_TOKEN_INDEX)) | neuralmonkey/vocabulary.py | import collections
import json
import os
from typing import List, Set, Union
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.logging import log, warn, notice
PAD_TOKEN = "<pad>"
START_TOKEN = "<s>"
END_TOKEN = "</s>"
UNK_TOKEN = "<unk>"
SPECIAL_TOKENS = [PAD_TOKEN, START_TOKEN, END_TOKEN, UNK_TOKEN]
PAD_TOKEN_INDEX = 0
START_TOKEN_INDEX = 1
END_TOKEN_INDEX = 2
UNK_TOKEN_INDEX = 3
def from_wordlist(path: str,
encoding: str = "utf-8",
contains_header: bool = True,
contains_frequencies: bool = True) -> "Vocabulary":
"""Load a vocabulary from a wordlist.
The file can contain either list of words with no header.
Or it can contain words and their counts separated
by tab and a header on the first line.
Arguments:
path: The path to the wordlist file
encoding: The encoding of the wordlist file (defaults to UTF-8)
contains_header: if the file have a header on first line
contains_frequencies: if the file contains a second column
Returns:
The new Vocabulary instance.
"""
check_argument_types()
vocabulary = [] # type: List[str]
with open(path, encoding=encoding) as wordlist:
line_number = 1
if contains_header:
# skip the header
line_number += 1
next(wordlist)
for line in wordlist:
line = line.strip()
# check if line is empty
if not line:
warn("Vocabulary file {}:{}: line empty"
.format(path, line_number))
line_number += 1
continue
if contains_frequencies:
info = line.split("\t")
if len(info) != 2:
raise ValueError(
"Vocabulary file {}:{}: line does not have two columns"
.format(path, line_number))
word = info[0]
else:
if "\t" in line:
warn("Vocabulary file {}:{}: line contains a tabulator"
.format(path, line_number))
word = line
if line_number <= len(SPECIAL_TOKENS) + int(contains_header):
should_be = SPECIAL_TOKENS[
line_number - 1 - int(contains_header)]
if word != should_be:
notice("Expected special token {} but encountered a "
"different word: {}".format(should_be, word))
vocabulary.append(word)
line_number += 1
continue
vocabulary.append(word)
line_number += 1
log("Vocabulary from wordlist loaded, containing {} words"
.format(len(vocabulary)))
log_sample(vocabulary)
return Vocabulary(vocabulary)
def from_t2t_vocabulary(path: str,
encoding: str = "utf-8") -> "Vocabulary":
"""Load a vocabulary generated during tensor2tensor training.
Arguments:
path: The path to the vocabulary file.
encoding: The encoding of the vocabulary file (defaults to UTF-8).
Returns:
The new Vocabulary instantce.
"""
check_argument_types()
vocabulary = [] # type: List[str]
with open(path, encoding=encoding) as wordlist:
for line in wordlist:
line = line.strip()
# T2T vocab tends to wrap words in single quotes
if ((line.startswith("'") and line.endswith("'"))
or (line.startswith('"') and line.endswith('"'))):
line = line[1:-1]
if line in ["<pad>", "<EOS>"]:
continue
vocabulary.append(line)
log("Vocabulary form wordlist loaded, containing {} words"
.format(len(vocabulary)))
log_sample(vocabulary)
return Vocabulary(vocabulary)
def from_nematus_json(path: str, max_size: int = None,
pad_to_max_size: bool = False) -> "Vocabulary":
"""Load vocabulary from Nematus JSON format.
The JSON format is a flat dictionary that maps words to their index in the
vocabulary.
Args:
path: Path to the file.
max_size: Maximum vocabulary size including 'unk' and 'eos' symbols,
but not including <pad> and <s> symbol.
pad_to_max_size: If specified, the vocabulary is padded with dummy
symbols up to the specified maximum size.
"""
check_argument_types()
with open(path, "r", encoding="utf-8") as f_json:
contents = json.load(f_json)
vocabulary = [] # type: List[str]
for word in sorted(contents.keys(), key=lambda x: contents[x]):
if contents[word] < 2:
continue
vocabulary.append(word)
if max_size is not None and len(vocabulary) == max_size:
break
if max_size is None:
max_size = len(vocabulary) - 2 # the "2" is ugly HACK
if pad_to_max_size and max_size is not None:
current_length = len(vocabulary)
for i in range(max_size - current_length + 2): # the "2" is ugly HACK
word = "<pad_{}>".format(i)
vocabulary.append(word)
return Vocabulary(vocabulary)
class Vocabulary(collections.Sized):
def __init__(self, words: List[str], num_oov_buckets: int = 0) -> None:
"""Create a new instance of a vocabulary.
Arguments:
words: The mapping of indices to words.
"""
self._vocabulary = SPECIAL_TOKENS + words
self._alphabet = {c for word in words for c in word}
self._index_to_string = (
tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=self._vocabulary,
default_value=UNK_TOKEN))
self._string_to_index = tf.contrib.lookup.index_table_from_tensor(
mapping=self._vocabulary,
num_oov_buckets=num_oov_buckets,
default_value=UNK_TOKEN_INDEX)
def __len__(self) -> int:
"""Get the size of the vocabulary.
Returns:
The number of distinct words in the vocabulary.
"""
return len(self._vocabulary)
def __contains__(self, word: str) -> bool:
"""Check if a word is in the vocabulary.
Arguments:
word: The word to look up.
Returns:
True if the word was added to the vocabulary, False otherwise.
"""
return word in self._vocabulary
@property
def alphabet(self) -> Set[str]:
return self._alphabet
@property
def index_to_word(self) -> List[str]:
return self._vocabulary
def strings_to_indices(self,
# add_start_symbol: bool = False,
# add_end_symbol: bool = False
sentences: tf.Tensor) -> tf.Tensor:
"""Generate the tensor representation for the provided sentences.
Arguments:
sentences: List of sentences as lists of tokens.
add_start_symbol: If True, the `<s>` token will be added to the
beginning of each sentence vector. Enabling this option extends
the maximum length by one.
add_end_symbol: If True, the `</s>` token will be added to the end
of each sentence vector, provided that the sentence is shorter
than `max_len`. If not, the end token is not added. Unlike
`add_start_symbol`, enabling this option **does not alter**
the maximum length.
Returns:
Tensor of indices of the words.
"""
return self._string_to_index.lookup(sentences)
def indices_to_strings(self, vectors: tf.Tensor) -> tf.Tensor:
"""Convert tensors of indexes of vocabulary items to lists of words.
Arguments:
vectors: An int Tensor with indices to the vocabulary.
Returns:
A string Tensor with the corresponding words.
"""
return self._index_to_string.lookup(vectors)
def vectors_to_sentences(
self,
vectors: Union[List[np.ndarray], np.ndarray]) -> List[List[str]]:
"""Convert vectors of indexes of vocabulary items to lists of words.
Arguments:
vectors: TIME-MAJOR List of vectors of vocabulary indices.
Returns:
List of lists of words.
"""
if isinstance(vectors, list):
if not vectors:
raise ValueError(
"Cannot infer batch size because decoder returned an "
"empty output.")
batch_size = vectors[0].shape[0]
elif isinstance(vectors, np.ndarray):
batch_size = vectors.shape[1]
else:
raise TypeError(
"Unexpected type of decoder output: {}".format(type(vectors)))
sentences = [[] for _ in range(batch_size)] # type: List[List[str]]
for vec in vectors:
for sentence, word_i in zip(sentences, vec):
if not sentence or sentence[-1] != END_TOKEN:
sentence.append(self.index_to_word[word_i])
return [s[:-1] if s and s[-1] == END_TOKEN else s for s in sentences]
def save_wordlist(self, path: str, overwrite: bool = False,
encoding: str = "utf-8") -> None:
"""Save the vocabulary as a wordlist.
The file is ordered by the ids of words.
This function is used mainly for embedding visualization.
Arguments:
path: The path to save the file to.
overwrite: Flag whether to overwrite existing file.
Defaults to False.
Raises:
FileExistsError if the file exists and overwrite flag is
disabled.
"""
if os.path.exists(path) and not overwrite:
raise FileExistsError("Cannot save vocabulary: File exists and "
"overwrite is disabled. {}".format(path))
with open(path, "w", encoding=encoding) as output_file:
log("Storing vocabulary without frequencies.")
for word in self._vocabulary:
output_file.write("{}\n".format(word))
def log_sample(vocabulary: List[str], size: int = 5) -> None:
"""Log a sample of the vocabulary.
Arguments:
size: How many sample words to log.
"""
if size > len(vocabulary):
log("Vocabulary: {}".format(vocabulary))
else:
sample_ids = np.random.permutation(np.arange(len(vocabulary)))[:size]
log("Sample of the vocabulary: {}".format(
[vocabulary[i] for i in sample_ids]))
def pad_batch(sentences: List[List[str]],
max_length: int = None,
add_start_symbol: bool = False,
add_end_symbol: bool = False) -> List[List[str]]:
max_len = max(len(s) for s in sentences)
if add_end_symbol:
max_len += 1
if max_length is not None:
max_len = min(max_length, max_len)
padded_sentences = []
for sent in sentences:
if add_end_symbol:
padded = (sent + [END_TOKEN] + [PAD_TOKEN] * max_len)[:max_len]
else:
padded = (sent + [PAD_TOKEN] * max_len)[:max_len]
if add_start_symbol:
padded.insert(0, START_TOKEN)
padded_sentences.append(padded)
return padded_sentences
def sentence_mask(sentences: tf.Tensor) -> tf.Tensor:
return tf.to_float(tf.not_equal(sentences, PAD_TOKEN_INDEX)) | 0.85022 | 0.269136 |
import unittest
from unittest import mock
from dayz_dev_tools import tools_directory
class TestToolsDirectory(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
import_module_patcher = mock.patch("importlib.import_module")
self.mock_import_module = import_module_patcher.start()
self.addCleanup(import_module_patcher.stop)
self.mock_winreg = self.mock_import_module.return_value
def test_returns_none_when_winreg_module_is_not_available(self) -> None:
self.mock_import_module.side_effect = ModuleNotFoundError
assert tools_directory.tools_directory() is None
def test_raises_when_importing_winreg_fails_for_other_reasons(self) -> None:
self.mock_import_module.side_effect = Exception("other import error")
with self.assertRaises(Exception) as error:
tools_directory.tools_directory()
assert error.exception == self.mock_import_module.side_effect
def test_returns_dayz_tools_directory_path_when_present_in_windows_registry(self) -> None:
mock_key = self.mock_winreg.OpenKey.return_value
self.mock_winreg.QueryValueEx.return_value = ("path/to/dayz/tools", 1)
assert tools_directory.tools_directory() == "path/to/dayz/tools"
self.mock_import_module.assert_called_once_with("winreg")
self.mock_winreg.OpenKey.assert_called_once_with(
self.mock_winreg.HKEY_CURRENT_USER, r"Software\bohemia interactive\Dayz Tools")
self.mock_winreg.QueryValueEx.assert_called_once_with(mock_key, "path")
mock_key.Close.assert_called_once_with()
def test_returns_none_when_key_is_not_present_in_registry(self) -> None:
self.mock_winreg.OpenKey.side_effect = OSError
assert tools_directory.tools_directory() is None
self.mock_winreg.QueryValueEx.assert_not_called()
def test_closes_key_when_querying_its_value_fails(self) -> None:
mock_key = self.mock_winreg.OpenKey.return_value
self.mock_winreg.QueryValueEx.side_effect = Exception("query failure")
assert tools_directory.tools_directory() is None
mock_key.Close.assert_called_once_with() | tests/test_tools_directory.py | import unittest
from unittest import mock
from dayz_dev_tools import tools_directory
class TestToolsDirectory(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
import_module_patcher = mock.patch("importlib.import_module")
self.mock_import_module = import_module_patcher.start()
self.addCleanup(import_module_patcher.stop)
self.mock_winreg = self.mock_import_module.return_value
def test_returns_none_when_winreg_module_is_not_available(self) -> None:
self.mock_import_module.side_effect = ModuleNotFoundError
assert tools_directory.tools_directory() is None
def test_raises_when_importing_winreg_fails_for_other_reasons(self) -> None:
self.mock_import_module.side_effect = Exception("other import error")
with self.assertRaises(Exception) as error:
tools_directory.tools_directory()
assert error.exception == self.mock_import_module.side_effect
def test_returns_dayz_tools_directory_path_when_present_in_windows_registry(self) -> None:
mock_key = self.mock_winreg.OpenKey.return_value
self.mock_winreg.QueryValueEx.return_value = ("path/to/dayz/tools", 1)
assert tools_directory.tools_directory() == "path/to/dayz/tools"
self.mock_import_module.assert_called_once_with("winreg")
self.mock_winreg.OpenKey.assert_called_once_with(
self.mock_winreg.HKEY_CURRENT_USER, r"Software\bohemia interactive\Dayz Tools")
self.mock_winreg.QueryValueEx.assert_called_once_with(mock_key, "path")
mock_key.Close.assert_called_once_with()
def test_returns_none_when_key_is_not_present_in_registry(self) -> None:
self.mock_winreg.OpenKey.side_effect = OSError
assert tools_directory.tools_directory() is None
self.mock_winreg.QueryValueEx.assert_not_called()
def test_closes_key_when_querying_its_value_fails(self) -> None:
mock_key = self.mock_winreg.OpenKey.return_value
self.mock_winreg.QueryValueEx.side_effect = Exception("query failure")
assert tools_directory.tools_directory() is None
mock_key.Close.assert_called_once_with() | 0.601477 | 0.498596 |
import sys
import argparse
from ao.errors import AOError, ECode
from ao.extrinsic import run_extrinsic, cli_extrinsic, EXTRINSIC_DESC
from ao.extract import run_extract, cli_extract, EXTRACT_DESC
def cli(prog, args):
parser = argparse.ArgumentParser(
prog=prog,
description=""
)
subparsers = parser.add_subparsers(dest='subparser_name')
extrinsic_subparser = subparsers.add_parser(
"extrinsic",
help=EXTRINSIC_DESC
)
cli_extrinsic(extrinsic_subparser)
extract_subparser = subparsers.add_parser(
"extract",
help=EXTRACT_DESC
)
cli_extract(extract_subparser)
parsed = parser.parse_args(args)
if parsed.subparser_name is None:
parser.print_help()
sys.exit(0)
return parsed
def cli_not_subparser(prog, args, desc, subparser):
parser = argparse.ArgumentParser(
prog=prog,
description=desc
)
subparser(parser)
parsed = parser.parse_args(args)
return parsed
def main_error_handler(args, runner):
try:
runner(args)
except AOError as e:
print(f"Error: {e.msg}")
sys.exit(e.ecode)
except BrokenPipeError:
# Pipes get closed and that's normal
sys.exit(0)
except KeyboardInterrupt:
print("Received keyboard interrupt. Exiting.", file=sys.stderr)
sys.exit(ECode.SIGINT)
except EnvironmentError as e:
print((
"Encountered a system error.\n"
"We can't control these, and they're usually related to your OS.\n"
"Try running again."
), file=sys.stderr)
raise e
except Exception as e:
print((
"I'm so sorry, but we've encountered an unexpected error.\n"
"This shouldn't happen, so please file a bug report with the "
"authors.\nWe will be extremely grateful!\n\n"
), file=sys.stderr)
raise e
return
def main():
args = cli(prog="ao", args=sys.argv[1:])
if args.subparser_name == "extrinsic":
main_error_handler(args, run_extrinsic)
if args.subparser_name == "extract":
main_error_handler(args, run_extract)
else:
raise ValueError("I shouldn't reach this point ever")
return | ao/cli.py | import sys
import argparse
from ao.errors import AOError, ECode
from ao.extrinsic import run_extrinsic, cli_extrinsic, EXTRINSIC_DESC
from ao.extract import run_extract, cli_extract, EXTRACT_DESC
def cli(prog, args):
parser = argparse.ArgumentParser(
prog=prog,
description=""
)
subparsers = parser.add_subparsers(dest='subparser_name')
extrinsic_subparser = subparsers.add_parser(
"extrinsic",
help=EXTRINSIC_DESC
)
cli_extrinsic(extrinsic_subparser)
extract_subparser = subparsers.add_parser(
"extract",
help=EXTRACT_DESC
)
cli_extract(extract_subparser)
parsed = parser.parse_args(args)
if parsed.subparser_name is None:
parser.print_help()
sys.exit(0)
return parsed
def cli_not_subparser(prog, args, desc, subparser):
parser = argparse.ArgumentParser(
prog=prog,
description=desc
)
subparser(parser)
parsed = parser.parse_args(args)
return parsed
def main_error_handler(args, runner):
try:
runner(args)
except AOError as e:
print(f"Error: {e.msg}")
sys.exit(e.ecode)
except BrokenPipeError:
# Pipes get closed and that's normal
sys.exit(0)
except KeyboardInterrupt:
print("Received keyboard interrupt. Exiting.", file=sys.stderr)
sys.exit(ECode.SIGINT)
except EnvironmentError as e:
print((
"Encountered a system error.\n"
"We can't control these, and they're usually related to your OS.\n"
"Try running again."
), file=sys.stderr)
raise e
except Exception as e:
print((
"I'm so sorry, but we've encountered an unexpected error.\n"
"This shouldn't happen, so please file a bug report with the "
"authors.\nWe will be extremely grateful!\n\n"
), file=sys.stderr)
raise e
return
def main():
args = cli(prog="ao", args=sys.argv[1:])
if args.subparser_name == "extrinsic":
main_error_handler(args, run_extrinsic)
if args.subparser_name == "extract":
main_error_handler(args, run_extract)
else:
raise ValueError("I shouldn't reach this point ever")
return | 0.239172 | 0.149967 |
import urllib
import json
import re
from datetime import datetime
from dateutil.parser import parse
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from wlm.models import Monument, MonumentPhoto
class Command(BaseCommand):
help = 'Update cultural heritage images'
def handle(self, *args, **options):
api_token = ''
file_errors = []
re_kult = re.compile(r'\{\{Cultural Heritage Russia\s*\|\s*id\s*=\s*([0-9]+)\D')
while True:
api_params = {
'action': 'query',
'list': 'embeddedin',
'eititle': 'Template:Cultural Heritage Russia',
'einamespace': 6, # file
'eilimit': 50,
}
if api_token:
api_params['eicontinue'] = api_token
answer = self.api_request(api_params)
for photo in answer['query']['embeddedin']:
try:
MonumentPhoto.objects.get(commons_id=photo['pageid'])
except ObjectDoesNotExist:
print "%s ..." % photo['title'],
api_params = {
'action': 'query',
'prop': 'imageinfo|revisions',
'iiprop': 'timestamp|user|url|size',
'iilimit': 1,
'rvprop': 'content',
'rvlimit': 1,
'titles': photo['title'].encode('utf8'),
}
p_answer = self.api_request(api_params)
p_info = p_answer['query']['pages'][str(photo['pageid'])]
p_url_parts = p_info['imageinfo'][0]['url'].split('/', 7)
m = re.search(re_kult, p_info['revisions'][0]['*'])
try:
kult_id = int(m.group(1))
monument = Monument.objects.get(kult_id=kult_id)
except:
file_errors.append({
'filename': photo['title'][5:],
'kult_id': kult_id,
})
print "ERROR"
continue
MonumentPhoto.objects.create(
monument=monument,
commons_id=photo['pageid'],
name=photo['title'][5:], # without 'File:'
folder='%s/%s' % (p_url_parts[5], p_url_parts[6]),
width=p_info['imageinfo'][0]['width'],
height=p_info['imageinfo'][0]['height'],
size=p_info['imageinfo'][0]['size'],
author=p_info['imageinfo'][0]['user'],
datetime=parse(p_info['imageinfo'][0]['timestamp']).strftime('%Y-%m-%d %H:%M:%S'),
)
print "OK"
if not 'query-continue' in answer:
break
api_token = answer['query-continue']['embeddedin']['eicontinue']
self.update_errors_page(file_errors)
self.stdout.write('Successfully updated photos of cultural heritage\n')
def update_errors_page(self, errors):
text = u'{| class="wikitable sortable"\n'
text += u'! File !! ID\n'
for error in errors:
text += u'|-\n'
text += u'| [[:File:%s]] ' % error['filename']
text += u'|| %s\n' % error['kult_id']
text += u'|}'
error_page = u'Commons:Wiki Loves Monuments 2012 in Russia/Errors'
api_params = {
'action': 'query',
'prop': 'info',
'intoken': 'edit',
'titles': error_page,
}
answer = self.api_request(api_params)
pages = answer['query']['pages']
for page_id in pages:
token = pages[page_id]['edittoken']
break
api_params = {
'action': 'edit',
'summary': u'Bot: Updating list',
'bot': 1,
'title': error_page,
'text': text.encode('utf-8'),
'token': <PASSWORD>,
}
answer = self.api_request(api_params, True)
def api_request(self, ext_params, post=False):
params = {
'format': 'json',
}
params.update(ext_params)
get_string = urllib.urlencode(params)
server = 'http://commons.wikimedia.org'
if post:
f = urllib.urlopen('%s/w/api.php' % server, get_string)
else:
f = urllib.urlopen('%s/w/api.php?%s' % (server, get_string))
return json.load(f) | wlm/management/commands/updateallphotos.py | import urllib
import json
import re
from datetime import datetime
from dateutil.parser import parse
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from wlm.models import Monument, MonumentPhoto
class Command(BaseCommand):
help = 'Update cultural heritage images'
def handle(self, *args, **options):
api_token = ''
file_errors = []
re_kult = re.compile(r'\{\{Cultural Heritage Russia\s*\|\s*id\s*=\s*([0-9]+)\D')
while True:
api_params = {
'action': 'query',
'list': 'embeddedin',
'eititle': 'Template:Cultural Heritage Russia',
'einamespace': 6, # file
'eilimit': 50,
}
if api_token:
api_params['eicontinue'] = api_token
answer = self.api_request(api_params)
for photo in answer['query']['embeddedin']:
try:
MonumentPhoto.objects.get(commons_id=photo['pageid'])
except ObjectDoesNotExist:
print "%s ..." % photo['title'],
api_params = {
'action': 'query',
'prop': 'imageinfo|revisions',
'iiprop': 'timestamp|user|url|size',
'iilimit': 1,
'rvprop': 'content',
'rvlimit': 1,
'titles': photo['title'].encode('utf8'),
}
p_answer = self.api_request(api_params)
p_info = p_answer['query']['pages'][str(photo['pageid'])]
p_url_parts = p_info['imageinfo'][0]['url'].split('/', 7)
m = re.search(re_kult, p_info['revisions'][0]['*'])
try:
kult_id = int(m.group(1))
monument = Monument.objects.get(kult_id=kult_id)
except:
file_errors.append({
'filename': photo['title'][5:],
'kult_id': kult_id,
})
print "ERROR"
continue
MonumentPhoto.objects.create(
monument=monument,
commons_id=photo['pageid'],
name=photo['title'][5:], # without 'File:'
folder='%s/%s' % (p_url_parts[5], p_url_parts[6]),
width=p_info['imageinfo'][0]['width'],
height=p_info['imageinfo'][0]['height'],
size=p_info['imageinfo'][0]['size'],
author=p_info['imageinfo'][0]['user'],
datetime=parse(p_info['imageinfo'][0]['timestamp']).strftime('%Y-%m-%d %H:%M:%S'),
)
print "OK"
if not 'query-continue' in answer:
break
api_token = answer['query-continue']['embeddedin']['eicontinue']
self.update_errors_page(file_errors)
self.stdout.write('Successfully updated photos of cultural heritage\n')
def update_errors_page(self, errors):
text = u'{| class="wikitable sortable"\n'
text += u'! File !! ID\n'
for error in errors:
text += u'|-\n'
text += u'| [[:File:%s]] ' % error['filename']
text += u'|| %s\n' % error['kult_id']
text += u'|}'
error_page = u'Commons:Wiki Loves Monuments 2012 in Russia/Errors'
api_params = {
'action': 'query',
'prop': 'info',
'intoken': 'edit',
'titles': error_page,
}
answer = self.api_request(api_params)
pages = answer['query']['pages']
for page_id in pages:
token = pages[page_id]['edittoken']
break
api_params = {
'action': 'edit',
'summary': u'Bot: Updating list',
'bot': 1,
'title': error_page,
'text': text.encode('utf-8'),
'token': <PASSWORD>,
}
answer = self.api_request(api_params, True)
def api_request(self, ext_params, post=False):
params = {
'format': 'json',
}
params.update(ext_params)
get_string = urllib.urlencode(params)
server = 'http://commons.wikimedia.org'
if post:
f = urllib.urlopen('%s/w/api.php' % server, get_string)
else:
f = urllib.urlopen('%s/w/api.php?%s' % (server, get_string))
return json.load(f) | 0.117597 | 0.073596 |
<div> class="PermalinkOverlay-modal" id="permalink-overlay-dialog" role="dialog" aria-labelledby="permalink-overlay-header" </div>
<div class="PermalinkOverlay-content" role="document">
<div class="PermalinkOverlay-body" data-background-path="/hoorfre">
<div role="main" class="permalink light-inline-actions
stream-uncapped
has-replies ## 是否包含回复
original-permalink-page ## 是否是原始永久连接
">
<div class="permalink-inner permalink-tweet-container"> ##原始推文
<div class="replies-to permalink-inner permalink-replies" data-component-context="replies"> ##所有回复
<div class="tweets-wrapper">
<div id="descendants" class="ThreadedDescendants">
<div class="stream-container " data-max-position="">
<div class="stream">
<ol class="stream-items js-navigable-stream" id="stream-items-id"> ## 回复开始
<li class="ThreadedConversation">
<ol class="stream-items">
<div> ## 回复内容
<li> ## 另外一条回复
{
"_id" : NumberLong("256292946331181056"),
"status" : {
"contributors" : null,
"truncated" : false,
# "text" : "Nobel prize in literature to be announced http://t.co/qxlEqdl3",
"is_quote_status" : false,
# "in_reply_to_status_id" : null,
# "id" : NumberLong("256292946331181056"),
# "favorite_count" : 0,
# "source" : "<a href=\"http://twitterfeed.com\" rel=\"nofollow\">twitterfeed</a>",
# "retweeted" : false,
"coordinates" : null,
"entities" : {
"symbols" : [],
# "hashtags" : [],
# "urls" : [{
"url" : "http://t.co/qxlEqdl3",
"indices" : [42, 62],
# "expanded_url" : "http://bit.ly/TzHFbI",
# "display_url" : "bit.ly/TzHFbI"
}]
},
#"in_reply_to_screen_name" : null,
#"in_reply_to_user_id" : null,
#"retweet_count" : 0,
"id_str" : "256292946331181056",
#"favorited" : false,
"user" : {
"follow_request_sent" : false,
"has_extended_profile" : false,
"profile_use_background_image" : true,
"default_profile_image" : false,
"id" : 47667947,
"profile_background_image_url_https" : "https://pbs.twimg.com/profile_background_images/18366095/Books_Library.jpg",
"verified" : false,
"translator_type" : "none",
"profile_text_color" : "333333",
"profile_image_url_https" : "https://pbs.twimg.com/profile_images/265603404/1970_normal.jpg",
"profile_sidebar_fill_color" : "DDFFCC",
"entities" : {
"url" : {
"urls" : [{
"url" : "http://t.co/U0EwKiuHee",
"indices" : [0, 22],
"expanded_url" : "https://twitter.com/bigbookworm",
"display_url" : "twitter.com/bigbookworm"
}]
},
"description" : {
"urls" : []
}
},
"followers_count" : 2215,
"profile_sidebar_border_color" : "BDDCAD",
"id_str" : "47667947",
"profile_background_color" : "9AE4E8",
"listed_count" : 35,
"is_translation_enabled" : false,
"utc_offset" : 7200,
"statuses_count" : 8854,
"description" : "Book devourer, fantasy slayer, spy novel addict, thrilled by suspense, book connoisseur and coffee freak.",
"friends_count" : 2442,
"location" : "Munich, Germany",
"profile_link_color" : "0084B4",
"profile_image_url" : "http://pbs.twimg.com/profile_images/265603404/1970_normal.jpg",
"following" : false,
"geo_enabled" : false,
"profile_background_image_url" : "http://pbs.twimg.com/profile_background_images/18366095/Books_Library.jpg",
"screen_name" : "bigbookworm",
"lang" : "en",
"profile_background_tile" : false,
"favourites_count" : 0,
"name" : "<NAME>",
"notifications" : false,
"url" : "http://t.co/U0EwKiuHee",
"created_at" : "Tue Jun 16 16:31:06 +0000 2009",
"contributors_enabled" : false,
"time_zone" : "Berlin",
"protected" : false,
"default_profile" : false,
"is_translator" : false
},
# "geo" : null,
"in_reply_to_user_id_str" : null,
"possibly_sensitive" : false,
"possibly_sensitive_appealable" : false,
#"lang" : "en",
#"created_at" : "Thu Oct 11 07:19:34 +0000 2012",
"in_reply_to_status_id_str" : null,
"place" : null
}
} | spider/got/tweet-dialog.py | <div> class="PermalinkOverlay-modal" id="permalink-overlay-dialog" role="dialog" aria-labelledby="permalink-overlay-header" </div>
<div class="PermalinkOverlay-content" role="document">
<div class="PermalinkOverlay-body" data-background-path="/hoorfre">
<div role="main" class="permalink light-inline-actions
stream-uncapped
has-replies ## 是否包含回复
original-permalink-page ## 是否是原始永久连接
">
<div class="permalink-inner permalink-tweet-container"> ##原始推文
<div class="replies-to permalink-inner permalink-replies" data-component-context="replies"> ##所有回复
<div class="tweets-wrapper">
<div id="descendants" class="ThreadedDescendants">
<div class="stream-container " data-max-position="">
<div class="stream">
<ol class="stream-items js-navigable-stream" id="stream-items-id"> ## 回复开始
<li class="ThreadedConversation">
<ol class="stream-items">
<div> ## 回复内容
<li> ## 另外一条回复
{
"_id" : NumberLong("256292946331181056"),
"status" : {
"contributors" : null,
"truncated" : false,
# "text" : "Nobel prize in literature to be announced http://t.co/qxlEqdl3",
"is_quote_status" : false,
# "in_reply_to_status_id" : null,
# "id" : NumberLong("256292946331181056"),
# "favorite_count" : 0,
# "source" : "<a href=\"http://twitterfeed.com\" rel=\"nofollow\">twitterfeed</a>",
# "retweeted" : false,
"coordinates" : null,
"entities" : {
"symbols" : [],
# "hashtags" : [],
# "urls" : [{
"url" : "http://t.co/qxlEqdl3",
"indices" : [42, 62],
# "expanded_url" : "http://bit.ly/TzHFbI",
# "display_url" : "bit.ly/TzHFbI"
}]
},
#"in_reply_to_screen_name" : null,
#"in_reply_to_user_id" : null,
#"retweet_count" : 0,
"id_str" : "256292946331181056",
#"favorited" : false,
"user" : {
"follow_request_sent" : false,
"has_extended_profile" : false,
"profile_use_background_image" : true,
"default_profile_image" : false,
"id" : 47667947,
"profile_background_image_url_https" : "https://pbs.twimg.com/profile_background_images/18366095/Books_Library.jpg",
"verified" : false,
"translator_type" : "none",
"profile_text_color" : "333333",
"profile_image_url_https" : "https://pbs.twimg.com/profile_images/265603404/1970_normal.jpg",
"profile_sidebar_fill_color" : "DDFFCC",
"entities" : {
"url" : {
"urls" : [{
"url" : "http://t.co/U0EwKiuHee",
"indices" : [0, 22],
"expanded_url" : "https://twitter.com/bigbookworm",
"display_url" : "twitter.com/bigbookworm"
}]
},
"description" : {
"urls" : []
}
},
"followers_count" : 2215,
"profile_sidebar_border_color" : "BDDCAD",
"id_str" : "47667947",
"profile_background_color" : "9AE4E8",
"listed_count" : 35,
"is_translation_enabled" : false,
"utc_offset" : 7200,
"statuses_count" : 8854,
"description" : "Book devourer, fantasy slayer, spy novel addict, thrilled by suspense, book connoisseur and coffee freak.",
"friends_count" : 2442,
"location" : "Munich, Germany",
"profile_link_color" : "0084B4",
"profile_image_url" : "http://pbs.twimg.com/profile_images/265603404/1970_normal.jpg",
"following" : false,
"geo_enabled" : false,
"profile_background_image_url" : "http://pbs.twimg.com/profile_background_images/18366095/Books_Library.jpg",
"screen_name" : "bigbookworm",
"lang" : "en",
"profile_background_tile" : false,
"favourites_count" : 0,
"name" : "<NAME>",
"notifications" : false,
"url" : "http://t.co/U0EwKiuHee",
"created_at" : "Tue Jun 16 16:31:06 +0000 2009",
"contributors_enabled" : false,
"time_zone" : "Berlin",
"protected" : false,
"default_profile" : false,
"is_translator" : false
},
# "geo" : null,
"in_reply_to_user_id_str" : null,
"possibly_sensitive" : false,
"possibly_sensitive_appealable" : false,
#"lang" : "en",
#"created_at" : "Thu Oct 11 07:19:34 +0000 2012",
"in_reply_to_status_id_str" : null,
"place" : null
}
} | 0.219254 | 0.163479 |
import time
from threading import Thread
from python2sky import config
from python2sky.context.context_carrier import ContextCarrier
from python2sky.context.context_manager import ContextManager
from python2sky.util.count_down_latch import CountDownLatch
from python2sky.util.uuid_util import global_id_to_string
from tests.base_test_case import BaseTestCase
class TestTracingContext(BaseTestCase):
    """Tests for python2sky's tracing context: span lifecycle, SW6 carrier
    extract/inject, and cross-thread context propagation."""
    def setUp(self):
        super().setUp()
    def test_ignored_segment(self):
        # No SERVICE_ID / SERVICE_INSTANCE_ID configured: spans must still
        # open and close cleanly even though the segment is ignored.
        entry_span = ContextManager.create_entry_span("/operation", None)
        local_span = ContextManager.create_local_span("/local")
        exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
        ContextManager.stop_span(exit_span)
        ContextManager.stop_span(local_span)
        ContextManager.stop_span(entry_span)
    def test_tracing_context(self):
        # With service ids registered, entry/local/exit spans nest and are
        # stopped in LIFO order without raising.
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", None)
        local_span = ContextManager.create_local_span("/local")
        exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
        ContextManager.stop_span(exit_span)
        ContextManager.stop_span(local_span)
        ContextManager.stop_span(entry_span)
    def test_tracing_context_extract(self):
        # Deserializing an SW6 header into the carrier must surface as a
        # segment reference on the active tracing context.
        carrier = ContextCarrier()
        carrier.deserialize(self.SW6)
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", carrier)
        local_span = ContextManager.create_local_span("/local")
        exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
        tracing_context = ContextManager.get_tracing_context()
        self.assertEqual(tracing_context.segment.refs[0], carrier)
        ContextManager.stop_span(exit_span)
        ContextManager.stop_span(local_span)
        ContextManager.stop_span(entry_span)
    def test_tracing_context_inject(self):
        # Creating an exit span with a carrier injects a serializable SW6
        # header into that carrier.
        carrier = ContextCarrier()
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", None)
        local_span = ContextManager.create_local_span("/local")
        exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier)
        sw6 = carrier.serialize()
        self.assertIsNotNone(sw6)
        ContextManager.stop_span(exit_span)
        ContextManager.stop_span(local_span)
        ContextManager.stop_span(entry_span)
    def test_tracing_context_inject_and_extract(self):
        # Round trip: extract an upstream SW6 header, then inject into a
        # downstream carrier — the trace id must be preserved end to end.
        carrier = ContextCarrier()
        carrier.deserialize(self.SW6)
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", carrier)
        local_span = ContextManager.create_local_span("/local")
        carrier2 = ContextCarrier()
        exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier2)
        sw6 = carrier.serialize()
        self.assertEqual(sw6, carrier.serialize())
        self.assertEqual(ContextManager.get_global_trace_id(), global_id_to_string(["3", "4", "5"]))
        ContextManager.stop_span(exit_span)
        ContextManager.stop_span(local_span)
        ContextManager.stop_span(entry_span)
        self.assertEqual(carrier.trace_id, carrier2.trace_id)
    def local_thread(self, tracing_context, count_down_latch):
        # Helper for test_async: adopt the parent's tracing context on this
        # worker thread, then open and close a local span.
        ContextManager.CONTEXT.trace_context = tracing_context
        local_span = ContextManager.create_local_span("/local")
        ContextManager.stop_span(local_span)
        count_down_latch.count_down()
    def exit_thread(self, tracing_context, count_down_latch):
        # Helper for test_async: same as local_thread but with an exit span.
        ContextManager.CONTEXT.trace_context = tracing_context
        exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
        ContextManager.stop_span(exit_span)
        count_down_latch.count_down()
    def test_async(self):
        # Share one tracing context across two worker threads by assigning
        # the thread-local directly; the latch makes the parent wait for both.
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", None)
        count_down_latch = CountDownLatch(2)
        t1 = Thread(target=self.local_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
        t2 = Thread(target=self.exit_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
        t1.start()
        t2.start()
        count_down_latch.wait()
        ContextManager.stop_span(entry_span)
    def test_async2(self):
        # Propagate via the capture()/continued() API instead of touching
        # the thread-local: both workers must observe the parent trace id.
        config.SERVICE_ID = 1
        config.SERVICE_INSTANCE_ID = 1
        entry_span = ContextManager.create_entry_span("/operation", None)
        context_carrier = ContextManager.capture()
        count_down_latch = CountDownLatch(2)
        trace_id = ContextManager.get_global_trace_id()
        def local_thread():
            local_span = ContextManager.create_local_span("/local")
            ContextManager.continued(context_carrier)
            trace_id1 = ContextManager.get_global_trace_id()
            self.assertEqual(trace_id1, trace_id)
            ContextManager.stop_span(local_span)
            count_down_latch.count_down()
        def exit_thread():
            exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
            ContextManager.continued(context_carrier)
            trace_id2 = ContextManager.get_global_trace_id()
            self.assertEqual(trace_id2, trace_id)
            # Deliberate delay so the parent's stop_span below runs while
            # this worker still holds the continued context.
            time.sleep(3)
            ContextManager.stop_span(exit_span)
            count_down_latch.count_down()
        t1 = Thread(target=local_thread, args=())
        t2 = Thread(target=exit_thread, args=())
        t1.start()
        t2.start()
        ContextManager.stop_span(entry_span)
        count_down_latch.wait()
from threading import Thread
from python2sky import config
from python2sky.context.context_carrier import ContextCarrier
from python2sky.context.context_manager import ContextManager
from python2sky.util.count_down_latch import CountDownLatch
from python2sky.util.uuid_util import global_id_to_string
from tests.base_test_case import BaseTestCase
class TestTracingContext(BaseTestCase):
def setUp(self):
super().setUp()
def test_ignored_segment(self):
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_extract(self):
carrier = ContextCarrier()
carrier.deserialize(self.SW6)
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", carrier)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
tracing_context = ContextManager.get_tracing_context()
self.assertEqual(tracing_context.segment.refs[0], carrier)
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_inject(self):
carrier = ContextCarrier()
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier)
sw6 = carrier.serialize()
self.assertIsNotNone(sw6)
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_inject_and_extract(self):
carrier = ContextCarrier()
carrier.deserialize(self.SW6)
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", carrier)
local_span = ContextManager.create_local_span("/local")
carrier2 = ContextCarrier()
exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier2)
sw6 = carrier.serialize()
self.assertEqual(sw6, carrier.serialize())
self.assertEqual(ContextManager.get_global_trace_id(), global_id_to_string(["3", "4", "5"]))
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
self.assertEqual(carrier.trace_id, carrier2.trace_id)
def local_thread(self, tracing_context, count_down_latch):
ContextManager.CONTEXT.trace_context = tracing_context
local_span = ContextManager.create_local_span("/local")
ContextManager.stop_span(local_span)
count_down_latch.count_down()
def exit_thread(self, tracing_context, count_down_latch):
ContextManager.CONTEXT.trace_context = tracing_context
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
count_down_latch.count_down()
def test_async(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
count_down_latch = CountDownLatch(2)
t1 = Thread(target=self.local_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
t2 = Thread(target=self.exit_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
t1.start()
t2.start()
count_down_latch.wait()
ContextManager.stop_span(entry_span)
def test_async2(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
context_carrier = ContextManager.capture()
count_down_latch = CountDownLatch(2)
trace_id = ContextManager.get_global_trace_id()
def local_thread():
local_span = ContextManager.create_local_span("/local")
ContextManager.continued(context_carrier)
trace_id1 = ContextManager.get_global_trace_id()
self.assertEqual(trace_id1, trace_id)
ContextManager.stop_span(local_span)
count_down_latch.count_down()
def exit_thread():
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.continued(context_carrier)
trace_id2 = ContextManager.get_global_trace_id()
self.assertEqual(trace_id2, trace_id)
time.sleep(3)
ContextManager.stop_span(exit_span)
count_down_latch.count_down()
t1 = Thread(target=local_thread, args=())
t2 = Thread(target=exit_thread, args=())
t1.start()
t2.start()
ContextManager.stop_span(entry_span)
count_down_latch.wait() | 0.218586 | 0.235553 |
import unittest2
from oslo.config import cfg
from st2common.content.utils import get_packs_base_paths, get_aliases_base_paths
from st2tests import config as tests_config
class ContentUtilsTestCase(unittest2.TestCase):
    """Tests for the colon-separated base-path parsing helpers."""
    @classmethod
    def setUpClass(cls):
        tests_config.parse_args()
    def test_get_pack_base_paths(self):
        cfg.CONF.content.system_packs_base_path = ''
        # (configured value, expected parsed result) — covers single path,
        # multiple paths, trailing colon and duplicate entries.
        cases = [
            ('/opt/path1', ['/opt/path1']),
            ('/opt/path1:/opt/path2', ['/opt/path1', '/opt/path2']),
            ('/opt/path1:/opt/path2:', ['/opt/path1', '/opt/path2']),
            ('/opt/path1:/opt/path2:/opt/path1:/opt/path2',
             ['/opt/path1', '/opt/path2']),
        ]
        for configured, expected in cases:
            cfg.CONF.content.packs_base_paths = configured
            self.assertEqual(get_packs_base_paths(), expected)
        # The system packs path must always come first when it is set.
        cfg.CONF.content.system_packs_base_path = '/opt/system'
        cfg.CONF.content.packs_base_paths = '/opt/path2:/opt/path1'
        self.assertEqual(get_packs_base_paths(),
                         ['/opt/system', '/opt/path2', '/opt/path1'])
    def test_get_aliases_base_paths(self):
        # Same parsing matrix as packs, but for the alias path option.
        cases = [
            ('/opt/path1', ['/opt/path1']),
            ('/opt/path1:/opt/path2', ['/opt/path1', '/opt/path2']),
            ('/opt/path1:/opt/path2:', ['/opt/path1', '/opt/path2']),
            ('/opt/path1:/opt/path2:/opt/path1:/opt/path2',
             ['/opt/path1', '/opt/path2']),
        ]
        for configured, expected in cases:
            cfg.CONF.content.aliases_base_paths = configured
            self.assertEqual(get_aliases_base_paths(), expected)
import unittest2
from oslo.config import cfg
from st2common.content.utils import get_packs_base_paths, get_aliases_base_paths
from st2tests import config as tests_config
class ContentUtilsTestCase(unittest2.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_get_pack_base_paths(self):
cfg.CONF.content.system_packs_base_path = ''
cfg.CONF.content.packs_base_paths = '/opt/path1'
result = get_packs_base_paths()
self.assertEqual(result, ['/opt/path1'])
# Multiple paths, no trailing colon
cfg.CONF.content.packs_base_paths = '/opt/path1:/opt/path2'
result = get_packs_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2'])
# Multiple paths, trailing colon
cfg.CONF.content.packs_base_paths = '/opt/path1:/opt/path2:'
result = get_packs_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2'])
# Multiple same paths
cfg.CONF.content.packs_base_paths = '/opt/path1:/opt/path2:/opt/path1:/opt/path2'
result = get_packs_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2'])
# Assert system path is always first
cfg.CONF.content.system_packs_base_path = '/opt/system'
cfg.CONF.content.packs_base_paths = '/opt/path2:/opt/path1'
result = get_packs_base_paths()
self.assertEqual(result, ['/opt/system', '/opt/path2', '/opt/path1'])
def test_get_aliases_base_paths(self):
cfg.CONF.content.aliases_base_paths = '/opt/path1'
result = get_aliases_base_paths()
self.assertEqual(result, ['/opt/path1'])
# Multiple paths, no trailing colon
cfg.CONF.content.aliases_base_paths = '/opt/path1:/opt/path2'
result = get_aliases_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2'])
# Multiple paths, trailing colon
cfg.CONF.content.aliases_base_paths = '/opt/path1:/opt/path2:'
result = get_aliases_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2'])
# Multiple same paths
cfg.CONF.content.aliases_base_paths = '/opt/path1:/opt/path2:/opt/path1:/opt/path2'
result = get_aliases_base_paths()
self.assertEqual(result, ['/opt/path1', '/opt/path2']) | 0.300232 | 0.115811 |
import os
import sys
import requests
import random
import discord
from dotenv import load_dotenv
from mgz.summary import Summary
load_dotenv()  # Pull DISCORD_TOKEN (and friends) from a local .env file.
TOKEN = os.getenv('DISCORD_TOKEN')  # Bot token consumed by client.run() below.
client = discord.Client()
# Civilization names looked up by the recording's 1-based civilization id
# (code elsewhere indexes this list with civilization - 1).
civCode = ["Britons", "Franks", "Goths", "Teutons", "Japanese", "Chinese", "Byzantines", "Persian", "Saracens", "Turks", "Vikings", "Mongols", "Celts", "Spanish", "Aztecs", "Mayans", "Huns", "Koreans", "Italians", "Indians", "Incas", "Magyars", "Slav", "Portuguese", "Ethiopians", "Malians", "Berbers", "Khmer", "Malay", "Burmese", "Vietnamese", "Bulgarians", "Tatars", "Cumans", "Lithuanians", "burgundians", "sicilians"]
# Random flavour lines posted while a replay is being downloaded/parsed.
rndLine = [
    "Who said mangoes grow on trees? I saw them coming from siege workshops, let me check if you grew some",
    "Match didn't start in post-imp, so give me time to watch you get there and I’ll tell you how bad you did soon",
    "Wait for me, I’m an old bot, it takes me a bit of time to watch your long game",
    "It takes a few seconds for me to watch your game, I have to stop and re-watch every miss-click you make",
    "Dude, give me a minute to process your game, it made me fall asleep a few times",
    "error 404: EPIC MANGO SHOT not found. Deleting your account...",
    "are you sure you want others to watch this game?! I'll edit it as much as I can before FARM-MAN casts it",
    "so many bad plays, and I still keep counting them",
    "yo, got an error, can't move past this awful push you made, wait until I fix myself",
    "I am actually kidnapped, forced to watch replays and report score, please send help befo-",
    ""
]
# Code-fence language tags used purely to colour the flavour message.
rndColor = ["yaml", "fix", "css"] #many more to come
@client.event
async def on_message(msg):
    """Handle channel messages carrying AoE2 replay attachments.

    When a ``.aoe2record`` file is attached: post a random flavour line,
    download and parse the recording, then post a spoiler-tagged summary
    embed (map, teams, winner). Any other attachment is deleted with a
    warning; messages without attachments are ignored.
    """
    if not msg.attachments:
        return
    if not msg.attachments[0].url.endswith("aoe2record"):
        await msg.delete()
        await msg.channel.send("Only Age of Empires 2 replay files allowed in this channel!")
        return
    random.seed()
    # random.choice is the idiomatic (and off-by-one-proof) way to pick a
    # random list entry — same distribution as the old randint indexing.
    replyMsg = "```" + random.choice(rndColor) + "\n" + random.choice(rndLine) + "\n```"
    await msg.channel.send(replyMsg)
    r = requests.get(msg.attachments[0].url)
    # FIX: close the download handle (was a leaked open(...).write(...)).
    with open("currentDLGame.aoe2record", "wb") as download:
        download.write(r.content)
    with open("currentDLGame.aoe2record", "rb") as data:
        s = Summary(data)
        allPlayers = s.get_players()
        pMap = s.get_map()
        winnerNames = []
        winnerCiv = []
        loserNames = []
        loserCiv = []
        wTeam = ""
        lTeam = ""
        for x in allPlayers:
            # civCode is indexed by the recording's 1-based civilization id.
            if x["winner"]:
                winnerNames.append(x["name"])
                winnerCiv.append(civCode[x["civilization"] - 1])
            else:
                loserNames.append(x["name"])
                loserCiv.append(civCode[x["civilization"] - 1])
        # FIX: build each team's listing over its own players — the original
        # indexed the losers with the winners' range, raising IndexError
        # whenever the two team sizes differed.
        for name, civ in zip(winnerNames, winnerCiv):
            wTeam += name + " - " + civ + "\n"
        for name, civ in zip(loserNames, loserCiv):
            lTeam += name + " - " + civ + "\n"
        embed = discord.Embed(title = "Map: ||" + str(pMap["name"]) + "||")
        # Randomise which side is displayed as "Team 1" so the layout itself
        # does not reveal the winner before the spoiler tag is clicked.
        if random.randint(0, 1) == 1:
            embed.add_field(name = "Winner:", value = "||**Team 1**||", inline= False)
            embed.add_field(name = "Team 1", value = wTeam, inline = True)
            embed.add_field(name = "VS", value = " - \n"*len(winnerNames), inline = True)
            embed.add_field(name = "Team 2", value = lTeam, inline = True)
        else:
            embed.add_field(name = "Winner:", value = "||**Team 2**||", inline= False)
            embed.add_field(name = "Team 1", value = lTeam, inline = True)
            embed.add_field(name = "VS", value = " - \n"*len(winnerNames), inline = True)
            embed.add_field(name = "Team 2", value = wTeam, inline = True)
        await msg.channel.send(embed = embed)
client.run(TOKEN)  # Start the Discord event loop (blocks until the bot exits).
import sys
import requests
import random
import discord
from dotenv import load_dotenv
from mgz.summary import Summary
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
civCode = ["Britons", "Franks", "Goths", "Teutons", "Japanese", "Chinese", "Byzantines", "Persian", "Saracens", "Turks", "Vikings", "Mongols", "Celts", "Spanish", "Aztecs", "Mayans", "Huns", "Koreans", "Italians", "Indians", "Incas", "Magyars", "Slav", "Portuguese", "Ethiopians", "Malians", "Berbers", "Khmer", "Malay", "Burmese", "Vietnamese", "Bulgarians", "Tatars", "Cumans", "Lithuanians", "burgundians", "sicilians"]
rndLine = [
"Who said mangoes grow on trees? I saw them coming from siege workshops, let me check if you grew some",
"Match didn't start in post-imp, so give me time to watch you get there and I’ll tell you how bad you did soon",
"Wait for me, I’m an old bot, it takes me a bit of time to watch your long game",
"It takes a few seconds for me to watch your game, I have to stop and re-watch every miss-click you make",
"Dude, give me a minute to process your game, it made me fall asleep a few times",
"error 404: EPIC MANGO SHOT not found. Deleting your account...",
"are you sure you want others to watch this game?! I'll edit it as much as I can before FARM-MAN casts it",
"so many bad plays, and I still keep counting them",
"yo, got an error, can't move past this awful push you made, wait until I fix myself",
"I am actually kidnapped, forced to watch replays and report score, please send help befo-",
""
]
rndColor = ["yaml", "fix", "css"] #many more to come
@client.event
async def on_message(msg):
if msg.attachments:
if msg.attachments[0].url.endswith("aoe2record"):
random.seed()
replyMsg = "```" + rndColor[random.randint(0,len(rndColor)-1)] + "\n" + rndLine[random.randint(0, len(rndLine)-1)] + "\n```"
await msg.channel.send(replyMsg)
r = requests.get(msg.attachments[0].url)
open("currentDLGame.aoe2record", "wb").write(r.content)
with open("currentDLGame.aoe2record", "rb") as data:
s = Summary(data)
allPlayers = s.get_players()
pMap = s.get_map()
winnerNames = []
winnerCiv = []
loserNames = []
loserCiv = []
wTeam = ""
lTeam = ""
for x in allPlayers:
if x["winner"]:
winnerNames.append(x["name"])
winnerCiv.append(civCode[x["civilization"]-1])
else:
loserNames.append(x["name"])
loserCiv.append(civCode[x["civilization"]-1])
for w in range(len(winnerNames)):
wTeam += winnerNames[w] + " - " + winnerCiv[w] + "\n"
lTeam += loserNames[w] + " - " + loserCiv[w] + "\n"
embed = discord.Embed(title = "Map: ||" + str(pMap["name"]) + "||")
if random.randint(0,1) == 1:
embed.add_field(name = "Winner:", value = "||**Team 1**||", inline= False)
embed.add_field(name = "Team 1", value = wTeam, inline = True)
embed.add_field(name = "VS", value = " - \n"*len(winnerNames), inline = True)
embed.add_field(name = "Team 2", value = lTeam, inline = True)
else:
embed.add_field(name = "Winner:", value = "||**Team 2**||", inline= False)
embed.add_field(name = "Team 1", value = lTeam, inline = True)
embed.add_field(name = "VS", value = " - \n"*len(winnerNames), inline = True)
embed.add_field(name = "Team 2", value = wTeam, inline = True)
await msg.channel.send(embed = embed)
else:
await msg.delete()
await msg.channel.send("Only Age of Empires 2 replay files allowed in this channel!")
client.run(TOKEN) | 0.175291 | 0.295942 |
from __future__ import absolute_import
import inspect
import os
import site
import sys
from functools import wraps
from types import ModuleType
# Windows gets the real implementation; every other OS falls back to a
# no-op that returns the requested coordinates unchanged.
if os.name == 'nt':
    from .windows import setCoordinatesToScreen
else:
    def setCoordinatesToScreen(x, y, *args, **kwargs):
        # Non-Windows stub: no screen-bounds clamping is performed.
        return (x, y)
# Roots of installed third-party packages; used by searchGlobals below to
# skip modules that are not part of the user's own code.
SITE_PACKAGES = site.getsitepackages()
class hybridmethod(object):
    """Merge a normal method with a classmethod.
    The first two arguments are (cls, self), where self will match cls if it is a classmethod.
    Source: https://stackoverflow.com/a/18078819/2403000
    """
    def __init__(self, func):
        # Wrapped callable; invoked as func(cls, context, *args, **kwargs).
        self.func = func
    def __get__(self, obj, cls):
        # Accessed on an instance -> context is that instance; accessed on
        # the class -> context falls back to the class itself.
        context = obj if obj is not None else cls
        @wraps(self.func)
        def hybrid(*args, **kw):
            return self.func(cls, context, *args, **kw)
        # Mimic method attributes (not required)
        # im_func/im_self are the Python 2 spellings of __func__/__self__.
        hybrid.__func__ = hybrid.im_func = self.func
        hybrid.__self__ = hybrid.im_self = context
        return hybrid
def searchGlobals(cls, globalsDict=None, visited=None):
    """Search from the top level globals for a particular object.

    Walks *globalsDict* looking for a name bound to *cls*; every local
    (non-builtin, non-site-packages) module encountered is searched
    recursively too.

    :param cls: object to locate (compared by identity).
    :param globalsDict: mapping to search; defaults to the globals of the
        module at the top of the call stack.
    :param visited: set of modules already searched (recursion guard);
        callers normally leave this as None.
    :return: dotted access path to the object as a string, or None if it
        cannot be found.
    """
    # Read the globals from the module at the top of the stack
    if globalsDict is None:
        globalsDict = inspect.stack()[-1][0].f_globals
    # Initially mark every builtin module as visited
    if visited is None:
        visited = set(filter(bool, map(sys.modules.get, sys.builtin_module_names)))
    for k, v in globalsDict.items():
        # FIX: identity check instead of '==' — equality would invoke
        # arbitrary __eq__ implementations on every global value, which can
        # raise (e.g. array types with ambiguous truth values) or produce
        # false positives.
        if v is cls:
            return k
        elif isinstance(v, ModuleType) and v not in visited:
            visited.add(v)
            # Check it's not a built-in module
            try:
                modulePath = inspect.getsourcefile(v)
            except TypeError:
                continue
            # Skip any installed modules
            if modulePath is None or any(modulePath.startswith(i) for i in SITE_PACKAGES):
                continue
            # Recursively search the next module
            result = searchGlobals(cls, v.__dict__, visited=visited)
            if result:
                return k + '.' + result
import inspect
import os
import site
import sys
from functools import wraps
from types import ModuleType
if os.name == 'nt':
from .windows import setCoordinatesToScreen
else:
def setCoordinatesToScreen(x, y, *args, **kwargs):
return (x, y)
SITE_PACKAGES = site.getsitepackages()
class hybridmethod(object):
"""Merge a normal method with a classmethod.
The first two arguments are (cls, self), where self will match cls if it is a classmethod.
Source: https://stackoverflow.com/a/18078819/2403000
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
context = obj if obj is not None else cls
@wraps(self.func)
def hybrid(*args, **kw):
return self.func(cls, context, *args, **kw)
# Mimic method attributes (not required)
hybrid.__func__ = hybrid.im_func = self.func
hybrid.__self__ = hybrid.im_self = context
return hybrid
def searchGlobals(cls, globalsDict=None, visited=None):
"""Search from the top level globals for a particular object.
Every time a module is found, search that too.
"""
# Read the globals from the module at the top of the stack
if globalsDict is None:
globalsDict = inspect.stack()[-1][0].f_globals
# Initially mark every builtin module as visisted
if visited is None:
visited = set(filter(bool, map(sys.modules.get, sys.builtin_module_names)))
for k, v in globalsDict.items():
if v == cls:
return k
elif isinstance(v, ModuleType) and v not in visited:
visited.add(v)
#Check it's not a built in module
try:
modulePath = inspect.getsourcefile(v)
except TypeError:
continue
# Skip any installed modules
if modulePath is None or any(modulePath.startswith(i) for i in SITE_PACKAGES):
continue
# Recursively search the next module
result = searchGlobals(cls, v.__dict__, visited=visited)
if result:
return k + '.' + result | 0.489748 | 0.139309 |
import argparse
import io
import sys
import time
import picamera
import tensorflow as tf
from PIL import Image
from detect_picamera import set_input_tensor, get_output_tensor, detect_objects
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
def parse_args():
    """Build and parse the command-line arguments for the detector.

    Returns an argparse.Namespace with model, threshold, timeout and
    inverse attributes.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model', required=True,
                        help='File path of .tflite file.')
    parser.add_argument('--threshold', required=False, type=float, default=0.4,
                        help='Score threshold for detected objects.')
    parser.add_argument('--timeout', required=False, type=int, default=5400,
                        help='Timeout seconds.')
    parser.add_argument('--inverse', action='store_true',
                        help='Inverse detect result.')
    return parser.parse_args()
def main():
    """Run object detection on the PiCamera stream until a match or timeout.

    Exits with status 0 when a detection satisfies the class/--inverse
    test, or status 1 once --timeout seconds elapse without one.
    """
    start_time = time.monotonic()
    args = parse_args()
    interpreter = tf.lite.Interpreter(args.model)
    interpreter.allocate_tensors()
    # Input tensor shape is (batch, height, width, channels).
    _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
    with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
        camera.start_preview()
        try:
            stream = io.BytesIO()
            # capture_continuous keeps writing JPEG frames into `stream`.
            for i, _ in enumerate(camera.capture_continuous(stream, format='jpeg', use_video_port=True)):
                stream.seek(0)
                # NOTE(review): Image.ANTIALIAS is removed in Pillow 10+
                # (use Image.LANCZOS) -- confirm the pinned Pillow version.
                image = Image.open(stream) \
                    .convert('RGB') \
                    .resize((input_width, input_height), Image.ANTIALIAS)
                results = detect_objects(interpreter, image, args.threshold)
                for result in results:
                    # XOR: exit on class 0 normally, or on any non-class-0
                    # detection when --inverse is set.
                    if args.inverse ^ (result['class_id'] == 0):
                        sys.exit(0)
                if time.monotonic() - start_time > args.timeout:
                    sys.exit(1)
                # Reset the buffer for the next captured frame.
                stream.seek(0)
                stream.truncate()
        finally:
            camera.stop_preview()


if __name__ == '__main__':
    main()
import io
import sys
import time
import picamera
import tensorflow as tf
from PIL import Image
from detect_picamera import set_input_tensor, get_output_tensor, detect_objects
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='File path of .tflite file.',
required=True
)
parser.add_argument(
'--threshold',
help='Score threshold for detected objects.',
required=False,
type=float,
default=0.4
)
parser.add_argument(
'--timeout',
help='Timeout seconds.',
required=False,
type=int,
default=5400
)
parser.add_argument(
'--inverse',
help='Inverse detect result.',
action='store_true'
)
return parser.parse_args()
def main():
start_time = time.monotonic()
args = parse_args()
interpreter = tf.lite.Interpreter(args.model)
interpreter.allocate_tensors()
_, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']
with picamera.PiCamera(resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera:
camera.start_preview()
try:
stream = io.BytesIO()
for i, _ in enumerate(camera.capture_continuous(stream, format='jpeg', use_video_port=True)):
stream.seek(0)
image = Image.open(stream) \
.convert('RGB') \
.resize((input_width, input_height), Image.ANTIALIAS)
results = detect_objects(interpreter, image, args.threshold)
for result in results:
if args.inverse ^ (result['class_id'] == 0):
sys.exit(0)
if time.monotonic() - start_time > args.timeout:
sys.exit(1)
stream.seek(0)
stream.truncate()
finally:
camera.stop_preview()
if __name__ == '__main__':
main() | 0.371137 | 0.092074 |
import boto3
import sys
import traceback
import re
from collections import defaultdict
from datetime import datetime
from itertools import islice
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import create_coord
from time import time
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
def format_stacktrace_one_line(exc_info=None):
    """Render an exception traceback as a single ' | '-separated line.

    exc_info is an (exc_type, exc_value, exc_traceback) tuple as returned
    by sys.exc_info(); when omitted, the currently handled exception is
    used.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    formatted = traceback.format_exception(*exc_info)
    # Strip embedded newlines so the whole trace fits on one log line.
    return ' | '.join(line.replace('\n', '') for line in formatted)
def grouper(iterable, n):
    """Yield n-length chunks of the iterable (the last may be shorter)."""
    iterator = iter(iterable)
    for first in iterator:
        # Pull one item eagerly, then up to n-1 more from the same iterator.
        yield (first,) + tuple(islice(iterator, n - 1))
def parse_log_file(log_file):
    """Parse tile-request access-log lines.

    Returns a list of (ip, datetime, marshalled-coord-int) tuples; lines
    that do not match the expected format are silently skipped.
    """
    ip_pattern = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
    # didn't match against an explicit date pattern, in case it changes
    date_pattern = r'\[([\d\w\s\/:]+)\]'
    # Captures layer, z, x, y and extension from paths like /layer/z/x/y.ext
    tile_id_pattern = r'\/([\w]+)\/([\d]+)\/([\d]+)\/([\d]+)\.([\d\w]*)'
    log_pattern = r'%s - - %s "([\w]+) %s.*' % (
        ip_pattern, date_pattern, tile_id_pattern)
    tile_log_records = []
    for log_string in log_file:
        match = re.search(log_pattern, log_string)
        if match and len(match.groups()) == 8:
            # NOTE(review): '%d/%B/%Y %H:%M:%S' expects a full month name
            # and a space before the time, while common access-log dates
            # look like '10/Feb/2016:11:30:00' -- confirm against real logs.
            tile_log_records.append(
                (match.group(1),
                 datetime.strptime(match.group(2), '%d/%B/%Y %H:%M:%S'),
                 # Groups 6/7 are the x/y columns and group 5 is the zoom.
                 coord_marshall_int(
                     create_coord(
                         match.group(6), match.group(7), match.group(5)))))
    return tile_log_records
def encode_utf8(x):
    """Recursively encode unicode strings inside x to UTF-8 byte strings.

    Handles nested dicts (both keys and values), lists and tuples; any
    other value is returned unchanged.  Python 2 only: relies on the
    `unicode` builtin and on `map` returning a list.
    """
    if x is None:
        return None
    elif isinstance(x, unicode):
        return x.encode('utf-8')
    elif isinstance(x, dict):
        result = {}
        for k, v in x.items():
            # Keys are encoded in place; values are encoded recursively.
            if isinstance(k, unicode):
                k = k.encode('utf-8')
            result[k] = encode_utf8(v)
        return result
    elif isinstance(x, list):
        return map(encode_utf8, x)
    elif isinstance(x, tuple):
        # Encode via the list branch, then convert back to a tuple.
        return tuple(encode_utf8(list(x)))
    else:
        return x
class time_block(object):
    """Context manager that records its elapsed wall time in milliseconds.

    On exit the duration is stored under `key` in the supplied dictionary.
    """

    def __init__(self, timing_state, key):
        # timing_state should be a dictionary
        self.timing_state = timing_state
        self.key = key

    def __enter__(self):
        self.start = time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed_seconds = time() - self.start
        self.timing_state[self.key] = elapsed_seconds * 1000
class CoordsByParent(object):
    """Group tile coordinates by their ancestor tile at a fixed zoom.

    Coordinates below `parent_zoom` are kept as their own singleton
    groups; all others are grouped under their parent at `parent_zoom`.
    """

    def __init__(self, parent_zoom):
        self.parent_zoom = parent_zoom
        # Maps group key (a coord) -> list of added entries.
        self.groups = defaultdict(list)

    def add(self, coord, *extra):
        # Store the bare coord, or a (coord, *extra) tuple when extra
        # payload values accompany it.
        data = coord
        if extra:
            data = (coord,) + extra
        # treat tiles as singletons below the parent zoom
        if coord.zoom < self.parent_zoom:
            self.groups[coord].append(data)
        else:
            # otherwise, group by the parent tile at the parent zoom.
            parent_coord = coord.zoomTo(self.parent_zoom).container()
            self.groups[parent_coord].append(data)

    def __iter__(self):
        # Python 2 only (dict.iteritems); yields (group_key, [data, ...]).
        return self.groups.iteritems()
def convert_seconds_to_millis(time_in_seconds):
    """Convert a duration in seconds to an integer number of milliseconds."""
    return int(time_in_seconds * 1000)
class AwsSessionHelper:
    """ The AwsSessionHelper creates a auto-refreshable boto3 session object
    and allows for creating clients with those refreshable credentials.
    """

    def __init__(self, session_name, role_arn, region='us-east-1',
                 s3_role_session_duration_s=3600):
        """ session_name: str; The name of the session we are creating
        role_arn: str; The ARN of the role we are assuming with STS
        region: str; The region for the STS client to be created in
        s3_role_session_duration_s: int; the time that session is good for
        """
        self.role_arn = role_arn
        self.session_name = session_name
        self.region = region
        self.session_duration_seconds = s3_role_session_duration_s
        self.sts_client = boto3.client('sts')
        # Assume the role once up front, then hand the same callback to
        # botocore so credentials renew automatically near expiry.
        credentials = self._refresh()
        session_credentials = RefreshableCredentials.create_from_metadata(
            metadata=credentials,
            refresh_using=self._refresh,
            method="sts-assume-role"
        )
        # Install the refreshable credentials on a raw botocore session
        # (private attribute -- no public API exists for this), then wrap
        # it in a boto3 Session used for all client creation.
        aws_session = get_session()
        aws_session._credentials = session_credentials
        aws_session.set_config_variable("region", region)
        self.aws_session = boto3.Session(botocore_session=aws_session)

    def get_client(self, service):
        """ Returns boto3.client with the refreshable session
        service: str; String of what service to create a client for
        (e.g. 'sqs', 's3')
        """
        return self.aws_session.client(service)

    def get_session(self):
        """ Returns the raw refreshable aws session
        """
        return self.aws_session

    def _refresh(self):
        """Assume the configured role and return its credentials in the
        metadata shape RefreshableCredentials expects."""
        params = {
            "RoleArn": self.role_arn,
            "RoleSessionName": self.session_name,
            "DurationSeconds": self.session_duration_seconds,
        }
        response = self.sts_client.assume_role(**params).get("Credentials")
        credentials = {
            "access_key": response.get("AccessKeyId"),
            "secret_key": response.get("SecretAccessKey"),
            "token": response.get("SessionToken"),
            # ISO-8601 string, as required by create_from_metadata.
            "expiry_time": response.get("Expiration").isoformat(),
        }
        return credentials
import sys
import traceback
import re
from collections import defaultdict
from datetime import datetime
from itertools import islice
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import create_coord
from time import time
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
def format_stacktrace_one_line(exc_info=None):
# exc_info is expected to be an exception tuple from sys.exc_info()
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_traceback = exc_info
exception_lines = traceback.format_exception(exc_type, exc_value,
exc_traceback)
stacktrace = ' | '.join([x.replace('\n', '')
for x in exception_lines])
return stacktrace
def grouper(iterable, n):
"""Yield n-length chunks of the iterable"""
it = iter(iterable)
while True:
chunk = tuple(islice(it, n))
if not chunk:
return
yield chunk
def parse_log_file(log_file):
ip_pattern = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
# didn't match againts explicit date pattern, in case it changes
date_pattern = r'\[([\d\w\s\/:]+)\]'
tile_id_pattern = r'\/([\w]+)\/([\d]+)\/([\d]+)\/([\d]+)\.([\d\w]*)'
log_pattern = r'%s - - %s "([\w]+) %s.*' % (
ip_pattern, date_pattern, tile_id_pattern)
tile_log_records = []
for log_string in log_file:
match = re.search(log_pattern, log_string)
if match and len(match.groups()) == 8:
tile_log_records.append(
(match.group(1),
datetime.strptime(match.group(2), '%d/%B/%Y %H:%M:%S'),
coord_marshall_int(
create_coord(
match.group(6), match.group(7), match.group(5)))))
return tile_log_records
def encode_utf8(x):
if x is None:
return None
elif isinstance(x, unicode):
return x.encode('utf-8')
elif isinstance(x, dict):
result = {}
for k, v in x.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
result[k] = encode_utf8(v)
return result
elif isinstance(x, list):
return map(encode_utf8, x)
elif isinstance(x, tuple):
return tuple(encode_utf8(list(x)))
else:
return x
class time_block(object):
"""Convenience to capture timing information"""
def __init__(self, timing_state, key):
# timing_state should be a dictionary
self.timing_state = timing_state
self.key = key
def __enter__(self):
self.start = time()
def __exit__(self, exc_type, exc_val, exc_tb):
stop = time()
duration_seconds = stop - self.start
duration_millis = duration_seconds * 1000
self.timing_state[self.key] = duration_millis
class CoordsByParent(object):
def __init__(self, parent_zoom):
self.parent_zoom = parent_zoom
self.groups = defaultdict(list)
def add(self, coord, *extra):
data = coord
if extra:
data = (coord,) + extra
# treat tiles as singletons below the parent zoom
if coord.zoom < self.parent_zoom:
self.groups[coord].append(data)
else:
# otherwise, group by the parent tile at the parent zoom.
parent_coord = coord.zoomTo(self.parent_zoom).container()
self.groups[parent_coord].append(data)
def __iter__(self):
return self.groups.iteritems()
def convert_seconds_to_millis(time_in_seconds):
time_in_millis = int(time_in_seconds * 1000)
return time_in_millis
class AwsSessionHelper:
""" The AwsSessionHelper creates a auto-refreshable boto3 session object
and allows for creating clients with those refreshable credentials.
"""
def __init__(self, session_name, role_arn, region='us-east-1',
s3_role_session_duration_s=3600):
""" session_name: str; The name of the session we are creating
role_arn: str; The ARN of the role we are assuming with STS
region: str; The region for the STS client to be created in
s3_role_session_duration_s: int; the time that session is good for
"""
self.role_arn = role_arn
self.session_name = session_name
self.region = region
self.session_duration_seconds = s3_role_session_duration_s
self.sts_client = boto3.client('sts')
credentials = self._refresh()
session_credentials = RefreshableCredentials.create_from_metadata(
metadata=credentials,
refresh_using=self._refresh,
method="sts-assume-role"
)
aws_session = get_session()
aws_session._credentials = session_credentials
aws_session.set_config_variable("region", region)
self.aws_session = boto3.Session(botocore_session=aws_session)
def get_client(self, service):
""" Returns boto3.client with the refreshable session
service: str; String of what service to create a client for
(e.g. 'sqs', 's3')
"""
return self.aws_session.client(service)
def get_session(self):
""" Returns the raw refreshable aws session
"""
return self.aws_session
def _refresh(self):
params = {
"RoleArn": self.role_arn,
"RoleSessionName": self.session_name,
"DurationSeconds": self.session_duration_seconds,
}
response = self.sts_client.assume_role(**params).get("Credentials")
credentials = {
"access_key": response.get("AccessKeyId"),
"secret_key": response.get("SecretAccessKey"),
"token": response.get("SessionToken"),
"expiry_time": response.get("Expiration").isoformat(),
}
return credentials | 0.419172 | 0.203391 |
from the_water_project.users.serializers import OnlyIdAndNameUserSerializer
from .models import (
IssueComment,
StartingComment,
TopicDiscussion,
TopicCommentLike,
IssueCommentLike,
StartingCommentLike,
)
from rest_framework import serializers
class OnlyLikeNumStartingCommentSerializer(serializers.ModelSerializer):
    """Serialize a StartingCommentLike as its like count plus whether the
    requesting user has liked it."""
    user_liked = serializers.SerializerMethodField()

    class Meta:
        model = StartingCommentLike
        fields = ("no_of_likes", "user_liked")

    def get_user_liked(self, obj):
        # The requesting user is supplied via the parent serializer's
        # context under "request_user".
        user = self.parent.context.get("request_user")
        if user:
            if user in obj.users.all():
                return True
            else:
                return False
        # NOTE(review): falls through to an implicit None when no user is
        # in context -- the field serializes as null rather than False.
class StartingCommentSerializer(serializers.ModelSerializer):
    """Full StartingComment representation with nested like info and creator."""
    likes = OnlyLikeNumStartingCommentSerializer()
    creator = OnlyIdAndNameUserSerializer()

    class Meta:
        model = StartingComment
        fields = "__all__"
class OnlyLikeNumIssueCommentSerializer(serializers.ModelSerializer):
    """Serialize an IssueCommentLike as its like count plus whether the
    requesting user has liked it."""
    user_liked = serializers.SerializerMethodField()

    class Meta:
        model = IssueCommentLike
        fields = ("no_of_likes", "user_liked")

    def get_user_liked(self, obj):
        # The requesting user is supplied via the parent serializer's
        # context under "request_user".
        user = self.parent.context.get("request_user")
        if user:
            if user in obj.users.all():
                return True
            else:
                return False
        # NOTE(review): falls through to an implicit None when no user is
        # in context -- the field serializes as null rather than False.
class IssueCommentSerializer(serializers.ModelSerializer):
    """Full IssueComment representation with nested like info and creator."""
    likes = OnlyLikeNumIssueCommentSerializer()
    creator = OnlyIdAndNameUserSerializer()

    class Meta:
        model = IssueComment
        fields = "__all__"
class OnlyLikeNumTopicCommentSerializer(serializers.ModelSerializer):
    """Serialize a TopicCommentLike as its like count plus whether the
    requesting user has liked it."""
    user_liked = serializers.SerializerMethodField()

    class Meta:
        model = TopicCommentLike
        fields = ("no_of_likes", "user_liked")

    def get_user_liked(self, obj):
        # The requesting user is supplied via the parent serializer's
        # context under "request_user".
        user = self.parent.context.get("request_user")
        if user:
            if user in obj.users.all():
                return True
            else:
                return False
        # NOTE(review): falls through to an implicit None when no user is
        # in context -- the field serializes as null rather than False.
class TopicDiscussionSerializer(serializers.ModelSerializer):
    """Full TopicDiscussion representation with nested like info and creator."""
    likes = OnlyLikeNumTopicCommentSerializer()
    creator = OnlyIdAndNameUserSerializer()

    class Meta:
        model = TopicDiscussion
        fields = "__all__"
class StartingCommentLikeSerializer(serializers.ModelSerializer):
    """All fields of a StartingCommentLike, unfiltered."""
    class Meta:
        model = StartingCommentLike
        fields = "__all__"
class TopicCommentLikeSerializer(serializers.ModelSerializer):
    """All fields of a TopicCommentLike, unfiltered."""
    class Meta:
        model = TopicCommentLike
        fields = "__all__"
class IssueCommentLikeSerializer(serializers.ModelSerializer):
    """All fields of an IssueCommentLike, unfiltered."""
    class Meta:
        model = IssueCommentLike
        fields = "__all__"
from .models import (
IssueComment,
StartingComment,
TopicDiscussion,
TopicCommentLike,
IssueCommentLike,
StartingCommentLike,
)
from rest_framework import serializers
class OnlyLikeNumStartingCommentSerializer(serializers.ModelSerializer):
user_liked = serializers.SerializerMethodField()
class Meta:
model = StartingCommentLike
fields = ("no_of_likes", "user_liked")
def get_user_liked(self, obj):
user = self.parent.context.get("request_user")
if user:
if user in obj.users.all():
return True
else:
return False
class StartingCommentSerializer(serializers.ModelSerializer):
likes = OnlyLikeNumStartingCommentSerializer()
creator = OnlyIdAndNameUserSerializer()
class Meta:
model = StartingComment
fields = "__all__"
class OnlyLikeNumIssueCommentSerializer(serializers.ModelSerializer):
user_liked = serializers.SerializerMethodField()
class Meta:
model = IssueCommentLike
fields = ("no_of_likes", "user_liked")
def get_user_liked(self, obj):
user = self.parent.context.get("request_user")
if user:
if user in obj.users.all():
return True
else:
return False
class IssueCommentSerializer(serializers.ModelSerializer):
likes = OnlyLikeNumIssueCommentSerializer()
creator = OnlyIdAndNameUserSerializer()
class Meta:
model = IssueComment
fields = "__all__"
class OnlyLikeNumTopicCommentSerializer(serializers.ModelSerializer):
user_liked = serializers.SerializerMethodField()
class Meta:
model = TopicCommentLike
fields = ("no_of_likes", "user_liked")
def get_user_liked(self, obj):
user = self.parent.context.get("request_user")
if user:
if user in obj.users.all():
return True
else:
return False
class TopicDiscussionSerializer(serializers.ModelSerializer):
likes = OnlyLikeNumTopicCommentSerializer()
creator = OnlyIdAndNameUserSerializer()
class Meta:
model = TopicDiscussion
fields = "__all__"
class StartingCommentLikeSerializer(serializers.ModelSerializer):
class Meta:
model = StartingCommentLike
fields = "__all__"
class TopicCommentLikeSerializer(serializers.ModelSerializer):
class Meta:
model = TopicCommentLike
fields = "__all__"
class IssueCommentLikeSerializer(serializers.ModelSerializer):
class Meta:
model = IssueCommentLike
fields = "__all__" | 0.491456 | 0.067454 |
from flask import abort, Blueprint, render_template, request, redirect, flash
from django.core.exceptions import ValidationError
from config.settings import DOMAIN_NAME
from database.study_models import Survey
from libs.admin_authentication import authenticate_admin_study_access,\
get_admins_allowed_studies, admin_is_system_admin
survey_designer = Blueprint('survey_designer', __name__)
# TODO: Low Priority. implement "study does not exist" page.
# TODO: Low Priority. implement "survey does not exist" page.
@survey_designer.route('/create_survey/<string:study_id>/<string:survey_type>', methods=['GET', 'POST'])
@authenticate_admin_study_access
def create_survey(study_id=None, survey_type='tracking_survey'):
    """GET: render the survey-creation form.

    POST: create the survey and redirect to its edit page; on a duplicate
    name (ValidationError), flash an error and redirect back to the form.
    """
    if request.method == 'GET':
        return render_template(
            'create_survey.html',
            allowed_studies=get_admins_allowed_studies(),
            study_id=study_id,
            survey_type=survey_type,
            system_admin=admin_is_system_admin()
        )
    # NOTE(review): an earlier comment claimed whitespace/special
    # characters are stripped from the name, but the value is used as-is.
    survey_name = request.form.get('survey_name', '')
    try:
        new_survey = Survey.create_with_settings(study_id=study_id, survey_type=survey_type, name=survey_name)
    except ValidationError:
        flash("Please choose a different name, {0} is already in use.".format(survey_name), 'danger')
        # study_id arrives as a string route param; int() makes {0:d} valid.
        return redirect('/create_survey/{0:d}/{1}'.format(int(study_id), survey_type))
    return redirect('/edit_survey/{:d}'.format(new_survey.id))
@survey_designer.route('/edit_survey/<string:survey_id>')
@authenticate_admin_study_access
def render_edit_survey(survey_id=None):
    """Render the survey editor page for an existing survey.

    Returns a 404 response when no survey matches survey_id.
    """
    try:
        survey = Survey.objects.get(pk=survey_id)
    except Survey.DoesNotExist:
        return abort(404)
    # Fix: the original bound an unused local (`s`) and then called
    # as_native_python() a second time for the template; serialize once.
    return render_template(
        'edit_survey.html',
        survey=survey.as_native_python(),
        study=survey.study,
        allowed_studies=get_admins_allowed_studies(),
        system_admin=admin_is_system_admin(),
        domain_name=DOMAIN_NAME,  # used in a Javascript alert, see survey-editor.js
    )
from django.core.exceptions import ValidationError
from config.settings import DOMAIN_NAME
from database.study_models import Survey
from libs.admin_authentication import authenticate_admin_study_access,\
get_admins_allowed_studies, admin_is_system_admin
survey_designer = Blueprint('survey_designer', __name__)
# TODO: Low Priority. implement "study does not exist" page.
# TODO: Low Priority. implement "survey does not exist" page.
@survey_designer.route('/create_survey/<string:study_id>/<string:survey_type>', methods=['GET', 'POST'])
@authenticate_admin_study_access
def create_survey(study_id=None, survey_type='tracking_survey'):
if request.method == 'GET':
return render_template(
'create_survey.html',
allowed_studies=get_admins_allowed_studies(),
study_id=study_id,
survey_type=survey_type,
system_admin=admin_is_system_admin()
)
# Drop any whitespace or special characters from the username
survey_name = request.form.get('survey_name', '')
try:
new_survey = Survey.create_with_settings(study_id=study_id, survey_type=survey_type, name=survey_name)
except ValidationError:
flash("Please choose a different name, {0} is already in use.".format(survey_name), 'danger')
return redirect('/create_survey/{0:d}/{1}'.format(int(study_id), survey_type))
return redirect('/edit_survey/{:d}'.format(new_survey.id))
@survey_designer.route('/edit_survey/<string:survey_id>')
@authenticate_admin_study_access
def render_edit_survey(survey_id=None):
try:
survey = Survey.objects.get(pk=survey_id)
except Survey.DoesNotExist:
return abort(404)
s = survey.as_native_python()
study = survey.study
return render_template(
'edit_survey.html',
survey=survey.as_native_python(),
study=study,
allowed_studies=get_admins_allowed_studies(),
system_admin=admin_is_system_admin(),
domain_name=DOMAIN_NAME, # used in a Javascript alert, see survey-editor.js
) | 0.27406 | 0.064506 |
import os
from unittest import TestCase
import pytest
from testfixtures import tempdir
from pyginny.models.util.file_util import FileUtil
class TestFileUtil(TestCase):
    """Unit tests for pyginny's FileUtil filesystem helpers.

    Filesystem tests run inside a temporary directory (testfixtures'
    @tempdir) and chdir into it before touching disk.
    """

    @pytest.fixture(scope="class", autouse=True)
    def in_tmpdir(self, tmpdir_factory):
        # NOTE(review): pytest fixtures do not mix cleanly with
        # unittest.TestCase subclasses -- confirm this autouse fixture
        # actually takes effect; the @tempdir tests below chdir anyway.
        d = tmpdir_factory.mktemp("d")
        with d.as_cwd():
            yield d

    @tempdir()
    def test_create_dir(self, d):
        # create_dir should create directories, including nested ones.
        os.chdir(d.path)
        dir_name_1 = "new-dir-1"
        dir_name_2 = "new-dir-2"
        FileUtil.create_dir(dir_name_1)
        FileUtil.create_dir(os.path.join(dir_name_1, dir_name_2))
        self.assertTrue(os.path.isdir(dir_name_1))
        self.assertTrue(os.path.isdir(os.path.join(dir_name_1, dir_name_2)))

    @tempdir()
    def test_remove_dir(self, d):
        # remove_dir should delete a directory tree recursively.
        os.chdir(d.path)
        dir_name_1 = "new-dir-1"
        dir_name_2 = "new-dir-2"
        FileUtil.create_dir(dir_name_1)
        FileUtil.create_dir(os.path.join(dir_name_1, dir_name_2))
        FileUtil.remove_dir(dir_name_1)
        self.assertFalse(os.path.isdir(dir_name_1))
        self.assertFalse(os.path.isdir(os.path.join(dir_name_1, dir_name_2)))

    @tempdir()
    def test_remove_file(self, d):
        os.chdir(d.path)
        filename = "new-file.txt"
        FileUtil.write_to_file(".", filename, "content test")
        self.assertTrue(os.path.isfile(filename))
        FileUtil.remove_file(filename)
        self.assertFalse(os.path.isfile(filename))

    @tempdir()
    def test_write_to_file(self, d):
        os.chdir(d.path)
        filename = "new-file.txt"
        FileUtil.write_to_file(".", filename, "content test")
        self.assertTrue(os.path.isfile(filename))
        # "content test" is 12 bytes.
        self.assertEqual(os.path.getsize(filename), 12)

    @tempdir()
    def test_find_files(self, d):
        # find_files should glob by pattern within the current directory.
        os.chdir(d.path)
        FileUtil.write_to_file(".", "file1.txt", "")
        FileUtil.write_to_file(".", "file2.txt", "")
        FileUtil.write_to_file(".", "file3.log", "")
        files_txt = FileUtil.find_files("file*.txt")
        files_log = FileUtil.find_files("file*.log")
        self.assertEqual(len(files_txt), 2)
        self.assertEqual(len(files_log), 1)

    def test_normalize_path(self):
        # Backslashes should be converted to forward slashes.
        normalized = FileUtil.normalize_path("C:\\pyginny\\Test")
        expected = "C:/pyginny/Test"
        self.assertEqual(normalized, expected)

    def test_normalize_path_from_list(self):
        paths = ["C:\\pyginny\\Test1", "C:\\pyginny\\Test2"]
        normalized = FileUtil.normalize_path_from_list(paths)
        expected1 = "C:/pyginny/Test1"
        expected2 = "C:/pyginny/Test2"
        self.assertEqual(normalized[0], expected1)
        self.assertEqual(normalized[1], expected2)
from unittest import TestCase
import pytest
from testfixtures import tempdir
from pyginny.models.util.file_util import FileUtil
class TestFileUtil(TestCase):
@pytest.fixture(scope="class", autouse=True)
def in_tmpdir(self, tmpdir_factory):
d = tmpdir_factory.mktemp("d")
with d.as_cwd():
yield d
@tempdir()
def test_create_dir(self, d):
os.chdir(d.path)
dir_name_1 = "new-dir-1"
dir_name_2 = "new-dir-2"
FileUtil.create_dir(dir_name_1)
FileUtil.create_dir(os.path.join(dir_name_1, dir_name_2))
self.assertTrue(os.path.isdir(dir_name_1))
self.assertTrue(os.path.isdir(os.path.join(dir_name_1, dir_name_2)))
@tempdir()
def test_remove_dir(self, d):
os.chdir(d.path)
dir_name_1 = "new-dir-1"
dir_name_2 = "new-dir-2"
FileUtil.create_dir(dir_name_1)
FileUtil.create_dir(os.path.join(dir_name_1, dir_name_2))
FileUtil.remove_dir(dir_name_1)
self.assertFalse(os.path.isdir(dir_name_1))
self.assertFalse(os.path.isdir(os.path.join(dir_name_1, dir_name_2)))
@tempdir()
def test_remove_file(self, d):
os.chdir(d.path)
filename = "new-file.txt"
FileUtil.write_to_file(".", filename, "content test")
self.assertTrue(os.path.isfile(filename))
FileUtil.remove_file(filename)
self.assertFalse(os.path.isfile(filename))
@tempdir()
def test_write_to_file(self, d):
os.chdir(d.path)
filename = "new-file.txt"
FileUtil.write_to_file(".", filename, "content test")
self.assertTrue(os.path.isfile(filename))
self.assertEqual(os.path.getsize(filename), 12)
@tempdir()
def test_find_files(self, d):
os.chdir(d.path)
FileUtil.write_to_file(".", "file1.txt", "")
FileUtil.write_to_file(".", "file2.txt", "")
FileUtil.write_to_file(".", "file3.log", "")
files_txt = FileUtil.find_files("file*.txt")
files_log = FileUtil.find_files("file*.log")
self.assertEqual(len(files_txt), 2)
self.assertEqual(len(files_log), 1)
def test_normalize_path(self):
normalized = FileUtil.normalize_path("C:\\pyginny\\Test")
expected = "C:/pyginny/Test"
self.assertEqual(normalized, expected)
def test_normalize_path_from_list(self):
paths = ["C:\\pyginny\\Test1", "C:\\pyginny\\Test2"]
normalized = FileUtil.normalize_path_from_list(paths)
expected1 = "C:/pyginny/Test1"
expected2 = "C:/pyginny/Test2"
self.assertEqual(normalized[0], expected1)
self.assertEqual(normalized[1], expected2) | 0.318485 | 0.373047 |
import gtk
from w3af.core.ui.gui import entries
from w3af.core.ui.gui.tools.encdec import SimpleTextView
from w3af.core.data.export.ajax_export import ajax_export
from w3af.core.data.export.html_export import html_export
from w3af.core.data.export.python_export import python_export
from w3af.core.data.export.ruby_export import ruby_export
from w3af.core.controllers.exceptions import BaseFrameworkException
export_request_example = """\
GET http://localhost/script.php HTTP/1.0
Host: www.some_host.com
User-Agent: w3af.org
Pragma: no-cache
Content-Type: application/x-www-form-urlencoded
"""
class export_request(entries.RememberingWindow):
    """Infrastructure to export HTTP requests.

    Python 2 / PyGTK code: uses `except E, name` syntax and the `file`
    builtin.

    :author: <NAME> < andres.riancho | gmail.com >
    """

    def __init__(self, w3af, initial_request=None):
        super(export_request, self).__init__(
            w3af, "exportreq", "w3af - Export Requests", "Export_Requests")
        self.w3af = w3af
        # different ways of exporting data
        self._exporters = [
            ('HTML', html_export),
            ('Ajax', ajax_export),
            ('Python', python_export),
            ('Ruby', ruby_export)
        ]
        # splitted panes
        vpan = entries.RememberingVPaned(w3af, "pane-exportrequests")
        # upper pane that shows HTTP request
        vbox = gtk.VBox()
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.http_request = SimpleTextView()
        sw.add(self.http_request)
        vbox.pack_start(sw, True, True, padding=5)
        # middle widgets that show the export method
        table = gtk.Table(1, 6, homogeneous=True)
        cb = gtk.combo_box_new_text()
        for (lab, fnc) in self._exporters:
            cb.append_text(lab)
            # NOTE(review): this button is created per label and never
            # attached anywhere; `b` is rebound below -- looks like dead code.
            b = gtk.Button(lab)
        cb.set_active(0)
        table.attach(cb, 2, 3, 0, 1)
        b = entries.SemiStockButton(
            "Export", gtk.STOCK_GO_DOWN, _("Export the request"))
        b.connect("clicked", self._export, cb)
        table.attach(b, 3, 4, 0, 1)
        vbox.pack_start(table, False, False, padding=5)
        vpan.pack1(vbox)
        # lower pane with exported data and save button
        vbox = gtk.VBox()
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.exported_text = SimpleTextView()
        sw.add(self.exported_text)
        vbox.pack_start(sw, True, True, padding=5)
        b = entries.SemiStockButton(
            "Save request as...", gtk.STOCK_SAVE_AS, _("Save request as..."))
        b.connect("clicked", self._save_as)
        vbox.pack_start(b, False, False, padding=5)
        vpan.pack2(vbox)
        # Show the data
        if initial_request is None:
            self.http_request.set_text(export_request_example)
        else:
            (request_header, request_body) = initial_request
            self.http_request.set_text(request_header + '\n\n' + request_body)
        # Pre-render the first exporter's output in the lower pane.
        func = self._exporters[0][1]
        self.exported_text.set_text(func(self.http_request.get_text()))
        self.vbox.pack_start(vpan, padding=10)
        self.show_all()

    def _export(self, widg, combo):
        """Exports the upper text using the exporter selected in `combo`."""
        opc = combo.get_active()
        func = self._exporters[opc][1]
        try:
            exported_request = func(self.http_request.get_text())
        except BaseFrameworkException, w3:
            # Show the framework error message in place of the export.
            error_msg = str(w3)
            self.exported_text.set_text(error_msg)
        else:
            self.exported_text.set_text(exported_request)

    def _save_as(self, widg):
        """
        Save the exported data to a file using a file chooser.
        """
        # NOTE(review): the accept button uses STOCK_OPEN on a *save*
        # dialog; STOCK_SAVE would be the conventional stock item.
        chooser = gtk.FileChooserDialog(
            title='Save as...', action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        response = chooser.run()
        if response == gtk.RESPONSE_OK:
            # Save the contents of the self.exported_text to the selected file
            filename = chooser.get_filename()
            try:
                # NOTE(review): the handle is never closed and the bare
                # except hides the real error -- a with-block and a
                # narrower exception type would be safer.
                fh = file(filename, 'w')
                fh.write(self.exported_text.get_text())
            except:
                msg = _("Failed to save exported data to file")
                dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, msg)
                opt = dlg.run()
                dlg.destroy()
        elif response == gtk.RESPONSE_CANCEL:
            pass
        chooser.destroy()
from w3af.core.ui.gui import entries
from w3af.core.ui.gui.tools.encdec import SimpleTextView
from w3af.core.data.export.ajax_export import ajax_export
from w3af.core.data.export.html_export import html_export
from w3af.core.data.export.python_export import python_export
from w3af.core.data.export.ruby_export import ruby_export
from w3af.core.controllers.exceptions import BaseFrameworkException
export_request_example = """\
GET http://localhost/script.php HTTP/1.0
Host: www.some_host.com
User-Agent: w3af.org
Pragma: no-cache
Content-Type: application/x-www-form-urlencoded
"""
class export_request(entries.RememberingWindow):
    """Infrastructure to export HTTP requests.
    :author: <NAME> < andres.riancho | gmail.com >
    """
    def __init__(self, w3af, initial_request=None):
        """Build the export window.

        w3af -- core w3af object (used for window persistence)
        initial_request -- optional (header, body) tuple preloaded into the
            upper pane; when None an example request is shown instead
        """
        super(export_request, self).__init__(
            w3af, "exportreq", "w3af - Export Requests", "Export_Requests")
        self.w3af = w3af
        # different ways of exporting data: (label, converter function) pairs
        self._exporters = [
            ('HTML', html_export),
            ('Ajax', ajax_export),
            ('Python', python_export),
            ('Ruby', ruby_export)
        ]
        # splitted panes
        vpan = entries.RememberingVPaned(w3af, "pane-exportrequests")
        # upper pane that shows HTTP request
        vbox = gtk.VBox()
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.http_request = SimpleTextView()
        sw.add(self.http_request)
        vbox.pack_start(sw, True, True, padding=5)
        # middle widgets that show the export method
        table = gtk.Table(1, 6, homogeneous=True)
        cb = gtk.combo_box_new_text()
        for (lab, fnc) in self._exporters:
            cb.append_text(lab)
        # NOTE(review): this Button is never packed and `b` is immediately
        # reassigned below -- looks like dead code.
        b = gtk.Button(lab)
        cb.set_active(0)
        table.attach(cb, 2, 3, 0, 1)
        b = entries.SemiStockButton(
            "Export", gtk.STOCK_GO_DOWN, _("Export the request"))
        b.connect("clicked", self._export, cb)
        table.attach(b, 3, 4, 0, 1)
        vbox.pack_start(table, False, False, padding=5)
        vpan.pack1(vbox)
        # lower pane with exported data and save button
        vbox = gtk.VBox()
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.exported_text = SimpleTextView()
        sw.add(self.exported_text)
        vbox.pack_start(sw, True, True, padding=5)
        b = entries.SemiStockButton(
            "Save request as...", gtk.STOCK_SAVE_AS, _("Save request as..."))
        b.connect("clicked", self._save_as)
        vbox.pack_start(b, False, False, padding=5)
        vpan.pack2(vbox)
        # Show the data
        if initial_request is None:
            self.http_request.set_text(export_request_example)
        else:
            (request_header, request_body) = initial_request
            self.http_request.set_text(request_header + '\n\n' + request_body)
        # Pre-render the output of the default (first) exporter.
        func = self._exporters[0][1]
        self.exported_text.set_text(func(self.http_request.get_text()))
        self.vbox.pack_start(vpan, padding=10)
        self.show_all()

    def _export(self, widg, combo):
        """Exports the upper text."""
        # Select the converter matching the combo box choice.
        opc = combo.get_active()
        func = self._exporters[opc][1]
        try:
            exported_request = func(self.http_request.get_text())
        except BaseFrameworkException, w3:
            # Show the framework error in place of the exported code.
            error_msg = str(w3)
            self.exported_text.set_text(error_msg)
        else:
            self.exported_text.set_text(exported_request)

    def _save_as(self, widg):
        """
        Save the exported data to a file using a file chooser.
        """
        # NOTE(review): the confirm button uses STOCK_OPEN in a *save*
        # dialog -- STOCK_SAVE would be the expected stock label.
        chooser = gtk.FileChooserDialog(
            title='Save as...', action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        response = chooser.run()
        if response == gtk.RESPONSE_OK:
            # Save the contents of the self.exported_text to the selected file
            filename = chooser.get_filename()
            try:
                # NOTE(review): fh is never closed, and the bare except also
                # swallows programming errors -- consider try/finally and a
                # narrower IOError handler.
                fh = file(filename, 'w')
                fh.write(self.exported_text.get_text())
            except:
                msg = _("Failed to save exported data to file")
                dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, msg)
                opt = dlg.run()
                dlg.destroy()
        elif response == gtk.RESPONSE_CANCEL:
            pass
chooser.destroy() | 0.457379 | 0.11282 |
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django import forms
from sqlparse.tokens import Assignment
from web.accounts import forms
import datetime
User = get_user_model()
class Event(models.Model):
    """A time-bounded event grouping an ordered sequence of assignments."""
    title = models.CharField(max_length=50, null=True)
    start = models.DateTimeField()
    end = models.DateTimeField()
    description = models.CharField(max_length=1000)

    @property
    def is_currently_running(self):
        """Return True while the current time lies within [start, end].

        Captures ``now`` once so both bound checks use the same instant
        (the original evaluated ``datetime.now()`` twice).
        """
        # NOTE(review): uses naive local time -- confirm the project stores
        # naive datetimes (USE_TZ=False); otherwise use timezone.now().
        now = datetime.datetime.now()
        return self.start <= now <= self.end
class UserProgress(models.Model):
    """One row per assignment a user has solved within an event."""
    # Event the progress row belongs to.
    event = models.ForeignKey("Event", blank=True, null=True, on_delete=models.CASCADE)
    # Profile of the user who solved the assignment.
    new_user = models.ForeignKey("NewUser", blank=True, null=True, on_delete=models.CASCADE)
    # The assignment that was solved.
    assignment = models.ForeignKey("Assignment", blank=True, null=True, on_delete=models.CASCADE)
    # auto_now: refreshed on every save; NewUser.get_assignment orders by it.
    timestamp = models.DateTimeField(auto_now=True)
class Assignment(models.Model):
    """A single task within an event, served to users by ascending ``order``."""
    description = models.TextField(max_length=5000)
    # Data are not required because of programming tasks and automated tests
    data = models.FileField(upload_to='media', null=True, blank=True)
    right_answer = models.CharField(max_length=200)
    # Expected answer kind; labels are Czech (SEZNAM=list, ČÍSLO=number, TEXT=text).
    ANSWER_CHOICES = [
        ('SEZNAM', 'SEZNAM'),
        ('ČÍSLO', 'ČÍSLO'),
        ('TEXT', 'TEXT'),
    ]
    answer_type = models.CharField(max_length=100, choices=ANSWER_CHOICES, null=True)
    # Position within the event; NewUser.get_assignment advances it by one.
    order = models.IntegerField()
    event = models.ForeignKey(Event, blank=True, null=True, on_delete=models.CASCADE)
class NewUser(models.Model):
    """Profile wrapper around the auth user that tracks assignment progress."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    todo_assignment = models.IntegerField(default=1)

    def get_assignment(self, event_id):
        """Return the next unsolved Assignment for ``event_id`` or None when done.

        With no recorded progress the first assignment (lowest ``order``) is
        returned; otherwise the assignment whose ``order`` directly follows
        the most recently solved one.
        """
        progress = UserProgress.objects.filter(
            Q(new_user=self) & Q(event_id=event_id)).order_by("timestamp")
        assignments = Assignment.objects.filter(event_id=event_id).order_by("order")
        if not progress.exists():
            return assignments.first()
        last_progress: UserProgress = progress.last()
        next_order = last_progress.assignment.order + 1
        # .first() already yields None for an empty queryset, so the
        # original's extra count() round-trip is unnecessary.
        return assignments.filter(order=next_order).first()

    def solve_assignment(self, event_id: int, assignment: Assignment):
        """Record ``assignment`` as solved for this user within the event."""
        UserProgress(event_id=event_id, new_user=self, assignment=assignment).save()

    def __str__(self):
        return str(self.user.username)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Auto-create the matching NewUser profile when a User is first saved."""
    if created:
NewUser.objects.create(user=instance) | web/models.py | from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django import forms
from sqlparse.tokens import Assignment
from web.accounts import forms
import datetime
User = get_user_model()
class Event(models.Model):
    """A time-bounded event grouping an ordered sequence of assignments."""
    title = models.CharField(max_length=50, null=True)
    start = models.DateTimeField()
    end = models.DateTimeField()
    description = models.CharField(max_length=1000)

    @property
    def is_currently_running(self):
        """Return True while the current time lies within [start, end].

        Captures ``now`` once so both bound checks use the same instant
        (the original evaluated ``datetime.now()`` twice).
        """
        # NOTE(review): uses naive local time -- confirm the project stores
        # naive datetimes (USE_TZ=False); otherwise use timezone.now().
        now = datetime.datetime.now()
        return self.start <= now <= self.end
class UserProgress(models.Model):
    """One row per assignment a user has solved within an event."""
    # Event the progress row belongs to.
    event = models.ForeignKey("Event", blank=True, null=True, on_delete=models.CASCADE)
    # Profile of the user who solved the assignment.
    new_user = models.ForeignKey("NewUser", blank=True, null=True, on_delete=models.CASCADE)
    # The assignment that was solved.
    assignment = models.ForeignKey("Assignment", blank=True, null=True, on_delete=models.CASCADE)
    # auto_now: refreshed on every save; NewUser.get_assignment orders by it.
    timestamp = models.DateTimeField(auto_now=True)
class Assignment(models.Model):
    """A single task within an event, served to users by ascending ``order``."""
    description = models.TextField(max_length=5000)
    # Data are not required because of programming tasks and automated tests
    data = models.FileField(upload_to='media', null=True, blank=True)
    right_answer = models.CharField(max_length=200)
    # Expected answer kind; labels are Czech (SEZNAM=list, ČÍSLO=number, TEXT=text).
    ANSWER_CHOICES = [
        ('SEZNAM', 'SEZNAM'),
        ('ČÍSLO', 'ČÍSLO'),
        ('TEXT', 'TEXT'),
    ]
    answer_type = models.CharField(max_length=100, choices=ANSWER_CHOICES, null=True)
    # Position within the event; NewUser.get_assignment advances it by one.
    order = models.IntegerField()
    event = models.ForeignKey(Event, blank=True, null=True, on_delete=models.CASCADE)
class NewUser(models.Model):
    """Profile wrapper around the auth user that tracks assignment progress."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    todo_assignment = models.IntegerField(default=1)

    def get_assignment(self, event_id):
        """Return the next unsolved Assignment for ``event_id`` or None when done.

        With no recorded progress the first assignment (lowest ``order``) is
        returned; otherwise the assignment whose ``order`` directly follows
        the most recently solved one.
        """
        progress = UserProgress.objects.filter(
            Q(new_user=self) & Q(event_id=event_id)).order_by("timestamp")
        assignments = Assignment.objects.filter(event_id=event_id).order_by("order")
        if not progress.exists():
            return assignments.first()
        last_progress: UserProgress = progress.last()
        next_order = last_progress.assignment.order + 1
        # .first() already yields None for an empty queryset, so the
        # original's extra count() round-trip is unnecessary.
        return assignments.filter(order=next_order).first()

    def solve_assignment(self, event_id: int, assignment: Assignment):
        """Record ``assignment`` as solved for this user within the event."""
        UserProgress(event_id=event_id, new_user=self, assignment=assignment).save()

    def __str__(self):
        return str(self.user.username)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Auto-create the matching NewUser profile when a User is first saved."""
    if created:
NewUser.objects.create(user=instance) | 0.52902 | 0.146362 |
import unittest
from stability_label_algorithm.modules.argumentation.argumentation_theory.argumentation_theory import ArgumentationTheory
from stability_label_algorithm.modules.dataset_generator.argumentation_system_generator.layered.\
layered_argumentation_system_generator import LayeredArgumentationSystemGenerator
from stability_label_algorithm.modules.dataset_generator.argumentation_system_generator.layered.\
layered_argumentation_system_generator_parameters import LayeredArgumentationSystemGeneratorParameters
from stability_label_algorithm.modules.dataset_generator.argumentation_system_property_computer.\
argumentation_system_property_computer import \
compute_argumentation_system_properties
from stability_label_algorithm.modules.dataset_generator.argumentation_theory_property_computer.\
incomplete_argumentation_framework import \
IncompleteArgumentationFramework
class TestLayeredDatasetGenerator(unittest.TestCase):
    """Tests for the layered argumentation-system generator."""

    def _generate_and_check(self, nr_of_literals, nr_of_rules,
                            rule_antecedent_distribution,
                            literal_layer_distribution):
        """Generate a system from the parameters and verify all properties.

        Factors out the generation/verification logic that the original
        duplicated verbatim in two test methods.
        """
        parameters = LayeredArgumentationSystemGeneratorParameters(
            nr_of_literals, nr_of_rules,
            rule_antecedent_distribution, literal_layer_distribution)
        generator = LayeredArgumentationSystemGenerator(parameters)
        argumentation_system = generator.generate()
        # Check number of literals and rules.
        properties = compute_argumentation_system_properties(argumentation_system)
        self.assertEqual(nr_of_literals, properties.nr_of_literals)
        self.assertEqual(nr_of_rules, properties.nr_of_rules)
        self.assertEqual(rule_antecedent_distribution,
                         properties.rule_antecedent_distribution)
        # Check that the literal heights reproduce the requested layers.
        empty_theory = ArgumentationTheory(argumentation_system, [])
        framework = IncompleteArgumentationFramework.from_argumentation_theory(empty_theory)
        literal_layers = [max(pot_arg.height for pot_arg in pot_args)
                          for pot_args in framework.potential_arguments_by_literal.values()]
        actual_distribution = {layer: literal_layers.count(layer)
                               for layer in sorted(set(literal_layers))}
        self.assertEqual(literal_layer_distribution, actual_distribution)

    def test_layered_argumentation_system_generation(self):
        self._generate_and_check(
            nr_of_literals=6, nr_of_rules=3,
            rule_antecedent_distribution={1: 2, 2: 1},
            literal_layer_distribution={0: 3, 1: 2, 2: 1})

    def test_impossible_rule_antecedent_distribution(self):
        """A 2-antecedent rule is impossible when all literals sit in layer 0."""
        parameters = LayeredArgumentationSystemGeneratorParameters(
            4, 1, {2: 1}, {0: 4})
        generator = LayeredArgumentationSystemGenerator(parameters)
        with self.assertRaises(ValueError):
            generator.generate()

    def test_two_layer_argumentation_system_generation(self):
        self._generate_and_check(
            nr_of_literals=20, nr_of_rules=25,
            rule_antecedent_distribution={3: 15, 2: 10},
            literal_layer_distribution={0: 19, 1: 1})
if __name__ == '__main__':
unittest.main() | tests/test_layered_dataset_generator.py | import unittest
from stability_label_algorithm.modules.argumentation.argumentation_theory.argumentation_theory import ArgumentationTheory
from stability_label_algorithm.modules.dataset_generator.argumentation_system_generator.layered.\
layered_argumentation_system_generator import LayeredArgumentationSystemGenerator
from stability_label_algorithm.modules.dataset_generator.argumentation_system_generator.layered.\
layered_argumentation_system_generator_parameters import LayeredArgumentationSystemGeneratorParameters
from stability_label_algorithm.modules.dataset_generator.argumentation_system_property_computer.\
argumentation_system_property_computer import \
compute_argumentation_system_properties
from stability_label_algorithm.modules.dataset_generator.argumentation_theory_property_computer.\
incomplete_argumentation_framework import \
IncompleteArgumentationFramework
class TestLayeredDatasetGenerator(unittest.TestCase):
    """Tests for the layered argumentation-system generator."""

    def _generate_and_check(self, nr_of_literals, nr_of_rules,
                            rule_antecedent_distribution,
                            literal_layer_distribution):
        """Generate a system from the parameters and verify all properties.

        Factors out the generation/verification logic that the original
        duplicated verbatim in two test methods.
        """
        parameters = LayeredArgumentationSystemGeneratorParameters(
            nr_of_literals, nr_of_rules,
            rule_antecedent_distribution, literal_layer_distribution)
        generator = LayeredArgumentationSystemGenerator(parameters)
        argumentation_system = generator.generate()
        # Check number of literals and rules.
        properties = compute_argumentation_system_properties(argumentation_system)
        self.assertEqual(nr_of_literals, properties.nr_of_literals)
        self.assertEqual(nr_of_rules, properties.nr_of_rules)
        self.assertEqual(rule_antecedent_distribution,
                         properties.rule_antecedent_distribution)
        # Check that the literal heights reproduce the requested layers.
        empty_theory = ArgumentationTheory(argumentation_system, [])
        framework = IncompleteArgumentationFramework.from_argumentation_theory(empty_theory)
        literal_layers = [max(pot_arg.height for pot_arg in pot_args)
                          for pot_args in framework.potential_arguments_by_literal.values()]
        actual_distribution = {layer: literal_layers.count(layer)
                               for layer in sorted(set(literal_layers))}
        self.assertEqual(literal_layer_distribution, actual_distribution)

    def test_layered_argumentation_system_generation(self):
        self._generate_and_check(
            nr_of_literals=6, nr_of_rules=3,
            rule_antecedent_distribution={1: 2, 2: 1},
            literal_layer_distribution={0: 3, 1: 2, 2: 1})

    def test_impossible_rule_antecedent_distribution(self):
        """A 2-antecedent rule is impossible when all literals sit in layer 0."""
        parameters = LayeredArgumentationSystemGeneratorParameters(
            4, 1, {2: 1}, {0: 4})
        generator = LayeredArgumentationSystemGenerator(parameters)
        with self.assertRaises(ValueError):
            generator.generate()

    def test_two_layer_argumentation_system_generation(self):
        self._generate_and_check(
            nr_of_literals=20, nr_of_rules=25,
            rule_antecedent_distribution={3: 15, 2: 10},
            literal_layer_distribution={0: 19, 1: 1})
if __name__ == '__main__':
unittest.main() | 0.695545 | 0.424651 |
import os
import sys
import json
import pathlib
import argparse
from fintool.actions import (
CreateTransaction,
SaveTransaction,
CreateFilters,
GetTransactions,
PrintTransactions,
RemoveTransaction,
UpdateTransaction,
CreateStats,
ShowStats,
CreateTag,
AddTag,
GetTags,
EditTag,
RemoveTag,
PrintTags
)
from fintool.logging import LoggingHelper
SUBPARSERS = 'subparsers'
REQUIRED = 'required'
ID = 'id'
SUBPARSERS_CFGS = 'subparsers_cfgs'
NAME = 'name'
HELP = 'help'
ARGS = 'args'
PROGRAM_NAME = "fintool"
KWARGS = "kwargs"
CLI_CFG_FILE = "cli.json"
ARGS_PARSER_CFG = "argsparser"
CLI_CMD = "cmd"
ADD_CMD = "add"
REMOVE_CMD = "remove"
LIST_CMD = "list"
STATS_CMD = "stats"
EDIT_CMD = "edit"
ADD_TAG_CMD = 'add_tag'
EDIT_TAG_CMD = 'edit_tag'
REMOVE_TAG_CMD = 'remove_tag'
LIST_TAGS_CMD = 'list_tags'
class ArgsParser:
    """Builds an argparse parser tree from a configuration dict."""

    def __init__(self, config):
        """Set up the parser hierarchy described by ``config``."""
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)
        self._logger.debug('setting up parser helper')
        self.load_parsers(config)

    def load_parsers(self, config):
        """Create the root parser and attach any configured subparsers."""
        self.parser = argparse.ArgumentParser()
        if SUBPARSERS in config:
            self.load_subparsers(config[SUBPARSERS], self.parser)

    def load_subparsers(self, subparsers_config, parent_parser):
        """Recursively attach the subparsers described by the config.

        Positional arguments:
        subparsers_config -- configuration dict for the current subparser
        parent_parser -- parser object the new subparsers are attached to
        """
        group = parent_parser.add_subparsers(dest=subparsers_config[ID])
        group.required = subparsers_config[REQUIRED]
        for cfg in subparsers_config[SUBPARSERS_CFGS]:
            child = group.add_parser(cfg[NAME], help=cfg[HELP])
            # Register every argument; optional kwargs default to empty.
            for arg_cfg in cfg[ARGS]:
                child.add_argument(arg_cfg[ID], **arg_cfg.get(KWARGS, {}))
            if SUBPARSERS in cfg:
                self.load_subparsers(cfg[SUBPARSERS], child)

    def parse(self, arguments):
        """Parse ``arguments`` and return the result as a plain dict.

        Positional arguments:
        arguments -- a list of strings representing arguments
        """
        self._logger.debug('parsing arguments %s', arguments)
        return vars(self.parser.parse_args(arguments))
class Command:
    """A parsed CLI command: its id, the actions to run, and their data."""

    def __init__(self, cmd, actions, data):
        # Keep the raw pieces; CommandProcessor consumes them in order.
        self.cmd, self.actions, self.data = cmd, actions, data

    def __repr__(self):
        return "cmd: {} actions: {} data: {}".format(
            self.cmd, self.actions, self.data)
class CommandProcessor:
    """Runs every action attached to a Command, in order."""

    def __init__(self):
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)

    def process(self, cmd):
        """Instantiate and execute each of ``cmd``'s actions sequentially.

        Args:
            cmd (Command): the command whose action classes are each
                instantiated and handed ``cmd.data``.
        """
        self._logger.debug('processing cmd: %s', cmd)
        for action_cls in cmd.actions:
            action_cls().exec(cmd.data)
# Maps each CLI command id to the ordered list of Action classes that
# CommandProcessor instantiates and runs for it.
SUPPORTED_CMDS = {
    ADD_CMD: [CreateTransaction, SaveTransaction],
    REMOVE_CMD: [RemoveTransaction],
    LIST_CMD: [CreateFilters, GetTransactions, PrintTransactions],
    STATS_CMD: [CreateFilters, GetTransactions, CreateStats, ShowStats],
    EDIT_CMD: [CreateTransaction, UpdateTransaction],
    ADD_TAG_CMD: [CreateTag, AddTag],
    EDIT_TAG_CMD: [CreateTag, EditTag],
    REMOVE_TAG_CMD: [RemoveTag],
    LIST_TAGS_CMD: [GetTags, PrintTags]
}
class UnsupportedCmdError(Exception):
    """Raised when parsed arguments name a command with no registered actions."""
    pass
class CLI:
    """Top-level command line interface for fintool."""

    def __init__(self):
        """Load configuration from json file and
        initialize args parser object.
        """
        # get log level from env var or set info as default
        LoggingHelper.set_log_level(os.getenv('FINTOOL_LOGLEVEL', 'info'))
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)
        base_dir = pathlib.Path(__file__).parent
        cli_cfg_path = base_dir.joinpath(CLI_CFG_FILE).resolve()
        self._logger.debug('loading cli config from %s', cli_cfg_path)
        with cli_cfg_path.open() as cfg_file:
            # json.load reads the stream directly; no need for read()+loads.
            self._cli_cfg = json.load(cfg_file)
        self.args_parser = ArgsParser(self._cli_cfg[ARGS_PARSER_CFG])
        self.cmd_processor = CommandProcessor()

    def parse_args(self, args):
        """Parse ``args`` with the configured parser and return a dict."""
        self._logger.debug('parsing arguments: %s', args)
        return self.args_parser.parse(args)

    def create_cmd(self, args):
        """Create a Command object from parsed arguments.

        Raise UnsupportedCmdError if the command id is missing or unknown.

        Args:
            args (dict): Parsed cli arguments including the command id.
        """
        self._logger.debug('creating command from args: %s', args)
        try:
            cmd_id = args[CLI_CMD]
            # cmd data consists of all key-values in args except cmd id
            cmd_data = {k: args[k] for k in args.keys() - {CLI_CMD}}
            cmd_actions = SUPPORTED_CMDS[cmd_id]
            return Command(cmd_id, cmd_actions, cmd_data)
        except KeyError as key_error:
            # Chain the original KeyError for easier debugging.
            raise UnsupportedCmdError(
                f"Unsupported command: {key_error}") from key_error

    def run(self, args):
        """Parse ``args``, build the command and execute its actions.

        Exits the process with status 1 when anything fails.

        Args:
            args (list): A list of cli arguments
        """
        self._logger.debug('running cli with: %s', args)
        try:
            parsed_args = self.parse_args(args)
            cmd = self.create_cmd(parsed_args)
            self.cmd_processor.process(cmd)
        except Exception as exception:
            # Fixed typo in the original log message ("ocurred").
            self._logger.error(
                'an error occurred while running command: %s',
                exception
            )
            sys.exit(1)
if __name__ == "__main__":
cli_obj = CLI()
cli_obj.run(sys.argv[1:]) | fintool/cli.py | import os
import sys
import json
import pathlib
import argparse
from fintool.actions import (
CreateTransaction,
SaveTransaction,
CreateFilters,
GetTransactions,
PrintTransactions,
RemoveTransaction,
UpdateTransaction,
CreateStats,
ShowStats,
CreateTag,
AddTag,
GetTags,
EditTag,
RemoveTag,
PrintTags
)
from fintool.logging import LoggingHelper
SUBPARSERS = 'subparsers'
REQUIRED = 'required'
ID = 'id'
SUBPARSERS_CFGS = 'subparsers_cfgs'
NAME = 'name'
HELP = 'help'
ARGS = 'args'
PROGRAM_NAME = "fintool"
KWARGS = "kwargs"
CLI_CFG_FILE = "cli.json"
ARGS_PARSER_CFG = "argsparser"
CLI_CMD = "cmd"
ADD_CMD = "add"
REMOVE_CMD = "remove"
LIST_CMD = "list"
STATS_CMD = "stats"
EDIT_CMD = "edit"
ADD_TAG_CMD = 'add_tag'
EDIT_TAG_CMD = 'edit_tag'
REMOVE_TAG_CMD = 'remove_tag'
LIST_TAGS_CMD = 'list_tags'
class ArgsParser:
    """Builds an argparse parser tree from a configuration dict."""

    def __init__(self, config):
        """Set up the parser hierarchy described by ``config``."""
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)
        self._logger.debug('setting up parser helper')
        self.load_parsers(config)

    def load_parsers(self, config):
        """Create the root parser and attach any configured subparsers."""
        self.parser = argparse.ArgumentParser()
        if SUBPARSERS in config:
            self.load_subparsers(config[SUBPARSERS], self.parser)

    def load_subparsers(self, subparsers_config, parent_parser):
        """Recursively attach the subparsers described by the config.

        Positional arguments:
        subparsers_config -- configuration dict for the current subparser
        parent_parser -- parser object the new subparsers are attached to
        """
        group = parent_parser.add_subparsers(dest=subparsers_config[ID])
        group.required = subparsers_config[REQUIRED]
        for cfg in subparsers_config[SUBPARSERS_CFGS]:
            child = group.add_parser(cfg[NAME], help=cfg[HELP])
            # Register every argument; optional kwargs default to empty.
            for arg_cfg in cfg[ARGS]:
                child.add_argument(arg_cfg[ID], **arg_cfg.get(KWARGS, {}))
            if SUBPARSERS in cfg:
                self.load_subparsers(cfg[SUBPARSERS], child)

    def parse(self, arguments):
        """Parse ``arguments`` and return the result as a plain dict.

        Positional arguments:
        arguments -- a list of strings representing arguments
        """
        self._logger.debug('parsing arguments %s', arguments)
        return vars(self.parser.parse_args(arguments))
class Command:
    """A parsed CLI command: its id, the actions to run, and their data."""

    def __init__(self, cmd, actions, data):
        # Keep the raw pieces; CommandProcessor consumes them in order.
        self.cmd, self.actions, self.data = cmd, actions, data

    def __repr__(self):
        return "cmd: {} actions: {} data: {}".format(
            self.cmd, self.actions, self.data)
class CommandProcessor:
    """Runs every action attached to a Command, in order."""

    def __init__(self):
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)

    def process(self, cmd):
        """Instantiate and execute each of ``cmd``'s actions sequentially.

        Args:
            cmd (Command): the command whose action classes are each
                instantiated and handed ``cmd.data``.
        """
        self._logger.debug('processing cmd: %s', cmd)
        for action_cls in cmd.actions:
            action_cls().exec(cmd.data)
# Maps each CLI command id to the ordered list of Action classes that
# CommandProcessor instantiates and runs for it.
SUPPORTED_CMDS = {
    ADD_CMD: [CreateTransaction, SaveTransaction],
    REMOVE_CMD: [RemoveTransaction],
    LIST_CMD: [CreateFilters, GetTransactions, PrintTransactions],
    STATS_CMD: [CreateFilters, GetTransactions, CreateStats, ShowStats],
    EDIT_CMD: [CreateTransaction, UpdateTransaction],
    ADD_TAG_CMD: [CreateTag, AddTag],
    EDIT_TAG_CMD: [CreateTag, EditTag],
    REMOVE_TAG_CMD: [RemoveTag],
    LIST_TAGS_CMD: [GetTags, PrintTags]
}
class UnsupportedCmdError(Exception):
    """Raised when parsed arguments name a command with no registered actions."""
    pass
class CLI:
    """Top-level command line interface for fintool."""

    def __init__(self):
        """Load configuration from json file and
        initialize args parser object.
        """
        # get log level from env var or set info as default
        LoggingHelper.set_log_level(os.getenv('FINTOOL_LOGLEVEL', 'info'))
        self._logger = LoggingHelper.get_logger(self.__class__.__name__)
        base_dir = pathlib.Path(__file__).parent
        cli_cfg_path = base_dir.joinpath(CLI_CFG_FILE).resolve()
        self._logger.debug('loading cli config from %s', cli_cfg_path)
        with cli_cfg_path.open() as cfg_file:
            # json.load reads the stream directly; no need for read()+loads.
            self._cli_cfg = json.load(cfg_file)
        self.args_parser = ArgsParser(self._cli_cfg[ARGS_PARSER_CFG])
        self.cmd_processor = CommandProcessor()

    def parse_args(self, args):
        """Parse ``args`` with the configured parser and return a dict."""
        self._logger.debug('parsing arguments: %s', args)
        return self.args_parser.parse(args)

    def create_cmd(self, args):
        """Create a Command object from parsed arguments.

        Raise UnsupportedCmdError if the command id is missing or unknown.

        Args:
            args (dict): Parsed cli arguments including the command id.
        """
        self._logger.debug('creating command from args: %s', args)
        try:
            cmd_id = args[CLI_CMD]
            # cmd data consists of all key-values in args except cmd id
            cmd_data = {k: args[k] for k in args.keys() - {CLI_CMD}}
            cmd_actions = SUPPORTED_CMDS[cmd_id]
            return Command(cmd_id, cmd_actions, cmd_data)
        except KeyError as key_error:
            # Chain the original KeyError for easier debugging.
            raise UnsupportedCmdError(
                f"Unsupported command: {key_error}") from key_error

    def run(self, args):
        """Parse ``args``, build the command and execute its actions.

        Exits the process with status 1 when anything fails.

        Args:
            args (list): A list of cli arguments
        """
        self._logger.debug('running cli with: %s', args)
        try:
            parsed_args = self.parse_args(args)
            cmd = self.create_cmd(parsed_args)
            self.cmd_processor.process(cmd)
        except Exception as exception:
            # Fixed typo in the original log message ("ocurred").
            self._logger.error(
                'an error occurred while running command: %s',
                exception
            )
            sys.exit(1)
if __name__ == "__main__":
cli_obj = CLI()
cli_obj.run(sys.argv[1:]) | 0.474144 | 0.09611 |
import copy
import torch.nn as nn
from models.glt_models import LinearClassifier
from models.resnet_blocks import BasicBlock, Bottleneck, DownsampleConv2d
from models.svdo_layers import LinearSVDO, Conv2dSVDO
class SequentialSparsifier(nn.Module):
def __init__(self, pretrained_model):
    """Wrap every layer of ``pretrained_model`` in its sparse counterpart."""
    super(SequentialSparsifier, self).__init__()
    # Convert each pretrained module into its variational-dropout variant.
    self.model = nn.ModuleList(
        self.__get_sparse_layer(layer) for layer in pretrained_model)
    # Initially no block is marked as trainable.
    self.train_mask = [False] * len(pretrained_model)
@classmethod
def __get_sparse_layer(cls, dense_layer):
    """Return a sparse (SVDO) clone of ``dense_layer``.

    Weights and biases are copied (cloned) from the dense layer; composite
    blocks are rebuilt recursively.  Layers with no sparse counterpart are
    shallow-copied unchanged.
    """
    if isinstance(dense_layer, nn.Linear):
        sparse_layer = LinearSVDO(dense_layer.in_features, dense_layer.out_features,
                                  dense_layer.bias is not None)
        sparse_layer.weight.data = dense_layer.weight.data.clone()
        if dense_layer.bias is not None:
            sparse_layer.bias.data = dense_layer.bias.data.clone()
        return sparse_layer
    elif isinstance(dense_layer, nn.Conv2d):
        sparse_layer = Conv2dSVDO(dense_layer.in_channels, dense_layer.out_channels,
                                  dense_layer.kernel_size, stride=dense_layer.stride,
                                  padding=dense_layer.padding, dilation=dense_layer.dilation,
                                  groups=dense_layer.groups, bias=dense_layer.bias is not None)
        sparse_layer.weight.data = dense_layer.weight.data.clone()
        if dense_layer.bias is not None:
            sparse_layer.bias.data = dense_layer.bias.data.clone()
        return sparse_layer
    elif isinstance(dense_layer, DownsampleConv2d):
        sparse_layer = DownsampleConv2d(dense_layer.in_channels, dense_layer.out_channels,
                                        stride=dense_layer.stride, sparse=True)
        sparse_layer.conv = cls.__get_sparse_layer(dense_layer.conv)
        return sparse_layer
    elif isinstance(dense_layer, BasicBlock):
        sparse_layer = BasicBlock(dense_layer.in_channels, dense_layer.out_channels,
                                  stride=dense_layer.stride, sparse=True)
        sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
        sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
        if dense_layer.shortcut is not None:
            sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
        # copy.copy is shallow -- the batch-norm clones still share their
        # parameter tensors with the source layer; presumably intended,
        # TODO confirm.
        sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
        sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
        return sparse_layer
    elif isinstance(dense_layer, Bottleneck):
        sparse_layer = Bottleneck(dense_layer.in_channels, dense_layer.out_channels,
                                  stride=dense_layer.stride, sparse=True)
        sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
        sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
        sparse_layer.conv_3 = cls.__get_sparse_layer(dense_layer.conv_3)
        if dense_layer.shortcut is not None:
            sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
        sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
        sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
        sparse_layer.bn_3 = copy.copy(dense_layer.bn_3)
        return sparse_layer
    elif isinstance(dense_layer, LinearClassifier):
        sparse_layer = LinearClassifier(dense_layer.in_channels, num_classes=dense_layer.num_classes,
                                        sparse=True)
        sparse_layer.linear = cls.__get_sparse_layer(dense_layer.linear)
        sparse_layer.bn = copy.copy(dense_layer.bn)
        return sparse_layer
    else:
        # No sparse counterpart (e.g. activations/pooling): shallow copy.
        return copy.copy(dense_layer)
@classmethod
def __get_dense_layer(cls, sparse_layer):
    """Return a dense clone of ``sparse_layer`` with pruned weights zeroed.

    Weights whose log-alpha reaches the dropout threshold are multiplied
    by zero; composite blocks are rebuilt recursively.  Blocks that were
    never sparsified are shallow-copied unchanged.
    """
    if isinstance(sparse_layer, LinearSVDO):
        dense_layer = nn.Linear(sparse_layer.in_features, sparse_layer.out_features,
                                sparse_layer.bias is not None)
        dense_layer.weight.data = sparse_layer.weight.data.clone()
        # Zero out weights whose log-alpha exceeds the dropout threshold.
        dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
        if sparse_layer.bias is not None:
            dense_layer.bias.data = sparse_layer.bias.data.clone()
        return dense_layer
    elif isinstance(sparse_layer, Conv2dSVDO):
        dense_layer = nn.Conv2d(sparse_layer.in_channels, sparse_layer.out_channels,
                                sparse_layer.kernel_size, stride=sparse_layer.stride,
                                padding=sparse_layer.padding, dilation=sparse_layer.dilation,
                                groups=sparse_layer.groups, bias=sparse_layer.bias is not None)
        dense_layer.weight.data = sparse_layer.weight.data.clone()
        # Zero out weights whose log-alpha exceeds the dropout threshold.
        dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
        if sparse_layer.bias is not None:
            dense_layer.bias.data = sparse_layer.bias.data.clone()
        return dense_layer
    elif isinstance(sparse_layer, DownsampleConv2d):
        if not sparse_layer.sparse:
            return copy.copy(sparse_layer)
        dense_layer = DownsampleConv2d(sparse_layer.in_channels, sparse_layer.out_channels,
                                       stride=sparse_layer.stride, sparse=False)
        dense_layer.conv = cls.__get_dense_layer(sparse_layer.conv)
        return dense_layer
    elif isinstance(sparse_layer, BasicBlock):
        if not sparse_layer.sparse:
            return copy.copy(sparse_layer)
        dense_layer = BasicBlock(sparse_layer.in_channels, sparse_layer.out_channels,
                                 stride=sparse_layer.stride, sparse=False)
        dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
        dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
        if sparse_layer.shortcut is not None:
            dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
        # copy.copy is shallow -- batch-norm clones share parameter tensors
        # with the source; presumably intended, TODO confirm.
        dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
        dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
        return dense_layer
    elif isinstance(sparse_layer, Bottleneck):
        if not sparse_layer.sparse:
            return copy.copy(sparse_layer)
        dense_layer = Bottleneck(sparse_layer.in_channels, sparse_layer.out_channels,
                                 stride=sparse_layer.stride, sparse=False)
        dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
        dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
        dense_layer.conv_3 = cls.__get_dense_layer(sparse_layer.conv_3)
        if sparse_layer.shortcut is not None:
            dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
        dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
        dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
        dense_layer.bn_3 = copy.copy(sparse_layer.bn_3)
        return dense_layer
    elif isinstance(sparse_layer, LinearClassifier):
        if not sparse_layer.sparse:
            return copy.copy(sparse_layer)
        dense_layer = LinearClassifier(sparse_layer.in_channels, num_classes=sparse_layer.num_classes,
                                       sparse=False)
        dense_layer.linear = cls.__get_dense_layer(sparse_layer.linear)
        dense_layer.bn = copy.copy(sparse_layer.bn)
        return dense_layer
    else:
        # No dense counterpart needed: shallow copy.
        return copy.copy(sparse_layer)
def update_mask(self, new_mask):
self.train_mask = new_mask
def set_gradient_flow(self):
for module, train_flag in zip(self.model, self.train_mask):
module.train(mode=train_flag)
for parameter in module.parameters():
parameter.requires_grad = train_flag
def finalize_blocks(self, finalize_mask):
for i in range(len(self.train_mask)):
if self.train_mask[i]:
self.model[i] = self.__get_dense_layer(self.model[i])
def forward(self, x):
out = x
for module in self.model:
out = module(out)
return out
def kl_divergence(self):
total_kl = 0.0
for module, train_flag in zip(self.model, self.train_mask):
if train_flag:
total_kl = total_kl + module.kl_divergence()
return total_kl | sparse_finetune/sparse_finetune.py | import copy
import torch.nn as nn
from models.glt_models import LinearClassifier
from models.resnet_blocks import BasicBlock, Bottleneck, DownsampleConv2d
from models.svdo_layers import LinearSVDO, Conv2dSVDO
class SequentialSparsifier(nn.Module):
def __init__(self, pretrained_model):
super(SequentialSparsifier, self).__init__()
self.model = nn.ModuleList()
for module in pretrained_model:
self.model.append(self.__get_sparse_layer(module))
self.train_mask = [False for _ in range(len(pretrained_model))]
@classmethod
def __get_sparse_layer(cls, dense_layer):
if isinstance(dense_layer, nn.Linear):
sparse_layer = LinearSVDO(dense_layer.in_features, dense_layer.out_features,
dense_layer.bias is not None)
sparse_layer.weight.data = dense_layer.weight.data.clone()
if dense_layer.bias is not None:
sparse_layer.bias.data = dense_layer.bias.data.clone()
return sparse_layer
elif isinstance(dense_layer, nn.Conv2d):
sparse_layer = Conv2dSVDO(dense_layer.in_channels, dense_layer.out_channels,
dense_layer.kernel_size, stride=dense_layer.stride,
padding=dense_layer.padding, dilation=dense_layer.dilation,
groups=dense_layer.groups, bias=dense_layer.bias is not None)
sparse_layer.weight.data = dense_layer.weight.data.clone()
if dense_layer.bias is not None:
sparse_layer.bias.data = dense_layer.bias.data.clone()
return sparse_layer
elif isinstance(dense_layer, DownsampleConv2d):
sparse_layer = DownsampleConv2d(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv = cls.__get_sparse_layer(dense_layer.conv)
return sparse_layer
elif isinstance(dense_layer, BasicBlock):
sparse_layer = BasicBlock(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
if dense_layer.shortcut is not None:
sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
return sparse_layer
elif isinstance(dense_layer, Bottleneck):
sparse_layer = Bottleneck(dense_layer.in_channels, dense_layer.out_channels,
stride=dense_layer.stride, sparse=True)
sparse_layer.conv_1 = cls.__get_sparse_layer(dense_layer.conv_1)
sparse_layer.conv_2 = cls.__get_sparse_layer(dense_layer.conv_2)
sparse_layer.conv_3 = cls.__get_sparse_layer(dense_layer.conv_3)
if dense_layer.shortcut is not None:
sparse_layer.shortcut = cls.__get_sparse_layer(dense_layer.shortcut)
sparse_layer.bn_1 = copy.copy(dense_layer.bn_1)
sparse_layer.bn_2 = copy.copy(dense_layer.bn_2)
sparse_layer.bn_3 = copy.copy(dense_layer.bn_3)
return sparse_layer
elif isinstance(dense_layer, LinearClassifier):
sparse_layer = LinearClassifier(dense_layer.in_channels, num_classes=dense_layer.num_classes,
sparse=True)
sparse_layer.linear = cls.__get_sparse_layer(dense_layer.linear)
sparse_layer.bn = copy.copy(dense_layer.bn)
return sparse_layer
else:
return copy.copy(dense_layer)
@classmethod
def __get_dense_layer(cls, sparse_layer):
if isinstance(sparse_layer, LinearSVDO):
dense_layer = nn.Linear(sparse_layer.in_features, sparse_layer.out_features,
sparse_layer.bias is not None)
dense_layer.weight.data = sparse_layer.weight.data.clone()
dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
if sparse_layer.bias is not None:
dense_layer.bias.data = sparse_layer.bias.data.clone()
return dense_layer
elif isinstance(sparse_layer, Conv2dSVDO):
dense_layer = nn.Conv2d(sparse_layer.in_channels, sparse_layer.out_channels,
sparse_layer.kernel_size, stride=sparse_layer.stride,
padding=sparse_layer.padding, dilation=sparse_layer.dilation,
groups=sparse_layer.groups, bias=sparse_layer.bias is not None)
dense_layer.weight.data = sparse_layer.weight.data.clone()
dense_layer.weight.data *= (sparse_layer.log_alpha.data < sparse_layer.threshold).float()
if sparse_layer.bias is not None:
dense_layer.bias.data = sparse_layer.bias.data.clone()
return dense_layer
elif isinstance(sparse_layer, DownsampleConv2d):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = DownsampleConv2d(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv = cls.__get_dense_layer(sparse_layer.conv)
return dense_layer
elif isinstance(sparse_layer, BasicBlock):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = BasicBlock(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
if sparse_layer.shortcut is not None:
dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
return dense_layer
elif isinstance(sparse_layer, Bottleneck):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = Bottleneck(sparse_layer.in_channels, sparse_layer.out_channels,
stride=sparse_layer.stride, sparse=False)
dense_layer.conv_1 = cls.__get_dense_layer(sparse_layer.conv_1)
dense_layer.conv_2 = cls.__get_dense_layer(sparse_layer.conv_2)
dense_layer.conv_3 = cls.__get_dense_layer(sparse_layer.conv_3)
if sparse_layer.shortcut is not None:
dense_layer.shortcut = cls.__get_dense_layer(sparse_layer.shortcut)
dense_layer.bn_1 = copy.copy(sparse_layer.bn_1)
dense_layer.bn_2 = copy.copy(sparse_layer.bn_2)
dense_layer.bn_3 = copy.copy(sparse_layer.bn_3)
return dense_layer
elif isinstance(sparse_layer, LinearClassifier):
if not sparse_layer.sparse:
return copy.copy(sparse_layer)
dense_layer = LinearClassifier(sparse_layer.in_channels, num_classes=sparse_layer.num_classes,
sparse=False)
dense_layer.linear = cls.__get_dense_layer(sparse_layer.linear)
dense_layer.bn = copy.copy(sparse_layer.bn)
return dense_layer
else:
return copy.copy(sparse_layer)
def update_mask(self, new_mask):
self.train_mask = new_mask
def set_gradient_flow(self):
for module, train_flag in zip(self.model, self.train_mask):
module.train(mode=train_flag)
for parameter in module.parameters():
parameter.requires_grad = train_flag
def finalize_blocks(self, finalize_mask):
for i in range(len(self.train_mask)):
if self.train_mask[i]:
self.model[i] = self.__get_dense_layer(self.model[i])
def forward(self, x):
out = x
for module in self.model:
out = module(out)
return out
def kl_divergence(self):
total_kl = 0.0
for module, train_flag in zip(self.model, self.train_mask):
if train_flag:
total_kl = total_kl + module.kl_divergence()
return total_kl | 0.932905 | 0.343039 |
from six import with_metaclass
import abc
import numpy as np
from .utils import TOLERANCE
from .utils import RandomUniform
from .utils import norm2
__all__ = ["grad_l1", "grad_l1mu", "grad_l2", "grad_l2", "grad_l2_squared",
"grad_tv", "grad_tvmu", "grad_grouptvmu"]
class Function(with_metaclass(abc.ABCMeta, object)):
def __init__(self, l, **kwargs):
self.l = float(l)
for k in kwargs:
setattr(self, k, kwargs[k])
@abc.abstractmethod
def grad(self, x):
raise NotImplementedError("Abstract method 'grad' must be "
"specialised!")
class L1(Function):
def __init__(self, l, rng=RandomUniform(-1, 1)):
super(L1, self).__init__(l, rng=rng)
def grad(self, x):
"""Sub-gradient of the function
f(x) = |x|_1,
where |x|_1 is the L1-norm.
"""
grad = np.zeros((x.shape[0], 1))
grad[x >= TOLERANCE] = 1.0
grad[x <= -TOLERANCE] = -1.0
between = (x > -TOLERANCE) & (x < TOLERANCE)
grad[between] = self.rng(between.sum())
return self.l * grad
def grad_l1(beta, rng=RandomUniform(-1, 1)):
"""Sub-gradient of the function
f(x) = |x|_1,
where |x|_1 is the L1-norm.
"""
grad = np.zeros((beta.shape[0], 1))
grad[beta >= TOLERANCE] = 1.0
grad[beta <= -TOLERANCE] = -1.0
between = (beta > -TOLERANCE) & (beta < TOLERANCE)
grad[between] = rng(between.sum())
return grad
class SmoothedL1(Function):
def __init__(self, l, mu=TOLERANCE):
super(SmoothedL1, self).__init__(l, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = L1(mu, x),
where L1(mu, x) is the Nesterov smoothed L1-norm.
"""
alpha = (1.0 / self.mu) * x
asnorm = np.abs(alpha)
i = asnorm > 1.0
alpha[i] = np.divide(alpha[i], asnorm[i])
return self.l * alpha
def grad_l1mu(beta, mu):
"""Gradient of the function
f(x) = L1(mu, x),
where L1(mu, x) is the Nesterov smoothed L1-norm.
"""
alpha = (1.0 / mu) * beta
asnorm = np.abs(alpha)
i = asnorm > 1.0
alpha[i] = np.divide(alpha[i], asnorm[i])
return alpha
class L2(Function):
def __init__(self, l, rng=RandomUniform(0, 1)):
super(L2, self).__init__(l, rng=rng)
def grad(self, x):
"""Sub-gradient of the function
f(x) = |x|_2,
where |x|_2 is the L2-norm.
"""
norm_beta = norm2(x)
if norm_beta > TOLERANCE:
return x * (1.0 / norm_beta)
else:
D = x.shape[0]
u = (self.rng(D, 1) * 2.0) - 1.0 # [-1, 1]^D
norm_u = norm2(u)
a = self.rng() # [0, 1]
return (self.l * (a / norm_u)) * u
def grad_l2(beta, rng=RandomUniform(0, 1)):
"""Sub-gradient of the function
f(x) = |x|_2,
where |x|_2 is the L2-norm.
"""
norm_beta = norm2(beta)
if norm_beta > TOLERANCE:
return beta * (1.0 / norm_beta)
else:
D = beta.shape[0]
u = (rng(D, 1) * 2.0) - 1.0 # [-1, 1]^D
norm_u = norm2(u)
a = rng() # [0, 1]
return u * (a / norm_u)
class L2Squared(Function):
def __init__(self, l):
super(L2Squared, self).__init__(l)
def grad(self, x):
"""Gradient of the function
f(x) = (1 / 2) * |x|²_2,
where |x|²_2 is the squared L2-norm.
"""
return self.l * x
def grad_l2_squared(beta, rng=None):
"""Gradient of the function
f(x) = (1 / 2) * |x|²_2,
where |x|²_2 is the squared L2-norm.
"""
return beta
class NesterovFunction(with_metaclass(abc.ABCMeta, Function)):
def __init__(self, l, A, mu=TOLERANCE, rng=RandomUniform(-1, 1),
norm=L2.grad, **kwargs):
super(NesterovFunction, self).__init__(l, rng=rng, norm=norm, **kwargs)
self.A = A
self.mu = mu
def grad(self, x):
grad_Ab = 0
for i in range(len(self.A)):
Ai = self.A[i]
Ab = Ai.dot(x)
grad_Ab += Ai.T.dot(self.norm(Ab, self.rng))
return self.l * grad_Ab
def smoothed_grad(self, x):
alpha = self.alpha(x)
Aa = self.A[0].T.dot(alpha[0])
for i in range(1, len(self.A)):
Aa += self.A[i].T.dot(alpha[i])
return self.l * Aa
def alpha(self, x):
""" Dual variable of the Nesterov function.
"""
alpha = [0] * len(self.A)
for i in range(len(self.A)):
alpha[i] = self.A[i].dot(x) * (1.0 / self.mu)
# Apply projection
alpha = self.project(alpha)
return alpha
def project(self, alpha):
for i in range(len(alpha)):
astar = alpha[i]
normas = np.sqrt(np.sum(astar ** 2))
if normas > 1.0:
astar *= 1.0 / normas
alpha[i] = astar
return alpha
class TotalVariation(Function):
def __init__(self, l, A, rng=RandomUniform(0, 1)):
super(TotalVariation, self).__init__(l, A=A, rng=rng)
def grad(self, x):
"""Gradient of the function
f(x) = TV(x),
where TV(x) is the total variation function.
"""
beta_flat = x.ravel()
Ab = np.vstack([Ai.dot(beta_flat) for Ai in self.A]).T
Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
upper = Ab_norm2 > TOLERANCE
grad_Ab_norm2 = Ab
grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
lower = Ab_norm2 <= TOLERANCE
n_lower = lower.sum()
if n_lower:
D = len(self.A)
vec_rnd = (self.rng(n_lower, D) * 2.0) - 1.0
norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
a = self.rng(n_lower)
grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
grad = np.vstack([self.A[i].T.dot(grad_Ab_norm2[:, i])
for i in range(len(self.A))])
grad = grad.sum(axis=0)
return self.l * grad.reshape(x.shape)
def grad_tv(beta, A, rng=RandomUniform(0, 1)):
beta_flat = beta.ravel()
Ab = np.vstack([Ai.dot(beta_flat) for Ai in A]).T
Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
upper = Ab_norm2 > TOLERANCE
grad_Ab_norm2 = Ab
grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
lower = Ab_norm2 <= TOLERANCE
n_lower = lower.sum()
if n_lower:
D = len(A)
vec_rnd = (rng(n_lower, D) * 2.0) - 1.0
norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
a = rng(n_lower)
grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
grad = np.vstack([A[i].T.dot(grad_Ab_norm2[:, i]) for i in range(len(A))])
grad = grad.sum(axis=0)
return grad.reshape(beta.shape)
class GroupLasso(Function):
def __init__(self, l, A, rng=RandomUniform(-1, 1)):
super(GroupLasso, self).__init__(l, A, rng=rng)
def grad_gl(beta, A, rng=RandomUniform(-1, 1)):
return _Nesterov_grad(beta, A, rng, grad_l2)
class SmoothedTotalVariation(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedTotalVariation, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = TV(mu, x),
where TV(mu, x) is the Nesterov smoothed total variation function.
"""
return self.smoothed_grad(x)
def project(self, alpha):
""" Projection onto the compact space of the smoothed TV function.
"""
ax = alpha[0]
ay = alpha[1]
az = alpha[2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
return [ax, ay, az]
def grad_tvmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_TV_project)
return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupLasso(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedGroupLasso, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = GL(mu, x),
where GL(mu, x) is the Nesterov smoothed group lasso function.
"""
return self.smoothed_grad(x)
def grad_glmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_project)
return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupTotalVariation(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedGroupTotalVariation, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = GroupTV(mu, x),
where GroupTV(mu, x) is the Nesterov smoothed group total variation
function.
"""
return self.smoothed_grad(x)
def project(self, a):
""" Projection onto the compact space of the smoothed Group TV
function.
"""
for g in range(0, len(a), 3):
ax = a[g + 0]
ay = a[g + 1]
az = a[g + 2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
a[g + 0] = ax
a[g + 1] = ay
a[g + 2] = az
return a
def grad_grouptvmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_GroupTV_project)
return _Nesterov_grad_smoothed(A, alpha)
def _Nesterov_GroupTV_project(a):
""" Projection onto the compact space of the smoothed Group TV function.
"""
for g in range(0, len(a), 3):
ax = a[g + 0]
ay = a[g + 1]
az = a[g + 2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
a[g + 0] = ax
a[g + 1] = ay
a[g + 2] = az
return a
def _Nesterov_grad(beta, A, rng=RandomUniform(-1, 1), grad_norm=grad_l2):
grad_Ab = 0
for i in range(len(A)):
Ai = A[i]
Ab = Ai.dot(beta)
grad_Ab += Ai.T.dot(grad_norm(Ab, rng))
return grad_Ab
def _Nesterov_grad_smoothed(A, alpha):
Aa = A[0].T.dot(alpha[0])
for i in range(1, len(A)):
Aa += A[i].T.dot(alpha[i])
return Aa
def _Nestetov_alpha(beta, A, mu, proj):
""" Dual variable of the Nesterov function.
"""
alpha = [0] * len(A)
for i in range(len(A)):
alpha[i] = A[i].dot(beta) * (1.0 / mu)
# Apply projection.
alpha = proj(alpha)
return alpha
def _Nesterov_project(alpha):
for i in range(len(alpha)):
astar = alpha[i]
normas = np.sqrt(np.sum(astar ** 2))
if normas > 1.0:
astar *= 1.0 / normas
alpha[i] = astar
return alpha
def _Nesterov_TV_project(alpha):
""" Projection onto the compact space of the smoothed TV function.
"""
ax = alpha[0]
ay = alpha[1]
az = alpha[2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
return [ax, ay, az]
if __name__ == "__main__":
import doctest
doctest.testmod() | parsimony/datasets/simulate/grad.py | from six import with_metaclass
import abc
import numpy as np
from .utils import TOLERANCE
from .utils import RandomUniform
from .utils import norm2
__all__ = ["grad_l1", "grad_l1mu", "grad_l2", "grad_l2", "grad_l2_squared",
"grad_tv", "grad_tvmu", "grad_grouptvmu"]
class Function(with_metaclass(abc.ABCMeta, object)):
def __init__(self, l, **kwargs):
self.l = float(l)
for k in kwargs:
setattr(self, k, kwargs[k])
@abc.abstractmethod
def grad(self, x):
raise NotImplementedError("Abstract method 'grad' must be "
"specialised!")
class L1(Function):
def __init__(self, l, rng=RandomUniform(-1, 1)):
super(L1, self).__init__(l, rng=rng)
def grad(self, x):
"""Sub-gradient of the function
f(x) = |x|_1,
where |x|_1 is the L1-norm.
"""
grad = np.zeros((x.shape[0], 1))
grad[x >= TOLERANCE] = 1.0
grad[x <= -TOLERANCE] = -1.0
between = (x > -TOLERANCE) & (x < TOLERANCE)
grad[between] = self.rng(between.sum())
return self.l * grad
def grad_l1(beta, rng=RandomUniform(-1, 1)):
"""Sub-gradient of the function
f(x) = |x|_1,
where |x|_1 is the L1-norm.
"""
grad = np.zeros((beta.shape[0], 1))
grad[beta >= TOLERANCE] = 1.0
grad[beta <= -TOLERANCE] = -1.0
between = (beta > -TOLERANCE) & (beta < TOLERANCE)
grad[between] = rng(between.sum())
return grad
class SmoothedL1(Function):
def __init__(self, l, mu=TOLERANCE):
super(SmoothedL1, self).__init__(l, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = L1(mu, x),
where L1(mu, x) is the Nesterov smoothed L1-norm.
"""
alpha = (1.0 / self.mu) * x
asnorm = np.abs(alpha)
i = asnorm > 1.0
alpha[i] = np.divide(alpha[i], asnorm[i])
return self.l * alpha
def grad_l1mu(beta, mu):
"""Gradient of the function
f(x) = L1(mu, x),
where L1(mu, x) is the Nesterov smoothed L1-norm.
"""
alpha = (1.0 / mu) * beta
asnorm = np.abs(alpha)
i = asnorm > 1.0
alpha[i] = np.divide(alpha[i], asnorm[i])
return alpha
class L2(Function):
def __init__(self, l, rng=RandomUniform(0, 1)):
super(L2, self).__init__(l, rng=rng)
def grad(self, x):
"""Sub-gradient of the function
f(x) = |x|_2,
where |x|_2 is the L2-norm.
"""
norm_beta = norm2(x)
if norm_beta > TOLERANCE:
return x * (1.0 / norm_beta)
else:
D = x.shape[0]
u = (self.rng(D, 1) * 2.0) - 1.0 # [-1, 1]^D
norm_u = norm2(u)
a = self.rng() # [0, 1]
return (self.l * (a / norm_u)) * u
def grad_l2(beta, rng=RandomUniform(0, 1)):
"""Sub-gradient of the function
f(x) = |x|_2,
where |x|_2 is the L2-norm.
"""
norm_beta = norm2(beta)
if norm_beta > TOLERANCE:
return beta * (1.0 / norm_beta)
else:
D = beta.shape[0]
u = (rng(D, 1) * 2.0) - 1.0 # [-1, 1]^D
norm_u = norm2(u)
a = rng() # [0, 1]
return u * (a / norm_u)
class L2Squared(Function):
def __init__(self, l):
super(L2Squared, self).__init__(l)
def grad(self, x):
"""Gradient of the function
f(x) = (1 / 2) * |x|²_2,
where |x|²_2 is the squared L2-norm.
"""
return self.l * x
def grad_l2_squared(beta, rng=None):
"""Gradient of the function
f(x) = (1 / 2) * |x|²_2,
where |x|²_2 is the squared L2-norm.
"""
return beta
class NesterovFunction(with_metaclass(abc.ABCMeta, Function)):
def __init__(self, l, A, mu=TOLERANCE, rng=RandomUniform(-1, 1),
norm=L2.grad, **kwargs):
super(NesterovFunction, self).__init__(l, rng=rng, norm=norm, **kwargs)
self.A = A
self.mu = mu
def grad(self, x):
grad_Ab = 0
for i in range(len(self.A)):
Ai = self.A[i]
Ab = Ai.dot(x)
grad_Ab += Ai.T.dot(self.norm(Ab, self.rng))
return self.l * grad_Ab
def smoothed_grad(self, x):
alpha = self.alpha(x)
Aa = self.A[0].T.dot(alpha[0])
for i in range(1, len(self.A)):
Aa += self.A[i].T.dot(alpha[i])
return self.l * Aa
def alpha(self, x):
""" Dual variable of the Nesterov function.
"""
alpha = [0] * len(self.A)
for i in range(len(self.A)):
alpha[i] = self.A[i].dot(x) * (1.0 / self.mu)
# Apply projection
alpha = self.project(alpha)
return alpha
def project(self, alpha):
for i in range(len(alpha)):
astar = alpha[i]
normas = np.sqrt(np.sum(astar ** 2))
if normas > 1.0:
astar *= 1.0 / normas
alpha[i] = astar
return alpha
class TotalVariation(Function):
def __init__(self, l, A, rng=RandomUniform(0, 1)):
super(TotalVariation, self).__init__(l, A=A, rng=rng)
def grad(self, x):
"""Gradient of the function
f(x) = TV(x),
where TV(x) is the total variation function.
"""
beta_flat = x.ravel()
Ab = np.vstack([Ai.dot(beta_flat) for Ai in self.A]).T
Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
upper = Ab_norm2 > TOLERANCE
grad_Ab_norm2 = Ab
grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
lower = Ab_norm2 <= TOLERANCE
n_lower = lower.sum()
if n_lower:
D = len(self.A)
vec_rnd = (self.rng(n_lower, D) * 2.0) - 1.0
norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
a = self.rng(n_lower)
grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
grad = np.vstack([self.A[i].T.dot(grad_Ab_norm2[:, i])
for i in range(len(self.A))])
grad = grad.sum(axis=0)
return self.l * grad.reshape(x.shape)
def grad_tv(beta, A, rng=RandomUniform(0, 1)):
beta_flat = beta.ravel()
Ab = np.vstack([Ai.dot(beta_flat) for Ai in A]).T
Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
upper = Ab_norm2 > TOLERANCE
grad_Ab_norm2 = Ab
grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
lower = Ab_norm2 <= TOLERANCE
n_lower = lower.sum()
if n_lower:
D = len(A)
vec_rnd = (rng(n_lower, D) * 2.0) - 1.0
norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
a = rng(n_lower)
grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
grad = np.vstack([A[i].T.dot(grad_Ab_norm2[:, i]) for i in range(len(A))])
grad = grad.sum(axis=0)
return grad.reshape(beta.shape)
class GroupLasso(Function):
def __init__(self, l, A, rng=RandomUniform(-1, 1)):
super(GroupLasso, self).__init__(l, A, rng=rng)
def grad_gl(beta, A, rng=RandomUniform(-1, 1)):
return _Nesterov_grad(beta, A, rng, grad_l2)
class SmoothedTotalVariation(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedTotalVariation, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = TV(mu, x),
where TV(mu, x) is the Nesterov smoothed total variation function.
"""
return self.smoothed_grad(x)
def project(self, alpha):
""" Projection onto the compact space of the smoothed TV function.
"""
ax = alpha[0]
ay = alpha[1]
az = alpha[2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
return [ax, ay, az]
def grad_tvmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_TV_project)
return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupLasso(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedGroupLasso, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = GL(mu, x),
where GL(mu, x) is the Nesterov smoothed group lasso function.
"""
return self.smoothed_grad(x)
def grad_glmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_project)
return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupTotalVariation(NesterovFunction):
def __init__(self, l, A, mu=TOLERANCE):
super(SmoothedGroupTotalVariation, self).__init__(l, A, mu=mu)
def grad(self, x):
"""Gradient of the function
f(x) = GroupTV(mu, x),
where GroupTV(mu, x) is the Nesterov smoothed group total variation
function.
"""
return self.smoothed_grad(x)
def project(self, a):
""" Projection onto the compact space of the smoothed Group TV
function.
"""
for g in range(0, len(a), 3):
ax = a[g + 0]
ay = a[g + 1]
az = a[g + 2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
a[g + 0] = ax
a[g + 1] = ay
a[g + 2] = az
return a
def grad_grouptvmu(beta, A, mu):
alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_GroupTV_project)
return _Nesterov_grad_smoothed(A, alpha)
def _Nesterov_GroupTV_project(a):
""" Projection onto the compact space of the smoothed Group TV function.
"""
for g in range(0, len(a), 3):
ax = a[g + 0]
ay = a[g + 1]
az = a[g + 2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
a[g + 0] = ax
a[g + 1] = ay
a[g + 2] = az
return a
def _Nesterov_grad(beta, A, rng=RandomUniform(-1, 1), grad_norm=grad_l2):
grad_Ab = 0
for i in range(len(A)):
Ai = A[i]
Ab = Ai.dot(beta)
grad_Ab += Ai.T.dot(grad_norm(Ab, rng))
return grad_Ab
def _Nesterov_grad_smoothed(A, alpha):
Aa = A[0].T.dot(alpha[0])
for i in range(1, len(A)):
Aa += A[i].T.dot(alpha[i])
return Aa
def _Nestetov_alpha(beta, A, mu, proj):
""" Dual variable of the Nesterov function.
"""
alpha = [0] * len(A)
for i in range(len(A)):
alpha[i] = A[i].dot(beta) * (1.0 / mu)
# Apply projection.
alpha = proj(alpha)
return alpha
def _Nesterov_project(alpha):
for i in range(len(alpha)):
astar = alpha[i]
normas = np.sqrt(np.sum(astar ** 2))
if normas > 1.0:
astar *= 1.0 / normas
alpha[i] = astar
return alpha
def _Nesterov_TV_project(alpha):
""" Projection onto the compact space of the smoothed TV function.
"""
ax = alpha[0]
ay = alpha[1]
az = alpha[2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
return [ax, ay, az]
if __name__ == "__main__":
import doctest
doctest.testmod() | 0.712332 | 0.540863 |
import torch.nn as nn
from models.feature_extractors import ConcatCompareCombinedFeaturesExtractor, DotProductCombinedFeaturesExtractor
class CombineSiameseHead(nn.Module):
def __init__(self, input_dim, fc_dims=None, siamese_head_type="concat"):
super().__init__()
self.__verify_siamese_head_type(siamese_head_type)
self.siamese_head_type = siamese_head_type
self.input_dim = input_dim
self.fc_dims = fc_dims if fc_dims is not None else []
self.combined_features_extractor = ConcatCompareCombinedFeaturesExtractor() if self.siamese_head_type == "concat" \
else DotProductCombinedFeaturesExtractor()
self.combined_features_size = self.combined_features_extractor.get_combined_features_size(input_dim)
self.fc_layers = self.__create_fc_layers()
self.relu = nn.ReLU(inplace=True)
@staticmethod
def __verify_siamese_head_type(siamese_head_type):
if siamese_head_type not in ["concat", "dot"]:
raise ValueError(f"Unsupported siamese head type {siamese_head_type}. Supported types are: 'concat', 'dot'.")
def __create_fc_layers(self):
if len(self.fc_dims) == 0:
return nn.ModuleList([])
fc_layers = []
prev_dim = self.combined_features_size
for fc_dim in self.fc_dims:
fc_layers.append(nn.Linear(prev_dim, fc_dim))
prev_dim = fc_dim
return nn.ModuleList(fc_layers)
def forward(self, first_input, second_input):
out = self.combined_features_extractor.extract_combined_features(first_input, second_input)
if len(self.fc_layers) == 0:
return out
for i in range(len(self.fc_layers) - 1):
out = self.relu(self.fc_layers[i](out))
out = self.fc_layers[-1](out)
return out
class DSESiameseClassifier(nn.Module):
def __init__(self, dse_model, siamese_head):
super().__init__()
self.dse_model = dse_model
self.siamese_head = siamese_head
def forward(self, first_input_ids, first_input_mask, second_input_ids, second_input_mask):
first_embedding = self.dse_model(first_input_ids, attention_mask=first_input_mask)
second_embedding = self.dse_model(second_input_ids, attention_mask=second_input_mask)
return self.siamese_head(first_embedding, second_embedding)
def get_dse_model(self):
"""
:return: Sentence embedding model that for a given input sentence outputs a sentence embedding.
"""
return self.dse_model | models/dse_siamese_classifier.py | import torch.nn as nn
from models.feature_extractors import ConcatCompareCombinedFeaturesExtractor, DotProductCombinedFeaturesExtractor
class CombineSiameseHead(nn.Module):
def __init__(self, input_dim, fc_dims=None, siamese_head_type="concat"):
super().__init__()
self.__verify_siamese_head_type(siamese_head_type)
self.siamese_head_type = siamese_head_type
self.input_dim = input_dim
self.fc_dims = fc_dims if fc_dims is not None else []
self.combined_features_extractor = ConcatCompareCombinedFeaturesExtractor() if self.siamese_head_type == "concat" \
else DotProductCombinedFeaturesExtractor()
self.combined_features_size = self.combined_features_extractor.get_combined_features_size(input_dim)
self.fc_layers = self.__create_fc_layers()
self.relu = nn.ReLU(inplace=True)
@staticmethod
def __verify_siamese_head_type(siamese_head_type):
if siamese_head_type not in ["concat", "dot"]:
raise ValueError(f"Unsupported siamese head type {siamese_head_type}. Supported types are: 'concat', 'dot'.")
def __create_fc_layers(self):
if len(self.fc_dims) == 0:
return nn.ModuleList([])
fc_layers = []
prev_dim = self.combined_features_size
for fc_dim in self.fc_dims:
fc_layers.append(nn.Linear(prev_dim, fc_dim))
prev_dim = fc_dim
return nn.ModuleList(fc_layers)
def forward(self, first_input, second_input):
out = self.combined_features_extractor.extract_combined_features(first_input, second_input)
if len(self.fc_layers) == 0:
return out
for i in range(len(self.fc_layers) - 1):
out = self.relu(self.fc_layers[i](out))
out = self.fc_layers[-1](out)
return out
class DSESiameseClassifier(nn.Module):
def __init__(self, dse_model, siamese_head):
super().__init__()
self.dse_model = dse_model
self.siamese_head = siamese_head
def forward(self, first_input_ids, first_input_mask, second_input_ids, second_input_mask):
first_embedding = self.dse_model(first_input_ids, attention_mask=first_input_mask)
second_embedding = self.dse_model(second_input_ids, attention_mask=second_input_mask)
return self.siamese_head(first_embedding, second_embedding)
def get_dse_model(self):
"""
:return: Sentence embedding model that for a given input sentence outputs a sentence embedding.
"""
return self.dse_model | 0.918348 | 0.413063 |
from . import color
import importlib
import re
import string
# Characters permitted in canonical color names (letters and digits only).
# Not referenced in this part of the file — presumably consumed by
# ``Colors._canonical_name`` defined further down; verify in the full file.
_ALLOWED = set(string.ascii_letters + string.digits)
class Colors:
"""DOX HERE"""
    def __init__(self, *palettes, canonicalize_gray='gray', default='black'):
        """Build a color table from one or more palettes.

        :param palettes: palette specifiers passed to ``_add_palette``
            (module names / dotted paths / dict-like objects — see
            ``_add_palette`` below).
        :param canonicalize_gray: ``'gray'`` or ``'grey'`` (``True`` means
            ``'gray'``); spelling that "gray"/"grey" color names are
            normalized to.  A falsy value disables canonicalization.
        :param default: name of the default color; if it cannot be resolved,
            the first key of ``_rgb_to_name`` is used instead.
        """
        # Bind a per-table Color subclass.  We must go through
        # super().__setattr__ because this class's own __setattr__ (below)
        # rejects attribute names that do not start with an underscore.
        class Color(color.Color):
            COLORS = self
        super().__setattr__('Color', Color)
        gt = self._canonicalize_gray = canonicalize_gray
        if not gt:
            # Canonicalization disabled: no regex replacements to apply.
            self._replacements = ()
        else:
            gt = 'gray' if gt is True else gt.lower()
            gf = 'grey' if gt == 'gray' else 'gray'  # the spelling to replace
            if gt not in ('gray', 'grey'):
                raise ValueError('Don\'t understand canonicalize_gray=%s' % gt)
            # Pairs of (compiled_pattern.sub, replacement) covering both the
            # lowercase and the Capitalized whole-word form of the other
            # spelling.
            self._replacements = (
                (re.compile(r'\b%s\b' % gf).sub, gt),
                (re.compile(r'\b%s\b' % gf.capitalize()).sub, gt.capitalize()),
            )
        self._name_to_rgb = {}
        self._rgb_to_name = {}
        # NOTE(review): _add_palette appears to populate the two dicts above
        # as a side effect; its full definition is below this view — confirm
        # its return value before relying on _palettes' contents.
        self._palettes = [self._add_palette(s) for s in palettes]
        self._canonical_to_rgb = {
            self._canonical_name(k): v for k, v in self._name_to_rgb.items()
        }
        # Resolve the default color by name; otherwise fall back to the first
        # key of _rgb_to_name (presumably an RGB value — TODO confirm).
        # Raises StopIteration if the palettes supplied no colors at all.
        self._default = self.get(str(default)) or next(iter(self._rgb_to_name))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def items(self):
return self._name_to_rgb.items()
def values(self):
return self._name_to_rgb.values()
def keys(self):
return self._name_to_rgb.keys()
def closest(self, color):
"""
Return the closest named color to `color`. This can be quite slow,
particularly if there are many colors.
"""
if isinstance(color, list):
color = tuple(color)
if color in self._rgb_to_name:
return color
return min((c.distance2(color), c) for c in self.values())[1]
def __call__(self, *args, **kwds):
return self.Color(*args, **kwds)
def __getitem__(self, name):
"""Try to convert string item into a color"""
canonical = self._canonical_name(name)
try:
return self._canonical_to_rgb[canonical]
except KeyError:
pass
raise KeyError(name)
def __setitem__(self, name, rgb):
raise KeyError(name)
def __contains__(self, x):
"""Return true if this string name appears in the table canonically"""
return self._canonical_name(x) in self._canonical_to_rgb
def __getattr__(self, name):
if name.startswith('_'):
return super().__getattribute__(name)
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if name.startswith('_'):
return super().__setattr__(name, value)
raise AttributeError(name)
def __len__(self):
return len(self._name_to_rgb)
def __iter__(self):
return iter(self._name_to_rgb)
def __eq__(self, x):
return __class__ == x.__class__ and self._name_to_rgb == x._name_to_rgb
def __ne__(self, x):
return not (self == x)
def __add__(self, x):
cg, d = self._canonicalize_gray, self._default
c = x if isinstance(x, __class__) else __class__(x)
palettes = self._palettes + c._palettes
return __class__(*palettes, canonicalize_gray=cg, default=d)
def __radd__(self, x):
other = __class__(
x, canonicalize_gray=self._canonicalize_gray, default=self._default
)
return other + self
def _add_palette(self, palette):
if isinstance(palette, str):
if '.' not in palette:
palette = '.' + palette
if palette.startswith('.'):
palette = 'nc.palette' + palette
palette = importlib.import_module(palette)
if not isinstance(palette, dict):
palette = palette.__dict__
if 'COLORS' in palette:
colors = palette['COLORS']
primary_names = palette.get('PRIMARY_NAMES', ())
else:
colors = palette
palette = {'COLORS': palette}
primary_names = ()
colors = {k: self.Color(v) for k, v in colors.items()}
if not palette.get('PRESERVE_CAPITALIZATION'):
colors = {k.capitalize(): v for k, v in colors.items()}
for sub, rep in self._replacements:
colors = {sub(rep, k): v for k, v in colors.items()}
self._name_to_rgb.update(colors)
def best_name(names):
names.sort(key=lambda n: (len(n), n.lower()))
pnames = (n for n in names if n in primary_names)
return next(pnames, names[0])
names = {}
for n, c in colors.items():
names.setdefault(c, []).append(n)
self._rgb_to_name.update((k, best_name(v)) for k, v in names.items())
return palette
def _canonical_name(self, name):
name = name.lower()
if self._canonicalize_gray:
name = name.replace('grey', 'gray')
return ''.join(i for i in name if i in _ALLOWED)
"""Some colors have multiple names; a best name needs to be chosen.
palette.PRIMARY_NAMES is a list of names to use by preference.
Otherwise the shortest color name is chosen, and in a tie, the
alphabetically first one.
""" | nc/colors.py | from . import color
import importlib
import re
import string
_ALLOWED = set(string.ascii_letters + string.digits)
class Colors:
"""DOX HERE"""
def __init__(self, *palettes, canonicalize_gray='gray', default='black'):
class Color(color.Color):
COLORS = self
super().__setattr__('Color', Color)
gt = self._canonicalize_gray = canonicalize_gray
if not gt:
self._replacements = ()
else:
gt = 'gray' if gt is True else gt.lower()
gf = 'grey' if gt == 'gray' else 'gray'
if gt not in ('gray', 'grey'):
raise ValueError('Don\'t understand canonicalize_gray=%s' % gt)
self._replacements = (
(re.compile(r'\b%s\b' % gf).sub, gt),
(re.compile(r'\b%s\b' % gf.capitalize()).sub, gt.capitalize()),
)
self._name_to_rgb = {}
self._rgb_to_name = {}
self._palettes = [self._add_palette(s) for s in palettes]
self._canonical_to_rgb = {
self._canonical_name(k): v for k, v in self._name_to_rgb.items()
}
self._default = self.get(str(default)) or next(iter(self._rgb_to_name))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def items(self):
return self._name_to_rgb.items()
def values(self):
return self._name_to_rgb.values()
def keys(self):
return self._name_to_rgb.keys()
def closest(self, color):
"""
Return the closest named color to `color`. This can be quite slow,
particularly if there are many colors.
"""
if isinstance(color, list):
color = tuple(color)
if color in self._rgb_to_name:
return color
return min((c.distance2(color), c) for c in self.values())[1]
def __call__(self, *args, **kwds):
return self.Color(*args, **kwds)
def __getitem__(self, name):
"""Try to convert string item into a color"""
canonical = self._canonical_name(name)
try:
return self._canonical_to_rgb[canonical]
except KeyError:
pass
raise KeyError(name)
def __setitem__(self, name, rgb):
raise KeyError(name)
def __contains__(self, x):
"""Return true if this string name appears in the table canonically"""
return self._canonical_name(x) in self._canonical_to_rgb
def __getattr__(self, name):
if name.startswith('_'):
return super().__getattribute__(name)
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if name.startswith('_'):
return super().__setattr__(name, value)
raise AttributeError(name)
def __len__(self):
return len(self._name_to_rgb)
def __iter__(self):
return iter(self._name_to_rgb)
def __eq__(self, x):
return __class__ == x.__class__ and self._name_to_rgb == x._name_to_rgb
def __ne__(self, x):
return not (self == x)
def __add__(self, x):
cg, d = self._canonicalize_gray, self._default
c = x if isinstance(x, __class__) else __class__(x)
palettes = self._palettes + c._palettes
return __class__(*palettes, canonicalize_gray=cg, default=d)
def __radd__(self, x):
other = __class__(
x, canonicalize_gray=self._canonicalize_gray, default=self._default
)
return other + self
def _add_palette(self, palette):
if isinstance(palette, str):
if '.' not in palette:
palette = '.' + palette
if palette.startswith('.'):
palette = 'nc.palette' + palette
palette = importlib.import_module(palette)
if not isinstance(palette, dict):
palette = palette.__dict__
if 'COLORS' in palette:
colors = palette['COLORS']
primary_names = palette.get('PRIMARY_NAMES', ())
else:
colors = palette
palette = {'COLORS': palette}
primary_names = ()
colors = {k: self.Color(v) for k, v in colors.items()}
if not palette.get('PRESERVE_CAPITALIZATION'):
colors = {k.capitalize(): v for k, v in colors.items()}
for sub, rep in self._replacements:
colors = {sub(rep, k): v for k, v in colors.items()}
self._name_to_rgb.update(colors)
def best_name(names):
names.sort(key=lambda n: (len(n), n.lower()))
pnames = (n for n in names if n in primary_names)
return next(pnames, names[0])
names = {}
for n, c in colors.items():
names.setdefault(c, []).append(n)
self._rgb_to_name.update((k, best_name(v)) for k, v in names.items())
return palette
def _canonical_name(self, name):
name = name.lower()
if self._canonicalize_gray:
name = name.replace('grey', 'gray')
return ''.join(i for i in name if i in _ALLOWED)
"""Some colors have multiple names; a best name needs to be chosen.
palette.PRIMARY_NAMES is a list of names to use by preference.
Otherwise the shortest color name is chosen, and in a tie, the
alphabetically first one.
""" | 0.693369 | 0.258841 |
from typing import Any, Dict, Iterable, List, Optional, Tuple
import json
import numpy
import xarray
from . import lock
from . import storage
from . import converter
from .. import geodetic
from ..core import geohash
class GeoHash:
"""Geogrophic index based on GeoHash encoding.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
precision (int): Accuracy of the index. By default the precision is 3
characters. The table below gives the correspondence between the
number of characters (i.e. the ``precision`` parameter of this
constructor), the size of the boxes of the grid at the equator and
the total number of boxes.
========= =============== ==========
precision lng/lat (km) samples
========= =============== ==========
1 4950/4950 32
2 618.75/1237.50 1024
3 154.69/154.69 32768
4 19.34/38.67 1048576
5 4.83/4.83 33554432
6 0.60/1.21 1073741824
========= =============== ==========
synchronizer (lock.Synchronizer, optional): Write synchronizer.
"""
PROPERTIES = b'.properties'
def __init__(self,
store: storage.AbstractMutableMapping,
precision: int = 3,
synchronizer: Optional[lock.Synchronizer] = None) -> None:
self._store = store
self._precision = precision
self._synchronizer = synchronizer or lock.PuppetSynchronizer()
@property
def store(self) -> storage.AbstractMutableMapping:
"""Gets the object hndling the storage of this instance."""
return self._store
@property
def precision(self) -> int:
"""Accuracy of this instance."""
return self._precision
def set_properties(self) -> None:
"""Definition of index properties."""
if self.PROPERTIES in self._store:
raise RuntimeError("index already initialized")
self._store[self.PROPERTIES] = json.dumps(
{'precision': self._precision})
@classmethod
def get_properties(cls, store) -> Dict[str, Any]:
"""Reading index properties.
Returns:
dict: Index properties (number of character used to encode a
position).
"""
precision = store[cls.PROPERTIES]
if isinstance(precision, list):
precision = precision[0]
return json.loads(precision)
def encode(self,
lon: numpy.ndarray,
lat: numpy.ndarray,
normalize: bool = True,
unicode: bool = False) -> numpy.ndarray:
"""Encode points into geohash with the given precision
Args:
lon (numpy.ndarray): Longitudes in degrees of the positions to be
encoded.
lat (numpy.ndarray): Latitudes in degrees of the positions to be
encoded.
normalize (bool): If true, normalize longitude between [-180, 180[
unicode (bool): If true, transforms GeoHash codes into unicode
strings.
Returns:
numpy.ndarray: geohash code for each coordinates of the points
read from the vectors provided.
"""
if normalize:
lon = (lon + 180) % 360 - 180
result = geohash.encode(lon, lat, precision=self._precision)
if unicode:
return result.astype('U')
return result
def update(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, overwriting
existing keys.
Args:
other (iterable): Geohash codes associated with the values to be
stored in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.update(geohash_map.items())
def extend(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, appending
existing keys with the new data.
Args:
other (iterable): Geohash codes associated with the values to be
updated in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.extend(geohash_map.items())
def keys(self, box: Optional[geodetic.Box] = None) -> Iterable[bytes]:
"""Returns all hash defined in the index.
Args:
box (pyinterp.geodetic.Box, optional): If true, the method returns
the codes defined in the supplied area, otherwise all the codes
stored in the index.
Returns:
iterable: keys selected in the index.
"""
result = filter(lambda item: item != self.PROPERTIES,
self._store.keys())
if box is None:
return result
return set(geohash.bounding_boxes(
box, precision=self._precision)).intersection(set(result))
def box(self, box: Optional[geodetic.Box] = None) -> List[Any]:
"""Selection of all data within the defined geographical area.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: List of data contained in the database for all positions
located in the selected geographic region.
"""
return list(
filter(
lambda item: len(item) != 0,
self._store.values(
list(geohash.bounding_boxes(box,
precision=self._precision)))))
def values(self, keys: Optional[Iterable[bytes]] = None) -> List[Any]:
"""Returns the list of values defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all values
defined in the index.
Returns:
list: values selected in the index.
"""
keys = keys or self.keys()
return self._store.values(list(keys))
def items(
self,
keys: Optional[Iterable[bytes]] = None) -> List[Tuple[bytes, Any]]:
"""Returns the list of pair (key, value) defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all items
defined in the index.
Returns:
list: items selected in the index.
"""
keys = keys or self.keys()
return self._store.items(list(keys))
def to_xarray(self,
box: Optional[geodetic.Box] = None) -> xarray.DataArray:
"""Get the XArray containing the data selected in the index.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: items selected in the index.
"""
keys = list(self.keys(box))
if len(keys) == 0:
hashs = numpy.array([], dtype="S1")
data = numpy.array([])
else:
hashs = numpy.array(keys)
data = numpy.array(self.values(keys), dtype=object)
return converter.to_xarray(hashs, data.squeeze())
@staticmethod
def where(
hash_codes: numpy.ndarray
) -> Dict[bytes, Tuple[Tuple[int, int], Tuple[int, int]]]:
"""Returns the start and end indexes of the different GeoHash boxes.
Args:
hash_codes (numpy.ndarray): geohash codes obtained by the `encode`
method.
Returns:
dict: the start and end indexes for each geohash boxes
"""
return geohash.where(hash_codes)
def __len__(self):
return len(self._store) - 1
def __repr__(self) -> str:
return f"<{self.__class__.__name__} precision={self._precision}>"
def init_geohash(store: storage.AbstractMutableMapping,
precision: int = 3,
synchronizer: Optional[lock.Synchronizer] = None) -> GeoHash:
"""Creation of a GeoHash index.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
precision (int): Accuracy of the index. By default the precision is 3
characters.
synchronizer (lock.Synchronizer, optional): Write synchronizer
Returns:
GeoHash: index handler.
"""
result = GeoHash(store, precision, synchronizer)
result.set_properties()
return result
def open_geohash(store: storage.AbstractMutableMapping,
synchronizer: Optional[lock.Synchronizer] = None) -> GeoHash:
"""Open of a GeoHash index.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
synchronizer (lock.Synchronizer, optional): Write synchronizer.
Returns:
GeoHash: index handler.
"""
result = GeoHash(store,
synchronizer=synchronizer,
**GeoHash.get_properties(store))
return result | src/pyinterp/geohash/index.py | from typing import Any, Dict, Iterable, List, Optional, Tuple
import json
import numpy
import xarray
from . import lock
from . import storage
from . import converter
from .. import geodetic
from ..core import geohash
class GeoHash:
"""Geogrophic index based on GeoHash encoding.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
precision (int): Accuracy of the index. By default the precision is 3
characters. The table below gives the correspondence between the
number of characters (i.e. the ``precision`` parameter of this
constructor), the size of the boxes of the grid at the equator and
the total number of boxes.
========= =============== ==========
precision lng/lat (km) samples
========= =============== ==========
1 4950/4950 32
2 618.75/1237.50 1024
3 154.69/154.69 32768
4 19.34/38.67 1048576
5 4.83/4.83 33554432
6 0.60/1.21 1073741824
========= =============== ==========
synchronizer (lock.Synchronizer, optional): Write synchronizer.
"""
PROPERTIES = b'.properties'
def __init__(self,
store: storage.AbstractMutableMapping,
precision: int = 3,
synchronizer: Optional[lock.Synchronizer] = None) -> None:
self._store = store
self._precision = precision
self._synchronizer = synchronizer or lock.PuppetSynchronizer()
@property
def store(self) -> storage.AbstractMutableMapping:
"""Gets the object hndling the storage of this instance."""
return self._store
@property
def precision(self) -> int:
"""Accuracy of this instance."""
return self._precision
def set_properties(self) -> None:
"""Definition of index properties."""
if self.PROPERTIES in self._store:
raise RuntimeError("index already initialized")
self._store[self.PROPERTIES] = json.dumps(
{'precision': self._precision})
@classmethod
def get_properties(cls, store) -> Dict[str, Any]:
"""Reading index properties.
Returns:
dict: Index properties (number of character used to encode a
position).
"""
precision = store[cls.PROPERTIES]
if isinstance(precision, list):
precision = precision[0]
return json.loads(precision)
def encode(self,
lon: numpy.ndarray,
lat: numpy.ndarray,
normalize: bool = True,
unicode: bool = False) -> numpy.ndarray:
"""Encode points into geohash with the given precision
Args:
lon (numpy.ndarray): Longitudes in degrees of the positions to be
encoded.
lat (numpy.ndarray): Latitudes in degrees of the positions to be
encoded.
normalize (bool): If true, normalize longitude between [-180, 180[
unicode (bool): If true, transforms GeoHash codes into unicode
strings.
Returns:
numpy.ndarray: geohash code for each coordinates of the points
read from the vectors provided.
"""
if normalize:
lon = (lon + 180) % 360 - 180
result = geohash.encode(lon, lat, precision=self._precision)
if unicode:
return result.astype('U')
return result
def update(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, overwriting
existing keys.
Args:
other (iterable): Geohash codes associated with the values to be
stored in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.update(geohash_map.items())
def extend(self, other: Iterable[Tuple[bytes, Any]]) -> None:
"""Update the index with the key/value pairs from data, appending
existing keys with the new data.
Args:
other (iterable): Geohash codes associated with the values to be
updated in the database.
"""
with self._synchronizer:
geohash_map = dict()
geohash.update_dict(geohash_map, other)
self._store.extend(geohash_map.items())
def keys(self, box: Optional[geodetic.Box] = None) -> Iterable[bytes]:
"""Returns all hash defined in the index.
Args:
box (pyinterp.geodetic.Box, optional): If true, the method returns
the codes defined in the supplied area, otherwise all the codes
stored in the index.
Returns:
iterable: keys selected in the index.
"""
result = filter(lambda item: item != self.PROPERTIES,
self._store.keys())
if box is None:
return result
return set(geohash.bounding_boxes(
box, precision=self._precision)).intersection(set(result))
def box(self, box: Optional[geodetic.Box] = None) -> List[Any]:
"""Selection of all data within the defined geographical area.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: List of data contained in the database for all positions
located in the selected geographic region.
"""
return list(
filter(
lambda item: len(item) != 0,
self._store.values(
list(geohash.bounding_boxes(box,
precision=self._precision)))))
def values(self, keys: Optional[Iterable[bytes]] = None) -> List[Any]:
"""Returns the list of values defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all values
defined in the index.
Returns:
list: values selected in the index.
"""
keys = keys or self.keys()
return self._store.values(list(keys))
def items(
self,
keys: Optional[Iterable[bytes]] = None) -> List[Tuple[bytes, Any]]:
"""Returns the list of pair (key, value) defined in the index.
Args:
keys (iterable, optional): The list of keys to be selected. If
this parameter is undefined, the method returns all items
defined in the index.
Returns:
list: items selected in the index.
"""
keys = keys or self.keys()
return self._store.items(list(keys))
def to_xarray(self,
box: Optional[geodetic.Box] = None) -> xarray.DataArray:
"""Get the XArray containing the data selected in the index.
Args:
box (pyinterp.geodetic.Box): Bounding box used for data selection.
Returns:
list: items selected in the index.
"""
keys = list(self.keys(box))
if len(keys) == 0:
hashs = numpy.array([], dtype="S1")
data = numpy.array([])
else:
hashs = numpy.array(keys)
data = numpy.array(self.values(keys), dtype=object)
return converter.to_xarray(hashs, data.squeeze())
@staticmethod
def where(
hash_codes: numpy.ndarray
) -> Dict[bytes, Tuple[Tuple[int, int], Tuple[int, int]]]:
"""Returns the start and end indexes of the different GeoHash boxes.
Args:
hash_codes (numpy.ndarray): geohash codes obtained by the `encode`
method.
Returns:
dict: the start and end indexes for each geohash boxes
"""
return geohash.where(hash_codes)
def __len__(self):
return len(self._store) - 1
def __repr__(self) -> str:
return f"<{self.__class__.__name__} precision={self._precision}>"
def init_geohash(store: storage.AbstractMutableMapping,
precision: int = 3,
synchronizer: Optional[lock.Synchronizer] = None) -> GeoHash:
"""Creation of a GeoHash index.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
precision (int): Accuracy of the index. By default the precision is 3
characters.
synchronizer (lock.Synchronizer, optional): Write synchronizer
Returns:
GeoHash: index handler.
"""
result = GeoHash(store, precision, synchronizer)
result.set_properties()
return result
def open_geohash(store: storage.AbstractMutableMapping,
synchronizer: Optional[lock.Synchronizer] = None) -> GeoHash:
"""Open of a GeoHash index.
Args:
store (AbstractMutableMapping): Object managing the storage of the
index.
synchronizer (lock.Synchronizer, optional): Write synchronizer.
Returns:
GeoHash: index handler.
"""
result = GeoHash(store,
synchronizer=synchronizer,
**GeoHash.get_properties(store))
return result | 0.963326 | 0.499573 |
from .. import MTYPE_INVOKE, perform_request
from ...Codec import Codec
# rpc_gap_config_cccd_not_check(RPC_T_GAP_CONFIG_GATT_CCCD_NOT_CHECK cccd_not_check_flag) -> void
def cccd_not_check(cccd_not_check_flag) :
codec = Codec(6, 1, MTYPE_INVOKE, "I", "")
return perform_request(codec, cccd_not_check_flag)
# rpc_gap_config_ccc_bits_count(uint8 gatt_server_ccc_bits_count, uint8 gatt_storage_ccc_bits_count) -> void
def ccc_bits_count(gatt_server_ccc_bits_count, gatt_storage_ccc_bits_count) :
codec = Codec(6, 2, MTYPE_INVOKE, "BB", "")
return perform_request(codec, gatt_server_ccc_bits_count, gatt_storage_ccc_bits_count)
# rpc_gap_config_max_attribute_table_count(uint8 gatt_max_attribute_table_count) -> void
def max_attribute_table_count(gatt_max_attribute_table_count) :
codec = Codec(6, 3, MTYPE_INVOKE, "B", "")
return perform_request(codec, gatt_max_attribute_table_count)
# rpc_gap_config_max_mtu_size(uint16 att_max_mtu_size) -> void
def max_mtu_size(att_max_mtu_size) :
codec = Codec(6, 4, MTYPE_INVOKE, "H", "")
return perform_request(codec, att_max_mtu_size)
# rpc_gap_config_bte_pool_size(uint8 bte_pool_size) -> void
def bte_pool_size(bte_pool_size) :
codec = Codec(6, 5, MTYPE_INVOKE, "B", "")
return perform_request(codec, bte_pool_size)
# rpc_gap_config_bt_report_buf_num(uint8 bt_report_buf_num) -> void
def bt_report_buf_num(bt_report_buf_num) :
codec = Codec(6, 6, MTYPE_INVOKE, "B", "")
return perform_request(codec, bt_report_buf_num)
# rpc_gap_config_le_key_storage_flag(uint16 le_key_storage_flag) -> void
def le_key_storage_flag(le_key_storage_flag) :
codec = Codec(6, 7, MTYPE_INVOKE, "H", "")
return perform_request(codec, le_key_storage_flag)
# rpc_gap_config_max_le_paired_device(uint8 max_le_paired_device) -> void
def max_le_paired_device(max_le_paired_device) :
codec = Codec(6, 8, MTYPE_INVOKE, "B", "")
return perform_request(codec, max_le_paired_device)
# rpc_gap_config_max_le_link_num(uint8 le_link_num) -> void
def max_le_link_num(le_link_num) :
codec = Codec(6, 9, MTYPE_INVOKE, "B", "")
return perform_request(codec, le_link_num) | libsrc/wio_terminal_rtl/rpc/ble/gap_config.py | from .. import MTYPE_INVOKE, perform_request
from ...Codec import Codec
# rpc_gap_config_cccd_not_check(RPC_T_GAP_CONFIG_GATT_CCCD_NOT_CHECK cccd_not_check_flag) -> void
def cccd_not_check(cccd_not_check_flag) :
codec = Codec(6, 1, MTYPE_INVOKE, "I", "")
return perform_request(codec, cccd_not_check_flag)
# rpc_gap_config_ccc_bits_count(uint8 gatt_server_ccc_bits_count, uint8 gatt_storage_ccc_bits_count) -> void
def ccc_bits_count(gatt_server_ccc_bits_count, gatt_storage_ccc_bits_count) :
codec = Codec(6, 2, MTYPE_INVOKE, "BB", "")
return perform_request(codec, gatt_server_ccc_bits_count, gatt_storage_ccc_bits_count)
# rpc_gap_config_max_attribute_table_count(uint8 gatt_max_attribute_table_count) -> void
def max_attribute_table_count(gatt_max_attribute_table_count) :
codec = Codec(6, 3, MTYPE_INVOKE, "B", "")
return perform_request(codec, gatt_max_attribute_table_count)
# rpc_gap_config_max_mtu_size(uint16 att_max_mtu_size) -> void
def max_mtu_size(att_max_mtu_size) :
codec = Codec(6, 4, MTYPE_INVOKE, "H", "")
return perform_request(codec, att_max_mtu_size)
# rpc_gap_config_bte_pool_size(uint8 bte_pool_size) -> void
def bte_pool_size(bte_pool_size) :
codec = Codec(6, 5, MTYPE_INVOKE, "B", "")
return perform_request(codec, bte_pool_size)
# rpc_gap_config_bt_report_buf_num(uint8 bt_report_buf_num) -> void
def bt_report_buf_num(bt_report_buf_num) :
codec = Codec(6, 6, MTYPE_INVOKE, "B", "")
return perform_request(codec, bt_report_buf_num)
# rpc_gap_config_le_key_storage_flag(uint16 le_key_storage_flag) -> void
def le_key_storage_flag(le_key_storage_flag) :
codec = Codec(6, 7, MTYPE_INVOKE, "H", "")
return perform_request(codec, le_key_storage_flag)
# rpc_gap_config_max_le_paired_device(uint8 max_le_paired_device) -> void
def max_le_paired_device(max_le_paired_device) :
codec = Codec(6, 8, MTYPE_INVOKE, "B", "")
return perform_request(codec, max_le_paired_device)
# rpc_gap_config_max_le_link_num(uint8 le_link_num) -> void
def max_le_link_num(le_link_num) :
codec = Codec(6, 9, MTYPE_INVOKE, "B", "")
return perform_request(codec, le_link_num) | 0.328853 | 0.141875 |
from django.conf.urls import url, include
from django.contrib import admin
from django.shortcuts import redirect
from django.views.generic.base import RedirectView
from smirk.resources import *
urlpatterns = [
url(r'^', admin.site.urls),
url(r'^createPatient', RedirectView.as_view(url='smirk/patient/add/', permanent=True), name="createPatient"),
url(r'^createDoctor', RedirectView.as_view(url='smirk/doctor/add/', permanent=False)),
url(r'^createNurse', RedirectView.as_view(url='smirk/nurse/add/', permanent=False)),
url(r'^createSysAdmin', RedirectView.as_view(url='smirk/system_administrator/add/', permanent=False)),
url(r'^createMedAdmin', RedirectView.as_view(url='smirk/medical_administrator/add/', permanent=False)),
url(r'^createInsAdmin', RedirectView.as_view(url='smirk/insurance_administrator/add/', permanent=False)),
url(r'^editPerm', RedirectView.as_view(url='auth/group/', permanent=False)),
url(r'^addDoctorExamRecord', RedirectView.as_view(url='smirk/doctor_exam_record/add/', permanent=False)),
url(r'^addTestResultRecord', RedirectView.as_view(url='smirk/test_results_record/add/', permanent=False)),
url(r'^addDiagnosisRecord', RedirectView.as_view(url='smirk/diagnosis_record/add/', permanent=False)),
url(r'^addInsuranceClaimRecord', RedirectView.as_view(url='smirk/insurance_claim_record/add/', permanent=False)),
url(r'^addRawRecord', RedirectView.as_view(url='smirk/raw_record/add/', permanent=False)),
url(r'^createCorrespondenceRecord', RedirectView.as_view(url='smirk/patient_doctor_correspondence_record/add/', permanent=False)),
url(r'^addCorrespondenceNote', RedirectView.as_view(url='smirk/note/add/', permanent=False)),
url(r'^listRecords', RedirectView.as_view(url='smirk/record/', permanent=False)),
url(r'^viewRecord', RedirectView.as_view(url='smirk/record/', permanent=False)),
url(r'^editRecordPerm', RedirectView.as_view(url='auth/group/', permanent=False)),
url(r'^editPatient', RedirectView.as_view(url='smirk/patient/', permanent=False)),
url(r'^editDoctor', RedirectView.as_view(url='smirk/doctor/', permanent=False)),
url(r'^editNurse', RedirectView.as_view(url='smirk/nurse/', permanent=False)),
url(r'^editSysAdmin', RedirectView.as_view(url='smirk/system_administrator/', permanent=False)),
url(r'^editMedAdmin', RedirectView.as_view(url='smirk/medical_administrator/', permanent=False)),
url(r'^editInsAdmin', RedirectView.as_view(url='smirk/insurance_administrator/', permanent=False)),
url(r'^viewPatientProfile', RedirectView.as_view(url='smirk/patient/', permanent=False)),
url(r'^viewRecoveryPhrase', RedirectView.as_view(url='smirk/doctor/', permanent=False)),
url(r'^removeUserProfile', RedirectView.as_view(url='auth/user/', permanent=False)),
url(r'^api/', include(System_Administrator().urls)),
url(r'^api/', include(Doctor().urls)),
url(r'^api/', include(Nurse().urls)),
url(r'^api/', include(Medical_Administrator().urls)),
url(r'^api/', include(Insurance_Administrator().urls)),
url(r'^api/', include(Patient().urls)),
url(r'^api/', include(Record().urls)),
url(r'^api/', include(Doctor_Exam_Record().urls)),
url(r'^api/', include(Diagnosis_Record().urls)),
url(r'^api/', include(Test_Results_Record().urls)),
url(r'^api/', include(Insurance_Claim_Record().urls)),
url(r'^api/', include(Patient_Doctor_Correspondence_Record().urls)),
url(r'^api/', include(Raw_Record().urls)),
url(r'^api/', include(Note().urls)),
url(r'^api/', include(User().urls)),
url(r'^api/', include(Group().urls)),
]
admin.site.site_header= 'Secure Medical Information Repository Kit'
admin.site.index_title= 'SMIRK'
admin.site.site_title= 'Welcome' | unf/urls.py | from django.conf.urls import url, include
from django.contrib import admin
from django.shortcuts import redirect
from django.views.generic.base import RedirectView
from smirk.resources import *
urlpatterns = [
url(r'^', admin.site.urls),
url(r'^createPatient', RedirectView.as_view(url='smirk/patient/add/', permanent=True), name="createPatient"),
url(r'^createDoctor', RedirectView.as_view(url='smirk/doctor/add/', permanent=False)),
url(r'^createNurse', RedirectView.as_view(url='smirk/nurse/add/', permanent=False)),
url(r'^createSysAdmin', RedirectView.as_view(url='smirk/system_administrator/add/', permanent=False)),
url(r'^createMedAdmin', RedirectView.as_view(url='smirk/medical_administrator/add/', permanent=False)),
url(r'^createInsAdmin', RedirectView.as_view(url='smirk/insurance_administrator/add/', permanent=False)),
url(r'^editPerm', RedirectView.as_view(url='auth/group/', permanent=False)),
url(r'^addDoctorExamRecord', RedirectView.as_view(url='smirk/doctor_exam_record/add/', permanent=False)),
url(r'^addTestResultRecord', RedirectView.as_view(url='smirk/test_results_record/add/', permanent=False)),
url(r'^addDiagnosisRecord', RedirectView.as_view(url='smirk/diagnosis_record/add/', permanent=False)),
url(r'^addInsuranceClaimRecord', RedirectView.as_view(url='smirk/insurance_claim_record/add/', permanent=False)),
url(r'^addRawRecord', RedirectView.as_view(url='smirk/raw_record/add/', permanent=False)),
url(r'^createCorrespondenceRecord', RedirectView.as_view(url='smirk/patient_doctor_correspondence_record/add/', permanent=False)),
url(r'^addCorrespondenceNote', RedirectView.as_view(url='smirk/note/add/', permanent=False)),
url(r'^listRecords', RedirectView.as_view(url='smirk/record/', permanent=False)),
url(r'^viewRecord', RedirectView.as_view(url='smirk/record/', permanent=False)),
url(r'^editRecordPerm', RedirectView.as_view(url='auth/group/', permanent=False)),
url(r'^editPatient', RedirectView.as_view(url='smirk/patient/', permanent=False)),
url(r'^editDoctor', RedirectView.as_view(url='smirk/doctor/', permanent=False)),
url(r'^editNurse', RedirectView.as_view(url='smirk/nurse/', permanent=False)),
url(r'^editSysAdmin', RedirectView.as_view(url='smirk/system_administrator/', permanent=False)),
url(r'^editMedAdmin', RedirectView.as_view(url='smirk/medical_administrator/', permanent=False)),
url(r'^editInsAdmin', RedirectView.as_view(url='smirk/insurance_administrator/', permanent=False)),
url(r'^viewPatientProfile', RedirectView.as_view(url='smirk/patient/', permanent=False)),
url(r'^viewRecoveryPhrase', RedirectView.as_view(url='smirk/doctor/', permanent=False)),
url(r'^removeUserProfile', RedirectView.as_view(url='auth/user/', permanent=False)),
url(r'^api/', include(System_Administrator().urls)),
url(r'^api/', include(Doctor().urls)),
url(r'^api/', include(Nurse().urls)),
url(r'^api/', include(Medical_Administrator().urls)),
url(r'^api/', include(Insurance_Administrator().urls)),
url(r'^api/', include(Patient().urls)),
url(r'^api/', include(Record().urls)),
url(r'^api/', include(Doctor_Exam_Record().urls)),
url(r'^api/', include(Diagnosis_Record().urls)),
url(r'^api/', include(Test_Results_Record().urls)),
url(r'^api/', include(Insurance_Claim_Record().urls)),
url(r'^api/', include(Patient_Doctor_Correspondence_Record().urls)),
url(r'^api/', include(Raw_Record().urls)),
url(r'^api/', include(Note().urls)),
url(r'^api/', include(User().urls)),
url(r'^api/', include(Group().urls)),
]
admin.site.site_header= 'Secure Medical Information Repository Kit'
admin.site.index_title= 'SMIRK'
admin.site.site_title= 'Welcome' | 0.252937 | 0.06804 |
from os import times
import cv2
import numpy as np
import time
from ctypes import *
import sys
from numpy.lib.type_check import imag
sys.path.append("C:\Program Files (x86)\MVS\Development\Samples\Python\MvImport")
from MvCameraControl_class import *
class HHV:
def __init__(self,):
self.init_cam()
self.abs_path = 'D:\robot_code\lixiang\AfterConvert_RGB0.jpg'
def get_image_array(self, image_size=None, index=0):
self.save_image2local(index=index)
img = cv2.imread("AfterConvert_RGB0.jpg")
if image_size:
return cv2.resize(img, (image_size, image_size))
return img
def init_cam(self,):
deviceList = MV_CC_DEVICE_INFO_LIST()
tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
nConnectionNum = 0
# ch:创建相机实例 | en:Creat Camera Object
self.cam = MvCamera()
# ch:选择设备并创建句柄 | en:Select device and create handle
stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)],
POINTER(MV_CC_DEVICE_INFO)).contents
ret = self.cam.MV_CC_CreateHandle(stDeviceList)
# ch:打开设备 | en:Open device
ret = self.cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
# ch:设置触发模式为off | en:Set trigger mode as off
ret = self.cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF)
# ch:获取数据包大小 | en:Get payload size
stParam = MVCC_INTVALUE()
memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
ret = self.cam.MV_CC_GetIntValue("PayloadSize", stParam)
self.nPayloadSize = stParam.nCurValue
def save_image2local(self, index=0):
# ch:开始取流 | en:Start grab image
ret = self.cam.MV_CC_StartGrabbing()
stDeviceList = MV_FRAME_OUT_INFO_EX()
memset(byref(stDeviceList), 0, sizeof(stDeviceList))
self.data_buf = (c_ubyte * self.nPayloadSize)()
ret = self.cam.MV_CC_GetOneFrameTimeout(byref(self.data_buf), self.nPayloadSize, stDeviceList, 1000)
if ret == 0:
# print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stDeviceList.nWidth, stDeviceList.nHeight, stDeviceList.nFrameNum))
nRGBSize = stDeviceList.nWidth * stDeviceList.nHeight * 3
stConvertParam=MV_SAVE_IMAGE_PARAM_EX()
stConvertParam.nWidth = stDeviceList.nWidth
stConvertParam.nHeight = stDeviceList.nHeight
stConvertParam.pData = self.data_buf
stConvertParam.nDataLen = stDeviceList.nFrameLen
stConvertParam.enPixelType = stDeviceList.enPixelType
stConvertParam.nImageLen = stConvertParam.nDataLen
stConvertParam.nJpgQuality = 70
stConvertParam.enImageType = MV_Image_Jpeg
stConvertParam.pImageBuffer = (c_ubyte * nRGBSize)()
stConvertParam.nBufferSize = nRGBSize
# ret = self.cam.MV_CC_ConvertPixelType(stConvertParam)
# print(stConvertParam.nImageLen)
ret = self.cam.MV_CC_SaveImageEx2(stConvertParam)
if ret != 0:
print ("convert pixel fail ! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
file_path = "AfterConvert_RGB"+str(index)+".jpg"
file_open = open(file_path.encode('ascii'), 'wb+')
img_buff = (c_ubyte * stConvertParam.nImageLen)()
cdll.msvcrt.memcpy(byref(img_buff), stConvertParam.pImageBuffer, stConvertParam.nImageLen)
file_open.write(img_buff)
# print ("Save Image succeed!")
def exit_cam(self,):
# ch:停止取流 | en:Stop grab image
ret = self.cam.MV_CC_StopGrabbing()
if ret != 0:
print ("stop grabbing fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
# ch:关闭设备 | Close device
ret = self.cam.MV_CC_CloseDevice()
if ret != 0:
print ("close deivce fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
# ch:销毁句柄 | Destroy handle
ret = self.cam.MV_CC_DestroyHandle()
if ret != 0:
print ("destroy handle fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
del self.data_buf
if __name__ == "__main__":
hhv = HHV() | hk_class.py | from os import times
import cv2
import numpy as np
import time
from ctypes import *
import sys
from numpy.lib.type_check import imag
sys.path.append("C:\Program Files (x86)\MVS\Development\Samples\Python\MvImport")
from MvCameraControl_class import *
class HHV:
def __init__(self,):
self.init_cam()
self.abs_path = 'D:\robot_code\lixiang\AfterConvert_RGB0.jpg'
def get_image_array(self, image_size=None, index=0):
self.save_image2local(index=index)
img = cv2.imread("AfterConvert_RGB0.jpg")
if image_size:
return cv2.resize(img, (image_size, image_size))
return img
def init_cam(self,):
deviceList = MV_CC_DEVICE_INFO_LIST()
tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
nConnectionNum = 0
# ch:创建相机实例 | en:Creat Camera Object
self.cam = MvCamera()
# ch:选择设备并创建句柄 | en:Select device and create handle
stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)],
POINTER(MV_CC_DEVICE_INFO)).contents
ret = self.cam.MV_CC_CreateHandle(stDeviceList)
# ch:打开设备 | en:Open device
ret = self.cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
# ch:设置触发模式为off | en:Set trigger mode as off
ret = self.cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF)
# ch:获取数据包大小 | en:Get payload size
stParam = MVCC_INTVALUE()
memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
ret = self.cam.MV_CC_GetIntValue("PayloadSize", stParam)
self.nPayloadSize = stParam.nCurValue
def save_image2local(self, index=0):
# ch:开始取流 | en:Start grab image
ret = self.cam.MV_CC_StartGrabbing()
stDeviceList = MV_FRAME_OUT_INFO_EX()
memset(byref(stDeviceList), 0, sizeof(stDeviceList))
self.data_buf = (c_ubyte * self.nPayloadSize)()
ret = self.cam.MV_CC_GetOneFrameTimeout(byref(self.data_buf), self.nPayloadSize, stDeviceList, 1000)
if ret == 0:
# print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stDeviceList.nWidth, stDeviceList.nHeight, stDeviceList.nFrameNum))
nRGBSize = stDeviceList.nWidth * stDeviceList.nHeight * 3
stConvertParam=MV_SAVE_IMAGE_PARAM_EX()
stConvertParam.nWidth = stDeviceList.nWidth
stConvertParam.nHeight = stDeviceList.nHeight
stConvertParam.pData = self.data_buf
stConvertParam.nDataLen = stDeviceList.nFrameLen
stConvertParam.enPixelType = stDeviceList.enPixelType
stConvertParam.nImageLen = stConvertParam.nDataLen
stConvertParam.nJpgQuality = 70
stConvertParam.enImageType = MV_Image_Jpeg
stConvertParam.pImageBuffer = (c_ubyte * nRGBSize)()
stConvertParam.nBufferSize = nRGBSize
# ret = self.cam.MV_CC_ConvertPixelType(stConvertParam)
# print(stConvertParam.nImageLen)
ret = self.cam.MV_CC_SaveImageEx2(stConvertParam)
if ret != 0:
print ("convert pixel fail ! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
file_path = "AfterConvert_RGB"+str(index)+".jpg"
file_open = open(file_path.encode('ascii'), 'wb+')
img_buff = (c_ubyte * stConvertParam.nImageLen)()
cdll.msvcrt.memcpy(byref(img_buff), stConvertParam.pImageBuffer, stConvertParam.nImageLen)
file_open.write(img_buff)
# print ("Save Image succeed!")
def exit_cam(self,):
# ch:停止取流 | en:Stop grab image
ret = self.cam.MV_CC_StopGrabbing()
if ret != 0:
print ("stop grabbing fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
# ch:关闭设备 | Close device
ret = self.cam.MV_CC_CloseDevice()
if ret != 0:
print ("close deivce fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
# ch:销毁句柄 | Destroy handle
ret = self.cam.MV_CC_DestroyHandle()
if ret != 0:
print ("destroy handle fail! ret[0x%x]" % ret)
del self.data_buf
sys.exit()
del self.data_buf
if __name__ == "__main__":
hhv = HHV() | 0.105769 | 0.09343 |
import itertools
import cv2
import pandas as pd
from . import PERCENT, LOGGER
from .stream_parser import StreamParser
from .util import timeify, compute_minimum_kernel_density, bisect
class Segmenter:
def __init__(self, filename, view, config):
self.filename = filename
self.stream = StreamParser(filename)
self.view = view
self.interval = config.get("polling_interval", 5)
self.frames = self.stream.sample_frames(interval=self.interval)
self.confidence = [
(time, self.calculate_frame_confidence(scene, PERCENT, view.ports))
for (time, scene) in self.frames
]
self.confidence = pd.DataFrame(
self.confidence, columns=["time", "conf"]
)
def calculate_frame_confidence(self, scene, feature, rois):
"""Estimate the maximum correlation of any ROI in _scene_
to the unscaled _feature_.
"""
scaled_feature = cv2.resize(
feature, (0, 0), fx=self.view.scale, fy=self.view.scale
)
scaled_feature = cv2.Laplacian(scaled_feature, cv2.CV_8U)
percent_corrs = []
for roi in rois:
if roi is not None:
scene_roi = scene[
roi.top : (roi.top + roi.height),
roi.left : (roi.left + roi.width),
]
scene_roi = cv2.Laplacian(scene_roi, cv2.CV_8U)
corr_map = cv2.matchTemplate(
scene_roi, scaled_feature, cv2.TM_CCOEFF_NORMED
)
_, max_corr, _, _ = cv2.minMaxLoc(corr_map)
percent_corrs.append(max_corr)
return max(percent_corrs)
def get_threshold(self):
"""Return an approximate threshold value to decide whether a frame
contains Melee.
"""
confs = self.confidence["conf"]
return compute_minimum_kernel_density(confs)
def get_segments(self, threshold):
"""Return the approximate match start and end times for
the given video.
"""
# Perform median smoothing.
self.confidence["median"] = self.confidence["conf"].rolling(5).median()
self.confidence["median"] = self.confidence["median"].fillna(
method="bfill"
)
self.confidence["median"] = self.confidence["median"].fillna(
method="ffill"
)
# Now classify as Melee/no Melee based on whether we are greater/less
# than the threshold.
groups = itertools.groupby(
self.confidence.iterrows(),
lambda row: row[1]["median"] > threshold,
)
groups = [(k, list(g)) for k, g in groups]
segments = [
(self.interval * g[0][0], self.interval * g[-1][0])
for k, g in groups
if k
]
for idx, segment in enumerate(segments):
start, end = segment
LOGGER.warning(
"Estimated game %d is %s-%s",
idx + 1,
timeify(start),
timeify(end),
)
return segments
def refine_segments(self, segments):
for idx, segment in enumerate(segments):
start, end = segment
start = self.find_segment_boundary(start, 0.5)
end = self.find_segment_boundary(end, 0.5)
segments[idx] = (start, end)
LOGGER.warning(
"Estimated game %d is %s-%s",
idx + 1,
timeify(start),
timeify(end),
)
return segments
def find_segment_boundary(self, time, tolerance):
"""Find the time index of a match segment boundary (start or end)
near _time_, accurate to within _tolerance_ seconds.
Uses the bisection method to find an approximate solution for
f(t) = conf_at(t) - self.threshold = 0.
"""
threshold = self.get_threshold()
def conf_at(time):
scene = self.stream.get_frame(time)
if scene is not None:
conf = self.calculate_frame_confidence(
scene, PERCENT, self.view.ports
)
return conf - threshold
return 0 - threshold
window = self.interval
for _ in range(20):
start = max(0, time - window)
# Have to read from strictly before the end of the video.
end = min(self.stream.length - tolerance, time + window)
try:
return bisect(conf_at, start, end, tolerance)
except ValueError: # bad interval --- no sign change
window += tolerance
# Make sure we didn't hit the boundaries of the video.
if start == 0:
return start
if end == self.stream.length - tolerance:
return end
raise ValueError("Could not find a match boundary.") | downsmash/segmenter.py |
import itertools
import cv2
import pandas as pd
from . import PERCENT, LOGGER
from .stream_parser import StreamParser
from .util import timeify, compute_minimum_kernel_density, bisect
class Segmenter:
def __init__(self, filename, view, config):
self.filename = filename
self.stream = StreamParser(filename)
self.view = view
self.interval = config.get("polling_interval", 5)
self.frames = self.stream.sample_frames(interval=self.interval)
self.confidence = [
(time, self.calculate_frame_confidence(scene, PERCENT, view.ports))
for (time, scene) in self.frames
]
self.confidence = pd.DataFrame(
self.confidence, columns=["time", "conf"]
)
def calculate_frame_confidence(self, scene, feature, rois):
"""Estimate the maximum correlation of any ROI in _scene_
to the unscaled _feature_.
"""
scaled_feature = cv2.resize(
feature, (0, 0), fx=self.view.scale, fy=self.view.scale
)
scaled_feature = cv2.Laplacian(scaled_feature, cv2.CV_8U)
percent_corrs = []
for roi in rois:
if roi is not None:
scene_roi = scene[
roi.top : (roi.top + roi.height),
roi.left : (roi.left + roi.width),
]
scene_roi = cv2.Laplacian(scene_roi, cv2.CV_8U)
corr_map = cv2.matchTemplate(
scene_roi, scaled_feature, cv2.TM_CCOEFF_NORMED
)
_, max_corr, _, _ = cv2.minMaxLoc(corr_map)
percent_corrs.append(max_corr)
return max(percent_corrs)
def get_threshold(self):
"""Return an approximate threshold value to decide whether a frame
contains Melee.
"""
confs = self.confidence["conf"]
return compute_minimum_kernel_density(confs)
def get_segments(self, threshold):
"""Return the approximate match start and end times for
the given video.
"""
# Perform median smoothing.
self.confidence["median"] = self.confidence["conf"].rolling(5).median()
self.confidence["median"] = self.confidence["median"].fillna(
method="bfill"
)
self.confidence["median"] = self.confidence["median"].fillna(
method="ffill"
)
# Now classify as Melee/no Melee based on whether we are greater/less
# than the threshold.
groups = itertools.groupby(
self.confidence.iterrows(),
lambda row: row[1]["median"] > threshold,
)
groups = [(k, list(g)) for k, g in groups]
segments = [
(self.interval * g[0][0], self.interval * g[-1][0])
for k, g in groups
if k
]
for idx, segment in enumerate(segments):
start, end = segment
LOGGER.warning(
"Estimated game %d is %s-%s",
idx + 1,
timeify(start),
timeify(end),
)
return segments
def refine_segments(self, segments):
for idx, segment in enumerate(segments):
start, end = segment
start = self.find_segment_boundary(start, 0.5)
end = self.find_segment_boundary(end, 0.5)
segments[idx] = (start, end)
LOGGER.warning(
"Estimated game %d is %s-%s",
idx + 1,
timeify(start),
timeify(end),
)
return segments
def find_segment_boundary(self, time, tolerance):
"""Find the time index of a match segment boundary (start or end)
near _time_, accurate to within _tolerance_ seconds.
Uses the bisection method to find an approximate solution for
f(t) = conf_at(t) - self.threshold = 0.
"""
threshold = self.get_threshold()
def conf_at(time):
scene = self.stream.get_frame(time)
if scene is not None:
conf = self.calculate_frame_confidence(
scene, PERCENT, self.view.ports
)
return conf - threshold
return 0 - threshold
window = self.interval
for _ in range(20):
start = max(0, time - window)
# Have to read from strictly before the end of the video.
end = min(self.stream.length - tolerance, time + window)
try:
return bisect(conf_at, start, end, tolerance)
except ValueError: # bad interval --- no sign change
window += tolerance
# Make sure we didn't hit the boundaries of the video.
if start == 0:
return start
if end == self.stream.length - tolerance:
return end
raise ValueError("Could not find a match boundary.") | 0.710829 | 0.290975 |
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.shortcuts import redirect, get_object_or_404, resolve_url
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.views.generic import ListView, CreateView
from django.utils.translation import ugettext as _
from django.conf import settings
from datetime import datetime
from django.views.generic.edit import FormMixin, DeleteView, UpdateView
from guardian.decorators import permission_required_or_403 as permission_required
from chatbot.models import MessageQueue
from abonapp.models import Abon
from djing import httpresponse_to_referrer
from djing.lib import safe_int, MultipleException, RuTimedelta
from djing.lib.decorators import only_admins, json_view
from .handle import TaskException
from .models import Task, ExtraComment
from .forms import TaskFrm, ExtraCommentForm
login_decs = login_required, only_admins
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class NewTasksView(ListView):
"""
Show new tasks
"""
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='S') \
.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FailedTasksView(NewTasksView):
"""
Show crashed tasks
"""
template_name = 'taskapp/tasklist_failed.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='C') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FinishedTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_finish.html'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='F') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class OwnTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_own.html'
def get_queryset(self):
# Attached and not finished tasks
return Task.objects.filter(author=self.request.user) \
.exclude(state='F') \
.select_related('abon', 'abon__street', 'abon__group')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class MyTaskListView(NewTasksView):
template_name = 'taskapp/tasklist.html'
def get_queryset(self):
# Tasks in which I participated
return Task.objects.filter(recipients=self.request.user) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.can_viewall'), name='dispatch')
class AllTasksListView(ListView):
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist_all.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class EmptyTasksListView(NewTasksView):
template_name = 'taskapp/tasklist_empty.html'
def get_queryset(self):
return Task.objects.annotate(reccount=Count('recipients')).filter(reccount__lt=1)
@login_required
@only_admins
@permission_required('taskapp.delete_task')
def task_delete(request, task_id):
task = get_object_or_404(Task, id=task_id)
# prevent to delete task that assigned to me
if request.user.is_superuser or request.user not in task.recipients.all():
task.delete()
else:
messages.warning(request, _('You cannot delete task that assigned to you'))
return redirect('taskapp:home')
@method_decorator(login_decs, name='dispatch')
class TaskUpdateView(UpdateView):
http_method_names = ('get', 'post')
template_name = 'taskapp/add_edit_task.html'
form_class = TaskFrm
context_object_name = 'task'
def get_object(self, queryset=None):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
uname = self.request.GET.get('uname')
if uname:
self.selected_abon = Abon.objects.get(username=uname)
return
else:
task = get_object_or_404(Task, pk=task_id)
self.selected_abon = task.abon
return task
def dispatch(self, request, *args, **kwargs):
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
if not request.user.has_perm('taskapp.add_task'):
raise PermissionDenied
else:
if not request.user.has_perm('taskapp.change_task'):
raise PermissionDenied
try:
return super(TaskUpdateView, self).dispatch(request, *args, **kwargs)
except TaskException as e:
messages.error(request, e)
return httpresponse_to_referrer(request)
def get_form_kwargs(self):
kwargs = super(TaskUpdateView, self).get_form_kwargs()
if hasattr(self, 'selected_abon'):
kwargs.update({'initial_abon': self.selected_abon})
return kwargs
def form_valid(self, form):
try:
self.object = form.save()
if self.object.author is None:
self.object.author = self.request.user
self.object.save(update_fields=('author',))
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
log_text = _('Task has successfully created')
else:
log_text = _('Task has changed successfully')
messages.add_message(self.request, messages.SUCCESS, log_text)
self.object.send_notification()
except MultipleException as e:
for err in e.err_list:
messages.add_message(self.request, messages.WARNING, err)
except TaskException as e:
messages.add_message(self.request, messages.ERROR, e)
return FormMixin.form_valid(self, form)
def get_context_data(self, **kwargs):
if hasattr(self, 'selected_abon'):
selected_abon = self.selected_abon
else:
selected_abon = None
now_date = datetime.now().date()
task = self.object
if task:
if task.out_date > now_date:
time_diff = "%s: %s" % (_('time left'), RuTimedelta(task.out_date - now_date))
else:
time_diff = _("Expired timeout -%(time_left)s") % {'time_left': RuTimedelta(now_date - task.out_date)}
else:
time_diff = None
context = {
'selected_abon': selected_abon,
'time_diff': time_diff,
'comments': ExtraComment.objects.filter(task=task),
'comment_form': ExtraCommentForm()
}
context.update(kwargs)
return super(TaskUpdateView, self).get_context_data(**context)
def get_success_url(self):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
return resolve_url('taskapp:own_tasks')
else:
return resolve_url('taskapp:edit', task_id)
def form_invalid(self, form):
messages.add_message(self.request, messages.ERROR, _('fix form errors'))
return super(TaskUpdateView, self).form_invalid(form)
@login_required
@only_admins
def task_finish(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.finish(request.user)
task.send_notification()
except MultipleException as errs:
for err in errs.err_list:
messages.add_message(request, messages.constants.ERROR, err)
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
def task_failed(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.do_fail(request.user)
task.send_notification()
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
@permission_required('taskapp.can_remind')
def remind(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.save(update_fields=('state',))
task.send_notification()
messages.success(request, _('Task has been reminded'))
except MultipleException as errs:
for err in errs.err_list:
messages.add_message(request, messages.constants.ERROR, err)
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@json_view
def check_news(request):
if request.user.is_authenticated and request.user.is_admin:
msg = MessageQueue.objects.pop(user=request.user, tag='taskap')
if msg is not None:
r = {
'auth': True,
'exist': True,
'content': msg,
'title': _('Task')
}
else:
r = {'auth': True, 'exist': False}
else:
r = {'auth': False}
return r
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.add_extracomment'), name='dispatch')
class NewCommentView(CreateView):
form_class = ExtraCommentForm
model = ExtraComment
http_method_names = ('get', 'post')
def form_valid(self, form):
self.task = get_object_or_404(Task, pk=self.kwargs.get('task_id'))
self.object = form.make_save(
author=self.request.user,
task=self.task
)
return FormMixin.form_valid(self, form)
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.delete_extracomment'), name='dispatch')
class DeleteCommentView(DeleteView):
model = ExtraComment
pk_url_kwarg = 'comment_id'
http_method_names = ('get', 'post')
template_name = 'taskapp/comments/extracomment_confirm_delete.html'
def get_context_data(self, **kwargs):
context = {
'task_id': self.kwargs.get('task_id')
}
context.update(kwargs)
return super(DeleteCommentView, self).get_context_data(**context)
def get_success_url(self):
task_id = self.kwargs.get('task_id')
return resolve_url('taskapp:edit', task_id) | taskapp/views.py | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.shortcuts import redirect, get_object_or_404, resolve_url
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.views.generic import ListView, CreateView
from django.utils.translation import ugettext as _
from django.conf import settings
from datetime import datetime
from django.views.generic.edit import FormMixin, DeleteView, UpdateView
from guardian.decorators import permission_required_or_403 as permission_required
from chatbot.models import MessageQueue
from abonapp.models import Abon
from djing import httpresponse_to_referrer
from djing.lib import safe_int, MultipleException, RuTimedelta
from djing.lib.decorators import only_admins, json_view
from .handle import TaskException
from .models import Task, ExtraComment
from .forms import TaskFrm, ExtraCommentForm
login_decs = login_required, only_admins
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class NewTasksView(ListView):
"""
Show new tasks
"""
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='S') \
.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FailedTasksView(NewTasksView):
"""
Show crashed tasks
"""
template_name = 'taskapp/tasklist_failed.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='C') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FinishedTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_finish.html'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='F') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class OwnTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_own.html'
def get_queryset(self):
# Attached and not finished tasks
return Task.objects.filter(author=self.request.user) \
.exclude(state='F') \
.select_related('abon', 'abon__street', 'abon__group')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class MyTaskListView(NewTasksView):
template_name = 'taskapp/tasklist.html'
def get_queryset(self):
# Tasks in which I participated
return Task.objects.filter(recipients=self.request.user) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.can_viewall'), name='dispatch')
class AllTasksListView(ListView):
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist_all.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class EmptyTasksListView(NewTasksView):
template_name = 'taskapp/tasklist_empty.html'
def get_queryset(self):
return Task.objects.annotate(reccount=Count('recipients')).filter(reccount__lt=1)
@login_required
@only_admins
@permission_required('taskapp.delete_task')
def task_delete(request, task_id):
task = get_object_or_404(Task, id=task_id)
# prevent to delete task that assigned to me
if request.user.is_superuser or request.user not in task.recipients.all():
task.delete()
else:
messages.warning(request, _('You cannot delete task that assigned to you'))
return redirect('taskapp:home')
@method_decorator(login_decs, name='dispatch')
class TaskUpdateView(UpdateView):
http_method_names = ('get', 'post')
template_name = 'taskapp/add_edit_task.html'
form_class = TaskFrm
context_object_name = 'task'
def get_object(self, queryset=None):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
uname = self.request.GET.get('uname')
if uname:
self.selected_abon = Abon.objects.get(username=uname)
return
else:
task = get_object_or_404(Task, pk=task_id)
self.selected_abon = task.abon
return task
def dispatch(self, request, *args, **kwargs):
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
if not request.user.has_perm('taskapp.add_task'):
raise PermissionDenied
else:
if not request.user.has_perm('taskapp.change_task'):
raise PermissionDenied
try:
return super(TaskUpdateView, self).dispatch(request, *args, **kwargs)
except TaskException as e:
messages.error(request, e)
return httpresponse_to_referrer(request)
def get_form_kwargs(self):
kwargs = super(TaskUpdateView, self).get_form_kwargs()
if hasattr(self, 'selected_abon'):
kwargs.update({'initial_abon': self.selected_abon})
return kwargs
def form_valid(self, form):
try:
self.object = form.save()
if self.object.author is None:
self.object.author = self.request.user
self.object.save(update_fields=('author',))
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
log_text = _('Task has successfully created')
else:
log_text = _('Task has changed successfully')
messages.add_message(self.request, messages.SUCCESS, log_text)
self.object.send_notification()
except MultipleException as e:
for err in e.err_list:
messages.add_message(self.request, messages.WARNING, err)
except TaskException as e:
messages.add_message(self.request, messages.ERROR, e)
return FormMixin.form_valid(self, form)
def get_context_data(self, **kwargs):
if hasattr(self, 'selected_abon'):
selected_abon = self.selected_abon
else:
selected_abon = None
now_date = datetime.now().date()
task = self.object
if task:
if task.out_date > now_date:
time_diff = "%s: %s" % (_('time left'), RuTimedelta(task.out_date - now_date))
else:
time_diff = _("Expired timeout -%(time_left)s") % {'time_left': RuTimedelta(now_date - task.out_date)}
else:
time_diff = None
context = {
'selected_abon': selected_abon,
'time_diff': time_diff,
'comments': ExtraComment.objects.filter(task=task),
'comment_form': ExtraCommentForm()
}
context.update(kwargs)
return super(TaskUpdateView, self).get_context_data(**context)
def get_success_url(self):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
return resolve_url('taskapp:own_tasks')
else:
return resolve_url('taskapp:edit', task_id)
def form_invalid(self, form):
messages.add_message(self.request, messages.ERROR, _('fix form errors'))
return super(TaskUpdateView, self).form_invalid(form)
@login_required
@only_admins
def task_finish(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.finish(request.user)
task.send_notification()
except MultipleException as errs:
for err in errs.err_list:
messages.add_message(request, messages.constants.ERROR, err)
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
def task_failed(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.do_fail(request.user)
task.send_notification()
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
@permission_required('taskapp.can_remind')
def remind(request, task_id):
    """Re-save the task and re-send its notification as a reminder."""
    task = get_object_or_404(Task, id=task_id)
    try:
        # NOTE(review): only 'state' is re-saved here without being changed —
        # presumably the save itself triggers model-side effects; confirm.
        task.save(update_fields=('state',))
        task.send_notification()
        messages.success(request, _('Task has been reminded'))
    except MultipleException as errs:
        for err in errs.err_list:
            messages.error(request, err)
    except TaskException as err:
        messages.error(request, err)
    return redirect('taskapp:home')
@json_view
def check_news(request):
    """Pop one queued task notification for the current admin user.

    Returns a JSON-ready dict: {'auth': False} for anonymous/non-admin
    users, otherwise 'exist' plus the message payload when one is queued.
    """
    user = request.user
    if not (user.is_authenticated and user.is_admin):
        return {'auth': False}
    # NOTE(review): tag 'taskap' (sic) must match the producer side — verify.
    msg = MessageQueue.objects.pop(user=user, tag='taskap')
    if msg is None:
        return {'auth': True, 'exist': False}
    return {
        'auth': True,
        'exist': True,
        'content': msg,
        'title': _('Task')
    }
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.add_extracomment'), name='dispatch')
class NewCommentView(CreateView):
    """Create an ExtraComment on a task, authored by the current user."""

    model = ExtraComment
    form_class = ExtraCommentForm
    http_method_names = ('get', 'post')

    def form_valid(self, form):
        # Bind the new comment to the task referenced in the URL.
        self.task = get_object_or_404(Task, pk=self.kwargs.get('task_id'))
        self.object = form.make_save(author=self.request.user, task=self.task)
        return FormMixin.form_valid(self, form)
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.delete_extracomment'), name='dispatch')
class DeleteCommentView(DeleteView):
    """Confirmation + deletion view for a single task comment."""

    model = ExtraComment
    pk_url_kwarg = 'comment_id'
    http_method_names = ('get', 'post')
    template_name = 'taskapp/comments/extracomment_confirm_delete.html'

    def get_context_data(self, **kwargs):
        # Expose the owning task id so the template can link back to it.
        context = dict(task_id=self.kwargs.get('task_id'))
        context.update(kwargs)
        return super(DeleteCommentView, self).get_context_data(**context)

    def get_success_url(self):
        return resolve_url('taskapp:edit', self.kwargs.get('task_id'))
"""Tests for glazier.lib.winpe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from glazier.lib import constants
from glazier.lib import identifier
import mock
from pyfakefs import fake_filesystem
# Deterministic UUID used to build the expected image identifier below.
TEST_UUID = identifier.uuid.UUID('12345678123456781234567812345678')
# Example BIOS serial number in the format reported by hw_info.
TEST_SERIAL = '1A19SEL90000R90DZN7A'
# Expected image id: BIOS serial plus the first 7 characters of the UUID.
TEST_ID = TEST_SERIAL + '-' + str(TEST_UUID)[:7]
class IdentifierTest(absltest.TestCase):
  """Tests for image-id generation, registry storage and file fallback."""

  def setUp(self):
    super(IdentifierTest, self).setUp()
    # Stub out WMI so hardware queries never touch the real machine.
    wmi_patcher = mock.patch.object(
        identifier.hw_info.wmi_query, 'WMIQuery', autospec=True)
    self.addCleanup(wmi_patcher.stop)
    wmi_patcher.start()
    # Route the identifier module's file access through an in-memory FS.
    self.fs = fake_filesystem.FakeFilesystem()
    identifier.open = fake_filesystem.FakeFileOpen(self.fs)
    identifier.os = fake_filesystem.FakeOsModule(self.fs)

  def _make_build_info(self, contents):
    """Writes a fake build_info.yaml into the fake filesystem."""
    self.fs.create_file(
        '/%s/build_info.yaml' % identifier.constants.SYS_CACHE,
        contents=contents)

  @mock.patch.object(identifier.hw_info.HWInfo, 'BiosSerial', autospec=True)
  @mock.patch.object(identifier.uuid, 'uuid4', autospec=True)
  def test_generate_id(self, mock_uuid4, mock_bios_serial):
    mock_uuid4.return_value = str(TEST_UUID)[:7]
    mock_bios_serial.return_value = TEST_SERIAL
    self.assertEqual(identifier._generate_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  @mock.patch.object(identifier, '_generate_id', autospec=True)
  def test_set_id(self, mock_generate_id, mock_set_value):
    mock_generate_id.return_value = TEST_ID
    identifier._set_id()
    mock_set_value.assert_called_with('image_id', TEST_ID, path=constants.REG_ROOT)
    self.assertEqual(identifier._set_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_set_reg_error(self, mock_set_value):
    mock_set_value.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier._set_id)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_check_file(self, mock_set_value):
    self._make_build_info(
        '{BUILD: {opt 1: true, TIMER_opt 2: some value, image_id: 12345}}\n')
    identifier._check_file()
    mock_set_value.assert_called_with('image_id', 12345, path=constants.REG_ROOT)
    self.assertEqual(identifier._check_file(), 12345)

  def test_check_file_no_id(self):
    self._make_build_info(
        '{BUILD: {opt 1: true, TIMER_opt 2: some value, image_num: 12345}}\n')
    self.assertRaises(identifier.Error, identifier._check_file)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_check_file_reg_error(self, mock_set_value):
    self._make_build_info(
        '{BUILD: {opt 1: true, TIMER_opt 2: some value, image_id: 12345}}\n')
    mock_set_value.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier._check_file)

  def test_check_file_no_file(self):
    self.assertRaises(identifier.Error, identifier._check_file)

  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  def test_check_id_get(self, mock_get_value):
    mock_get_value.return_value = TEST_ID
    self.assertEqual(identifier.check_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_get_error(self, mock_check_winpe, mock_get_value):
    mock_check_winpe.return_value = False
    mock_get_value.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier.check_id)

  @mock.patch.object(identifier, '_set_id', autospec=True)
  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_set(self, mock_check_winpe, mock_get_value, mock_set_id):
    mock_get_value.return_value = None
    mock_check_winpe.return_value = True
    identifier.check_id()
    self.assertTrue(mock_set_id.called)

  @mock.patch.object(identifier, '_check_file', autospec=True)
  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_file(self, mock_check_winpe, mock_get_value, mock_check_file):
    mock_get_value.return_value = None
    mock_check_winpe.return_value = False
    mock_check_file.return_value = TEST_ID
    self.assertEqual(identifier.check_id(), TEST_ID)
if __name__ == '__main__':
absltest.main() | glazier/lib/identifier_test.py | """Tests for glazier.lib.winpe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from glazier.lib import constants
from glazier.lib import identifier
import mock
from pyfakefs import fake_filesystem
# Fixed UUID so the derived image id is deterministic in tests.
TEST_UUID = identifier.uuid.UUID('12345678123456781234567812345678')
# Sample BIOS serial as returned by the hardware-info layer.
TEST_SERIAL = '1A19SEL90000R90DZN7A'
# Expected identifier: serial + '-' + first 7 chars of the UUID string.
TEST_ID = TEST_SERIAL + '-' + str(TEST_UUID)[:7]
class IdentifierTest(absltest.TestCase):
  """Covers _generate_id, _set_id, _check_file and check_id."""

  # Canned build_info.yaml payloads, with and without an image_id key.
  _YAML_WITH_ID = (
      '{BUILD: {opt 1: true, TIMER_opt 2: some value, image_id: 12345}}\n')
  _YAML_WITHOUT_ID = (
      '{BUILD: {opt 1: true, TIMER_opt 2: some value, image_num: 12345}}\n')

  def setUp(self):
    super(IdentifierTest, self).setUp()
    patcher = mock.patch.object(
        identifier.hw_info.wmi_query, 'WMIQuery', autospec=True)
    self.addCleanup(patcher.stop)
    patcher.start()
    self.fs = fake_filesystem.FakeFilesystem()
    # Redirect file I/O inside the identifier module to the fake FS.
    identifier.open = fake_filesystem.FakeFileOpen(self.fs)
    identifier.os = fake_filesystem.FakeOsModule(self.fs)

  def _build_info_path(self):
    return '/%s/build_info.yaml' % identifier.constants.SYS_CACHE

  @mock.patch.object(identifier.hw_info.HWInfo, 'BiosSerial', autospec=True)
  @mock.patch.object(identifier.uuid, 'uuid4', autospec=True)
  def test_generate_id(self, uuid4_mock, serial_mock):
    uuid4_mock.return_value = str(TEST_UUID)[:7]
    serial_mock.return_value = TEST_SERIAL
    self.assertEqual(identifier._generate_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  @mock.patch.object(identifier, '_generate_id', autospec=True)
  def test_set_id(self, generate_mock, set_value_mock):
    generate_mock.return_value = TEST_ID
    identifier._set_id()
    set_value_mock.assert_called_with(
        'image_id', TEST_ID, path=constants.REG_ROOT)
    self.assertEqual(identifier._set_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_set_reg_error(self, set_value_mock):
    set_value_mock.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier._set_id)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_check_file(self, set_value_mock):
    self.fs.create_file(self._build_info_path(), contents=self._YAML_WITH_ID)
    identifier._check_file()
    set_value_mock.assert_called_with(
        'image_id', 12345, path=constants.REG_ROOT)
    self.assertEqual(identifier._check_file(), 12345)

  def test_check_file_no_id(self):
    self.fs.create_file(self._build_info_path(), contents=self._YAML_WITHOUT_ID)
    self.assertRaises(identifier.Error, identifier._check_file)

  @mock.patch.object(identifier.registry, 'set_value', autospec=True)
  def test_check_file_reg_error(self, set_value_mock):
    self.fs.create_file(self._build_info_path(), contents=self._YAML_WITH_ID)
    set_value_mock.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier._check_file)

  def test_check_file_no_file(self):
    self.assertRaises(identifier.Error, identifier._check_file)

  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  def test_check_id_get(self, get_value_mock):
    get_value_mock.return_value = TEST_ID
    self.assertEqual(identifier.check_id(), TEST_ID)

  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_get_error(self, winpe_mock, get_value_mock):
    winpe_mock.return_value = False
    get_value_mock.side_effect = identifier.registry.Error
    self.assertRaises(identifier.Error, identifier.check_id)

  @mock.patch.object(identifier, '_set_id', autospec=True)
  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_set(self, winpe_mock, get_value_mock, set_id_mock):
    get_value_mock.return_value = None
    winpe_mock.return_value = True
    identifier.check_id()
    self.assertTrue(set_id_mock.called)

  @mock.patch.object(identifier, '_check_file', autospec=True)
  @mock.patch.object(identifier.registry, 'get_value', autospec=True)
  @mock.patch.object(identifier.winpe, 'check_winpe', autospec=True)
  def test_check_id_file(self, winpe_mock, get_value_mock, check_file_mock):
    get_value_mock.return_value = None
    winpe_mock.return_value = False
    check_file_mock.return_value = TEST_ID
    self.assertEqual(identifier.check_id(), TEST_ID)
if __name__ == '__main__':
absltest.main() | 0.80525 | 0.294862 |
try:
from TACT import logger
except ImportError:
pass
import pandas as pd
import sys
import matplotlib.pyplot as plt
plt.ioff() # setting to non-interactive
import numpy as np
import sys
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
class Adjustments:
    """Turbulence-intensity (TI) adjustment methods applied to RSD data.

    Each ``perform_*`` method fits an adjustment on the training split
    (rows with ``split == True``), applies it to the test split, and
    records regression statistics of the adjusted TI against the
    reference TI.

    Fixes vs. the legacy implementation:
      * SS-S Ht1 now fits on the Ht1 training columns (it previously
        reused the hub-height RSD_TI/Ref_TI fit).
      * SS-SF Ht1 stats are now computed on the adjusted test split (the
        raw input frame was previously passed, which always produced
        placeholder "NaN" rows).
    """

    # Column layout shared by every per-adjustment results frame.
    _RESULT_COLUMNS = (
        "sensor", "height", "adjustment",
        "m", "c", "rsquared", "difference", "mse", "rmse",
    )
    # Per-height column names: (reference TI, RSD TI, adjusted-TI output,
    # RSD standard deviation, adjusted representative-TI output).
    _HEIGHT_COLS = [
        ("Ane_TI_Ht%d" % n, "RSD_TI_Ht%d" % n, "adjTI_RSD_TI_Ht%d" % n,
         "RSD_SD_Ht%d" % n, "adjRepTI_RSD_RepTI_Ht%d" % n)
        for n in range(1, 5)
    ]

    def __init__(self, raw_data="", adjustments_list="", baseResultsLists=""):
        self.raw_data = raw_data
        self.adjusted_data = pd.DataFrame()
        # TODO: make this a dictionary of results keyed by adjustment name.
        self.results_stats = []

    def get_regression(self, x, y):
        """Fit a linear regression of ``y`` on ``x``.

        Returns ``[slope, intercept, r_squared, mean |x - y|, mse, rmse]``;
        all NaN when fewer than two paired non-NaN samples are available.
        (Candidate for deprecation in favor of get_modelRegression.)
        """
        df = pd.DataFrame()
        df["x"] = x
        df["y"] = y
        df = df.dropna()
        if len(df) <= 1:
            return [np.nan] * 6
        data, target = df[["x"]], df["y"]
        lm = LinearRegression()
        lm.fit(data, target)
        predict = lm.predict(data)
        mse = mean_squared_error(target, predict, multioutput="raw_values")
        x_f = df["x"].astype(float)
        y_f = df["y"].astype(float)
        return [
            lm.coef_[0],                # slope
            lm.intercept_,              # intercept
            lm.score(data, target),     # r-squared
            abs((x_f - y_f).mean()),    # mean absolute difference of means
            mse[0],
            np.sqrt(mse)[0],            # rmse
        ]

    def post_adjustment_stats(self, inputdata, results, ref_col, TI_col):
        """Add one row of regression stats for ``TI_col`` vs ``ref_col``.

        Falls back to a row of "NaN" placeholder strings when ``inputdata``
        is not a DataFrame containing both columns (legacy output format,
        kept for downstream consumers).
        """
        row = "TI_regression_" + TI_col + "_" + ref_col
        fields = ("m", "c", "rsquared", "difference", "mse", "rmse")
        if (isinstance(inputdata, pd.DataFrame)
                and ref_col in inputdata.columns
                and TI_col in inputdata.columns):
            stats = self.get_regression(inputdata[ref_col], inputdata[TI_col])
            for field, value in zip(fields, stats):
                results.loc[row, [field]] = value
        else:
            for field in fields:
                results.loc[row, [field]] = "NaN"
        return results

    def _empty_results(self):
        """Fresh, empty per-adjustment results frame."""
        return pd.DataFrame(columns=list(self._RESULT_COLUMNS))

    def _available_heights(self, inputdata):
        """Height column tuples whose reference and RSD TI columns exist."""
        return [
            cols for cols in self._HEIGHT_COLS
            if cols[0] in inputdata.columns and cols[1] in inputdata.columns
        ]

    def _finalize_results(self, results, label):
        """Stamp the adjustment label and drop the unused id columns."""
        results["adjustment"] = [label] * len(results)
        return results.drop(columns=["sensor", "height"])

    def perform_SS_S_adjustment(self, inputdata):
        """Simple linear TI adjustment (SS-S).

        Fits adjusted_TI = m * RSD_TI + c on the training split and applies
        it to the test split, for the hub height and every instrumented
        height present.  Note: representative TI computed with original
        RSD_SD.

        Returns ``(inputdata_test, results, m, c)``.  Legacy quirk
        preserved: the returned m/c are reset to NaN whenever any pair has
        fewer than two valid test samples.
        """
        results = self._empty_results()
        inputdata_train = inputdata[inputdata["split"] == True].copy()
        inputdata_test = inputdata[inputdata["split"] == False].copy()
        m = np.nan
        c = np.nan
        if inputdata.empty or len(inputdata) < 2:
            # Not enough data anywhere: emit placeholder rows only.
            results = self.post_adjustment_stats(
                [None], results, "Ref_TI", "adjTI_RSD_TI")
            for ref_col, rsd_col, adj_col, _sd, _rep in self._available_heights(inputdata):
                results = self.post_adjustment_stats(
                    [None], results, ref_col, adj_col)
        else:
            # Hub-height pair first (it supplies the returned m/c), then
            # each instrumented height.
            pairs = [("Ref_TI", "RSD_TI", "adjTI_RSD_TI", True)]
            pairs += [
                (ref, rsd, adj, False)
                for ref, rsd, adj, _sd, _rep in self._available_heights(inputdata)
            ]
            for ref_col, rsd_col, adj_col, is_primary in pairs:
                valid = inputdata_test[[ref_col, rsd_col]].dropna()
                if len(valid) < 2:
                    results = self.post_adjustment_stats(
                        [None], results, ref_col, adj_col)
                    # Legacy quirk preserved: any sparse pair resets m/c.
                    m = np.nan
                    c = np.nan
                else:
                    model = self.get_regression(
                        inputdata_train[rsd_col], inputdata_train[ref_col])
                    if is_primary:
                        m, c = model[0], model[1]
                    inputdata_test[adj_col] = (
                        model[0] * inputdata_test[rsd_col] + model[1])
                    results = self.post_adjustment_stats(
                        inputdata_test, results, ref_col, adj_col)
        results = self._finalize_results(results, "SS-S")
        return inputdata_test, results, m, c

    def perform_SS_SF_adjustment(self, inputdata):
        """Simple + filtered linear TI adjustment (SS-SF).

        Training samples are filtered to TI < 0.3 before fitting; the fit
        is then applied to the test split.  Also writes the adjusted
        representative TI (adjusted TI + 1.28 * RSD SD).

        Returns ``(inputdata_test, results, m, c)``; m/c come from the
        hub-height fit only.
        """
        results = self._empty_results()
        inputdata_train = inputdata[inputdata["split"] == True].copy()
        inputdata_test = inputdata[inputdata["split"] == False].copy()
        m = np.nan
        c = np.nan
        if inputdata.empty or len(inputdata) < 2:
            results = self.post_adjustment_stats(
                [None], results, "Ref_TI", "adjTI_RSD_TI")
            for ref_col, rsd_col, adj_col, _sd, _rep in self._available_heights(inputdata):
                results = self.post_adjustment_stats(
                    [None], results, ref_col, adj_col)
        else:
            # Hub height: both columns filtered by the RSD TI threshold.
            keep = inputdata_train["RSD_TI"] < 0.3
            filt_ref = inputdata_train["Ref_TI"][keep]
            filt_rsd = inputdata_train["RSD_TI"][keep]
            valid = pd.DataFrame({"ref": filt_ref, "rsd": filt_rsd}).dropna()
            if len(valid) < 2:
                results = self.post_adjustment_stats(
                    [None], results, "Ref_TI", "adjTI_RSD_TI")
            else:
                model = self.get_regression(filt_rsd, filt_ref)
                m, c = model[0], model[1]
                adjusted = float(m) * inputdata_test["RSD_TI"] + float(c)
                inputdata_test["adjTI_RSD_TI"] = adjusted
                inputdata_test["adjRepTI_RSD_RepTI"] = (
                    adjusted + 1.28 * inputdata_test["RSD_SD"])
                results = self.post_adjustment_stats(
                    inputdata_test, results, "Ref_TI", "adjTI_RSD_TI")
            for ref_col, rsd_col, adj_col, sd_col, rep_col in self._available_heights(inputdata):
                # Heights: each column filtered by its own TI threshold
                # (behavior preserved from the original implementation).
                filt_ref = inputdata_train[ref_col][inputdata_train[ref_col] < 0.3]
                filt_rsd = inputdata_train[rsd_col][inputdata_train[rsd_col] < 0.3]
                valid = pd.DataFrame({"ref": filt_ref, "rsd": filt_rsd}).dropna()
                if len(valid) < 2:
                    results = self.post_adjustment_stats(
                        [None], results, ref_col, adj_col)
                else:
                    model = self.get_regression(filt_rsd, filt_ref)
                    adjusted = model[0] * inputdata_test[rsd_col] + model[1]
                    inputdata_test[adj_col] = adjusted
                    inputdata_test[rep_col] = (
                        adjusted + 1.28 * inputdata_test[sd_col])
                    results = self.post_adjustment_stats(
                        inputdata_test, results, ref_col, adj_col)
        results = self._finalize_results(results, "SS-SF")
        return inputdata_test, results, m, c
def empirical_stdAdjustment(
    inputdata,
    results,
    Ref_TI_col,
    RSD_TI_col,
    Ref_SD_col,
    RSD_SD_col,
    Ref_WS_col,
    RSD_WS_col,
):
    """Empirical RSD standard-deviation recalibration, binned by wind speed.

    For each reference wind-speed bin the RSD SD is passed through an
    empirically derived linear recalibration, and the adjusted TI is then
    recomputed as adjusted SD / RSD wind speed.  Rows outside every bin
    keep the unadjusted values.  Regression statistics of the adjusted TI
    against the reference TI are appended to ``results``.

    ``Ref_SD_col`` is accepted for signature compatibility but unused.
    Returns ``(adjusted copy of inputdata, updated results)``.

    Cleanups vs. the legacy implementation: removed unused locals and dead
    m/c assignments, deduplicated the three wind-speed-bin branches into a
    table-driven loop, and used the copied frame consistently in the bin
    masks (the original mixed ``inputdata``/``inputdata_test``, which hold
    identical values).
    """
    inputdata_test = inputdata.copy()
    adj = Adjustments()
    adjTI_col = "adjTI_" + RSD_TI_col
    if len(inputdata) < 2:
        # Not enough data to adjust or report: emit a placeholder row.
        results = adj.post_adjustment_stats([None], results, Ref_TI_col, adjTI_col)
        return inputdata_test, results
    # (ws_low, ws_high, slope, offset, fraction) per wind-speed bin;
    # coefficients are empirical — calibration source not recorded here.
    ws_bins = (
        (4, 8, 1.116763, 0.024685, 0.00029),
        (8, 12, 1.064564, 0.040596, -0.00161),
        (12, 16, 0.97865, 0.124371, -0.00093),
    )
    adjSD_col = "adj" + RSD_SD_col
    # Initialize the new columns from the unadjusted data.
    inputdata_test[adjSD_col] = inputdata_test[RSD_SD_col].copy()
    inputdata_test[adjTI_col] = inputdata_test[RSD_TI_col].copy()
    for ws_low, ws_high, slope, offset, fraction in ws_bins:
        in_bin = (inputdata_test[Ref_WS_col] >= ws_low) & (
            inputdata_test[Ref_WS_col] < ws_high
        )
        # Linear recalibration minus a small fractional correction.
        recal = (slope * inputdata_test[adjSD_col]) + offset
        inputdata_test.loc[in_bin, adjSD_col] = recal - (recal * fraction)
        inputdata_test.loc[in_bin, adjTI_col] = (
            inputdata_test[adjSD_col] / inputdata_test[RSD_WS_col]
        )
    results = adj.post_adjustment_stats(inputdata_test, results, Ref_TI_col, adjTI_col)
    return inputdata_test, results
from TACT import logger
except ImportError:
pass
import pandas as pd
import sys
import matplotlib.pyplot as plt
plt.ioff() # setting to non-interactive
import numpy as np
import sys
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
class Adjustments:
"""
document parameters
"""
def __init__(self, raw_data="", adjustments_list="", baseResultsLists=""):
self.raw_data = raw_data
self.adjusted_data = pd.DataFrame()
self.results_stats = (
[]
) # make this a dictionary of results with adjustment_list items as keys
def get_regression(self, x, y):
"""
Compute linear regression of data -> need to deprecate this function for get_modelRegression..
"""
df = pd.DataFrame()
df["x"] = x
df["y"] = y
df = df.dropna()
feature_name = "x"
target_name = "y"
data, target = df[[feature_name]], df[target_name]
if len(df) > 1:
x = df["x"].astype(float)
y = df["y"].astype(float)
lm = LinearRegression()
lm.fit(data, target)
predict = lm.predict(data)
result = [lm.coef_[0], lm.intercept_] # slope and intercept?
result.append(lm.score(data, target)) # r score?
result.append(abs((x - y).mean())) # mean diff?
mse = mean_squared_error(target, predict, multioutput="raw_values")
rmse = np.sqrt(mse)
result.append(mse[0])
result.append(rmse[0])
else:
result = [None, None, None, None, None, None]
result = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
# results order: m, c, r2, mean difference, mse, rmse
# logger.debug(result)
return result
def post_adjustment_stats(self, inputdata, results, ref_col, TI_col):
if isinstance(inputdata, pd.DataFrame):
fillEmpty = False
if ref_col in inputdata.columns and TI_col in inputdata.columns:
model_adjTI = self.get_regression(inputdata[ref_col], inputdata[TI_col])
name1 = "TI_regression_" + TI_col + "_" + ref_col
results.loc[name1, ["m"]] = model_adjTI[0]
results.loc[name1, ["c"]] = model_adjTI[1]
results.loc[name1, ["rsquared"]] = model_adjTI[2]
results.loc[name1, ["difference"]] = model_adjTI[3]
results.loc[name1, ["mse"]] = model_adjTI[4]
results.loc[name1, ["rmse"]] = model_adjTI[5]
else:
fillEmpty = True
else:
fillEmpty = True
if fillEmpty:
name1 = "TI_regression_" + TI_col + "_" + ref_col
results.loc[name1, ["m"]] = "NaN"
results.loc[name1, ["c"]] = "NaN"
results.loc[name1, ["rsquared"]] = "NaN"
results.loc[name1, ["difference"]] = "NaN"
results.loc[name1, ["mse"]] = "NaN"
results.loc[name1, ["rmse"]] = "NaN"
return results
def perform_SS_S_adjustment(self, inputdata):
"""
Note: Representative TI computed with original RSD_SD
"""
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
inputdata_train = inputdata[inputdata["split"] == True].copy()
inputdata_test = inputdata[inputdata["split"] == False].copy()
if inputdata.empty or len(inputdata) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
inputdata = False
else:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ref_TI"]
full["RSD_TI"] = inputdata_test["RSD_TI"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI"], inputdata_train["Ref_TI"]
)
m = model[0]
c = model[1]
RSD_TI = inputdata_test["RSD_TI"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht1"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht1"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI"], inputdata_train["Ref_TI"]
)
RSD_TI = inputdata_test["RSD_TI_Ht1"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht1"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht2"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht2"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht2"], inputdata_train["Ane_TI_Ht2"]
)
RSD_TI = inputdata_test["RSD_TI_Ht2"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht2"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht3"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht3"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht3"], inputdata_train["Ane_TI_Ht3"]
)
RSD_TI = inputdata_test["RSD_TI_Ht3"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht3"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht4"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht4"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht4"], inputdata_train["Ane_TI_Ht4"]
)
RSD_TI = inputdata_test["RSD_TI_Ht4"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht4"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
results["adjustment"] = ["SS-S"] * len(results)
results = results.drop(columns=["sensor", "height"])
return inputdata_test, results, m, c
def perform_SS_SF_adjustment(self, inputdata):
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
inputdata_train = inputdata[inputdata["split"] == True].copy()
inputdata_test = inputdata[inputdata["split"] == False].copy()
if inputdata.empty or len(inputdata) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
inputdata = False
else:
filtered_Ref_TI = inputdata_train["Ref_TI"][inputdata_train["RSD_TI"] < 0.3]
filtered_RSD_TI = inputdata_train["RSD_TI"][inputdata_train["RSD_TI"] < 0.3]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None],
results,
"Ref_TI",
"adjTI_RSD_TI",
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
m = model[0]
c = model[1]
RSD_TI = inputdata_test["RSD_TI"].copy()
RSD_TI = (float(model[0]) * RSD_TI) + float(model[1])
inputdata_test["adjTI_RSD_TI"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht1"][
inputdata_train["Ane_TI_Ht1"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht1"][
inputdata_train["RSD_TI_Ht1"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht1"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht1"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht1"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht1"]
)
results = self.post_adjustment_stats(
inputdata, results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht2"][
inputdata_train["Ane_TI_Ht2"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht2"][
inputdata_train["RSD_TI_Ht2"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht2"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht2"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht2"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht2"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht3"][
inputdata_train["Ane_TI_Ht3"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht3"][
inputdata_train["RSD_TI_Ht3"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht3"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht3"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht3"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht3"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht4"][
inputdata_train["Ane_TI_Ht4"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht4"][
inputdata_train["RSD_TI_Ht4"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht4"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht4"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht4"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht4"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
results["adjustment"] = ["SS-SF"] * len(results)
results = results.drop(columns=["sensor", "height"])
return inputdata_test, results, m, c
def empirical_stdAdjustment(
inputdata,
results,
Ref_TI_col,
RSD_TI_col,
Ref_SD_col,
RSD_SD_col,
Ref_WS_col,
RSD_WS_col,
):
"""
set adjustment values
"""
inputdata_test = inputdata.copy()
adj = Adjustments()
# get col names
name_ref = Ref_TI_col.split("_TI")
name_rsd = RSD_TI_col.split("_TI")
name = RSD_TI_col.split("_TI")
adjTI_name = str("adjTI_" + RSD_TI_col)
if len(inputdata) < 2:
results = adj.post_adjustment_stats([None], results, Ref_TI_col, adjTI_name)
m = np.NaN
c = np.NaN
else:
# add the new columns, initialized by uncorrected Data
tmp = str("adj" + RSD_SD_col)
inputdata_test[tmp] = inputdata_test[RSD_SD_col].copy()
inputdata_test[str("adjTI_" + RSD_TI_col)] = inputdata_test[RSD_TI_col].copy()
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 4) & (inputdata_test[Ref_WS_col] < 8)), tmp
] = ((1.116763 * inputdata_test[tmp]) + 0.024685) - (
((1.116763 * inputdata_test[tmp]) + 0.024685) * 0.00029
)
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 4) & (inputdata_test[Ref_WS_col] < 8)),
adjTI_name,
] = (
inputdata_test[tmp] / inputdata_test[RSD_WS_col]
)
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 8) & (inputdata_test[Ref_WS_col] < 12)), tmp
] = ((1.064564 * inputdata_test[tmp]) + 0.040596) - (
((1.064564 * inputdata_test[tmp]) + 0.040596) * -0.00161
)
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 8) & (inputdata_test[Ref_WS_col] < 12)),
adjTI_name,
] = (
inputdata_test[tmp] / inputdata_test[RSD_WS_col]
)
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 12) & (inputdata_test[Ref_WS_col] < 16)), tmp
] = ((0.97865 * inputdata_test[tmp]) + 0.124371) - (
((0.97865 * inputdata_test[tmp]) + 0.124371) * -0.00093
)
inputdata_test.loc[
((inputdata[Ref_WS_col] >= 12) & (inputdata_test[Ref_WS_col] < 16)),
adjTI_name,
] = (
inputdata_test[tmp] / inputdata_test[RSD_WS_col]
)
results = adj.post_adjustment_stats(
inputdata_test, results, Ref_TI_col, adjTI_name
)
return inputdata_test, results | 0.420481 | 0.417212 |
import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
import os
from sklearn.model_selection import train_test_split
import random
import math
import sys
from tfrecord_utils.img_utils import resize_repeat
from boltons.funcutils import partial
from omegaconf import DictConfig
from genetic_algorithm.datasets.plant_village import ClassLabelEncoder, load_and_preprocess_data
import wandb
from .layers_c import Layers
from .preprocess_c import Preprocess
from .pretraining_c import Pretraining
from .hypertune_c import HyperTune
from .training_c import Training
class Logger(object):
''' Logger base (super) class for Models '''
def __init__(self):
""" Constructor
"""
self.x_train = None
self.y_train = None
self.x_test = None
self.y_test = None
self.n_classes = 0
def set_wandb_env_vars(self, project='ResNet50_v2', group='', job_type=''):
os.environ['WANDB_ENTITY'] = 'jrose'
os.environ['WANDB_PROJECT'] = project
os.environ['WANDB_RUN_GROUP'] = group
os.environ['WANDB_JOB_TYPE'] = job_type
self.run.log({'Best chromosome':BestOrganism.chromosome}, commit=False)
self.run.log({'population_size':len(fitness)}, commit=False)
self.run.log({'Best fitness': fitness[0]}, commit=False)
self.run.log({'Average fitness': sum(fitness)/len(fitness)}, commit=False)
self.population[0].show()
logger.info(f'BEST ORGANISM: {BestOrganism.name}')
# k=16
max_rows=10000
if self.debug:
max_rows = min(BestOrganism.config.output_size*30,1000)
# print('SKIPPING Evaluate & plotting due to debug flag')
# return BestOrganism
model_dir = BestOrganism.config.model_dir or '.'
model_path = os.path.join(model_dir,f'model-phase_{self.phase}.jpg')
results_dir = os.path.join(model_dir,'results')
os.makedirs(results_dir, exist_ok=True)
chromosome = BestOrganism.chromosome
test_data = BestOrganism.test_data
model = BestOrganism.model
if last:
k=64
model_path = os.path.join(model_dir,f'best-model-phase_{self.phase}.jpg')
logger.info(f'Currently logging the model to {model_path}')
tf.keras.utils.plot_model(model, to_file=model_path, expand_nested=True)
model_img = cv2.imread(model_path)
model_structure_image = wandb.Image(model_img, caption=f"Best Model phase_{self.phase}")
run.log({"best_model": model_structure_image})#, commit=False)
log_high_loss_examples(test_data,
model,
k=k,
log_predictions=True,
max_rows=max_rows,
run=self.run)#,
# commit=False)
log_classification_report(test_data,
model,
data_split_name='test',
class_encoder=self.class_encoder,
run=self.run)
logger.info(f'SAVING BEST MODEL: {BestOrganism.name}\nat {BestOrganism.model_dir}')
BestOrganism.log_model_artifact(run=self.run)
prevBestOrganism = generation.evaluate(last=True)
keras.utils.plot_model(prevBestOrganism.model, to_file='best.png')
wandb.log({"best_model": [wandb.Image('best.png', caption="Best Model")]})
log_multiclass_metrics(test_data,
model,
data_split_name='test',
class_encoder=self.class_encoder,
log_predictions=True,
max_rows=max_rows,
run=self.run,
commit=True,
output_path=results_dir,
metadata=chromosome)
def log_model_artifact(self, run=None):
'''
Logs a
# TODO log chromosome along with model artifact
'''
model_path = os.path.join(self.model_dir,f"best_model--fitness-{self.fitness:.2f}--{self.name.replace('=','_')}")
print(f'Logging model artifact for organism {self.name} at\n{model_path}')
os.makedirs(self.model_dir, exist_ok=True)
run = run or wandb
log_model_artifact(self.model, model_path, encoder=self.class_encoder, run=run, metadata=self.chromosome)
# @property
# def data(self):
# return (self.x_train, self.y_train), (self.x_test, self.y_test)
# def load_data(self, train, test=None, std=False, onehot=False, smoothing=0.0):
# """ Load in memory data
# train: expect form: (x_train, y_train)
# """
# self.x_train, self.y_train = train
# if test is not None:
# self.x_test, self.y_test = test
# if std:
# self.x_train, self.x_test = self.standardization(self.x_train, self.x_test)
# if self.y_train.ndim == 2:
# self.n_classes = np.max(self.y_train) + 1
# else:
# self.n_classes = self.y_train.shape[1]
# if onehot:
# self.y_train = to_categorical(self.y_train, self.n_classes)
# self.y_test = to_categorical(self.y_test, self.n_classes)
# if smoothing > 0.0:
# self.y_train = self.label_smoothing(self.y_train, self.n_classes, smoothing)
# def cifar10(self, epochs=10, decay=('cosine', 0), save: str=None):
# """ Train on CIFAR-10
# epochs : number of epochs for full training
# """
# from tensorflow.keras.datasets import cifar10
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train, x_test = self.standardization(x_train, x_test)
# y_train = to_categorical(y_train, 10)
# y_test = to_categorical(y_test, 10)
# y_train = self.label_smoothing(y_train, 10, 0.1)
# # compile the model
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.random_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def cifar100(self, epochs=20, decay=('cosine', 0), save: str=None):
# """ Train on CIFAR-100
# epochs : number of epochs for full training
# """
# from tensorflow.keras.datasets import cifar100
# (x_train, y_train), (x_test, y_test) = cifar100.load_data()
# x_train, x_test = self.normalization(x_train, x_test)
# y_train = to_categorical(y_train, 100)
# y_test = to_categorical(y_test, 100)
# y_train = self.label_smoothing(y_train, 100, 0.1)
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def coil100(self, epochs=20, decay=('cosine', 0), save: str=None):
# """
# Columbia University Image Library (COIL-100)
# """
# # Get TF.dataset generator for COIL100
# train, info = tfds.load('coil100', split='train', shuffle_files=True, with_info=True, as_supervised=True)
# n_classes = info.features['label'].num_classes
# n_images = info.splits['train'].num_examples
# input_shape = info.features['image'].shape
# # Get the dataset into memory
# train = train.shuffle(n_images).batch(n_images)
# for images, labels in train.take(1):
# pass
# images = np.asarray(images)
# images, _ = self.standardization(images, None)
# labels = to_categorical(np.asarray(labels), n_classes)
# # split the dataset into train/test
# x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def plant_village(self, target_size=[256,256], epochs=20, decay=('cosine', 0), save: str=None, allow_resume: bool=False):
# """
# Plant Village leaf disease dataset (2016)
# """
# data_config = DictConfig({
# 'load':{'dataset_name':'plant_village',
# 'split':['train[0%:60%]','train[60%:70%]','train[70%:100%]'],
# 'data_dir':'/media/data/jacob/tensorflow_datasets'},
# 'preprocess':{'batch_size':32,
# 'target_size':target_size}
# })
# data, class_encoder = load_and_preprocess_data(data_config)
# train_dataset = data['train']
# val_dataset = data['val']
# test_dataset = data['test']
# batch_size = data_config.preprocess.batch_size
# steps_per_epoch = len(data['train'])
# validation_steps = len(data['val'])
# test_steps = len(data['test'])
# num_classes = train_dataset.element_spec[1].shape[1]
# # x_train, y_train = next(iter(train_dataset.unbatch().batch(batch_size*steps_per_epoch).take(1)))
# # Get the dataset into memory
# num_samples = batch_size*steps_per_epoch
# train_dataset = train_dataset.unbatch().shuffle(num_samples).batch(num_samples)
# for images, labels in train_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_train, y_train = images, labels
# # input_shape = x_train.shape[1:]
# print(f'Loaded {num_samples} samples into memory from plant_village train')
# # Get the dataset into memory
# num_samples = batch_size*validation_steps
# val_dataset = val_dataset.unbatch().batch(num_samples)
# for images, labels in val_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_val, y_val = images, labels
# print(f'Loaded {num_samples} samples into memory from plant_village val')
# # Get the dataset into memory
# num_samples = batch_size*test_steps
# test_dataset = test_dataset.unbatch().batch(num_samples)
# for images, labels in test_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_test, y_test = images, labels
# print(f'Loaded {num_samples} samples into memory from plant_village test')
# # images, _ = self.standardization(images, None)
# # labels = to_categorical(np.asarray(labels), n_classes)
# # split the dataset into train/test
# # x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
# self.set_wandb_env_vars(project='ResNet50_v2', group=f'resnet50_v2-plant_village-res{target_size[0]}')
# print('compiling')
# self.compile(loss='categorical_crossentropy', metrics=['acc', 'recall','precision'])
# with wandb.init(reinit=True, job_type='warmup', tags=['warmup']) as run:
# print('initiating warmup')
# self.warmup(x_train, y_train, save=save, allow_resume=allow_resume)
# # with wandb.init(reinit=True, job_type='grid_search', tags=['grid_search']) as run:
# print('initiating grid_search. dir(self):\n',dir(self))
# lr, batch_size = self.grid_search(x_train, y_train, x_val, y_val, save=save,
# batch_range=[16, 32], allow_resume=allow_resume)
# with wandb.init(reinit=True, job_type='training', tags=['training']) as run:
# print('initiating training. dir(self):\n')
# self.training(x_train, y_train, validation_data = (x_val, y_val), epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# print('initiating evaluate. dir(self):\n',dir(self))
# result = self.evaluate(x_test, y_test)
# run.log({'test_results':result}) | genetic_algorithm/models/zoo/logger_c.py | import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
import os
from sklearn.model_selection import train_test_split
import random
import math
import sys
from tfrecord_utils.img_utils import resize_repeat
from boltons.funcutils import partial
from omegaconf import DictConfig
from genetic_algorithm.datasets.plant_village import ClassLabelEncoder, load_and_preprocess_data
import wandb
from .layers_c import Layers
from .preprocess_c import Preprocess
from .pretraining_c import Pretraining
from .hypertune_c import HyperTune
from .training_c import Training
class Logger(object):
''' Logger base (super) class for Models '''
def __init__(self):
""" Constructor
"""
self.x_train = None
self.y_train = None
self.x_test = None
self.y_test = None
self.n_classes = 0
def set_wandb_env_vars(self, project='ResNet50_v2', group='', job_type=''):
os.environ['WANDB_ENTITY'] = 'jrose'
os.environ['WANDB_PROJECT'] = project
os.environ['WANDB_RUN_GROUP'] = group
os.environ['WANDB_JOB_TYPE'] = job_type
self.run.log({'Best chromosome':BestOrganism.chromosome}, commit=False)
self.run.log({'population_size':len(fitness)}, commit=False)
self.run.log({'Best fitness': fitness[0]}, commit=False)
self.run.log({'Average fitness': sum(fitness)/len(fitness)}, commit=False)
self.population[0].show()
logger.info(f'BEST ORGANISM: {BestOrganism.name}')
# k=16
max_rows=10000
if self.debug:
max_rows = min(BestOrganism.config.output_size*30,1000)
# print('SKIPPING Evaluate & plotting due to debug flag')
# return BestOrganism
model_dir = BestOrganism.config.model_dir or '.'
model_path = os.path.join(model_dir,f'model-phase_{self.phase}.jpg')
results_dir = os.path.join(model_dir,'results')
os.makedirs(results_dir, exist_ok=True)
chromosome = BestOrganism.chromosome
test_data = BestOrganism.test_data
model = BestOrganism.model
if last:
k=64
model_path = os.path.join(model_dir,f'best-model-phase_{self.phase}.jpg')
logger.info(f'Currently logging the model to {model_path}')
tf.keras.utils.plot_model(model, to_file=model_path, expand_nested=True)
model_img = cv2.imread(model_path)
model_structure_image = wandb.Image(model_img, caption=f"Best Model phase_{self.phase}")
run.log({"best_model": model_structure_image})#, commit=False)
log_high_loss_examples(test_data,
model,
k=k,
log_predictions=True,
max_rows=max_rows,
run=self.run)#,
# commit=False)
log_classification_report(test_data,
model,
data_split_name='test',
class_encoder=self.class_encoder,
run=self.run)
logger.info(f'SAVING BEST MODEL: {BestOrganism.name}\nat {BestOrganism.model_dir}')
BestOrganism.log_model_artifact(run=self.run)
prevBestOrganism = generation.evaluate(last=True)
keras.utils.plot_model(prevBestOrganism.model, to_file='best.png')
wandb.log({"best_model": [wandb.Image('best.png', caption="Best Model")]})
log_multiclass_metrics(test_data,
model,
data_split_name='test',
class_encoder=self.class_encoder,
log_predictions=True,
max_rows=max_rows,
run=self.run,
commit=True,
output_path=results_dir,
metadata=chromosome)
def log_model_artifact(self, run=None):
'''
Logs a
# TODO log chromosome along with model artifact
'''
model_path = os.path.join(self.model_dir,f"best_model--fitness-{self.fitness:.2f}--{self.name.replace('=','_')}")
print(f'Logging model artifact for organism {self.name} at\n{model_path}')
os.makedirs(self.model_dir, exist_ok=True)
run = run or wandb
log_model_artifact(self.model, model_path, encoder=self.class_encoder, run=run, metadata=self.chromosome)
# @property
# def data(self):
# return (self.x_train, self.y_train), (self.x_test, self.y_test)
# def load_data(self, train, test=None, std=False, onehot=False, smoothing=0.0):
# """ Load in memory data
# train: expect form: (x_train, y_train)
# """
# self.x_train, self.y_train = train
# if test is not None:
# self.x_test, self.y_test = test
# if std:
# self.x_train, self.x_test = self.standardization(self.x_train, self.x_test)
# if self.y_train.ndim == 2:
# self.n_classes = np.max(self.y_train) + 1
# else:
# self.n_classes = self.y_train.shape[1]
# if onehot:
# self.y_train = to_categorical(self.y_train, self.n_classes)
# self.y_test = to_categorical(self.y_test, self.n_classes)
# if smoothing > 0.0:
# self.y_train = self.label_smoothing(self.y_train, self.n_classes, smoothing)
# def cifar10(self, epochs=10, decay=('cosine', 0), save: str=None):
# """ Train on CIFAR-10
# epochs : number of epochs for full training
# """
# from tensorflow.keras.datasets import cifar10
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train, x_test = self.standardization(x_train, x_test)
# y_train = to_categorical(y_train, 10)
# y_test = to_categorical(y_test, 10)
# y_train = self.label_smoothing(y_train, 10, 0.1)
# # compile the model
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.random_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def cifar100(self, epochs=20, decay=('cosine', 0), save: str=None):
# """ Train on CIFAR-100
# epochs : number of epochs for full training
# """
# from tensorflow.keras.datasets import cifar100
# (x_train, y_train), (x_test, y_test) = cifar100.load_data()
# x_train, x_test = self.normalization(x_train, x_test)
# y_train = to_categorical(y_train, 100)
# y_test = to_categorical(y_test, 100)
# y_train = self.label_smoothing(y_train, 100, 0.1)
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def coil100(self, epochs=20, decay=('cosine', 0), save: str=None):
# """
# Columbia University Image Library (COIL-100)
# """
# # Get TF.dataset generator for COIL100
# train, info = tfds.load('coil100', split='train', shuffle_files=True, with_info=True, as_supervised=True)
# n_classes = info.features['label'].num_classes
# n_images = info.splits['train'].num_examples
# input_shape = info.features['image'].shape
# # Get the dataset into memory
# train = train.shuffle(n_images).batch(n_images)
# for images, labels in train.take(1):
# pass
# images = np.asarray(images)
# images, _ = self.standardization(images, None)
# labels = to_categorical(np.asarray(labels), n_classes)
# # split the dataset into train/test
# x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
# self.compile(loss='categorical_crossentropy', metrics=['acc'])
# self.warmup(x_train, y_train, save=save)
# lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test, save=save)
# self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# self.evaluate(x_test, y_test)
# def plant_village(self, target_size=[256,256], epochs=20, decay=('cosine', 0), save: str=None, allow_resume: bool=False):
# """
# Plant Village leaf disease dataset (2016)
# """
# data_config = DictConfig({
# 'load':{'dataset_name':'plant_village',
# 'split':['train[0%:60%]','train[60%:70%]','train[70%:100%]'],
# 'data_dir':'/media/data/jacob/tensorflow_datasets'},
# 'preprocess':{'batch_size':32,
# 'target_size':target_size}
# })
# data, class_encoder = load_and_preprocess_data(data_config)
# train_dataset = data['train']
# val_dataset = data['val']
# test_dataset = data['test']
# batch_size = data_config.preprocess.batch_size
# steps_per_epoch = len(data['train'])
# validation_steps = len(data['val'])
# test_steps = len(data['test'])
# num_classes = train_dataset.element_spec[1].shape[1]
# # x_train, y_train = next(iter(train_dataset.unbatch().batch(batch_size*steps_per_epoch).take(1)))
# # Get the dataset into memory
# num_samples = batch_size*steps_per_epoch
# train_dataset = train_dataset.unbatch().shuffle(num_samples).batch(num_samples)
# for images, labels in train_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_train, y_train = images, labels
# # input_shape = x_train.shape[1:]
# print(f'Loaded {num_samples} samples into memory from plant_village train')
# # Get the dataset into memory
# num_samples = batch_size*validation_steps
# val_dataset = val_dataset.unbatch().batch(num_samples)
# for images, labels in val_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_val, y_val = images, labels
# print(f'Loaded {num_samples} samples into memory from plant_village val')
# # Get the dataset into memory
# num_samples = batch_size*test_steps
# test_dataset = test_dataset.unbatch().batch(num_samples)
# for images, labels in test_dataset.take(1):
# pass
# images = np.asarray(images)
# labels = np.asarray(labels)
# x_test, y_test = images, labels
# print(f'Loaded {num_samples} samples into memory from plant_village test')
# # images, _ = self.standardization(images, None)
# # labels = to_categorical(np.asarray(labels), n_classes)
# # split the dataset into train/test
# # x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
# self.set_wandb_env_vars(project='ResNet50_v2', group=f'resnet50_v2-plant_village-res{target_size[0]}')
# print('compiling')
# self.compile(loss='categorical_crossentropy', metrics=['acc', 'recall','precision'])
# with wandb.init(reinit=True, job_type='warmup', tags=['warmup']) as run:
# print('initiating warmup')
# self.warmup(x_train, y_train, save=save, allow_resume=allow_resume)
# # with wandb.init(reinit=True, job_type='grid_search', tags=['grid_search']) as run:
# print('initiating grid_search. dir(self):\n',dir(self))
# lr, batch_size = self.grid_search(x_train, y_train, x_val, y_val, save=save,
# batch_range=[16, 32], allow_resume=allow_resume)
# with wandb.init(reinit=True, job_type='training', tags=['training']) as run:
# print('initiating training. dir(self):\n')
# self.training(x_train, y_train, validation_data = (x_val, y_val), epochs=epochs, batch_size=batch_size,
# lr=lr, decay=decay, save=save)
# print('initiating evaluate. dir(self):\n',dir(self))
# result = self.evaluate(x_test, y_test)
# run.log({'test_results':result}) | 0.578686 | 0.379263 |
from collections import OrderedDict
from pathlib import Path
from time import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from ..datasets import IMAGE_SHAPES, get_loader
from ..models import fit_to_dataset, get_model
from ..models.utils import propagate_bounds
from .utils import (AverageMeter, bounds_logits, compute_accuracy,
get_device_order, manual_seed)
__all__ = ['train_classifier', 'one_epoch']
def train_classifier(evaluate_only, dataset, model, pretrained, learning_rate,
                     momentum, weight_decay, epsilon, factor, temperature,
                     epochs, batch_size, jobs, checkpoint, resume, log_dir,
                     seed):
    """Train and/or evaluate a classification network.

    Args:
        evaluate_only: if True, run a single validation pass and return.
        dataset: dataset name understood by ``get_loader``/``IMAGE_SHAPES``.
        model: architecture name understood by ``get_model``.
        pretrained: if True, load pretrained weights for ``model``.
        learning_rate: initial SGD learning rate (decayed x0.1 every 30 epochs).
        momentum: SGD momentum.
        weight_decay: SGD weight decay.
        epsilon: input perturbation radius; scaled below by the data's value
            range before being passed to ``one_epoch`` for bound propagation.
        factor: weight of the bounds loss term (see ``one_epoch``).
        temperature: temperature for the bounds logits (see ``one_epoch``).
        epochs: total number of training epochs.
        batch_size: mini-batch size.
        jobs: number of data-loading worker processes.
        checkpoint: path to save the best checkpoint to ('' disables saving).
        resume: path of a checkpoint to resume from ('' disables resuming).
        log_dir: TensorBoard log directory ('' disables logging).
        seed: RNG seed forwarded to ``manual_seed``.
    """
    manual_seed(seed, benchmark_otherwise=True)
    # an empty string becomes Path(), used below as the "disabled" sentinel
    resume = Path(resume if resume else '')
    checkpoint = Path(checkpoint if checkpoint else '')
    get_lr = lambda epoch: learning_rate * (0.1**(epoch // 30))
    # get available cuda devices ordered by total memory capacity
    devices = get_device_order()
    if devices:
        print(f'=> using {len(devices)} GPU(s)')
        device = torch.device(f'cuda:{devices[0]}')
    else:
        device = torch.device('cpu')
    def to_device(*tensors, non_blocking=True):
        return [t.to(device, non_blocking=non_blocking) for t in tensors]
    # Data loading code
    cuda = len(devices) > 0
    train_loader = get_loader(dataset, True, batch_size, cuda, jobs)
    val_loader = get_loader(dataset, False, batch_size, cuda, jobs)
    # the last transform is taken as the normalization transform;
    # (1 - m) / s + m / s simplifies to 1 / s: the width of the [0, 1] input
    # interval after per-channel normalization, averaged over channels
    norm = train_loader.dataset.transform.transforms[-1]
    input_ranges = [(1 - m) / s + m / s for m, s in zip(norm.mean, norm.std)]
    input_range = sum(input_ranges) / len(input_ranges)
    # create the model
    if pretrained:
        print(f'=> using pre-trained model {model}')
    else:
        print(f'=> creating model {model}')
    net = fit_to_dataset(get_model(model, pretrained), dataset).eval()
    # capture the parameter names before any DataParallel wrapping so saved
    # checkpoints keep the unwrapped names (re-zipped with values at save time)
    keys = net.state_dict(keep_vars=True).keys()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    to_device(net, criterion, non_blocking=False)
    optimizer = torch.optim.SGD(
        net.parameters(),
        learning_rate,
        momentum=momentum,
        weight_decay=weight_decay)
    # define a closure wrapping one_epoch()
    def process(loader, optimizer=None):
        return one_epoch(loader, net, criterion, optimizer, to_device,
                         epsilon * input_range, factor, temperature)
    # optionally resume from a checkpoint
    best_acc1 = 0
    start_epoch = 0
    if resume.is_file():
        print("=> loading checkpoint '{}'".format(resume))
        state = torch.load(resume)
        start_epoch = state['epoch']
        best_acc1 = state['best_acc1']
        net.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        print(f"=> loaded checkpoint '{resume}' (epoch {state['epoch']})")
    elif resume != Path():
        print(f"=> no checkpoint found at '{resume}'")
    # DataParallel will divide and allocate batch_size to all GPUs
    if len(devices) > 1:
        if model.startswith('alexnet') or model.startswith('vgg'):
            net.features = nn.DataParallel(net.features, devices, device)
        else:
            net = nn.DataParallel(net, devices, device)
    # evaluate the model before training
    progress = process(val_loader)
    val_loss = progress['Loss']
    val_acc = progress['Acc@1']
    print(f'Test[{val_loss}: {val_acc}%]')
    if evaluate_only:
        return
    if log_dir:
        writer = SummaryWriter(log_dir)
        example_image = torch.randn(1, *IMAGE_SHAPES[dataset], device=device)
        writer.add_graph(net, (example_image,))
    lr = get_lr(start_epoch)
    for epoch in range(start_epoch, epochs):
        # decay the learning rate by 10 every 30 epochs
        if epoch % 30 == 0:
            lr = get_lr(epoch)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        # train for one epoch and evaluate on validation set
        train_progress = process(train_loader, optimizer)
        train_loss = train_progress['Loss']
        train_acc = train_progress['Acc@1']
        val_progress = process(val_loader)
        val_loss = val_progress['Loss']
        val_acc = val_progress['Acc@1']
        print(f'[{epoch + 1}@{lr:.4e}] '
              f'Train[{train_loss}: {train_acc}%] '
              f'Test[{val_loss}: {val_acc}%]')
        if log_dir:
            # fixed tag typo (was 'Train/LearingRate')
            writer.add_scalar('Train/LearningRate', lr, epoch)
            for meter in train_progress.values():
                writer.add_scalar(f'Train/{meter.name}', meter.avg, epoch)
            for meter in val_progress.values():
                writer.add_scalar(f'Test/{meter.name}', meter.avg, epoch)
        # remember best acc@1 and save checkpoint
        if val_acc.avg >= best_acc1:
            best_acc1 = val_acc.avg
            if checkpoint != Path():
                parameters = net.state_dict().values()
                torch.save({
                    'epoch': epoch + 1,
                    'state_dict': OrderedDict(zip(keys, parameters)),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, checkpoint)
        # NaN != NaN trick: stop when the training loss diverged
        # (assumes AverageMeter forwards comparisons — TODO confirm)
        if train_loss != train_loss:
            print('Training was stopped (reached NaN)!')
            break
    if log_dir:
        writer.close()
def one_epoch(train_loader, net, criterion, optimizer, preporcess, epsilon,
              factor, temperature):
    """Run one pass over the loader and return the metric meters.

    Trains when ``optimizer`` is given; otherwise runs in evaluation mode
    with gradients disabled.

    Args:
        train_loader: iterable yielding (inputs, targets) batches.
        net: the network to train or evaluate.
        criterion: classification loss applied to the network output.
        optimizer: optimizer stepped once per batch via ``step(closure)``,
            or ``None`` for evaluation only.
        preporcess: callable that moves (inputs, targets) to the compute
            device (sic: parameter name kept — it is part of the signature).
        epsilon: perturbation radius for ``propagate_bounds``; the extra
            robustness loss is added only when ``epsilon > 0 and factor > 0``.
        factor: weight of the bounds loss term.
        temperature: divisor (together with the per-sample max |logit|)
            rescaling the bounds logits before the extra loss.

    Returns:
        dict mapping meter name to its ``AverageMeter`` (batch time,
        data-loading time, loss, top-1 and top-5 accuracy).
    """
    batch_time = AverageMeter('Time/BatchTotal', ':6.3f')
    data_time = AverageMeter('Time/BatchData', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # switch to train mode (train(False) == eval mode)
    is_training = optimizer is not None
    net.train(is_training)
    def compute_loss(inputs, targets, update_metrics):
        # compute output
        output = net(inputs)
        loss = criterion(output, targets)
        # compute bounds loss
        if epsilon > 0 and factor > 0:
            bounds = propagate_bounds(net, inputs, epsilon)
            logits = bounds_logits(output, bounds.offset, targets)
            # normalize per sample by the largest |logit|, then temperature
            max_abs_logits = logits.abs().max(1).values.view(-1, 1)
            logits = logits / (temperature * max_abs_logits)
            loss += factor * criterion(logits, targets)
        # measure accuracy and record loss
        if update_metrics:
            n = inputs.size(0)
            acc1, acc5 = compute_accuracy(  # pylint: disable=E0632
                output, targets, top_k=(1, 5))
            losses.update(float(loss), n)
            top1.update(float(acc1), n)
            top5.update(float(acc5), n)
        # compute gradient
        if is_training:
            optimizer.zero_grad()
            loss.backward()
        return loss
    with torch.set_grad_enabled(is_training):
        end = time()
        for inputs, targets in train_loader:
            # measure data loading time
            data_time.update(time() - end)
            # move data to device
            inputs, targets = preporcess(inputs, targets)
            # optimizer.step() may invoke the closure several times;
            # the meters are only updated on the first call per batch
            first_time = True
            def closure():
                nonlocal first_time
                loss = compute_loss(
                    inputs,  # pylint: disable=W0640
                    targets,  # pylint: disable=W0640
                    first_time,
                )
                first_time = False
                return loss
            if is_training:
                optimizer.step(closure)
            else:
                closure()
            # measure elapsed time
            batch_time.update(time() - end)
            end = time()
    return {x.name: x for x in (batch_time, data_time, losses, top1, top5)} | ptb/train/train.py |
from collections import OrderedDict
from pathlib import Path
from time import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from ..datasets import IMAGE_SHAPES, get_loader
from ..models import fit_to_dataset, get_model
from ..models.utils import propagate_bounds
from .utils import (AverageMeter, bounds_logits, compute_accuracy,
get_device_order, manual_seed)
__all__ = ['train_classifier', 'one_epoch']
def train_classifier(evaluate_only, dataset, model, pretrained, learning_rate,
                     momentum, weight_decay, epsilon, factor, temperature,
                     epochs, batch_size, jobs, checkpoint, resume, log_dir,
                     seed):
    """Train and/or evaluate a classification network.

    Args:
        evaluate_only: if True, run a single validation pass and return.
        dataset: dataset name understood by ``get_loader``/``IMAGE_SHAPES``.
        model: architecture name understood by ``get_model``.
        pretrained: if True, load pretrained weights for ``model``.
        learning_rate: initial SGD learning rate (decayed x0.1 every 30 epochs).
        momentum: SGD momentum.
        weight_decay: SGD weight decay.
        epsilon: input perturbation radius; scaled below by the data's value
            range before being passed to ``one_epoch`` for bound propagation.
        factor: weight of the bounds loss term (see ``one_epoch``).
        temperature: temperature for the bounds logits (see ``one_epoch``).
        epochs: total number of training epochs.
        batch_size: mini-batch size.
        jobs: number of data-loading worker processes.
        checkpoint: path to save the best checkpoint to ('' disables saving).
        resume: path of a checkpoint to resume from ('' disables resuming).
        log_dir: TensorBoard log directory ('' disables logging).
        seed: RNG seed forwarded to ``manual_seed``.
    """
    manual_seed(seed, benchmark_otherwise=True)
    # an empty string becomes Path(), used below as the "disabled" sentinel
    resume = Path(resume if resume else '')
    checkpoint = Path(checkpoint if checkpoint else '')
    get_lr = lambda epoch: learning_rate * (0.1**(epoch // 30))
    # get available cuda devices ordered by total memory capacity
    devices = get_device_order()
    if devices:
        print(f'=> using {len(devices)} GPU(s)')
        device = torch.device(f'cuda:{devices[0]}')
    else:
        device = torch.device('cpu')
    def to_device(*tensors, non_blocking=True):
        return [t.to(device, non_blocking=non_blocking) for t in tensors]
    # Data loading code
    cuda = len(devices) > 0
    train_loader = get_loader(dataset, True, batch_size, cuda, jobs)
    val_loader = get_loader(dataset, False, batch_size, cuda, jobs)
    # the last transform is taken as the normalization transform;
    # (1 - m) / s + m / s simplifies to 1 / s: the width of the [0, 1] input
    # interval after per-channel normalization, averaged over channels
    norm = train_loader.dataset.transform.transforms[-1]
    input_ranges = [(1 - m) / s + m / s for m, s in zip(norm.mean, norm.std)]
    input_range = sum(input_ranges) / len(input_ranges)
    # create the model
    if pretrained:
        print(f'=> using pre-trained model {model}')
    else:
        print(f'=> creating model {model}')
    net = fit_to_dataset(get_model(model, pretrained), dataset).eval()
    # capture the parameter names before any DataParallel wrapping so saved
    # checkpoints keep the unwrapped names (re-zipped with values at save time)
    keys = net.state_dict(keep_vars=True).keys()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    to_device(net, criterion, non_blocking=False)
    optimizer = torch.optim.SGD(
        net.parameters(),
        learning_rate,
        momentum=momentum,
        weight_decay=weight_decay)
    # define a closure wrapping one_epoch()
    def process(loader, optimizer=None):
        return one_epoch(loader, net, criterion, optimizer, to_device,
                         epsilon * input_range, factor, temperature)
    # optionally resume from a checkpoint
    best_acc1 = 0
    start_epoch = 0
    if resume.is_file():
        print("=> loading checkpoint '{}'".format(resume))
        state = torch.load(resume)
        start_epoch = state['epoch']
        best_acc1 = state['best_acc1']
        net.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        print(f"=> loaded checkpoint '{resume}' (epoch {state['epoch']})")
    elif resume != Path():
        print(f"=> no checkpoint found at '{resume}'")
    # DataParallel will divide and allocate batch_size to all GPUs
    if len(devices) > 1:
        if model.startswith('alexnet') or model.startswith('vgg'):
            net.features = nn.DataParallel(net.features, devices, device)
        else:
            net = nn.DataParallel(net, devices, device)
    # evaluate the model before training
    progress = process(val_loader)
    val_loss = progress['Loss']
    val_acc = progress['Acc@1']
    print(f'Test[{val_loss}: {val_acc}%]')
    if evaluate_only:
        return
    if log_dir:
        writer = SummaryWriter(log_dir)
        example_image = torch.randn(1, *IMAGE_SHAPES[dataset], device=device)
        writer.add_graph(net, (example_image,))
    lr = get_lr(start_epoch)
    for epoch in range(start_epoch, epochs):
        # decay the learning rate by 10 every 30 epochs
        if epoch % 30 == 0:
            lr = get_lr(epoch)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        # train for one epoch and evaluate on validation set
        train_progress = process(train_loader, optimizer)
        train_loss = train_progress['Loss']
        train_acc = train_progress['Acc@1']
        val_progress = process(val_loader)
        val_loss = val_progress['Loss']
        val_acc = val_progress['Acc@1']
        print(f'[{epoch + 1}@{lr:.4e}] '
              f'Train[{train_loss}: {train_acc}%] '
              f'Test[{val_loss}: {val_acc}%]')
        if log_dir:
            # NOTE(review): tag is misspelled ('LearingRate'); kept as-is
            writer.add_scalar('Train/LearingRate', lr, epoch)
            for meter in train_progress.values():
                writer.add_scalar(f'Train/{meter.name}', meter.avg, epoch)
            for meter in val_progress.values():
                writer.add_scalar(f'Test/{meter.name}', meter.avg, epoch)
        # remember best acc@1 and save checkpoint
        if val_acc.avg >= best_acc1:
            best_acc1 = val_acc.avg
            if checkpoint != Path():
                parameters = net.state_dict().values()
                torch.save({
                    'epoch': epoch + 1,
                    'state_dict': OrderedDict(zip(keys, parameters)),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, checkpoint)
        # NaN != NaN trick: stop when the training loss diverged
        # (assumes AverageMeter forwards comparisons — TODO confirm)
        if train_loss != train_loss:
            print('Training was stopped (reached NaN)!')
            break
    if log_dir:
        writer.close()
def one_epoch(train_loader, net, criterion, optimizer, preporcess, epsilon,
              factor, temperature):
    """Run one pass over the loader and return the metric meters.

    Trains when ``optimizer`` is given; otherwise runs in evaluation mode
    with gradients disabled.

    Args:
        train_loader: iterable yielding (inputs, targets) batches.
        net: the network to train or evaluate.
        criterion: classification loss applied to the network output.
        optimizer: optimizer stepped once per batch via ``step(closure)``,
            or ``None`` for evaluation only.
        preporcess: callable that moves (inputs, targets) to the compute
            device (sic: parameter name kept — it is part of the signature).
        epsilon: perturbation radius for ``propagate_bounds``; the extra
            robustness loss is added only when ``epsilon > 0 and factor > 0``.
        factor: weight of the bounds loss term.
        temperature: divisor (together with the per-sample max |logit|)
            rescaling the bounds logits before the extra loss.

    Returns:
        dict mapping meter name to its ``AverageMeter`` (batch time,
        data-loading time, loss, top-1 and top-5 accuracy).
    """
    batch_time = AverageMeter('Time/BatchTotal', ':6.3f')
    data_time = AverageMeter('Time/BatchData', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # switch to train mode (train(False) == eval mode)
    is_training = optimizer is not None
    net.train(is_training)
    def compute_loss(inputs, targets, update_metrics):
        # compute output
        output = net(inputs)
        loss = criterion(output, targets)
        # compute bounds loss
        if epsilon > 0 and factor > 0:
            bounds = propagate_bounds(net, inputs, epsilon)
            logits = bounds_logits(output, bounds.offset, targets)
            # normalize per sample by the largest |logit|, then temperature
            max_abs_logits = logits.abs().max(1).values.view(-1, 1)
            logits = logits / (temperature * max_abs_logits)
            loss += factor * criterion(logits, targets)
        # measure accuracy and record loss
        if update_metrics:
            n = inputs.size(0)
            acc1, acc5 = compute_accuracy(  # pylint: disable=E0632
                output, targets, top_k=(1, 5))
            losses.update(float(loss), n)
            top1.update(float(acc1), n)
            top5.update(float(acc5), n)
        # compute gradient
        if is_training:
            optimizer.zero_grad()
            loss.backward()
        return loss
    with torch.set_grad_enabled(is_training):
        end = time()
        for inputs, targets in train_loader:
            # measure data loading time
            data_time.update(time() - end)
            # move data to device
            inputs, targets = preporcess(inputs, targets)
            # optimizer.step() may invoke the closure several times;
            # the meters are only updated on the first call per batch
            first_time = True
            def closure():
                nonlocal first_time
                loss = compute_loss(
                    inputs,  # pylint: disable=W0640
                    targets,  # pylint: disable=W0640
                    first_time,
                )
                first_time = False
                return loss
            if is_training:
                optimizer.step(closure)
            else:
                closure()
            # measure elapsed time
            batch_time.update(time() - end)
            end = time()
    return {x.name: x for x in (batch_time, data_time, losses, top1, top5)} | 0.907158 | 0.348922
from app.models import Teacher
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Length, Email, ValidationError
# Profile form
class EditProfileForm(FlaskForm):
    """Form letting a teacher edit their email address and short bio.

    The teacher's current address is passed in as ``original_email`` so
    that uniqueness is only enforced when the address actually changes.
    """
    email = StringField('Email',
                        validators=[DataRequired(), Email()],
                        render_kw={"placeholder": "Valid Email Address"}
                        )
    about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
    submit = SubmitField('Submit')
    def __init__(self, original_email, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # remembered for the changed-address check in validate_email()
        self.original_email = original_email
    def validate_email(self, teacher_email):
        # WTForms invokes validate_<fieldname> automatically; reject the
        # new address if another teacher already uses it
        if teacher_email.data != self.original_email:
            teacher = Teacher.query.filter_by(
                teacher_email=self.email.data).first()
            if teacher is not None:
                raise ValidationError('Please use a different email.')
# Comment form
class CommentForm(FlaskForm):
    """Single-field form for posting a comment."""
    comment = TextAreaField('Comment',
                            validators=[DataRequired()]
                            )
    submit = SubmitField('Post')
# Follow form
class EmptyForm(FlaskForm):
    """Submit-only form (CSRF token + button), used for follow actions."""
    submit = SubmitField('Post')
# Course overview form
class WebDevelopmentOverviewForm(FlaskForm):
    """Form for posting a course overview: title, body text and a YouTube
    embed link."""
    # choices are (stored value, displayed label) pairs
    title = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    body = TextAreaField(
        'Course Overview',
        validators=[DataRequired()]
    )
    youtube_link = StringField(
        'Youtube Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "Youtube Embed Link"}
    )
    submit = SubmitField('Post')
class TableOfContentsForm(FlaskForm):
    """Form adding one chapter entry (name + link) to a course's table of
    contents."""
    # choices are (stored value, displayed label) pairs
    title = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    link = StringField(
        'Chapter Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "https://link/to/chapter"}
    )
    submit = SubmitField('Post')
class ChapterForm(FlaskForm):
    """Form for creating a full chapter page: links, overview, video,
    conclusion and five fixed objective slots."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    chapter_link = StringField(
        'Live Chapter Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1"}
    )
    chapter_review_link = StringField(
        'Review Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1"}
    )
    overview = TextAreaField(
        'Chapter Overview',
        validators=[DataRequired()]
    )
    accomplish = TextAreaField(
        'What You Will Accomplish',
        validators=[DataRequired()]
    )
    youtube_link = StringField(
        'YouTube Link',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "https://www.youtube.com"
        }
    )
    conclusion = TextAreaField(
        'Conclusion',
        validators=[DataRequired()]
    )
    # five fixed objective slots (placeholders are examples only)
    objective_1 = StringField(
        'Objective 1',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "Student can create a flask project structure"
        }
    )
    objective_2 = StringField(
        'Objective 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can create project instance"}
    )
    objective_3 = StringField(
        'Objective 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can add a flask entry point"}
    )
    objective_4 = StringField(
        'Objective 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can display a welcome message"}
    )
    objective_5 = StringField(
        'Objective 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can start a flask server"}
    )
    submit = SubmitField('Post')
class ChapterObjectivesForm(FlaskForm):
    """Form for posting a chapter's five learning objectives and its
    review link."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    review_objectives_link = StringField(
        'Review Objectives Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1/objectives/review"}
    )
    # five fixed objective slots (placeholders are examples only)
    objective_1 = StringField(
        'Objective 1',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "Student can create a flask project structure"
        }
    )
    objective_2 = StringField(
        'Objective 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can create project instance"}
    )
    objective_3 = StringField(
        'Objective 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can add a flask entry point"}
    )
    objective_4 = StringField(
        'Objective 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can display a welcome message"}
    )
    objective_5 = StringField(
        'Objective 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can start a flask server"}
    )
    submit = SubmitField('Post')
class ChapterQuizForm(FlaskForm):
    """Form for posting a chapter's five quiz questions and its review
    link."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    review_quiz_link = StringField(
        'Review Quiz Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1/quiz/review"}
    )
    # five fixed quiz slots (placeholders are examples only)
    quiz_1 = StringField(
        'Quiz 1',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is HTML in full"}
    )
    quiz_2 = StringField(
        'Quiz 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is CSS in full"}
    )
    quiz_3 = StringField(
        'Quiz 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is Python in full"}
    )
    quiz_4 = StringField(
        'Quiz 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is Flask in full"}
    )
    quiz_5 = StringField(
        'Quiz 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is SQL in full"}
    )
    submit = SubmitField('Post') | app/teacher/forms.py | from app.models import Teacher
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import DataRequired, Length, Email, ValidationError
# Profile form
class EditProfileForm(FlaskForm):
    """Form letting a teacher edit their email address and short bio.

    The teacher's current address is passed in as ``original_email`` so
    that uniqueness is only enforced when the address actually changes.
    """
    email = StringField('Email',
                        validators=[DataRequired(), Email()],
                        render_kw={"placeholder": "Valid Email Address"}
                        )
    about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
    submit = SubmitField('Submit')
    def __init__(self, original_email, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # remembered for the changed-address check in validate_email()
        self.original_email = original_email
    def validate_email(self, teacher_email):
        # WTForms invokes validate_<fieldname> automatically; reject the
        # new address if another teacher already uses it
        if teacher_email.data != self.original_email:
            teacher = Teacher.query.filter_by(
                teacher_email=self.email.data).first()
            if teacher is not None:
                raise ValidationError('Please use a different email.')
# Comment form
class CommentForm(FlaskForm):
    """Single-field form for posting a comment."""
    comment = TextAreaField('Comment',
                            validators=[DataRequired()]
                            )
    submit = SubmitField('Post')
# Follow form
class EmptyForm(FlaskForm):
    """Submit-only form (CSRF token + button), used for follow actions."""
    submit = SubmitField('Post')
# Course overview form
class WebDevelopmentOverviewForm(FlaskForm):
    """Form for posting a course overview: title, body text and a YouTube
    embed link."""
    # choices are (stored value, displayed label) pairs
    title = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    body = TextAreaField(
        'Course Overview',
        validators=[DataRequired()]
    )
    youtube_link = StringField(
        'Youtube Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "Youtube Embed Link"}
    )
    submit = SubmitField('Post')
class TableOfContentsForm(FlaskForm):
    """Form adding one chapter entry (name + link) to a course's table of
    contents."""
    # choices are (stored value, displayed label) pairs
    title = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    link = StringField(
        'Chapter Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "https://link/to/chapter"}
    )
    submit = SubmitField('Post')
class ChapterForm(FlaskForm):
    """Form for creating a full chapter page: links, overview, video,
    conclusion and five fixed objective slots."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    chapter_link = StringField(
        'Live Chapter Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1"}
    )
    chapter_review_link = StringField(
        'Review Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1"}
    )
    overview = TextAreaField(
        'Chapter Overview',
        validators=[DataRequired()]
    )
    accomplish = TextAreaField(
        'What You Will Accomplish',
        validators=[DataRequired()]
    )
    youtube_link = StringField(
        'YouTube Link',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "https://www.youtube.com"
        }
    )
    conclusion = TextAreaField(
        'Conclusion',
        validators=[DataRequired()]
    )
    # five fixed objective slots (placeholders are examples only)
    objective_1 = StringField(
        'Objective 1',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "Student can create a flask project structure"
        }
    )
    objective_2 = StringField(
        'Objective 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can create project instance"}
    )
    objective_3 = StringField(
        'Objective 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can add a flask entry point"}
    )
    objective_4 = StringField(
        'Objective 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can display a welcome message"}
    )
    objective_5 = StringField(
        'Objective 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can start a flask server"}
    )
    submit = SubmitField('Post')
class ChapterObjectivesForm(FlaskForm):
    """Form for posting a chapter's five learning objectives and its
    review link."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    review_objectives_link = StringField(
        'Review Objectives Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1/objectives/review"}
    )
    # five fixed objective slots (placeholders are examples only)
    objective_1 = StringField(
        'Objective 1',
        validators=[DataRequired()],
        render_kw={
            "placeholder": "Student can create a flask project structure"
        }
    )
    objective_2 = StringField(
        'Objective 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can create project instance"}
    )
    objective_3 = StringField(
        'Objective 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can add a flask entry point"}
    )
    objective_4 = StringField(
        'Objective 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can display a welcome message"}
    )
    objective_5 = StringField(
        'Objective 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "Student can start a flask server"}
    )
    submit = SubmitField('Post')
class ChapterQuizForm(FlaskForm):
    """Form for posting a chapter's five quiz questions and its review
    link."""
    # choices are (stored value, displayed label) pairs
    course = SelectField(
        'Course Title',
        choices=[
            ('Flask Web Development', 'Flask Web Development'),
            ('Python DSA', 'Python DSA'),
            ('Data Science', 'Data Science'),
            ('Machine Learning', 'Machine Learning')
        ],
        validators=[DataRequired()]
    )
    chapter = StringField(
        'Chapter Title',
        validators=[DataRequired()],
        render_kw={"placeholder": "Chapter 1: Introduction"}
    )
    review_quiz_link = StringField(
        'Review Quiz Link',
        validators=[DataRequired()],
        render_kw={"placeholder": "course/flask/chapter-1/quiz/review"}
    )
    # five fixed quiz slots (placeholders are examples only)
    quiz_1 = StringField(
        'Quiz 1',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is HTML in full"}
    )
    quiz_2 = StringField(
        'Quiz 2',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is CSS in full"}
    )
    quiz_3 = StringField(
        'Quiz 3',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is Python in full"}
    )
    quiz_4 = StringField(
        'Quiz 4',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is Flask in full"}
    )
    quiz_5 = StringField(
        'Quiz 5',
        validators=[DataRequired()],
        render_kw={"placeholder": "What is SQL in full"}
    )
    submit = SubmitField('Post') | 0.663996 | 0.268552
# Copyright (C) 2020, <NAME>
# Yamanishi laboratory,
# Department of Bioscience and Bioinformatics,
# Faculty of Computer Science and Systems Engineering,
# Kyushu Institute of Technology,
# 680-4 Kawazu, Iizuka, Fukuoka, 820-8502, Japan.
# txt molecule to SMILES
import argparse, rdkit, re, sys, time
import molenc_common as common
from rdkit import Chem
# create a fake molecule for the corresp. fragment
def read_one_molecule(input):
    """Read one molecule in molenc text format; return (SMILES, name).

    Parses the atoms header, the atom lines, the bonds header and the bond
    lines, rebuilds an RDKit molecule (restoring atom chirality and bond
    stereo), sanitizes it and converts it to SMILES.

    Raises:
        common.End_of_file: when the input stream is exhausted.
    """
    res_mol = Chem.RWMol()
    atoms_header = input.readline().strip()
    if atoms_header == '':
        raise common.End_of_file # no EOF in Python...
    nb_atoms, name = common.read_atoms_header(atoms_header)
    # maps atom indexes from the input file to indexes in res_mol
    old2new = {}
    for _i in range(nb_atoms):
        line = input.readline().strip()
        (index, nb_pi, atomic_num, nb_HA, charge, stereo) = \
            common.read_atom(line)
        # add atom
        a = Chem.Atom(atomic_num)
        a.SetFormalCharge(charge)
        if stereo > 0: # set chirality
            a.SetChiralTag(common.atom_stereo_code_to_chiral_tag(stereo))
        j = res_mol.AddAtom(a)
        # we need to convert atom indexes
        old2new[index] = j
    bonds_header = input.readline().strip()
    nb_bonds = common.read_bonds_header(bonds_header)
    stereo_bonds = []
    for i in range(nb_bonds):
        line = input.readline().strip()
        (start_i, bt, stop_i, (stereo, c, d)) = common.read_bond(line)
        start = old2new[start_i]
        stop = old2new[stop_i]
        # add bond; RWMol.AddBond returns the new bond count,
        # so the index of the bond just added is n - 1
        n = res_mol.AddBond(start, stop, bt)
        if stereo != rdkit.Chem.rdchem.BondStereo.STEREONONE:
            bi = n - 1
            # convert stereo bond stereo atoms indexes
            a = old2new[c]
            b = old2new[d]
            stereo_bonds.append((bi, stereo, a, b))
    # all atoms and bonds are here now
    # so stereo bonds info can be set
    for (bi, stereo, a, b) in stereo_bonds:
        bond = res_mol.GetBondWithIdx(bi)
        bond.SetStereo(stereo)
        bond.SetStereoAtoms(a, b)
        print('%s stereo %s on bond %d (%d, %d)' %
              (name, common.char_of_bond_stereo(stereo), bi, a, b),
              file=sys.stderr)
    try:
        Chem.SanitizeMol(res_mol)
        Chem.AssignStereochemistry(res_mol) # ! MANDATORY; AFTER SanitizeMol !
    except rdkit.Chem.rdchem.KekulizeException:
        # best effort: report the failure and still emit a SMILES below
        print("KekulizeException in %s" % name, file=sys.stderr)
    smi = Chem.MolToSmiles(res_mol)
    return (smi, name)
if __name__ == '__main__':
    before = time.time()
    # CLI options parsing
    parser = argparse.ArgumentParser(description = "txt molecule to smi")
    parser.add_argument("-i", metavar = "input.mols", dest = "input_fn",
                        help = "molecules input file")
    parser.add_argument("-o", metavar = "output.smi", dest = "output_fn",
                        help = "output file")
    # parse CLI
    if len(sys.argv) == 1:
        # show help in case user has no clue of what to do
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    input_fn = args.input_fn
    output = open(args.output_fn, 'w')
    count = 0
    with open(input_fn) as input:
        try:
            # read molecules until the custom End_of_file exception fires
            while True:
                smi, name = read_one_molecule(input)
                count += 1
                print('%s\t%s' % (smi, name), file=output)
        except common.End_of_file:
            pass
    after = time.time()
    dt = after - before
    # report throughput on stderr
    print("%d molecules at %.2f molecule/s" %
          (count, count / dt), file=sys.stderr)
    output.close() | bin/molenc_mol2smi.py |
# Copyright (C) 2020, <NAME>
# Yamanishi laboratory,
# Department of Bioscience and Bioinformatics,
# Faculty of Computer Science and Systems Engineering,
# Kyushu Institute of Technology,
# 680-4 Kawazu, Iizuka, Fukuoka, 820-8502, Japan.
# txt molecule to SMILES
import argparse, rdkit, re, sys, time
import molenc_common as common
from rdkit import Chem
# create a fake molecule for the corresp. fragment
def read_one_molecule(input):
    """Read one molecule in molenc text format; return (SMILES, name).

    Parses the atoms header, the atom lines, the bonds header and the bond
    lines, rebuilds an RDKit molecule (restoring atom chirality and bond
    stereo), sanitizes it and converts it to SMILES.

    Raises:
        common.End_of_file: when the input stream is exhausted.
    """
    res_mol = Chem.RWMol()
    atoms_header = input.readline().strip()
    if atoms_header == '':
        raise common.End_of_file # no EOF in Python...
    nb_atoms, name = common.read_atoms_header(atoms_header)
    # maps atom indexes from the input file to indexes in res_mol
    old2new = {}
    for _i in range(nb_atoms):
        line = input.readline().strip()
        (index, nb_pi, atomic_num, nb_HA, charge, stereo) = \
            common.read_atom(line)
        # add atom
        a = Chem.Atom(atomic_num)
        a.SetFormalCharge(charge)
        if stereo > 0: # set chirality
            a.SetChiralTag(common.atom_stereo_code_to_chiral_tag(stereo))
        j = res_mol.AddAtom(a)
        # we need to convert atom indexes
        old2new[index] = j
    bonds_header = input.readline().strip()
    nb_bonds = common.read_bonds_header(bonds_header)
    stereo_bonds = []
    for i in range(nb_bonds):
        line = input.readline().strip()
        (start_i, bt, stop_i, (stereo, c, d)) = common.read_bond(line)
        start = old2new[start_i]
        stop = old2new[stop_i]
        # add bond; RWMol.AddBond returns the new bond count,
        # so the index of the bond just added is n - 1
        n = res_mol.AddBond(start, stop, bt)
        if stereo != rdkit.Chem.rdchem.BondStereo.STEREONONE:
            bi = n - 1
            # convert stereo bond stereo atoms indexes
            a = old2new[c]
            b = old2new[d]
            stereo_bonds.append((bi, stereo, a, b))
    # all atoms and bonds are here now
    # so stereo bonds info can be set
    for (bi, stereo, a, b) in stereo_bonds:
        bond = res_mol.GetBondWithIdx(bi)
        bond.SetStereo(stereo)
        bond.SetStereoAtoms(a, b)
        print('%s stereo %s on bond %d (%d, %d)' %
              (name, common.char_of_bond_stereo(stereo), bi, a, b),
              file=sys.stderr)
    try:
        Chem.SanitizeMol(res_mol)
        Chem.AssignStereochemistry(res_mol) # ! MANDATORY; AFTER SanitizeMol !
    except rdkit.Chem.rdchem.KekulizeException:
        # best effort: report the failure and still emit a SMILES below
        print("KekulizeException in %s" % name, file=sys.stderr)
    smi = Chem.MolToSmiles(res_mol)
    return (smi, name)
if __name__ == '__main__':
    before = time.time()
    # CLI options parsing
    parser = argparse.ArgumentParser(description = "txt molecule to smi")
    parser.add_argument("-i", metavar = "input.mols", dest = "input_fn",
                        help = "molecules input file")
    parser.add_argument("-o", metavar = "output.smi", dest = "output_fn",
                        help = "output file")
    # parse CLI
    if len(sys.argv) == 1:
        # show help in case user has no clue of what to do
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    input_fn = args.input_fn
    output = open(args.output_fn, 'w')
    count = 0
    with open(input_fn) as input:
        try:
            # read molecules until the custom End_of_file exception fires
            while True:
                smi, name = read_one_molecule(input)
                count += 1
                print('%s\t%s' % (smi, name), file=output)
        except common.End_of_file:
            pass
    after = time.time()
    dt = after - before
    # report throughput on stderr
    print("%d molecules at %.2f molecule/s" %
          (count, count / dt), file=sys.stderr)
    output.close() | 0.286668 | 0.348673
import logging
logger = logging.getLogger()
from pymothoa import types
from pymothoa.util.descriptor import Descriptor, instanceof
from types import LLVMType
import llvm # binding
class LLVMModule(object):
    """Wrapper around the llvm binding's JITEngine for one LLVM module.

    NOTE(review): Python 2 code (``basestring``, ``func_globals``, list
    result of ``map``); do not run under Python 3 without porting.
    """
    # constant descriptor: the engine is set once in __init__
    jit_engine = Descriptor(constant=True)
    def __init__(self, name, optlevel=3, vectorize=True):
        self.jit_engine = llvm.JITEngine(name, optlevel, vectorize)
    def optimize(self):
        # run the engine's optimization passes over the module
        self.jit_engine.optimize()
    def verify(self):
        # ask the engine to verify the module's IR
        self.jit_engine.verify()
    def dump_asm(self, fn):
        # return the engine's assembly dump for function fn
        return self.jit_engine.dump_asm(fn)
    def dump(self):
        # return the engine's textual dump of the whole module
        return self.jit_engine.dump()
    def _new_func_def_or_decl(self, ret, args, name_or_func):
        """Create a function definition (from a Python function) or a bare
        declaration (from a name string) in this module.

        Booleans are lowered as 8-bit ints on both return and argument
        positions (workarounds below).
        """
        from function import LLVMFuncDef, LLVMFuncDecl, LLVMFuncDef_BoolRet
        # a string argument means "declaration only"; anything else is a
        # Python function to compile as a definition
        is_func_def = not isinstance(name_or_func, basestring)
        if is_func_def:
            func = name_or_func
            # qualify the generated symbol with the defining module's name
            namespace = func.func_globals['__name__']
            realname = '.'.join([namespace, func.__name__])
        else:
            name = name_or_func
            realname = name
        # workaround for boolean return type
        is_ret_bool = False
        if ret is types.Bool:
            # Change return type to 8-bit int
            retty = LLVMType(types.Int8)
            is_ret_bool = True
            logger.warning('Using workaround (change to Int8) for boolean return type.')
        else:
            retty = LLVMType(ret)
        # workaround for boolean argument type
        argtys = []
        count_converted_boolean = 0
        for arg in args:
            if arg is types.Bool:
                argtys.append(LLVMType(types.Int8))
                count_converted_boolean += 1
            else:
                argtys.append(LLVMType(arg))
        else:
            # for/else with no break: this branch always runs after the loop
            if count_converted_boolean:
                logger.warning('Using workaround (changed to Int8) for boolean argument type.')
        fn_decl = self.jit_engine.make_function(
            realname,
            retty.type(),
            map(lambda X: X.type(), argtys),
        )
        if fn_decl.name() != realname:
            # presumably the engine renamed the symbol (e.g. collision);
            # treat any renaming as an error
            raise NameError(
                'Generated function has a different name: %s'%(
                    fn_decl.name()))
        if is_func_def:
            if is_ret_bool:
                return LLVMFuncDef_BoolRet(func, retty, argtys, self, fn_decl)
            else:
                return LLVMFuncDef(func, retty, argtys, self, fn_decl)
        else:
            return LLVMFuncDecl(retty, argtys, self, fn_decl)
    def new_function(self, func, ret, args):
        # compile a Python function into this module
        return self._new_func_def_or_decl(ret, args, func)
    def new_declaration(self, realname, ret, args):
        # declare an externally-defined function by name
        return self._new_func_def_or_decl(ret, args, realname) | numba/pymothoa/llvm_backend/module.py | import logging
logger = logging.getLogger()
from pymothoa import types
from pymothoa.util.descriptor import Descriptor, instanceof
from types import LLVMType
import llvm # binding
class LLVMModule(object):
    """Wrapper around the llvm binding's JITEngine for one LLVM module.

    NOTE(review): Python 2 code (``basestring``, ``func_globals``, list
    result of ``map``); do not run under Python 3 without porting.
    """
    # constant descriptor: the engine is set once in __init__
    jit_engine = Descriptor(constant=True)
    def __init__(self, name, optlevel=3, vectorize=True):
        self.jit_engine = llvm.JITEngine(name, optlevel, vectorize)
    def optimize(self):
        # run the engine's optimization passes over the module
        self.jit_engine.optimize()
    def verify(self):
        # ask the engine to verify the module's IR
        self.jit_engine.verify()
    def dump_asm(self, fn):
        # return the engine's assembly dump for function fn
        return self.jit_engine.dump_asm(fn)
    def dump(self):
        # return the engine's textual dump of the whole module
        return self.jit_engine.dump()
    def _new_func_def_or_decl(self, ret, args, name_or_func):
        """Create a function definition (from a Python function) or a bare
        declaration (from a name string) in this module.

        Booleans are lowered as 8-bit ints on both return and argument
        positions (workarounds below).
        """
        from function import LLVMFuncDef, LLVMFuncDecl, LLVMFuncDef_BoolRet
        # a string argument means "declaration only"; anything else is a
        # Python function to compile as a definition
        is_func_def = not isinstance(name_or_func, basestring)
        if is_func_def:
            func = name_or_func
            # qualify the generated symbol with the defining module's name
            namespace = func.func_globals['__name__']
            realname = '.'.join([namespace, func.__name__])
        else:
            name = name_or_func
            realname = name
        # workaround for boolean return type
        is_ret_bool = False
        if ret is types.Bool:
            # Change return type to 8-bit int
            retty = LLVMType(types.Int8)
            is_ret_bool = True
            logger.warning('Using workaround (change to Int8) for boolean return type.')
        else:
            retty = LLVMType(ret)
        # workaround for boolean argument type
        argtys = []
        count_converted_boolean = 0
        for arg in args:
            if arg is types.Bool:
                argtys.append(LLVMType(types.Int8))
                count_converted_boolean += 1
            else:
                argtys.append(LLVMType(arg))
        else:
            # for/else with no break: this branch always runs after the loop
            if count_converted_boolean:
                logger.warning('Using workaround (changed to Int8) for boolean argument type.')
        fn_decl = self.jit_engine.make_function(
            realname,
            retty.type(),
            map(lambda X: X.type(), argtys),
        )
        if fn_decl.name() != realname:
            # presumably the engine renamed the symbol (e.g. collision);
            # treat any renaming as an error
            raise NameError(
                'Generated function has a different name: %s'%(
                    fn_decl.name()))
        if is_func_def:
            if is_ret_bool:
                return LLVMFuncDef_BoolRet(func, retty, argtys, self, fn_decl)
            else:
                return LLVMFuncDef(func, retty, argtys, self, fn_decl)
        else:
            return LLVMFuncDecl(retty, argtys, self, fn_decl)
    def new_function(self, func, ret, args):
        # compile a Python function into this module
        return self._new_func_def_or_decl(ret, args, func)
    def new_declaration(self, realname, ret, args):
        # declare an externally-defined function by name
        return self._new_func_def_or_decl(ret, args, realname) | 0.462473 | 0.103794
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class VFE_Clas(nn.Layer):
def __init__(self, num_classes=16, max_points=1024):
super(VFE_Clas, self).__init__()
self.vfe = VFE(max_points=max_points)
self.fc = self.fc = nn.Sequential(
nn.Linear(max_points, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Dropout(p=0.7),
nn.Linear(256, num_classes)
)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, 3, N]
Return:
x: predicts, [B, num_classes]
"""
x = paddle.to_tensor(inputs)
x = self.vfe(x)
x = self.fc(x)
return x
class ConvBNReLU(nn.Layer):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, **kwargs):
super().__init__()
self._conv = nn.Conv1D(in_channels, out_channels, kernel_size, stride, padding=padding, **kwargs)
self._batch_norm = nn.BatchNorm(out_channels)
def forward(self, x):
x = self._conv(x)
x = self._batch_norm(x)
x = F.relu(x)
return x
class PointNet_Basic(nn.Layer):
def __init__(self, in_channels, out_channels):
super(PointNet_Basic, self).__init__()
self.mlp_1 = nn.Sequential(
ConvBNReLU(in_channels, 64, 1),
ConvBNReLU(64, 64, 1)
)
self.mlp_2 = nn.Sequential(
ConvBNReLU(64, 64, 1),
ConvBNReLU(64, 128, 1),
ConvBNReLU(128, out_channels, 1)
)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, in_channels, N]
Return:
x: points feature, [B, out_channels, N]
"""
x = self.mlp_1(inputs)
x = self.mlp_2(x)
return x
class VFE(nn.Layer):
def __init__(self, feature_channels=256, max_points=1024):
super(VFE, self).__init__()
self.max_points = max_points
self.pointnet_1 = PointNet_Basic(3, feature_channels)
self.pointnet_2 = PointNet_Basic(feature_channels*2, max_points)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, 3, N]
Return:
x: points feature, [B, C', N]
"""
x1 = self.pointnet_1(inputs)
x2 = paddle.max(x1, axis=-1, keepdim=True)
x2 = paddle.tile(x2, [1, 1, self.max_points])
x = paddle.concat([x1, x2], axis=1)
x = self.pointnet_2(x)
x = paddle.max(x, axis=-1)
return x
if __name__ == '__main__':
model = VFE_Clas()
paddle.summary(model, (64, 3, 1024)) | PAPC/models/classify/vfe/vfe.py | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class VFE_Clas(nn.Layer):
def __init__(self, num_classes=16, max_points=1024):
super(VFE_Clas, self).__init__()
self.vfe = VFE(max_points=max_points)
self.fc = self.fc = nn.Sequential(
nn.Linear(max_points, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Dropout(p=0.7),
nn.Linear(256, num_classes)
)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, 3, N]
Return:
x: predicts, [B, num_classes]
"""
x = paddle.to_tensor(inputs)
x = self.vfe(x)
x = self.fc(x)
return x
class ConvBNReLU(nn.Layer):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, **kwargs):
super().__init__()
self._conv = nn.Conv1D(in_channels, out_channels, kernel_size, stride, padding=padding, **kwargs)
self._batch_norm = nn.BatchNorm(out_channels)
def forward(self, x):
x = self._conv(x)
x = self._batch_norm(x)
x = F.relu(x)
return x
class PointNet_Basic(nn.Layer):
def __init__(self, in_channels, out_channels):
super(PointNet_Basic, self).__init__()
self.mlp_1 = nn.Sequential(
ConvBNReLU(in_channels, 64, 1),
ConvBNReLU(64, 64, 1)
)
self.mlp_2 = nn.Sequential(
ConvBNReLU(64, 64, 1),
ConvBNReLU(64, 128, 1),
ConvBNReLU(128, out_channels, 1)
)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, in_channels, N]
Return:
x: points feature, [B, out_channels, N]
"""
x = self.mlp_1(inputs)
x = self.mlp_2(x)
return x
class VFE(nn.Layer):
def __init__(self, feature_channels=256, max_points=1024):
super(VFE, self).__init__()
self.max_points = max_points
self.pointnet_1 = PointNet_Basic(3, feature_channels)
self.pointnet_2 = PointNet_Basic(feature_channels*2, max_points)
def forward(self, inputs):
"""
Input:
inputs: input points data, [B, 3, N]
Return:
x: points feature, [B, C', N]
"""
x1 = self.pointnet_1(inputs)
x2 = paddle.max(x1, axis=-1, keepdim=True)
x2 = paddle.tile(x2, [1, 1, self.max_points])
x = paddle.concat([x1, x2], axis=1)
x = self.pointnet_2(x)
x = paddle.max(x, axis=-1)
return x
if __name__ == '__main__':
model = VFE_Clas()
paddle.summary(model, (64, 3, 1024)) | 0.918781 | 0.368292 |
from authlib.integrations.flask_client import OAuth
from authlib.integrations.flask_oauth2 import (
AuthorizationServer, ResourceProtector)
from authlib.integrations.sqla_oauth2 import (
create_query_client_func,
create_save_token_func,
create_bearer_token_validator,
)
from .models import OAuth2Token, OAuth2Client, AuthorizationCodeGrant, OpenIDCode
from .models import auth_db
from .providers_config import providers_config
oauth = OAuth()
authorization = AuthorizationServer()
require_oauth = ResourceProtector()
def initiate_providers(self_name):
"""Read and register the openid connect provider of the current directory
The provider information is preconfigured in the 'providers_config.py' file, using the current directory(client)'s
name as the first level key
"""
if self_name not in providers_config:
return
for provider_name in providers_config[self_name]:
oauth.register(name=provider_name, **providers_config[self_name][provider_name])
def config_oauth(app):
"""Initialize authorization server, and register suportted authorization grant types
For more information, please refer to https://docs.authlib.org/en/latest/flask/2/authorization-server.html#server
"""
query_client = create_query_client_func(auth_db.session, OAuth2Client)
save_token = save_token = create_save_token_func(auth_db.session, OAuth2Token)
authorization.init_app(
app,
query_client=query_client,
save_token=save_token
)
# Register Authorization code grant types
authorization.register_grant(AuthorizationCodeGrant, [
OpenIDCode(require_nonce=False),
])
# protect resource
bearer_cls = create_bearer_token_validator(auth_db.session, OAuth2Token)
require_oauth.register_token_validator(bearer_cls())
"""
The following method can be used when creating a OAuth instance, when additional access token
is needed, the Authlib library will use this method to
def fetch_token(name):
# Authlib library helper function, used to retrieve access token relating to current user, issued by 'name'
token = OAuth2Token.query.filter_by(
name = name,
# current_user is the proxy variable to access logged in user
user_id = current_user.get_user_id()
).first()
return token.to_token()
oauth = OAuth(fetch_token = fetch_token)
""" | Droit/auth/oauth2.py | from authlib.integrations.flask_client import OAuth
from authlib.integrations.flask_oauth2 import (
AuthorizationServer, ResourceProtector)
from authlib.integrations.sqla_oauth2 import (
create_query_client_func,
create_save_token_func,
create_bearer_token_validator,
)
from .models import OAuth2Token, OAuth2Client, AuthorizationCodeGrant, OpenIDCode
from .models import auth_db
from .providers_config import providers_config
oauth = OAuth()
authorization = AuthorizationServer()
require_oauth = ResourceProtector()
def initiate_providers(self_name):
"""Read and register the openid connect provider of the current directory
The provider information is preconfigured in the 'providers_config.py' file, using the current directory(client)'s
name as the first level key
"""
if self_name not in providers_config:
return
for provider_name in providers_config[self_name]:
oauth.register(name=provider_name, **providers_config[self_name][provider_name])
def config_oauth(app):
"""Initialize authorization server, and register suportted authorization grant types
For more information, please refer to https://docs.authlib.org/en/latest/flask/2/authorization-server.html#server
"""
query_client = create_query_client_func(auth_db.session, OAuth2Client)
save_token = save_token = create_save_token_func(auth_db.session, OAuth2Token)
authorization.init_app(
app,
query_client=query_client,
save_token=save_token
)
# Register Authorization code grant types
authorization.register_grant(AuthorizationCodeGrant, [
OpenIDCode(require_nonce=False),
])
# protect resource
bearer_cls = create_bearer_token_validator(auth_db.session, OAuth2Token)
require_oauth.register_token_validator(bearer_cls())
"""
The following method can be used when creating a OAuth instance, when additional access token
is needed, the Authlib library will use this method to
def fetch_token(name):
# Authlib library helper function, used to retrieve access token relating to current user, issued by 'name'
token = OAuth2Token.query.filter_by(
name = name,
# current_user is the proxy variable to access logged in user
user_id = current_user.get_user_id()
).first()
return token.to_token()
oauth = OAuth(fetch_token = fetch_token)
""" | 0.544317 | 0.120439 |
import sys
def get_input_data_as_list(file_name):
"""
Reads in data from the given file and returns them as list
with one entry per line and whitespaced trimmed
"""
with open(file_name) as input_file:
#data_list = list(input_file.readlines())
#data_list = list(map(list, input_file.readlines()))
data_list = input_file.readlines()
data_list = [str.strip(line) for line in data_list]
return data_list
def calculate(value, operand, next_value):
#print(f"\tOperation: {value} {operand} {next_value}")
result = 0
if operand == '+':
result = value + next_value
elif operand == '*':
result = value * next_value
return result
def evaluate(expression_string):
expression_list = list(expression_string)
expression_list.reverse()
values = {}
operands = {}
level = 0
#values[level] = 1
#operands[level] = '*'
while expression_list:
char = expression_list.pop()
#print(f"In: {char}")
if char == ' ':
pass
elif char == '+' or char == '*':
operands[level] = char
elif char == '(':
level += 1
elif char == ')':
level -= 1
if level in operands and level in values:
values[level] = calculate(values[level], operands[level], values[level+1])
else:
values[level] = values[level+1]
del values[level+1]
del operands[level+1]
#print(f"\tVal: {values[level]}")
else:
if level in operands and level in values:
values[level] = calculate(values[level], operands[level], int(char))
else:
values[level] = int(char)
#print(f"\tVal: {values[level]}")
return values[level]
def evaluate2(expression_string):
expression_list = list(expression_string)
expression_list.reverse()
values = {0:[]}
operands = {}
level = 0
#values[level] = 1
#operands[level] = '*'
while expression_list:
char = expression_list.pop()
#print(f"In: {char}")
if char == ' ':
pass
elif char == '+' or char == '*':
operands[level] = char
elif char == '(':
level += 1
values[level] = []
elif char == ')':
#print(f"\tLvl: {level} Val: {values[level]}")
level_result = 1
for value in values[level]:
level_result *= value
level -= 1
if level in operands and level in values:
#values[level] = calculate(values[level], operands[level], values[level+1])
if operands[level] == '+':
values[level][-1] = calculate(values[level][-1], operands[level], level_result)
else:
values[level].append(level_result)
else:
values[level].append(level_result)
del values[level+1]
del operands[level+1]
else:
if level in operands and level in values:
if operands[level] == '+':
values[level][-1] = calculate(values[level][-1], operands[level], int(char))
else:
values[level].append(int(char))
else:
values[level].append(int(char))
#print(f"\tVal: {values[level]}")
#print(values)
#print(operands)
result = 1
for value in values[0]:
result *= value
return result
homework = get_input_data_as_list(sys.argv[1])
print(f"Result: {evaluate2(homework[0])}")
results = []
for line, expression in enumerate(homework):
results.append(evaluate2(expression))
print(f"#{line}: {expression} = {results[-1]}")
print(f"The sum of all expressions is: {sum(results)}") | day18/python/smarkwart/day18.py | import sys
def get_input_data_as_list(file_name):
"""
Reads in data from the given file and returns them as list
with one entry per line and whitespaced trimmed
"""
with open(file_name) as input_file:
#data_list = list(input_file.readlines())
#data_list = list(map(list, input_file.readlines()))
data_list = input_file.readlines()
data_list = [str.strip(line) for line in data_list]
return data_list
def calculate(value, operand, next_value):
#print(f"\tOperation: {value} {operand} {next_value}")
result = 0
if operand == '+':
result = value + next_value
elif operand == '*':
result = value * next_value
return result
def evaluate(expression_string):
expression_list = list(expression_string)
expression_list.reverse()
values = {}
operands = {}
level = 0
#values[level] = 1
#operands[level] = '*'
while expression_list:
char = expression_list.pop()
#print(f"In: {char}")
if char == ' ':
pass
elif char == '+' or char == '*':
operands[level] = char
elif char == '(':
level += 1
elif char == ')':
level -= 1
if level in operands and level in values:
values[level] = calculate(values[level], operands[level], values[level+1])
else:
values[level] = values[level+1]
del values[level+1]
del operands[level+1]
#print(f"\tVal: {values[level]}")
else:
if level in operands and level in values:
values[level] = calculate(values[level], operands[level], int(char))
else:
values[level] = int(char)
#print(f"\tVal: {values[level]}")
return values[level]
def evaluate2(expression_string):
expression_list = list(expression_string)
expression_list.reverse()
values = {0:[]}
operands = {}
level = 0
#values[level] = 1
#operands[level] = '*'
while expression_list:
char = expression_list.pop()
#print(f"In: {char}")
if char == ' ':
pass
elif char == '+' or char == '*':
operands[level] = char
elif char == '(':
level += 1
values[level] = []
elif char == ')':
#print(f"\tLvl: {level} Val: {values[level]}")
level_result = 1
for value in values[level]:
level_result *= value
level -= 1
if level in operands and level in values:
#values[level] = calculate(values[level], operands[level], values[level+1])
if operands[level] == '+':
values[level][-1] = calculate(values[level][-1], operands[level], level_result)
else:
values[level].append(level_result)
else:
values[level].append(level_result)
del values[level+1]
del operands[level+1]
else:
if level in operands and level in values:
if operands[level] == '+':
values[level][-1] = calculate(values[level][-1], operands[level], int(char))
else:
values[level].append(int(char))
else:
values[level].append(int(char))
#print(f"\tVal: {values[level]}")
#print(values)
#print(operands)
result = 1
for value in values[0]:
result *= value
return result
homework = get_input_data_as_list(sys.argv[1])
print(f"Result: {evaluate2(homework[0])}")
results = []
for line, expression in enumerate(homework):
results.append(evaluate2(expression))
print(f"#{line}: {expression} = {results[-1]}")
print(f"The sum of all expressions is: {sum(results)}") | 0.083304 | 0.558207 |
import pytest
from kandelero import Candlestick
from kandelero.context import Bottom, MarketContext, TimeFrame, Top
from kandelero.patterns.comparators import is_bear_trap, is_bull_trap
def bottoms():
return
def test_is_bull_trap():
# EURUSD - FIFTEEN_MINUTES
previous = Candlestick(
open=1.13737,
high=1.13825,
low=1.13730,
close=1.13781,
timestamp="2021-11-30T14:45:00",
)
current = Candlestick(
open=1.13778,
high=1.13825,
low=1.13658,
close=1.13722,
timestamp="2021-11-30T15:00:00",
)
market_context = MarketContext(
symbol="EURUSD",
tops=[
Top(
value=1.13737,
value_range=(),
timeframe=TimeFrame.FIFTEEN_MINUTES,
candlestick=Candlestick(
open=1.13695,
high=1.13737,
low=1.13673,
close=1.13685,
timestamp="2021-11-18T18:00:00",
),
),
],
bottoms=[],
)
assert is_bull_trap(
previous=previous, current=current, market_context=market_context
)
def test_is_bear_trap():
# EURGBP - ONE_MINUTE
previous = Candlestick(
open=0.84984,
high=0.84987,
low=0.84979,
close=0.84982,
timestamp="2021-12-01T07:40:00",
)
current = Candlestick(
open=0.84982,
high=0.84990,
low=0.84981,
close=0.84987,
timestamp="2021-12-01T07:41:00",
)
market_context = MarketContext(
symbol="EURGBP",
tops=[],
bottoms=[
Bottom(
value=0.84981,
value_range=(),
timeframe=TimeFrame.FIFTEEN_MINUTES,
candlestick=Candlestick(
open=0.84992,
high=0.85112,
low=0.84981,
close=0.85109,
timestamp="2021-11-30T10:30:00",
),
),
],
)
assert is_bear_trap(
previous=previous, current=current, market_context=market_context
) | tests/test_is_trap.py | import pytest
from kandelero import Candlestick
from kandelero.context import Bottom, MarketContext, TimeFrame, Top
from kandelero.patterns.comparators import is_bear_trap, is_bull_trap
def bottoms():
return
def test_is_bull_trap():
# EURUSD - FIFTEEN_MINUTES
previous = Candlestick(
open=1.13737,
high=1.13825,
low=1.13730,
close=1.13781,
timestamp="2021-11-30T14:45:00",
)
current = Candlestick(
open=1.13778,
high=1.13825,
low=1.13658,
close=1.13722,
timestamp="2021-11-30T15:00:00",
)
market_context = MarketContext(
symbol="EURUSD",
tops=[
Top(
value=1.13737,
value_range=(),
timeframe=TimeFrame.FIFTEEN_MINUTES,
candlestick=Candlestick(
open=1.13695,
high=1.13737,
low=1.13673,
close=1.13685,
timestamp="2021-11-18T18:00:00",
),
),
],
bottoms=[],
)
assert is_bull_trap(
previous=previous, current=current, market_context=market_context
)
def test_is_bear_trap():
# EURGBP - ONE_MINUTE
previous = Candlestick(
open=0.84984,
high=0.84987,
low=0.84979,
close=0.84982,
timestamp="2021-12-01T07:40:00",
)
current = Candlestick(
open=0.84982,
high=0.84990,
low=0.84981,
close=0.84987,
timestamp="2021-12-01T07:41:00",
)
market_context = MarketContext(
symbol="EURGBP",
tops=[],
bottoms=[
Bottom(
value=0.84981,
value_range=(),
timeframe=TimeFrame.FIFTEEN_MINUTES,
candlestick=Candlestick(
open=0.84992,
high=0.85112,
low=0.84981,
close=0.85109,
timestamp="2021-11-30T10:30:00",
),
),
],
)
assert is_bear_trap(
previous=previous, current=current, market_context=market_context
) | 0.518302 | 0.286821 |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from robot_brain_flexbe_states.launch_video_stream_state import LaunchVideoStream
from robot_brain_flexbe_states.launch_face_server import LaunchFaceServer
from robot_brain_flexbe_states.launch_face_det_track_state import FaceDetTrack
from flexbe_states.wait_state import WaitState
from robot_brain_flexbe_states.stop_face_detect_and_track import StopFaceDetectAndTrack
from robot_brain_flexbe_states.stop_face_server import StopFaceServer
from robot_brain_flexbe_states.stop_camera_stream import StopCameraStream
from robot_brain_flexbe_states.identify_state import IdentifyState
from robot_brain_flexbe_states.talk_state import TalkState
from flexbe_states.subscriber_state import SubscriberState
from robot_brain_flexbe_states.check_simple_string import WordCheckingStringState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed Aug 14 2019
@author: <NAME>
'''
class Identify_and_open_doorSM(Behavior):
'''
open day through face recognition
'''
def __init__(self):
super(Identify_and_open_doorSM, self).__init__()
self.name = 'Identify_and_open_door'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:30 y:344, x:202 y:205
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:53 y:52
OperatableStateMachine.add('launch vid',
LaunchVideoStream(vid_input_num=0),
transitions={'continue': 'launch faceserv', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:305 y:35
OperatableStateMachine.add('launch faceserv',
LaunchFaceServer(),
transitions={'continue': 'facetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:508 y:38
OperatableStateMachine.add('facetrk',
FaceDetTrack(),
transitions={'continue': 'commence', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1085 y:460
OperatableStateMachine.add('8',
WaitState(wait_time=6),
transitions={'done': 'subnamestring'},
autonomy={'done': Autonomy.Off})
# x:124 y:526
OperatableStateMachine.add('stopfacetrk',
StopFaceDetectAndTrack(),
transitions={'continue': 'stopfacesrv', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:125 y:422
OperatableStateMachine.add('stopfacesrv',
StopFaceServer(),
transitions={'continue': 'stopvid', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:134 y:283
OperatableStateMachine.add('stopvid',
StopCameraStream(),
transitions={'continue': 'finished', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1041 y:215
OperatableStateMachine.add('identify',
IdentifyState(),
transitions={'continue': 'hat', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:837 y:91
OperatableStateMachine.add('commence',
TalkState(sentence_number=17),
transitions={'continue': 'identify', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:916 y:471
OperatableStateMachine.add('subnamestring',
SubscriberState(topic='/identified_people_string_name', blocking=True, clear=False),
transitions={'received': 'one', 'unavailable': 'failed'},
autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
remapping={'message': 'message_name'})
# x:766 y:474
OperatableStateMachine.add('one',
WaitState(wait_time=1),
transitions={'done': 'chkstrng'},
autonomy={'done': Autonomy.Off})
# x:577 y:490
OperatableStateMachine.add('chkstrng',
WordCheckingStringState(key_word="<NAME>"),
transitions={'found': 'yes', 'not_found': 'no'},
autonomy={'found': Autonomy.Off, 'not_found': Autonomy.Off},
remapping={'input_value': 'message_name'})
# x:381 y:586
OperatableStateMachine.add('yes',
TalkState(sentence_number=18),
transitions={'continue': 'stopfacetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:404 y:396
OperatableStateMachine.add('no',
TalkState(sentence_number=19),
transitions={'continue': 'stopfacetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1055 y:323
OperatableStateMachine.add('hat',
TalkState(sentence_number=20),
transitions={'continue': '8', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | robot_brain_flexbe_behaviors/src/robot_brain_flexbe_behaviors/identify_and_open_door_sm.py |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from robot_brain_flexbe_states.launch_video_stream_state import LaunchVideoStream
from robot_brain_flexbe_states.launch_face_server import LaunchFaceServer
from robot_brain_flexbe_states.launch_face_det_track_state import FaceDetTrack
from flexbe_states.wait_state import WaitState
from robot_brain_flexbe_states.stop_face_detect_and_track import StopFaceDetectAndTrack
from robot_brain_flexbe_states.stop_face_server import StopFaceServer
from robot_brain_flexbe_states.stop_camera_stream import StopCameraStream
from robot_brain_flexbe_states.identify_state import IdentifyState
from robot_brain_flexbe_states.talk_state import TalkState
from flexbe_states.subscriber_state import SubscriberState
from robot_brain_flexbe_states.check_simple_string import WordCheckingStringState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed Aug 14 2019
@author: <NAME>
'''
class Identify_and_open_doorSM(Behavior):
'''
open day through face recognition
'''
def __init__(self):
super(Identify_and_open_doorSM, self).__init__()
self.name = 'Identify_and_open_door'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:30 y:344, x:202 y:205
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:53 y:52
OperatableStateMachine.add('launch vid',
LaunchVideoStream(vid_input_num=0),
transitions={'continue': 'launch faceserv', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:305 y:35
OperatableStateMachine.add('launch faceserv',
LaunchFaceServer(),
transitions={'continue': 'facetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:508 y:38
OperatableStateMachine.add('facetrk',
FaceDetTrack(),
transitions={'continue': 'commence', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1085 y:460
OperatableStateMachine.add('8',
WaitState(wait_time=6),
transitions={'done': 'subnamestring'},
autonomy={'done': Autonomy.Off})
# x:124 y:526
OperatableStateMachine.add('stopfacetrk',
StopFaceDetectAndTrack(),
transitions={'continue': 'stopfacesrv', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:125 y:422
OperatableStateMachine.add('stopfacesrv',
StopFaceServer(),
transitions={'continue': 'stopvid', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:134 y:283
OperatableStateMachine.add('stopvid',
StopCameraStream(),
transitions={'continue': 'finished', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1041 y:215
OperatableStateMachine.add('identify',
IdentifyState(),
transitions={'continue': 'hat', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:837 y:91
OperatableStateMachine.add('commence',
TalkState(sentence_number=17),
transitions={'continue': 'identify', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:916 y:471
OperatableStateMachine.add('subnamestring',
SubscriberState(topic='/identified_people_string_name', blocking=True, clear=False),
transitions={'received': 'one', 'unavailable': 'failed'},
autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
remapping={'message': 'message_name'})
# x:766 y:474
OperatableStateMachine.add('one',
WaitState(wait_time=1),
transitions={'done': 'chkstrng'},
autonomy={'done': Autonomy.Off})
# x:577 y:490
OperatableStateMachine.add('chkstrng',
WordCheckingStringState(key_word="<NAME>"),
transitions={'found': 'yes', 'not_found': 'no'},
autonomy={'found': Autonomy.Off, 'not_found': Autonomy.Off},
remapping={'input_value': 'message_name'})
# x:381 y:586
OperatableStateMachine.add('yes',
TalkState(sentence_number=18),
transitions={'continue': 'stopfacetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:404 y:396
OperatableStateMachine.add('no',
TalkState(sentence_number=19),
transitions={'continue': 'stopfacetrk', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
# x:1055 y:323
OperatableStateMachine.add('hat',
TalkState(sentence_number=20),
transitions={'continue': '8', 'failed': 'failed'},
autonomy={'continue': Autonomy.Off, 'failed': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC] | 0.42477 | 0.227641 |
import ddapp.vtkAll as vtk
import ddapp.thirdparty.numpyjsoncoder as nje
from collections import OrderedDict
from ddapp import fieldcontainer
from ddapp import transformUtils
from ddapp import lcmUtils
from ddapp.utime import getUtime
import drc as lcmdrc
import pprint
import json
class ConstraintEncoder(nje.NumpyEncoder):
def default(self, obj):
if isinstance(obj, vtk.vtkTransform):
pos, quat = transformUtils.poseFromTransform(obj)
return OrderedDict(position=pos, quaternion=quat)
elif isinstance(obj, fieldcontainer.FieldContainer):
d = OrderedDict()
d['class'] = type(obj).__name__
for key in obj._fields:
d[key] = getattr(obj, key)
return d
return nje.NumpyEncoder.default(self, obj)
def ConstraintDecoder(dct):
return nje.NumpyDecoder(dct)
def encodeConstraints(dataObj, **kwargs):
return json.dumps(dataObj, cls=ConstraintEncoder, **kwargs)
def decodeConstraints(dataStream):
return json.loads(dataStream, object_hook=ConstraintDecoder)
def getPlanPoses(constraints, ikPlanner):
'''
Given a list of constraints, returns a dictionary of poses containing all
the poses that are references by the constraints by name
'''
poses = sorted([c.postureName for c in constraints if hasattr(c, 'postureName')])
poses = {poseName:list(ikPlanner.jointController.getPose(poseName)) for poseName in poses}
return poses
class IKConstraintEncoder(object):
def __init__(self,ikPlanner):
self.ikPlanner = ikPlanner
def publishConstraints(self,constraints,messageName='PLANNER_REQUEST'):
poses = getPlanPoses(constraints, self.ikPlanner)
#poseJsonStr = json.dumps(poses, indent=4)
#constraintsJsonStr = encodeConstraints(constraints, indent=4)
poseJsonStr = json.dumps(poses)
constraintsJsonStr = encodeConstraints(constraints)
msg = lcmdrc.planner_request_t()
msg.utime = getUtime()
msg.poses = poseJsonStr
msg.constraints = constraintsJsonStr
lcmUtils.publish(messageName, msg)
def decodeConstraints(self,dataStream):
return decodeConstraints(dataStream) | externals/director/src/python/ddapp/ikconstraintencoder.py | import ddapp.vtkAll as vtk
import ddapp.thirdparty.numpyjsoncoder as nje
from collections import OrderedDict
from ddapp import fieldcontainer
from ddapp import transformUtils
from ddapp import lcmUtils
from ddapp.utime import getUtime
import drc as lcmdrc
import pprint
import json
class ConstraintEncoder(nje.NumpyEncoder):
def default(self, obj):
if isinstance(obj, vtk.vtkTransform):
pos, quat = transformUtils.poseFromTransform(obj)
return OrderedDict(position=pos, quaternion=quat)
elif isinstance(obj, fieldcontainer.FieldContainer):
d = OrderedDict()
d['class'] = type(obj).__name__
for key in obj._fields:
d[key] = getattr(obj, key)
return d
return nje.NumpyEncoder.default(self, obj)
def ConstraintDecoder(dct):
return nje.NumpyDecoder(dct)
def encodeConstraints(dataObj, **kwargs):
return json.dumps(dataObj, cls=ConstraintEncoder, **kwargs)
def decodeConstraints(dataStream):
return json.loads(dataStream, object_hook=ConstraintDecoder)
def getPlanPoses(constraints, ikPlanner):
'''
Given a list of constraints, returns a dictionary of poses containing all
the poses that are references by the constraints by name
'''
poses = sorted([c.postureName for c in constraints if hasattr(c, 'postureName')])
poses = {poseName:list(ikPlanner.jointController.getPose(poseName)) for poseName in poses}
return poses
class IKConstraintEncoder(object):
def __init__(self,ikPlanner):
self.ikPlanner = ikPlanner
def publishConstraints(self,constraints,messageName='PLANNER_REQUEST'):
poses = getPlanPoses(constraints, self.ikPlanner)
#poseJsonStr = json.dumps(poses, indent=4)
#constraintsJsonStr = encodeConstraints(constraints, indent=4)
poseJsonStr = json.dumps(poses)
constraintsJsonStr = encodeConstraints(constraints)
msg = lcmdrc.planner_request_t()
msg.utime = getUtime()
msg.poses = poseJsonStr
msg.constraints = constraintsJsonStr
lcmUtils.publish(messageName, msg)
def decodeConstraints(self,dataStream):
return decodeConstraints(dataStream) | 0.5564 | 0.139954 |
import xgboost as xgb
#hyperoptで使用するためのXGBOOSTモデルのクラス
class Model:
#初期設定メソッド
def __init__(self, params=None):
self.model = None
if params is None:
self.params = {}
else:
self.params = params
#学習メソッド
def fit(self, tr_x, tr_y, va_x, va_y):
params = {
#'booster': 'gbtree',
'objective': 'reg:squarederror',
'eta': 0.1,
'gamma': 0.0,
'alpha': 0.0,
'lambda': 1.0,
'min_child_weight': 1,
'max_depth': 5,
'subsample': 0.8,
'colsample_bytree': 0.8,
'random_state': 71,
}
params.update(self.params)
num_round = 20
dtrain = xgb.DMatrix(tr_x, label=tr_y)
dvalid = xgb.DMatrix(va_x, label=va_y)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
self.model = xgb.train(params, dtrain, num_round, early_stopping_rounds=10, evals=watchlist)
#予測メソッド
def predict(self, x):
data = xgb.DMatrix(x)
pred = self.model.predict(data)
return pred
#必要なライブラリをインポート
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import xgboost as xgb
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.metrics import mean_squared_error
#hyperoptを使ったパラメータ探索
def score(params):
# パラメータを与えたときに最小化する評価指標を指定する
# 具体的には、モデルにパラメータを指定して学習・予測させた場合のスコアを返すようにする
# max_depthの型を整数型に修正する
params['max_depth'] = int(params['max_depth'])
# Modelクラスは、fitで学習し、predictで予測値の確率を出力する
model = Model(params)
model.fit(tr_x, tr_y, va_x, va_y)
va_pred = model.predict(va_x)
score = np.sqrt(mean_squared_error(va_y, va_pred)) #rmseを最小化するようにパラメータをチューニング
print(f'params: {params}, rmse: {score:.4f}')
# 情報を記録しておく
history.append((params, score))
return {'loss': score, 'status': STATUS_OK}
#データ毎に最適なハイパーパラメータチューニングをhyperoptで行い、最も良かったパラメータを返す。
#Hyper_Pramater_Tuning_XGBOOST
def HPT_XGB(df_l):
train_x = df_l.drop(['賃料'], axis=1)
train_y = df_l['賃料']
# 学習データを学習データとバリデーションデータに分ける
kf = KFold(n_splits=4, shuffle=True, random_state=71)
tr_idx, va_idx = list(kf.split(train_x))[0]
global tr_x,va_x,tr_y,va_y #score関数で利用するため、グローバル変数として扱う。
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# hp.choiceでは、複数の選択肢から選ぶ
# hp.uniformでは、下限・上限を指定した一様分布から抽出する。引数は下限・上限
# hp.quniformでは、下限・上限を指定した一様分布のうち一定の間隔ごとの点から抽出する。引数は下限・上限・間隔
# hp.loguniformでは、下限・上限を指定した対数が一様分布に従う分布から抽出する。引数は下限・上限の対数をとった値
# 探索するパラメータの空間を指定
param_space = {
'max_depth': hp.quniform('max_depth', 3, 9, 1),
'min_child_weight': hp.loguniform('min_child_weight', np.log(0.1), np.log(10)),
'subsample': hp.quniform('subsample', 0.6, 0.95, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.6, 0.95, 0.05),
'gamma': hp.loguniform('gamma', np.log(1e-8), np.log(1.0)),
'alpha' : hp.loguniform('alpha', np.log(1e-8), np.log(1.0)),
'lambda' : hp.loguniform('lambda', np.log(1e-6), np.log(10.0)),
}
# hyperoptによるパラメータ探索の実行
max_evals = 100 #100回探索する。
trials = Trials()
global history #score関数で利用するため、グローバル変数として扱う。
history = []
fmin(score, param_space, algo=tpe.suggest, trials=trials, max_evals=max_evals)
# 記録した情報からパラメータとスコアを出力
#(trialsからも情報が取得できるが、パラメータの取得がやや行いづらい)
history = sorted(history, key=lambda tpl: tpl[1])
best = history[0]
print("\n",f'best params:{best[0]}, score:{best[1]:.4f}')
return best[0]
#HPT_XGB(学習データ)
best_params = HPT_XGB(df_l)
#HPT_XGB関数のmax_evalsやModelクラスのnum_roundsをいじれば、時間はかかるがより精度の高いチューニングが出来る。 | improved-ver1/3.Hyper_Pramater_Tuning_XGBOOST.py | import xgboost as xgb
#hyperoptで使用するためのXGBOOSTモデルのクラス
class Model:
#初期設定メソッド
def __init__(self, params=None):
self.model = None
if params is None:
self.params = {}
else:
self.params = params
#学習メソッド
def fit(self, tr_x, tr_y, va_x, va_y):
params = {
#'booster': 'gbtree',
'objective': 'reg:squarederror',
'eta': 0.1,
'gamma': 0.0,
'alpha': 0.0,
'lambda': 1.0,
'min_child_weight': 1,
'max_depth': 5,
'subsample': 0.8,
'colsample_bytree': 0.8,
'random_state': 71,
}
params.update(self.params)
num_round = 20
dtrain = xgb.DMatrix(tr_x, label=tr_y)
dvalid = xgb.DMatrix(va_x, label=va_y)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
self.model = xgb.train(params, dtrain, num_round, early_stopping_rounds=10, evals=watchlist)
#予測メソッド
def predict(self, x):
data = xgb.DMatrix(x)
pred = self.model.predict(data)
return pred
#必要なライブラリをインポート
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
import xgboost as xgb
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.metrics import mean_squared_error
#hyperoptを使ったパラメータ探索
def score(params):
# パラメータを与えたときに最小化する評価指標を指定する
# 具体的には、モデルにパラメータを指定して学習・予測させた場合のスコアを返すようにする
# max_depthの型を整数型に修正する
params['max_depth'] = int(params['max_depth'])
# Modelクラスは、fitで学習し、predictで予測値の確率を出力する
model = Model(params)
model.fit(tr_x, tr_y, va_x, va_y)
va_pred = model.predict(va_x)
score = np.sqrt(mean_squared_error(va_y, va_pred)) #rmseを最小化するようにパラメータをチューニング
print(f'params: {params}, rmse: {score:.4f}')
# 情報を記録しておく
history.append((params, score))
return {'loss': score, 'status': STATUS_OK}
#データ毎に最適なハイパーパラメータチューニングをhyperoptで行い、最も良かったパラメータを返す。
#Hyper_Pramater_Tuning_XGBOOST
def HPT_XGB(df_l):
train_x = df_l.drop(['賃料'], axis=1)
train_y = df_l['賃料']
# 学習データを学習データとバリデーションデータに分ける
kf = KFold(n_splits=4, shuffle=True, random_state=71)
tr_idx, va_idx = list(kf.split(train_x))[0]
global tr_x,va_x,tr_y,va_y #score関数で利用するため、グローバル変数として扱う。
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# hp.choiceでは、複数の選択肢から選ぶ
# hp.uniformでは、下限・上限を指定した一様分布から抽出する。引数は下限・上限
# hp.quniformでは、下限・上限を指定した一様分布のうち一定の間隔ごとの点から抽出する。引数は下限・上限・間隔
# hp.loguniformでは、下限・上限を指定した対数が一様分布に従う分布から抽出する。引数は下限・上限の対数をとった値
# 探索するパラメータの空間を指定
param_space = {
'max_depth': hp.quniform('max_depth', 3, 9, 1),
'min_child_weight': hp.loguniform('min_child_weight', np.log(0.1), np.log(10)),
'subsample': hp.quniform('subsample', 0.6, 0.95, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.6, 0.95, 0.05),
'gamma': hp.loguniform('gamma', np.log(1e-8), np.log(1.0)),
'alpha' : hp.loguniform('alpha', np.log(1e-8), np.log(1.0)),
'lambda' : hp.loguniform('lambda', np.log(1e-6), np.log(10.0)),
}
# hyperoptによるパラメータ探索の実行
max_evals = 100 #100回探索する。
trials = Trials()
global history #score関数で利用するため、グローバル変数として扱う。
history = []
fmin(score, param_space, algo=tpe.suggest, trials=trials, max_evals=max_evals)
# 記録した情報からパラメータとスコアを出力
#(trialsからも情報が取得できるが、パラメータの取得がやや行いづらい)
history = sorted(history, key=lambda tpl: tpl[1])
best = history[0]
print("\n",f'best params:{best[0]}, score:{best[1]:.4f}')
return best[0]
#HPT_XGB(学習データ)
best_params = HPT_XGB(df_l)
#HPT_XGB関数のmax_evalsやModelクラスのnum_roundsをいじれば、時間はかかるがより精度の高いチューニングが出来る。 | 0.450118 | 0.388067 |
# Author: <NAME>
# zachetie **at** gmail **dot* com
from outputC import *
def ADM_ID_function_string(gammaCartDD,KCartDD,alphaCart,betaCartU,BCartU):
returnstring = "void ADMCart_ID(FILE *out2D, double time, double xx0,double xx1,double xx2,double Cartxyz0,double Cartxyz1,double Cartxyz2,\n"
returnstring += "\tdouble hDD00,double hDD01,double hDD02,double hDD11,double hDD12,double hDD22,\n"
returnstring += "\tdouble aDD00,double aDD01,double aDD02,double aDD11,double aDD12,double aDD22,\n"
returnstring += "\tdouble trK,\n"
returnstring += "\tdouble lambdaU0,double lambdaU1,double lambdaU2,\n"
returnstring += "\tdouble vetU0,double vetU1,double vetU2,\n"
returnstring += "\tdouble betU0,double betU1,double betU2,\n"
returnstring += "\tdouble alpha,double cf, \n "
returnstring += "\tdouble uu ,double vv) {\n"
returnstring += "\tdouble gammaCartDD00;\n "
returnstring += "\tdouble gammaCartDD01;\n "
returnstring += "\tdouble gammaCartDD02;\n "
returnstring += "\tdouble gammaCartDD11;\n "
returnstring += "\tdouble gammaCartDD12;\n "
returnstring += "\tdouble gammaCartDD22;\n "
returnstring += "\tdouble KCartDD00;\n "
returnstring += "\tdouble KCartDD01;\n "
returnstring += "\tdouble KCartDD02;\n "
returnstring += "\tdouble KCartDD11;\n "
returnstring += "\tdouble KCartDD12;\n "
returnstring += "\tdouble KCartDD22;\n "
returnstring += "\tdouble betaCartU0;\n "
returnstring += "\tdouble betaCartU1;\n "
returnstring += "\tdouble betaCartU2;\n "
returnstring += "\tdouble BCartU0;\n "
returnstring += "\tdouble BCartU1;\n "
returnstring += "\tdouble BCartU2;\n "
returnstring += "\tdouble alphaCart;\n "
returnstring += outputC([gammaCartDD[0][0], gammaCartDD[0][1], gammaCartDD[0][2], gammaCartDD[1][1], gammaCartDD[1][2], gammaCartDD[2][2],
KCartDD[0][0], KCartDD[0][1], KCartDD[0][2], KCartDD[1][1], KCartDD[1][2], KCartDD[2][2],
betaCartU[0], betaCartU[1], betaCartU[2],
BCartU[0], BCartU[1], BCartU[2],
alphaCart],
["gammaCartDD00", "gammaCartDD01", "gammaCartDD02", "gammaCartDD11", "gammaCartDD12", "gammaCartDD22",
"KCartDD00", "KCartDD01", "KCartDD02", "KCartDD11", "KCartDD12", "KCartDD22",
"betaCartU0", "betaCartU1", "betaCartU2",
"BCartU0", "BCartU1", "BCartU2",
"alphaCart"], filename="returnstring",
params="preindent=1,CSE_enable=True,outCverbose=False", # outCverbose=False to prevent
# enormous output files.
prestring="", poststring="")
returnstring += ' fprintf(out2D,"%e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e \\n", \n '
returnstring += ' time, Cartxyz0, Cartxyz1, Cartxyz2, gammaCartDD00, gammaCartDD01, gammaCartDD02, gammaCartDD11, gammaCartDD12, gammaCartDD22, \n '
returnstring += ' KCartDD00, KCartDD01, KCartDD02, KCartDD11, KCartDD12, KCartDD22, \n '
returnstring += ' betaCartU0, betaCartU1, betaCartU2, \n '
returnstring += ' BCartU0, BCartU1, BCartU2, \n '
returnstring += ' alphaCart, \n'
returnstring += ' uu, vv ); \n '
returnstring += "}\n"
return returnstring | BSSN_SF/ADM_ID_function_string.py |
# Author: <NAME>
# zachetie **at** gmail **dot* com
from outputC import *
def ADM_ID_function_string(gammaCartDD,KCartDD,alphaCart,betaCartU,BCartU):
returnstring = "void ADMCart_ID(FILE *out2D, double time, double xx0,double xx1,double xx2,double Cartxyz0,double Cartxyz1,double Cartxyz2,\n"
returnstring += "\tdouble hDD00,double hDD01,double hDD02,double hDD11,double hDD12,double hDD22,\n"
returnstring += "\tdouble aDD00,double aDD01,double aDD02,double aDD11,double aDD12,double aDD22,\n"
returnstring += "\tdouble trK,\n"
returnstring += "\tdouble lambdaU0,double lambdaU1,double lambdaU2,\n"
returnstring += "\tdouble vetU0,double vetU1,double vetU2,\n"
returnstring += "\tdouble betU0,double betU1,double betU2,\n"
returnstring += "\tdouble alpha,double cf, \n "
returnstring += "\tdouble uu ,double vv) {\n"
returnstring += "\tdouble gammaCartDD00;\n "
returnstring += "\tdouble gammaCartDD01;\n "
returnstring += "\tdouble gammaCartDD02;\n "
returnstring += "\tdouble gammaCartDD11;\n "
returnstring += "\tdouble gammaCartDD12;\n "
returnstring += "\tdouble gammaCartDD22;\n "
returnstring += "\tdouble KCartDD00;\n "
returnstring += "\tdouble KCartDD01;\n "
returnstring += "\tdouble KCartDD02;\n "
returnstring += "\tdouble KCartDD11;\n "
returnstring += "\tdouble KCartDD12;\n "
returnstring += "\tdouble KCartDD22;\n "
returnstring += "\tdouble betaCartU0;\n "
returnstring += "\tdouble betaCartU1;\n "
returnstring += "\tdouble betaCartU2;\n "
returnstring += "\tdouble BCartU0;\n "
returnstring += "\tdouble BCartU1;\n "
returnstring += "\tdouble BCartU2;\n "
returnstring += "\tdouble alphaCart;\n "
returnstring += outputC([gammaCartDD[0][0], gammaCartDD[0][1], gammaCartDD[0][2], gammaCartDD[1][1], gammaCartDD[1][2], gammaCartDD[2][2],
KCartDD[0][0], KCartDD[0][1], KCartDD[0][2], KCartDD[1][1], KCartDD[1][2], KCartDD[2][2],
betaCartU[0], betaCartU[1], betaCartU[2],
BCartU[0], BCartU[1], BCartU[2],
alphaCart],
["gammaCartDD00", "gammaCartDD01", "gammaCartDD02", "gammaCartDD11", "gammaCartDD12", "gammaCartDD22",
"KCartDD00", "KCartDD01", "KCartDD02", "KCartDD11", "KCartDD12", "KCartDD22",
"betaCartU0", "betaCartU1", "betaCartU2",
"BCartU0", "BCartU1", "BCartU2",
"alphaCart"], filename="returnstring",
params="preindent=1,CSE_enable=True,outCverbose=False", # outCverbose=False to prevent
# enormous output files.
prestring="", poststring="")
returnstring += ' fprintf(out2D,"%e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e \\n", \n '
returnstring += ' time, Cartxyz0, Cartxyz1, Cartxyz2, gammaCartDD00, gammaCartDD01, gammaCartDD02, gammaCartDD11, gammaCartDD12, gammaCartDD22, \n '
returnstring += ' KCartDD00, KCartDD01, KCartDD02, KCartDD11, KCartDD12, KCartDD22, \n '
returnstring += ' betaCartU0, betaCartU1, betaCartU2, \n '
returnstring += ' BCartU0, BCartU1, BCartU2, \n '
returnstring += ' alphaCart, \n'
returnstring += ' uu, vv ); \n '
returnstring += "}\n"
return returnstring | 0.514888 | 0.285267 |
import os
import struct
from os.path import join
from zlib import crc32
from PIL import Image
class IconEntry(object):
def __init__(self, name, data):
self.name, self.data = name, data
self.crc = crc32(self.name) % (1 << 32)
self.offset = 0
def encode_image(path):
img = Image.open(path)
img = img.resize((16, 16))
pixels = list(img.getdata())
tiles = [pixels[i:i + 8] for i in xrange(0, len(pixels), 8)]
data = []
for tile in tiles:
bits = 0
mask = 1
for pixel in tile:
if type(pixel) == tuple:
pixel = sum(pixel)
if pixel > 110:
bits |= mask
mask <<= 1
data.append(bits)
return data
def decode_image(data):
pixels = []
for bits in data:
mask = 1
while mask < 0x100:
if(bits & mask):
pixels.append((0, 0, 0, 255))
else:
pixels.append((0, 0, 0, 0))
mask <<= 1
img = Image.new(mode='RGBA', size=(16, 16))
try:
img.putdata(pixels, scale=1.0, offset=0.0)
except:
pass
return img
def cname(name):
name = name.replace('-', '_')
name = name.upper()
return name
def pack_icons(iconsdir, path_bin, path_hdr, path_json):
files = [os.path.join(iconsdir, fn) for fn in os.listdir(iconsdir)]
entries = []
for filepath in files:
print 'encode:', filepath
data = encode_image(filepath)
icon_name = os.path.splitext(os.path.split(filepath)[1])[0]
icon = IconEntry(icon_name, data)
entries.append(icon)
sorted_entries = sorted(entries, cmp=lambda a, b: cmp(a.crc, b.crc))
with open(path_bin, 'wb') as out:
# Magic, section size, entry count, width, height
header = 'ICON'+struct.pack('IHBB', 0, len(entries), 16, 16)
out.write(header)
out.write('\x00'*4*len(entries))
for e in sorted_entries:
e.offset = out.tell()
out.write(struct.pack('B'*len(e.data), *e.data))
sec_size = out.tell()
out.seek(4, 0)
out.write(struct.pack('I', sec_size))
out.seek(len(header), 0)
for e in sorted_entries:
out.write(struct.pack('I', e.crc))
with open(path_hdr, 'w') as hdr:
hdr.write('\n'.join(
['#define ICON_'+cname(e.name)+' ('+hex(e.crc)+')' for e in entries]))
with open(path_json, 'w') as json:
json.write('{\n "const": {\n')
json.write(',\n'.join(
[' "'+cname(e.name)+'": '+'"ICON_'+cname(e.name)+'"' for e in entries]))
json.write('}\n}')
if __name__ == "__main__":
pack_icons('res/icons', 'iconfont.bin', 'iconfont.h', 'icons.json') | iconfont.py | import os
import struct
from os.path import join
from zlib import crc32
from PIL import Image
class IconEntry(object):
def __init__(self, name, data):
self.name, self.data = name, data
self.crc = crc32(self.name) % (1 << 32)
self.offset = 0
def encode_image(path):
img = Image.open(path)
img = img.resize((16, 16))
pixels = list(img.getdata())
tiles = [pixels[i:i + 8] for i in xrange(0, len(pixels), 8)]
data = []
for tile in tiles:
bits = 0
mask = 1
for pixel in tile:
if type(pixel) == tuple:
pixel = sum(pixel)
if pixel > 110:
bits |= mask
mask <<= 1
data.append(bits)
return data
def decode_image(data):
pixels = []
for bits in data:
mask = 1
while mask < 0x100:
if(bits & mask):
pixels.append((0, 0, 0, 255))
else:
pixels.append((0, 0, 0, 0))
mask <<= 1
img = Image.new(mode='RGBA', size=(16, 16))
try:
img.putdata(pixels, scale=1.0, offset=0.0)
except:
pass
return img
def cname(name):
name = name.replace('-', '_')
name = name.upper()
return name
def pack_icons(iconsdir, path_bin, path_hdr, path_json):
files = [os.path.join(iconsdir, fn) for fn in os.listdir(iconsdir)]
entries = []
for filepath in files:
print 'encode:', filepath
data = encode_image(filepath)
icon_name = os.path.splitext(os.path.split(filepath)[1])[0]
icon = IconEntry(icon_name, data)
entries.append(icon)
sorted_entries = sorted(entries, cmp=lambda a, b: cmp(a.crc, b.crc))
with open(path_bin, 'wb') as out:
# Magic, section size, entry count, width, height
header = 'ICON'+struct.pack('IHBB', 0, len(entries), 16, 16)
out.write(header)
out.write('\x00'*4*len(entries))
for e in sorted_entries:
e.offset = out.tell()
out.write(struct.pack('B'*len(e.data), *e.data))
sec_size = out.tell()
out.seek(4, 0)
out.write(struct.pack('I', sec_size))
out.seek(len(header), 0)
for e in sorted_entries:
out.write(struct.pack('I', e.crc))
with open(path_hdr, 'w') as hdr:
hdr.write('\n'.join(
['#define ICON_'+cname(e.name)+' ('+hex(e.crc)+')' for e in entries]))
with open(path_json, 'w') as json:
json.write('{\n "const": {\n')
json.write(',\n'.join(
[' "'+cname(e.name)+'": '+'"ICON_'+cname(e.name)+'"' for e in entries]))
json.write('}\n}')
if __name__ == "__main__":
pack_icons('res/icons', 'iconfont.bin', 'iconfont.h', 'icons.json') | 0.311322 | 0.187579 |
from typing import Tuple, Union
import cv2
import gym
import numpy as np
from gym.core import Wrapper
from gym.spaces import Box
class AtariPreprocessing(Wrapper):
"""
Implementation for Image preprocessing for Gym Atari environments.
Implements: 1) Frameskip 2) Grayscale 3) Downsampling to square image
:param env: Atari environment
:param frameskip: Number of steps between actions. \
E.g. frameskip=4 will mean 1 action will be taken for every 4 frames. It'll be\
a tuple
if non-deterministic and a random number will be chosen from (2, 5)
:param grayscale: Whether or not the output should be converted to \
grayscale
:param screen_size: Size of the output screen (square output)
:type env: Gym Environment
:type frameskip: tuple or int
:type grayscale: boolean
:type screen_size: int
"""
def __init__(
self,
env: gym.Env,
frameskip: Union[Tuple, int] = (2, 5),
grayscale: bool = True,
screen_size: int = 84,
):
super(AtariPreprocessing, self).__init__(env)
self.frameskip = frameskip
self.grayscale = grayscale
self.screen_size = screen_size
self.ale = self.env.unwrapped.ale
if isinstance(frameskip, int):
self.frameskip = (frameskip, frameskip + 1)
# Redefine observation space for Atari environments
if grayscale:
self.observation_space = Box(
low=0, high=255, shape=(screen_size, screen_size), dtype=np.uint8
)
else:
self.observation_space = Box(
low=0, high=255, shape=(screen_size, screen_size, 3), dtype=np.uint8
)
# Observation buffer to hold last two observations for max pooling
self._obs_buffer = [
np.empty(self.env.observation_space.shape[:2], dtype=np.uint8),
np.empty(self.env.observation_space.shape[:2], dtype=np.uint8),
]
# TODO(zeus3101) Add support for games with multiple lives
def step(self, action: np.ndarray) -> np.ndarray:
"""
Step through Atari environment for given action
:param action: Action taken by agent
:type action: NumPy array
:returns: Current state, reward(for frameskip number of actions), \
done, info
"""
frameskip = np.random.choice(range(*self.frameskip))
index = 0
reward = 0
for timestep in range(frameskip):
_, step_reward, done, info = self.env.step(action)
reward += step_reward
if done:
break
if timestep >= frameskip - 2:
self._get_screen(index)
index += 1
return self._get_obs(), reward, done, info
def reset(self) -> np.ndarray:
"""
Resets state of environment
:returns: Initial state
:rtype: NumPy array
"""
self.env.reset()
self._get_screen(0)
self._obs_buffer[1].fill(0)
return self._get_obs()
def _get_screen(self, index: int) -> None:
"""
Get the screen input given empty numpy array (from observation buffer)
:param index: Index of the observation buffer that needs to be updated
:type index: int
"""
if self.grayscale:
self.ale.getScreenGrayscale(self._obs_buffer[index])
else:
self.ale.getScreenRGB2(self._obs_buffer[index])
def _get_obs(self) -> np.ndarray:
"""
Performs max pooling on both states in observation buffer and \
resizes output to appropriate screen size.
:returns: Output observation in required format
:rtype: NumPy array
"""
np.maximum(self._obs_buffer[0], self._obs_buffer[1], out=self._obs_buffer[0])
obs = cv2.resize(
self._obs_buffer[0],
(self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA,
)
return np.array(obs, dtype=np.uint8) | genrl/environments/atari_preprocessing.py | from typing import Tuple, Union
import cv2
import gym
import numpy as np
from gym.core import Wrapper
from gym.spaces import Box
class AtariPreprocessing(Wrapper):
"""
Implementation for Image preprocessing for Gym Atari environments.
Implements: 1) Frameskip 2) Grayscale 3) Downsampling to square image
:param env: Atari environment
:param frameskip: Number of steps between actions. \
E.g. frameskip=4 will mean 1 action will be taken for every 4 frames. It'll be\
a tuple
if non-deterministic and a random number will be chosen from (2, 5)
:param grayscale: Whether or not the output should be converted to \
grayscale
:param screen_size: Size of the output screen (square output)
:type env: Gym Environment
:type frameskip: tuple or int
:type grayscale: boolean
:type screen_size: int
"""
def __init__(
self,
env: gym.Env,
frameskip: Union[Tuple, int] = (2, 5),
grayscale: bool = True,
screen_size: int = 84,
):
super(AtariPreprocessing, self).__init__(env)
self.frameskip = frameskip
self.grayscale = grayscale
self.screen_size = screen_size
self.ale = self.env.unwrapped.ale
if isinstance(frameskip, int):
self.frameskip = (frameskip, frameskip + 1)
# Redefine observation space for Atari environments
if grayscale:
self.observation_space = Box(
low=0, high=255, shape=(screen_size, screen_size), dtype=np.uint8
)
else:
self.observation_space = Box(
low=0, high=255, shape=(screen_size, screen_size, 3), dtype=np.uint8
)
# Observation buffer to hold last two observations for max pooling
self._obs_buffer = [
np.empty(self.env.observation_space.shape[:2], dtype=np.uint8),
np.empty(self.env.observation_space.shape[:2], dtype=np.uint8),
]
# TODO(zeus3101) Add support for games with multiple lives
def step(self, action: np.ndarray) -> np.ndarray:
"""
Step through Atari environment for given action
:param action: Action taken by agent
:type action: NumPy array
:returns: Current state, reward(for frameskip number of actions), \
done, info
"""
frameskip = np.random.choice(range(*self.frameskip))
index = 0
reward = 0
for timestep in range(frameskip):
_, step_reward, done, info = self.env.step(action)
reward += step_reward
if done:
break
if timestep >= frameskip - 2:
self._get_screen(index)
index += 1
return self._get_obs(), reward, done, info
def reset(self) -> np.ndarray:
"""
Resets state of environment
:returns: Initial state
:rtype: NumPy array
"""
self.env.reset()
self._get_screen(0)
self._obs_buffer[1].fill(0)
return self._get_obs()
def _get_screen(self, index: int) -> None:
"""
Get the screen input given empty numpy array (from observation buffer)
:param index: Index of the observation buffer that needs to be updated
:type index: int
"""
if self.grayscale:
self.ale.getScreenGrayscale(self._obs_buffer[index])
else:
self.ale.getScreenRGB2(self._obs_buffer[index])
def _get_obs(self) -> np.ndarray:
"""
Performs max pooling on both states in observation buffer and \
resizes output to appropriate screen size.
:returns: Output observation in required format
:rtype: NumPy array
"""
np.maximum(self._obs_buffer[0], self._obs_buffer[1], out=self._obs_buffer[0])
obs = cv2.resize(
self._obs_buffer[0],
(self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA,
)
return np.array(obs, dtype=np.uint8) | 0.903924 | 0.588446 |
import os
import errno
from enum import Enum
from transformers import case, space
class TransformType(Enum):
case = 0
space = 1
class EnumGenerator(object):
_transformer_by_transform_type = {
TransformType.case: case,
TransformType.space: space
}
class TermType(Enum):
filename = 0
section = 1
category = 2
element_name = 3
element_value = 4
suffix = 5
def __init__(self, indent_string=' ', quote_char='"'):
self.indent_string = indent_string
self.quote_char = quote_char
self.subpath = ''
# This setup should be done by iterating over Term, except:
# 1) Python 2 enum seems to require an __order__ attribute,
# that I don't want to maintain.
# 2) PyCharm complains whether __order__ is defined or not:
# Expected collections.Iterable, got Term instead
self.transform_strategies = {
EnumGenerator.TermType.filename: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.section: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.category: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_name: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_value: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.suffix: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
}
}
@classmethod
def _transform(cls, string, transform_type, strategy):
return cls._transformer_by_transform_type[transform_type]\
.transform(string, strategy)
def transform_term(self, string, term_type):
result = string
case_strategy = self.transform_strategies[term_type][TransformType.case]
result = self._transform(result, TransformType.case, case_strategy)
space_strategy = self.transform_strategies[term_type][TransformType.space]
result = self._transform(result, TransformType.space, space_strategy)
return result
def enquote(self, string):
return '{0}{1}{0}'.format(self.quote_char, string, self.quote_char) | build/generators/__init__.py | import os
import errno
from enum import Enum
from transformers import case, space
class TransformType(Enum):
case = 0
space = 1
class EnumGenerator(object):
_transformer_by_transform_type = {
TransformType.case: case,
TransformType.space: space
}
class TermType(Enum):
filename = 0
section = 1
category = 2
element_name = 3
element_value = 4
suffix = 5
def __init__(self, indent_string=' ', quote_char='"'):
self.indent_string = indent_string
self.quote_char = quote_char
self.subpath = ''
# This setup should be done by iterating over Term, except:
# 1) Python 2 enum seems to require an __order__ attribute,
# that I don't want to maintain.
# 2) PyCharm complains whether __order__ is defined or not:
# Expected collections.Iterable, got Term instead
self.transform_strategies = {
EnumGenerator.TermType.filename: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.section: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.category: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_name: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_value: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.suffix: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
}
}
@classmethod
def _transform(cls, string, transform_type, strategy):
return cls._transformer_by_transform_type[transform_type]\
.transform(string, strategy)
def transform_term(self, string, term_type):
result = string
case_strategy = self.transform_strategies[term_type][TransformType.case]
result = self._transform(result, TransformType.case, case_strategy)
space_strategy = self.transform_strategies[term_type][TransformType.space]
result = self._transform(result, TransformType.space, space_strategy)
return result
def enquote(self, string):
return '{0}{1}{0}'.format(self.quote_char, string, self.quote_char) | 0.549399 | 0.344829 |
import requests
import config
from entities.part import Part
from entities.part_category import PartCategory
from json_utils import get_json_value
from requests import HTTPError
from typing import List
_BASE_URL = 'https://rebrickable.com/'
def _get_request(endpoint: str) -> (object, str):
try:
r = requests.get(_BASE_URL + endpoint, headers={'Authorization': f'key {config.TOKEN}'})
except HTTPError as http_err:
return None, f'HTTP error occurred: {http_err}'
except Exception as err:
return None, f'Other error occurred: {err}'
if r.status_code != 200:
if r.status_code == 404:
return None, f'Item not found'
else:
return None, f'Failed to receive data: HTTP status code is {r.status_code}'
return r.json(), None
def get_part_categories() -> (List[PartCategory], str):
json, err_msg = _get_request('/api/v3/lego/part_categories/')
if json is None:
return json, err_msg
results = get_json_value(json, 'results')
if results is None:
return json, err_msg
part_categories = []
for result in results:
part_categories.append(PartCategory(result))
return part_categories, err_msg
def get_part_count(part_cat_id: int = None) -> (int, str):
json, err_msg = _get_request(f'/api/v3/lego/parts/?page=1&page_size=1' +
(f'&part_cat_id={part_cat_id}' if part_cat_id else ''))
if json is None:
return json, err_msg
if json is None or 'count' not in json:
return -1, err_msg
return json['count'], None
def get_parts(page: int = 1, page_size: int = 100, part_cat_id: int = None) -> (List[Part], str):
json, err_msg = _get_request(f'/api/v3/lego/parts/?page={page}&page_size={page_size}' +
(f'&part_cat_id={part_cat_id}' if part_cat_id else ''))
if json is None:
return json, err_msg
results = get_json_value(json, 'results')
if results is None:
return json, err_msg
part_categories = []
for result in results:
part_categories.append(Part(result))
return part_categories, err_msg
def get_part(part_id: int) -> (Part, str):
json, err_msg = _get_request(f'/api/v3/lego/parts/{part_id}/')
return Part(json), err_msg | rba/api.py | import requests
import config
from entities.part import Part
from entities.part_category import PartCategory
from json_utils import get_json_value
from requests import HTTPError
from typing import List
_BASE_URL = 'https://rebrickable.com/'
def _get_request(endpoint: str) -> (object, str):
try:
r = requests.get(_BASE_URL + endpoint, headers={'Authorization': f'key {config.TOKEN}'})
except HTTPError as http_err:
return None, f'HTTP error occurred: {http_err}'
except Exception as err:
return None, f'Other error occurred: {err}'
if r.status_code != 200:
if r.status_code == 404:
return None, f'Item not found'
else:
return None, f'Failed to receive data: HTTP status code is {r.status_code}'
return r.json(), None
def get_part_categories() -> (List[PartCategory], str):
json, err_msg = _get_request('/api/v3/lego/part_categories/')
if json is None:
return json, err_msg
results = get_json_value(json, 'results')
if results is None:
return json, err_msg
part_categories = []
for result in results:
part_categories.append(PartCategory(result))
return part_categories, err_msg
def get_part_count(part_cat_id: int = None) -> (int, str):
json, err_msg = _get_request(f'/api/v3/lego/parts/?page=1&page_size=1' +
(f'&part_cat_id={part_cat_id}' if part_cat_id else ''))
if json is None:
return json, err_msg
if json is None or 'count' not in json:
return -1, err_msg
return json['count'], None
def get_parts(page: int = 1, page_size: int = 100, part_cat_id: int = None) -> (List[Part], str):
json, err_msg = _get_request(f'/api/v3/lego/parts/?page={page}&page_size={page_size}' +
(f'&part_cat_id={part_cat_id}' if part_cat_id else ''))
if json is None:
return json, err_msg
results = get_json_value(json, 'results')
if results is None:
return json, err_msg
part_categories = []
for result in results:
part_categories.append(Part(result))
return part_categories, err_msg
def get_part(part_id: int) -> (Part, str):
json, err_msg = _get_request(f'/api/v3/lego/parts/{part_id}/')
return Part(json), err_msg | 0.420838 | 0.069732 |
from .. properties import UNDEF, PropertySet, SetProperty
class Tool(PropertySet):
"""Abstract base class for interactivity tools."""
keys = SetProperty(UNDEF, dynamic=False)
def __init__(self, **overrides):
"""Initializes a new instance of Tool."""
super().__init__(**overrides)
# init buffers
if self.keys == UNDEF:
self.keys = set()
def on_size(self, evt):
"""
This method should be overridden to provide specific handling
of window size-change event.
Args:
evt: pero.SizeEvt
Event to process.
"""
pass
def on_key_down(self, evt):
"""
This method should be overridden to provide specific handling
of key-down event.
Args:
evt: pero.KeyDownEvt
Event to process.
"""
self.add_key(evt.key)
def on_key_up(self, evt):
"""
This method should be overridden to provide specific handling
of key-up event.
Args:
evt: pero.KeyUpEvt
Event to process.
"""
self.remove_key(evt.key)
def on_mouse_enter(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-enter event.
Args:
evt: pero.MouseEnterEvt
Event to process.
"""
pass
def on_mouse_leave(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-leave event.
Args:
evt: pero.MouseLeaveEvt
Event to process.
"""
self.clear_keys()
def on_mouse_motion(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-motion event.
Args:
evt: pero.MouseMotionEvt
Event to process.
"""
pass
def on_mouse_scroll(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-scroll event.
Args:
evt: pero.MouseScrollEvt
Event to process.
"""
pass
def on_mouse_down(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-down event.
Args:
evt: pero.LeftDownEvt, pero.RightDownEvt or pero.MiddleDownEvt
Event to process.
"""
pass
def on_mouse_up(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-up event.
Args:
evt: pero.LeftUpEvt, pero.RightUpEvt or pero.MiddleUpEvt
Event to process.
"""
pass
def on_mouse_dclick(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-double-click event.
Args:
evt: pero.LeftDClickEvt, pero.RightDClickEvt or pero.MiddleDClickEvt
Event to process.
"""
pass
def on_touch_start(self, evt):
"""
This method should be overridden to provide specific handling
of touch-start event.
Args:
evt: pero.TouchStartEvt
Event to process.
"""
pass
def on_touch_end(self, evt):
"""
This method should be overridden to provide specific handling
of touch-end event.
Args:
evt: pero.TouchEndEvt
Event to process.
"""
pass
def on_touch_move(self, evt):
"""
This method should be overridden to provide specific handling
of touch-move event.
Args:
evt: pero.TouchMoveEvt
Event to process.
"""
pass
def on_touch_cancel(self, evt):
"""
This method should be overridden to provide specific handling
of touch-cancel event.
Args:
evt: pero.TouchCancelEvt
Event to process.
"""
pass
def add_key(self, key):
"""
Remembers given key.
Args:
key: pero.KEY
A key to remember as any value from the pero.KEY enum.
"""
self.keys.add(key)
def remove_key(self, key):
"""
Removes given key.
Args:
key: pero.KEY
A key to remove as any value from the pero.KEY enum.
"""
self.keys.discard(key)
def clear_keys(self):
"""Removes all keys."""
self.keys.clear() | pero/backends/tool.py |
from .. properties import UNDEF, PropertySet, SetProperty
class Tool(PropertySet):
"""Abstract base class for interactivity tools."""
keys = SetProperty(UNDEF, dynamic=False)
def __init__(self, **overrides):
"""Initializes a new instance of Tool."""
super().__init__(**overrides)
# init buffers
if self.keys == UNDEF:
self.keys = set()
def on_size(self, evt):
"""
This method should be overridden to provide specific handling
of window size-change event.
Args:
evt: pero.SizeEvt
Event to process.
"""
pass
def on_key_down(self, evt):
"""
This method should be overridden to provide specific handling
of key-down event.
Args:
evt: pero.KeyDownEvt
Event to process.
"""
self.add_key(evt.key)
def on_key_up(self, evt):
"""
This method should be overridden to provide specific handling
of key-up event.
Args:
evt: pero.KeyUpEvt
Event to process.
"""
self.remove_key(evt.key)
def on_mouse_enter(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-enter event.
Args:
evt: pero.MouseEnterEvt
Event to process.
"""
pass
def on_mouse_leave(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-leave event.
Args:
evt: pero.MouseLeaveEvt
Event to process.
"""
self.clear_keys()
def on_mouse_motion(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-motion event.
Args:
evt: pero.MouseMotionEvt
Event to process.
"""
pass
def on_mouse_scroll(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-scroll event.
Args:
evt: pero.MouseScrollEvt
Event to process.
"""
pass
def on_mouse_down(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-down event.
Args:
evt: pero.LeftDownEvt, pero.RightDownEvt or pero.MiddleDownEvt
Event to process.
"""
pass
def on_mouse_up(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-up event.
Args:
evt: pero.LeftUpEvt, pero.RightUpEvt or pero.MiddleUpEvt
Event to process.
"""
pass
def on_mouse_dclick(self, evt):
"""
This method should be overridden to provide specific handling
of mouse-button-double-click event.
Args:
evt: pero.LeftDClickEvt, pero.RightDClickEvt or pero.MiddleDClickEvt
Event to process.
"""
pass
def on_touch_start(self, evt):
"""
This method should be overridden to provide specific handling
of touch-start event.
Args:
evt: pero.TouchStartEvt
Event to process.
"""
pass
def on_touch_end(self, evt):
"""
This method should be overridden to provide specific handling
of touch-end event.
Args:
evt: pero.TouchEndEvt
Event to process.
"""
pass
def on_touch_move(self, evt):
"""
This method should be overridden to provide specific handling
of touch-move event.
Args:
evt: pero.TouchMoveEvt
Event to process.
"""
pass
def on_touch_cancel(self, evt):
"""
This method should be overridden to provide specific handling
of touch-cancel event.
Args:
evt: pero.TouchCancelEvt
Event to process.
"""
pass
def add_key(self, key):
"""
Remembers given key.
Args:
key: pero.KEY
A key to remember as any value from the pero.KEY enum.
"""
self.keys.add(key)
def remove_key(self, key):
"""
Removes given key.
Args:
key: pero.KEY
A key to remove as any value from the pero.KEY enum.
"""
self.keys.discard(key)
def clear_keys(self):
"""Removes all keys."""
self.keys.clear() | 0.834576 | 0.288456 |
from decimal import Decimal, getcontext
from .errors import YPYModelError, YPYError
from ._core._dm_meta_info import REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST
from ._core._dm_meta_info import REFERENCE_IDENTITY_CLASS, ATTRIBUTE
class DELETE(object):
"""Marker class used to mark nodes that are to be deleted
Assign DELETE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
pass
def __str__(self):
return "Operation DELETE"
class REMOVE(object):
"""Marker class used to mark nodes that are to be removed
Assign REMOVE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
pass
def __str__(self):
return "Operation REMOVE"
class MERGE(object):
"""Marker MERGE used to mark nodes that are to be merged
Assign DELETE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation MERGE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class REPLACE(object):
"""Marker class used to mark nodes that are to be replaced
Assign REPLACE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation REPLACE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class CREATE(object):
"""Marker class used to mark nodes that are to be created
Assign CREATE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation CREATE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class READ(object):
"""Marker class used to mark nodes that are to be read """
pass
def __str__(self):
return "Operation READ"
class Empty(object):
"""
.. _ydk_models_types_Empty:
Represents the empty type in YANG. The empty built-in type represents a leaf that does not have any
value, it conveys information by its presence or absence.
"""
def __eq__(self, rhs):
if not isinstance(rhs, Empty):
raise YPYModelError("Empty comparision error, invalid rhs\n")
return True
def __ne__(self, rhs):
return not isinstance(rhs, Empty)
__hash__ = object.__hash__
class Decimal64(object):
"""
.. _ydk_models_types_Decimal64:
Represents the decimal64 YANG type. The decimal64 type represents a
subset of the real numbers, which can
be represented by decimal numerals. The value space of decimal64 is
the set of numbers that can be obtained by multiplying a 64-bit
signed integer by a negative power of ten, i.e., expressible as
"i x 10^-n" where i is an integer64 and n is an integer between 1 and
18, inclusively.
"""
def __init__(self, str_val):
self.s = str_val
def __str__(self):
return self.s
def __eq__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
return self.s == rhs.s
def __ne__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
return self.s != rhs.s
def __lt__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return True
if rhs.s is None:
return False
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec < rhs_dec
def __le__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return True
if rhs.s is None:
return False
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec <= rhs_dec
def __gt__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return False
if rhs.s is None:
return True
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec > rhs_dec
def __ge__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return False
if rhs.s is None:
return True
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec >= rhs_dec
__hash__ = object.__hash__
class FixedBitsDict(object):
""" Super class of all classes that represents the bits type in YANG
A concrete implementation of this class has a dictionary.
The bits built-in type represents a bit set. That is, a bits value
is a set of flags identified by small integer position numbers
starting at 0. Each bit number has an assigned name.
"""
def __init__(self, dictionary, pos_map):
self._dictionary = dictionary
self._pos_map = pos_map
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __setitem__(self, key, item):
if key not in self._dictionary:
raise KeyError("The key {} is not defined.". format(key))
self._dictionary[key] = item
def __getitem__(self, key):
return self._dictionary[key]
def __str__(self):
return " ".join([key for key in self._dictionary if self._dictionary[key] is True])
def __ne__(self, rhs):
return not self.__eq__(rhs)
def _has_data(self):
for key in self._dictionary:
if self._dictionary[key]:
return True
return False
__hash__ = object.__hash__
class YList(list):
""" Represents a list with support for hanging a parent
All YANG based entity classes that have lists in them use YList
to represent the list.
The "list" statement is used to define an interior data node in the
schema tree. A list node may exist in multiple instances in the data
tree. Each such instance is known as a list entry. The "list"
statement takes one argument, which is an identifier, followed by a
block of substatements that holds detailed list information.
A list entry is uniquely identified by the values of the list's keys,
if defined.
"""
def __init__(self):
super(YList, self).__init__()
self.parent = None
self.name = None
self.count = 0
def __getitem__(self, key):
if isinstance(key, slice):
ret = YList()
ret.parent = self.parent
ret.name = self.name
start = 0 if not key.start else key.start
step = 1 if not key.step else key.step
stop = len(self) if not key.stop else key.stop
for k in range(start, stop, step):
ret.append(super(YList, self).__getitem__(k))
else:
ret = super(YList, self).__getitem__(key)
return ret
def __getslice__(self, i, j):
ret = YList()
ret.parent = self.parent
ret.name = self.name
for item in super(YList, self).__getslice__(i, j):
ret.append(item)
return ret
def append(self, item):
super(YList, self).append(item)
item.parent = self.parent
if hasattr(item, 'ylist_key_names') and not item.ylist_key_names:
setattr(item, '_index', self.count)
self.count += 1
def extend(self, items):
for item in items:
self.append(item)
class YListItem(object):
def __init__(self, item, parent, name):
self.item = item
self.parent = parent
self.name = name
self.ylist_key_names = []
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.item.__class__.__name__.endswith('Identity'):
return self.item.__class__.__name__ == other.item.__class__.__name__
else:
return self.item == other.item
else:
return False
def __repr__(self):
return str(self.item)
def _has_data(self):
if hasattr(self.item, '_has_data'):
return self.item._has_data()
else:
# Enum, Identity, Python primitive types.
return True
__hash__ = object.__hash__
class YLeafList(YList):
""" Represents an associate list with support for hanging a parent
Leaf-list in YANG use YLeafList to represetn the list.
The "leaf-list" statement is used to define an
array of a particular type. The "leaf-list" statement takes one
argument, which is an identifier, followed by a block of
substatements that holds detailed leaf-list information. Values in
leaf-list should be unique.
"""
def __init__(self):
super(YLeafList, self).__init__()
def __contains__(self, item):
item_to_compare = item
if isinstance(item, YListItem):
item_to_compare = item.item
for i in super(YLeafList, self).__iter__():
if item_to_compare.__class__.__name__.endswith('Identity'):
if item_to_compare.__class__.__name__ == i.item.__class__.__name__:
return True
else:
if i.item == item_to_compare:
return True
return False
def __eq__(self, other):
if isinstance(other, self.__class__):
if len(self) != len(other):
return False
for item in super(YLeafList, self).__iter__():
if not other.__contains__(item):
return False
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return super(YLeafList, self).__len__()
def __setitem__(self, key, item):
lst_item = YListItem(item, self.parent, self.name)
super(YLeafList, self).__setitem__(key, lst_item)
def __getitem__(self, key):
if isinstance(key, slice):
ret = YLeafList()
ret.parent = self.parent
ret.name = self.name
start = 0 if not key.start else key.start
step = 1 if not key.step else key.step
stop = len(self) if not key.stop else key.stop
for k in range(start, stop, step):
ret.append(super(YLeafList, self).__getitem__(k))
else:
ret = super(YLeafList, self).__getitem__(key)
return ret
def __getslice__(self, i, j):
# override __getslice__ implemented by CPython
ret = YLeafList()
ret.parent = self.parent
ret.name = self.name
for item in super(YLeafList, self).__getslice__(i, j):
ret.append(item)
return ret
def append(self, item):
if item in self:
index = self.index(item)
raise YPYModelError("Value {} already in leaf-list: {}".format(item, self[index].name))
lst_item = YListItem(item, self.parent, self.name)
super(YLeafList, self).append(lst_item)
def extend(self, items):
for item in items:
self.append(item)
def pop(self, i=-1):
lst_item = super(YLeafList, self).pop(i)
return lst_item.item
def remove(self, item):
removed = False
for i in super(YLeafList, self).__iter__():
if i.item == item:
super(YLeafList, self).remove(i)
removed = True
if not removed:
raise ValueError("list.remove(x): {} not in list".format(item))
def insert(self, key, item):
if item in self:
index = self.index(item)
raise YPYModelError("Value {} already in leaf-list: {}".format(item, self[index].name))
lst_item = YListItem(item, self.parent, self.name)
super(YLeafList, self).insert(key, lst_item)
def index(self, item):
idx = 0
for i in super(YLeafList, self).__iter__():
if i.item == item:
return idx
idx += 1
raise ValueError("{} is not in leaf-list".format(item))
def count(self, item):
cnt = 0
for i in super(YLeafList, self).__iter__():
if i.item == item:
cnt += 1
return cnt
def get_segment_path(entity):
path = entity._common_path.rsplit('/', 1)[1]
if hasattr(entity, '_index'):
path += '[%s]' % entity._index
return path
def _absolute_path(entity):
path = get_segment_path(entity)
if hasattr(entity, 'parent') and entity.parent:
path = '/'.join([_absolute_path(entity.parent), path])
return path
def get_absolute_path(entity):
path = _absolute_path(entity)
segments = path.split("/")
module = segments[0].split(':', 1)[0]
for i in range(1, len(segments)):
del_str = module + ':'
if del_str in segments[i]:
segments[i] = segments[i].replace(del_str, '')
else:
if ':' in segments[i]:
module = segments[i].split(':', 1)[0]
path = '/'.join(segments)
return '/' + path
def get_name_leaf_data(entity):
leaf_name_data = {}
for member in entity._meta_info().meta_info_class_members:
value = getattr(entity, member.presentation_name)
if value is None or isinstance(value, list) and not value:
continue
if member.mtype in [ATTRIBUTE, REFERENCE_IDENTITY_CLASS]:
leaf_name_data[member.name] = value
elif member.mtype == REFERENCE_LEAFLIST and isinstance(value, list):
for child in value:
key = "%s[.='%s']" % (member.name, child)
leaf_name_data[key] = ''
return leaf_name_data
def get_children(entity):
children = {}
for member in entity._meta_info().meta_info_class_members:
value = getattr(entity, member.presentation_name)
if value is None or isinstance(value, list) and not value:
continue
if member.mtype == REFERENCE_CLASS:
abs_path = get_absolute_path(value)
children[abs_path] = value
elif member.mtype == REFERENCE_LIST:
for child in value:
abs_path = get_absolute_path(child)
children[abs_path] = child
return children
def entity_to_dict(entity):
edict = {}
abs_path = get_absolute_path(entity)
ent_meta = entity._meta_info()
if (hasattr(ent_meta, 'is_presence') and ent_meta.is_presence) or \
abs_path.endswith(']'):
edict[abs_path] = ''
leaf_name_data = get_name_leaf_data(entity)
for leaf_name, leaf_value in leaf_name_data.items():
if leaf_name not in entity.ylist_key_names:
edict["%s/%s" % (abs_path, leaf_name)] = leaf_value
for name, child in get_children(entity).items():
child_dict = entity_to_dict(child)
for n, v in child_dict.items():
edict[n] = v
return edict
def entity_diff(ent1, ent2):
if ent1 is None or ent2 is None or type(ent1) != type(ent2):
raise YPYError("entity_diff: Incompatible arguments provided.")
diffs = {}
ent1_dict = entity_to_dict(ent1)
ent2_dict = entity_to_dict(ent2)
ent1_keys = sorted(ent1_dict.keys())
ent2_keys = sorted(ent2_dict.keys())
ent1_skip_keys = []
for key in ent1_keys:
if key in ent1_skip_keys:
continue
if key in ent2_keys:
if ent1_dict[key] != ent2_dict[key]:
diffs[key] = (ent1_dict[key], ent2_dict[key])
ent2_keys.remove(key)
else:
diffs[key] = (ent1_dict[key], None)
for dup_key in ent1_keys:
if dup_key.startswith(key):
ent1_skip_keys.append(dup_key)
ent2_skip_keys = []
for key in ent2_keys:
if key in ent2_skip_keys:
continue
diffs[key] = (None, ent2_dict[key])
for dup_key in ent2_keys:
if dup_key.startswith(key):
ent2_skip_keys.append(dup_key)
return diffs
def abs_path_to_entity(entity, abs_path):
top_abs_path = get_absolute_path(entity)
if top_abs_path == abs_path:
return entity
if top_abs_path in abs_path:
leaf_name_data = get_name_leaf_data(entity)
for leaf_name in leaf_name_data:
if leaf_name not in entity.ylist_key_names:
leaf_path = "%s/%s" % (top_abs_path, leaf_name)
if leaf_path == abs_path:
return entity
for child_abs_path, child in get_children(entity).items():
if child_abs_path == abs_path:
return child
matching_entity = abs_path_to_entity(child, abs_path)
if matching_entity:
return matching_entity
return None | sdk/python/core/ydk/types.py | from decimal import Decimal, getcontext
from .errors import YPYModelError, YPYError
from ._core._dm_meta_info import REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST
from ._core._dm_meta_info import REFERENCE_IDENTITY_CLASS, ATTRIBUTE
class DELETE(object):
"""Marker class used to mark nodes that are to be deleted
Assign DELETE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
pass
def __str__(self):
return "Operation DELETE"
class REMOVE(object):
"""Marker class used to mark nodes that are to be removed
Assign REMOVE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
pass
def __str__(self):
return "Operation REMOVE"
class MERGE(object):
"""Marker MERGE used to mark nodes that are to be merged
Assign DELETE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation MERGE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class REPLACE(object):
"""Marker class used to mark nodes that are to be replaced
Assign REPLACE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation REPLACE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class CREATE(object):
"""Marker class used to mark nodes that are to be created
Assign CREATE object to a mark a leaf for deletion.
A CRUD update operation will delete the leaf from the device it is on."""
def __init__(self, value=None):
self._value = value
def value(self):
return self._value
def set(self, value):
self._value = value
def __str__(self):
text = "Operation CREATE"
if self._value:
text += " with value %s of type '%s'" % (self._value, type(self._value).__name__)
return text
class READ(object):
"""Marker class used to mark nodes that are to be read """
pass
def __str__(self):
return "Operation READ"
class Empty(object):
"""
.. _ydk_models_types_Empty:
Represents the empty type in YANG. The empty built-in type represents a leaf that does not have any
value, it conveys information by its presence or absence.
"""
def __eq__(self, rhs):
if not isinstance(rhs, Empty):
raise YPYModelError("Empty comparision error, invalid rhs\n")
return True
def __ne__(self, rhs):
return not isinstance(rhs, Empty)
__hash__ = object.__hash__
class Decimal64(object):
"""
.. _ydk_models_types_Decimal64:
Represents the decimal64 YANG type. The decimal64 type represents a
subset of the real numbers, which can
be represented by decimal numerals. The value space of decimal64 is
the set of numbers that can be obtained by multiplying a 64-bit
signed integer by a negative power of ten, i.e., expressible as
"i x 10^-n" where i is an integer64 and n is an integer between 1 and
18, inclusively.
"""
def __init__(self, str_val):
self.s = str_val
def __str__(self):
return self.s
def __eq__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
return self.s == rhs.s
def __ne__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
return self.s != rhs.s
def __lt__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return True
if rhs.s is None:
return False
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec < rhs_dec
def __le__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return True
if rhs.s is None:
return False
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec <= rhs_dec
def __gt__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return False
if rhs.s is None:
return True
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec > rhs_dec
def __ge__(self, rhs):
if not isinstance(rhs, Decimal64):
raise YPYModelError("Decimal64 comparision error, invalid rhs\n")
if self.s is None:
return False
if rhs.s is None:
return True
getcontext().prec = 18
self_dec = Decimal(self.s)
rhs_dec = Decimal(rhs.s)
return self_dec >= rhs_dec
__hash__ = object.__hash__
class FixedBitsDict(object):
""" Super class of all classes that represents the bits type in YANG
A concrete implementation of this class has a dictionary.
The bits built-in type represents a bit set. That is, a bits value
is a set of flags identified by small integer position numbers
starting at 0. Each bit number has an assigned name.
"""
def __init__(self, dictionary, pos_map):
self._dictionary = dictionary
self._pos_map = pos_map
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __setitem__(self, key, item):
if key not in self._dictionary:
raise KeyError("The key {} is not defined.". format(key))
self._dictionary[key] = item
def __getitem__(self, key):
return self._dictionary[key]
def __str__(self):
return " ".join([key for key in self._dictionary if self._dictionary[key] is True])
def __ne__(self, rhs):
return not self.__eq__(rhs)
def _has_data(self):
for key in self._dictionary:
if self._dictionary[key]:
return True
return False
__hash__ = object.__hash__
class YList(list):
""" Represents a list with support for hanging a parent
All YANG based entity classes that have lists in them use YList
to represent the list.
The "list" statement is used to define an interior data node in the
schema tree. A list node may exist in multiple instances in the data
tree. Each such instance is known as a list entry. The "list"
statement takes one argument, which is an identifier, followed by a
block of substatements that holds detailed list information.
A list entry is uniquely identified by the values of the list's keys,
if defined.
"""
def __init__(self):
super(YList, self).__init__()
self.parent = None
self.name = None
self.count = 0
def __getitem__(self, key):
if isinstance(key, slice):
ret = YList()
ret.parent = self.parent
ret.name = self.name
start = 0 if not key.start else key.start
step = 1 if not key.step else key.step
stop = len(self) if not key.stop else key.stop
for k in range(start, stop, step):
ret.append(super(YList, self).__getitem__(k))
else:
ret = super(YList, self).__getitem__(key)
return ret
def __getslice__(self, i, j):
ret = YList()
ret.parent = self.parent
ret.name = self.name
for item in super(YList, self).__getslice__(i, j):
ret.append(item)
return ret
def append(self, item):
super(YList, self).append(item)
item.parent = self.parent
if hasattr(item, 'ylist_key_names') and not item.ylist_key_names:
setattr(item, '_index', self.count)
self.count += 1
def extend(self, items):
for item in items:
self.append(item)
class YListItem(object):
def __init__(self, item, parent, name):
self.item = item
self.parent = parent
self.name = name
self.ylist_key_names = []
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.item.__class__.__name__.endswith('Identity'):
return self.item.__class__.__name__ == other.item.__class__.__name__
else:
return self.item == other.item
else:
return False
def __repr__(self):
return str(self.item)
def _has_data(self):
if hasattr(self.item, '_has_data'):
return self.item._has_data()
else:
# Enum, Identity, Python primitive types.
return True
__hash__ = object.__hash__
class YLeafList(YList):
    """Represents an associative list with support for hanging a parent.

    Leaf-lists in YANG use YLeafList to represent the list.  The
    "leaf-list" statement defines an array of a particular type; values
    in a leaf-list must be unique, which ``append`` and ``insert``
    enforce by raising ``YPYModelError`` on duplicates.  Elements are
    stored wrapped in ``YListItem`` so each value knows its parent and
    leaf name.
    """

    def __init__(self):
        super(YLeafList, self).__init__()

    def __contains__(self, item):
        # Accept either a raw value or a YListItem wrapper.
        item_to_compare = item.item if isinstance(item, YListItem) else item
        is_identity = item_to_compare.__class__.__name__.endswith('Identity')
        for i in super(YLeafList, self).__iter__():
            if is_identity:
                # Identity values compare by class name, not by value.
                if item_to_compare.__class__.__name__ == i.item.__class__.__name__:
                    return True
            elif i.item == item_to_compare:
                return True
        return False

    def __eq__(self, other):
        """Equality against another YLeafList (membership-based)."""
        if not isinstance(other, self.__class__):
            return False
        if len(self) != len(other):
            return False
        for item in super(YLeafList, self).__iter__():
            if item not in other:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return super(YLeafList, self).__len__()

    def __setitem__(self, key, item):
        super(YLeafList, self).__setitem__(
            key, YListItem(item, self.parent, self.name))

    def __getitem__(self, key):
        if isinstance(key, slice):
            ret = YLeafList()
            ret.parent = self.parent
            ret.name = self.name
            # Fix: slice.indices handles None and negative bounds
            # correctly, unlike the previous manual defaulting which
            # broke for negative start/stop values.
            for k in range(*key.indices(len(self))):
                ret.append(super(YLeafList, self).__getitem__(k))
            return ret
        return super(YLeafList, self).__getitem__(key)

    def __getslice__(self, i, j):
        # Python 2 compatibility: CPython built-ins route old-style
        # slicing here, bypassing __getitem__.
        ret = YLeafList()
        ret.parent = self.parent
        ret.name = self.name
        for item in super(YLeafList, self).__getslice__(i, j):
            ret.append(item)
        return ret

    def append(self, item):
        """Append *item*; raises YPYModelError if already present."""
        if item in self:
            index = self.index(item)
            raise YPYModelError("Value {} already in leaf-list: {}".format(
                item, self[index].name))
        super(YLeafList, self).append(YListItem(item, self.parent, self.name))

    def extend(self, items):
        for item in items:
            self.append(item)

    def pop(self, i=-1):
        # Unwrap the stored YListItem before returning the value.
        return super(YLeafList, self).pop(i).item

    def remove(self, item):
        """Remove the first occurrence of *item* (list.remove semantics).

        Fix: the previous implementation removed matches while iterating
        the underlying list, which skips the element following each
        removal.  Since leaf-list values are unique, removing the first
        match is equivalent and safe.
        """
        for i in super(YLeafList, self).__iter__():
            if i.item == item:
                super(YLeafList, self).remove(i)
                return
        raise ValueError("list.remove(x): {} not in list".format(item))

    def insert(self, key, item):
        """Insert *item* at *key*; raises YPYModelError if already present."""
        if item in self:
            index = self.index(item)
            raise YPYModelError("Value {} already in leaf-list: {}".format(
                item, self[index].name))
        super(YLeafList, self).insert(key, YListItem(item, self.parent, self.name))

    def index(self, item):
        """Return the position of *item*, or raise ValueError."""
        for idx, i in enumerate(super(YLeafList, self).__iter__()):
            if i.item == item:
                return idx
        raise ValueError("{} is not in leaf-list".format(item))

    def count(self, item):
        """Return how many stored values equal *item*."""
        return sum(1 for i in super(YLeafList, self).__iter__()
                   if i.item == item)
def get_segment_path(entity):
    """Return the last segment of *entity*'s path, with a key predicate
    suffix when the entity is an indexed list entry."""
    segment = entity._common_path.rsplit('/', 1)[1]
    if hasattr(entity, '_index'):
        segment = '%s[%s]' % (segment, entity._index)
    return segment
def _absolute_path(entity):
    """Build the slash-joined path from the root entity down to *entity*."""
    segment = get_segment_path(entity)
    if hasattr(entity, 'parent') and entity.parent:
        return '/'.join([_absolute_path(entity.parent), segment])
    return segment
def get_absolute_path(entity):
    """Return *entity*'s absolute path with redundant module prefixes
    stripped.

    The module prefix is kept on the first segment where it appears and
    removed from subsequent segments until a different module's prefix
    takes over.
    """
    segments = _absolute_path(entity).split("/")
    module = segments[0].split(':', 1)[0]
    for i in range(1, len(segments)):
        prefix = module + ':'
        if prefix in segments[i]:
            segments[i] = segments[i].replace(prefix, '')
        elif ':' in segments[i]:
            # A new module namespace starts at this segment.
            module = segments[i].split(':', 1)[0]
    return '/' + '/'.join(segments)
def get_name_leaf_data(entity):
    """Collect a mapping of leaf name to value for *entity*'s set members.

    Leaf-list entries are keyed by an XPath value predicate with an
    empty value, matching how they appear in instance paths.
    """
    leaf_name_data = {}
    for member in entity._meta_info().meta_info_class_members:
        value = getattr(entity, member.presentation_name)
        # Skip unset members and empty lists.
        if value is None or (isinstance(value, list) and not value):
            continue
        if member.mtype in (ATTRIBUTE, REFERENCE_IDENTITY_CLASS):
            leaf_name_data[member.name] = value
        elif member.mtype == REFERENCE_LEAFLIST and isinstance(value, list):
            for child in value:
                leaf_name_data["%s[.='%s']" % (member.name, child)] = ''
    return leaf_name_data
def get_children(entity):
    """Return a mapping of absolute path to each child container or
    list entry of *entity*."""
    children = {}
    for member in entity._meta_info().meta_info_class_members:
        value = getattr(entity, member.presentation_name)
        # Skip unset members and empty lists.
        if value is None or (isinstance(value, list) and not value):
            continue
        if member.mtype == REFERENCE_CLASS:
            children[get_absolute_path(value)] = value
        elif member.mtype == REFERENCE_LIST:
            for child in value:
                children[get_absolute_path(child)] = child
    return children
def entity_to_dict(entity):
    """Flatten *entity* (recursively) into a dict of absolute path to
    leaf value.

    Presence containers and keyed list entries contribute an entry for
    the container path itself (with an empty value) so their existence
    is visible even when they carry no leaves.  List-key leaves are
    omitted because they are already encoded in the path predicate.
    """
    edict = {}
    abs_path = get_absolute_path(entity)
    ent_meta = entity._meta_info()
    # getattr default replaces the previous hasattr-and-attribute check.
    if getattr(ent_meta, 'is_presence', False) or abs_path.endswith(']'):
        edict[abs_path] = ''
    for leaf_name, leaf_value in get_name_leaf_data(entity).items():
        if leaf_name not in entity.ylist_key_names:
            edict["%s/%s" % (abs_path, leaf_name)] = leaf_value
    # Only the child entities are needed, not their paths; merge each
    # child's flattened dict directly instead of copying key by key.
    for child in get_children(entity).values():
        edict.update(entity_to_dict(child))
    return edict
def entity_diff(ent1, ent2):
    """Diff two entities of the same type.

    Returns a dict mapping absolute leaf path -> (value_in_ent1,
    value_in_ent2); a side where the path is absent is reported as
    None.

    Raises:
        YPYError: if either entity is None or the types differ.
    """
    if ent1 is None or ent2 is None or type(ent1) != type(ent2):
        raise YPYError("entity_diff: Incompatible arguments provided.")
    diffs = {}
    ent1_dict = entity_to_dict(ent1)
    ent2_dict = entity_to_dict(ent2)
    ent1_keys = sorted(ent1_dict.keys())
    ent2_keys = sorted(ent2_dict.keys())
    ent1_skip_keys = []
    for key in ent1_keys:
        if key in ent1_skip_keys:
            continue
        if key in ent2_keys:
            if ent1_dict[key] != ent2_dict[key]:
                diffs[key] = (ent1_dict[key], ent2_dict[key])
            # Matched keys are consumed so the second pass only sees
            # paths unique to ent2.
            ent2_keys.remove(key)
        else:
            # Present only in ent1: record it once, then skip every
            # nested path under it (string-prefix match).
            diffs[key] = (ent1_dict[key], None)
            for dup_key in ent1_keys:
                if dup_key.startswith(key):
                    ent1_skip_keys.append(dup_key)
    ent2_skip_keys = []
    for key in ent2_keys:
        if key in ent2_skip_keys:
            continue
        # Remaining keys exist only in ent2.
        diffs[key] = (None, ent2_dict[key])
        for dup_key in ent2_keys:
            if dup_key.startswith(key):
                ent2_skip_keys.append(dup_key)
    return diffs
def abs_path_to_entity(entity, abs_path):
    """Resolve *abs_path* to the entity owning it, searching *entity*'s
    subtree depth-first.

    A leaf path resolves to the entity that owns the leaf.  Returns
    None when the path is not under *entity*.
    """
    top_abs_path = get_absolute_path(entity)
    if top_abs_path == abs_path:
        return entity
    if top_abs_path not in abs_path:
        return None
    for leaf_name in get_name_leaf_data(entity):
        if leaf_name in entity.ylist_key_names:
            continue
        if "%s/%s" % (top_abs_path, leaf_name) == abs_path:
            return entity
    for child_abs_path, child in get_children(entity).items():
        if child_abs_path == abs_path:
            return child
        match = abs_path_to_entity(child, abs_path)
        if match:
            return match
    return None
import os
import sys
import datetime
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy import String, DateTime, Text, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
# creating the base
Base = declarative_base()
# creating the User table
class User(Base):
    # Registered user of the catalog application.
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    email = Column(String(250), nullable=False)
    # Optional URL of the user's profile picture.
    picture = Column(String(750), nullable=True)
# creating the Category table
class Category(Base):
    # Item category, owned by a user.
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    # setting the ON DELETE CASCADE: deleting a category also deletes
    # its items at the ORM level.
    category_item = relationship(
        "CategoryItem",
        backref="categoria",
        cascade="all, delete, delete-orphan"
    )

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        Fix: the dict literal used to sit on the line after a bare
        ``return``, so the property always returned None.
        """
        return {
            'name': self.name,
            'id': self.id
        }
# creating the Items table
class CategoryItem(Base):
    # Item belonging to a category, owned by a user.
    __tablename__ = 'category_item'
    title = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
    description = Column(String(250))
    category_id = Column(Integer, ForeignKey('category.id'))
    category = relationship(Category)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    # Image path/URL plus optional raw image bytes.
    image = Column(Text, default="")
    image_data = Column(LargeBinary, nullable=True)
    creation_date = Column(DateTime, default=datetime.datetime.utcnow)

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        Fix: the dict literal used to sit on the line after a bare
        ``return``, so the property always returned None.
        """
        return {
            'title': self.title,
            'description': self.description,
            'id': self.id
        }
# Create the SQLite database file and all tables declared above.
engine = create_engine('sqlite:///catalogitem1.db')
Base.metadata.create_all(engine)
import sys
import datetime
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy import String, DateTime, Text, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
# creating the base
Base = declarative_base()
# creating the User table
class User(Base):
    # Registered user of the catalog application.
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    email = Column(String(250), nullable=False)
    # Optional URL of the user's profile picture.
    picture = Column(String(750), nullable=True)
# creating the Category table
class Category(Base):
    # Item category, owned by a user.
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    # setting the ON DELETE CASCADE: deleting a category also deletes
    # its items at the ORM level.
    category_item = relationship(
        "CategoryItem",
        backref="categoria",
        cascade="all, delete, delete-orphan"
    )

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        Fix: the dict literal used to sit on the line after a bare
        ``return``, so the property always returned None.
        """
        return {
            'name': self.name,
            'id': self.id
        }
# creating the Items table
class CategoryItem(Base):
    # Item belonging to a category, owned by a user.
    __tablename__ = 'category_item'
    title = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
    description = Column(String(250))
    category_id = Column(Integer, ForeignKey('category.id'))
    category = relationship(Category)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)
    # Image path/URL plus optional raw image bytes.
    image = Column(Text, default="")
    image_data = Column(LargeBinary, nullable=True)
    creation_date = Column(DateTime, default=datetime.datetime.utcnow)

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        Fix: the dict literal used to sit on the line after a bare
        ``return``, so the property always returned None.
        """
        return {
            'title': self.title,
            'description': self.description,
            'id': self.id
        }
# Create the SQLite database file and all tables declared above.
engine = create_engine('sqlite:///catalogitem1.db')
Base.metadata.create_all(engine)
import time
import copy
import logging
import numpy as np
from pylsl import StreamInlet, resolve_streams
from pyqtgraph.Qt import QtCore
from config import main_config
class LSLClient(QtCore.QRunnable):
    """Qt runnable that pulls EEG chunks from an LSL stream.

    Each fetched (timestamps, eeg) chunk is handed to the parent
    through its ``lsl_data`` attribute.
    """

    def __init__(self, parent, debug=False):
        super().__init__()
        self.parent = parent
        self.ts, self.eeg = [], []
        self.fetch_every_s = main_config['lsl_every_s']
        self.chunk_len = int(main_config['fs'] * self.fetch_every_s)
        self.create_stream()
        self.should_stream = True
        self.debug = debug

    def create_stream(self):
        """Resolve the first available LSL stream and open an inlet.

        Raises:
            RuntimeError: if no LSL stream can be found.
        """
        logging.info('Looking for LSL stream...')
        available_streams = resolve_streams(5)
        if not available_streams:
            logging.error('No stream found !')
            # Fix: previously a bare `raise Exception` with no message.
            raise RuntimeError('No LSL stream found')
        self.stream_reader = StreamInlet(available_streams[0],
                                         max_chunklen=self.chunk_len,
                                         recover=False)
        # Extract stream info (fetched once instead of per attribute).
        info = self.stream_reader.info()
        session_id = info.session_id()  # renamed: `id` shadowed a builtin
        self.fs = int(info.nominal_srate())
        self.n_channels = int(info.channel_count())
        logging.info(f'Stream {session_id} found at {self.fs} Hz '
                     f'with {self.n_channels} channels')
        # Fetching channel names from the stream description.
        ch = info.desc().child('channels').first_child()
        self.ch_names = []
        for _ in range(self.n_channels):
            self.ch_names.append(ch.child_value('label'))
            ch = ch.next_sibling()
        logging.info(f"Channel names: {self.ch_names}")

    def get_data(self):
        """Pull one chunk from the inlet into self.eeg / self.ts.

        Any pull failure stops the streaming loop.
        """
        try:
            # Fetch available data from lsl stream and convert to numpy.
            eeg, ts = self.stream_reader.pull_chunk(
                timeout=main_config['timeout'],
                max_samples=self.chunk_len)
            self.eeg = np.array(eeg, dtype=np.float64)
            self.ts = np.array(ts, dtype=np.float64)
            if len(self.ts) < self.chunk_len:
                logging.info(f'Receiving LSL: {len(self.ts)} '
                             f'instead of {self.chunk_len} !')
        except Exception as e:
            logging.info(f'{e} - No more data')
            self.should_stream = False

    def notify(self):
        """Publish the latest chunk to the parent via ``lsl_data``."""
        if len(self.eeg) > 0:
            # Manipulate eeg data to be of shape (n_channels, n_timestamps)
            self.eeg = np.swapaxes(self.eeg, 1, 0)
            # Convert timestamps from seconds to frame indices
            self.ts = np.array(self.ts) * self.fs
            self.ts = self.ts.astype(np.int64)
            self.parent.lsl_data = (copy.deepcopy(self.ts),
                                    copy.deepcopy(self.eeg))
            if self.debug:
                logging.info(f'Receiving LSL - '
                             f'Last ts: {self.ts[-1]} - '
                             f'n_frames: {len(self.ts)}')

    @QtCore.pyqtSlot()
    def run(self):
        """Fetch/notify loop paced to fetch_every_s seconds per cycle."""
        logging.info('Start LSL stream')
        while self.should_stream is True:
            countdown = time.time()
            self.get_data()
            self.notify()
            # Sleep away the remainder of the fetch period.
            delay = time.time() - countdown
            if self.fetch_every_s - delay > 0:
                time.sleep(self.fetch_every_s - delay)
        logging.info('Stop LSL stream')
import copy
import logging
import numpy as np
from pylsl import StreamInlet, resolve_streams
from pyqtgraph.Qt import QtCore
from config import main_config
class LSLClient(QtCore.QRunnable):
    """Qt runnable that pulls EEG chunks from an LSL stream.

    Each fetched (timestamps, eeg) chunk is handed to the parent
    through its ``lsl_data`` attribute.
    """

    def __init__(self, parent, debug=False):
        super().__init__()
        self.parent = parent
        self.ts, self.eeg = [], []
        self.fetch_every_s = main_config['lsl_every_s']
        self.chunk_len = int(main_config['fs'] * self.fetch_every_s)
        self.create_stream()
        self.should_stream = True
        self.debug = debug

    def create_stream(self):
        """Resolve the first available LSL stream and open an inlet.

        Raises:
            RuntimeError: if no LSL stream can be found.
        """
        logging.info('Looking for LSL stream...')
        available_streams = resolve_streams(5)
        if not available_streams:
            logging.error('No stream found !')
            # Fix: previously a bare `raise Exception` with no message.
            raise RuntimeError('No LSL stream found')
        self.stream_reader = StreamInlet(available_streams[0],
                                         max_chunklen=self.chunk_len,
                                         recover=False)
        # Extract stream info (fetched once instead of per attribute).
        info = self.stream_reader.info()
        session_id = info.session_id()  # renamed: `id` shadowed a builtin
        self.fs = int(info.nominal_srate())
        self.n_channels = int(info.channel_count())
        logging.info(f'Stream {session_id} found at {self.fs} Hz '
                     f'with {self.n_channels} channels')
        # Fetching channel names from the stream description.
        ch = info.desc().child('channels').first_child()
        self.ch_names = []
        for _ in range(self.n_channels):
            self.ch_names.append(ch.child_value('label'))
            ch = ch.next_sibling()
        logging.info(f"Channel names: {self.ch_names}")

    def get_data(self):
        """Pull one chunk from the inlet into self.eeg / self.ts.

        Any pull failure stops the streaming loop.
        """
        try:
            # Fetch available data from lsl stream and convert to numpy.
            eeg, ts = self.stream_reader.pull_chunk(
                timeout=main_config['timeout'],
                max_samples=self.chunk_len)
            self.eeg = np.array(eeg, dtype=np.float64)
            self.ts = np.array(ts, dtype=np.float64)
            if len(self.ts) < self.chunk_len:
                logging.info(f'Receiving LSL: {len(self.ts)} '
                             f'instead of {self.chunk_len} !')
        except Exception as e:
            logging.info(f'{e} - No more data')
            self.should_stream = False

    def notify(self):
        """Publish the latest chunk to the parent via ``lsl_data``."""
        if len(self.eeg) > 0:
            # Manipulate eeg data to be of shape (n_channels, n_timestamps)
            self.eeg = np.swapaxes(self.eeg, 1, 0)
            # Convert timestamps from seconds to frame indices
            self.ts = np.array(self.ts) * self.fs
            self.ts = self.ts.astype(np.int64)
            self.parent.lsl_data = (copy.deepcopy(self.ts),
                                    copy.deepcopy(self.eeg))
            if self.debug:
                logging.info(f'Receiving LSL - '
                             f'Last ts: {self.ts[-1]} - '
                             f'n_frames: {len(self.ts)}')

    @QtCore.pyqtSlot()
    def run(self):
        """Fetch/notify loop paced to fetch_every_s seconds per cycle."""
        logging.info('Start LSL stream')
        while self.should_stream is True:
            countdown = time.time()
            self.get_data()
            self.notify()
            # Sleep away the remainder of the fetch period.
            delay = time.time() - countdown
            if self.fetch_every_s - delay > 0:
                time.sleep(self.fetch_every_s - delay)
        logging.info('Stop LSL stream')
import os
from django.contrib.auth.models import AnonymousUser, User
from reviewboard.reviews.models import Group
from reviewboard.scmtools.forms import RepositoryForm
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.testing.testcase import TestCase
class PolicyTests(TestCase):
    """Unit tests for access policies."""

    fixtures = ['test_scmtools']

    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='',
                                             email='<EMAIL>')
        self.anonymous = AnonymousUser()
        self.repo = Repository.objects.create(
            name='test',
            path='example.com:/cvsroot/test',
            username='anonymous',
            tool=Tool.objects.get(name='CVS'))

    def test_repository_public(self):
        """Testing access to a public repository"""
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertTrue(self.repo.is_accessible_by(self.anonymous))
        # Consistency fix: use assertIn/assertNotIn for all membership
        # checks instead of mixing in assertTrue/assertFalse(`x in y`).
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertIn(self.repo,
                      Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_denied(self):
        """Testing no access to an inaccessible private repository"""
        self.repo.public = False
        self.repo.save()
        self.assertFalse(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertNotIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_allowed_by_user(self):
        """Testing access to a private repository accessible by user"""
        self.repo.users.add(self.user)
        self.repo.public = False
        self.repo.save()
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_allowed_by_review_group(self):
        """Testing access to a private repository accessible by review group"""
        group = Group.objects.create(name='test-group')
        group.users.add(self.user)
        self.repo.public = False
        self.repo.review_groups.add(group)
        self.repo.save()
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_form_with_local_site_and_bad_group(self):
        """Testing adding a Group to a RepositoryForm with the wrong LocalSite
        """
        test_site = LocalSite.objects.create(name='test')
        tool = Tool.objects.get(name='Subversion')
        group = Group.objects.create(name='test-group')
        svn_repo_path = 'file://' + os.path.join(os.path.dirname(__file__),
                                                 '..', 'testdata', 'svn_repo')
        # Group has no LocalSite but the form specifies one.
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'review_groups': [group.pk],
            'local_site': test_site.pk,
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())

        # Group belongs to a LocalSite but the form omits one.
        group.local_site = test_site
        group.save()
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'review_groups': [group.pk],
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())

    def test_repository_form_with_local_site_and_bad_user(self):
        """Testing adding a User to a RepositoryForm with the wrong LocalSite
        """
        test_site = LocalSite.objects.create(name='test')
        tool = Tool.objects.get(name='Subversion')
        svn_repo_path = 'file://' + os.path.join(os.path.dirname(__file__),
                                                 '..', 'testdata', 'svn_repo')
        # User is not a member of the form's LocalSite.
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'users': [self.user.pk],
            'local_site': test_site.pk,
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())
from django.contrib.auth.models import AnonymousUser, User
from reviewboard.reviews.models import Group
from reviewboard.scmtools.forms import RepositoryForm
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.testing.testcase import TestCase
class PolicyTests(TestCase):
    """Unit tests for access policies."""

    fixtures = ['test_scmtools']

    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='',
                                             email='<EMAIL>')
        self.anonymous = AnonymousUser()
        self.repo = Repository.objects.create(
            name='test',
            path='example.com:/cvsroot/test',
            username='anonymous',
            tool=Tool.objects.get(name='CVS'))

    def test_repository_public(self):
        """Testing access to a public repository"""
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertTrue(self.repo.is_accessible_by(self.anonymous))
        # Consistency fix: use assertIn/assertNotIn for all membership
        # checks instead of mixing in assertTrue/assertFalse(`x in y`).
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertIn(self.repo,
                      Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_denied(self):
        """Testing no access to an inaccessible private repository"""
        self.repo.public = False
        self.repo.save()
        self.assertFalse(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertNotIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_allowed_by_user(self):
        """Testing access to a private repository accessible by user"""
        self.repo.users.add(self.user)
        self.repo.public = False
        self.repo.save()
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_private_access_allowed_by_review_group(self):
        """Testing access to a private repository accessible by review group"""
        group = Group.objects.create(name='test-group')
        group.users.add(self.user)
        self.repo.public = False
        self.repo.review_groups.add(group)
        self.repo.save()
        self.assertTrue(self.repo.is_accessible_by(self.user))
        self.assertFalse(self.repo.is_accessible_by(self.anonymous))
        self.assertIn(self.repo, Repository.objects.accessible(self.user))
        self.assertNotIn(self.repo,
                         Repository.objects.accessible(self.anonymous))

    def test_repository_form_with_local_site_and_bad_group(self):
        """Testing adding a Group to a RepositoryForm with the wrong LocalSite
        """
        test_site = LocalSite.objects.create(name='test')
        tool = Tool.objects.get(name='Subversion')
        group = Group.objects.create(name='test-group')
        svn_repo_path = 'file://' + os.path.join(os.path.dirname(__file__),
                                                 '..', 'testdata', 'svn_repo')
        # Group has no LocalSite but the form specifies one.
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'review_groups': [group.pk],
            'local_site': test_site.pk,
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())

        # Group belongs to a LocalSite but the form omits one.
        group.local_site = test_site
        group.save()
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'review_groups': [group.pk],
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())

    def test_repository_form_with_local_site_and_bad_user(self):
        """Testing adding a User to a RepositoryForm with the wrong LocalSite
        """
        test_site = LocalSite.objects.create(name='test')
        tool = Tool.objects.get(name='Subversion')
        svn_repo_path = 'file://' + os.path.join(os.path.dirname(__file__),
                                                 '..', 'testdata', 'svn_repo')
        # User is not a member of the form's LocalSite.
        form = RepositoryForm({
            'name': 'test',
            'path': svn_repo_path,
            'hosting_type': 'custom',
            'bug_tracker_type': 'custom',
            'users': [self.user.pk],
            'local_site': test_site.pk,
            'tool': tool.pk,
        })
        self.assertFalse(form.is_valid())
# Field indexes into each AccessoryTypes value tuple.
# NOTE(review): the tuples below carry 4 fields; confirm whether the
# ATEmblemPrices slot is used anywhere.
ATArticle = 0
ATString = 1
ATBasePrice = 2
ATReleased = 3
ATEmblemPrices = 4

# Accessory article codes (gender-neutral and gender-specific).
AHat = 0
AGlasses = 1
ABackpack = 2
AShoes = 3
ABoysHat = 4
ABoysGlasses = 5
ABoysBackpack = 6
ABoysShoes = 7
AGirlsHat = 8
AGirlsGlasses = 9
AGirlsBackpack = 10
AGirlsShoes = 11

# Base price tiers.
APriceTest = 5
APriceBasic = 250
APriceBasicPlus = 400
APriceCool = 800
APriceAwesome = 1500

# Token price tiers.
ATokenPriceBasic = 10
ATokenPriceBasicPlus = 30
ATokenPriceCool = 50
ATokenPriceAwesome = 100
ATokenPriceAmazing = 250
AccessoryTypes = {101: (AHat,
'hbb1',
APriceBasic,
1),
102: (
AHat,
'hsf1',
APriceCool,
5),
103: (
AGirlsHat,
'hrb1',
APriceBasic,
1),
104: (
AHat,
'hsf2',
APriceCool,
0),
105: (
AHat,
'hsf3',
APriceCool,
0),
106: (
AGirlsHat,
'hrb2',
APriceBasicPlus,
3),
107: (
AGirlsHat,
'hrb3',
APriceBasicPlus,
0),
108: (
AHat,
'hht1',
APriceCool,
4),
109: (
AHat,
'hht2',
APriceCool,
3),
110: (
AHat,
'htp1',
APriceCool,
3),
111: (
AHat,
'htp2',
APriceCool,
0),
112: (
AHat,
'hav1',
3500,
0),
113: (
AHat,
'hfp1',
3500,
0),
114: (
AHat,
'hsg1',
3500,
0),
115: (
AHat,
'hwt1',
3500,
0),
116: (
AHat,
'hfz1',
APriceCool,
5),
117: (
AHat,
'hgf1',
APriceCool,
1),
118: (
AHat,
'hpt1',
APriceBasicPlus,
1),
119: (
AHat,
'hpb1',
APriceBasicPlus,
6),
120: (
ABoysHat,
'hcr1',
10000,
5),
121: (
AHat,
'hbb2',
APriceBasic,
2),
122: (
AHat,
'hbb3',
APriceBasic,
2),
123: (
AHat,
'hcw1',
APriceCool,
1),
124: (
AHat,
'hpr1',
APriceAwesome,
1),
125: (
AHat,
'hpp1',
APriceBasicPlus,
1),
126: (
AHat,
'hfs1',
APriceCool,
1),
127: (
AHat,
'hsb1',
APriceAwesome,
1),
128: (
AHat,
'hst1',
APriceBasicPlus,
1),
129: (
AGirlsHat,
'hsu1',
APriceCool,
1),
130: (
AGirlsHat,
'hrb4',
APriceBasic,
1),
131: (
AGirlsHat,
'hrb5',
APriceBasicPlus,
4),
132: (
AGirlsHat,
'hrb6',
APriceBasic,
2),
133: (
AGirlsHat,
'hrb7',
APriceBasicPlus,
6),
134: (
AHat,
'hat1',
APriceCool,
2),
135: (
AGirlsHat,
'hhd1',
APriceCool,
2),
136: (
AHat,
'hbw1',
APriceCool,
6),
137: (
AHat,
'hch1',
APriceCool,
5),
138: (
AHat,
'hdt1',
APriceAwesome,
6),
139: (
AHat,
'hft1',
APriceCool,
4),
140: (
AHat,
'hfd1',
APriceCool,
6),
141: (
AHat,
'hmk1',
APriceAwesome,
2),
142: (
AHat,
'hft2',
APriceCool,
6),
143: (
ABoysHat,
'hhd2',
APriceCool,
3),
144: (
AGirlsHat,
'hpc1',
APriceCool,
5),
145: (
AHat,
'hrh1',
APriceCool,
2),
146: (
AHat,
'hhm1',
2500,
2),
147: (
AHat,
'hat2',
APriceCool,
2),
148: (
AGirlsHat,
'htr1',
10000,
3),
149: (
AHat,
'hhm2',
APriceAwesome,
2),
150: (
AHat,
'hwz1',
APriceCool,
2),
151: (
AHat,
'hwz2',
APriceCool,
2),
152: (
AHat,
'hhm3',
APriceAwesome,
6),
153: (
AHat,
'hhm4',
APriceAwesome,
5),
154: (
AHat,
'hfp2',
APriceCool,
5),
155: (
AHat,
'hhm5',
APriceAwesome,
4),
156: (
AHat,
'hnp1',
APriceAwesome,
6),
157: (
AHat,
'hpc2',
APriceAwesome,
3),
158: (
AHat,
'hph1',
APriceAwesome,
4),
159: (
AHat,
'hwg1',
APriceCool,
5),
160: (
AHat,
'hbb4',
APriceBasic,
5),
161: (
AHat,
'hbb5',
APriceBasic,
2),
162: (
AHat,
'hbb6',
APriceBasic,
5),
163: (
AHat,
'hsl1',
APriceCool,
5),
164: (
AHat,
'hfr1',
3000,
4),
165: (
AHat,
'hby1',
APriceAwesome,
5),
166: (
AGirlsHat,
'hrb8',
APriceBasicPlus,
6),
167: (
AHat,
'hjh1',
APriceAwesome,
3),
168: (
AHat,
'hbb7',
APriceBasic,
6),
169: (
AGirlsHat,
'hrb9',
APriceBasicPlus,
6),
170: (
AHat,
'hwt2',
APriceAwesome,
4),
171: (
AGirlsHat,
'hhw1',
APriceBasicPlus,
7),
172: (
AHat,
'hhw2',
900,
7),
173: (
AHat,
'hob1',
APriceAwesome,
6),
174: (
AHat,
'hbn1',
APriceAwesome,
8),
175: (
AHat,
'hpt2',
APriceCool,
9),
176: (
AHat,
'htp3',
ATokenPriceAmazing,
4),
177: (
AHat,
'hrb10',
ATokenPriceAwesome,
4),
178: (
AHat,
'hrb11',
ATokenPriceBasic,
4),
179: (
AHat,
'hrb12',
ATokenPriceBasicPlus,
4),
180: (
AHat,
'hrb14',
ATokenPriceBasicPlus,
4),
181: (
AHat,
'hrb15',
ATokenPriceBasicPlus,
4),
182: (
AHat,
'hrb16',
ATokenPriceBasicPlus,
4),
183: (
AHat,
'hrb17',
ATokenPriceCool,
4),
184: (
AHat,
'hrb18',
ATokenPriceCool,
4),
185: (
AHat,
'hrb19',
ATokenPriceCool,
4),
186: (
AHat,
'hrb20',
ATokenPriceCool,
4),
187: (
AHat,
'hsu2',
ATokenPriceBasicPlus,
4),
188: (
AHat,
'hgf2',
ATokenPriceCool,
4),
201: (
AGlasses,
'grd1',
APriceBasicPlus,
0),
202: (
AGlasses,
'gmb1',
APriceCool,
1),
203: (
AGlasses,
'gnr1',
APriceCool,
0),
204: (
AGlasses,
'gst1',
APriceBasicPlus,
1),
205: (
AGlasses,
'g3d1',
APriceCool,
1),
206: (
AGlasses,
'gav1',
APriceCool,
1),
207: (
AGlasses,
'gce1',
APriceCool,
2),
208: (
AGlasses,
'gdk1',
APriceBasic,
1),
209: (
AGlasses,
'gjo1',
APriceBasicPlus,
1),
210: (
AGlasses,
'gsb1',
APriceAwesome,
1),
211: (
AGlasses,
'ggl1',
APriceCool,
6),
212: (
AGlasses,
'ggm1',
APriceBasicPlus,
2),
213: (
AGlasses,
'ghg1',
APriceAwesome,
3),
214: (
AGlasses,
'gie1',
APriceCool,
2),
215: (
AGlasses,
'gmt1',
APriceCool,
2),
216: (
AGlasses,
'gmt2',
APriceCool,
2),
217: (
AGlasses,
'gmt3',
3500,
5),
218: (
AGlasses,
'gmt4',
3500,
5),
219: (
AGlasses,
'gmt5',
3500,
5),
220: (
AGlasses,
'gmn1',
APriceAwesome,
6),
221: (
AGlasses,
'gmo1',
APriceAwesome,
4),
222: (
AGlasses,
'gsr1',
APriceBasicPlus,
5),
223: (
ABoysGlasses,
'ghw1',
APriceTest,
0),
224: (
ABoysGlasses,
'ghw2',
APriceBasic,
7),
225: (
AGlasses,
'gae1',
APriceAwesome,
8),
226: (
AGlasses,
'gdk3',
ATokenPriceBasic,
1),
227: (
AGlasses,
'gdk4',
ATokenPriceBasicPlus,
1),
228: (
AGlasses,
'gdk5',
ATokenPriceBasicPlus,
1),
229: (
AGlasses,
'gdk6',
ATokenPriceBasic,
1),
230: (
AGlasses,
'gdk7',
ATokenPriceBasicPlus,
1),
231: (
AGlasses,
'gdk8',
ATokenPriceBasicPlus,
1),
232: (
AGlasses,
'gdk9',
ATokenPriceBasicPlus,
1),
233: (
AGlasses,
'gdk10',
ATokenPriceBasic,
1),
234: (
AGlasses,
'gdk11',
ATokenPriceBasic,
1),
235: (
AGlasses,
'gdk12',
ATokenPriceBasic,
1),
236: (
AGlasses,
'gdk13',
ATokenPriceBasicPlus,
1),
237: (
AGlasses,
'gdk14',
ATokenPriceBasicPlus,
1),
238: (
AGlasses,
'gag1',
ATokenPriceCool,
1),
239: (
AGlasses,
'gnr2',
ATokenPriceBasic,
1),
240: (
AGlasses,
'gnr3',
ATokenPriceBasic,
1),
241: (
AGlasses,
'gnr4',
ATokenPriceBasic,
1),
242: (
AGlasses,
'gnr5',
ATokenPriceBasic,
1),
243: (
AGlasses,
'gnr6',
ATokenPriceBasic,
1),
301: (
ABackpack,
'bpb1',
APriceBasic,
4),
302: (
ABackpack,
'bpb2',
APriceBasic,
1),
303: (
ABackpack,
'bpb3',
APriceBasic,
5),
304: (
ABackpack,
'bpd1',
APriceBasicPlus,
4),
305: (
ABackpack,
'bpd2',
APriceBasicPlus,
5),
306: (
ABackpack,
'bwg1',
APriceCool,
2),
307: (
ABackpack,
'bwg2',
APriceCool,
2),
308: (
ABackpack,
'bwg3',
APriceCool,
1),
309: (
ABackpack,
'bst1',
APriceAwesome,
1),
310: (
ABackpack,
'bfn1',
APriceCool,
1),
311: (
ABackpack,
'baw1',
APriceCool,
3),
312: (
ABackpack,
'baw2',
APriceAwesome,
2),
313: (
ABackpack,
'bwt1',
3000,
3),
314: (
ABackpack,
'bwg4',
APriceAwesome,
6),
315: (
ABackpack,
'bwg5',
3000,
5),
316: (
ABackpack,
'bwg6',
3000,
4),
317: (
ABackpack,
'bjp1',
3000,
1),
318: (
ABackpack,
'blg1',
APriceCool,
2),
319: (
ABackpack,
'bsa1',
2500,
5),
320: (
ABackpack,
'bwg7',
APriceAwesome,
6),
321: (
ABackpack,
'bsa2',
2000,
2),
322: (
ABackpack,
'bsa3',
2000,
2),
323: (
ABackpack,
'bap1',
5000,
4),
324: (
ABackpack,
'bhw1',
900,
7),
325: (
ABackpack,
'bhw2',
APriceBasicPlus,
7),
326: (
ABackpack,
'bhw3',
APriceBasicPlus,
7),
327: (
ABackpack,
'bhw4',
900,
7),
328: (
ABackpack,
'bob1',
3000,
6),
329: (
ABackpack,
'bfg1',
3000,
6),
330: (
ABackpack,
'bfl1',
APriceAwesome,
8),
331: (
ABackpack,
'bga1',
ATokenPriceAwesome,
1),
332: (
ABackpack,
'bbt1',
ATokenPriceAwesome,
1),
333: (
ABackpack,
'bbt2',
ATokenPriceBasic,
1),
334: (
ABackpack,
'bbt3',
ATokenPriceBasicPlus,
1),
335: (
ABackpack,
'bbt5',
ATokenPriceBasicPlus,
1),
336: (
ABackpack,
'bbt6',
ATokenPriceBasicPlus,
1),
337: (
ABackpack,
'bbt7',
ATokenPriceBasicPlus,
1),
338: (
ABackpack,
'bbt8',
ATokenPriceCool,
1),
339: (
ABackpack,
'bbt9',
ATokenPriceCool,
1),
340: (
ABackpack,
'bbt10',
ATokenPriceCool,
1),
341: (
ABackpack,
'bbt11',
ATokenPriceCool,
1),
342: (
ABackpack,
'bcp2',
ATokenPriceCool,
1),
343: (
ABackpack,
'bcp3',
ATokenPriceCool,
1),
344: (
ABackpack,
'bjar',
ATokenPriceAmazing,
1),
401: (
AShoes,
'sat1',
APriceBasic,
3),
402: (
AShoes,
'sat2',
APriceBasic,
1),
403: (
AShoes,
'smb1',
APriceAwesome,
1),
404: (
AShoes,
'scs1',
APriceBasicPlus,
6),
405: (
ABoysShoes,
'swt1',
APriceBasicPlus,
1),
406: (
AGirlsShoes,
'smj1',
APriceBasicPlus,
1),
407: (
AShoes,
'sdk1',
APriceBasic,
1),
408: (
AShoes,
'sat3',
APriceBasic,
1),
409: (
AShoes,
'scs2',
APriceBasicPlus,
1),
410: (
AShoes,
'scs3',
APriceBasicPlus,
1),
411: (
AShoes,
'scs4',
APriceBasicPlus,
1),
412: (
AShoes,
'scb1',
APriceAwesome,
1),
413: (
AShoes,
'sfb1',
APriceCool,
1),
414: (
AShoes,
'sht1',
APriceAwesome,
4),
415: (
AGirlsShoes,
'smj2',
APriceBasicPlus,
3),
416: (
AGirlsShoes,
'smj3',
APriceBasicPlus,
4),
417: (
AShoes,
'ssb1',
APriceAwesome,
2),
418: (
AShoes,
'sts1',
APriceBasic,
5),
419: (
AShoes,
'sts2',
APriceBasic,
4),
420: (
AShoes,
'scs5',
APriceBasicPlus,
4),
421: (
AShoes,
'smb2',
APriceAwesome,
3),
422: (
AShoes,
'smb3',
APriceAwesome,
2),
423: (
AShoes,
'smb4',
APriceAwesome,
5),
424: (
AShoes,
'sfb2',
2000,
6),
425: (
AShoes,
'sfb3',
2000,
4),
426: (
AShoes,
'sfb4',
2000,
3),
427: (
AShoes,
'sfb5',
2000,
5),
428: (
AShoes,
'sfb6',
2000,
4),
429: (
AShoes,
'slf1',
APriceBasicPlus,
3),
430: (
AGirlsShoes,
'smj4',
APriceBasicPlus,
2),
431: (
AShoes,
'smt1',
APriceAwesome,
4),
432: (
AShoes,
'sox1',
APriceAwesome,
5),
433: (
AShoes,
'srb1',
APriceAwesome,
6),
434: (
AShoes,
'sst1',
3000,
3),
435: (
AShoes,
'swb1',
APriceCool,
3),
436: (
AShoes,
'swb2',
APriceCool,
4),
437: (
AShoes,
'swk1',
APriceAwesome,
3),
438: (
AShoes,
'scs6',
APriceBasicPlus,
0),
439: (
AShoes,
'smb5',
APriceAwesome,
3),
440: (
AShoes,
'sht2',
APriceAwesome,
4),
441: (
AShoes,
'srb2',
APriceAwesome,
3),
442: (
AShoes,
'sts3',
APriceBasic,
6),
443: (
AShoes,
'sts4',
APriceBasic,
3),
444: (
AShoes,
'sts5',
APriceBasic,
2),
445: (
AShoes,
'srb3',
APriceCool,
5),
446: (
AShoes,
'srb4',
APriceCool,
3),
447: (
AShoes,
'sat4',
APriceBasic,
3),
448: (
AShoes,
'shw1',
APriceCool,
7),
449: (
AShoes,
'shw2',
APriceCool,
7)}
LoyaltyAccessoryItems = [] | v2.5.7/toontown/catalog/CatalogAccessoryItemGlobals.py | ATArticle = 0
# Tuple-field indexes for the entries of the AccessoryTypes dict below.
# Each entry there is a 4-tuple (category, model string, price, small int);
# the names suggest the fields are article/string/base-price/released.
# NOTE(review): ATEmblemPrices (4) has no matching field in the 4-tuples
# visible here -- TODO confirm whether some entries carry a fifth element.
ATString = 1
ATBasePrice = 2
ATReleased = 3
ATEmblemPrices = 4
# Accessory categories (used as the first tuple field in AccessoryTypes):
# generic plus boys-only and girls-only variants of each accessory kind.
AHat = 0
AGlasses = 1
ABackpack = 2
AShoes = 3
ABoysHat = 4
ABoysGlasses = 5
ABoysBackpack = 6
ABoysShoes = 7
AGirlsHat = 8
AGirlsGlasses = 9
AGirlsBackpack = 10
AGirlsShoes = 11
# Price tiers (used as the third tuple field in AccessoryTypes).
APriceTest = 5
APriceBasic = 250
APriceBasicPlus = 400
APriceCool = 800
APriceAwesome = 1500
# Token-denominated price tiers (also used as the third tuple field).
ATokenPriceBasic = 10
ATokenPriceBasicPlus = 30
ATokenPriceCool = 50
ATokenPriceAwesome = 100
ATokenPriceAmazing = 250
AccessoryTypes = {101: (AHat,
'hbb1',
APriceBasic,
1),
102: (
AHat,
'hsf1',
APriceCool,
5),
103: (
AGirlsHat,
'hrb1',
APriceBasic,
1),
104: (
AHat,
'hsf2',
APriceCool,
0),
105: (
AHat,
'hsf3',
APriceCool,
0),
106: (
AGirlsHat,
'hrb2',
APriceBasicPlus,
3),
107: (
AGirlsHat,
'hrb3',
APriceBasicPlus,
0),
108: (
AHat,
'hht1',
APriceCool,
4),
109: (
AHat,
'hht2',
APriceCool,
3),
110: (
AHat,
'htp1',
APriceCool,
3),
111: (
AHat,
'htp2',
APriceCool,
0),
112: (
AHat,
'hav1',
3500,
0),
113: (
AHat,
'hfp1',
3500,
0),
114: (
AHat,
'hsg1',
3500,
0),
115: (
AHat,
'hwt1',
3500,
0),
116: (
AHat,
'hfz1',
APriceCool,
5),
117: (
AHat,
'hgf1',
APriceCool,
1),
118: (
AHat,
'hpt1',
APriceBasicPlus,
1),
119: (
AHat,
'hpb1',
APriceBasicPlus,
6),
120: (
ABoysHat,
'hcr1',
10000,
5),
121: (
AHat,
'hbb2',
APriceBasic,
2),
122: (
AHat,
'hbb3',
APriceBasic,
2),
123: (
AHat,
'hcw1',
APriceCool,
1),
124: (
AHat,
'hpr1',
APriceAwesome,
1),
125: (
AHat,
'hpp1',
APriceBasicPlus,
1),
126: (
AHat,
'hfs1',
APriceCool,
1),
127: (
AHat,
'hsb1',
APriceAwesome,
1),
128: (
AHat,
'hst1',
APriceBasicPlus,
1),
129: (
AGirlsHat,
'hsu1',
APriceCool,
1),
130: (
AGirlsHat,
'hrb4',
APriceBasic,
1),
131: (
AGirlsHat,
'hrb5',
APriceBasicPlus,
4),
132: (
AGirlsHat,
'hrb6',
APriceBasic,
2),
133: (
AGirlsHat,
'hrb7',
APriceBasicPlus,
6),
134: (
AHat,
'hat1',
APriceCool,
2),
135: (
AGirlsHat,
'hhd1',
APriceCool,
2),
136: (
AHat,
'hbw1',
APriceCool,
6),
137: (
AHat,
'hch1',
APriceCool,
5),
138: (
AHat,
'hdt1',
APriceAwesome,
6),
139: (
AHat,
'hft1',
APriceCool,
4),
140: (
AHat,
'hfd1',
APriceCool,
6),
141: (
AHat,
'hmk1',
APriceAwesome,
2),
142: (
AHat,
'hft2',
APriceCool,
6),
143: (
ABoysHat,
'hhd2',
APriceCool,
3),
144: (
AGirlsHat,
'hpc1',
APriceCool,
5),
145: (
AHat,
'hrh1',
APriceCool,
2),
146: (
AHat,
'hhm1',
2500,
2),
147: (
AHat,
'hat2',
APriceCool,
2),
148: (
AGirlsHat,
'htr1',
10000,
3),
149: (
AHat,
'hhm2',
APriceAwesome,
2),
150: (
AHat,
'hwz1',
APriceCool,
2),
151: (
AHat,
'hwz2',
APriceCool,
2),
152: (
AHat,
'hhm3',
APriceAwesome,
6),
153: (
AHat,
'hhm4',
APriceAwesome,
5),
154: (
AHat,
'hfp2',
APriceCool,
5),
155: (
AHat,
'hhm5',
APriceAwesome,
4),
156: (
AHat,
'hnp1',
APriceAwesome,
6),
157: (
AHat,
'hpc2',
APriceAwesome,
3),
158: (
AHat,
'hph1',
APriceAwesome,
4),
159: (
AHat,
'hwg1',
APriceCool,
5),
160: (
AHat,
'hbb4',
APriceBasic,
5),
161: (
AHat,
'hbb5',
APriceBasic,
2),
162: (
AHat,
'hbb6',
APriceBasic,
5),
163: (
AHat,
'hsl1',
APriceCool,
5),
164: (
AHat,
'hfr1',
3000,
4),
165: (
AHat,
'hby1',
APriceAwesome,
5),
166: (
AGirlsHat,
'hrb8',
APriceBasicPlus,
6),
167: (
AHat,
'hjh1',
APriceAwesome,
3),
168: (
AHat,
'hbb7',
APriceBasic,
6),
169: (
AGirlsHat,
'hrb9',
APriceBasicPlus,
6),
170: (
AHat,
'hwt2',
APriceAwesome,
4),
171: (
AGirlsHat,
'hhw1',
APriceBasicPlus,
7),
172: (
AHat,
'hhw2',
900,
7),
173: (
AHat,
'hob1',
APriceAwesome,
6),
174: (
AHat,
'hbn1',
APriceAwesome,
8),
175: (
AHat,
'hpt2',
APriceCool,
9),
176: (
AHat,
'htp3',
ATokenPriceAmazing,
4),
177: (
AHat,
'hrb10',
ATokenPriceAwesome,
4),
178: (
AHat,
'hrb11',
ATokenPriceBasic,
4),
179: (
AHat,
'hrb12',
ATokenPriceBasicPlus,
4),
180: (
AHat,
'hrb14',
ATokenPriceBasicPlus,
4),
181: (
AHat,
'hrb15',
ATokenPriceBasicPlus,
4),
182: (
AHat,
'hrb16',
ATokenPriceBasicPlus,
4),
183: (
AHat,
'hrb17',
ATokenPriceCool,
4),
184: (
AHat,
'hrb18',
ATokenPriceCool,
4),
185: (
AHat,
'hrb19',
ATokenPriceCool,
4),
186: (
AHat,
'hrb20',
ATokenPriceCool,
4),
187: (
AHat,
'hsu2',
ATokenPriceBasicPlus,
4),
188: (
AHat,
'hgf2',
ATokenPriceCool,
4),
201: (
AGlasses,
'grd1',
APriceBasicPlus,
0),
202: (
AGlasses,
'gmb1',
APriceCool,
1),
203: (
AGlasses,
'gnr1',
APriceCool,
0),
204: (
AGlasses,
'gst1',
APriceBasicPlus,
1),
205: (
AGlasses,
'g3d1',
APriceCool,
1),
206: (
AGlasses,
'gav1',
APriceCool,
1),
207: (
AGlasses,
'gce1',
APriceCool,
2),
208: (
AGlasses,
'gdk1',
APriceBasic,
1),
209: (
AGlasses,
'gjo1',
APriceBasicPlus,
1),
210: (
AGlasses,
'gsb1',
APriceAwesome,
1),
211: (
AGlasses,
'ggl1',
APriceCool,
6),
212: (
AGlasses,
'ggm1',
APriceBasicPlus,
2),
213: (
AGlasses,
'ghg1',
APriceAwesome,
3),
214: (
AGlasses,
'gie1',
APriceCool,
2),
215: (
AGlasses,
'gmt1',
APriceCool,
2),
216: (
AGlasses,
'gmt2',
APriceCool,
2),
217: (
AGlasses,
'gmt3',
3500,
5),
218: (
AGlasses,
'gmt4',
3500,
5),
219: (
AGlasses,
'gmt5',
3500,
5),
220: (
AGlasses,
'gmn1',
APriceAwesome,
6),
221: (
AGlasses,
'gmo1',
APriceAwesome,
4),
222: (
AGlasses,
'gsr1',
APriceBasicPlus,
5),
223: (
ABoysGlasses,
'ghw1',
APriceTest,
0),
224: (
ABoysGlasses,
'ghw2',
APriceBasic,
7),
225: (
AGlasses,
'gae1',
APriceAwesome,
8),
226: (
AGlasses,
'gdk3',
ATokenPriceBasic,
1),
227: (
AGlasses,
'gdk4',
ATokenPriceBasicPlus,
1),
228: (
AGlasses,
'gdk5',
ATokenPriceBasicPlus,
1),
229: (
AGlasses,
'gdk6',
ATokenPriceBasic,
1),
230: (
AGlasses,
'gdk7',
ATokenPriceBasicPlus,
1),
231: (
AGlasses,
'gdk8',
ATokenPriceBasicPlus,
1),
232: (
AGlasses,
'gdk9',
ATokenPriceBasicPlus,
1),
233: (
AGlasses,
'gdk10',
ATokenPriceBasic,
1),
234: (
AGlasses,
'gdk11',
ATokenPriceBasic,
1),
235: (
AGlasses,
'gdk12',
ATokenPriceBasic,
1),
236: (
AGlasses,
'gdk13',
ATokenPriceBasicPlus,
1),
237: (
AGlasses,
'gdk14',
ATokenPriceBasicPlus,
1),
238: (
AGlasses,
'gag1',
ATokenPriceCool,
1),
239: (
AGlasses,
'gnr2',
ATokenPriceBasic,
1),
240: (
AGlasses,
'gnr3',
ATokenPriceBasic,
1),
241: (
AGlasses,
'gnr4',
ATokenPriceBasic,
1),
242: (
AGlasses,
'gnr5',
ATokenPriceBasic,
1),
243: (
AGlasses,
'gnr6',
ATokenPriceBasic,
1),
301: (
ABackpack,
'bpb1',
APriceBasic,
4),
302: (
ABackpack,
'bpb2',
APriceBasic,
1),
303: (
ABackpack,
'bpb3',
APriceBasic,
5),
304: (
ABackpack,
'bpd1',
APriceBasicPlus,
4),
305: (
ABackpack,
'bpd2',
APriceBasicPlus,
5),
306: (
ABackpack,
'bwg1',
APriceCool,
2),
307: (
ABackpack,
'bwg2',
APriceCool,
2),
308: (
ABackpack,
'bwg3',
APriceCool,
1),
309: (
ABackpack,
'bst1',
APriceAwesome,
1),
310: (
ABackpack,
'bfn1',
APriceCool,
1),
311: (
ABackpack,
'baw1',
APriceCool,
3),
312: (
ABackpack,
'baw2',
APriceAwesome,
2),
313: (
ABackpack,
'bwt1',
3000,
3),
314: (
ABackpack,
'bwg4',
APriceAwesome,
6),
315: (
ABackpack,
'bwg5',
3000,
5),
316: (
ABackpack,
'bwg6',
3000,
4),
317: (
ABackpack,
'bjp1',
3000,
1),
318: (
ABackpack,
'blg1',
APriceCool,
2),
319: (
ABackpack,
'bsa1',
2500,
5),
320: (
ABackpack,
'bwg7',
APriceAwesome,
6),
321: (
ABackpack,
'bsa2',
2000,
2),
322: (
ABackpack,
'bsa3',
2000,
2),
323: (
ABackpack,
'bap1',
5000,
4),
324: (
ABackpack,
'bhw1',
900,
7),
325: (
ABackpack,
'bhw2',
APriceBasicPlus,
7),
326: (
ABackpack,
'bhw3',
APriceBasicPlus,
7),
327: (
ABackpack,
'bhw4',
900,
7),
328: (
ABackpack,
'bob1',
3000,
6),
329: (
ABackpack,
'bfg1',
3000,
6),
330: (
ABackpack,
'bfl1',
APriceAwesome,
8),
331: (
ABackpack,
'bga1',
ATokenPriceAwesome,
1),
332: (
ABackpack,
'bbt1',
ATokenPriceAwesome,
1),
333: (
ABackpack,
'bbt2',
ATokenPriceBasic,
1),
334: (
ABackpack,
'bbt3',
ATokenPriceBasicPlus,
1),
335: (
ABackpack,
'bbt5',
ATokenPriceBasicPlus,
1),
336: (
ABackpack,
'bbt6',
ATokenPriceBasicPlus,
1),
337: (
ABackpack,
'bbt7',
ATokenPriceBasicPlus,
1),
338: (
ABackpack,
'bbt8',
ATokenPriceCool,
1),
339: (
ABackpack,
'bbt9',
ATokenPriceCool,
1),
340: (
ABackpack,
'bbt10',
ATokenPriceCool,
1),
341: (
ABackpack,
'bbt11',
ATokenPriceCool,
1),
342: (
ABackpack,
'bcp2',
ATokenPriceCool,
1),
343: (
ABackpack,
'bcp3',
ATokenPriceCool,
1),
344: (
ABackpack,
'bjar',
ATokenPriceAmazing,
1),
401: (
AShoes,
'sat1',
APriceBasic,
3),
402: (
AShoes,
'sat2',
APriceBasic,
1),
403: (
AShoes,
'smb1',
APriceAwesome,
1),
404: (
AShoes,
'scs1',
APriceBasicPlus,
6),
405: (
ABoysShoes,
'swt1',
APriceBasicPlus,
1),
406: (
AGirlsShoes,
'smj1',
APriceBasicPlus,
1),
407: (
AShoes,
'sdk1',
APriceBasic,
1),
408: (
AShoes,
'sat3',
APriceBasic,
1),
409: (
AShoes,
'scs2',
APriceBasicPlus,
1),
410: (
AShoes,
'scs3',
APriceBasicPlus,
1),
411: (
AShoes,
'scs4',
APriceBasicPlus,
1),
412: (
AShoes,
'scb1',
APriceAwesome,
1),
413: (
AShoes,
'sfb1',
APriceCool,
1),
414: (
AShoes,
'sht1',
APriceAwesome,
4),
415: (
AGirlsShoes,
'smj2',
APriceBasicPlus,
3),
416: (
AGirlsShoes,
'smj3',
APriceBasicPlus,
4),
417: (
AShoes,
'ssb1',
APriceAwesome,
2),
418: (
AShoes,
'sts1',
APriceBasic,
5),
419: (
AShoes,
'sts2',
APriceBasic,
4),
420: (
AShoes,
'scs5',
APriceBasicPlus,
4),
421: (
AShoes,
'smb2',
APriceAwesome,
3),
422: (
AShoes,
'smb3',
APriceAwesome,
2),
423: (
AShoes,
'smb4',
APriceAwesome,
5),
424: (
AShoes,
'sfb2',
2000,
6),
425: (
AShoes,
'sfb3',
2000,
4),
426: (
AShoes,
'sfb4',
2000,
3),
427: (
AShoes,
'sfb5',
2000,
5),
428: (
AShoes,
'sfb6',
2000,
4),
429: (
AShoes,
'slf1',
APriceBasicPlus,
3),
430: (
AGirlsShoes,
'smj4',
APriceBasicPlus,
2),
431: (
AShoes,
'smt1',
APriceAwesome,
4),
432: (
AShoes,
'sox1',
APriceAwesome,
5),
433: (
AShoes,
'srb1',
APriceAwesome,
6),
434: (
AShoes,
'sst1',
3000,
3),
435: (
AShoes,
'swb1',
APriceCool,
3),
436: (
AShoes,
'swb2',
APriceCool,
4),
437: (
AShoes,
'swk1',
APriceAwesome,
3),
438: (
AShoes,
'scs6',
APriceBasicPlus,
0),
439: (
AShoes,
'smb5',
APriceAwesome,
3),
440: (
AShoes,
'sht2',
APriceAwesome,
4),
441: (
AShoes,
'srb2',
APriceAwesome,
3),
442: (
AShoes,
'sts3',
APriceBasic,
6),
443: (
AShoes,
'sts4',
APriceBasic,
3),
444: (
AShoes,
'sts5',
APriceBasic,
2),
445: (
AShoes,
'srb3',
APriceCool,
5),
446: (
AShoes,
'srb4',
APriceCool,
3),
447: (
AShoes,
'sat4',
APriceBasic,
3),
448: (
AShoes,
'shw1',
APriceCool,
7),
449: (
AShoes,
'shw2',
APriceCool,
7)}
LoyaltyAccessoryItems = [] | 0.238018 | 0.36201 |
from django.core.cache import cache
from django.db import models
from cachemodel import CACHE_FOREVER_TIMEOUT
from cachemodel.managers import CacheModelManager, CachedTableManager
from cachemodel.decorators import find_fields_decorated_with
from cachemodel.utils import generate_cache_key
import collections
class CacheModel(models.Model):
    """An abstract model that has convenience functions for dealing with caching.

    Subclasses get two managers: the default ``objects`` and ``cached``,
    which serves lookups (e.g. ``.cached.get(pk=...)``) from the cache
    entries published by :meth:`publish`.
    """
    objects = models.Manager()
    cached = CacheModelManager()

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """Recompute denormalized fields, save to the DB, then republish to cache."""
        # find all the denormalized fields and update them
        self.denormalize()
        # save ourselves to the database
        super(CacheModel, self).save(*args, **kwargs)
        # trigger cache publish
        self.publish()

    def delete(self, *args, **kwargs):
        """Evict the pk-keyed cache entry before deleting the row."""
        self.publish_delete("pk")
        super(CacheModel, self).delete(*args, **kwargs)

    def publish(self):
        """Store this instance in cache, plus any auto-published @cached_method results."""
        # cache ourselves so that we're ready for .cached.get(pk=)
        self.publish_by('pk')
        # find any @cached_methods with auto_publish=True
        for method in find_fields_decorated_with(self, '_cached_method'):
            if not getattr(method, '_cached_method_auto_publish', False):
                continue
            try:
                # run the cached method and store it in cache
                self.publish_method(method.__name__)
            except TypeError:
                # the @cached_method requires arguments, so we can't cache it automatically
                pass

    def publish_key(self, *args):
        """Build the cache key for this instance, keyed by the given field names.

        :param args: field names; each is read off the instance via getattr.
        :return: a cache key string for a "get" lookup on those field values.
        """
        kwargs = {}
        for field in args:
            kwargs[field] = getattr(self, field)
        return generate_cache_key([self.__class__.__name__, "get"], **kwargs)

    def publish_by(self, *args):
        """Cache this instance keyed by the given field names (never expires)."""
        key = self.publish_key(*args)
        cache.set(key, self, CACHE_FOREVER_TIMEOUT)

    def publish_delete(self, *args):
        """Remove the cache entry keyed by the given field names."""
        cache.delete(self.publish_key(*args))

    def denormalize(self):
        """Copy each @denormalized_field method's result into its target attribute."""
        for method in find_fields_decorated_with(self, '_denormalized_field'):
            if hasattr(method, '_denormalized_field_name'):
                setattr(self, method._denormalized_field_name, method(self))

    def publish_method(self, method_name, *args, **kwargs):
        """Evaluate a @cached_method's underlying target and store the result in cache.

        :param method_name: name of a @cached_method on this instance.
        :raises AttributeError: if ``method_name`` is not a @cached_method.
        """
        method = getattr(self, method_name, None)
        if not getattr(method, '_cached_method', False):
            # Bug fix: the original raise never interpolated method_name into
            # the message (leaving a literal '%s') and ended with a stray ';'.
            raise AttributeError("method '%s' is not a cached_method." % method_name)
        target = getattr(method, '_cached_method_target', None)
        # callable() replaces isinstance(target, collections.Callable): the
        # collections.Callable alias was removed in Python 3.10.
        if callable(target):
            key = generate_cache_key([self.__class__.__name__, target.__name__, self.pk], *args, **kwargs)
            data = target(self, *args, **kwargs)
            cache.set(key, data, CACHE_FOREVER_TIMEOUT)
class CachedTable(models.Model):
objects = models.Manager()
cached = CachedTableManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
ret = super(CachedTable, self).save(*args, **kwargs)
self.__class__.cached._rebuild_indices()
return ret | cachemodel/models.py |
from django.core.cache import cache
from django.db import models
from cachemodel import CACHE_FOREVER_TIMEOUT
from cachemodel.managers import CacheModelManager, CachedTableManager
from cachemodel.decorators import find_fields_decorated_with
from cachemodel.utils import generate_cache_key
import collections
class CacheModel(models.Model):
"""An abstract model that has convienence functions for dealing with caching."""
objects = models.Manager()
cached = CacheModelManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
#find all the denormalized fields and update them
self.denormalize()
# save ourselves to the database
super(CacheModel, self).save(*args, **kwargs)
# trigger cache publish
self.publish()
def delete(self, *args, **kwargs):
self.publish_delete("pk")
super(CacheModel, self).delete(*args, **kwargs)
def publish(self):
# cache ourselves so that we're ready for .cached.get(pk=)
self.publish_by('pk')
# find any @cached_methods with auto_publish=True
for method in find_fields_decorated_with(self, '_cached_method'):
if not getattr(method, '_cached_method_auto_publish', False):
continue
try:
# run the cached method and store it in cache
self.publish_method(method.__name__)
except TypeError as e:
# the @cached_method requires arguments, so we cant cache it automatically
pass
def publish_key(self, *args):
kwargs = {}
for field in args:
kwargs[field] = getattr(self, field)
return generate_cache_key([self.__class__.__name__, "get"], **kwargs)
def publish_by(self, *args):
# cache ourselves, keyed by the fields given
key = self.publish_key(*args)
cache.set(key, self, CACHE_FOREVER_TIMEOUT)
def publish_delete(self, *args):
cache.delete(self.publish_key(*args))
def denormalize(self):
for method in find_fields_decorated_with(self, '_denormalized_field'):
if hasattr(method, '_denormalized_field_name'):
setattr(self, method._denormalized_field_name, method(self))
def publish_method(self, method_name, *args, **kwargs):
method = getattr(self, method_name, None)
if not getattr(method, '_cached_method', False):
raise AttributeError("method '%s' is not a cached_method.");
target = getattr(method, '_cached_method_target', None)
if isinstance(target, collections.Callable):
key = generate_cache_key([self.__class__.__name__, target.__name__, self.pk], *args, **kwargs)
data = target(self, *args, **kwargs)
cache.set(key, data, CACHE_FOREVER_TIMEOUT)
class CachedTable(models.Model):
objects = models.Manager()
cached = CachedTableManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
ret = super(CachedTable, self).save(*args, **kwargs)
self.__class__.cached._rebuild_indices()
return ret | 0.581303 | 0.091301 |
import argparse
import random
import math
class Instance:
    """Data container for one instance of the Maneuvers Scheduling Problem.

    Attributes (set verbatim from the constructor arguments):
        n: number of switches
        m: number of teams
        s: number of stages
        p: handle time of each switch
        c: travel-time matrices, indexed [team][from][to]
        prec: precedence lists; prec[j] holds the switches that precede j
        technology: per-switch technology flag ('M' manual / 'R' remote)
        operation: per-switch operation flag ('O' open / 'C' close)
        stage: per-switch stage number
    """

    def __init__(self, n, m, s, p, c, prec, technology, operation, stage):
        self.n, self.m, self.s = n, m, s
        self.p, self.c, self.prec = p, c, prec
        self.technology, self.operation, self.stage = technology, operation, stage
def create_instance(n, m, s, prec, tx_remote, symmetry, hdl_remote, hdl_limits, travel_limits, integer_only, seed):
    """
    Create a random instance of the Maneuvers Scheduling Problem.

    :param n: Number of switches.
    :param m: Number of teams.
    :param s: Number of stages.
    :param prec: type of precedence graph (independent, intree, sequential, general)
    :param tx_remote: Proportion of switches remotely handled.
    :param symmetry: Strategy used to define the travel time matrix (euclidean, symmetric, asymmetric).
    :param hdl_remote: Time to handle remotely handled switches.
    :param hdl_limits: A list as [lb, ub] that limits the time to manually handle switches.
    :param travel_limits: A list as [lb, ub] that limits the travel time between pair of locations.
    :param integer_only: Whether the handle time and travel time should be integer values only.
    :param seed: Seed used to initialize the random number generator.
    :return: An instance of the switch operations scheduling problem
    """
    # Initialize the seed of the random number generator (makes instances
    # reproducible for a given seed).
    random.seed(seed)
    # Switches: technology -- default every switch to manual ("M"), then flip
    # ceil(n * tx_remote) randomly chosen switches to remote ("R").
    technology = ["M"] * n;
    for i in random.sample(range(0, n), math.ceil(n * tx_remote)):
        technology[i] = "R"
    # Switches: time for handling
    p = [0] * n
    for i in range(0, n):
        if technology[i] == "R":
            # all remote switches share the same fixed handle time
            p[i] = hdl_remote
        else:
            if integer_only:
                p[i] = random.randint(math.ceil(hdl_limits[0]), math.floor(hdl_limits[1]))
            else:
                p[i] = round(random.uniform(hdl_limits[0], hdl_limits[1]), 5)
    # Switches: operation -- the last switch always closes; s - 1 additional
    # random switches are marked as closing ("C"), the rest stay open ("O").
    # NOTE(review): the random sample may include index n - 1 (already "C"),
    # which would yield fewer than s closing switches / stages -- confirm.
    operation = ["O"] * n
    operation[-1] = "C"
    for i in random.sample(range(0, n), s - 1):
        operation[i] = "C"
    # Switches: stage -- switches are assigned to stages in index order; a
    # closing switch terminates the current stage.
    stage = [0] * n
    current_stage = 1
    for i in range(0, n):
        stage[i] = current_stage
        if operation[i] == "C":
            current_stage = current_stage + 1
    # Travel time: one (n+1) x (n+1) matrix per team; index n is an extra
    # location beyond the switches (presumably the teams' start/depot --
    # verify against the solver).
    c = [[[0 for i in range(0, n+1)] for i in range(0, n+1)] for l in range(0, m)]
    if symmetry == "euclidean":
        # Define coordinates (the travel_limits double as coordinate bounds
        # in this mode, per the CLI help text)
        xcoord = [round(random.uniform(travel_limits[0], travel_limits[1]), 3) for i in range(0, n+1)]
        ycoord = [round(random.uniform(travel_limits[0], travel_limits[1]), 3) for i in range(0, n+1)]
        # Compute travel time as the Euclidean distance between coordinates;
        # identical for every team and symmetric by construction.
        for l in range(0, m):
            for i in range(0, n+1):
                for j in range(i+1, n+1):
                    val = math.sqrt(math.pow(xcoord[i] - xcoord[j], 2) + math.pow(ycoord[i] - ycoord[j], 2))
                    val = round(val) if integer_only else round(val, 5)
                    c[l][i][j] = val
                    c[l][j][i] = val
    elif symmetry == "symmetric":
        # Compute travel time: uniform random, mirrored across the diagonal
        for l in range(0, m):
            for i in range(0, n+1):
                for j in range(i+1, n+1):
                    val = random.uniform(travel_limits[0], travel_limits[1])
                    val = round(val) if integer_only else round(val, 5)
                    c[l][i][j] = val
                    c[l][j][i] = val
    elif symmetry == "asymmetric":
        # Compute travel time: uniform random, each direction drawn separately
        for l in range(0, m):
            for i in range(0, n+1):
                for j in range(0, n+1):
                    if (i != j):
                        val = random.uniform(travel_limits[0], travel_limits[1])
                        val = round(val) if integer_only else round(val, 5)
                        c[l][i][j] = val
    # Precedence constraints: P[j] lists the switches that must be handled
    # before switch j.  First bucket switches by stage and operation.
    P = [[] for i in range(0, n)]
    switches_to_open = [[] for i in range(0, s + 1)]
    switches_to_close = [[] for i in range(0, s + 1)]
    for i in range(0, n):
        if operation[i] == "C":
            switches_to_close[stage[i]].append(i)
        elif operation[i] == "O":
            switches_to_open[stage[i]].append(i)
    if prec == "independent":
        # each closing switch depends only on the opening switches of its stage
        for j in range(0, n):
            if operation[j] == "C":
                for i in switches_to_open[stage[j]]:
                    P[j].append(i)
    elif prec == "intree":
        # as "independent", plus each closer also waits for the previous
        # stage's closers
        for j in range(0, n):
            if operation[j] == "C":
                for i in switches_to_open[stage[j]]:
                    P[j].append(i)
                for i in switches_to_close[stage[j] - 1]:
                    P[j].append(i)
    elif prec == "sequential":
        # stages form a chain: openers wait on the previous stage's closers,
        # closers wait on their stage's openers (or the previous closers if
        # the stage has no openers)
        for j in range(0, n):
            if operation[j] == "O":
                for i in switches_to_close[stage[j] - 1]:
                    P[j].append(i)
            elif operation[j] == "C":
                if len(switches_to_open[stage[j]]) == 0:
                    for i in switches_to_close[stage[j] - 1]:
                        P[j].append(i)
                else:
                    for i in switches_to_open[stage[j]]:
                        P[j].append(i)
    elif prec == "general":
        # mix: the first stage is wired as "independent"; each later stage
        # uses a randomly chosen relation type.
        # NOTE(review): only the first closing switch of each stage is used
        # (switches_to_close[...][0]); a stage with several closers would
        # leave the others unconstrained, and a stage with none would raise
        # IndexError -- confirm "one closer per stage" is the invariant.
        # First stage
        j = switches_to_close[1][0]
        for i in switches_to_open[stage[j]]:
            P[j].append(i)
        # Next stages
        for current_stage in range(2, s + 1):
            prec_relation = random.choice(["independent", "intree", "sequential"])
            if prec_relation == "independent":
                j = switches_to_close[current_stage][0]
                for i in switches_to_open[stage[j]]:
                    P[j].append(i)
            elif prec_relation == "intree":
                j = switches_to_close[current_stage][0]
                for i in switches_to_open[stage[j]]:
                    P[j].append(i)
                for i in switches_to_close[stage[j] - 1]:
                    P[j].append(i)
            elif prec_relation == "sequential":
                for j in switches_to_open[current_stage]:
                    for i in switches_to_close[stage[j] - 1]:
                        P[j].append(i)
                j = switches_to_close[current_stage][0]
                if len(switches_to_open[stage[j]]) == 0:
                    for i in switches_to_close[stage[j] - 1]:
                        P[j].append(i)
                else:
                    for i in switches_to_open[stage[j]]:
                        P[j].append(i)
    # Create and return the instance of the problem
    return Instance(n, m, s, p, c, P, technology, operation, stage)
def write_instance(filename: str, instance: Instance):
    """
    Write the instance data into a plain text file.

    File layout: a header line "n m s", then one line per switch
    (id, handle time, technology, operation, stage), then the precedence
    lists (id, count, predecessor ids), then one travel-time matrix per
    team, row by row.  All switch/location ids in the file are 1-based.

    :param filename: Path and name of the file in which the instance will be written.
    :param instance: The instance that will be saved to file.
    """
    with open(filename, "w") as out:
        # Header: instance dimensions.
        out.write(f"{instance.n} {instance.m} {instance.s}\n")
        # One line per switch.
        for idx in range(instance.n):
            out.write(f"{instance.p[idx]} "
                      f"{instance.technology[idx]} "
                      f"{instance.operation[idx]} "
                      f"{instance.stage[idx]}\n".join([f"{idx + 1} ", ""]))
        # Precedence lists (trailing space kept for format compatibility).
        for idx in range(instance.n):
            fields = [idx + 1, len(instance.prec[idx])] + [j + 1 for j in instance.prec[idx]]
            out.write(" ".join(str(v) for v in fields) + " \n")
        # Travel-time matrices: one row per origin location, one matrix per team.
        for team in range(instance.m):
            for origin in range(instance.n + 1):
                row = (str(instance.c[team][origin][dest]) for dest in range(instance.n + 1))
                out.write(" ".join(row) + " \n")
def __create_instance(args):
    """Translate parsed CLI arguments into a call to create_instance."""
    handle_limits = [args.handle_time_min, args.handle_time_max]
    travel_limits = [args.travel_time_min, args.travel_time_max]
    return create_instance(args.switches, args.teams, args.stages, args.prec,
                           args.remote, args.symmetry, args.handle_time_remote,
                           handle_limits, travel_limits, args.integer_only,
                           args.seed)
def __create_cli():
    """
    Create the Command-Line Interface of the application.

    :return: The argparse parser for reading the user input.
    """
    # Typo fix in the user-visible description ("Scheculing" -> "Scheduling").
    parser = argparse.ArgumentParser(description="Instance generator for the Maneuvers Scheduling Problem")
    parser.add_argument("--filename",
                        help="Path and name of the file in which the instance will be saved.",
                        type=str,
                        required=True)
    parser.add_argument("--switches",
                        help="Number of switches.",
                        type=int,
                        required=True)
    parser.add_argument("--teams",
                        help="Number of teams available.",
                        type=int,
                        required=True)
    parser.add_argument("--stages",
                        help="Number of stages.",
                        type=int,
                        required=True)
    parser.add_argument("--prec",
                        help="Type of the precedence graph.",
                        type=str,
                        choices=["general", "independent", "intree", "sequential"],
                        default="general")
    parser.add_argument("--remote",
                        help="Proportion of switches remotely handled.",
                        type=float,
                        required=True)
    parser.add_argument("--seed",
                        help="Seed used to initialize the random number generator.",
                        type=int,
                        default=0)
    parser.add_argument("--integer-only",
                        help="Whether the values generated for this instance should "
                             "be truncated to integer values.",
                        action="store_true",
                        dest="integer_only")
    # Help text fixed: this option selects one of three strategies, it is not
    # a boolean as the original help implied.
    parser.add_argument("--symmetry",
                        help="Strategy used to build the travel time matrix "
                             "(euclidean, symmetric or asymmetric).",
                        type=str,
                        choices=["euclidean", "symmetric", "asymmetric"],
                        default="euclidean")
    parser.add_argument("--handle-time-remote",
                        help="Handle time for remotely handled switches.",
                        type=float,
                        default=1,
                        dest="handle_time_remote")
    parser.add_argument("--handle-time-min",
                        help="Minimum value for time to handle a manual switch.",
                        type=float,
                        default=1,
                        dest="handle_time_min")
    parser.add_argument("--handle-time-max",
                        help="Maximum value for time to handle a manual switch.",
                        type=float,
                        default=1,
                        dest="handle_time_max")
    parser.add_argument("--travel-time-min",
                        help="Defines the lower limit to travel time values. If the "
                             "option 'symmetry' is set to 'euclidean', this option "
                             "is interpreted as the lower limit of the coordinate "
                             "values used to determine the travel time.",
                        type=float,
                        default=10,
                        dest="travel_time_min")
    parser.add_argument("--travel-time-max",
                        help="Defines the upper limit to travel time values. If the "
                             "option 'symmetry' is set to 'euclidean', this option "
                             "is interpreted as the upper limit of the coordinate "
                             "values used to determine the travel time.",
                        type=float,
                        default=60,
                        dest="travel_time_max")
    return parser
if __name__ == "__main__":
# Parse user arguments
cli = __create_cli()
args = cli.parse_args()
# Create instance
instance = __create_instance(args)
# Write file with instance data
write_instance(args.filename, instance) | instances/generator/scheduling_generator.py | import argparse
import random
import math
class Instance:
"""
Instance of the Maneuvers Scheduling Problem.
:param n:
:param m:
:param s:
:param p:
:param c:
:param prec:
:param technology:
:param operation:
:param stage:
"""
def __init__(self, n, m, s, p, c, prec, technology, operation, stage):
self.n = n
self.m = m
self.s = s
self.p = p
self.c = c
self.prec = prec
self.technology = technology
self.operation = operation
self.stage = stage
def create_instance(n, m, s, prec, tx_remote, symmetry, hdl_remote, hdl_limits, travel_limits, integer_only, seed):
"""
Create an instance for the maneuverable scheduling.
:param n: Number or switches.
:param m: Number of teams.
:param s: Number of stages.
:param prec: type of precedence graph (independent, intree, sequential, general)
:param tx_remote: Proportion of switches remotely handled.
:param symmetry: Strategy used to define the travel time matrix (euclidean, symmetric, asymmetric).
:param hdl_remote: Time to handle remotely handled switches.
:param hdl_limits: A list as [lb, ub] that limits the time to manually handle switches.
:param travel_limits: A list as [lb, ub] that limits the travel time between pair of locations.
:param integer_only: Whether the handle time and travel time should be integer values only.
:param seed: Seed used to initialize the randon number generator.
:return: An instance of the switch operations scheduling problem
"""
# Initialize the seed of the random number generator
random.seed(seed)
# Switches: technology
technology = ["M"] * n;
for i in random.sample(range(0, n), math.ceil(n * tx_remote)):
technology[i] = "R"
# Switches: time for handling
p = [0] * n
for i in range(0, n):
if technology[i] == "R":
p[i] = hdl_remote
else:
if integer_only:
p[i] = random.randint(math.ceil(hdl_limits[0]), math.floor(hdl_limits[1]))
else:
p[i] = round(random.uniform(hdl_limits[0], hdl_limits[1]), 5)
# Switches: operation
operation = ["O"] * n
operation[-1] = "C"
for i in random.sample(range(0, n), s - 1):
operation[i] = "C"
# Switches: stage
stage = [0] * n
current_stage = 1
for i in range(0, n):
stage[i] = current_stage
if operation[i] == "C":
current_stage = current_stage + 1
# Travel time
c = [[[0 for i in range(0, n+1)] for i in range(0, n+1)] for l in range(0, m)]
if symmetry == "euclidean":
# Define coordinates
xcoord = [round(random.uniform(travel_limits[0], travel_limits[1]), 3) for i in range(0, n+1)]
ycoord = [round(random.uniform(travel_limits[0], travel_limits[1]), 3) for i in range(0, n+1)]
# Compute travel time
for l in range(0, m):
for i in range(0, n+1):
for j in range(i+1, n+1):
val = math.sqrt(math.pow(xcoord[i] - xcoord[j], 2) + math.pow(ycoord[i] - ycoord[j], 2))
val = round(val) if integer_only else round(val, 5)
c[l][i][j] = val
c[l][j][i] = val
elif symmetry == "symmetric":
# Compute travel time
for l in range(0, m):
for i in range(0, n+1):
for j in range(i+1, n+1):
val = random.uniform(travel_limits[0], travel_limits[1])
val = round(val) if integer_only else round(val, 5)
c[l][i][j] = val
c[l][j][i] = val
elif symmetry == "asymmetric":
# Compute travel time
for l in range(0, m):
for i in range(0, n+1):
for j in range(0, n+1):
if (i != j):
val = random.uniform(travel_limits[0], travel_limits[1])
val = round(val) if integer_only else round(val, 5)
c[l][i][j] = val
# Precedence constraints
P = [[] for i in range(0, n)]
switches_to_open = [[] for i in range(0, s + 1)]
switches_to_close = [[] for i in range(0, s + 1)]
for i in range(0, n):
if operation[i] == "C":
switches_to_close[stage[i]].append(i)
elif operation[i] == "O":
switches_to_open[stage[i]].append(i)
if prec == "independent":
for j in range(0, n):
if operation[j] == "C":
for i in switches_to_open[stage[j]]:
P[j].append(i)
elif prec == "intree":
for j in range(0, n):
if operation[j] == "C":
for i in switches_to_open[stage[j]]:
P[j].append(i)
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
elif prec == "sequential":
for j in range(0, n):
if operation[j] == "O":
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
elif operation[j] == "C":
if len(switches_to_open[stage[j]]) == 0:
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
else:
for i in switches_to_open[stage[j]]:
P[j].append(i)
elif prec == "general":
# First stage
j = switches_to_close[1][0]
for i in switches_to_open[stage[j]]:
P[j].append(i)
# Next stages
for current_stage in range(2, s + 1):
prec_relation = random.choice(["independent", "intree", "sequential"])
if prec_relation == "independent":
j = switches_to_close[current_stage][0]
for i in switches_to_open[stage[j]]:
P[j].append(i)
elif prec_relation == "intree":
j = switches_to_close[current_stage][0]
for i in switches_to_open[stage[j]]:
P[j].append(i)
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
elif prec_relation == "sequential":
for j in switches_to_open[current_stage]:
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
j = switches_to_close[current_stage][0]
if len(switches_to_open[stage[j]]) == 0:
for i in switches_to_close[stage[j] - 1]:
P[j].append(i)
else:
for i in switches_to_open[stage[j]]:
P[j].append(i)
# Create and return the intance of the problem
return Instance(n, m, s, p, c, P, technology, operation, stage)
def write_instance(filename: str, instance: Instance):
"""
Write the instance data into a plain text file.
:param filename: Path and name of the file in which the instance will be written.
:param instance: The instance that will be saved to file.
"""
with open(filename, "w") as file:
# Instance size
file.write("{} {} {}\n".format(instance.n, instance.m, instance.s))
# Switches data
for i in range(0, instance.n):
file.write("{} {} {} {} {}\n".format(i+1,
instance.p[i],
instance.technology[i],
instance.operation[i],
instance.stage[i]))
# Precedence constraints
for i in range(0, instance.n):
file.write("{} {} ".format(i+1, len(instance.prec[i])))
for j in instance.prec[i]:
file.write("{} ".format(j+1))
file.write("\n")
# Travel time
for l in range(0, instance.m):
for i in range(0, instance.n + 1):
for j in range (0, instance.n + 1):
file.write("{} ".format(instance.c[l][i][j]))
file.write("\n")
def __create_instance(args):
return create_instance(args.switches, args.teams, args.stages, args.prec, args.remote, args.symmetry,
args.handle_time_remote, [args.handle_time_min, args.handle_time_max],
[args.travel_time_min, args.travel_time_max], args.integer_only,
args.seed)
def __create_cli():
"""
Create the Command-Line Interface of the application.
:return: The command-line interface for parsing the user input.
"""
parser = argparse.ArgumentParser(description="Instance generator for the Maneuvers Scheculing Problem")
parser.add_argument("--filename",
help="Path and name of the file in which the instance will be saved.",
type=str,
required=True)
parser.add_argument("--switches",
help="Number of switches.",
type=int,
required=True)
parser.add_argument("--teams",
help="Number of teams available.",
type=int,
required=True)
parser.add_argument("--stages",
help="Number of stages.",
type=int,
required=True)
parser.add_argument("--prec",
help="Type of the precedence graph.",
type=str,
choices=["general", "independent", "intree", "sequential"],
default="general")
parser.add_argument("--remote",
help="Proportion of switches remotely handled.",
type=float,
required=True)
parser.add_argument("--seed",
help="Seed used to initialize the random number generator.",
type=int,
default=0)
parser.add_argument("--integer-only",
help="Whether the values generated for this instances should "
"be truncated to integer values.",
action="store_true",
dest="integer_only")
parser.add_argument("--symmetry",
help="Whether the travel distance matrix should be symmetric.",
type=str,
choices=["euclidean", "symmetric", "asymmetric"],
default="euclidean")
parser.add_argument("--handle-time-remote",
help="Handle time for remotely handled switches.",
type=float,
default=1,
dest="handle_time_remote")
parser.add_argument("--handle-time-min",
help="Minimum value for time to handle a manual switch.",
type=float,
default=1,
dest="handle_time_min")
parser.add_argument("--handle-time-max",
help="Maximum value for time to handle a manual switch.",
type=float,
default=1,
dest="handle_time_max")
parser.add_argument("--travel-time-min",
help="Defines the lower limit to travel time values. If the "
"option 'symmetry' is set to 'euclidean', this option "
"is interpreted as the lower limit of the coordinate "
"values used to determine the travel time.",
type=float,
default=10,
dest="travel_time_min")
parser.add_argument("--travel-time-max",
help="Defines the upper limit to travel time values. If the "
"option 'symmetry' is set to 'euclidean', this option "
"is interpreted as the upper limit of the coordinate "
"values used to determine the travel time.",
type=float,
default=60,
dest="travel_time_max")
return parser
if __name__ == "__main__":
# Parse user arguments
cli = __create_cli()
args = cli.parse_args()
# Create instance
instance = __create_instance(args)
# Write file with instance data
write_instance(args.filename, instance) | 0.621196 | 0.576661 |
import re
import http.client
import json
import threading
import os
import time
def getPage(movieID, startID):
    """Fetch one page of douban reviews for *movieID* at offset *startID*.

    Returns the decoded HTML as a string, or '' on any connection or
    non-200 HTTP response.
    """
    conn = http.client.HTTPConnection("movie.douban.com")
    # conn = http.client.HTTPConnection("localhost", 1080)
    # conn.set_tunnel("movie.douban.com")
    try:
        conn.request("GET","/subject/%d/reviews?start=%d&filter=&limit=20"%(movieID, startID),headers={
            "Accept": "text/html, application/xhtml+xml, image/jxr, */*",
            "Accept-Language": "zh-Hans-CN, zh-Hans; q=0.8, en-US; q=0.5, en; q=0.3",
            "Connection": "Keep-Alive",
            "Cookie": "bid=Dz6aeVd3SFk; _pk_id.100001.4cf6=0b5f6e59908ef738.1444804967.1.1444804967.1444804967.; _pk_ses.100001.4cf6=*",
            "Host": "movie.douban.com",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240"
        })
    except (OSError, http.client.HTTPException):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print('Connection Failed')
        conn.close()
        return ''
    resp = conn.getresponse()
    try:
        if resp.status == 200:
            return str(resp.read(), 'UTF-8', 'ignore')
        print('Error %d %d: HTTP CODE %d: %s' % (movieID, startID, resp.status, resp.reason))
        return ''
    finally:
        # Guarantee the connection is released on every exit path.
        conn.close()
def filter(text):
    """Drop characters that cannot be represented in GBK.

    Used to avoid UnicodeEncodeError when printing / storing text on a
    GBK locale. NOTE: the name shadows the builtin ``filter``; kept
    because other functions in this file call it by this name.
    """
    gbk_bytes = text.encode('GBK', 'ignore')
    return gbk_bytes.decode('GBK')
def parsePage(html):
    """Extract reviewer names and star scores from one review-list page.

    Returns a list of ``{"user": <id>, "score": <stars>}`` dicts.
    """
    html = filter(html)  # strip characters that break GBK handling
    # Raw string: `\d` inside a plain literal is an invalid escape on
    # modern Python (SyntaxWarning, future SyntaxError).
    pattern = (r'<a href="http://movie.douban.com/people/([^/]+?)/" class="">'
               r'([^<]+?)</a>[^<]+?<span class="allstar(\d+)" title="[^/]+"></span>')
    return [{"user": user, "score": score}
            for user, _name, score in re.findall(pattern, html)]
def getReviews(movieID):
    """Crawl every review page for *movieID* and return the parsed results.

    Follows the "next page" link until it disappears, sleeping 2 s between
    requests to throttle the crawl.
    """
    print("Starting %d" % movieID)
    reviews = []
    page = getPage(movieID, 0)
    # Raw strings: `\(` and `\d` are invalid escapes in plain literals.
    total = re.search(r'<span class="total">\(共 (\d+) 条\)</span>', page)
    numberOfReviews = int(total.group(1)) if total else 0
    reviews += parsePage(page)
    while True:
        nextLink = re.search(
            r'<a href="\?start=(\d+)&filter=&limit=20" data-page="" class="next">后一页</a>',
            page)
        if not nextLink:
            break
        startID = int(nextLink.group(1))
        print("Loading %d: %d of %d" % (movieID, startID, numberOfReviews))
        page = getPage(movieID, startID)
        reviews += parsePage(page)
        time.sleep(2)  # be polite to the server
    print("Finishing %d" % movieID)
    return reviews
def doMovie(movieID):
    """Scrape reviews for *movieID* and dump them to data/<id>.json.

    Skips movies whose output file already exists or that yield no reviews.
    """
    filename = "data/%d.json" % movieID
    if os.path.isfile(filename):
        print("Jumping %d" % movieID)
        return
    result = getReviews(movieID)
    if not result:
        print("Empty %d" % movieID)
        return
    # `with` guarantees the file handle is closed even if the write fails.
    with open(filename, "w") as f:
        f.write(json.dumps(result, indent=1))
def doSubList(subList):
    """Process every movie record in *subList* sequentially."""
    for entry in subList:
        doMovie(int(entry['id']))
def main():
    """Load the movie list and scrape reviews with one or more threads."""
    with open("data/movielist.json") as f:
        movieList = json.loads(filter(f.read()))
    # cut movieList into several parts for multi-threading
    numberOfMovies = len(movieList)
    n = 1  # number of threads
    j = numberOfMovies // n
    subLists = []
    for i in range(0, (n - 1) * j, j):
        subLists.append(movieList[i:i + j])
    # Last sub-list absorbs the remainder when numberOfMovies % n != 0.
    subLists.append(movieList[(n - 1) * j:])
    threads = []
    for subList in subLists:
        # daemon=True replaces the deprecated setDaemon() call.
        threads.append(threading.Thread(target=doSubList, args=[subList],
                                        daemon=True))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
main()
import re
import http.client
import json
import threading
import os
import time
def getPage(movieID, startID):
conn = http.client.HTTPConnection("movie.douban.com")
# conn = http.client.HTTPConnection("localhost", 1080)
# conn.set_tunnel("movie.douban.com")
try:
conn.request("GET","/subject/%d/reviews?start=%d&filter=&limit=20"%(movieID, startID),headers={
"Accept": "text/html, application/xhtml+xml, image/jxr, */*",
"Accept-Language": "zh-Hans-CN, zh-Hans; q=0.8, en-US; q=0.5, en; q=0.3",
"Connection": "Keep-Alive",
"Cookie": "bid=Dz6aeVd3SFk; _pk_id.100001.4cf6=0b5f6e59908ef738.1444804967.1.1444804967.1444804967.; _pk_ses.100001.4cf6=*",
"Host": "movie.douban.com",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240"
})
except:
print('Connection Failed')
conn.close()
return ''
resp = conn.getresponse()
if resp.status==200:
page = str(resp.read(), 'UTF-8', 'ignore')
conn.close()
return page
else:
print('Error %d %d: HTTP CODE %d: %s' % (movieID, startID, resp.status, resp.reason))
conn.close()
return ''
def filter(text):
#handle UnicodeEncodeError
return text.encode('GBK', 'ignore').decode('GBK')
def parsePage(html):
html = filter(html)
results=[]
match = re.findall('<a href="http://movie.douban.com/people/([^/]+?)/" class="">([^<]+?)</a>[^<]+?<span class="allstar(\d+)" title="[^/]+"></span>',html)
for item in match:
results.append({"user": item[0], "score": item[2]})
return results
def getReviews(movieID):
print("Starting %d" % movieID)
reviews = []
page = getPage(movieID, 0)
numberOfReviews = (re.search('<span class="total">\(共 (\d+) 条\)</span>', page))
numberOfReviews = numberOfReviews.group(1) if numberOfReviews else 0
numberOfReviews = int(numberOfReviews)
reviews += parsePage(page)
while 1:
startID = re.search('<a href="\?start=(\d+)&filter=&limit=20" data-page="" class="next">后一页</a>',page)
if(not startID):
break;
startID = int(startID.group(1))
print("Loading %d: %d of %d" % (movieID, startID, numberOfReviews))
page = getPage(movieID, startID)
reviews += parsePage(page)
time.sleep(2)
print("Finishing %d" % movieID)
return reviews
def doMovie(movieID):
filename = "data/%d.json" % movieID
if(os.path.isfile(filename)):
print("Jumping %d" % movieID)
return
result = getReviews(movieID)
if(not result):
print("Empty %d" % movieID)
return
result = json.dumps(result, indent=1)
f = open(filename, "w")
f.write(result)
f.close()
def doSubList(subList):
for movie in subList:
doMovie(int(movie['id']))
def main():
f = open("data/movielist.json")
movieList = json.loads(filter(f.read()))
f.close()
# cut movieList into several parts for multi-threading
numberOfMovies = len(movieList)
n = 1 # number of threads
j = numberOfMovies//n
k = numberOfMovies%n
subLists = []
for i in range(0,(n-1)*j,j):
subLists.append(movieList[i:i+j])
subLists.append(movieList[(n-1)*j:])
threads = []
for subList in subLists:
threads.append(threading.Thread(target=doSubList,args=[subList]))
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
main()
import argparse
import collections
import os
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
import numpy as np
from mindspore import context
from mindspore import log as logger
from mindspore.common import set_seed
from mindspore.common.tensor import Tensor
from mindspore.context import ParallelMode
from mindspore.nn.optim import AdamWeightDecay, Lamb, Momentum
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import (CheckpointConfig, LossMonitor,
ModelCheckpoint, SummaryCollector,
TimeMonitor)
from mindspore.train.model import Model
from mindspore.train.serialization import (load_checkpoint,
load_param_into_net,
save_checkpoint)
from src.bert_for_finetune import BertSquad, BertSquadCell
from src.dataset import create_squad_dataset
from src.finetune_eval_config import bert_net_cfg, optimizer_cfg
from src.utils import (BertLearningRate, LoadNewestCkpt, LossCallBack,
make_directory)
_cur_dir = os.getcwd()
def _set_bert_all_reduce_split():
    # Enable parameter broadcast so all workers start from identical weights
    # in data-parallel training.
    context.set_auto_parallel_context(parameter_broadcast=True)
def debug_dataset(dataset, batch_size):
    """Write per-sample SHA-256 hashes of the first batch to sample_hashes.txt.

    Debug helper: hashes every element (column) of every sample in the first
    batch so two pipelines/runs can be compared for bit-identical inputs.
    One line per sample, one space-separated hash per column.
    """
    from hashlib import sha256
    for batch in dataset:
        hashes = []
        for i, element in enumerate(batch):
            hashes.append([])
            element_np = element.asnumpy()
            for sample in range(batch_size):
                hashh = sha256(element_np[sample].data).hexdigest()
                hashes[i].append(hashh)
        break  # only the first batch is inspected
    # NOTE(review): assumes every batch yields exactly 7 columns — confirm
    # against the dataset schema.
    num_elements = 7
    with open("sample_hashes.txt", "w") as sample_file:
        for sample in range(batch_size):
            for element in range(num_elements):
                if element == num_elements - 1:
                    sample_file.write("{}\n".format(hashes[element][sample]))
                else:
                    sample_file.write("{} ".format(hashes[element][sample]))
def do_train(dataset=None,
             network=None,
             load_checkpoint_path="",
             save_checkpoint_path="",
             epoch_num=1,
             distributed=False):
    """Fine-tune *network* on *dataset* for *epoch_num* epochs.

    Builds the optimizer selected by ``optimizer_cfg.optimizer``
    (AdamWeightDecay, Lamb or Momentum), loads pretrained weights from
    *load_checkpoint_path*, wraps the network in a dynamic-loss-scale train
    cell and runs ``Model.train`` with checkpoint/summary/loss callbacks.

    Raises:
        ValueError: if *load_checkpoint_path* is empty.
        Exception: if the configured optimizer name is unsupported.
    """
    if load_checkpoint_path == "":
        raise ValueError(
            "Pretrain model missed, finetune task must load pretrain model!")
    steps_per_epoch = dataset.get_dataset_size()
    # optimizer
    if optimizer_cfg.optimizer == 'AdamWeightDecay':
        # Polynomial-decay schedule; 10% of all steps are used for warmup.
        lr_schedule = BertLearningRate(
            learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate,
            end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate,
            warmup_steps=int(steps_per_epoch * epoch_num * 0.1),
            decay_steps=steps_per_epoch * epoch_num,
            power=optimizer_cfg.AdamWeightDecay.power)
        params = network.trainable_params()
        # Weight decay applies only to parameters selected by decay_filter;
        # all others train with decay 0.0.
        decay_params = list(
            filter(optimizer_cfg.AdamWeightDecay.decay_filter, params))
        other_params = list(
            filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x),
                   params))
        group_params = [{
            'params':
            decay_params,
            'weight_decay':
            optimizer_cfg.AdamWeightDecay.weight_decay
        }, {
            'params': other_params,
            'weight_decay': 0.0
        }]
        optimizer = AdamWeightDecay(group_params,
                                    lr_schedule,
                                    eps=optimizer_cfg.AdamWeightDecay.eps)
    elif optimizer_cfg.optimizer == 'Lamb':
        # Debug dump of the learning-rate schedule inputs.
        print("=== LEARNING RATE ===")
        print("learning rate: {}".format(optimizer_cfg.Lamb.learning_rate))
        print("end learning rate: {}".format(optimizer_cfg.Lamb.end_learning_rate))
        print("step per epoch: {}".format(steps_per_epoch))
        print("number of epochs: {}".format(epoch_num))
        warmup_steps = int(steps_per_epoch * epoch_num * 0.1)
        print("warmup steps: {}".format(warmup_steps))
        decay_steps = steps_per_epoch * epoch_num
        print("decay steps: {}".format(decay_steps))
        print("power: {}".format(optimizer_cfg.Lamb.power))
        print("=== LEARNING RATE ===")
        lr_schedule = BertLearningRate(
            learning_rate=optimizer_cfg.Lamb.learning_rate,
            end_learning_rate=optimizer_cfg.Lamb.end_learning_rate,
            warmup_steps=int(steps_per_epoch * epoch_num * 0.1),
            decay_steps=steps_per_epoch * epoch_num,
            power=optimizer_cfg.Lamb.power)
        optimizer = Lamb(network.trainable_params(), learning_rate=lr_schedule)
    elif optimizer_cfg.optimizer == 'Momentum':
        optimizer = Momentum(
            network.trainable_params(),
            learning_rate=optimizer_cfg.Momentum.learning_rate,
            momentum=optimizer_cfg.Momentum.momentum)
    else:
        raise Exception(
            "Optimizer not supported. support: [AdamWeightDecay, Lamb, Momentum]"
        )
    # load checkpoint into network
    # NOTE(review): checkpoints every 50 steps keeping up to 1000 files looks
    # like a debug setting — the per-epoch config is commented out below.
    ckpt_config = CheckpointConfig(save_checkpoint_steps=50,
                                   keep_checkpoint_max=1000)
    # ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
    #                                keep_checkpoint_max=1)
    ckpoint_cb = ModelCheckpoint(
        prefix="squad",
        directory=None if save_checkpoint_path == "" else save_checkpoint_path,
        config=ckpt_config)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(network, param_dict)
    update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2**32,
                                             scale_factor=2,
                                             scale_window=1000)
    netwithgrads = BertSquadCell(network,
                                 optimizer=optimizer,
                                 scale_update_cell=update_cell)
    model = Model(netwithgrads)
    callbacks = [
        TimeMonitor(dataset.get_dataset_size()),
        LossCallBack(dataset.get_dataset_size()), ckpoint_cb
    ]
    # CALLBACKS
    # One summary directory per rank when running distributed.
    if distributed:
        rank = D.get_rank()
        summary_path = "./summary_{}".format(rank)
    else:
        summary_path = "./summary"
    callbacks.append(SummaryCollector(summary_path))
    callbacks.append(LossMonitor())
    model.train(epoch_num,
                dataset,
                callbacks=callbacks,
                dataset_sink_mode=False)
def do_eval(dataset=None, load_checkpoint_path="", eval_batch_size=1):
    """Run inference over *dataset* and collect raw SQuAD logits.

    Loads fine-tuned weights from *load_checkpoint_path*, predicts start/end
    logits for every example and returns a list of ``RawResult`` namedtuples
    (unique_id, start_logits, end_logits).

    Raises:
        ValueError: if *load_checkpoint_path* is empty.
    """
    if load_checkpoint_path == "":
        raise ValueError(
            "Finetune model missed, evaluation task must load finetune model!")
    net = BertSquad(bert_net_cfg, False, 2)
    net.set_train(False)
    param_dict = load_checkpoint(load_checkpoint_path)
    load_param_into_net(net, param_dict)
    model = Model(net)
    output = []
    RawResult = collections.namedtuple(
        "RawResult", ["unique_id", "start_logits", "end_logits"])
    columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
    for data in dataset.create_dict_iterator(num_epochs=1):
        input_data = []
        for i in columns_list:
            input_data.append(data[i])
        input_ids, input_mask, segment_ids, unique_ids = input_data
        # Placeholder labels: positions/impossibility are required by the
        # network signature but unused at inference time.
        start_positions = Tensor([1], mstype.float32)
        end_positions = Tensor([1], mstype.float32)
        is_impossible = Tensor([1], mstype.float32)
        logits = model.predict(input_ids, input_mask, segment_ids,
                               start_positions, end_positions, unique_ids,
                               is_impossible)
        ids = logits[0].asnumpy()
        start = logits[1].asnumpy()
        end = logits[2].asnumpy()
        for i in range(eval_batch_size):
            unique_id = int(ids[i])
            start_logits = [float(x) for x in start[i].flat]
            end_logits = [float(x) for x in end[i].flat]
            output.append(
                RawResult(unique_id=unique_id,
                          start_logits=start_logits,
                          end_logits=end_logits))
    return output
def run_squad():
    """Run the SQuAD fine-tune/evaluation task driven by CLI arguments.

    Parses command-line options, configures (optionally distributed)
    execution for Ascend or GPU, then runs training and/or evaluation
    according to --do_train / --do_eval.
    """
    parser = argparse.ArgumentParser(description="run squad")
    parser.add_argument("--device_target",
                        type=str,
                        default="Ascend",
                        choices=["Ascend", "GPU"],
                        help="Device type, default is Ascend")
    parser.add_argument("--distribute",
                        type=str,
                        default="false",
                        choices=["true", "false"],
                        help="Run distribute, default is false.")
    # NOTE(review): "Eable" typos in the two help strings below are left
    # untouched (runtime strings).
    parser.add_argument("--do_train",
                        type=str,
                        default="false",
                        choices=["true", "false"],
                        help="Eable train, default is false")
    parser.add_argument("--do_eval",
                        type=str,
                        default="false",
                        choices=["true", "false"],
                        help="Eable eval, default is false")
    parser.add_argument("--device_id",
                        type=int,
                        default=0,
                        help="Device id, default is 0.")
    # NOTE(review): help says default is 1 but the actual default is 3.
    parser.add_argument("--epoch_num",
                        type=int,
                        default=3,
                        help="Epoch number, default is 1.")
    parser.add_argument("--num_class",
                        type=int,
                        default=2,
                        help="The number of class, default is 2.")
    parser.add_argument("--train_data_shuffle",
                        type=str,
                        default="true",
                        choices=["true", "false"],
                        help="Enable train data shuffle, default is true")
    parser.add_argument("--eval_data_shuffle",
                        type=str,
                        default="false",
                        choices=["true", "false"],
                        help="Enable eval data shuffle, default is false")
    parser.add_argument("--train_batch_size",
                        type=int,
                        default=32,
                        help="Train batch size, default is 32")
    parser.add_argument("--eval_batch_size",
                        type=int,
                        default=1,
                        help="Eval batch size, default is 1")
    parser.add_argument("--vocab_file_path",
                        type=str,
                        default="",
                        help="Vocab file path")
    parser.add_argument("--eval_json_path",
                        type=str,
                        default="",
                        help="Evaluation json file path, can be eval.json")
    parser.add_argument("--save_finetune_checkpoint_path",
                        type=str,
                        default="",
                        help="Save checkpoint path")
    parser.add_argument("--load_pretrain_checkpoint_path",
                        type=str,
                        default="",
                        help="Load checkpoint file path")
    parser.add_argument("--load_finetune_checkpoint_path",
                        type=str,
                        default="",
                        help="Load checkpoint file path")
    parser.add_argument("--train_data_file_path",
                        type=str,
                        default="",
                        help="Data path, it is better to use absolute path")
    parser.add_argument("--schema_file_path",
                        type=str,
                        default="",
                        help="Schema path, it is better to use absolute path")
    args_opt = parser.parse_args()
    epoch_num = args_opt.epoch_num
    load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path
    save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path
    load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path
    # Validate the flag combination before any expensive setup.
    if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower(
    ) == "false":
        raise ValueError(
            "At least one of 'do_train' or 'do_eval' must be true")
    if args_opt.do_train.lower(
    ) == "true" and args_opt.train_data_file_path == "":
        raise ValueError(
            "'train_data_file_path' must be set when do finetune task")
    if args_opt.do_eval.lower() == "true":
        if args_opt.vocab_file_path == "":
            raise ValueError(
                "'vocab_file_path' must be set when do evaluation task")
        if args_opt.eval_json_path == "":
            raise ValueError(
                "'tokenization_file_path' must be set when do evaluation task")
    """ distributed """
    if args_opt.distribute.lower() == "true":
        distributed = True
    else:
        distributed = False
    if distributed:
        D.init()
        device_num = D.get_group_size()
        rank = D.get_rank()
        # Each rank writes its checkpoints into its own subdirectory.
        save_finetune_checkpoint_path = os.path.join(
            save_finetune_checkpoint_path, "ckpt_" + str(rank))
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.DATA_PARALLEL,
            gradients_mean=True,
            device_num=device_num)
        _set_bert_all_reduce_split()
    else:
        device_num = 1
        rank = 0
    target = args_opt.device_target
    if target == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            device_id=args_opt.device_id)
    elif target == "GPU":
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
        # context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
        if bert_net_cfg.compute_type != mstype.float32:
            logger.warning('GPU only support fp32 temporarily, run with fp32.')
            bert_net_cfg.compute_type = mstype.float32
    else:
        raise Exception("Target error, GPU or Ascend is supported.")
    netwithloss = BertSquad(bert_net_cfg, True, 2, dropout_prob=0.1)
    if args_opt.do_train.lower() == "true":
        print("batch size: {}".format(args_opt.train_batch_size))  # debug
        ds = create_squad_dataset(
            batch_size=args_opt.train_batch_size,
            repeat_count=1,
            data_file_path=args_opt.train_data_file_path,
            schema_file_path=args_opt.schema_file_path,
            # do_shuffle=(args_opt.train_data_shuffle.lower() == "true"),
            do_shuffle=False,  # debug
            device_num=device_num,
            rank=rank)
        # debug
        # NOTE(review): this debug early-return short-circuits the function —
        # the do_train() call and checkpoint resolution below are unreachable
        # while the hook is in place.
        debug_dataset(ds, args_opt.train_batch_size)
        return
        do_train(ds, netwithloss, load_pretrain_checkpoint_path,
                 save_finetune_checkpoint_path, epoch_num, distributed)
        if args_opt.do_eval.lower() == "true":
            if save_finetune_checkpoint_path == "":
                load_finetune_checkpoint_dir = _cur_dir
            else:
                load_finetune_checkpoint_dir = make_directory(
                    save_finetune_checkpoint_path)
            load_finetune_checkpoint_path = LoadNewestCkpt(
                load_finetune_checkpoint_dir, ds.get_dataset_size(), epoch_num,
                "squad")
    if args_opt.do_eval.lower() == "true":
        # Deferred imports: tokenization/eval helpers are only needed here.
        from src import tokenization
        from src.create_squad_data import (convert_examples_to_features,
                                           read_squad_examples)
        from src.squad_get_predictions import write_predictions
        from src.squad_postprocess import SQuad_postprocess
        tokenizer = tokenization.FullTokenizer(
            vocab_file=args_opt.vocab_file_path, do_lower_case=True)
        eval_examples = read_squad_examples(args_opt.eval_json_path, False)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=bert_net_cfg.seq_length,
            doc_stride=128,
            max_query_length=64,
            is_training=False,
            output_fn=None,
            vocab_file=args_opt.vocab_file_path)
        ds = create_squad_dataset(
            batch_size=args_opt.eval_batch_size,
            repeat_count=1,
            data_file_path=eval_features,
            schema_file_path=args_opt.schema_file_path,
            is_training=False,
            do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"),
            device_num=device_num,
            rank=rank)
        outputs = do_eval(ds, load_finetune_checkpoint_path,
                          args_opt.eval_batch_size)
        all_predictions = write_predictions(eval_examples, eval_features,
                                            outputs, 20, 30, True)
        # One output file per rank when distributed.
        if distributed:
            output_path = "./output_{}.json".format(rank)
        else:
            output_path = "./output.json"
        SQuad_postprocess(args_opt.eval_json_path,
                          all_predictions,
                          output_metrics=output_path)
if __name__ == "__main__":
    # Fixed seed for reproducible fine-tuning runs.
    set_seed(1)
run_squad()
import argparse
import collections
import os
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
import numpy as np
from mindspore import context
from mindspore import log as logger
from mindspore.common import set_seed
from mindspore.common.tensor import Tensor
from mindspore.context import ParallelMode
from mindspore.nn.optim import AdamWeightDecay, Lamb, Momentum
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import (CheckpointConfig, LossMonitor,
ModelCheckpoint, SummaryCollector,
TimeMonitor)
from mindspore.train.model import Model
from mindspore.train.serialization import (load_checkpoint,
load_param_into_net,
save_checkpoint)
from src.bert_for_finetune import BertSquad, BertSquadCell
from src.dataset import create_squad_dataset
from src.finetune_eval_config import bert_net_cfg, optimizer_cfg
from src.utils import (BertLearningRate, LoadNewestCkpt, LossCallBack,
make_directory)
_cur_dir = os.getcwd()
def _set_bert_all_reduce_split():
context.set_auto_parallel_context(parameter_broadcast=True)
def debug_dataset(dataset, batch_size):
from hashlib import sha256
for batch in dataset:
hashes = []
for i, element in enumerate(batch):
hashes.append([])
element_np = element.asnumpy()
for sample in range(batch_size):
hashh = sha256(element_np[sample].data).hexdigest()
hashes[i].append(hashh)
break
num_elements = 7
with open("sample_hashes.txt", "w") as sample_file:
for sample in range(batch_size):
for element in range(num_elements):
if element == num_elements - 1:
sample_file.write("{}\n".format(hashes[element][sample]))
else:
sample_file.write("{} ".format(hashes[element][sample]))
def do_train(dataset=None,
network=None,
load_checkpoint_path="",
save_checkpoint_path="",
epoch_num=1,
distributed=False):
""" do train """
if load_checkpoint_path == "":
raise ValueError(
"Pretrain model missed, finetune task must load pretrain model!")
steps_per_epoch = dataset.get_dataset_size()
# optimizer
if optimizer_cfg.optimizer == 'AdamWeightDecay':
lr_schedule = BertLearningRate(
learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate,
end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate,
warmup_steps=int(steps_per_epoch * epoch_num * 0.1),
decay_steps=steps_per_epoch * epoch_num,
power=optimizer_cfg.AdamWeightDecay.power)
params = network.trainable_params()
decay_params = list(
filter(optimizer_cfg.AdamWeightDecay.decay_filter, params))
other_params = list(
filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x),
params))
group_params = [{
'params':
decay_params,
'weight_decay':
optimizer_cfg.AdamWeightDecay.weight_decay
}, {
'params': other_params,
'weight_decay': 0.0
}]
optimizer = AdamWeightDecay(group_params,
lr_schedule,
eps=optimizer_cfg.AdamWeightDecay.eps)
elif optimizer_cfg.optimizer == 'Lamb':
print("=== LEARNING RATE ===")
print("learning rate: {}".format(optimizer_cfg.Lamb.learning_rate))
print("end learning rate: {}".format(optimizer_cfg.Lamb.end_learning_rate))
print("step per epoch: {}".format(steps_per_epoch))
print("number of epochs: {}".format(epoch_num))
warmup_steps = int(steps_per_epoch * epoch_num * 0.1)
print("warmup steps: {}".format(warmup_steps))
decay_steps = steps_per_epoch * epoch_num
print("decay steps: {}".format(decay_steps))
print("power: {}".format(optimizer_cfg.Lamb.power))
print("=== LEARNING RATE ===")
lr_schedule = BertLearningRate(
learning_rate=optimizer_cfg.Lamb.learning_rate,
end_learning_rate=optimizer_cfg.Lamb.end_learning_rate,
warmup_steps=int(steps_per_epoch * epoch_num * 0.1),
decay_steps=steps_per_epoch * epoch_num,
power=optimizer_cfg.Lamb.power)
optimizer = Lamb(network.trainable_params(), learning_rate=lr_schedule)
elif optimizer_cfg.optimizer == 'Momentum':
optimizer = Momentum(
network.trainable_params(),
learning_rate=optimizer_cfg.Momentum.learning_rate,
momentum=optimizer_cfg.Momentum.momentum)
else:
raise Exception(
"Optimizer not supported. support: [AdamWeightDecay, Lamb, Momentum]"
)
# load checkpoint into network
ckpt_config = CheckpointConfig(save_checkpoint_steps=50,
keep_checkpoint_max=1000)
# ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
# keep_checkpoint_max=1)
ckpoint_cb = ModelCheckpoint(
prefix="squad",
directory=None if save_checkpoint_path == "" else save_checkpoint_path,
config=ckpt_config)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(network, param_dict)
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2**32,
scale_factor=2,
scale_window=1000)
netwithgrads = BertSquadCell(network,
optimizer=optimizer,
scale_update_cell=update_cell)
model = Model(netwithgrads)
callbacks = [
TimeMonitor(dataset.get_dataset_size()),
LossCallBack(dataset.get_dataset_size()), ckpoint_cb
]
# CALLBACKS
if distributed:
rank = D.get_rank()
summary_path = "./summary_{}".format(rank)
else:
summary_path = "./summary"
callbacks.append(SummaryCollector(summary_path))
callbacks.append(LossMonitor())
model.train(epoch_num,
dataset,
callbacks=callbacks,
dataset_sink_mode=False)
def do_eval(dataset=None, load_checkpoint_path="", eval_batch_size=1):
""" do eval """
if load_checkpoint_path == "":
raise ValueError(
"Finetune model missed, evaluation task must load finetune model!")
net = BertSquad(bert_net_cfg, False, 2)
net.set_train(False)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net, param_dict)
model = Model(net)
output = []
RawResult = collections.namedtuple(
"RawResult", ["unique_id", "start_logits", "end_logits"])
columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
for data in dataset.create_dict_iterator(num_epochs=1):
input_data = []
for i in columns_list:
input_data.append(data[i])
input_ids, input_mask, segment_ids, unique_ids = input_data
start_positions = Tensor([1], mstype.float32)
end_positions = Tensor([1], mstype.float32)
is_impossible = Tensor([1], mstype.float32)
logits = model.predict(input_ids, input_mask, segment_ids,
start_positions, end_positions, unique_ids,
is_impossible)
ids = logits[0].asnumpy()
start = logits[1].asnumpy()
end = logits[2].asnumpy()
for i in range(eval_batch_size):
unique_id = int(ids[i])
start_logits = [float(x) for x in start[i].flat]
end_logits = [float(x) for x in end[i].flat]
output.append(
RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
return output
def run_squad():
"""run squad task"""
parser = argparse.ArgumentParser(description="run squad")
parser.add_argument("--device_target",
type=str,
default="Ascend",
choices=["Ascend", "GPU"],
help="Device type, default is Ascend")
parser.add_argument("--distribute",
type=str,
default="false",
choices=["true", "false"],
help="Run distribute, default is false.")
parser.add_argument("--do_train",
type=str,
default="false",
choices=["true", "false"],
help="Eable train, default is false")
parser.add_argument("--do_eval",
type=str,
default="false",
choices=["true", "false"],
help="Eable eval, default is false")
parser.add_argument("--device_id",
type=int,
default=0,
help="Device id, default is 0.")
parser.add_argument("--epoch_num",
type=int,
default=3,
help="Epoch number, default is 1.")
parser.add_argument("--num_class",
type=int,
default=2,
help="The number of class, default is 2.")
parser.add_argument("--train_data_shuffle",
type=str,
default="true",
choices=["true", "false"],
help="Enable train data shuffle, default is true")
parser.add_argument("--eval_data_shuffle",
type=str,
default="false",
choices=["true", "false"],
help="Enable eval data shuffle, default is false")
parser.add_argument("--train_batch_size",
type=int,
default=32,
help="Train batch size, default is 32")
parser.add_argument("--eval_batch_size",
type=int,
default=1,
help="Eval batch size, default is 1")
parser.add_argument("--vocab_file_path",
type=str,
default="",
help="Vocab file path")
parser.add_argument("--eval_json_path",
type=str,
default="",
help="Evaluation json file path, can be eval.json")
parser.add_argument("--save_finetune_checkpoint_path",
type=str,
default="",
help="Save checkpoint path")
parser.add_argument("--load_pretrain_checkpoint_path",
type=str,
default="",
help="Load checkpoint file path")
parser.add_argument("--load_finetune_checkpoint_path",
type=str,
default="",
help="Load checkpoint file path")
parser.add_argument("--train_data_file_path",
type=str,
default="",
help="Data path, it is better to use absolute path")
parser.add_argument("--schema_file_path",
type=str,
default="",
help="Schema path, it is better to use absolute path")
args_opt = parser.parse_args()
epoch_num = args_opt.epoch_num
load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path
save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path
load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path
if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower(
) == "false":
raise ValueError(
"At least one of 'do_train' or 'do_eval' must be true")
if args_opt.do_train.lower(
) == "true" and args_opt.train_data_file_path == "":
raise ValueError(
"'train_data_file_path' must be set when do finetune task")
if args_opt.do_eval.lower() == "true":
if args_opt.vocab_file_path == "":
raise ValueError(
"'vocab_file_path' must be set when do evaluation task")
if args_opt.eval_json_path == "":
raise ValueError(
"'tokenization_file_path' must be set when do evaluation task")
""" distributed """
if args_opt.distribute.lower() == "true":
distributed = True
else:
distributed = False
if distributed:
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
save_finetune_checkpoint_path = os.path.join(
save_finetune_checkpoint_path, "ckpt_" + str(rank))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True,
device_num=device_num)
_set_bert_all_reduce_split()
else:
device_num = 1
rank = 0
target = args_opt.device_target
if target == "Ascend":
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend",
device_id=args_opt.device_id)
elif target == "GPU":
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
# context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
if bert_net_cfg.compute_type != mstype.float32:
logger.warning('GPU only support fp32 temporarily, run with fp32.')
bert_net_cfg.compute_type = mstype.float32
else:
raise Exception("Target error, GPU or Ascend is supported.")
netwithloss = BertSquad(bert_net_cfg, True, 2, dropout_prob=0.1)
if args_opt.do_train.lower() == "true":
print("batch size: {}".format(args_opt.train_batch_size)) # debug
ds = create_squad_dataset(
batch_size=args_opt.train_batch_size,
repeat_count=1,
data_file_path=args_opt.train_data_file_path,
schema_file_path=args_opt.schema_file_path,
# do_shuffle=(args_opt.train_data_shuffle.lower() == "true"),
do_shuffle=False, # debug
device_num=device_num,
rank=rank)
# debug
debug_dataset(ds, args_opt.train_batch_size)
return
do_train(ds, netwithloss, load_pretrain_checkpoint_path,
save_finetune_checkpoint_path, epoch_num, distributed)
if args_opt.do_eval.lower() == "true":
if save_finetune_checkpoint_path == "":
load_finetune_checkpoint_dir = _cur_dir
else:
load_finetune_checkpoint_dir = make_directory(
save_finetune_checkpoint_path)
load_finetune_checkpoint_path = LoadNewestCkpt(
load_finetune_checkpoint_dir, ds.get_dataset_size(), epoch_num,
"squad")
if args_opt.do_eval.lower() == "true":
from src import tokenization
from src.create_squad_data import (convert_examples_to_features,
read_squad_examples)
from src.squad_get_predictions import write_predictions
from src.squad_postprocess import SQuad_postprocess
tokenizer = tokenization.FullTokenizer(
vocab_file=args_opt.vocab_file_path, do_lower_case=True)
eval_examples = read_squad_examples(args_opt.eval_json_path, False)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=bert_net_cfg.seq_length,
doc_stride=128,
max_query_length=64,
is_training=False,
output_fn=None,
vocab_file=args_opt.vocab_file_path)
ds = create_squad_dataset(
batch_size=args_opt.eval_batch_size,
repeat_count=1,
data_file_path=eval_features,
schema_file_path=args_opt.schema_file_path,
is_training=False,
do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"),
device_num=device_num,
rank=rank)
outputs = do_eval(ds, load_finetune_checkpoint_path,
args_opt.eval_batch_size)
all_predictions = write_predictions(eval_examples, eval_features,
outputs, 20, 30, True)
if distributed:
output_path = "./output_{}.json".format(rank)
else:
output_path = "./output.json"
SQuad_postprocess(args_opt.eval_json_path,
all_predictions,
output_metrics=output_path)
if __name__ == "__main__":
set_seed(1)
run_squad()
import sys
import string
# The EMANE event-service bindings are optional; code that references
# EventService must cope with the name being undefined.
try:
    from emanesh.events import EventService
except ImportError:
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    pass
from core.api import coreapi
from core.constants import *
from emane import Emane, EmaneModel
class EmaneUniversalModel(EmaneModel):
''' This Univeral PHY model is meant to be imported by other models,
not instantiated.
'''
def __init__(self, session, objid = None, verbose = False):
raise SyntaxError
_name = "emane_universal"
_xmlname = "universalphy"
_xmllibrary = "universalphylayer"
# universal PHY parameters
_confmatrix_base = [
("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M',
'', 'rf bandwidth (hz)'),
("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
'','frequency (Hz)'),
("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
'','frequency of interest (Hz)'),
("subid", coreapi.CONF_DATA_TYPE_UINT16, '1',
'','subid'),
("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0',
'','system noise figure (dB)'),
("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','transmit power (dBm)'),
]
_confmatrix_081 = [
("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna gain (dBi)'),
("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna azimuth (deg)'),
("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna elevation (deg)'),
("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1',
'','antenna profile ID'),
("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '',
'','antenna profile manifest URI'),
("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off','antenna profile mode'),
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','default connectivity'),
("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','frequency of interest filter enable'),
("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off','enable noise processing'),
("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray',
'pathloss,2ray,freespace','path loss mode'),
]
_confmatrix_091 = [
("fixedantennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna gain (dBi)'),
("fixedantennagainenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','enable fixed antenna gain'),
("noisemode", coreapi.CONF_DATA_TYPE_STRING, 'none',
'none,all,outofband','noise processing mode'),
("noisebinsize", coreapi.CONF_DATA_TYPE_UINT64, '20',
'','noise bin size in microseconds'),
("propagationmodel", coreapi.CONF_DATA_TYPE_STRING, '2ray',
'precomputed,2ray,freespace','path loss mode'),
]
if Emane.version >= Emane.EMANE091:
_confmatrix = _confmatrix_base + _confmatrix_091
else:
_confmatrix = _confmatrix_base + _confmatrix_081
# old parameters
_confmatrix_ver074 = [
("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0',
'','azimith beam width (deg)'),
("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0',
'','elevation beam width (deg)'),
("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional',
'omnidirectional,unidirectional','antenna type'),
]
# parameters that require unit conversion for 0.7.4
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
# parameters that should be removed for 0.7.4
_remove_ver074 = ("antennaprofileenable", "antennaprofileid",
"antennaprofilemanifesturi",
"frequencyofinterestfilterenable")
@classmethod
def getphydoc(cls, e, mac, values, phynames):
phydoc = e.xmldoc("phy")
phy = phydoc.getElementsByTagName("phy").pop()
phy.setAttribute("name", cls._xmlname)
if e.version < e.EMANE091:
phy.setAttribute("library", cls._xmllibrary)
# EMANE 0.7.4 suppport - to be removed when 0.7.4 support is deprecated
if e.version == e.EMANE074:
names = mac.getnames()
values = list(values)
phynames = list(phynames)
# update units for some parameters
for p in cls._update_ver074:
i = names.index(p)
# these all happen to be KHz, so 1000 is used
values[i] = cls.emane074_fixup(values[i], 1000)
# remove new incompatible options
for p in cls._remove_ver074:
phynames.remove(p)
# insert old options with their default values
for old in cls._confmatrix_ver074:
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
frequencies = None
if e.version >= e.EMANE091:
name = "frequencyofinterest"
value = mac.valueof(name, values)
frequencies = cls.valuestrtoparamlist(phydoc, name, value)
if frequencies:
phynames = list(phynames)
phynames.remove("frequencyofinterest")
# append all PHY options to phydoc
map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \
mac.valueof(n, values))), phynames)
if frequencies:
phy.appendChild(frequencies)
return phydoc | daemon/core/emane/universal.py | import sys
import string
try:
from emanesh.events import EventService
except:
pass
from core.api import coreapi
from core.constants import *
from emane import Emane, EmaneModel
class EmaneUniversalModel(EmaneModel):
''' This Univeral PHY model is meant to be imported by other models,
not instantiated.
'''
def __init__(self, session, objid = None, verbose = False):
raise SyntaxError
_name = "emane_universal"
_xmlname = "universalphy"
_xmllibrary = "universalphylayer"
# universal PHY parameters
_confmatrix_base = [
("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M',
'', 'rf bandwidth (hz)'),
("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
'','frequency (Hz)'),
("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
'','frequency of interest (Hz)'),
("subid", coreapi.CONF_DATA_TYPE_UINT16, '1',
'','subid'),
("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0',
'','system noise figure (dB)'),
("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','transmit power (dBm)'),
]
_confmatrix_081 = [
("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna gain (dBi)'),
("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna azimuth (deg)'),
("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna elevation (deg)'),
("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1',
'','antenna profile ID'),
("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '',
'','antenna profile manifest URI'),
("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off','antenna profile mode'),
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','default connectivity'),
("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','frequency of interest filter enable'),
("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off','enable noise processing'),
("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray',
'pathloss,2ray,freespace','path loss mode'),
]
_confmatrix_091 = [
("fixedantennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'','antenna gain (dBi)'),
("fixedantennagainenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
'On,Off','enable fixed antenna gain'),
("noisemode", coreapi.CONF_DATA_TYPE_STRING, 'none',
'none,all,outofband','noise processing mode'),
("noisebinsize", coreapi.CONF_DATA_TYPE_UINT64, '20',
'','noise bin size in microseconds'),
("propagationmodel", coreapi.CONF_DATA_TYPE_STRING, '2ray',
'precomputed,2ray,freespace','path loss mode'),
]
if Emane.version >= Emane.EMANE091:
_confmatrix = _confmatrix_base + _confmatrix_091
else:
_confmatrix = _confmatrix_base + _confmatrix_081
# old parameters
_confmatrix_ver074 = [
("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0',
'','azimith beam width (deg)'),
("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0',
'','elevation beam width (deg)'),
("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional',
'omnidirectional,unidirectional','antenna type'),
]
# parameters that require unit conversion for 0.7.4
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
# parameters that should be removed for 0.7.4
_remove_ver074 = ("antennaprofileenable", "antennaprofileid",
"antennaprofilemanifesturi",
"frequencyofinterestfilterenable")
@classmethod
def getphydoc(cls, e, mac, values, phynames):
phydoc = e.xmldoc("phy")
phy = phydoc.getElementsByTagName("phy").pop()
phy.setAttribute("name", cls._xmlname)
if e.version < e.EMANE091:
phy.setAttribute("library", cls._xmllibrary)
# EMANE 0.7.4 suppport - to be removed when 0.7.4 support is deprecated
if e.version == e.EMANE074:
names = mac.getnames()
values = list(values)
phynames = list(phynames)
# update units for some parameters
for p in cls._update_ver074:
i = names.index(p)
# these all happen to be KHz, so 1000 is used
values[i] = cls.emane074_fixup(values[i], 1000)
# remove new incompatible options
for p in cls._remove_ver074:
phynames.remove(p)
# insert old options with their default values
for old in cls._confmatrix_ver074:
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
frequencies = None
if e.version >= e.EMANE091:
name = "frequencyofinterest"
value = mac.valueof(name, values)
frequencies = cls.valuestrtoparamlist(phydoc, name, value)
if frequencies:
phynames = list(phynames)
phynames.remove("frequencyofinterest")
# append all PHY options to phydoc
map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \
mac.valueof(n, values))), phynames)
if frequencies:
phy.appendChild(frequencies)
return phydoc | 0.283385 | 0.20828 |
from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
from functools import wraps
import requests
import json
import time
import os
template_dir = os.path.abspath('../templates/')
static_dir = os.path.abspath('../static/')
app = Flask(__name__, template_folder=template_dir,static_folder=static_dir)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
SWAPI = "https://swapi.co/api/"
def print_timing(func):
@wraps(func)
def wrapper(*args,**kwargs):
start = time.perf_counter()
result = func(*args,**kwargs)
end = time.perf_counter()
fs = 'Function {} took {:.3f} seconds'
print(fs.format(func.__name__, (end - start)))
return result
return wrapper
@print_timing
def get_film_info(film):
return requests.get(film).json()
@print_timing
def get_homeworld_info(homeworld):
return requests.get(homeworld).json()
@print_timing
def get_species_info(species_api):
return requests.get(species_api).json()
@print_timing
def get_all_people_info():
return requests.get(SWAPI + "people/").json()
@app.route('/character_info/<charactername>')
@cross_origin()
@print_timing
def get_character_info(charactername):
if(charactername == ""):
return('', 204)
ret = []
response = get_all_people_info();
if(response == {'detail': 'Not found'}):
return (json.dumps(ret), 204)
else:
for char in response['results']:
if(charactername.lower() in char['name'].lower()):
info = {}
info["name"] = char['name']
info["gender"] = char['gender']
species_names = []
species_lifespan = []
for species_api in char['species']:
species_info = get_species_info(species_api);
species_names.append(species_info['name'])
species_lifespan.append(species_info['average_lifespan'])
info["species"] = species_names
info["average_lifespan"] = species_lifespan
homeworld = get_homeworld_info(char['homeworld'])
info["homeworld"] = homeworld['name']
films = []
for film in char['films']:
film_info = get_film_info(film)
films.append(film_info['title'])
info["films"] = films
ret.append(info)
if(not ret):
return ('[]', 200)
return (json.dumps(ret),200)
@app.route("/")
def home():
return render_template('index.html', title='SWAPI SEARCH')
if __name__ == "__main__":
app.run(debug = True) | swapi_app/app.py | from flask import Flask, request, render_template
from flask_cors import CORS, cross_origin
from functools import wraps
import requests
import json
import time
import os
template_dir = os.path.abspath('../templates/')
static_dir = os.path.abspath('../static/')
app = Flask(__name__, template_folder=template_dir,static_folder=static_dir)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
SWAPI = "https://swapi.co/api/"
def print_timing(func):
@wraps(func)
def wrapper(*args,**kwargs):
start = time.perf_counter()
result = func(*args,**kwargs)
end = time.perf_counter()
fs = 'Function {} took {:.3f} seconds'
print(fs.format(func.__name__, (end - start)))
return result
return wrapper
@print_timing
def get_film_info(film):
return requests.get(film).json()
@print_timing
def get_homeworld_info(homeworld):
return requests.get(homeworld).json()
@print_timing
def get_species_info(species_api):
return requests.get(species_api).json()
@print_timing
def get_all_people_info():
return requests.get(SWAPI + "people/").json()
@app.route('/character_info/<charactername>')
@cross_origin()
@print_timing
def get_character_info(charactername):
if(charactername == ""):
return('', 204)
ret = []
response = get_all_people_info();
if(response == {'detail': 'Not found'}):
return (json.dumps(ret), 204)
else:
for char in response['results']:
if(charactername.lower() in char['name'].lower()):
info = {}
info["name"] = char['name']
info["gender"] = char['gender']
species_names = []
species_lifespan = []
for species_api in char['species']:
species_info = get_species_info(species_api);
species_names.append(species_info['name'])
species_lifespan.append(species_info['average_lifespan'])
info["species"] = species_names
info["average_lifespan"] = species_lifespan
homeworld = get_homeworld_info(char['homeworld'])
info["homeworld"] = homeworld['name']
films = []
for film in char['films']:
film_info = get_film_info(film)
films.append(film_info['title'])
info["films"] = films
ret.append(info)
if(not ret):
return ('[]', 200)
return (json.dumps(ret),200)
@app.route("/")
def home():
return render_template('index.html', title='SWAPI SEARCH')
if __name__ == "__main__":
app.run(debug = True) | 0.38168 | 0.063077 |
import sublime
import codecs
import itertools
import sys
import threading
import time
try:
from os import cpu_count
except ImportError:
try:
from multiprocessing import cpu_count
# quickfix for ST2 compat
except ImportError:
def cpu_count():
return 1
try:
from Queue import Queue
except ImportError:
from queue import Queue
_ST3 = True
from .six import reraise
def run_after_loading(view, func):
"""Run a function after the view has finished loading"""
def run():
if view.is_loading():
sublime.set_timeout(run, 10)
else:
# add an additional delay, because it might not be ready
# even if the loading function returns false
sublime.set_timeout(func, 10)
run()
def open_and_select_region(view, file_name, region):
new_view = view
def select_label():
new_view.sel().clear()
new_view.sel().add(region)
new_view.show(region)
# TODO better compare?
if view.file_name() != file_name:
new_view = view.window().open_file(file_name)
run_after_loading(new_view, select_label)
else:
select_label()
def _read_file_content(file_name, encoding="utf8", ignore=True):
errors = "ignore" if ignore else "strict"
with codecs.open(file_name, "r", encoding, errors) as f:
return f.read()
def read_file_unix_endings(file_name, encoding="utf8", ignore=True):
"""
Reads a file with unix (LF) line endings and converts windows (CRLF)
line endings into (LF) line endings. This is necessary if you want to have
the same string positions as in ST, because the length of ST line endings
is 1 and the length if CRLF line endings is 2.
"""
if _ST3:
errors = "ignore" if ignore else "strict"
with open(file_name, "rt", encoding=encoding, errors=errors) as f:
file_content = f.read()
else:
file_content = _read_file_content(file_name, encoding, ignore)
file_content = file_content.replace("\r\n", "\n")
return file_content
def get_view_content(file_name):
"""
If the file is open in a view, then this will return its content.
Otherwise this will return None
"""
view = get_open_view(file_name)
if view is not None:
return view.substr(sublime.Region(0, view.size()))
def get_open_view(file_name):
'''
Returns the view for the specified file_name if it exists
'''
active_window = sublime.active_window()
active_view = active_window.active_view()
# search for the file name in 3 hierarchical steps
# 1. check the active view
if active_view.file_name() == file_name:
return active_view
# 2. check all views in the active windows
view = active_window.find_open_file(file_name)
if view:
return view
# 3. check all other views
for window in sublime.windows():
if window == active_window:
continue
view = window.find_open_file(file_name)
if view:
return view
def get_file_content(file_name, encoding="utf8", ignore=True,
force_lf_endings=False):
"""
Returns the content of this file.
If the file is opened in a view, then the content of the view will
be returned. Otherwise the file will be opened and the content
will be returned.
"""
if force_lf_endings:
read_file_content = read_file_unix_endings
else:
read_file_content = _read_file_content
content = (get_view_content(file_name) or
read_file_content(file_name, encoding, ignore))
return content
class TimeoutError(Exception):
pass
__sentinel__ = object()
def run_on_main_thread(func, timeout=10, default_value=__sentinel__):
"""
Ensures the function, func is run on the main thread and returns the rsult
of that function call.
Note that this function blocks the thread it is executed on and should only
be used when the result of the function call is necessary to continue.
Arguments:
func (callable): a no-args callable; functions that need args should
be wrapped in a `functools.partial`
timeout (int): the maximum amount of time to wait in seconds. A
TimeoutError is raised if this limit is reached a no `default_value`
is specified
default_value (any): the value to be returned if a timeout occurs
Note that both timeout and default value are ignored when run in ST3 or
from the main thread.
"""
# quick exit condition: we are on ST3 or the main thread
if _ST3 or threading.current_thread().getName() == 'MainThread':
return func()
condition = threading.Condition()
condition.acquire()
def _get_result():
with condition:
_get_result.result = func()
condition.notify()
sublime.set_timeout(_get_result, 0)
condition.wait(timeout)
if not hasattr(_get_result, 'result'):
if default_value is __sentinel__:
raise TimeoutError('Timeout while waiting for {0}'.format(func))
else:
return default_value
return _get_result.result
class ThreadPool(object):
'''A relatively simple ThreadPool designed to maintain a number of thread
workers
By default, each pool manages a number of processes equal to the number
of CPU cores. This can be adjusted by setting the processes parameter
when creating the pool.
Returned results are similar to multiprocessing.pool.AsyncResult'''
def __init__(self, processes=None):
self._task_queue = Queue()
self._result_queue = Queue()
# used to indicate if the ThreadPool should be stopped
self._should_stop = threading.Event()
# default value is two less than the number of CPU cores to handle
# the supervisor thread and result thread
self._processes = max(processes or (cpu_count() or 3) - 2, 1)
self._workers = []
self._populate_pool()
self._job_counter = itertools.count()
self._result_cache = {}
self._result_handler = threading.Thread(target=self._handle_results)
self._result_handler.daemon = True
self._result_handler.name = u'{0!r} handler'.format(self)
self._result_handler.start()
self._supervisor = threading.Thread(target=self._maintain_pool)
self._supervisor.daemon = True
self._supervisor.name = u'{0!r} supervisor'.format(self)
self._supervisor.start()
# - Public API
def apply_async(self, func, args=(), kwargs={}):
job = next(self._job_counter)
self._task_queue.put((job, (func, args, kwargs)))
return _ThreadPoolResult(job, self._result_cache)
def is_running(self):
return not self._should_stop.is_set()
def terminate(self):
'''Stops this thread pool. Note stopping is not immediate. If you
need to wait for the termination to complete, you should call join()
after this.'''
self._should_stop.set()
def join(self, timeout=None):
self._supervisor.join(timeout)
if self._supervisor.is_alive():
raise TimeoutError
# - Internal API
# this is the supervisory task, which will clear workers that have stopped
# and start fresh workers
def _maintain_pool(self):
while self.is_running():
cleared_processes = False
for i in reversed(range(len(self._workers))):
w = self._workers[i]
if not w.is_alive():
w.join()
cleared_processes = True
del self._workers[i]
if cleared_processes:
self._populate_pool()
time.sleep(0.1)
# send sentinels to end threads
for _ in range(len(self._workers)):
self._task_queue.put(None)
# ensure worker threads end
for w in self._workers:
w.join()
# stop the result handler
self._result_queue.put(None)
self._result_handler.join()
def _handle_results(self):
while True:
result = self._result_queue.get()
if result is None:
break
job, _result = result
try:
result_handler = self._result_cache.get(job)
if result_handler:
result_handler._set_result(_result)
finally:
self._result_queue.task_done()
# creates and adds worker threads
def _populate_pool(self):
for _ in range(self._processes - len(self._workers)):
w = _ThreadPoolWorker(self._task_queue, self._result_queue)
self._workers.append(w)
w.start()
class _ThreadPoolWorker(threading.Thread):
def __init__(self, task_queue, result_queue, *args, **kwargs):
super(_ThreadPoolWorker, self).__init__(*args, **kwargs)
self.daemon = True
self._task_queue = task_queue
self._result_queue = result_queue
def run(self):
while True:
task = self._task_queue.get()
if task is None:
break
job = task[0]
func, args, kwargs = task[1]
if args is None:
args = ()
if kwargs is None:
kwargs = {}
try:
self._result_queue.put((job, func(*args, **kwargs)))
except Exception:
self._result_queue.put((job, sys.exc_info()))
finally:
self._task_queue.task_done()
class _ThreadPoolResult(object):
def __init__(self, job, result_cache):
self._ready = threading.Event()
self._value = None
self._result_cache = result_cache
self._job = job
self._result_cache[job] = self
def ready(self):
return self._ready.is_set()
def wait(self, timeout=None):
self._ready.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
# handle an exception, which is passed as a sys.exc_info tuple
if (
isinstance(self._value, tuple) and
len(self._value) == 3 and
issubclass(self._value[0], Exception)
):
reraise(*self._value)
else:
return self._value
def then(self, callback, timeout=None):
callback(self.get(timeout))
def _set_result(self, _value):
self._value = _value
self._ready.set()
del self._result_cache[self._job] | latextools_utils/utils.py | import sublime
import codecs
import itertools
import sys
import threading
import time
try:
from os import cpu_count
except ImportError:
try:
from multiprocessing import cpu_count
# quickfix for ST2 compat
except ImportError:
def cpu_count():
return 1
try:
from Queue import Queue
except ImportError:
from queue import Queue
_ST3 = True
from .six import reraise
def run_after_loading(view, func):
"""Run a function after the view has finished loading"""
def run():
if view.is_loading():
sublime.set_timeout(run, 10)
else:
# add an additional delay, because it might not be ready
# even if the loading function returns false
sublime.set_timeout(func, 10)
run()
def open_and_select_region(view, file_name, region):
new_view = view
def select_label():
new_view.sel().clear()
new_view.sel().add(region)
new_view.show(region)
# TODO better compare?
if view.file_name() != file_name:
new_view = view.window().open_file(file_name)
run_after_loading(new_view, select_label)
else:
select_label()
def _read_file_content(file_name, encoding="utf8", ignore=True):
errors = "ignore" if ignore else "strict"
with codecs.open(file_name, "r", encoding, errors) as f:
return f.read()
def read_file_unix_endings(file_name, encoding="utf8", ignore=True):
"""
Reads a file with unix (LF) line endings and converts windows (CRLF)
line endings into (LF) line endings. This is necessary if you want to have
the same string positions as in ST, because the length of ST line endings
is 1 and the length if CRLF line endings is 2.
"""
if _ST3:
errors = "ignore" if ignore else "strict"
with open(file_name, "rt", encoding=encoding, errors=errors) as f:
file_content = f.read()
else:
file_content = _read_file_content(file_name, encoding, ignore)
file_content = file_content.replace("\r\n", "\n")
return file_content
def get_view_content(file_name):
"""
If the file is open in a view, then this will return its content.
Otherwise this will return None
"""
view = get_open_view(file_name)
if view is not None:
return view.substr(sublime.Region(0, view.size()))
def get_open_view(file_name):
'''
Returns the view for the specified file_name if it exists
'''
active_window = sublime.active_window()
active_view = active_window.active_view()
# search for the file name in 3 hierarchical steps
# 1. check the active view
if active_view.file_name() == file_name:
return active_view
# 2. check all views in the active windows
view = active_window.find_open_file(file_name)
if view:
return view
# 3. check all other views
for window in sublime.windows():
if window == active_window:
continue
view = window.find_open_file(file_name)
if view:
return view
def get_file_content(file_name, encoding="utf8", ignore=True,
force_lf_endings=False):
"""
Returns the content of this file.
If the file is opened in a view, then the content of the view will
be returned. Otherwise the file will be opened and the content
will be returned.
"""
if force_lf_endings:
read_file_content = read_file_unix_endings
else:
read_file_content = _read_file_content
content = (get_view_content(file_name) or
read_file_content(file_name, encoding, ignore))
return content
class TimeoutError(Exception):
pass
__sentinel__ = object()
def run_on_main_thread(func, timeout=10, default_value=__sentinel__):
"""
Ensures the function, func is run on the main thread and returns the rsult
of that function call.
Note that this function blocks the thread it is executed on and should only
be used when the result of the function call is necessary to continue.
Arguments:
func (callable): a no-args callable; functions that need args should
be wrapped in a `functools.partial`
timeout (int): the maximum amount of time to wait in seconds. A
TimeoutError is raised if this limit is reached a no `default_value`
is specified
default_value (any): the value to be returned if a timeout occurs
Note that both timeout and default value are ignored when run in ST3 or
from the main thread.
"""
# quick exit condition: we are on ST3 or the main thread
if _ST3 or threading.current_thread().getName() == 'MainThread':
return func()
condition = threading.Condition()
condition.acquire()
def _get_result():
with condition:
_get_result.result = func()
condition.notify()
sublime.set_timeout(_get_result, 0)
condition.wait(timeout)
if not hasattr(_get_result, 'result'):
if default_value is __sentinel__:
raise TimeoutError('Timeout while waiting for {0}'.format(func))
else:
return default_value
return _get_result.result
class ThreadPool(object):
'''A relatively simple ThreadPool designed to maintain a number of thread
workers
By default, each pool manages a number of processes equal to the number
of CPU cores. This can be adjusted by setting the processes parameter
when creating the pool.
Returned results are similar to multiprocessing.pool.AsyncResult'''
def __init__(self, processes=None):
self._task_queue = Queue()
self._result_queue = Queue()
# used to indicate if the ThreadPool should be stopped
self._should_stop = threading.Event()
# default value is two less than the number of CPU cores to handle
# the supervisor thread and result thread
self._processes = max(processes or (cpu_count() or 3) - 2, 1)
self._workers = []
self._populate_pool()
self._job_counter = itertools.count()
self._result_cache = {}
self._result_handler = threading.Thread(target=self._handle_results)
self._result_handler.daemon = True
self._result_handler.name = u'{0!r} handler'.format(self)
self._result_handler.start()
self._supervisor = threading.Thread(target=self._maintain_pool)
self._supervisor.daemon = True
self._supervisor.name = u'{0!r} supervisor'.format(self)
self._supervisor.start()
# - Public API
def apply_async(self, func, args=(), kwargs={}):
job = next(self._job_counter)
self._task_queue.put((job, (func, args, kwargs)))
return _ThreadPoolResult(job, self._result_cache)
def is_running(self):
return not self._should_stop.is_set()
def terminate(self):
'''Stops this thread pool. Note stopping is not immediate. If you
need to wait for the termination to complete, you should call join()
after this.'''
self._should_stop.set()
def join(self, timeout=None):
self._supervisor.join(timeout)
if self._supervisor.is_alive():
raise TimeoutError
# - Internal API
# this is the supervisory task, which will clear workers that have stopped
# and start fresh workers
def _maintain_pool(self):
while self.is_running():
cleared_processes = False
for i in reversed(range(len(self._workers))):
w = self._workers[i]
if not w.is_alive():
w.join()
cleared_processes = True
del self._workers[i]
if cleared_processes:
self._populate_pool()
time.sleep(0.1)
# send sentinels to end threads
for _ in range(len(self._workers)):
self._task_queue.put(None)
# ensure worker threads end
for w in self._workers:
w.join()
# stop the result handler
self._result_queue.put(None)
self._result_handler.join()
def _handle_results(self):
while True:
result = self._result_queue.get()
if result is None:
break
job, _result = result
try:
result_handler = self._result_cache.get(job)
if result_handler:
result_handler._set_result(_result)
finally:
self._result_queue.task_done()
# creates and adds worker threads
def _populate_pool(self):
for _ in range(self._processes - len(self._workers)):
w = _ThreadPoolWorker(self._task_queue, self._result_queue)
self._workers.append(w)
w.start()
class _ThreadPoolWorker(threading.Thread):
def __init__(self, task_queue, result_queue, *args, **kwargs):
super(_ThreadPoolWorker, self).__init__(*args, **kwargs)
self.daemon = True
self._task_queue = task_queue
self._result_queue = result_queue
def run(self):
while True:
task = self._task_queue.get()
if task is None:
break
job = task[0]
func, args, kwargs = task[1]
if args is None:
args = ()
if kwargs is None:
kwargs = {}
try:
self._result_queue.put((job, func(*args, **kwargs)))
except Exception:
self._result_queue.put((job, sys.exc_info()))
finally:
self._task_queue.task_done()
class _ThreadPoolResult(object):
def __init__(self, job, result_cache):
self._ready = threading.Event()
self._value = None
self._result_cache = result_cache
self._job = job
self._result_cache[job] = self
def ready(self):
return self._ready.is_set()
def wait(self, timeout=None):
self._ready.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
# handle an exception, which is passed as a sys.exc_info tuple
if (
isinstance(self._value, tuple) and
len(self._value) == 3 and
issubclass(self._value[0], Exception)
):
reraise(*self._value)
else:
return self._value
def then(self, callback, timeout=None):
callback(self.get(timeout))
def _set_result(self, _value):
self._value = _value
self._ready.set()
del self._result_cache[self._job] | 0.380759 | 0.124346 |
from pysamba.library import *
from pysamba.wbem.wbem import *
from twisted.internet import defer
from pysamba.talloc import *
from pysamba.rpc.credentials import *
from pysamba.twisted.callback import Callback, WMIFailure
import Globals
from Products.ZenUtils.Driver import drive
import logging
# Configure a default logging handler so pysamba messages are visible even
# when the host application has not set up logging itself.
logging.basicConfig()
log = logging.getLogger('zen.pysamba')
# WBEM HRESULT returned when a (semi)synchronous call times out.
# NOTE: the trailing "L" long-integer suffix is Python 2-only syntax.
WBEM_S_TIMEDOUT = 0x40004L
# Win32 ERROR_INVALID_FUNCTION expressed as a WERROR numeric code.
WERR_BADFUNC = 1
# ctypes prototype for the C function:
# struct dcom_client_context *dcom_client_init(struct com_context *ctx,
#                                              struct cli_credentials *credentials)
library.dcom_client_init.restype = c_void_p
library.dcom_client_init.argtypes = [POINTER(com_context), c_void_p]
# com_init_ctx() returns a WERROR status value (struct defined in pysamba).
library.com_init_ctx.restype = WERROR
class _WbemObject:
    """A bag of WMI properties with case-insensitive attribute access.

    Properties are stored in ``__dict__`` under lower-cased names (see the
    ``setattr(result, prop.name.lower(), ...)`` call in this module), so
    ``obj.Caption``, ``obj.caption`` and ``obj.CAPTION`` all resolve to the
    same value.  Missing properties raise AttributeError, as callers expect.
    """
    def __getattr__(self, name):
        # __getattr__ only fires when normal lookup fails, so exact-case
        # hits never reach this method.  Catch only the KeyError from the
        # dict lookup (the old code caught Exception with Python 2-only
        # ``except Exception, ex`` syntax and never used the binding).
        try:
            return self.__dict__[name.lower()]
        except KeyError:
            raise AttributeError(name)
def convertArray(arr):
    """Return the elements of a CIM array wrapper as a plain Python list.

    ``arr`` is expected to be a ctypes pointer whose target exposes a
    ``count`` field and an ``item`` array.  A null (falsy) pointer yields
    ``None`` rather than an empty list, mirroring "no value" from WMI.
    """
    if not arr:
        return None
    body = arr.contents
    return [body.item[idx] for idx in range(body.count)]
def convert(v, typeval):
if typeval == CIM_SINT8: return v.v_sint8
if typeval == CIM_UINT8: return v.v_uint8
if typeval == CIM_SINT16: return v.v_sint16
if typeval == CIM_UINT16: return v.v_uint16
if typeval == CIM_SINT32: return v.v_sint32
if typeval == CIM_UINT32: return v.v_uint32
if typeval == CIM_SINT64: return v.v_sint64
if typeval == CIM_UINT64: return v.v_sint64
if typeval == CIM_REAL32: return float(v.v_uint32)
if typeval == CIM_REAL64: return float(v.v_uint64)
if typeval == CIM_BOOLEAN: return bool(v.v_boolean)
if typeval in (CIM_STRING, CIM_DATETIME, CIM_REFERENCE):
return v.v_string
if typeval == CIM_CHAR16:
return v.v_string.decode('utf16')
if typeval == CIM_OBJECT:
return wbemInstanceToPython(v.v_object)
if typeval == CIM_ARR_SINT8: return convertArray(v.a_sint8)
if typeval == CIM_ARR_UINT8: return convertArray(v.a_uint8)
if typeval == CIM_ARR_SINT16: return convertArray(v.a_sint16)
if typeval == CIM_ARR_UINT16: return convertArray(v.a_uint16)
if typeval == CIM_ARR_SINT32: return convertArray(v.a_sint32)
if typeval == CIM_ARR_UINT32: return convertArray(v.a_uint32)
if typeval == CIM_ARR_SINT64: return convertArray(v.a_sint64)
if typeval == CIM_ARR_UINT64: return convertArray(v.a_uint64)
if typeval == CIM_ARR_REAL32: return convertArray(v.a_real32)
if typeval == CIM_ARR_REAL64: return convertArray(v.a_real64)
if typeval == CIM_ARR_BOOLEAN: return convertArray(v.a_boolean)
if typeval == CIM_ARR_STRING: return convertArray(v.a_string)
if typeval == CIM_ARR_DATETIME:
return convertArray(v.contents.a_datetime)
if typeval == CIM_ARR_REFERENCE:
return convertArray(v.contents.a_reference)
return "Unsupported"
def wbemInstanceToPython(obj):
klass = obj.contents.obj_class.contents
inst = obj.contents.instance.contents
result = _WbemObject()
result._class_name = klass.__CLASS
for j in range(klass.__PROPERTY_COUNT):
prop = klass.properties[j]
value = convert(inst.data[j], prop.desc.contents.cimtype & CIM_TYPEMASK)
if prop.name:
setattr(result, prop.name.lower(), value)
return result
def deferred(ctx):
cback = Callback()
ctx.contents.async.fn = cback.callback
return cback.deferred
wbemTimeoutInfinite = -1
class QueryResult(object):
def __init__(self, deviceId, ctx, pEnum):
self._deviceId = deviceId
self.ctx = ctx
talloc_increase_ref_count(self.ctx)
self.pEnum = pEnum
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None
def __del__(self):
self.close()
def fetchSome(self, timeoutMs=wbemTimeoutInfinite, chunkSize=10):
assert self.pEnum
def inner(driver):
count = uint32_t()
objs = (POINTER(WbemClassObject)*chunkSize)()
ctx = library.IEnumWbemClassObject_SmartNext_send(
self.pEnum, None, timeoutMs, chunkSize
)
yield deferred(ctx); driver.next()
result = library.IEnumWbemClassObject_SmartNext_recv(
ctx, self.ctx, objs, byref(count)
)
WERR_CHECK(result, self._deviceId, "Retrieve result data.")
result = []
for i in range(count.value):
result.append(wbemInstanceToPython(objs[i]))
talloc_free(objs[i])
driver.finish(result)
return drive(inner)
class Query(object):
def __init__(self):
self.ctx = POINTER(com_context)()
self.pWS = POINTER(IWbemServices)()
self._deviceId = None
def connect(self, eventContext, deviceId, hostname, creds, namespace="root\\cimv2"):
self._deviceId = deviceId
library.com_init_ctx.restype = WERROR
library.com_init_ctx(byref(self.ctx), eventContext)
cred = library.cli_credentials_init(self.ctx)
library.cli_credentials_set_conf(cred)
library.cli_credentials_parse_string(cred, creds, CRED_SPECIFIED)
library.dcom_client_init(self.ctx, cred)
def inner(driver):
flags = uint32_t()
flags.value = 0
ctx = library.WBEM_ConnectServer_send(
self.ctx, # com_ctx
None, # parent_ctx
hostname, # server
namespace, # namespace
None, # username
None, # password
None, # locale
flags.value, # flags
None, # authority
None) # wbem_ctx
yield deferred(ctx); driver.next()
result = library.WBEM_ConnectServer_recv(ctx, None, byref(self.pWS))
WERR_CHECK(result, self._deviceId, "Connect")
driver.finish(None)
return drive(inner)
def query(self, query):
assert self.pWS
def inner(driver):
qctx = None
try:
qctx = library.IWbemServices_ExecQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_ENSURE_LOCATABLE,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecQuery_recv(qctx,
byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecQuery")
ctx = library.IEnumWbemClassObject_Reset_send_f(pEnum, self.ctx)
yield deferred(ctx); driver.next()
result = library.IEnumWbemClassObject_Reset_recv(ctx);
WERR_CHECK(result, self._deviceId, "Reset result of WMI query.");
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
log.exception(ex)
raise
return drive(inner)
def notificationQuery(self, query):
assert self.pWS
def inner(driver):
qctx = None
pEnum = None
try:
qctx = library.IWbemServices_ExecNotificationQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_FORWARD_ONLY,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecNotificationQuery_recv(
qctx, byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecNotificationQuery")
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
if pEnum:
c = library.IUnknown_Release_send_f(pEnum, self.ctx)
yield deferred(c); driver.next()
result = library.IUnknown_Release_recv(self.ctx)
WERR_CHECK(result, self._deviceId, "Release")
log.exception(ex)
raise
return drive(inner)
def __del__(self):
self.close()
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None | pysamba/wbem/Query.py |
from pysamba.library import *
from pysamba.wbem.wbem import *
from twisted.internet import defer
from pysamba.talloc import *
from pysamba.rpc.credentials import *
from pysamba.twisted.callback import Callback, WMIFailure
import Globals
from Products.ZenUtils.Driver import drive
import logging
logging.basicConfig()
log = logging.getLogger('zen.pysamba')
WBEM_S_TIMEDOUT = 0x40004L
WERR_BADFUNC = 1
# struct dcom_client_context *dcom_client_init(struct com_context *ctx,
# struct cli_credentials *credentials)
library.dcom_client_init.restype = c_void_p
library.dcom_client_init.argtypes = [POINTER(com_context), c_void_p]
library.com_init_ctx.restype = WERROR
class _WbemObject:
def __getattr__(self, name):
try:
return self.__dict__[name.lower()]
except Exception, ex:
raise AttributeError(name)
def convertArray(arr):
if not arr:
return None
result = []
arr = arr.contents
for i in range(arr.count):
result.append(arr.item[i])
return result
def convert(v, typeval):
if typeval == CIM_SINT8: return v.v_sint8
if typeval == CIM_UINT8: return v.v_uint8
if typeval == CIM_SINT16: return v.v_sint16
if typeval == CIM_UINT16: return v.v_uint16
if typeval == CIM_SINT32: return v.v_sint32
if typeval == CIM_UINT32: return v.v_uint32
if typeval == CIM_SINT64: return v.v_sint64
if typeval == CIM_UINT64: return v.v_sint64
if typeval == CIM_REAL32: return float(v.v_uint32)
if typeval == CIM_REAL64: return float(v.v_uint64)
if typeval == CIM_BOOLEAN: return bool(v.v_boolean)
if typeval in (CIM_STRING, CIM_DATETIME, CIM_REFERENCE):
return v.v_string
if typeval == CIM_CHAR16:
return v.v_string.decode('utf16')
if typeval == CIM_OBJECT:
return wbemInstanceToPython(v.v_object)
if typeval == CIM_ARR_SINT8: return convertArray(v.a_sint8)
if typeval == CIM_ARR_UINT8: return convertArray(v.a_uint8)
if typeval == CIM_ARR_SINT16: return convertArray(v.a_sint16)
if typeval == CIM_ARR_UINT16: return convertArray(v.a_uint16)
if typeval == CIM_ARR_SINT32: return convertArray(v.a_sint32)
if typeval == CIM_ARR_UINT32: return convertArray(v.a_uint32)
if typeval == CIM_ARR_SINT64: return convertArray(v.a_sint64)
if typeval == CIM_ARR_UINT64: return convertArray(v.a_uint64)
if typeval == CIM_ARR_REAL32: return convertArray(v.a_real32)
if typeval == CIM_ARR_REAL64: return convertArray(v.a_real64)
if typeval == CIM_ARR_BOOLEAN: return convertArray(v.a_boolean)
if typeval == CIM_ARR_STRING: return convertArray(v.a_string)
if typeval == CIM_ARR_DATETIME:
return convertArray(v.contents.a_datetime)
if typeval == CIM_ARR_REFERENCE:
return convertArray(v.contents.a_reference)
return "Unsupported"
def wbemInstanceToPython(obj):
klass = obj.contents.obj_class.contents
inst = obj.contents.instance.contents
result = _WbemObject()
result._class_name = klass.__CLASS
for j in range(klass.__PROPERTY_COUNT):
prop = klass.properties[j]
value = convert(inst.data[j], prop.desc.contents.cimtype & CIM_TYPEMASK)
if prop.name:
setattr(result, prop.name.lower(), value)
return result
def deferred(ctx):
cback = Callback()
ctx.contents.async.fn = cback.callback
return cback.deferred
wbemTimeoutInfinite = -1
class QueryResult(object):
def __init__(self, deviceId, ctx, pEnum):
self._deviceId = deviceId
self.ctx = ctx
talloc_increase_ref_count(self.ctx)
self.pEnum = pEnum
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None
def __del__(self):
self.close()
def fetchSome(self, timeoutMs=wbemTimeoutInfinite, chunkSize=10):
assert self.pEnum
def inner(driver):
count = uint32_t()
objs = (POINTER(WbemClassObject)*chunkSize)()
ctx = library.IEnumWbemClassObject_SmartNext_send(
self.pEnum, None, timeoutMs, chunkSize
)
yield deferred(ctx); driver.next()
result = library.IEnumWbemClassObject_SmartNext_recv(
ctx, self.ctx, objs, byref(count)
)
WERR_CHECK(result, self._deviceId, "Retrieve result data.")
result = []
for i in range(count.value):
result.append(wbemInstanceToPython(objs[i]))
talloc_free(objs[i])
driver.finish(result)
return drive(inner)
class Query(object):
def __init__(self):
self.ctx = POINTER(com_context)()
self.pWS = POINTER(IWbemServices)()
self._deviceId = None
def connect(self, eventContext, deviceId, hostname, creds, namespace="root\\cimv2"):
self._deviceId = deviceId
library.com_init_ctx.restype = WERROR
library.com_init_ctx(byref(self.ctx), eventContext)
cred = library.cli_credentials_init(self.ctx)
library.cli_credentials_set_conf(cred)
library.cli_credentials_parse_string(cred, creds, CRED_SPECIFIED)
library.dcom_client_init(self.ctx, cred)
def inner(driver):
flags = uint32_t()
flags.value = 0
ctx = library.WBEM_ConnectServer_send(
self.ctx, # com_ctx
None, # parent_ctx
hostname, # server
namespace, # namespace
None, # username
None, # password
None, # locale
flags.value, # flags
None, # authority
None) # wbem_ctx
yield deferred(ctx); driver.next()
result = library.WBEM_ConnectServer_recv(ctx, None, byref(self.pWS))
WERR_CHECK(result, self._deviceId, "Connect")
driver.finish(None)
return drive(inner)
def query(self, query):
assert self.pWS
def inner(driver):
qctx = None
try:
qctx = library.IWbemServices_ExecQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_ENSURE_LOCATABLE,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecQuery_recv(qctx,
byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecQuery")
ctx = library.IEnumWbemClassObject_Reset_send_f(pEnum, self.ctx)
yield deferred(ctx); driver.next()
result = library.IEnumWbemClassObject_Reset_recv(ctx);
WERR_CHECK(result, self._deviceId, "Reset result of WMI query.");
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
log.exception(ex)
raise
return drive(inner)
def notificationQuery(self, query):
assert self.pWS
def inner(driver):
qctx = None
pEnum = None
try:
qctx = library.IWbemServices_ExecNotificationQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_FORWARD_ONLY,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecNotificationQuery_recv(
qctx, byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecNotificationQuery")
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
if pEnum:
c = library.IUnknown_Release_send_f(pEnum, self.ctx)
yield deferred(c); driver.next()
result = library.IUnknown_Release_recv(self.ctx)
WERR_CHECK(result, self._deviceId, "Release")
log.exception(ex)
raise
return drive(inner)
def __del__(self):
self.close()
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None | 0.363421 | 0.104158 |
import sys
pens = {}
curpen = None
hpgl_pts_in = 955.0
at = (float(sys.argv[3]),float(sys.argv[4]))
with open(sys.argv[2], 'w') as svg:
print >> svg, '''<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="5in" height="5in" viewBox="-2.5 -2.5 5 5">
<defs>
<g id="hpgl2svg">
<path d="'''
with open(sys.argv[1]) as hpgl:
for line in [x.strip() for x in hpgl.readlines()]:
if len(line.strip()) == 0: continue
cmd = line[0:2]
rem = line[2:]
if cmd == 'CO':
pass # comment
elif cmd == 'IN':
pass # initialize
elif cmd == 'IP':
pass # set origin
elif cmd == 'SC':
pass # set scale
elif cmd == 'SP':
curpen = int(rem.strip(';'))
if curpen not in pens.keys():
pens[curpen] = (0,0)
elif cmd == 'PU':
pens[curpen] = tuple([int(x.strip().strip(';'))/hpgl_pts_in for x in rem.split(',')])
print >> svg, "M %f,%f "%(pens[curpen][0]+at[0],pens[curpen][1]+at[1])
elif cmd == 'PD':
parts = [int(x.strip().strip(';'))/hpgl_pts_in for x in rem.split(',')]
pts = [(parts[2*i],parts[2*i+1]) for i in xrange(len(parts)/2)]
print >> svg, "l",
for pt in pts:
oldpen = pens[curpen]
pens[curpen] = (pt[0],pt[1])
print >> svg, "%f,%f "%(pens[curpen][0]-oldpen[0],pens[curpen][1]-oldpen[1])
elif cmd == 'CI':
radius = int(rem)
pass # circle with radius
elif cmd == 'SS':
pass # select standard font
elif cmd == 'DT':
pass # select text delimiter
elif cmd == 'LB':
pass # draw label
elif cmd == 'LT':
pass # set linetype
elif cmd == 'CS':
pass # set caracter set
elif cmd == 'DI':
pass # set catheti
elif cmd == 'SI':
pass # set character width & height
else:
raise Exception('Unknown HPGL code "%s".'%line)
print >> svg, '''" stroke="red" stroke-width="0.01" fill="none"/>
</g>
</defs>
<line x1="-.126" y1="0" x2=".126" y2="0" stroke-width="0.02" fill="black" stroke="blue" />
<line y1="-.126" x1="0" y2=".126" x2="0" stroke-width="0.02" fill="black" stroke="blue" />
<use xlink:href="#hpgl2svg" x="0" y="0"/>
</svg>''' | hpgl2svg.py |
import sys
pens = {}
curpen = None
hpgl_pts_in = 955.0
at = (float(sys.argv[3]),float(sys.argv[4]))
with open(sys.argv[2], 'w') as svg:
print >> svg, '''<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="5in" height="5in" viewBox="-2.5 -2.5 5 5">
<defs>
<g id="hpgl2svg">
<path d="'''
with open(sys.argv[1]) as hpgl:
for line in [x.strip() for x in hpgl.readlines()]:
if len(line.strip()) == 0: continue
cmd = line[0:2]
rem = line[2:]
if cmd == 'CO':
pass # comment
elif cmd == 'IN':
pass # initialize
elif cmd == 'IP':
pass # set origin
elif cmd == 'SC':
pass # set scale
elif cmd == 'SP':
curpen = int(rem.strip(';'))
if curpen not in pens.keys():
pens[curpen] = (0,0)
elif cmd == 'PU':
pens[curpen] = tuple([int(x.strip().strip(';'))/hpgl_pts_in for x in rem.split(',')])
print >> svg, "M %f,%f "%(pens[curpen][0]+at[0],pens[curpen][1]+at[1])
elif cmd == 'PD':
parts = [int(x.strip().strip(';'))/hpgl_pts_in for x in rem.split(',')]
pts = [(parts[2*i],parts[2*i+1]) for i in xrange(len(parts)/2)]
print >> svg, "l",
for pt in pts:
oldpen = pens[curpen]
pens[curpen] = (pt[0],pt[1])
print >> svg, "%f,%f "%(pens[curpen][0]-oldpen[0],pens[curpen][1]-oldpen[1])
elif cmd == 'CI':
radius = int(rem)
pass # circle with radius
elif cmd == 'SS':
pass # select standard font
elif cmd == 'DT':
pass # select text delimiter
elif cmd == 'LB':
pass # draw label
elif cmd == 'LT':
pass # set linetype
elif cmd == 'CS':
pass # set caracter set
elif cmd == 'DI':
pass # set catheti
elif cmd == 'SI':
pass # set character width & height
else:
raise Exception('Unknown HPGL code "%s".'%line)
print >> svg, '''" stroke="red" stroke-width="0.01" fill="none"/>
</g>
</defs>
<line x1="-.126" y1="0" x2=".126" y2="0" stroke-width="0.02" fill="black" stroke="blue" />
<line y1="-.126" x1="0" y2=".126" x2="0" stroke-width="0.02" fill="black" stroke="blue" />
<use xlink:href="#hpgl2svg" x="0" y="0"/>
</svg>''' | 0.128266 | 0.155591 |
import numpy as np
from ..sampling import Sample
class MarkovSample(Sample):
def __init__(self, **kwargs):
self.accepted = 0
super().__init__(**kwargs)
self._sample_info.append(('accept_ratio', 'acceptance rate', '%f'))
@property
def accept_ratio(self):
return self.accepted / self.size
# MARKOV CHAIN
class MarkovUpdate(object):
""" Basic update mechanism of a Markov chain. """
def __init__(self, ndim, is_adaptive=False, target=None):
self.ndim = ndim
self.target = target
self.is_adaptive = is_adaptive
# will hold information if update was used as a sampler
self.sample_info = None
def init_adapt(self, initial_state):
pass
def init_state(self, state):
return state # may initialize other state attributes (such as pdf)
def next_state(self, state, iteration):
""" Get the next state in the Markov chain.
:return: The next state.
"""
raise NotImplementedError("AbstractMarkovUpdate is abstract.")
def sample(self, sample_size, initial, out_mask=None, log_every=5000):
""" Generate a sample of given size.
:param sample_size: Number of samples to generate.
:param initial: Initial value of the Markov chain. Internally
converted to numpy array.
:param out_mask: Slice object, return only this slice of the output
chain (useful if sampler uses artificial variables).
:param log_every: Print the number of generated samples. Do not log if
value is < 0. Log every sample for log=1.
:return: Numpy array with shape (sample_size, self.ndim).
"""
# initialize sampling
state = self.init_state(np.atleast_1d(initial))
if len(state) != self.ndim:
raise ValueError('initial must have dimension ' + str(self.ndim))
self.init_adapt(state) # initial adaptation
sample = MarkovSample()
tags = dict()
tagged = dict()
chain = np.empty((sample_size, self.ndim))
chain[0] = state
for i in range(1, sample_size):
state = self.next_state(state, i)
if not np.array_equal(state, chain[i - 1]):
sample.accepted += 1
chain[i] = state
try:
try:
tags[state.tag_parser].append(state.tag)
tagged[state.tag_parser].append(i)
except KeyError:
tags[state.tag_parser] = []
tagged[state.tag_parser] = []
except AttributeError:
pass
if log_every > 0 and (i + 1) % log_every == 0:
print("Generated %d samples." % (i + 1), flush=True)
if out_mask is not None:
chain = chain[:, out_mask]
for parser in tagged:
chain[tagged[parser]] = parser(chain[tagged[parser]], tags[parser])
sample.data = chain
sample.target = self.target
return sample
class CompositeMarkovUpdate(MarkovUpdate):
def __init__(self, ndim, updates, masks=None, target=None):
""" Composite Markov update; combine updates.
:param updates: List of update mechanisms, each subtypes of
MetropolisLikeUpdate.
:param masks: Dictionary, giving masks (list/array of indices)
of dimensions for the index of the update mechanism. Use this if
some updates only affect slices of the state.
"""
is_adaptive = any(update.is_adaptive for update in updates)
if target is None:
for update in updates:
if update.target is not None:
target = update.target
break
super().__init__(ndim, is_adaptive=is_adaptive, target=target)
self.updates = updates
self.masks = [None if masks is None or i not in masks else masks[i]
for i in range(len(updates))]
def init_adapt(self, initial_state):
for update in self.updates:
state = update.init_state(initial_state)
update.init_adapt(state)
def next_state(self, state, iteration):
for mechanism, mask in zip(self.updates, self.masks):
if mask is None:
state = mechanism.next_state(state, iteration)
else:
state = np.copy(state)
state[mask] = mechanism.next_state(state[mask], iteration)
return state
class MixingMarkovUpdate(MarkovUpdate):
def __init__(self, ndim, updates, weights=None, masks=None, target=None):
""" Mix a number of update mechanisms, choosing one in each step.
:param updates: List of update mechanisms (AbstractMarkovUpdate).
:param weights: List of weights for each of the mechanisms (sum to 1).
:param masks: Slice object, specify if updates only affect slice of
state.
"""
is_adaptive = any(update.is_adaptive for update in updates)
if target is None:
for update in updates:
if update.target is not None:
target = update.target
break
super().__init__(ndim, is_adaptive=is_adaptive, target=target)
self.updates = updates
self.updates_count = len(updates)
self.masks = [None if masks is None or i not in masks else masks[i]
for i in range(len(updates))]
if weights is None:
weights = np.ones(self.updates_count) / self.updates_count
self.weights = weights
def init_adapt(self, initial_state):
for update in self.updates:
state = update.init_state(initial_state)
update.init_adapt(state)
def next_state(self, state, iteration):
index = np.random.choice(self.updates_count, p=self.weights)
if self.masks[index] is None:
state = self.updates[index].init_state(state)
return self.updates[index].next_state(state, iteration)
else:
mask = self.masks[index]
state = np.copy(state)
state[mask] = self.updates[index].next_state(state[mask], iteration)
return state | src/hepmc/core/markov/base.py | import numpy as np
from ..sampling import Sample
class MarkovSample(Sample):
def __init__(self, **kwargs):
self.accepted = 0
super().__init__(**kwargs)
self._sample_info.append(('accept_ratio', 'acceptance rate', '%f'))
@property
def accept_ratio(self):
return self.accepted / self.size
# MARKOV CHAIN
class MarkovUpdate(object):
""" Basic update mechanism of a Markov chain. """
def __init__(self, ndim, is_adaptive=False, target=None):
self.ndim = ndim
self.target = target
self.is_adaptive = is_adaptive
# will hold information if update was used as a sampler
self.sample_info = None
def init_adapt(self, initial_state):
pass
def init_state(self, state):
return state # may initialize other state attributes (such as pdf)
def next_state(self, state, iteration):
""" Get the next state in the Markov chain.
:return: The next state.
"""
raise NotImplementedError("AbstractMarkovUpdate is abstract.")
def sample(self, sample_size, initial, out_mask=None, log_every=5000):
""" Generate a sample of given size.
:param sample_size: Number of samples to generate.
:param initial: Initial value of the Markov chain. Internally
converted to numpy array.
:param out_mask: Slice object, return only this slice of the output
chain (useful if sampler uses artificial variables).
:param log_every: Print the number of generated samples. Do not log if
value is < 0. Log every sample for log=1.
:return: Numpy array with shape (sample_size, self.ndim).
"""
# initialize sampling
state = self.init_state(np.atleast_1d(initial))
if len(state) != self.ndim:
raise ValueError('initial must have dimension ' + str(self.ndim))
self.init_adapt(state) # initial adaptation
sample = MarkovSample()
tags = dict()
tagged = dict()
chain = np.empty((sample_size, self.ndim))
chain[0] = state
for i in range(1, sample_size):
state = self.next_state(state, i)
if not np.array_equal(state, chain[i - 1]):
sample.accepted += 1
chain[i] = state
try:
try:
tags[state.tag_parser].append(state.tag)
tagged[state.tag_parser].append(i)
except KeyError:
tags[state.tag_parser] = []
tagged[state.tag_parser] = []
except AttributeError:
pass
if log_every > 0 and (i + 1) % log_every == 0:
print("Generated %d samples." % (i + 1), flush=True)
if out_mask is not None:
chain = chain[:, out_mask]
for parser in tagged:
chain[tagged[parser]] = parser(chain[tagged[parser]], tags[parser])
sample.data = chain
sample.target = self.target
return sample
class CompositeMarkovUpdate(MarkovUpdate):
def __init__(self, ndim, updates, masks=None, target=None):
""" Composite Markov update; combine updates.
:param updates: List of update mechanisms, each subtypes of
MetropolisLikeUpdate.
:param masks: Dictionary, giving masks (list/array of indices)
of dimensions for the index of the update mechanism. Use this if
some updates only affect slices of the state.
"""
is_adaptive = any(update.is_adaptive for update in updates)
if target is None:
for update in updates:
if update.target is not None:
target = update.target
break
super().__init__(ndim, is_adaptive=is_adaptive, target=target)
self.updates = updates
self.masks = [None if masks is None or i not in masks else masks[i]
for i in range(len(updates))]
def init_adapt(self, initial_state):
for update in self.updates:
state = update.init_state(initial_state)
update.init_adapt(state)
def next_state(self, state, iteration):
for mechanism, mask in zip(self.updates, self.masks):
if mask is None:
state = mechanism.next_state(state, iteration)
else:
state = np.copy(state)
state[mask] = mechanism.next_state(state[mask], iteration)
return state
class MixingMarkovUpdate(MarkovUpdate):
def __init__(self, ndim, updates, weights=None, masks=None, target=None):
""" Mix a number of update mechanisms, choosing one in each step.
:param updates: List of update mechanisms (AbstractMarkovUpdate).
:param weights: List of weights for each of the mechanisms (sum to 1).
:param masks: Slice object, specify if updates only affect slice of
state.
"""
is_adaptive = any(update.is_adaptive for update in updates)
if target is None:
for update in updates:
if update.target is not None:
target = update.target
break
super().__init__(ndim, is_adaptive=is_adaptive, target=target)
self.updates = updates
self.updates_count = len(updates)
self.masks = [None if masks is None or i not in masks else masks[i]
for i in range(len(updates))]
if weights is None:
weights = np.ones(self.updates_count) / self.updates_count
self.weights = weights
def init_adapt(self, initial_state):
for update in self.updates:
state = update.init_state(initial_state)
update.init_adapt(state)
def next_state(self, state, iteration):
index = np.random.choice(self.updates_count, p=self.weights)
if self.masks[index] is None:
state = self.updates[index].init_state(state)
return self.updates[index].next_state(state, iteration)
else:
mask = self.masks[index]
state = np.copy(state)
state[mask] = self.updates[index].next_state(state[mask], iteration)
return state | 0.834373 | 0.500671 |
from __future__ import unicode_literals
from rdflib import *
from rdflib.resource import Resource
L = Namespace("http://w3id.org/libris/logic/")
def add_magic_properties(vocab, data):
for rclass, mprop in vocab.resource(L.magicProperty).subject_objects():
#print rclass.qname()
for s in data.resource(rclass.identifier).subjects(RDF.type):
result_prop = mprop.value(L.resultProperty).identifier
use_link = mprop.value(L.useLink)
value = expand_template(
s.value(use_link.identifier) if use_link else s,
mprop.value(L.template))
#print "<%s>" % s, value
s.add(result_prop, Literal(value))
def expand_template(s, tplt):
if isinstance(tplt, Resource):
if any(tplt.objects(RDF.first)):
parts = list(tplt.items())
first = parts[0]
ctrl = first.identifier if isinstance(first, Resource) else None
if ctrl == L['if']:
if expand_template(s, parts[1]):
return expand_template(s, parts[2])
elif len(parts) > 3:
return expand_template(s, parts[3])
elif ctrl == L['and']:
return all(expand_template(s, part) for part in parts[1:])
elif ctrl == L['or']:
for part in parts[1:]:
v = expand_template(s, part)
if v:
return v
else:
join = ""
if ctrl == L['join']:
join, parts = parts[1], parts[2:]
return join.join(filter(None, (expand_template(s, part) for part in parts)))
else:
return s.value(tplt.identifier)
else:
return tplt
if __name__ == '__main__':
import sys
from os import path as P
from rdflib.util import guess_format
args = sys.argv[:]
script = args.pop(0)
fpath = args.pop(0) if args else P.join(P.dirname(script), "../def/terms.ttl")
vocab = Graph().parse(fpath, format=guess_format(fpath))
T = Namespace("http://libris.kb.se/def/terms#")
BASE = Namespace("http://example.org/")
data = Graph().parse(data="""
prefix : <{T}>
base <{BASE}>
</person/someone/entry> a :PersonTerm;
:focus [ a :Person;
:name "<NAME>";
:personTitle "X" ] .
</person/somebody/entry> a :PersonTerm;
:focus [ a :Person;
:name "<NAME>";
:givenName "Some"; :familyName "Body";
:birthYear "1901" ] .
</person/someother/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "Some"; :familyName "Other";
:numeration "XI"; :personTitle "Y";
:birthYear "1902" ] .
</person/nobody/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "No"; :familyName "Body";
:birthYear "1903"; :deathYear "2001" ] .
</person/noother/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "No"; :familyName "Other";
:personTitle "Z";
:deathYear "2001" ] .
""".format(**vars()), format='turtle')
add_magic_properties(vocab, data)
assert len(Graph().parse(data="""
prefix : <{T}>
base <{BASE}>
</person/someone/entry> :prefLabel "Some One (X)" .
</person/somebody/entry> :prefLabel "Body, Some 1901-" .
</person/someother/entry> :prefLabel "Other, Some XI (Y) 1902-" .
</person/nobody/entry> :prefLabel "Body, No 1903-2001" .
</person/noother/entry> :prefLabel "Other, No (Z) -2001" .
""".format(**vars()), format='turtle') - data) == 0
data.serialize(sys.stdout, format='turtle') | lxltools/magicprops.py | from __future__ import unicode_literals
from rdflib import *
from rdflib.resource import Resource
L = Namespace("http://w3id.org/libris/logic/")
def add_magic_properties(vocab, data):
for rclass, mprop in vocab.resource(L.magicProperty).subject_objects():
#print rclass.qname()
for s in data.resource(rclass.identifier).subjects(RDF.type):
result_prop = mprop.value(L.resultProperty).identifier
use_link = mprop.value(L.useLink)
value = expand_template(
s.value(use_link.identifier) if use_link else s,
mprop.value(L.template))
#print "<%s>" % s, value
s.add(result_prop, Literal(value))
def expand_template(s, tplt):
if isinstance(tplt, Resource):
if any(tplt.objects(RDF.first)):
parts = list(tplt.items())
first = parts[0]
ctrl = first.identifier if isinstance(first, Resource) else None
if ctrl == L['if']:
if expand_template(s, parts[1]):
return expand_template(s, parts[2])
elif len(parts) > 3:
return expand_template(s, parts[3])
elif ctrl == L['and']:
return all(expand_template(s, part) for part in parts[1:])
elif ctrl == L['or']:
for part in parts[1:]:
v = expand_template(s, part)
if v:
return v
else:
join = ""
if ctrl == L['join']:
join, parts = parts[1], parts[2:]
return join.join(filter(None, (expand_template(s, part) for part in parts)))
else:
return s.value(tplt.identifier)
else:
return tplt
if __name__ == '__main__':
import sys
from os import path as P
from rdflib.util import guess_format
args = sys.argv[:]
script = args.pop(0)
fpath = args.pop(0) if args else P.join(P.dirname(script), "../def/terms.ttl")
vocab = Graph().parse(fpath, format=guess_format(fpath))
T = Namespace("http://libris.kb.se/def/terms#")
BASE = Namespace("http://example.org/")
data = Graph().parse(data="""
prefix : <{T}>
base <{BASE}>
</person/someone/entry> a :PersonTerm;
:focus [ a :Person;
:name "<NAME>";
:personTitle "X" ] .
</person/somebody/entry> a :PersonTerm;
:focus [ a :Person;
:name "<NAME>";
:givenName "Some"; :familyName "Body";
:birthYear "1901" ] .
</person/someother/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "Some"; :familyName "Other";
:numeration "XI"; :personTitle "Y";
:birthYear "1902" ] .
</person/nobody/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "No"; :familyName "Body";
:birthYear "1903"; :deathYear "2001" ] .
</person/noother/entry> a :PersonTerm;
:focus [ a :Person;
:givenName "No"; :familyName "Other";
:personTitle "Z";
:deathYear "2001" ] .
""".format(**vars()), format='turtle')
add_magic_properties(vocab, data)
assert len(Graph().parse(data="""
prefix : <{T}>
base <{BASE}>
</person/someone/entry> :prefLabel "Some One (X)" .
</person/somebody/entry> :prefLabel "Body, Some 1901-" .
</person/someother/entry> :prefLabel "Other, Some XI (Y) 1902-" .
</person/nobody/entry> :prefLabel "Body, No 1903-2001" .
</person/noother/entry> :prefLabel "Other, No (Z) -2001" .
""".format(**vars()), format='turtle') - data) == 0
data.serialize(sys.stdout, format='turtle') | 0.308503 | 0.278637 |
import tensorflow as tf
import numpy as np
from absl import flags, app
import models
import re
import os
import modeling
# absl command-line flag definitions; values are parsed by app.run() into FLAGS.
# NOTE(review): the default paths point at a developer-local machine
# (/Users/lollipop/...); override them when running elsewhere.
FLAGS = flags.FLAGS
flags.DEFINE_string("bert_config_file", "/Users/lollipop/Downloads/bert/chinese_L-12_H-768_A-12/bert_config.json",
                    "Bert configuration file to define core bert layers.")
flags.DEFINE_string("new_checkpoint_output_path", "out_new",
                    "Name for the created object-based tf2 checkpoint.")
flags.DEFINE_string(
    "TF1_checkpoint_path", "/Users/lollipop/Downloads/bert/chinese_L-12_H-768_A-12/bert_model.ckpt",
    "Initial checkpoint from a pretrained BERT tf1 model of Google ")
flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
                     "Maximum number of masked LM predictions per sequence.")
def re_map_tf1(name):
    """Translate a TF2 (Keras) variable name into its TF1 checkpoint tensor name.

    Maps names such as ``bert/encoder/layer_0/self_attention/query/kernel:0``
    to the corresponding Google BERT v1 checkpoint key
    (``bert/encoder/layer_0/attention/self/query/kernel``).  The rules are
    applied in order with ``re.sub``; a name that matches none of the
    patterns is returned unchanged.

    Args:
        name: variable name from ``model.trainable_weights`` (ends in ``:0``).

    Returns:
        The corresponding tensor name in the TF1 checkpoint.
    """
    # (TF2 pattern, TF1 replacement) pairs.  The encoder layer index, when
    # present, is captured as group 1 and re-inserted via \1.
    rules = (
        # --- transformer encoder layers ---------------------------------
        (r"bert/encoder/layer_(\d+)/self_attention/query/kernel:0",
         r"bert/encoder/layer_\1/attention/self/query/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/query/bias:0",
         r"bert/encoder/layer_\1/attention/self/query/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/key/kernel:0",
         r"bert/encoder/layer_\1/attention/self/key/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/key/bias:0",
         r"bert/encoder/layer_\1/attention/self/key/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/value/kernel:0",
         r"bert/encoder/layer_\1/attention/self/value/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/value/bias:0",
         r"bert/encoder/layer_\1/attention/self/value/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/self_attention_output/kernel:0",
         r"bert/encoder/layer_\1/attention/output/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/self_attention_output/bias:0",
         r"bert/encoder/layer_\1/attention/output/dense/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention_layer_norm/gamma:0",
         r"bert/encoder/layer_\1/attention/output/LayerNorm/gamma"),
        (r"bert/encoder/layer_(\d+)/self_attention_layer_norm/beta:0",
         r"bert/encoder/layer_\1/attention/output/LayerNorm/beta"),
        (r"bert/encoder/layer_(\d+)/intermediate/kernel:0",
         r"bert/encoder/layer_\1/intermediate/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/intermediate/bias:0",
         r"bert/encoder/layer_\1/intermediate/dense/bias"),
        (r"bert/encoder/layer_(\d+)/output/kernel:0",
         r"bert/encoder/layer_\1/output/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/output/bias:0",
         r"bert/encoder/layer_\1/output/dense/bias"),
        (r"bert/encoder/layer_(\d+)/output_layer_norm/gamma:0",
         r"bert/encoder/layer_\1/output/LayerNorm/gamma"),
        (r"bert/encoder/layer_(\d+)/output_layer_norm/beta:0",
         r"bert/encoder/layer_\1/output/LayerNorm/beta"),
        # --- embeddings --------------------------------------------------
        (r"bert/embedding_processor/embedding_pos/embeddings:0",
         r"bert/embeddings/position_embeddings"),
        (r"bert/embedding_processor/embedding_word_ids/embeddings:0",
         r"bert/embeddings/word_embeddings"),
        (r"bert/embedding_processor/embedding_type_ids/embeddings:0",
         r"bert/embeddings/token_type_embeddings"),
        (r"bert/embedding_processor/layer_norm/gamma:0",
         r"bert/embeddings/LayerNorm/gamma"),
        (r"bert/embedding_processor/layer_norm/beta:0",
         r"bert/embeddings/LayerNorm/beta"),
        # --- pooler ------------------------------------------------------
        (r"bert/pooler_transform/kernel:0",
         r"bert/pooler/dense/kernel"),
        (r"bert/pooler_transform/bias:0",
         r"bert/pooler/dense/bias"),
        # --- pre-training heads (masked LM / next sentence) --------------
        (r"mask_label_loss/output_bias:0",
         r"cls/predictions/output_bias"),
        (r"mask_label_loss/dense/kernel:0",
         r"cls/predictions/transform/dense/kernel"),
        (r"mask_label_loss/dense/bias:0",
         r"cls/predictions/transform/dense/bias"),
        (r"mask_label_loss/layer_norm/gamma:0",
         r"cls/predictions/transform/LayerNorm/gamma"),
        (r"mask_label_loss/layer_norm/beta:0",
         r"cls/predictions/transform/LayerNorm/beta"),
        (r"next_sentence_loss/dense_1/kernel:0",
         r"cls/seq_relationship/output_weights"),
        (r"next_sentence_loss/dense_1/bias:0",
         r"cls/seq_relationship/output_bias"),
    )
    tensor_name = name
    for pattern, replacement in rules:
        tensor_name = re.sub(pattern, replacement, tensor_name)
    return tensor_name
def name_map_tf1(name):
    """Public mapping hook: delegate TF2->TF1 name translation to re_map_tf1."""
    return re_map_tf1(name)
def conver_model_tf1(model, tf1_ckpt_path, new_ckpt_save_path):
    """Converts a V1 checkpoint of Google into an V2 checkpoint.

    Copies every trainable weight of ``model`` from the TF1 checkpoint
    (looked up by the name produced by name_map_tf1) and saves the result
    as ``<new_ckpt_save_path>/bert_model.ckpt``.

    Raises:
        Whatever tf.train raises if a mapped tensor name is missing from
        the checkpoint.
    """
    # Load the checkpoint reader once; tensors are fetched lazily by name.
    ckpt_tf1 = tf.train.load_checkpoint(tf1_ckpt_path)
    for trainable_weight in model.trainable_weights:
        name = trainable_weight.name
        map_name = name_map_tf1(name)
        map_tensor = ckpt_tf1.get_tensor(map_name)
        # This kernel maps to TF1's cls/seq_relationship/output_weights,
        # which is apparently stored transposed relative to a Keras Dense
        # kernel -- hence the .T before assignment (TODO confirm layout).
        if name == "next_sentence_loss/dense_1/kernel:0":
            map_tensor = map_tensor.T
        trainable_weight.assign(map_tensor)
        print(f"{map_name, map_tensor.shape} >>>> {name, trainable_weight.shape} 转换成功")
    model.save_weights(os.path.join(new_ckpt_save_path, "bert_model.ckpt"))
def main(_):
    """Entry point: build the TF2 model and fill it from the TF1 checkpoint."""
    # This converter targets TF2 only.
    assert tf.version.VERSION.startswith('2.')
    config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
    # Convert the core BERT model only (excludes pre-training head weights).
    model = models.get_base_model(config=config,
                                  max_seq_length=FLAGS.max_seq_length)
    # To also convert the pre-training head weights, use the variant below.
    # model= models.getPretrainingModel(config=config,
    #                                   max_seq_length=FLAGS.max_seq_length,
    #                                   max_predictions_per_seq=FLAGS.max_predictions_per_seq)
    conver_model_tf1(model, FLAGS.TF1_checkpoint_path, FLAGS.new_checkpoint_output_path)
    print("TF1模型转换完成")
if __name__ == '__main__':
    # NOTE(review): these flags are defined above with non-None defaults, so
    # marking them required here is a no-op -- confirm whether the defaults
    # should be dropped instead.
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("TF1_checkpoint_path")
    flags.mark_flag_as_required("new_checkpoint_output_path")
    app.run(main) | Bert/tf1_ckpt_converter.py |
import tensorflow as tf
import numpy as np
from absl import flags, app
import models
import re
import os
import modeling
# absl command-line flag definitions; values are parsed by app.run() into FLAGS.
# NOTE(review): the default paths point at a developer-local machine
# (/Users/lollipop/...); override them when running elsewhere.
FLAGS = flags.FLAGS
flags.DEFINE_string("bert_config_file", "/Users/lollipop/Downloads/bert/chinese_L-12_H-768_A-12/bert_config.json",
                    "Bert configuration file to define core bert layers.")
flags.DEFINE_string("new_checkpoint_output_path", "out_new",
                    "Name for the created object-based tf2 checkpoint.")
flags.DEFINE_string(
    "TF1_checkpoint_path", "/Users/lollipop/Downloads/bert/chinese_L-12_H-768_A-12/bert_model.ckpt",
    "Initial checkpoint from a pretrained BERT tf1 model of Google ")
flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
                     "Maximum number of masked LM predictions per sequence.")
def re_map_tf1(name):
    """Translate a TF2 (Keras) variable name into its TF1 checkpoint tensor name.

    Maps names such as ``bert/encoder/layer_0/self_attention/query/kernel:0``
    to the corresponding Google BERT v1 checkpoint key
    (``bert/encoder/layer_0/attention/self/query/kernel``).  The rules are
    applied in order with ``re.sub``; a name that matches none of the
    patterns is returned unchanged.

    Args:
        name: variable name from ``model.trainable_weights`` (ends in ``:0``).

    Returns:
        The corresponding tensor name in the TF1 checkpoint.
    """
    # (TF2 pattern, TF1 replacement) pairs.  The encoder layer index, when
    # present, is captured as group 1 and re-inserted via \1.
    rules = (
        # --- transformer encoder layers ---------------------------------
        (r"bert/encoder/layer_(\d+)/self_attention/query/kernel:0",
         r"bert/encoder/layer_\1/attention/self/query/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/query/bias:0",
         r"bert/encoder/layer_\1/attention/self/query/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/key/kernel:0",
         r"bert/encoder/layer_\1/attention/self/key/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/key/bias:0",
         r"bert/encoder/layer_\1/attention/self/key/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/value/kernel:0",
         r"bert/encoder/layer_\1/attention/self/value/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/value/bias:0",
         r"bert/encoder/layer_\1/attention/self/value/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention/self_attention_output/kernel:0",
         r"bert/encoder/layer_\1/attention/output/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/self_attention/self_attention_output/bias:0",
         r"bert/encoder/layer_\1/attention/output/dense/bias"),
        (r"bert/encoder/layer_(\d+)/self_attention_layer_norm/gamma:0",
         r"bert/encoder/layer_\1/attention/output/LayerNorm/gamma"),
        (r"bert/encoder/layer_(\d+)/self_attention_layer_norm/beta:0",
         r"bert/encoder/layer_\1/attention/output/LayerNorm/beta"),
        (r"bert/encoder/layer_(\d+)/intermediate/kernel:0",
         r"bert/encoder/layer_\1/intermediate/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/intermediate/bias:0",
         r"bert/encoder/layer_\1/intermediate/dense/bias"),
        (r"bert/encoder/layer_(\d+)/output/kernel:0",
         r"bert/encoder/layer_\1/output/dense/kernel"),
        (r"bert/encoder/layer_(\d+)/output/bias:0",
         r"bert/encoder/layer_\1/output/dense/bias"),
        (r"bert/encoder/layer_(\d+)/output_layer_norm/gamma:0",
         r"bert/encoder/layer_\1/output/LayerNorm/gamma"),
        (r"bert/encoder/layer_(\d+)/output_layer_norm/beta:0",
         r"bert/encoder/layer_\1/output/LayerNorm/beta"),
        # --- embeddings --------------------------------------------------
        (r"bert/embedding_processor/embedding_pos/embeddings:0",
         r"bert/embeddings/position_embeddings"),
        (r"bert/embedding_processor/embedding_word_ids/embeddings:0",
         r"bert/embeddings/word_embeddings"),
        (r"bert/embedding_processor/embedding_type_ids/embeddings:0",
         r"bert/embeddings/token_type_embeddings"),
        (r"bert/embedding_processor/layer_norm/gamma:0",
         r"bert/embeddings/LayerNorm/gamma"),
        (r"bert/embedding_processor/layer_norm/beta:0",
         r"bert/embeddings/LayerNorm/beta"),
        # --- pooler ------------------------------------------------------
        (r"bert/pooler_transform/kernel:0",
         r"bert/pooler/dense/kernel"),
        (r"bert/pooler_transform/bias:0",
         r"bert/pooler/dense/bias"),
        # --- pre-training heads (masked LM / next sentence) --------------
        (r"mask_label_loss/output_bias:0",
         r"cls/predictions/output_bias"),
        (r"mask_label_loss/dense/kernel:0",
         r"cls/predictions/transform/dense/kernel"),
        (r"mask_label_loss/dense/bias:0",
         r"cls/predictions/transform/dense/bias"),
        (r"mask_label_loss/layer_norm/gamma:0",
         r"cls/predictions/transform/LayerNorm/gamma"),
        (r"mask_label_loss/layer_norm/beta:0",
         r"cls/predictions/transform/LayerNorm/beta"),
        (r"next_sentence_loss/dense_1/kernel:0",
         r"cls/seq_relationship/output_weights"),
        (r"next_sentence_loss/dense_1/bias:0",
         r"cls/seq_relationship/output_bias"),
    )
    tensor_name = name
    for pattern, replacement in rules:
        tensor_name = re.sub(pattern, replacement, tensor_name)
    return tensor_name
def name_map_tf1(name):
    """Public mapping hook: delegate TF2->TF1 name translation to re_map_tf1."""
    return re_map_tf1(name)
def conver_model_tf1(model, tf1_ckpt_path, new_ckpt_save_path):
    """Converts a V1 checkpoint of Google into an V2 checkpoint.

    Copies every trainable weight of ``model`` from the TF1 checkpoint
    (looked up by the name produced by name_map_tf1) and saves the result
    as ``<new_ckpt_save_path>/bert_model.ckpt``.
    """
    # Load the checkpoint reader once; tensors are fetched lazily by name.
    ckpt_tf1 = tf.train.load_checkpoint(tf1_ckpt_path)
    for trainable_weight in model.trainable_weights:
        name = trainable_weight.name
        map_name = name_map_tf1(name)
        map_tensor = ckpt_tf1.get_tensor(map_name)
        # This kernel maps to TF1's cls/seq_relationship/output_weights,
        # which is apparently stored transposed relative to a Keras Dense
        # kernel -- hence the .T before assignment (TODO confirm layout).
        if name == "next_sentence_loss/dense_1/kernel:0":
            map_tensor = map_tensor.T
        trainable_weight.assign(map_tensor)
        print(f"{map_name, map_tensor.shape} >>>> {name, trainable_weight.shape} 转换成功")
    model.save_weights(os.path.join(new_ckpt_save_path, "bert_model.ckpt"))
def main(_):
    """Entry point: build the TF2 model and fill it from the TF1 checkpoint."""
    # This converter targets TF2 only.
    assert tf.version.VERSION.startswith('2.')
    config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
    # Convert the core BERT model only (excludes pre-training head weights).
    model = models.get_base_model(config=config,
                                  max_seq_length=FLAGS.max_seq_length)
    # To also convert the pre-training head weights, use the variant below.
    # model= models.getPretrainingModel(config=config,
    #                                   max_seq_length=FLAGS.max_seq_length,
    #                                   max_predictions_per_seq=FLAGS.max_predictions_per_seq)
    conver_model_tf1(model, FLAGS.TF1_checkpoint_path, FLAGS.new_checkpoint_output_path)
    print("TF1模型转换完成")
if __name__ == '__main__':
    # NOTE(review): these flags are defined above with non-None defaults, so
    # marking them required here is a no-op -- confirm whether the defaults
    # should be dropped instead.
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("TF1_checkpoint_path")
    flags.mark_flag_as_required("new_checkpoint_output_path")
    app.run(main) | 0.562898 | 0.140808 |
import os
import hashlib
from twisted.internet.defer import DeferredList
from twisted.trial import unittest
import yaml
from awspider.servers import DataServer
from awspider.aws import AmazonS3
class DataServerStartTestCase(unittest.TestCase):
    """Starts a DataServer built from tests/config.yaml, then shuts it down.

    NOTE(review): ``raiseConfigException`` is called below but is not defined
    on this class or on ``unittest.TestCase`` -- confirm it exists on the
    base class actually used at runtime.
    """

    def setUp(self):
        """Read AWS credentials from config.yaml and construct a DataServer."""
        config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "config.yaml"))
        if not os.path.isfile(config_path):
            self.raiseConfigException(config_path)
        # safe_load never constructs arbitrary Python objects from the YAML,
        # and the context manager closes the file handle deterministically
        # (the old code used yaml.load on an unclosed open() handle).
        with open(config_path, 'r') as config_file:
            config = yaml.safe_load(config_file)
        if "aws_access_key_id" not in config or "aws_secret_access_key" not in config:
            self.raiseConfigException(config_path)
        # Per-test-class hash keeps the S3 bucket / SDB domain names unique so
        # test classes do not clobber each other's cloud resources.  The
        # explicit encode makes hashing work on Python 3 (sha256 rejects str).
        self.uuid = hashlib.sha256(("%s%s%s" % (
            config["aws_access_key_id"],
            config["aws_secret_access_key"],
            self.__class__.__name__)).encode("utf-8")).hexdigest()
        self.aws_access_key_id = config["aws_access_key_id"]
        self.aws_secret_access_key = config["aws_secret_access_key"]
        self.aws_s3_storage_bucket = "%s_storage" % self.uuid
        self.aws_sdb_reservation_domain = "%s_reservation" % self.uuid
        self.dataserver = DataServer(
            aws_access_key_id = self.aws_access_key_id,
            aws_secret_access_key = self.aws_secret_access_key,
            aws_s3_storage_bucket = self.aws_s3_storage_bucket,
            aws_sdb_reservation_domain = self.aws_sdb_reservation_domain,
            port = 5001
        )

    def tearDown(self):
        """Clear server storage, then delete the backing S3 bucket."""
        d = DeferredList([self.dataserver.clearStorage()])
        d.addCallback(self._tearDownCallback)
        return d

    def _tearDownCallback(self, data):
        # Storage is empty; now remove the bucket itself.
        s3 = AmazonS3(self.aws_access_key_id, self.aws_secret_access_key)
        return DeferredList([s3.deleteBucket(self.aws_s3_storage_bucket)])

    def testStart(self):
        """The server starts successfully and can then be shut down."""
        d = self.dataserver.start()
        d.addCallback(self._startCallback)
        return d

    def _startCallback(self, data):
        return self.dataserver.shutdown()
class DataServerTestCase(unittest.TestCase):
    """Tests against a DataServer that is started in setUp and torn down after.

    NOTE(review): ``raiseConfigException`` is called below but is not defined
    on this class or on unittest.TestCase -- confirm it exists on the real
    base class.  Also, ``yaml.load`` without a Loader and ``sha256`` of a str
    only work on legacy Python/PyYAML versions.
    """
    def setUp(self):
        # Read AWS credentials from tests/config.yaml next to this file.
        config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "config.yaml"))
        if not os.path.isfile(config_path):
            self.raiseConfigException(config_path)
        config = yaml.load(open(config_path, 'r').read())
        if not "aws_access_key_id" in config or "aws_secret_access_key" not in config:
            self.raiseConfigException(config_path)
        # Per-test-class hash keeps bucket/domain names collision-free.
        self.uuid = hashlib.sha256("%s%s%s" % (
            config["aws_access_key_id"],
            config["aws_secret_access_key"],
            self.__class__.__name__)).hexdigest()
        self.aws_access_key_id = config["aws_access_key_id"]
        self.aws_secret_access_key = config["aws_secret_access_key"]
        self.aws_s3_storage_bucket = "%s_storage" % self.uuid
        self.aws_sdb_reservation_domain = "%s_reservation" % self.uuid
        self.dataserver = DataServer(
            aws_access_key_id = self.aws_access_key_id,
            aws_secret_access_key = self.aws_secret_access_key,
            aws_s3_storage_bucket = self.aws_s3_storage_bucket,
            aws_sdb_reservation_domain = self.aws_sdb_reservation_domain,
            port = 5001
        )
        # Unlike DataServerStartTestCase, the server is started here so each
        # test runs against a live server.
        return self.dataserver.start()
    def tearDown(self):
        # Shut the server down, then clear storage, then delete the bucket.
        deferreds = []
        deferreds.append(self.dataserver.shutdown())
        d = DeferredList(deferreds)
        d.addCallback(self._tearDownCallback)
        return d
    def _tearDownCallback(self, data):
        deferreds = []
        deferreds.append(self.dataserver.clearStorage())
        d = DeferredList(deferreds)
        d.addCallback(self._tearDownCallback2)
        return d
    def _tearDownCallback2(self, data):
        s3 = AmazonS3(self.aws_access_key_id, self.aws_secret_access_key)
        deferreds = []
        deferreds.append(s3.deleteBucket(self.aws_s3_storage_bucket))
        d = DeferredList(deferreds)
        return d
    def test_01_clearStorage(self):
        # Clearing storage on a fresh server should succeed (empty no-op).
        d = self.dataserver.clearStorage()
        return d | tests/dataservertest.py | import os
import hashlib
from twisted.internet.defer import DeferredList
from twisted.trial import unittest
import yaml
from awspider.servers import DataServer
from awspider.aws import AmazonS3
class DataServerStartTestCase(unittest.TestCase):
    """Starts a DataServer built from tests/config.yaml, then shuts it down.

    NOTE(review): ``raiseConfigException`` is called below but is not defined
    on this class or on ``unittest.TestCase`` -- confirm it exists on the
    base class actually used at runtime.
    """

    def setUp(self):
        """Read AWS credentials from config.yaml and construct a DataServer."""
        config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "config.yaml"))
        if not os.path.isfile(config_path):
            self.raiseConfigException(config_path)
        # safe_load never constructs arbitrary Python objects from the YAML,
        # and the context manager closes the file handle deterministically
        # (the old code used yaml.load on an unclosed open() handle).
        with open(config_path, 'r') as config_file:
            config = yaml.safe_load(config_file)
        if "aws_access_key_id" not in config or "aws_secret_access_key" not in config:
            self.raiseConfigException(config_path)
        # Per-test-class hash keeps the S3 bucket / SDB domain names unique so
        # test classes do not clobber each other's cloud resources.  The
        # explicit encode makes hashing work on Python 3 (sha256 rejects str).
        self.uuid = hashlib.sha256(("%s%s%s" % (
            config["aws_access_key_id"],
            config["aws_secret_access_key"],
            self.__class__.__name__)).encode("utf-8")).hexdigest()
        self.aws_access_key_id = config["aws_access_key_id"]
        self.aws_secret_access_key = config["aws_secret_access_key"]
        self.aws_s3_storage_bucket = "%s_storage" % self.uuid
        self.aws_sdb_reservation_domain = "%s_reservation" % self.uuid
        self.dataserver = DataServer(
            aws_access_key_id = self.aws_access_key_id,
            aws_secret_access_key = self.aws_secret_access_key,
            aws_s3_storage_bucket = self.aws_s3_storage_bucket,
            aws_sdb_reservation_domain = self.aws_sdb_reservation_domain,
            port = 5001
        )

    def tearDown(self):
        """Clear server storage, then delete the backing S3 bucket."""
        d = DeferredList([self.dataserver.clearStorage()])
        d.addCallback(self._tearDownCallback)
        return d

    def _tearDownCallback(self, data):
        # Storage is empty; now remove the bucket itself.
        s3 = AmazonS3(self.aws_access_key_id, self.aws_secret_access_key)
        return DeferredList([s3.deleteBucket(self.aws_s3_storage_bucket)])

    def testStart(self):
        """The server starts successfully and can then be shut down."""
        d = self.dataserver.start()
        d.addCallback(self._startCallback)
        return d

    def _startCallback(self, data):
        return self.dataserver.shutdown()
class DataServerTestCase(unittest.TestCase):
    """Tests against a DataServer that is started in setUp and torn down after.

    NOTE(review): ``raiseConfigException`` is called below but is not defined
    on this class or on unittest.TestCase -- confirm it exists on the real
    base class.  Also, ``yaml.load`` without a Loader and ``sha256`` of a str
    only work on legacy Python/PyYAML versions.
    """
    def setUp(self):
        # Read AWS credentials from tests/config.yaml next to this file.
        config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "config.yaml"))
        if not os.path.isfile(config_path):
            self.raiseConfigException(config_path)
        config = yaml.load(open(config_path, 'r').read())
        if not "aws_access_key_id" in config or "aws_secret_access_key" not in config:
            self.raiseConfigException(config_path)
        # Per-test-class hash keeps bucket/domain names collision-free.
        self.uuid = hashlib.sha256("%s%s%s" % (
            config["aws_access_key_id"],
            config["aws_secret_access_key"],
            self.__class__.__name__)).hexdigest()
        self.aws_access_key_id = config["aws_access_key_id"]
        self.aws_secret_access_key = config["aws_secret_access_key"]
        self.aws_s3_storage_bucket = "%s_storage" % self.uuid
        self.aws_sdb_reservation_domain = "%s_reservation" % self.uuid
        self.dataserver = DataServer(
            aws_access_key_id = self.aws_access_key_id,
            aws_secret_access_key = self.aws_secret_access_key,
            aws_s3_storage_bucket = self.aws_s3_storage_bucket,
            aws_sdb_reservation_domain = self.aws_sdb_reservation_domain,
            port = 5001
        )
        # Unlike DataServerStartTestCase, the server is started here so each
        # test runs against a live server.
        return self.dataserver.start()
    def tearDown(self):
        # Shut the server down, then clear storage, then delete the bucket.
        deferreds = []
        deferreds.append(self.dataserver.shutdown())
        d = DeferredList(deferreds)
        d.addCallback(self._tearDownCallback)
        return d
    def _tearDownCallback(self, data):
        deferreds = []
        deferreds.append(self.dataserver.clearStorage())
        d = DeferredList(deferreds)
        d.addCallback(self._tearDownCallback2)
        return d
    def _tearDownCallback2(self, data):
        s3 = AmazonS3(self.aws_access_key_id, self.aws_secret_access_key)
        deferreds = []
        deferreds.append(s3.deleteBucket(self.aws_s3_storage_bucket))
        d = DeferredList(deferreds)
        return d
    def test_01_clearStorage(self):
        # Clearing storage on a fresh server should succeed (empty no-op).
        d = self.dataserver.clearStorage()
        return d | 0.288068 | 0.05694 |
import argparse
import contextlib
import os
import time
import unittest
import unittest.mock
import sfs.cli as cli
import sfs.core as core
import sfs.events as events
import sfs.exceptions as exceptions
import sfs.file_system as fs
import sfs.helper as sfs_helper
import sfs.log_utils as log
import tests.helper as test_helper
import sfs.ops.ops_collection as ops_collection
import sfs.ops.ops_dedup as ops_dedup
import sfs.ops.ops_main as ops_main
import sfs.ops.ops_merge as ops_merge
import sfs.ops.ops_query as ops_query
# Settings
# Register sub-command parsers via the CLI_REGISTRY event so every imported
# ops module attaches its arguments to cli.command_subparsers.
events.invoke_subscribers(events.events['CLI_REGISTRY'], cli.command_subparsers, parents=[])
# Disable logging; test output is captured through the mocked cli_output instead.
log.logger.disabled = True
# Helpers
def cli_exec(cmd, ignore_errors=False):
    """Run a CLI command with the output logger patched out.

    Returns the list of calls recorded against sfs.log_utils.cli_output,
    including anything the cli_manager context writes on exit.
    """
    with unittest.mock.patch('sfs.log_utils.cli_output') as mocked_output:
        with cli.cli_manager(cmd, exit_on_error=False, raise_error=not ignore_errors) as parsed:
            events.invoke_subscribers(events.command_key(parsed.command), parsed)
        # Read the calls only after cli_manager has exited, so error output
        # emitted during its cleanup is included.
        return mocked_output.call_args_list
@contextlib.contextmanager
def change_cwd(path):
    """Context manager that runs its body with *path* as the working directory.

    The previous working directory is restored even when the body raises;
    the original version skipped the restore on exceptions, leaking the
    directory change into subsequent tests.
    """
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)
def prepare_args(*args, **kwargs):
    """Mirror unittest.mock's call-record shape: (positional, keyword)."""
    return (args, kwargs)
def prepare_validation_error(message):
    """Build the CLI output string for a validation failure."""
    return "%s %s" % (cli.error_messages['VALIDATION'], message)
def prepare_internal_error_error(message):
    """Build the CLI output string for an internal error."""
    return "%s %s" % (cli.error_messages['INTERNAL'], message)
class CLIManagerTests(unittest.TestCase):
    """Verifies cli.cli_manager's argument parsing and error reporting."""
    def test_cli_manager(self):
        # Happy path: args are parsed and nothing is written to CLI output.
        test_cmd = [ops_main.commands['SFS_INIT']]
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(test_cmd, exit_on_error=False) as args:
                pass
        self.assertEqual(argparse.Namespace(command=ops_main.commands['SFS_INIT'], verbose=False), args)
        self.assertIsNone(cli_output.call_args)
    def test_cli_manager_validation_error(self):
        # A CLIValidationException raised inside the manager is suppressed
        # and reported as a validation error message.
        test_cmd = [ops_main.commands['SFS_INIT']]
        exception_message = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(test_cmd, exit_on_error=False) as args:
                raise exceptions.CLIValidationException(exception_message)
        self.assertEqual(prepare_args(prepare_validation_error(exception_message)), cli_output.call_args)
    def test_cli_manager_internal_error(self):
        # An SFSException is reported as an internal error message.
        test_cmd = [ops_main.commands['SFS_INIT']]
        exception_message = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(test_cmd, exit_on_error=False) as args:
                raise exceptions.SFSException(exception_message)
        self.assertEqual(prepare_args(prepare_internal_error_error(exception_message)), cli_output.call_args)
    def test_cli_manager_unknown_error(self):
        # Any other exception maps to the generic UNKNOWN error message.
        test_cmd = [ops_main.commands['SFS_INIT']]
        exception_message = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(test_cmd, exit_on_error=False) as args:
                raise Exception(exception_message)
        self.assertEqual(prepare_args(cli.error_messages['UNKNOWN']), cli_output.call_args)
class MainOpsCLITests(test_helper.TestCaseWithFS):
    """CLI tests for the top-level SFS commands (init, is-sfs)."""
    def test_init(self):
        """`sfs init` works in an empty directory and refuses a non-empty one."""
        with unittest.mock.patch('sfs.core.SFS.init_sfs') as init_sfs:
            path = self.TESTS_BASE
            with change_cwd(path):
                # Initializes SFS in an empty directory
                output = cli_exec([ops_main.commands['SFS_INIT']])
                self.assertEqual([], output)
                self.assertEqual(1, len(init_sfs.call_args_list))
                self.assertEqual(prepare_args(path), init_sfs.call_args)
                # Add a file to the target directory
                os.mkdir(os.path.join(path, 'test'))
                # Cannot initialize SFS in a non-empty directory
                output = cli_exec([ops_main.commands['SFS_INIT']], ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(ops_main.messages['INIT']['ERROR']['NON_EMPTY_DIR']))
                ], output)
                # init_sfs must not have been called a second time
                self.assertEqual(1, len(init_sfs.call_args_list))
    def test_init_inside_sfs(self):
        """`sfs init` refuses to nest one SFS inside another."""
        # Initialize an SFS
        path = self.TESTS_BASE
        core.SFS.init_sfs(path)
        # Cannot initialize a nested SFS
        with unittest.mock.patch('sfs.core.SFS.init_sfs') as init_sfs:
            path = os.path.join(path, 'test')
            os.mkdir(path)
            with change_cwd(path):
                output = cli_exec([ops_main.commands['SFS_INIT']], ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(ops_main.messages['INIT']['ERROR']['NESTED_SFS']))
                ], output)
                self.assertEqual(0, len(init_sfs.call_args_list))
                self.assertIsNone(init_sfs.call_args)
    def test_is_sfs(self):
        """`sfs is-sfs` reports the SFS root, defaulting to the current directory."""
        # Initialize an SFS
        sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        os.mkdir(sfs_root)
        core.SFS.init_sfs(sfs_root)
        sfs = core.SFS.get_by_path(sfs_root)
        with unittest.mock.patch('sfs.core.SFS.get_by_path') as get_by_path:
            get_by_path.return_value = sfs
            for path in [sfs_root, os.path.join(sfs_root, 'nested')]:
                os.makedirs(path, exist_ok=True)
                # Works with path argument
                output = cli_exec([ops_main.commands['IS_SFS'], path])
                self.assertEqual([
                    prepare_args("{}{}".format(ops_main.messages['IS_SFS']['OUTPUT']['YES'], sfs_root))
                ], output)
                self.assertEqual(prepare_args(path), get_by_path.call_args)
                # Uses current directory if path not specified
                with change_cwd(path):
                    output = cli_exec([ops_main.commands['IS_SFS']])
                    self.assertEqual([
                        prepare_args("{}{}".format(ops_main.messages['IS_SFS']['OUTPUT']['YES'], sfs_root))
                    ], output)
                    self.assertEqual(prepare_args(path), get_by_path.call_args)
            # Called correct no of time
            self.assertEqual(4, len(get_by_path.call_args_list))
            # Output is negative for paths outside SFS
            get_by_path.return_value = None
            for path in [self.TESTS_BASE, os.path.join(self.TESTS_BASE, 'nested')]:
                output = cli_exec([ops_main.commands['IS_SFS'], path])
                self.assertEqual([
                    prepare_args(ops_main.messages['IS_SFS']['OUTPUT']['NO'])
                ], output)
                self.assertEqual(prepare_args(path), get_by_path.call_args)
            # Called correct no of time
            self.assertEqual(6, len(get_by_path.call_args_list))
class CollectionOpsCLITests(test_helper.TestCaseWithFS):
    def __init__(self, *args, **kwargs):
        """Define the sample collection tree and the SFS/collection paths."""
        super(CollectionOpsCLITests, self).__init__(*args, **kwargs)
        # Directory tree materialized under self.col_root by setUp().
        self.col_tree = {
            'files': ['file_a', 'file_b'],
            'links': ['link_a'],
            'dirs': {
                'dir_a': {
                    'files': ['file_aa']
                }
            }
        }
        # SFS root and collection root live side by side under TESTS_BASE.
        self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        self.col_root = os.path.join(self.TESTS_BASE, 'col')
        self.col_name = 'col'
    def setUp(self):
        """Create SFS root and collection tree, then cd into the SFS root."""
        super(CollectionOpsCLITests, self).setUp()
        # Create collection and SFS nodes
        os.mkdir(self.sfs_root)
        os.mkdir(self.col_root)
        self.create_fs_tree(self.col_tree, base=self.col_root)
        # Change working directory to sfs root and save old value
        self.old_cwd = os.getcwd()
        os.chdir(self.sfs_root)
        core.SFS.init_sfs(self.sfs_root)
        self.sfs = core.SFS.get_by_path(self.sfs_root)
    def tearDown(self):
        """Restore the working directory saved in setUp."""
        super(CollectionOpsCLITests, self).tearDown()
        # Restore working directory
        os.chdir(self.old_cwd)
    def _test_not_sfs_dir(self, cmd, msg, *mocked_modules):
        """Assert that running ``cmd`` outside an SFS reports ``msg`` and that
        none of the given mocked modules is ever invoked."""
        not_sfs_dir = self.TESTS_BASE
        for mocked_module in mocked_modules:
            with unittest.mock.patch(mocked_module) as mocked:
                with change_cwd(not_sfs_dir):
                    output = cli_exec(cmd, ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(msg))
                ], output)
                self.assertEqual(0, len(mocked.call_args_list))
    def test_add_collection(self):
        """`add-col` forwards name/path and defaults the name to the dir name."""
        dummy_sfs_updates = core.SfsUpdates(added=4, deleted=2, updated=3)
        col_name = 'test_col'
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            add_collection.return_value = dummy_sfs_updates
            # Outputs success message to terminal
            output = cli_exec([ops_collection.commands['ADD_COL'], self.col_root, '--name', col_name])
            self.assertEqual([
                prepare_args("{} {}".format(ops_collection.messages['ADD_COL']['OUTPUT'], 4))
            ], output)
            # Receives correct arguments
            self.assertEqual(1, len(add_collection.call_args_list))
            self.assertEqual(prepare_args(col_name, self.col_root), add_collection.call_args)
            # Collection name defaults to collection root name
            cli_exec([ops_collection.commands['ADD_COL'], self.col_root])
            self.assertEqual(2, len(add_collection.call_args_list))
            self.assertEqual(prepare_args(self.col_name, self.col_root), add_collection.call_args)
    def test_add_collection_validations(self):
        """`add-col` rejects bad paths, nesting, and duplicate names."""
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['ADD_COL'], self.col_root],
            ops_collection.messages['ADD_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.add_collection'
        )
        # Path should be an existing directory
        not_dir = os.path.join(self.TESTS_BASE, 'not_dir')
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], not_dir], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['INVALID_PATH']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Cannot add path within an SFS
        within_sfs = os.path.join(self.sfs_root, 'nested_dir')
        os.mkdir(within_sfs)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], within_sfs], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NESTED_SFS']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Actually add a collection
        col_name = 'test_col'
        self.sfs.add_collection(col_name, self.col_root)
        # Cannot add path within a collection
        within_col = os.path.join(self.col_root, 'nested_dir')
        os.mkdir(within_col)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], within_col], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NESTED_COL']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Cannot add collection with a duplicate name
        new_col = os.path.join(self.TESTS_BASE, 'col2')
        os.mkdir(new_col)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], new_col, '--name', col_name], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NAME_EXISTS']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
    def test_is_collection(self):
        """`is-col` reports the collection root or a negative message."""
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['IS_COL'], self.col_root],
            ops_collection.messages['IS_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.get_collection_by_path'
        )
        col_name = 'test_col'
        sfs = core.SFS.get_by_path(self.sfs_root)
        sfs.add_collection(col_name, self.col_root)
        col = sfs.get_collection_by_name(col_name)
        with unittest.mock.patch('sfs.core.SFS.get_collection_by_path') as get_collection_by_path:
            # Outputs positively for paths within a collection
            get_collection_by_path.return_value = col
            for path in [self.col_root, os.path.join(self.col_root, 'nested')]:
                output = cli_exec([ops_collection.commands['IS_COL'], path])
                self.assertEqual([
                    prepare_args("{} {}".format(ops_collection.messages['IS_COL']['OUTPUT']['YES'], self.col_root))
                ], output)
                self.assertEqual(prepare_args(path), get_collection_by_path.call_args)
            # Called correct no of times
            self.assertEqual(2, len(get_collection_by_path.call_args_list))
            # Outputs negatively for paths outside collections
            get_collection_by_path.return_value = None
            for path in [self.TESTS_BASE, os.path.join(self.TESTS_BASE, 'nested')]:
                output = cli_exec([ops_collection.commands['IS_COL'], path])
                self.assertEqual([
                    prepare_args(ops_collection.messages['IS_COL']['OUTPUT']['NO'])
                ], output)
                self.assertEqual(prepare_args(path), get_collection_by_path.call_args)
            # Called correct no of times
            self.assertEqual(4, len(get_collection_by_path.call_args_list))
    def test_list_cols(self):
        """`list-cols` prints a count plus name/root line per collection."""
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['LIST_COLS']],
            ops_collection.messages['LIST_COLS']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.get_all_collections'
        )
        # Outputs negatively when there are no collections
        with unittest.mock.patch('sfs.core.SFS.get_all_collections') as get_all_collections:
            get_all_collections.return_value = {}
            output = cli_exec([ops_collection.commands['LIST_COLS']])
            self.assertEqual([
                prepare_args(ops_collection.messages['LIST_COLS']['OUTPUT']['NOT_AVAILABLE'])
            ], output)
            self.assertEqual(prepare_args(), get_all_collections.call_args)
            self.assertEqual(1, len(get_all_collections.call_args_list))
        # Add 2 collections
        col1_name = 'col1'
        col1_root = self.col_root
        col2_name = 'col2'
        col2_root = os.path.join(self.TESTS_BASE, 'col2')
        os.mkdir(col2_root)
        sfs = core.SFS.get_by_path(self.sfs_root)
        sfs.add_collection(col1_name, col1_root)
        sfs.add_collection(col2_name, col2_root)
        sfs_list = sfs.get_all_collections()
        with unittest.mock.patch('sfs.core.SFS.get_all_collections') as get_all_collections:
            get_all_collections.return_value = sfs_list
            output = cli_exec([ops_collection.commands['LIST_COLS']])
            # First line is the count, then one name/root line per collection.
            self.assertEqual([
                prepare_args("{}{}".format(ops_collection.messages['LIST_COLS']['OUTPUT']['COUNT'], len(sfs_list))),
                prepare_args('{}"{}"\t{}"{}"'.format(
                    ops_collection.messages['LIST_COLS']['OUTPUT']['COL_NAME'], col1_name,
                    ops_collection.messages['LIST_COLS']['OUTPUT']['COL_ROOT'], col1_root
                )),
                prepare_args('{}"{}"\t{}"{}"'.format(
                    ops_collection.messages['LIST_COLS']['OUTPUT']['COL_NAME'], col2_name,
                    ops_collection.messages['LIST_COLS']['OUTPUT']['COL_ROOT'], col2_root
                ))
            ], output)
            self.assertEqual(prepare_args(), get_all_collections.call_args)
            self.assertEqual(1, len(get_all_collections.call_args_list))
    def test_sync_col(self):
        """SYNC_COL must run inside an SFS, report added/updated/deleted link
        counts after syncing a collection, and reject unknown collection names.
        """
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['SYNC_COL'], self.col_name],
            ops_collection.messages['SYNC_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.Collection.update',
            'sfs.core.SFS.del_orphans'
        )
        # Add a collection
        sfs = core.SFS.get_by_path(self.sfs_root)
        sfs.add_collection(self.col_name, self.col_root)
        # Canned results: sync adds/updates links, orphan removal deletes some
        updates_in_sync = core.SfsUpdates(added=3, updated=5, deleted=0)
        updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=4)
        with unittest.mock.patch('sfs.core.Collection.update') as update_collection:
            with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
                update_collection.return_value = updates_in_sync
                del_orphans.return_value = updates_in_del
                # Outputs number of links updated
                output = cli_exec([ops_collection.commands['SYNC_COL'], self.col_name])
                self.assertEqual([
                    prepare_args(
                        '{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['ADDED'], updates_in_sync.added)
                    ),
                    prepare_args(
                        '{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['UPDATED'], updates_in_sync.updated)
                    ),
                    prepare_args(
                        '{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['DELETED'], updates_in_del.deleted)
                    )
                ], output)
                # update() takes no args; del_orphans is scoped to this collection's root
                self.assertEqual([prepare_args()], update_collection.call_args_list)
                self.assertEqual([prepare_args(col_root=self.col_root)], del_orphans.call_args_list)
        # Reports negatively for unknown collection name
        output = cli_exec([ops_collection.commands['SYNC_COL'], 'unknown_col'], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(
                ops_collection.messages['SYNC_COL']['ERROR']['NOT_A_COL_NAME']
            ))
        ], output)
    def test_del_col(self):
        """DEL_COL must run inside an SFS, delete the named collection (reporting
        how many orphaned links were removed) and reject unknown names.
        """
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['DEL_COL'], self.col_name],
            ops_collection.messages['DEL_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.del_collection'
        )
        # Add a collection
        sfs = core.SFS.get_by_path(self.sfs_root)
        sfs.add_collection(self.col_name, self.col_root)
        updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=3)
        with unittest.mock.patch('sfs.core.SFS.del_collection') as del_collection:
            with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
                del_collection.return_value = None
                del_orphans.return_value = updates_in_del
                # Outputs the orphan-deletion count after removing the collection
                output = cli_exec([ops_collection.commands['DEL_COL'], self.col_name])
                self.assertEqual([
                    prepare_args('{}{}'.format(
                        ops_collection.messages['DEL_ORPHANS']['OUTPUT'], updates_in_del.deleted
                    ))
                ], output)
                # Deletion is requested by collection name
                self.assertEqual([prepare_args(self.col_name)], del_collection.call_args_list)
        # Reports negatively for unknown collection name
        output = cli_exec([ops_collection.commands['DEL_COL'], 'unknown_col'], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(
                ops_collection.messages['DEL_COL']['ERROR']['NOT_A_COL_NAME']
            ))
        ], output)
def test_del_orphans(self):
# Must be inside an SFS
self._test_not_sfs_dir(
[ops_collection.commands['DEL_ORPHANS']],
ops_collection.messages['DEL_ORPHANS']['ERROR']['NOT_IN_SFS'],
'sfs.core.SFS.del_orphans'
)
updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=8)
# Reports no of links deleted
with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
del_orphans.return_value = updates_in_del
output = cli_exec([ops_collection.commands['DEL_ORPHANS']])
self.assertEqual([
prepare_args('{}{}'.format(ops_collection.messages['DEL_ORPHANS']['OUTPUT'], updates_in_del.deleted))
], output)
self.assertEqual([prepare_args()], del_orphans.call_args_list)
class QueryOpsCLITests(test_helper.TestCaseWithFS):
    """CLI tests for the QUERY command on links and directories."""

    def __init__(self, *args, **kwargs):
        super(QueryOpsCLITests, self).__init__(*args, **kwargs)
        # Fixture paths: an SFS root and one collection holding a single file
        self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        self.col_root = os.path.join(self.TESTS_BASE, 'col')
        self.col_name = 'col'
        # Real file inside the collection
        self.col_path = os.path.join(self.col_root, 'file')
        # Corresponding link inside the SFS
        self.link_path = os.path.join(self.sfs_root, self.col_name, 'file')

    def setUp(self):
        """Create an SFS with one collection containing a single 100-byte file."""
        super(QueryOpsCLITests, self).setUp()
        # Create collection and SFS nodes
        os.mkdir(self.sfs_root)
        os.mkdir(self.col_root)
        test_helper.dummy_file(self.col_path, 100)
        core.SFS.init_sfs(self.sfs_root)
        self.sfs = core.SFS.get_by_path(self.sfs_root)
        self.sfs.add_collection(self.col_name, self.col_root)
        self.col = self.sfs.get_collection_by_name(self.col_name)

    def test_query_link(self):
        """Querying a link prints its collection name, source path, ctime and size."""
        # Reports link info
        output = cli_exec([ops_query.commands['QUERY'], self.link_path])
        self.assertEqual([
            prepare_args("{}{}".format(ops_query.messages['QUERY']['OUTPUT']['LINK']['COL_NAME'], self.col_name)),
            prepare_args("{}{}".format(ops_query.messages['QUERY']['OUTPUT']['LINK']['COL_PATH'], self.col_path)),
            prepare_args("{}{}".format(
                ops_query.messages['QUERY']['OUTPUT']['LINK']['CTIME'],
                time.ctime(os.stat(self.col_path).st_ctime)
            )),
            prepare_args("{}{}".format(
                ops_query.messages['QUERY']['OUTPUT']['LINK']['SIZE'],
                sfs_helper.get_readable_size(100)
            )),
        ], output)

    def test_query_directory(self):
        """Querying a directory prints the aggregated directory statistics."""
        dir_path = self.sfs_root
        # Canned statistics returned by the mocked aggregator
        dir_stats = ops_query.DirectoryStats()
        dir_stats.size = 1
        dir_stats.ctime = 2
        dir_stats.active_links = 3
        dir_stats.orphan_links = 4
        dir_stats.foreign_links = 5
        dir_stats.files = 6
        dir_stats.sub_directories = 7
        # Reports directory info. If path not specified current directory is used
        with unittest.mock.patch('sfs.ops.ops_query.compute_directory_stats') as compute_directory_stats:
            compute_directory_stats.return_value = dir_stats
            with change_cwd(dir_path):
                for output in [
                    cli_exec([ops_query.commands['QUERY'], dir_path]),
                    cli_exec([ops_query.commands['QUERY']]),
                ]:
                    self.assertEqual([
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['SIZE'],
                            sfs_helper.get_readable_size(dir_stats.size)
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['CTIME'], time.ctime(dir_stats.ctime)
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['ACTIVE_LINKS'], dir_stats.active_links
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['FOREIGN_LINKS'], dir_stats.foreign_links
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['ORPHAN_LINKS'], dir_stats.orphan_links
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['FILES'], dir_stats.files
                        )),
                        prepare_args("{}{}".format(
                            ops_query.messages['QUERY']['OUTPUT']['DIR']['SUB_DIRECTORIES'], dir_stats.sub_directories
                        ))
                    ], output)
            # Called once per invocation, with the SFS instance and the path
            self.assertIsNotNone(compute_directory_stats.call_args)
            self.assertEqual(2, len(compute_directory_stats.call_args[0]))
            self.assertIsInstance(compute_directory_stats.call_args[0][0], core.SFS)
            self.assertEqual(compute_directory_stats.call_args[0][1], dir_path)
            self.assertEqual(2, len(compute_directory_stats.call_args_list))

    def test_query_link_validations(self):
        """QUERY rejects paths outside an SFS, plain files, foreign links,
        and links whose saved stats are missing.
        """
        # Must be inside an SFS
        not_sfs = self.TESTS_BASE
        output = cli_exec([ops_query.commands['QUERY'], not_sfs], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['NOT_IN_SFS']))
        ], output)
        # Path must be link or directory
        file_path = os.path.join(self.sfs_root, 'test_file')
        test_helper.dummy_file(file_path)
        output = cli_exec([ops_query.commands['QUERY'], file_path], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['NOT_LINK_OR_DIR']))
        ], output)
        # Link must belong to a collection
        foreign_link = os.path.join(self.sfs_root, 'foreign_link')
        test_helper.dummy_link(foreign_link)
        output = cli_exec([ops_query.commands['QUERY'], foreign_link], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['COLLECTION_NOT_FOUND']))
        ], output)
        # Stats must be available — remove the saved stats file to trigger the error
        stats_path = os.path.join(self.col.stats_base, 'file')
        os.unlink(stats_path)
        output = cli_exec([ops_query.commands['QUERY'], self.link_path], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['STATS_NOT_FOUND']))
        ], output)
class DedupOpsCLITests(test_helper.TestCaseWithFS):
    """CLI tests for duplicate detection (FIND_DUPS) and removal (DEDUP)."""

    def __init__(self, *args, **kwargs):
        super(DedupOpsCLITests, self).__init__(*args, **kwargs)
        self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        self.col_root = os.path.join(self.TESTS_BASE, 'col')
        self.col_name = 'col'

    def setUp(self):
        """Create an SFS over a collection whose tree contains duplicated
        content across dir1/dir2/dir3 (FIND_DUPS over the root reports 4
        duplicates for this layout).
        """
        super(DedupOpsCLITests, self).setUp()
        # Create collection and SFS nodes
        os.mkdir(self.sfs_root)
        col_files = [(os.path.join(self.col_root, rel_path), size) for rel_path, size in [
            (os.path.join('dir1', 'file1'), 100),
            (os.path.join('dir1', 'file2'), 200),
            (os.path.join('dir1', 'file3'), 500),
            (os.path.join('dir2', 'file1'), 100),
            (os.path.join('dir2', 'file2'), 300),
            (os.path.join('dir2', 'file4'), 500),
            (os.path.join('dir3', 'file2'), 200),
        ]]
        for col_file, size in col_files:
            os.makedirs(os.path.dirname(col_file), exist_ok=True)
            test_helper.dummy_file(col_file, size)
        core.SFS.init_sfs(self.sfs_root)
        self.sfs = core.SFS.get_by_path(self.sfs_root)
        self.sfs.add_collection(self.col_name, self.col_root)
        self.col = self.sfs.get_collection_by_name(self.col_name)

    def test_find_dups(self):
        """FIND_DUPS validates its target, reports the duplicate count and JSON
        path, refuses to clobber an existing JSON unless --override is given,
        and forwards the keep-mode derived from --del-duplicates.
        """
        # Must be inside an SFS
        not_sfs = self.TESTS_BASE
        output = cli_exec([ops_dedup.commands['FIND_DUPS'], not_sfs], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['NOT_IN_SFS']))
        ], output)
        # Path must be a valid directory
        not_dir = os.path.join(self.sfs_root, 'not_dir')
        output = cli_exec([ops_dedup.commands['FIND_DUPS'], not_dir], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['INVALID_PATH']))
        ], output)
        # Reports duplicate count and JSON path
        output = cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=False)
        self.assertEqual([
            prepare_args("{}{}".format(ops_dedup.messages['FIND_DUPS']['OUTPUT']['DUPLICATE_COUNT'], 4)),
            prepare_args("{}{}".format(
                ops_dedup.messages['FIND_DUPS']['OUTPUT']['JSON_PATH'], ops_dedup.get_json_path(self.sfs_root)
            ))
        ], output)
        # Reports that no duplicates were found (dir3 holds a single file)
        output = cli_exec([
            ops_dedup.commands['FIND_DUPS'], os.path.join(self.sfs_root, self.col_name, 'dir3')
        ], ignore_errors=False)
        self.assertEqual([
            prepare_args(ops_dedup.messages['FIND_DUPS']['OUTPUT']['NO_DUPLICATES'])
        ], output)
        # JSON must not already exist
        output = cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['JSON_EXISTS']))
        ], output)
        # JSON can be overriden with the override flag
        cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--override'], ignore_errors=False)
        # Delete duplicate flag marks files for deletion correctly:
        # keep='all' by default, keep='first' when --del-duplicates is set
        with unittest.mock.patch('sfs.ops.ops_dedup.find_dups') as find_dups:
            find_dups.return_value = []
            cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--override'], ignore_errors=False)
            self.assertEqual(find_dups.call_args[1]['keep'], 'all')
            cli_exec([
                ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--del-duplicates', '--override'
            ], ignore_errors=False)
            self.assertEqual(find_dups.call_args[1]['keep'], 'first')

    def test_del_dups(self):
        """DEDUP validates its target, requires a dedup JSON, reports the number
        of links deleted, and removes the JSON only when --del-json is given.
        """
        # Must be inside an SFS
        not_sfs = self.TESTS_BASE
        output = cli_exec([ops_dedup.commands['DEDUP'], not_sfs], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['NOT_IN_SFS']))
        ], output)
        # Path must be a valid directory
        not_dir = os.path.join(self.sfs_root, 'not_dir')
        output = cli_exec([ops_dedup.commands['DEDUP'], not_dir], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['INVALID_PATH']))
        ], output)
        # Dedup JSON must be present in the target directory
        output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['JSON_NOT_FOUND']))
        ], output)
        # Create JSON
        cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=False)
        # Outputs number of links deleted (nothing marked => 0)
        output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root], ignore_errors=False)
        self.assertEqual([
            prepare_args("{}{}".format(ops_dedup.messages['DEDUP']['OUTPUT'], 0))
        ], output)
        # Does not delete dedup JSON by default
        json_path = ops_dedup.get_json_path(self.sfs_root)
        self.assertTrue(os.path.isfile(json_path))
        # Deletes dedup JSON with flag
        cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--del-duplicates', '--override'],
                 ignore_errors=False)
        output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root, '--del-json'], ignore_errors=False)
        self.assertEqual([
            prepare_args("{}{}".format(ops_dedup.messages['DEDUP']['OUTPUT'], 2))
        ], output)
        self.assertFalse(os.path.isfile(json_path))
class TestMergeOps(test_helper.TestCaseWithFS):
    """CLI tests for the MERGE command."""

    def test_merge(self):
        """Exercise MERGE validations, conflict-JSON handling, flags and output."""
        # Create SFS, target and source
        sfs_root = self.TESTS_BASE
        core.SFS.init_sfs(sfs_root)
        self.create_fs_tree({
            'dirs': {
                'dir1': {},
                'dir2': {},
            }
        }, base=sfs_root)
        target = os.path.join(sfs_root, 'dir1')
        source = os.path.join(sfs_root, 'dir2')
        # Target and source must be inside an SFS
        not_in_sfs = fs.expand_path(os.path.join(sfs_root, '..'))
        output = cli_exec([ops_merge.commands['MERGE'], not_in_sfs, source], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NOT_IN_SFS']))
        ], output)
        output = cli_exec([ops_merge.commands['MERGE'], target, not_in_sfs], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NOT_IN_SFS']))
        ], output)
        # Target and source must be valid paths
        not_dir = fs.expand_path(os.path.join(sfs_root, 'not_dir'))
        output = cli_exec([ops_merge.commands['MERGE'], not_dir, source], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['INVALID_PATH']))
        ], output)
        output = cli_exec([ops_merge.commands['MERGE'], target, not_dir], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['INVALID_PATH']))
        ], output)
        # Target and source cannot be nested within each other
        output = cli_exec([ops_merge.commands['MERGE'], sfs_root, source], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NESTED_DIR']))
        ], output)
        output = cli_exec([ops_merge.commands['MERGE'], target, sfs_root], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NESTED_DIR']))
        ], output)
        # Canned conflict used wherever a mocked conflict list is needed
        dummy_conflict = ops_merge.MergeConflict(
            'test_path', ops_merge.MergeConflict.FileStats('file1'), ops_merge.MergeConflict.FileStats('file2')
        )
        # JSON must exist when using conflicts JSON for merge
        output = cli_exec([ops_merge.commands['MERGE'], target, source, '--json'], ignore_errors=True)
        self.assertEqual([
            prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['JSON_NOT_FOUND']))
        ], output)
        # JSON is not generated in case of no conflicts or when continue flag is set
        cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
        self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
        with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
            get_merge_conflicts.return_value = [dummy_conflict]
            cli_exec([ops_merge.commands['MERGE'], target, source, '--continue'], ignore_errors=False)
            self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
        # JSON is generated in case of conflicts
        with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
            get_merge_conflicts.return_value = [dummy_conflict]
            cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
            self.assertTrue(os.path.isfile(ops_merge.get_json_path(target)))
        # JSON must not exist when creating conflicts JSON
        with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
            get_merge_conflicts.return_value = [dummy_conflict]
            output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['JSON_EXISTS']))
            ], output)
        # Validates merge conflicts JSON
        with unittest.mock.patch('sfs.ops.ops_merge.validate_merge_conflicts') as validate_merge_conflicts:
            with unittest.mock.patch('sfs.ops.ops_merge.MergeConflict.from_dict') as from_dict:
                path1 = os.path.join(target, 'file1')
                path2 = os.path.join(source, 'file1')
                validate_merge_conflicts.return_value = (path1, path2)
                from_dict.return_value = dummy_conflict
                output = cli_exec([ops_merge.commands['MERGE'], target, source, '--json'], ignore_errors=True)
                self.assertEqual([prepare_args(prepare_validation_error(
                    '{}: "{}", "{}"'.format(ops_merge.messages['MERGE']['ERROR']['INVALID_CONFLICTS'], path1, path2)
                ))], output)
                # Validator called once as (target, source, <conflicts>)
                self.assertEqual(1, len(validate_merge_conflicts.call_args_list))
                self.assertEqual(3, len(validate_merge_conflicts.call_args[0]))
                self.assertEqual((target, source), validate_merge_conflicts.call_args[0][:-1])
        # Override flag ignores existing JSON and overwrites it
        with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
            get_merge_conflicts.return_value = [dummy_conflict] * 2
            old_stats = os.stat(ops_merge.get_json_path(target))
            cli_exec([ops_merge.commands['MERGE'], target, source, '--override'], ignore_errors=False)
            new_stats = os.stat(ops_merge.get_json_path(target))
            # Two conflicts instead of one => the JSON size must change
            self.assertNotEqual(old_stats.st_size, new_stats.st_size)
        # Deletes JSON on completion with flag set
        cli_exec([ops_merge.commands['MERGE'], target, source, '--del-json', '--override'], ignore_errors=False)
        self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
        # Deletes source directory on completion with flag set
        self.create_fs_tree({
            'files': ['file_1'],
            'links': ['link_1'],
            'dirs': {
                'dir_1': {}
            }
        }, source)
        output = cli_exec([ops_merge.commands['MERGE'], target, source, '--del-source'], ignore_errors=False)
        self.assertEqual(
            prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['SOURCE_DELETED'], 2))
            , output[-1])
        self.assertFalse(os.path.isdir(source))
        os.mkdir(source)
        # Passes valid arguments to get_merge_conflicts
        with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
            get_merge_conflicts.return_value = [dummy_conflict] * 3
            output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
            self.assertEqual(1, len(get_merge_conflicts.call_args_list))
            self.assertEqual((target, source), get_merge_conflicts.call_args[0][1:])
            self.assertEqual([
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['CONFLICT_COUNT'], 3)),
                prepare_args("{}{}".format(
                    ops_merge.messages['MERGE']['OUTPUT']['JSON_PATH'], ops_merge.get_json_path(target)
                ))
            ], output)
            # Passes correct value of keep for every supported merge mode
            for keep in ops_merge.constants['MERGE_MODES'].values():
                cli_exec([ops_merge.commands['MERGE'], target, source, '--override', '--on-conflict', keep])
                self.assertEqual(keep, get_merge_conflicts.call_args[1]['keep'])
        # Passes valid arguments to merge and prints every summary counter
        with unittest.mock.patch('sfs.ops.ops_merge.merge') as merge:
            merge.return_value = {
                'DIRS_CREATED': 1,
                'DIRS_DELETED': 2,
                'FILES_MERGED': 3,
                'LINKS_MERGED': 4,
                'NODES_DELETED': 5,
                'NODES_RENAMED': 6,
            }
            output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
            self.assertEqual(1, len(merge.call_args_list))
            self.assertEqual(3, len(merge.call_args[0]))
            self.assertEqual((target, source), merge.call_args[0][:-1])
            self.assertEqual([
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['CONFLICT_COUNT'], 0)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['DIRS_CREATED'], 1)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['DIRS_DELETED'], 2)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['FILES_MERGED'], 3)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['LINKS_MERGED'], 4)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['NODES_DELETED'], 5)),
                prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['NODES_RENAMED'], 6))
            ], output)

import argparse
import contextlib
import os
import time
import unittest
import unittest.mock
import sfs.cli as cli
import sfs.core as core
import sfs.events as events
import sfs.exceptions as exceptions
import sfs.file_system as fs
import sfs.helper as sfs_helper
import sfs.log_utils as log
import tests.helper as test_helper
import sfs.ops.ops_collection as ops_collection
import sfs.ops.ops_dedup as ops_dedup
import sfs.ops.ops_main as ops_main
import sfs.ops.ops_merge as ops_merge
import sfs.ops.ops_query as ops_query
# Settings

# Register every op module's sub-command parser so the CLI commands
# exercised below are available
events.invoke_subscribers(events.events['CLI_REGISTRY'], cli.command_subparsers, parents=[])
# Disable logging to keep test output clean
log.logger.disabled = True
# Helpers
def cli_exec(cmd, ignore_errors=False):
    """Execute a CLI command and capture everything written to the terminal.

    The CLI output logger is mocked for the duration of the command and the
    recorded call list (one entry per output line) is returned.  When
    ``ignore_errors`` is true, errors are handled by the CLI manager instead
    of being re-raised to the caller.
    """
    with unittest.mock.patch('sfs.log_utils.cli_output') as output_logger:
        with cli.cli_manager(cmd, exit_on_error=False, raise_error=not ignore_errors) as args:
            events.invoke_subscribers(events.command_key(args.command), args)
        return output_logger.call_args_list
@contextlib.contextmanager
def change_cwd(path):
    """Provide a context with *path* as the current working directory.

    Bug fix: the previous implementation did not restore the original
    working directory when the body raised, leaking the changed cwd into
    subsequent tests.  The restore now runs in a ``finally`` clause.
    """
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)
def prepare_args(*args, **kwargs):
    """Pack the given arguments into an ``(args, kwargs)`` pair.

    The result mirrors the shape of entries in ``unittest.mock`` call lists,
    making expected calls directly comparable in assertions.
    """
    packed_call = (args, kwargs)
    return packed_call
def prepare_validation_error(message):
    """Return *message* prefixed the way the CLI reports validation errors."""
    prefix = cli.error_messages['VALIDATION']
    return "{} {}".format(prefix, message)
def prepare_internal_error_error(message):
    """Return *message* prefixed the way the CLI reports internal errors."""
    prefix = cli.error_messages['INTERNAL']
    return "{} {}".format(prefix, message)
class CLIManagerTests(unittest.TestCase):
    """Behaviour of cli.cli_manager: argument parsing and error reporting."""

    def test_cli_manager(self):
        """A clean run yields parsed arguments and writes no CLI output."""
        cmd = [ops_main.commands['SFS_INIT']]
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(cmd, exit_on_error=False) as args:
                pass
            expected = argparse.Namespace(command=ops_main.commands['SFS_INIT'], verbose=False)
            self.assertEqual(expected, args)
            self.assertIsNone(cli_output.call_args)

    def test_cli_manager_validation_error(self):
        """Validation errors are reported with the validation-error prefix."""
        cmd = [ops_main.commands['SFS_INIT']]
        msg = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(cmd, exit_on_error=False) as args:
                raise exceptions.CLIValidationException(msg)
            self.assertEqual(prepare_args(prepare_validation_error(msg)), cli_output.call_args)

    def test_cli_manager_internal_error(self):
        """SFS-internal errors are reported with the internal-error prefix."""
        cmd = [ops_main.commands['SFS_INIT']]
        msg = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(cmd, exit_on_error=False) as args:
                raise exceptions.SFSException(msg)
            self.assertEqual(prepare_args(prepare_internal_error_error(msg)), cli_output.call_args)

    def test_cli_manager_unknown_error(self):
        """Unexpected exceptions produce the generic unknown-error message."""
        cmd = [ops_main.commands['SFS_INIT']]
        msg = 'test message'
        with unittest.mock.patch('sfs.log_utils.cli_output') as cli_output:
            with cli.cli_manager(cmd, exit_on_error=False) as args:
                raise Exception(msg)
            self.assertEqual(prepare_args(cli.error_messages['UNKNOWN']), cli_output.call_args)
class MainOpsCLITests(test_helper.TestCaseWithFS):
    """CLI tests for SFS initialization and SFS membership queries."""

    def test_init(self):
        """SFS_INIT initializes the current directory only when it is empty."""
        with unittest.mock.patch('sfs.core.SFS.init_sfs') as init_sfs:
            path = self.TESTS_BASE
            with change_cwd(path):
                # Initializes SFS in an empty directory
                output = cli_exec([ops_main.commands['SFS_INIT']])
                self.assertEqual([], output)
                self.assertEqual(1, len(init_sfs.call_args_list))
                self.assertEqual(prepare_args(path), init_sfs.call_args)
                # Add a file to the target directory
                os.mkdir(os.path.join(path, 'test'))
                # Cannot initialize SFS in a non-empty directory
                output = cli_exec([ops_main.commands['SFS_INIT']], ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(ops_main.messages['INIT']['ERROR']['NON_EMPTY_DIR']))
                ], output)
                # init_sfs must not have been called again
                self.assertEqual(1, len(init_sfs.call_args_list))

    def test_init_inside_sfs(self):
        """SFS_INIT refuses to create an SFS nested inside an existing one."""
        # Initialize an SFS
        path = self.TESTS_BASE
        core.SFS.init_sfs(path)
        # Cannot initialize a nested SFS
        with unittest.mock.patch('sfs.core.SFS.init_sfs') as init_sfs:
            path = os.path.join(path, 'test')
            os.mkdir(path)
            with change_cwd(path):
                output = cli_exec([ops_main.commands['SFS_INIT']], ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(ops_main.messages['INIT']['ERROR']['NESTED_SFS']))
                ], output)
                self.assertEqual(0, len(init_sfs.call_args_list))
                self.assertIsNone(init_sfs.call_args)

    def test_is_sfs(self):
        """IS_SFS reports the SFS root for member paths and a negative
        message for paths outside any SFS; path defaults to the cwd.
        """
        # Initialize an SFS
        sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        os.mkdir(sfs_root)
        core.SFS.init_sfs(sfs_root)
        sfs = core.SFS.get_by_path(sfs_root)
        with unittest.mock.patch('sfs.core.SFS.get_by_path') as get_by_path:
            get_by_path.return_value = sfs
            for path in [sfs_root, os.path.join(sfs_root, 'nested')]:
                os.makedirs(path, exist_ok=True)
                # Works with path argument
                output = cli_exec([ops_main.commands['IS_SFS'], path])
                self.assertEqual([
                    prepare_args("{}{}".format(ops_main.messages['IS_SFS']['OUTPUT']['YES'], sfs_root))
                ], output)
                self.assertEqual(prepare_args(path), get_by_path.call_args)
                # Uses current directory if path not specified
                with change_cwd(path):
                    output = cli_exec([ops_main.commands['IS_SFS']])
                    self.assertEqual([
                        prepare_args("{}{}".format(ops_main.messages['IS_SFS']['OUTPUT']['YES'], sfs_root))
                    ], output)
                    self.assertEqual(prepare_args(path), get_by_path.call_args)
            # Called correct no of times
            self.assertEqual(4, len(get_by_path.call_args_list))
            # Output is negative for paths outside SFS
            get_by_path.return_value = None
            for path in [self.TESTS_BASE, os.path.join(self.TESTS_BASE, 'nested')]:
                output = cli_exec([ops_main.commands['IS_SFS'], path])
                self.assertEqual([
                    prepare_args(ops_main.messages['IS_SFS']['OUTPUT']['NO'])
                ], output)
                self.assertEqual(prepare_args(path), get_by_path.call_args)
            # Called correct no of times
            self.assertEqual(6, len(get_by_path.call_args_list))
class CollectionOpsCLITests(test_helper.TestCaseWithFS):
    def __init__(self, *args, **kwargs):
        super(CollectionOpsCLITests, self).__init__(*args, **kwargs)
        # Sample collection content (files, a link, and a nested dir) used
        # by the tests
        self.col_tree = {
            'files': ['file_a', 'file_b'],
            'links': ['link_a'],
            'dirs': {
                'dir_a': {
                    'files': ['file_aa']
                }
            }
        }
        self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
        self.col_root = os.path.join(self.TESTS_BASE, 'col')
        self.col_name = 'col'
    def setUp(self):
        """Create an SFS and a populated collection directory; cd into the SFS."""
        super(CollectionOpsCLITests, self).setUp()
        # Create collection and SFS nodes
        os.mkdir(self.sfs_root)
        os.mkdir(self.col_root)
        self.create_fs_tree(self.col_tree, base=self.col_root)
        # Change working directory to sfs root and save old value
        self.old_cwd = os.getcwd()
        os.chdir(self.sfs_root)
        core.SFS.init_sfs(self.sfs_root)
        self.sfs = core.SFS.get_by_path(self.sfs_root)
    def tearDown(self):
        """Undo the working-directory change made in setUp, then clean up."""
        super(CollectionOpsCLITests, self).tearDown()
        # Restore working directory
        os.chdir(self.old_cwd)
    def _test_not_sfs_dir(self, cmd, msg, *mocked_modules):
        """Assert that *cmd* fails with validation error *msg* when run from a
        directory outside any SFS, and that none of *mocked_modules* (dotted
        patch targets) are invoked in the process.
        """
        not_sfs_dir = self.TESTS_BASE
        for mocked_module in mocked_modules:
            with unittest.mock.patch(mocked_module) as mocked:
                with change_cwd(not_sfs_dir):
                    output = cli_exec(cmd, ignore_errors=True)
                self.assertEqual([
                    prepare_args(prepare_validation_error(msg))
                ], output)
                self.assertEqual(0, len(mocked.call_args_list))
    def test_add_collection(self):
        """ADD_COL reports the number of links added and forwards the
        collection name (defaulting to the root directory's name) and root.
        """
        dummy_sfs_updates = core.SfsUpdates(added=4, deleted=2, updated=3)
        col_name = 'test_col'
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            add_collection.return_value = dummy_sfs_updates
            # Outputs success message to terminal
            output = cli_exec([ops_collection.commands['ADD_COL'], self.col_root, '--name', col_name])
            self.assertEqual([
                prepare_args("{} {}".format(ops_collection.messages['ADD_COL']['OUTPUT'], 4))
            ], output)
            # Receives correct arguments
            self.assertEqual(1, len(add_collection.call_args_list))
            self.assertEqual(prepare_args(col_name, self.col_root), add_collection.call_args)
            # Collection name defaults to collection root name
            cli_exec([ops_collection.commands['ADD_COL'], self.col_root])
            self.assertEqual(2, len(add_collection.call_args_list))
            self.assertEqual(prepare_args(self.col_name, self.col_root), add_collection.call_args)
    def test_add_collection_validations(self):
        """ADD_COL rejects invalid paths, paths nested inside an SFS or an
        existing collection, and duplicate collection names — without ever
        invoking SFS.add_collection.
        """
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['ADD_COL'], self.col_root],
            ops_collection.messages['ADD_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.add_collection'
        )
        # Path should be an existing directory
        not_dir = os.path.join(self.TESTS_BASE, 'not_dir')
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], not_dir], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['INVALID_PATH']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Cannot add path within an SFS
        within_sfs = os.path.join(self.sfs_root, 'nested_dir')
        os.mkdir(within_sfs)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], within_sfs], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NESTED_SFS']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Actually add a collection
        col_name = 'test_col'
        self.sfs.add_collection(col_name, self.col_root)
        # Cannot add path within a collection
        within_col = os.path.join(self.col_root, 'nested_dir')
        os.mkdir(within_col)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], within_col], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NESTED_COL']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
        # Cannot add collection with a duplicate name
        new_col = os.path.join(self.TESTS_BASE, 'col2')
        os.mkdir(new_col)
        with unittest.mock.patch('sfs.core.SFS.add_collection') as add_collection:
            output = cli_exec([ops_collection.commands['ADD_COL'], new_col, '--name', col_name], ignore_errors=True)
            self.assertEqual([
                prepare_args(prepare_validation_error(ops_collection.messages['ADD_COL']['ERROR']['NAME_EXISTS']))
            ], output)
            self.assertEqual(0, len(add_collection.call_args_list))
    def test_is_collection(self):
        """IS_COL requires an SFS and reports whether a given path belongs
        to a collection (printing the collection root when it does).
        """
        # Must be inside an SFS
        self._test_not_sfs_dir(
            [ops_collection.commands['IS_COL'], self.col_root],
            ops_collection.messages['IS_COL']['ERROR']['NOT_IN_SFS'],
            'sfs.core.SFS.get_collection_by_path'
        )
        col_name = 'test_col'
        sfs = core.SFS.get_by_path(self.sfs_root)
        sfs.add_collection(col_name, self.col_root)
        col = sfs.get_collection_by_name(col_name)
        with unittest.mock.patch('sfs.core.SFS.get_collection_by_path') as get_collection_by_path:
            # Outputs positively for paths within a collection
            get_collection_by_path.return_value = col
            for path in [self.col_root, os.path.join(self.col_root, 'nested')]:
                output = cli_exec([ops_collection.commands['IS_COL'], path])
                self.assertEqual([
                    prepare_args("{} {}".format(ops_collection.messages['IS_COL']['OUTPUT']['YES'], self.col_root))
                ], output)
                self.assertEqual(prepare_args(path), get_collection_by_path.call_args)
            # Called correct no of times
            self.assertEqual(2, len(get_collection_by_path.call_args_list))
            # Outputs negatively for paths outside collections
            get_collection_by_path.return_value = None
            for path in [self.TESTS_BASE, os.path.join(self.TESTS_BASE, 'nested')]:
                output = cli_exec([ops_collection.commands['IS_COL'], path])
                self.assertEqual([
                    prepare_args(ops_collection.messages['IS_COL']['OUTPUT']['NO'])
                ], output)
                self.assertEqual(prepare_args(path), get_collection_by_path.call_args)
            # Called correct no of times
            self.assertEqual(4, len(get_collection_by_path.call_args_list))
def test_list_cols(self):
# Must be inside an SFS
self._test_not_sfs_dir(
[ops_collection.commands['LIST_COLS']],
ops_collection.messages['LIST_COLS']['ERROR']['NOT_IN_SFS'],
'sfs.core.SFS.get_all_collections'
)
# Outputs negatively when there are no collections
with unittest.mock.patch('sfs.core.SFS.get_all_collections') as get_all_collections:
get_all_collections.return_value = {}
output = cli_exec([ops_collection.commands['LIST_COLS']])
self.assertEqual([
prepare_args(ops_collection.messages['LIST_COLS']['OUTPUT']['NOT_AVAILABLE'])
], output)
self.assertEqual(prepare_args(), get_all_collections.call_args)
self.assertEqual(1, len(get_all_collections.call_args_list))
# Add 2 collections
col1_name = 'col1'
col1_root = self.col_root
col2_name = 'col2'
col2_root = os.path.join(self.TESTS_BASE, 'col2')
os.mkdir(col2_root)
sfs = core.SFS.get_by_path(self.sfs_root)
sfs.add_collection(col1_name, col1_root)
sfs.add_collection(col2_name, col2_root)
sfs_list = sfs.get_all_collections()
with unittest.mock.patch('sfs.core.SFS.get_all_collections') as get_all_collections:
get_all_collections.return_value = sfs_list
output = cli_exec([ops_collection.commands['LIST_COLS']])
self.assertEqual([
prepare_args("{}{}".format(ops_collection.messages['LIST_COLS']['OUTPUT']['COUNT'], len(sfs_list))),
prepare_args('{}"{}"\t{}"{}"'.format(
ops_collection.messages['LIST_COLS']['OUTPUT']['COL_NAME'], col1_name,
ops_collection.messages['LIST_COLS']['OUTPUT']['COL_ROOT'], col1_root
)),
prepare_args('{}"{}"\t{}"{}"'.format(
ops_collection.messages['LIST_COLS']['OUTPUT']['COL_NAME'], col2_name,
ops_collection.messages['LIST_COLS']['OUTPUT']['COL_ROOT'], col2_root
))
], output)
self.assertEqual(prepare_args(), get_all_collections.call_args)
self.assertEqual(1, len(get_all_collections.call_args_list))
def test_sync_col(self):
# Must be inside an SFS
self._test_not_sfs_dir(
[ops_collection.commands['SYNC_COL'], self.col_name],
ops_collection.messages['SYNC_COL']['ERROR']['NOT_IN_SFS'],
'sfs.core.Collection.update',
'sfs.core.SFS.del_orphans'
)
# Add a collection
sfs = core.SFS.get_by_path(self.sfs_root)
sfs.add_collection(self.col_name, self.col_root)
updates_in_sync = core.SfsUpdates(added=3, updated=5, deleted=0)
updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=4)
with unittest.mock.patch('sfs.core.Collection.update') as update_collection:
with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
update_collection.return_value = updates_in_sync
del_orphans.return_value = updates_in_del
# Outputs number of links updated
output = cli_exec([ops_collection.commands['SYNC_COL'], self.col_name])
self.assertEqual([
prepare_args(
'{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['ADDED'], updates_in_sync.added)
),
prepare_args(
'{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['UPDATED'], updates_in_sync.updated)
),
prepare_args(
'{}{}'.format(ops_collection.messages['SYNC_COL']['OUTPUT']['DELETED'], updates_in_del.deleted)
)
], output)
self.assertEqual([prepare_args()], update_collection.call_args_list)
self.assertEqual([prepare_args(col_root=self.col_root)], del_orphans.call_args_list)
# Reports negatively for unknown collection name
output = cli_exec([ops_collection.commands['SYNC_COL'], 'unknown_col'], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(
ops_collection.messages['SYNC_COL']['ERROR']['NOT_A_COL_NAME']
))
], output)
def test_del_col(self):
# Must be inside an SFS
self._test_not_sfs_dir(
[ops_collection.commands['DEL_COL'], self.col_name],
ops_collection.messages['DEL_COL']['ERROR']['NOT_IN_SFS'],
'sfs.core.SFS.del_collection'
)
# Add a collection
sfs = core.SFS.get_by_path(self.sfs_root)
sfs.add_collection(self.col_name, self.col_root)
updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=3)
with unittest.mock.patch('sfs.core.SFS.del_collection') as del_collection:
with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
del_collection.return_value = None
del_orphans.return_value = updates_in_del
# Expect a blank output
output = cli_exec([ops_collection.commands['DEL_COL'], self.col_name])
self.assertEqual([
prepare_args('{}{}'.format(
ops_collection.messages['DEL_ORPHANS']['OUTPUT'], updates_in_del.deleted
))
], output)
self.assertEqual([prepare_args(self.col_name)], del_collection.call_args_list)
# Reports negatively for unknown collection name
output = cli_exec([ops_collection.commands['DEL_COL'], 'unknown_col'], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(
ops_collection.messages['DEL_COL']['ERROR']['NOT_A_COL_NAME']
))
], output)
def test_del_orphans(self):
# Must be inside an SFS
self._test_not_sfs_dir(
[ops_collection.commands['DEL_ORPHANS']],
ops_collection.messages['DEL_ORPHANS']['ERROR']['NOT_IN_SFS'],
'sfs.core.SFS.del_orphans'
)
updates_in_del = core.SfsUpdates(added=0, updated=0, deleted=8)
# Reports no of links deleted
with unittest.mock.patch('sfs.core.SFS.del_orphans') as del_orphans:
del_orphans.return_value = updates_in_del
output = cli_exec([ops_collection.commands['DEL_ORPHANS']])
self.assertEqual([
prepare_args('{}{}'.format(ops_collection.messages['DEL_ORPHANS']['OUTPUT'], updates_in_del.deleted))
], output)
self.assertEqual([prepare_args()], del_orphans.call_args_list)
class QueryOpsCLITests(test_helper.TestCaseWithFS):
def __init__(self, *args, **kwargs):
super(QueryOpsCLITests, self).__init__(*args, **kwargs)
self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
self.col_root = os.path.join(self.TESTS_BASE, 'col')
self.col_name = 'col'
self.col_path = os.path.join(self.col_root, 'file')
self.link_path = os.path.join(self.sfs_root, self.col_name, 'file')
def setUp(self):
super(QueryOpsCLITests, self).setUp()
# Create collection and SFS nodes
os.mkdir(self.sfs_root)
os.mkdir(self.col_root)
test_helper.dummy_file(self.col_path, 100)
core.SFS.init_sfs(self.sfs_root)
self.sfs = core.SFS.get_by_path(self.sfs_root)
self.sfs.add_collection(self.col_name, self.col_root)
self.col = self.sfs.get_collection_by_name(self.col_name)
def test_query_link(self):
# Reports link info
output = cli_exec([ops_query.commands['QUERY'], self.link_path])
self.assertEqual([
prepare_args("{}{}".format(ops_query.messages['QUERY']['OUTPUT']['LINK']['COL_NAME'], self.col_name)),
prepare_args("{}{}".format(ops_query.messages['QUERY']['OUTPUT']['LINK']['COL_PATH'], self.col_path)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['LINK']['CTIME'],
time.ctime(os.stat(self.col_path).st_ctime)
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['LINK']['SIZE'],
sfs_helper.get_readable_size(100)
)),
], output)
def test_query_directory(self):
dir_path = self.sfs_root
dir_stats = ops_query.DirectoryStats()
dir_stats.size = 1
dir_stats.ctime = 2
dir_stats.active_links = 3
dir_stats.orphan_links = 4
dir_stats.foreign_links = 5
dir_stats.files = 6
dir_stats.sub_directories = 7
# Reports directory info. If path not specified current directory is used
with unittest.mock.patch('sfs.ops.ops_query.compute_directory_stats') as compute_directory_stats:
compute_directory_stats.return_value = dir_stats
with change_cwd(dir_path):
for output in [
cli_exec([ops_query.commands['QUERY'], dir_path]),
cli_exec([ops_query.commands['QUERY']]),
]:
self.assertEqual([
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['SIZE'],
sfs_helper.get_readable_size(dir_stats.size)
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['CTIME'], time.ctime(dir_stats.ctime)
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['ACTIVE_LINKS'], dir_stats.active_links
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['FOREIGN_LINKS'], dir_stats.foreign_links
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['ORPHAN_LINKS'], dir_stats.orphan_links
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['FILES'], dir_stats.files
)),
prepare_args("{}{}".format(
ops_query.messages['QUERY']['OUTPUT']['DIR']['SUB_DIRECTORIES'], dir_stats.sub_directories
))
], output)
self.assertIsNotNone(compute_directory_stats.call_args)
self.assertEqual(2, len(compute_directory_stats.call_args[0]))
self.assertIsInstance(compute_directory_stats.call_args[0][0], core.SFS)
self.assertEqual(compute_directory_stats.call_args[0][1], dir_path)
self.assertEqual(2, len(compute_directory_stats.call_args_list))
def test_query_link_validations(self):
# Must be inside an SFS
not_sfs = self.TESTS_BASE
output = cli_exec([ops_query.commands['QUERY'], not_sfs], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['NOT_IN_SFS']))
], output)
# Path must be link or directory
file_path = os.path.join(self.sfs_root, 'test_file')
test_helper.dummy_file(file_path)
output = cli_exec([ops_query.commands['QUERY'], file_path], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['NOT_LINK_OR_DIR']))
], output)
# Link must belong to a collection
foreign_link = os.path.join(self.sfs_root, 'foreign_link')
test_helper.dummy_link(foreign_link)
output = cli_exec([ops_query.commands['QUERY'], foreign_link], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['COLLECTION_NOT_FOUND']))
], output)
# Stats must be available
stats_path = os.path.join(self.col.stats_base, 'file')
os.unlink(stats_path)
output = cli_exec([ops_query.commands['QUERY'], self.link_path], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_query.messages['QUERY']['ERROR']['STATS_NOT_FOUND']))
], output)
class DedupOpsCLITests(test_helper.TestCaseWithFS):
def __init__(self, *args, **kwargs):
super(DedupOpsCLITests, self).__init__(*args, **kwargs)
self.sfs_root = os.path.join(self.TESTS_BASE, 'sfs_root')
self.col_root = os.path.join(self.TESTS_BASE, 'col')
self.col_name = 'col'
def setUp(self):
super(DedupOpsCLITests, self).setUp()
# Create collection and SFS nodes
os.mkdir(self.sfs_root)
col_files = [(os.path.join(self.col_root, rel_path), size) for rel_path, size in [
(os.path.join('dir1', 'file1'), 100),
(os.path.join('dir1', 'file2'), 200),
(os.path.join('dir1', 'file3'), 500),
(os.path.join('dir2', 'file1'), 100),
(os.path.join('dir2', 'file2'), 300),
(os.path.join('dir2', 'file4'), 500),
(os.path.join('dir3', 'file2'), 200),
]]
for col_file, size in col_files:
os.makedirs(os.path.dirname(col_file), exist_ok=True)
test_helper.dummy_file(col_file, size)
core.SFS.init_sfs(self.sfs_root)
self.sfs = core.SFS.get_by_path(self.sfs_root)
self.sfs.add_collection(self.col_name, self.col_root)
self.col = self.sfs.get_collection_by_name(self.col_name)
def test_find_dups(self):
# Must be inside an SFS
not_sfs = self.TESTS_BASE
output = cli_exec([ops_dedup.commands['FIND_DUPS'], not_sfs], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['NOT_IN_SFS']))
], output)
# Path must be a valid directory
not_dir = os.path.join(self.sfs_root, 'not_dir')
output = cli_exec([ops_dedup.commands['FIND_DUPS'], not_dir], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['INVALID_PATH']))
], output)
# Reports duplicate count and JSON path
output = cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=False)
self.assertEqual([
prepare_args("{}{}".format(ops_dedup.messages['FIND_DUPS']['OUTPUT']['DUPLICATE_COUNT'], 4)),
prepare_args("{}{}".format(
ops_dedup.messages['FIND_DUPS']['OUTPUT']['JSON_PATH'], ops_dedup.get_json_path(self.sfs_root)
))
], output)
# Reports that no duplicates were found
output = cli_exec([
ops_dedup.commands['FIND_DUPS'], os.path.join(self.sfs_root, self.col_name, 'dir3')
], ignore_errors=False)
self.assertEqual([
prepare_args(ops_dedup.messages['FIND_DUPS']['OUTPUT']['NO_DUPLICATES'])
], output)
# JSON must not already exist
output = cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['FIND_DUPS']['ERROR']['JSON_EXISTS']))
], output)
# JSON can be overriden with the override flag
cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--override'], ignore_errors=False)
# Delete duplicate flag marks files for deletion correctly
with unittest.mock.patch('sfs.ops.ops_dedup.find_dups') as find_dups:
find_dups.return_value = []
cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--override'], ignore_errors=False)
self.assertEqual(find_dups.call_args[1]['keep'], 'all')
cli_exec([
ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--del-duplicates', '--override'
], ignore_errors=False)
self.assertEqual(find_dups.call_args[1]['keep'], 'first')
def test_del_dups(self):
# Must be inside an SFS
not_sfs = self.TESTS_BASE
output = cli_exec([ops_dedup.commands['DEDUP'], not_sfs], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['NOT_IN_SFS']))
], output)
# Path must be a valid directory
not_dir = os.path.join(self.sfs_root, 'not_dir')
output = cli_exec([ops_dedup.commands['DEDUP'], not_dir], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['INVALID_PATH']))
], output)
# Dedup JSON must be present in the target directory
output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_dedup.messages['DEDUP']['ERROR']['JSON_NOT_FOUND']))
], output)
# Create JSON
cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root], ignore_errors=False)
# Outputs number of links deleted
output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root], ignore_errors=False)
self.assertEqual([
prepare_args("{}{}".format(ops_dedup.messages['DEDUP']['OUTPUT'], 0))
], output)
# Does not delete dedup JSON by default
json_path = ops_dedup.get_json_path(self.sfs_root)
self.assertTrue(os.path.isfile(json_path))
# Deletes dedup JSON with flag
cli_exec([ops_dedup.commands['FIND_DUPS'], self.sfs_root, '--del-duplicates', '--override'],
ignore_errors=False)
output = cli_exec([ops_dedup.commands['DEDUP'], self.sfs_root, '--del-json'], ignore_errors=False)
self.assertEqual([
prepare_args("{}{}".format(ops_dedup.messages['DEDUP']['OUTPUT'], 2))
], output)
self.assertFalse(os.path.isfile(json_path))
class TestMergeOps(test_helper.TestCaseWithFS):
def test_merge(self):
# Create SFS, target and source
sfs_root = self.TESTS_BASE
core.SFS.init_sfs(sfs_root)
self.create_fs_tree({
'dirs': {
'dir1': {},
'dir2': {},
}
}, base=sfs_root)
target = os.path.join(sfs_root, 'dir1')
source = os.path.join(sfs_root, 'dir2')
# Target and Source be in SFS
not_in_sfs = fs.expand_path(os.path.join(sfs_root, '..'))
output = cli_exec([ops_merge.commands['MERGE'], not_in_sfs, source], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NOT_IN_SFS']))
], output)
output = cli_exec([ops_merge.commands['MERGE'], target, not_in_sfs], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NOT_IN_SFS']))
], output)
# Target and source must be valid paths
not_dir = fs.expand_path(os.path.join(sfs_root, 'not_dir'))
output = cli_exec([ops_merge.commands['MERGE'], not_dir, source], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['INVALID_PATH']))
], output)
output = cli_exec([ops_merge.commands['MERGE'], target, not_dir], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['INVALID_PATH']))
], output)
# Target and Source cannot be nested
output = cli_exec([ops_merge.commands['MERGE'], sfs_root, source], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NESTED_DIR']))
], output)
output = cli_exec([ops_merge.commands['MERGE'], target, sfs_root], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['NESTED_DIR']))
], output)
dummy_conflict = ops_merge.MergeConflict(
'test_path', ops_merge.MergeConflict.FileStats('file1'), ops_merge.MergeConflict.FileStats('file2')
)
# JSON must exist when using conflicts JSON for merge'
output = cli_exec([ops_merge.commands['MERGE'], target, source, '--json'], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['JSON_NOT_FOUND']))
], output)
# JSON is not generated in case of no conflicts or when continue flag is set
cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
get_merge_conflicts.return_value = [dummy_conflict]
cli_exec([ops_merge.commands['MERGE'], target, source, '--continue'], ignore_errors=False)
self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
# JSON is generated in case of conflicts
with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
get_merge_conflicts.return_value = [dummy_conflict]
cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
self.assertTrue(os.path.isfile(ops_merge.get_json_path(target)))
# JSON must not exist when creating conflicts JSON
with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
get_merge_conflicts.return_value = [dummy_conflict]
output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=True)
self.assertEqual([
prepare_args(prepare_validation_error(ops_merge.messages['MERGE']['ERROR']['JSON_EXISTS']))
], output)
# Validates merge conflicts JSON
with unittest.mock.patch('sfs.ops.ops_merge.validate_merge_conflicts') as validate_merge_conflicts:
with unittest.mock.patch('sfs.ops.ops_merge.MergeConflict.from_dict') as from_dict:
path1 = os.path.join(target, 'file1')
path2 = os.path.join(source, 'file1')
validate_merge_conflicts.return_value = (path1, path2)
from_dict.return_value = dummy_conflict
output = cli_exec([ops_merge.commands['MERGE'], target, source, '--json'], ignore_errors=True)
self.assertEqual([prepare_args(prepare_validation_error(
'{}: "{}", "{}"'.format(ops_merge.messages['MERGE']['ERROR']['INVALID_CONFLICTS'], path1, path2)
))], output)
self.assertEqual(1, len(validate_merge_conflicts.call_args_list))
self.assertEqual(3, len(validate_merge_conflicts.call_args[0]))
self.assertEqual((target, source), validate_merge_conflicts.call_args[0][:-1])
# Override flag ignores existing JSON and overwrites it
with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
get_merge_conflicts.return_value = [dummy_conflict] * 2
old_stats = os.stat(ops_merge.get_json_path(target))
cli_exec([ops_merge.commands['MERGE'], target, source, '--override'], ignore_errors=False)
new_stats = os.stat(ops_merge.get_json_path(target))
self.assertNotEqual(old_stats.st_size, new_stats.st_size)
# Deletes JSON on completion with flag set
cli_exec([ops_merge.commands['MERGE'], target, source, '--del-json', '--override'], ignore_errors=False)
self.assertFalse(os.path.isfile(ops_merge.get_json_path(target)))
# Deletes Source directory on completion with flag set
self.create_fs_tree({
'files': ['file_1'],
'links': ['link_1'],
'dirs': {
'dir_1': {}
}
}, source)
output = cli_exec([ops_merge.commands['MERGE'], target, source, '--del-source'], ignore_errors=False)
self.assertEqual(
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['SOURCE_DELETED'], 2))
, output[-1])
self.assertFalse(os.path.isdir(source))
os.mkdir(source)
# Passes valid arguments to get_merge_conflicts
with unittest.mock.patch('sfs.ops.ops_merge.get_merge_conflicts') as get_merge_conflicts:
get_merge_conflicts.return_value = [dummy_conflict] * 3
output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
self.assertEqual(1, len(get_merge_conflicts.call_args_list))
self.assertEqual((target, source), get_merge_conflicts.call_args[0][1:])
self.assertEqual([
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['CONFLICT_COUNT'], 3)),
prepare_args("{}{}".format(
ops_merge.messages['MERGE']['OUTPUT']['JSON_PATH'], ops_merge.get_json_path(target)
))
], output)
# Passes correct value of keep
for keep in ops_merge.constants['MERGE_MODES'].values():
cli_exec([ops_merge.commands['MERGE'], target, source, '--override', '--on-conflict', keep])
self.assertEqual(keep, get_merge_conflicts.call_args[1]['keep'])
# Passes valid arguments to merge
with unittest.mock.patch('sfs.ops.ops_merge.merge') as merge:
merge.return_value = {
'DIRS_CREATED': 1,
'DIRS_DELETED': 2,
'FILES_MERGED': 3,
'LINKS_MERGED': 4,
'NODES_DELETED': 5,
'NODES_RENAMED': 6,
}
output = cli_exec([ops_merge.commands['MERGE'], target, source], ignore_errors=False)
self.assertEqual(1, len(merge.call_args_list))
self.assertEqual(3, len(merge.call_args[0]))
self.assertEqual((target, source), merge.call_args[0][:-1])
self.assertEqual([
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['CONFLICT_COUNT'], 0)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['DIRS_CREATED'], 1)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['DIRS_DELETED'], 2)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['FILES_MERGED'], 3)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['LINKS_MERGED'], 4)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['NODES_DELETED'], 5)),
prepare_args("{}{}".format(ops_merge.messages['MERGE']['OUTPUT']['NODES_RENAMED'], 6))
], output) | 0.434701 | 0.17245 |
import time
class QState:
"""
POD class to hold value and measurement basis of a quantum state
"""
def __init__(self, value, basis):
self.value = value
self.basis = basis
def __eq__(self, other):
return self.value == other.value and self.basis == other.basis
def __repr__(self):
return 'Qubit(value={}, basis={})'.format(self.value, self.basis)
class QChannel:
"""
Object that handles quantum communication via the a quantum device interface.
"""
def __init__(self, connection, qubit_factory, receiver):
self._connection = connection
self._qubit_factory = qubit_factory
self._receiver = receiver
self.bases_mapping = [lambda q: None, lambda q: q.H(print_info=False)]
def send_qubits(self, qstates):
"""
Takes a list of QStates and prepares qubits dependent on value and basis specified in the QStates. It then
sends them via the quantum connection to the specified receiver
:param qstates: List of QStates
"""
for qs in qstates:
q = self._qubit_factory(self._connection)
if qs.value == 1:
q.X()
self.bases_mapping[qs.basis](q)
self._connection.sendQubit(q, self._receiver)
def send_epr(self, bases):
"""
Takes a list of bases and prepare an EPR-pair. One qubit is sent to the specified receiver and the other is
measured specified by the basis in the provided list.
:param bases: Integer list representing measurement bases
:return: List of QStates containing the measurement outcome as value and the basis used for measurement
"""
def from_created_epr_pair(idx):
# The recipient needs some time to catch up otherwise the sender runs out of available qubits
if idx % 50:
time.sleep(0.1)
return self._connection.createEPR(self._receiver, print_info=False)
return self._measure_qubits_in_bases(from_created_epr_pair, bases)
def _measure_qubits_in_bases(self, take_qubit, bases):
qstates = []
for i, b in enumerate(bases):
q = take_qubit(i)
self.bases_mapping[b](q)
qstates.append(QState(q.measure(print_info=False), b))
return qstates
def receive_qubits_in(self, bases):
"""
Takes a list of bases and measures the retrieved qubits in those bases. Returns a list of QStates containing
measurement outcomes and the used bases.
:param bases: Integer list representing bases
:return: QState list containing the measurement outcome as value and the used basis
"""
def from_received_qubit(idx):
return self._connection.recvQubit()
return self._measure_qubits_in_bases(from_received_qubit, bases)
def receive_epr_in(self, bases):
"""
Takes a list of bases and measures the retrieved entangled qubits in those bases. Returns a list of QStates
containing measurement outcomes and the used bases.
:param bases: Integer list representing bases
:return: QState list containing the measurement outcome as value and the used basis
"""
def from_received_epr(idx):
return self._connection.recvEPR(print_info=False)
return self._measure_qubits_in_bases(from_received_epr, bases)
def close(self):
"""
Closes the quantum connection.
"""
self._connection.close()
class CAChannel:
"""
An object that handled classical authenticated communication used in quantum key distribution.
"""
def __init__(self, connection, other):
self._connection = connection
self._other = other
def send(self, data):
"""
Sends data via the classical authenticated channel.
:param data: Integer list containing binary representation of the data to be sent
"""
if not isinstance(data, list):
data = [data]
self._connection.sendValueList(self._other, data)
def send_ack(self):
"""
Sends an acknowledgment signal
"""
self._connection.sendAck(self._other)
def receive(self):
"""
Receives data sent via the classical authenticated channel as integer list.
:return: Integer list containing binary representation of the data received
"""
data = self._connection.getValueList(self._other)
return data
def receive_ack(self):
"""
Receives an acknowledgement signal.
"""
self._connection.getAck(self._other)
def clear(self):
"""
Clears the classical server.
"""
self._connection.clearServer()
def close(self):
"""
Closes the classical server.
"""
self._connection.closeChannel() | QNetwork/q_network_channels.py | import time
class QState:
"""
POD class to hold value and measurement basis of a quantum state
"""
def __init__(self, value, basis):
self.value = value
self.basis = basis
def __eq__(self, other):
return self.value == other.value and self.basis == other.basis
def __repr__(self):
return 'Qubit(value={}, basis={})'.format(self.value, self.basis)
class QChannel:
"""
Object that handles quantum communication via the a quantum device interface.
"""
def __init__(self, connection, qubit_factory, receiver):
self._connection = connection
self._qubit_factory = qubit_factory
self._receiver = receiver
self.bases_mapping = [lambda q: None, lambda q: q.H(print_info=False)]
def send_qubits(self, qstates):
"""
Takes a list of QStates and prepares qubits dependent on value and basis specified in the QStates. It then
sends them via the quantum connection to the specified receiver
:param qstates: List of QStates
"""
for qs in qstates:
q = self._qubit_factory(self._connection)
if qs.value == 1:
q.X()
self.bases_mapping[qs.basis](q)
self._connection.sendQubit(q, self._receiver)
def send_epr(self, bases):
"""
Takes a list of bases and prepare an EPR-pair. One qubit is sent to the specified receiver and the other is
measured specified by the basis in the provided list.
:param bases: Integer list representing measurement bases
:return: List of QStates containing the measurement outcome as value and the basis used for measurement
"""
def from_created_epr_pair(idx):
# The recipient needs some time to catch up otherwise the sender runs out of available qubits
if idx % 50:
time.sleep(0.1)
return self._connection.createEPR(self._receiver, print_info=False)
return self._measure_qubits_in_bases(from_created_epr_pair, bases)
def _measure_qubits_in_bases(self, take_qubit, bases):
qstates = []
for i, b in enumerate(bases):
q = take_qubit(i)
self.bases_mapping[b](q)
qstates.append(QState(q.measure(print_info=False), b))
return qstates
def receive_qubits_in(self, bases):
"""
Takes a list of bases and measures the retrieved qubits in those bases. Returns a list of QStates containing
measurement outcomes and the used bases.
:param bases: Integer list representing bases
:return: QState list containing the measurement outcome as value and the used basis
"""
def from_received_qubit(idx):
return self._connection.recvQubit()
return self._measure_qubits_in_bases(from_received_qubit, bases)
def receive_epr_in(self, bases):
"""
Takes a list of bases and measures the retrieved entangled qubits in those bases. Returns a list of QStates
containing measurement outcomes and the used bases.
:param bases: Integer list representing bases
:return: QState list containing the measurement outcome as value and the used basis
"""
def from_received_epr(idx):
return self._connection.recvEPR(print_info=False)
return self._measure_qubits_in_bases(from_received_epr, bases)
def close(self):
"""
Closes the quantum connection.
"""
self._connection.close()
class CAChannel:
"""
An object that handled classical authenticated communication used in quantum key distribution.
"""
def __init__(self, connection, other):
self._connection = connection
self._other = other
def send(self, data):
"""
Sends data via the classical authenticated channel.
:param data: Integer list containing binary representation of the data to be sent
"""
if not isinstance(data, list):
data = [data]
self._connection.sendValueList(self._other, data)
def send_ack(self):
"""
Sends an acknowledgment signal
"""
self._connection.sendAck(self._other)
def receive(self):
"""
Receives data sent via the classical authenticated channel as integer list.
:return: Integer list containing binary representation of the data received
"""
data = self._connection.getValueList(self._other)
return data
def receive_ack(self):
"""
Receives an acknowledgement signal.
"""
self._connection.getAck(self._other)
def clear(self):
"""
Clears the classical server.
"""
self._connection.clearServer()
def close(self):
"""
Closes the classical server.
"""
self._connection.closeChannel() | 0.903111 | 0.736223 |
# pylint: disable=missing-docstring
import copy
import json
import logging
import re
import time
import traceback
import google_service
import spectator_client
import stackdriver_descriptors
import httplib2
try:
from urllib2 import (
Request as urllibRequest,
urlopen as urllibUrlopen)
except ImportError:
from urllib.request import (
Request as urllibRequest,
urlopen as urllibUrlopen)
try:
from googleapiclient.errors import HttpError
STACKDRIVER_AVAILABLE = True
except ImportError:
STACKDRIVER_AVAILABLE = False
class StackdriverMetricsService(google_service.GoogleMonitoringService):
"""Helper class for interacting with Stackdriver."""
SERVICE_SCOPE = 'https://www.googleapis.com/auth/monitoring'
SERVICE_KEY = 'stackdriver'
SERVICE_NAME = 'monitoring'
SERVICE_VERSION = 'v3'
MAX_BATCH = 200
JANITOR_PERIOD = 600
@property
def stackdriver_options(self):
return self.service_options
@property
def descriptor_manager(self):
"""Return MetricDescriptorManager."""
return self.__descriptor_manager
def __init__(self, stub_factory, options):
"""Constructor.
Args:
stub_factory: [callable that creates stub for stackdriver]
This is passed as a callable to defer initialization because
we create the handlers before we process commandline args.
"""
super(StackdriverMetricsService, self).__init__(
stub_factory, options)
# The janitor prepares metric descriptors before first write.
self.__janitor_func = lambda: self.__auto_audit_metric_descriptors()
self.__next_janitor_time = time.time()
self.__good_janitor_count = 0
self.__distributions_also_have_count = self.service_options.get(
'distributions_also_have_count')
self.__fix_custom_metrics_unsafe = self.service_options.get(
'fix_custom_metrics_unsafe', False)
self.__log_400_data = self.service_options.get('log_400_data', False)
manager_options = dict(options)
manager_options['spectator'] = self.spectator_helper.options
manager = stackdriver_descriptors.MetricDescriptorManager(
self, spectator_client.ResponseProcessor(manager_options))
self.__descriptor_manager = manager
@staticmethod
def add_parser_arguments(parser):
"""Add arguments for configuring stackdriver."""
parser.add_argument('--project', default='')
parser.add_argument('--zone', default='')
parser.add_argument('--instance_id', default=0, type=int)
parser.add_argument('--credentials_path', default=None)
parser.add_argument(
'--stackdriver_generic_task_resources',
default=False,
action='store_true',
help='Use stackdriver "generic_task" monitored resources'
' rather than the container or VM.')
parser.add_argument(
'--manage_stackdriver_descriptors',
choices=['none', 'full', 'create', 'delete'],
help='Specifies how to maintain stackdriver descriptors on startup.'
'\n none: Do nothing.'
'\n create: Only create new descriptors seen in the'
' metric filter default.yml'
'\n delete: Only delete existing descriptors no longer'
' mentioned in filter default.yml'
'\n full: Both create and delete.')
def __auto_audit_metric_descriptors(self):
"""The janitor function attempts to bring Stackdriver into compliance.
If the metric descriptors are already as expected then we'll disable
the janitor for the rest of the process' lifetime. Otherwise we'll
continue to call it and try again around every JANITOR_PERIOD seconds
to give time for the system to settle down.
The reason we expect to have problems is that old replicas are still
running and recreating the descriptors we are trying to delete when
stackdriver automatically creates metrics they are attempting to write.
If this is the case, we'll keep trying to clear them out until, eventually,
the old processes are no longer around to overwrite us.
Should something re-emerge then we'll be messed up until the next restart.
Note that each replica of each service is probably trying to create all
the descriptors so there is a lot of activity here. Since the descriptors
are all the same, there should not be a problem with these replicas
conflicting or needing coordination.
Note if management is disabled then this will be in a stable state
though still inconsistent with stackdriver because there will not
be any errors or activity performed.
"""
secs_remaining = self.__next_janitor_time - time.time()
if secs_remaining > 0:
logging.debug('Janitor skipping audit for at least another %d secs',
secs_remaining)
return
logging.info('Janitor auditing metric descriptors...')
scoped_options = {'stackdriver': self.service_options}
audit_results = self.descriptor_manager.audit_descriptors(scoped_options)
stable = (audit_results.errors == 0
and audit_results.num_fixed_issues == 0)
now = time.time()
self.__next_janitor_time = now + self.JANITOR_PERIOD
if stable:
self.__good_janitor_count += 1
if self.__good_janitor_count > 1:
logging.info('Metric descriptors appear stable. Disabling janitor.')
self.__janitor_func = lambda: None
else:
logging.info('Keeping janitor around to build confidence.')
else:
self.__good_janitor_count = 0
logging.debug('Metric descriptors are not yet stable.'
' There may be some errors writing metrics.'
' Check again in %d secs.',
self.JANITOR_PERIOD)
def add_metric_to_timeseries(self, service, name, instance,
metric_metadata, service_metadata, result):
data_list = [
google_service.GoogleMeasurementData.make_from_measurement(
self, service_metadata, metric_metadata, measurement)
for measurement in instance['values']
]
if not data_list:
return
sample = data_list[0]
points = [{'interval': {'endTime': data.endTime}, 'value': data.valueData}
for data in data_list]
if sample.metricKind == 'CUMULATIVE':
for elem in points:
elem['interval']['startTime'] = sample.startTime
name, tags = self.spectator_helper.normalize_name_and_tags(
service, name, instance, metric_metadata)
metric = {
'type': self.descriptor_manager.name_to_type(name),
'labels': {tag['key']: tag['value'] for tag in tags}
}
monitored_resource = self.get_monitored_resource(service, service_metadata)
if (sample.valueType == 'DISTRIBUTION'
and self.__distributions_also_have_count):
# Add an implied metric which is just a counter.
# This is to workaround a temporary shortcoming querying the counts.
# Eventually this will be deprecated.
counter_points = copy.deepcopy(points)
for elem in counter_points:
elem['value'] = {
'int64Value': int(sample.valueData['distributionValue']['count'])
}
counter_metric = copy.deepcopy(metric)
counter_metric['type'] = self.__descriptor_manager.distribution_to_counter(
counter_metric['type'])
result.append({
'metric': counter_metric,
'resource': monitored_resource,
'metricKind': 'CUMULATIVE',
'valueType': 'INT64',
'points': counter_points})
result.append({
'metric': metric,
'resource': monitored_resource,
'metricKind': sample.metricKind,
'valueType': sample.valueType,
'points': points})
def publish_metrics(self, service_metrics):
self.__janitor_func()
time_series = []
self._update_monitored_resources(service_metrics)
spectator_client.foreach_metric_in_service_map(
service_metrics, self.add_metric_to_timeseries, time_series)
offset = 0
method = self.stub.projects().timeSeries().create
while offset < len(time_series):
last = min(offset + self.MAX_BATCH, len(time_series))
chunk = time_series[offset:last]
try:
(method(name=self.project_to_resource(self.project),
body={'timeSeries': chunk})
.execute())
except HttpError as err:
self.handle_time_series_http_error(err, chunk)
offset = last
return len(time_series)
def find_problematic_elements(self, error, batch):
try:
content = json.JSONDecoder().decode(error.content.decode('utf-8'))
message = content['error']['message']
except KeyError:
return []
if self.__log_400_data:
time_series_index_pattern = r'timeSeries\[(\d+?)\]'
log_count = 0
for match in re.finditer(time_series_index_pattern, message):
ts_index = int(match.group(1))
log_count += 1
if log_count > 3:
break
logging.info('timeSeries[%d] -> %r', ts_index,batch[ts_index])
time_series_range_pattern = r'timeSeries\[(\d+?)\-(\d+?)\]'
for match in re.finditer(time_series_range_pattern, message):
ts_start_index = int(match.group(1))
ts_end_index = int(match.group(2))
text = []
for index in range(ts_start_index, ts_end_index):
text.append('[%d] -> %r' % (index, batch[index]))
logging.info('\n%s', '\n'.join(text))
break
found = []
counter_to_gauge_pattern = (
r'timeSeries\[(\d+?)\]\.metricKind'
r' had an invalid value of \"(CUMULATIVE|GAUGE)\"'
r'.* must be (CUMULATIVE|GAUGE).')
for match in re.finditer(counter_to_gauge_pattern, message):
ts_index = int(match.group(1))
metric = batch[ts_index]['metric']
metric_type = metric['type']
found.append((self.delete_descriptor_and_retry,
metric_type, batch[ts_index]))
return found
def delete_descriptor_and_retry(self, metric_type, ts_request):
metric_name_param = '/'.join([
self.project_to_resource(self.project),
'metricDescriptors', metric_type])
api = self.stub.projects().metricDescriptors()
try:
logging.info('Deleting existing descriptor %s', metric_name_param)
response = api.delete(name=metric_name_param).execute()
logging.info('Delete response: %s', repr(response))
except HttpError as err:
logging.error('Could not delete descriptor %s', err)
if err.resp.status != 404:
return
else:
logging.info("Ignore error.")
logging.info('Retrying create timeseries %s', ts_request)
(self.stub.projects().timeSeries().create(
name=self.project_to_resource(self.project),
body={'timeSeries': ts_request})
.execute())
def handle_time_series_http_error(self, error, batch):
logging.error('Caught %s', error)
if error.resp.status == 400:
problems = self.find_problematic_elements(error, batch)
logging.info('PROBLEMS %r', problems)
if problems and not self.__fix_custom_metrics_unsafe:
logging.info(
'Fixing this problem would wipe stackdriver data.'
' Doing so was not enabled. To enable, add:\n\n'
'stackdriver:\n fix_custom_metrics_unsafe: true\n'
'to your spinnaker-monitoring-local.yml')
elif problems:
logging.info('Attempting to fix these problems. This may lose'
' stackdriver data for these metrics.')
for elem in problems:
try:
elem[0](*elem[1:])
except BaseException as bex:
traceback.print_exc()
logging.error('Failed %s(%s): %s', elem[0], elem[1:], bex)
class StackdriverServiceFactory(google_service.GoogleMonitoringServiceFactory):
SERVICE_CLASS = StackdriverMetricsService
def add_argparser(self, parser):
"""Implements server_handlers.MonitorCommandHandler interface."""
StackdriverMetricsService.add_parser_arguments(parser)
parser.add_argument('--stackdriver', default=False, action='store_true',
dest='monitor_stackdriver',
help='Publish metrics to Stackdriver.')
parser.add_argument(
'--fix_stackdriver_labels_unsafe', default=True,
action='store_true', help='DEPRECATED')
parser.add_argument(
'--nofix_stackdriver_labels_unsafe',
dest='fix_stackdriver_labels_unsafe',
action='store_false', help='DEPRECATED')
def make_service(options, factory=StackdriverServiceFactory):
return factory()(options, None) | spinnaker-monitoring-daemon/spinnaker-monitoring/stackdriver_service.py |
# pylint: disable=missing-docstring
import copy
import json
import logging
import re
import time
import traceback
import google_service
import spectator_client
import stackdriver_descriptors
import httplib2
try:
from urllib2 import (
Request as urllibRequest,
urlopen as urllibUrlopen)
except ImportError:
from urllib.request import (
Request as urllibRequest,
urlopen as urllibUrlopen)
try:
from googleapiclient.errors import HttpError
STACKDRIVER_AVAILABLE = True
except ImportError:
STACKDRIVER_AVAILABLE = False
class StackdriverMetricsService(google_service.GoogleMonitoringService):
"""Helper class for interacting with Stackdriver."""
SERVICE_SCOPE = 'https://www.googleapis.com/auth/monitoring'
SERVICE_KEY = 'stackdriver'
SERVICE_NAME = 'monitoring'
SERVICE_VERSION = 'v3'
MAX_BATCH = 200
JANITOR_PERIOD = 600
@property
def stackdriver_options(self):
return self.service_options
@property
def descriptor_manager(self):
"""Return MetricDescriptorManager."""
return self.__descriptor_manager
def __init__(self, stub_factory, options):
"""Constructor.
Args:
stub_factory: [callable that creates stub for stackdriver]
This is passed as a callable to defer initialization because
we create the handlers before we process commandline args.
"""
super(StackdriverMetricsService, self).__init__(
stub_factory, options)
# The janitor prepares metric descriptors before first write.
self.__janitor_func = lambda: self.__auto_audit_metric_descriptors()
self.__next_janitor_time = time.time()
self.__good_janitor_count = 0
self.__distributions_also_have_count = self.service_options.get(
'distributions_also_have_count')
self.__fix_custom_metrics_unsafe = self.service_options.get(
'fix_custom_metrics_unsafe', False)
self.__log_400_data = self.service_options.get('log_400_data', False)
manager_options = dict(options)
manager_options['spectator'] = self.spectator_helper.options
manager = stackdriver_descriptors.MetricDescriptorManager(
self, spectator_client.ResponseProcessor(manager_options))
self.__descriptor_manager = manager
@staticmethod
def add_parser_arguments(parser):
"""Add arguments for configuring stackdriver."""
parser.add_argument('--project', default='')
parser.add_argument('--zone', default='')
parser.add_argument('--instance_id', default=0, type=int)
parser.add_argument('--credentials_path', default=None)
parser.add_argument(
'--stackdriver_generic_task_resources',
default=False,
action='store_true',
help='Use stackdriver "generic_task" monitored resources'
' rather than the container or VM.')
parser.add_argument(
'--manage_stackdriver_descriptors',
choices=['none', 'full', 'create', 'delete'],
help='Specifies how to maintain stackdriver descriptors on startup.'
'\n none: Do nothing.'
'\n create: Only create new descriptors seen in the'
' metric filter default.yml'
'\n delete: Only delete existing descriptors no longer'
' mentioned in filter default.yml'
'\n full: Both create and delete.')
def __auto_audit_metric_descriptors(self):
"""The janitor function attempts to bring Stackdriver into compliance.
If the metric descriptors are already as expected then we'll disable
the janitor for the rest of the process' lifetime. Otherwise we'll
continue to call it and try again around every JANITOR_PERIOD seconds
to give time for the system to settle down.
The reason we expect to have problems is that old replicas are still
running and recreating the descriptors we are trying to delete when
stackdriver automatically creates metrics they are attempting to write.
If this is the case, we'll keep trying to clear them out until, eventually,
the old processes are no longer around to overwrite us.
Should something re-emerge then we'll be messed up until the next restart.
Note that each replica of each service is probably trying to create all
the descriptors so there is a lot of activity here. Since the descriptors
are all the same, there should not be a problem with these replicas
conflicting or needing coordination.
Note if management is disabled then this will be in a stable state
though still inconsistent with stackdriver because there will not
be any errors or activity performed.
"""
secs_remaining = self.__next_janitor_time - time.time()
if secs_remaining > 0:
logging.debug('Janitor skipping audit for at least another %d secs',
secs_remaining)
return
logging.info('Janitor auditing metric descriptors...')
scoped_options = {'stackdriver': self.service_options}
audit_results = self.descriptor_manager.audit_descriptors(scoped_options)
stable = (audit_results.errors == 0
and audit_results.num_fixed_issues == 0)
now = time.time()
self.__next_janitor_time = now + self.JANITOR_PERIOD
if stable:
self.__good_janitor_count += 1
if self.__good_janitor_count > 1:
logging.info('Metric descriptors appear stable. Disabling janitor.')
self.__janitor_func = lambda: None
else:
logging.info('Keeping janitor around to build confidence.')
else:
self.__good_janitor_count = 0
logging.debug('Metric descriptors are not yet stable.'
' There may be some errors writing metrics.'
' Check again in %d secs.',
self.JANITOR_PERIOD)
def add_metric_to_timeseries(self, service, name, instance,
metric_metadata, service_metadata, result):
data_list = [
google_service.GoogleMeasurementData.make_from_measurement(
self, service_metadata, metric_metadata, measurement)
for measurement in instance['values']
]
if not data_list:
return
sample = data_list[0]
points = [{'interval': {'endTime': data.endTime}, 'value': data.valueData}
for data in data_list]
if sample.metricKind == 'CUMULATIVE':
for elem in points:
elem['interval']['startTime'] = sample.startTime
name, tags = self.spectator_helper.normalize_name_and_tags(
service, name, instance, metric_metadata)
metric = {
'type': self.descriptor_manager.name_to_type(name),
'labels': {tag['key']: tag['value'] for tag in tags}
}
monitored_resource = self.get_monitored_resource(service, service_metadata)
if (sample.valueType == 'DISTRIBUTION'
and self.__distributions_also_have_count):
# Add an implied metric which is just a counter.
# This is to workaround a temporary shortcoming querying the counts.
# Eventually this will be deprecated.
counter_points = copy.deepcopy(points)
for elem in counter_points:
elem['value'] = {
'int64Value': int(sample.valueData['distributionValue']['count'])
}
counter_metric = copy.deepcopy(metric)
counter_metric['type'] = self.__descriptor_manager.distribution_to_counter(
counter_metric['type'])
result.append({
'metric': counter_metric,
'resource': monitored_resource,
'metricKind': 'CUMULATIVE',
'valueType': 'INT64',
'points': counter_points})
result.append({
'metric': metric,
'resource': monitored_resource,
'metricKind': sample.metricKind,
'valueType': sample.valueType,
'points': points})
def publish_metrics(self, service_metrics):
self.__janitor_func()
time_series = []
self._update_monitored_resources(service_metrics)
spectator_client.foreach_metric_in_service_map(
service_metrics, self.add_metric_to_timeseries, time_series)
offset = 0
method = self.stub.projects().timeSeries().create
while offset < len(time_series):
last = min(offset + self.MAX_BATCH, len(time_series))
chunk = time_series[offset:last]
try:
(method(name=self.project_to_resource(self.project),
body={'timeSeries': chunk})
.execute())
except HttpError as err:
self.handle_time_series_http_error(err, chunk)
offset = last
return len(time_series)
def find_problematic_elements(self, error, batch):
try:
content = json.JSONDecoder().decode(error.content.decode('utf-8'))
message = content['error']['message']
except KeyError:
return []
if self.__log_400_data:
time_series_index_pattern = r'timeSeries\[(\d+?)\]'
log_count = 0
for match in re.finditer(time_series_index_pattern, message):
ts_index = int(match.group(1))
log_count += 1
if log_count > 3:
break
logging.info('timeSeries[%d] -> %r', ts_index,batch[ts_index])
time_series_range_pattern = r'timeSeries\[(\d+?)\-(\d+?)\]'
for match in re.finditer(time_series_range_pattern, message):
ts_start_index = int(match.group(1))
ts_end_index = int(match.group(2))
text = []
for index in range(ts_start_index, ts_end_index):
text.append('[%d] -> %r' % (index, batch[index]))
logging.info('\n%s', '\n'.join(text))
break
found = []
counter_to_gauge_pattern = (
r'timeSeries\[(\d+?)\]\.metricKind'
r' had an invalid value of \"(CUMULATIVE|GAUGE)\"'
r'.* must be (CUMULATIVE|GAUGE).')
for match in re.finditer(counter_to_gauge_pattern, message):
ts_index = int(match.group(1))
metric = batch[ts_index]['metric']
metric_type = metric['type']
found.append((self.delete_descriptor_and_retry,
metric_type, batch[ts_index]))
return found
def delete_descriptor_and_retry(self, metric_type, ts_request):
metric_name_param = '/'.join([
self.project_to_resource(self.project),
'metricDescriptors', metric_type])
api = self.stub.projects().metricDescriptors()
try:
logging.info('Deleting existing descriptor %s', metric_name_param)
response = api.delete(name=metric_name_param).execute()
logging.info('Delete response: %s', repr(response))
except HttpError as err:
logging.error('Could not delete descriptor %s', err)
if err.resp.status != 404:
return
else:
logging.info("Ignore error.")
logging.info('Retrying create timeseries %s', ts_request)
(self.stub.projects().timeSeries().create(
name=self.project_to_resource(self.project),
body={'timeSeries': ts_request})
.execute())
def handle_time_series_http_error(self, error, batch):
logging.error('Caught %s', error)
if error.resp.status == 400:
problems = self.find_problematic_elements(error, batch)
logging.info('PROBLEMS %r', problems)
if problems and not self.__fix_custom_metrics_unsafe:
logging.info(
'Fixing this problem would wipe stackdriver data.'
' Doing so was not enabled. To enable, add:\n\n'
'stackdriver:\n fix_custom_metrics_unsafe: true\n'
'to your spinnaker-monitoring-local.yml')
elif problems:
logging.info('Attempting to fix these problems. This may lose'
' stackdriver data for these metrics.')
for elem in problems:
try:
elem[0](*elem[1:])
except BaseException as bex:
traceback.print_exc()
logging.error('Failed %s(%s): %s', elem[0], elem[1:], bex)
class StackdriverServiceFactory(google_service.GoogleMonitoringServiceFactory):
SERVICE_CLASS = StackdriverMetricsService
def add_argparser(self, parser):
"""Implements server_handlers.MonitorCommandHandler interface."""
StackdriverMetricsService.add_parser_arguments(parser)
parser.add_argument('--stackdriver', default=False, action='store_true',
dest='monitor_stackdriver',
help='Publish metrics to Stackdriver.')
parser.add_argument(
'--fix_stackdriver_labels_unsafe', default=True,
action='store_true', help='DEPRECATED')
parser.add_argument(
'--nofix_stackdriver_labels_unsafe',
dest='fix_stackdriver_labels_unsafe',
action='store_false', help='DEPRECATED')
def make_service(options, factory=StackdriverServiceFactory):
return factory()(options, None) | 0.652352 | 0.108213 |
import os,re,optparse,tarfile,shutil,random
from fileReader_condor import *
from quartet_condor import *
from arguments import *
def translate_quartets(ref_dict, quartet_file_name, outputSuffix):
quartet_file = open(quartet_file_name, 'r')
new_quartet_file = open('quartets'+outputSuffix+'.txt', 'w');
kount = 0
for line in quartet_file:
replacement_line = ""
first_word = True
add_quartet = True
for word in line.split():
if( first_word != True): replacement_line += " "
found_it = False
for k, v in ref_dict.items():
if v == word:
replacement_line += str(k)
found_it = True
if( found_it == False ): add_quartet = False
first_word = False
if( add_quartet): new_quartet_file.write(replacement_line+'\n');
kount += 1
new_quartet_file.close()
quartet_file.close()
return kount
def quart_file_is_codes(quartet_file_name):
'''
opens up file and checks to see if quartets are numeric
or alphanumeric meaning need translation from ref_dict.
'''
quartet_file = open(quartet_file_name, 'r')
for line in quartet_file:
if re.search('[^0-9\s]', line):
return False
return True
def write_translate_table(ref_dict, outputSuffix):
'''
Writes a translate table for all taxa
:Input: A reference dictionary containing number/species-name pairs
'''
translate_filename = 'translate' + outputSuffix + '.txt'
translate_file = open(translate_filename, 'w')
kount = 0
for k, v in ref_dict.items():
translate_file.write(str(k) + "\t" + v + "\n")
kount += 1
translate_file.close()
return kount
def write_gene_table(ref_dict, outputSuffix):
'''
Writes a translate table for all genes
:Input: A reference dictionary containing genes
'''
gene_filename = "genes" + outputSuffix + ".txt"
gene_file = open(gene_filename, 'w')
kount = 0
for k, v in ref_dict.items():
gene_file.write(str(k) + "\t" + v + "\n")
kount += 1
gene_file.close()
return kount
'''
Finds the list of quartets to analyze, and builds the submit.run_taxa file to analyze each individual quartet
'''
def main():
parser = getParser()
options, args = parser.parse_args()
#Prepare output
out = "RUNNING organize.py: \n"
# unzip data files if zipped
if( options.data_is_zipped == 1 ):
iCount = 0
for file in os.listdir(options.gene_file):
isTar = re.search(r".tar.gz$", file )
if( isTar != None and not re.search(r"^D[0-9]+.tar.gz", file)):
iCount = iCount + 1
tarFilename = file
if( iCount != 1 ):
print "Error: not exactly one .tar.gz file in directory: %d in %s" % (iCount, options.gene_file)
return 1
args += " -z 1"
myTarfile = tarfile.open(name=options.gene_file+"/"+tarFilename,mode='r')
fr = fileReader()
#find the list of gene files to be analyzed
files = []
if (options.list_of_files != None):
list_of_files = open(options.list_of_files, 'r')
for line in list_of_files:
files.append(line.strip())
else:
if( options.data_is_zipped == 1 ):
files = myTarfile.getnames()
else:
for (dirpath, dirnames, filenames) in os.walk(options.gene_file):
files.extend(filenames)
#make reference dictionaries containing all taxa and all genes found in the data set
taxa_dict = {}
gene_dict = {}
gene_count = 0
for file in files:
ignore_this_file = False
if( options.data_is_zipped == 1 ):
fileInfo = myTarfile.getmember(file)
if( fileInfo.isfile() ):
nex_file = myTarfile.extractfile(fileInfo.name)
else:
ignore_this_file = True
else:
nex_file = open(options.gene_file + file, 'r')
if( ignore_this_file == False ):
taxa = fr.get_taxa(nex_file)
taxa_dict = fr.make_dict(taxa_dict, taxa)
nex_file.close()
try:
use_name = re.findall('[^/]+$', file)[0]
except:
print "Error: problem reading file name: %s" % (file)
return 1
gene_dict[str(gene_count)] = use_name
gene_count += 1
#write a translate table for reference
numTaxa = write_translate_table(taxa_dict, options.outputSuffix)
out += "- translate" + options.outputSuffix + ".txt written for " + str(numTaxa) + " taxa.\n"
#write a gene table
numGenes = write_gene_table(gene_dict, options.outputSuffix)
out += "- genes" + options.outputSuffix + ".txt written for " + str(numGenes) + " genes.\n"
#find the list of quartets to analyze
q = quartet()
quartet_filename = "quartets" + options.outputSuffix + ".txt"
#use a user-specified list if given
if (options.list_of_quartets != None):
if (options.list_of_quartets == quartet_filename):
print "Quartet file cannot be named <"+quartet_filename+">; that filename reserved. Please rename."
return 1
#open up user-specified file, if simply codes, rename if necessary and continue.
is_codes = quart_file_is_codes(options.list_of_quartets)
if( is_codes ):
shutil.copyfile(options.list_of_quartets, quartet_filename)
out += "- "+quartet_filename+" copied from " + options.list_of_quartets + ".\n"
num_quartets = 0
with open(quartets_filename, 'r') as input:
for line in input:
num_quartets += 1
else:
num_quartets = translate_quartets(taxa_dict, options.list_of_quartets, options.outputSuffix)
if( num_quartets == False):
print "Error: supplied quartets file could not be translated."
return 1
#Now subsample from file.
if( options.num_quartets != 0 ):
num_lines = sum(1 for line in open(quartet_filename))
if( options.num_quartets > num_lines ):
print "Error: requested quartets more than quartets in file."
return 1
myQuartets = random.sample(xrange(1,num_lines+1), options.num_quartets)
myQuartets.sort()
curQ = 0
curLine = 0
with open(quartet_filename, 'r') as input:
with open('xquartets.txt', 'w') as output:
for line in input:
curLine=curLine+1;
if (curLine in myQuartets):
output.write(line)
os.remove(quartet_filename)
os.rename("xquartets.txt", quartet_filename)
out += "- "+quartet_filename+" written for " + str(options.num_quartets) + " quartets given in " + options.list_of_quartets + ".\n"
else:
out += "- "+quartet_filename+" written for " + str(num_quartets) + " quartets given in " + options.list_of_quartets + ".\n"
#pick random quartets if no user-specified list
else:
quart_file = open(quartet_filename, 'w')
for i in range(options.num_quartets):
rand_taxa = q.pick_random_quartet(len(taxa_dict))
#print rand_taxa
for num in rand_taxa:
quart_file.write(str(num) + " ")
quart_file.write("\n")
quart_file.close()
out += "- "+quartet_filename+" written for " + str(options.num_quartets) + " random quartets.\n"
output_file = open("organize.meta", 'w')
output_file.write(out)
output_file.close()
main() | organize.py |
import os,re,optparse,tarfile,shutil,random
from fileReader_condor import *
from quartet_condor import *
from arguments import *
def translate_quartets(ref_dict, quartet_file_name, outputSuffix):
quartet_file = open(quartet_file_name, 'r')
new_quartet_file = open('quartets'+outputSuffix+'.txt', 'w');
kount = 0
for line in quartet_file:
replacement_line = ""
first_word = True
add_quartet = True
for word in line.split():
if( first_word != True): replacement_line += " "
found_it = False
for k, v in ref_dict.items():
if v == word:
replacement_line += str(k)
found_it = True
if( found_it == False ): add_quartet = False
first_word = False
if( add_quartet): new_quartet_file.write(replacement_line+'\n');
kount += 1
new_quartet_file.close()
quartet_file.close()
return kount
def quart_file_is_codes(quartet_file_name):
'''
opens up file and checks to see if quartets are numeric
or alphanumeric meaning need translation from ref_dict.
'''
quartet_file = open(quartet_file_name, 'r')
for line in quartet_file:
if re.search('[^0-9\s]', line):
return False
return True
def write_translate_table(ref_dict, outputSuffix):
'''
Writes a translate table for all taxa
:Input: A reference dictionary containing number/species-name pairs
'''
translate_filename = 'translate' + outputSuffix + '.txt'
translate_file = open(translate_filename, 'w')
kount = 0
for k, v in ref_dict.items():
translate_file.write(str(k) + "\t" + v + "\n")
kount += 1
translate_file.close()
return kount
def write_gene_table(ref_dict, outputSuffix):
'''
Writes a translate table for all genes
:Input: A reference dictionary containing genes
'''
gene_filename = "genes" + outputSuffix + ".txt"
gene_file = open(gene_filename, 'w')
kount = 0
for k, v in ref_dict.items():
gene_file.write(str(k) + "\t" + v + "\n")
kount += 1
gene_file.close()
return kount
'''
Finds the list of quartets to analyze, and builds the submit.run_taxa file to analyze each individual quartet
'''
def main():
parser = getParser()
options, args = parser.parse_args()
#Prepare output
out = "RUNNING organize.py: \n"
# unzip data files if zipped
if( options.data_is_zipped == 1 ):
iCount = 0
for file in os.listdir(options.gene_file):
isTar = re.search(r".tar.gz$", file )
if( isTar != None and not re.search(r"^D[0-9]+.tar.gz", file)):
iCount = iCount + 1
tarFilename = file
if( iCount != 1 ):
print "Error: not exactly one .tar.gz file in directory: %d in %s" % (iCount, options.gene_file)
return 1
args += " -z 1"
myTarfile = tarfile.open(name=options.gene_file+"/"+tarFilename,mode='r')
fr = fileReader()
#find the list of gene files to be analyzed
files = []
if (options.list_of_files != None):
list_of_files = open(options.list_of_files, 'r')
for line in list_of_files:
files.append(line.strip())
else:
if( options.data_is_zipped == 1 ):
files = myTarfile.getnames()
else:
for (dirpath, dirnames, filenames) in os.walk(options.gene_file):
files.extend(filenames)
#make reference dictionaries containing all taxa and all genes found in the data set
taxa_dict = {}
gene_dict = {}
gene_count = 0
for file in files:
ignore_this_file = False
if( options.data_is_zipped == 1 ):
fileInfo = myTarfile.getmember(file)
if( fileInfo.isfile() ):
nex_file = myTarfile.extractfile(fileInfo.name)
else:
ignore_this_file = True
else:
nex_file = open(options.gene_file + file, 'r')
if( ignore_this_file == False ):
taxa = fr.get_taxa(nex_file)
taxa_dict = fr.make_dict(taxa_dict, taxa)
nex_file.close()
try:
use_name = re.findall('[^/]+$', file)[0]
except:
print "Error: problem reading file name: %s" % (file)
return 1
gene_dict[str(gene_count)] = use_name
gene_count += 1
#write a translate table for reference
numTaxa = write_translate_table(taxa_dict, options.outputSuffix)
out += "- translate" + options.outputSuffix + ".txt written for " + str(numTaxa) + " taxa.\n"
#write a gene table
numGenes = write_gene_table(gene_dict, options.outputSuffix)
out += "- genes" + options.outputSuffix + ".txt written for " + str(numGenes) + " genes.\n"
#find the list of quartets to analyze
q = quartet()
quartet_filename = "quartets" + options.outputSuffix + ".txt"
#use a user-specified list if given
if (options.list_of_quartets != None):
if (options.list_of_quartets == quartet_filename):
print "Quartet file cannot be named <"+quartet_filename+">; that filename reserved. Please rename."
return 1
#open up user-specified file, if simply codes, rename if necessary and continue.
is_codes = quart_file_is_codes(options.list_of_quartets)
if( is_codes ):
shutil.copyfile(options.list_of_quartets, quartet_filename)
out += "- "+quartet_filename+" copied from " + options.list_of_quartets + ".\n"
num_quartets = 0
with open(quartets_filename, 'r') as input:
for line in input:
num_quartets += 1
else:
num_quartets = translate_quartets(taxa_dict, options.list_of_quartets, options.outputSuffix)
if( num_quartets == False):
print "Error: supplied quartets file could not be translated."
return 1
#Now subsample from file.
if( options.num_quartets != 0 ):
num_lines = sum(1 for line in open(quartet_filename))
if( options.num_quartets > num_lines ):
print "Error: requested quartets more than quartets in file."
return 1
myQuartets = random.sample(xrange(1,num_lines+1), options.num_quartets)
myQuartets.sort()
curQ = 0
curLine = 0
with open(quartet_filename, 'r') as input:
with open('xquartets.txt', 'w') as output:
for line in input:
curLine=curLine+1;
if (curLine in myQuartets):
output.write(line)
os.remove(quartet_filename)
os.rename("xquartets.txt", quartet_filename)
out += "- "+quartet_filename+" written for " + str(options.num_quartets) + " quartets given in " + options.list_of_quartets + ".\n"
else:
out += "- "+quartet_filename+" written for " + str(num_quartets) + " quartets given in " + options.list_of_quartets + ".\n"
#pick random quartets if no user-specified list
else:
quart_file = open(quartet_filename, 'w')
for i in range(options.num_quartets):
rand_taxa = q.pick_random_quartet(len(taxa_dict))
#print rand_taxa
for num in rand_taxa:
quart_file.write(str(num) + " ")
quart_file.write("\n")
quart_file.close()
out += "- "+quartet_filename+" written for " + str(options.num_quartets) + " random quartets.\n"
output_file = open("organize.meta", 'w')
output_file.write(out)
output_file.close()
main() | 0.212845 | 0.185799 |
import graphene
from graphene import ObjectType, Schema
from handlers.graphql.resolvers.console import resolve_console
from handlers.graphql.resolvers.subscription_utils import MakeSubscription, resolve_item_by_key, \
MakeSubscriptionWithChangeType, resolve_all_items_changes
from handlers.graphql.types.input.attachiso import AttachISOMutation
from handlers.graphql.types.input.attachnet import AttachNetworkMutation
from handlers.graphql.types.input.attachvdi import AttachVDIMutation
from handlers.graphql.types.input.createvm import CreateVM
from handlers.graphql.types.input.vm import VMMutation, VMStartMutation, VMShutdownMutation, VMRebootMutation, \
VMPauseMutation, VMDeleteMutation
from handlers.graphql.types.playbook import GPlaybook, resolve_playbooks, resolve_playbook
from handlers.graphql.types.playbooklauncher import PlaybookLaunchMutation
from handlers.graphql.types.tasks.playbook import PlaybookTask, PlaybookTaskList
from playbookloader import PlaybookLoader
from xenadapter.disk import GISO, GVDI, ISO, VDI
from xenadapter.host import Host, GHost
from xenadapter.pool import GPool, Pool
from xenadapter.task import GTask
from xenadapter.template import Template, GTemplate
from xenadapter.sr import SR, GSR
from xenadapter.vm import VM, GVM
from xenadapter.network import Network, GNetwork
from handlers.graphql.types.input.template import TemplateMutation
from rethinkdb import RethinkDB
from tornado.options import options as opts
# Module-level RethinkDB query builder; the Subscription resolvers below
# build queries against it via r.db(opts.database) on every call.
r = RethinkDB()
class Query(ObjectType):
    """GraphQL root query type: read-only access to Xen objects and playbooks.

    List fields ("All ... available to user") resolve via the corresponding
    xenadapter class's resolve_all(); single-object fields take a `uuid`
    argument (playbooks and playbook tasks use `id`).
    """
    vms = graphene.List(GVM, required=True, resolver=VM.resolve_all(), description="All VMs available to user")
    vm = graphene.Field(GVM, uuid=graphene.ID(), resolver=VM.resolve_one())
    templates = graphene.List(GTemplate, required=True, resolver=Template.resolve_all(), description="All Templates available to user")
    # BUG FIX: was graphene.Field(GVM, ...) — a single-template query must
    # return the template GraphQL type, consistent with `templates` above.
    template = graphene.Field(GTemplate, uuid=graphene.ID(), resolver=Template.resolve_one())
    hosts = graphene.List(GHost, required=True, resolver=Host.resolve_all())
    host = graphene.Field(GHost, uuid=graphene.ID(), resolver=Host.resolve_one())
    pools = graphene.List(GPool, required=True, resolver=Pool.resolve_all())
    pool = graphene.Field(GPool, uuid=graphene.ID(), resolver=Pool.resolve_one())
    networks = graphene.List(GNetwork, required=True, resolver=Network.resolve_all(), description="All Networks available to user")
    network = graphene.Field(GNetwork, uuid=graphene.ID(), resolver=Network.resolve_one(), description="Information about a single network")
    srs = graphene.List(GSR, required=True, resolver=SR.resolve_all(),
                        description="All Storage repositories available to user")
    sr = graphene.Field(GSR, uuid=graphene.ID(), resolver=SR.resolve_one(), description="Information about a single storage repository")
    vdis = graphene.List(GVDI, required=True, resolver=VDI.resolve_all(), description="All Virtual Disk Images (hard disks), available for user")
    vdi = graphene.Field(GVDI, uuid=graphene.ID(), resolver=VDI.resolve_one(), description="Information about a single virtual disk image (hard disk)")
    isos = graphene.List(GISO, required=True, resolver=ISO.resolve_all(), description="All ISO images available for user")
    # BUG FIX: was graphene.Field(GVDI, ...) — use the ISO-specific type,
    # consistent with `isos` above (GISO is also registered in the Schema).
    iso = graphene.Field(GISO, uuid=graphene.ID(), resolver=ISO.resolve_one(),
                         description="Information about a single ISO image")
    playbooks = graphene.List(GPlaybook, required=True, resolver=resolve_playbooks, description="List of Ansible-powered playbooks")
    playbook = graphene.Field(GPlaybook, id=graphene.ID(), resolver=resolve_playbook,
                              description="Information about Ansible-powered playbook")
    playbook_task = graphene.Field(PlaybookTask, id=graphene.NonNull(graphene.ID),
                                   description="Info about a playbook task", resolver=PlaybookTaskList.resolve_one())
    playbook_tasks = graphene.List(PlaybookTask, required=True,
                                   description="All Playbook Tasks", resolver=PlaybookTaskList.resolve_all())
    console = graphene.Field(graphene.String, required=False, vm_uuid=graphene.NonNull(graphene.ID),
                             description="One-time link to RFB console for a VM", resolver=resolve_console)
class Mutation(ObjectType):
    """GraphQL root mutation type.

    Each field delegates to a dedicated graphene mutation class (imported at
    the top of this module) that performs the actual XenServer or Ansible
    work; the descriptions below are surfaced in the GraphQL schema.
    """
    create_VM = CreateVM.Field(description="Create a new VM")
    template = TemplateMutation.Field(description="Edit template options")
    vm = VMMutation.Field(description="Edit VM options")
    vm_start = VMStartMutation.Field(description="Start VM")
    vm_shutdown = VMShutdownMutation.Field(description="Shut down VM")
    vm_reboot = VMRebootMutation.Field(description="Reboot VM")
    vm_pause = VMPauseMutation.Field(description="If VM is Running, pause VM. If Paused, unpause VM")
    playbook_launch = PlaybookLaunchMutation.Field(description="Launch an Ansible Playbook on specified VMs")
    vm_delete = VMDeleteMutation.Field(description="Delete a Halted VM")
    net_attach = AttachNetworkMutation.Field(description="Attach VM to a Network by creating a new Interface")
    iso_attach = AttachISOMutation.Field(description="Attach ISO to a VM by creating a new virtual block device")
    vdi_attach = AttachVDIMutation.Field(description="Attach VDI to a VM by creating a new virtual block device")
class Subscription(ObjectType):
    '''
    All subscriptions must return Observable
    '''
    # MakeSubscriptionWithChangeType(...) wraps the payload type for whole-table
    # subscriptions, MakeSubscription(...) for single-object subscriptions
    # (see handlers.graphql.resolvers.subscription_utils).
    vms = graphene.Field(MakeSubscriptionWithChangeType(GVM), required=True, description="Updates for all VMs")
    vm = graphene.Field(MakeSubscription(GVM), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular VM")
    hosts = graphene.Field(MakeSubscriptionWithChangeType(GHost), required=True, description="Updates for all Hosts")
    host = graphene.Field(MakeSubscription(GHost), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular Host")
    pools = graphene.Field(MakeSubscriptionWithChangeType(GPool), required=True, description="Updates for all pools available in VMEmperor")
    pool = graphene.Field(MakeSubscription(GPool), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular Pool")
    task = graphene.Field(MakeSubscription(GTask), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular XenServer Task")
    playbook_task = graphene.Field(MakeSubscription(PlaybookTask), id=graphene.NonNull(graphene.ID), description="Updates for a particular Playbook installation Task")
    playbook_tasks = graphene.Field(MakeSubscriptionWithChangeType(PlaybookTask), required=True, description="Updates for all Playbook Tasks")

    # Each resolver binds a subscription_utils helper to a RethinkDB table
    # (presumably a changefeed — see resolve_all_items_changes) and forwards
    # the graphene call arguments.  r.db(opts.database) is re-evaluated on
    # every call, so the configured database is picked up at subscribe time.
    def resolve_task(*args, **kwargs):
        return resolve_item_by_key(GTask, r.db(opts.database), 'tasks', key_name='uuid')(*args, **kwargs)

    def resolve_vms(*args, **kwargs):
        return resolve_all_items_changes(GVM, r.db(opts.database), 'vms')(*args, **kwargs)

    def resolve_vm(*args, **kwargs):
        return resolve_item_by_key(GVM, r.db(opts.database), 'vms', key_name='uuid')(*args, **kwargs)

    def resolve_hosts(*args, **kwargs):
        return resolve_all_items_changes(GHost, r.db(opts.database), 'hosts')(*args, **kwargs)

    def resolve_host(*args, **kwargs):
        return resolve_item_by_key(GHost, r.db(opts.database), 'hosts', key_name='uuid')(*args, **kwargs)

    def resolve_pools(*args, **kwargs):
        return resolve_all_items_changes(GPool, r.db(opts.database), 'pools')(*args, **kwargs)

    def resolve_pool(*args, **kwargs):
        return resolve_item_by_key(GPool, r.db(opts.database), 'pools', key_name='uuid')(*args, **kwargs)

    def resolve_playbook_task(*args, **kwargs):
        # Playbook tasks are keyed by 'id', not 'uuid', unlike the Xen objects.
        return resolve_item_by_key(PlaybookTask, r.db(opts.database), 'tasks_playbooks', key_name='id')(*args, **kwargs)

    def resolve_playbook_tasks(*args, **kwargs):
        return resolve_all_items_changes(PlaybookTask, r.db(opts.database), 'tasks_playbooks')(*args, **kwargs)
schema = Schema(query=Query, mutation=Mutation, types=[GISO, GVDI], subscription=Subscription) | handlers/graphql/root.py | import graphene
from graphene import ObjectType, Schema
from handlers.graphql.resolvers.console import resolve_console
from handlers.graphql.resolvers.subscription_utils import MakeSubscription, resolve_item_by_key, \
MakeSubscriptionWithChangeType, resolve_all_items_changes
from handlers.graphql.types.input.attachiso import AttachISOMutation
from handlers.graphql.types.input.attachnet import AttachNetworkMutation
from handlers.graphql.types.input.attachvdi import AttachVDIMutation
from handlers.graphql.types.input.createvm import CreateVM
from handlers.graphql.types.input.vm import VMMutation, VMStartMutation, VMShutdownMutation, VMRebootMutation, \
VMPauseMutation, VMDeleteMutation
from handlers.graphql.types.playbook import GPlaybook, resolve_playbooks, resolve_playbook
from handlers.graphql.types.playbooklauncher import PlaybookLaunchMutation
from handlers.graphql.types.tasks.playbook import PlaybookTask, PlaybookTaskList
from playbookloader import PlaybookLoader
from xenadapter.disk import GISO, GVDI, ISO, VDI
from xenadapter.host import Host, GHost
from xenadapter.pool import GPool, Pool
from xenadapter.task import GTask
from xenadapter.template import Template, GTemplate
from xenadapter.sr import SR, GSR
from xenadapter.vm import VM, GVM
from xenadapter.network import Network, GNetwork
from handlers.graphql.types.input.template import TemplateMutation
from rethinkdb import RethinkDB
from tornado.options import options as opts
# Module-level RethinkDB query builder; the Subscription resolvers below
# build queries against it via r.db(opts.database) on every call.
r = RethinkDB()
class Query(ObjectType):
    """GraphQL root query type: read-only access to Xen objects and playbooks.

    List fields ("All ... available to user") resolve via the corresponding
    xenadapter class's resolve_all(); single-object fields take a `uuid`
    argument (playbooks and playbook tasks use `id`).
    """
    vms = graphene.List(GVM, required=True, resolver=VM.resolve_all(), description="All VMs available to user")
    vm = graphene.Field(GVM, uuid=graphene.ID(), resolver=VM.resolve_one())
    templates = graphene.List(GTemplate, required=True, resolver=Template.resolve_all(), description="All Templates available to user")
    # BUG FIX: was graphene.Field(GVM, ...) — a single-template query must
    # return the template GraphQL type, consistent with `templates` above.
    template = graphene.Field(GTemplate, uuid=graphene.ID(), resolver=Template.resolve_one())
    hosts = graphene.List(GHost, required=True, resolver=Host.resolve_all())
    host = graphene.Field(GHost, uuid=graphene.ID(), resolver=Host.resolve_one())
    pools = graphene.List(GPool, required=True, resolver=Pool.resolve_all())
    pool = graphene.Field(GPool, uuid=graphene.ID(), resolver=Pool.resolve_one())
    networks = graphene.List(GNetwork, required=True, resolver=Network.resolve_all(), description="All Networks available to user")
    network = graphene.Field(GNetwork, uuid=graphene.ID(), resolver=Network.resolve_one(), description="Information about a single network")
    srs = graphene.List(GSR, required=True, resolver=SR.resolve_all(),
                        description="All Storage repositories available to user")
    sr = graphene.Field(GSR, uuid=graphene.ID(), resolver=SR.resolve_one(), description="Information about a single storage repository")
    vdis = graphene.List(GVDI, required=True, resolver=VDI.resolve_all(), description="All Virtual Disk Images (hard disks), available for user")
    vdi = graphene.Field(GVDI, uuid=graphene.ID(), resolver=VDI.resolve_one(), description="Information about a single virtual disk image (hard disk)")
    isos = graphene.List(GISO, required=True, resolver=ISO.resolve_all(), description="All ISO images available for user")
    # BUG FIX: was graphene.Field(GVDI, ...) — use the ISO-specific type,
    # consistent with `isos` above (GISO is also registered in the Schema).
    iso = graphene.Field(GISO, uuid=graphene.ID(), resolver=ISO.resolve_one(),
                         description="Information about a single ISO image")
    playbooks = graphene.List(GPlaybook, required=True, resolver=resolve_playbooks, description="List of Ansible-powered playbooks")
    playbook = graphene.Field(GPlaybook, id=graphene.ID(), resolver=resolve_playbook,
                              description="Information about Ansible-powered playbook")
    playbook_task = graphene.Field(PlaybookTask, id=graphene.NonNull(graphene.ID),
                                   description="Info about a playbook task", resolver=PlaybookTaskList.resolve_one())
    playbook_tasks = graphene.List(PlaybookTask, required=True,
                                   description="All Playbook Tasks", resolver=PlaybookTaskList.resolve_all())
    console = graphene.Field(graphene.String, required=False, vm_uuid=graphene.NonNull(graphene.ID),
                             description="One-time link to RFB console for a VM", resolver=resolve_console)
class Mutation(ObjectType):
    """GraphQL root mutation type.

    Each field delegates to a dedicated graphene mutation class (imported at
    the top of this module) that performs the actual XenServer or Ansible
    work; the descriptions below are surfaced in the GraphQL schema.
    """
    create_VM = CreateVM.Field(description="Create a new VM")
    template = TemplateMutation.Field(description="Edit template options")
    vm = VMMutation.Field(description="Edit VM options")
    vm_start = VMStartMutation.Field(description="Start VM")
    vm_shutdown = VMShutdownMutation.Field(description="Shut down VM")
    vm_reboot = VMRebootMutation.Field(description="Reboot VM")
    vm_pause = VMPauseMutation.Field(description="If VM is Running, pause VM. If Paused, unpause VM")
    playbook_launch = PlaybookLaunchMutation.Field(description="Launch an Ansible Playbook on specified VMs")
    vm_delete = VMDeleteMutation.Field(description="Delete a Halted VM")
    net_attach = AttachNetworkMutation.Field(description="Attach VM to a Network by creating a new Interface")
    iso_attach = AttachISOMutation.Field(description="Attach ISO to a VM by creating a new virtual block device")
    vdi_attach = AttachVDIMutation.Field(description="Attach VDI to a VM by creating a new virtual block device")
class Subscription(ObjectType):
    '''
    All subscriptions must return Observable
    '''
    # MakeSubscriptionWithChangeType(...) wraps the payload type for whole-table
    # subscriptions, MakeSubscription(...) for single-object subscriptions
    # (see handlers.graphql.resolvers.subscription_utils).
    vms = graphene.Field(MakeSubscriptionWithChangeType(GVM), required=True, description="Updates for all VMs")
    vm = graphene.Field(MakeSubscription(GVM), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular VM")
    hosts = graphene.Field(MakeSubscriptionWithChangeType(GHost), required=True, description="Updates for all Hosts")
    host = graphene.Field(MakeSubscription(GHost), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular Host")
    pools = graphene.Field(MakeSubscriptionWithChangeType(GPool), required=True, description="Updates for all pools available in VMEmperor")
    pool = graphene.Field(MakeSubscription(GPool), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular Pool")
    task = graphene.Field(MakeSubscription(GTask), uuid=graphene.NonNull(graphene.ID), description="Updates for a particular XenServer Task")
    playbook_task = graphene.Field(MakeSubscription(PlaybookTask), id=graphene.NonNull(graphene.ID), description="Updates for a particular Playbook installation Task")
    playbook_tasks = graphene.Field(MakeSubscriptionWithChangeType(PlaybookTask), required=True, description="Updates for all Playbook Tasks")

    # Each resolver binds a subscription_utils helper to a RethinkDB table
    # (presumably a changefeed — see resolve_all_items_changes) and forwards
    # the graphene call arguments.  r.db(opts.database) is re-evaluated on
    # every call, so the configured database is picked up at subscribe time.
    def resolve_task(*args, **kwargs):
        return resolve_item_by_key(GTask, r.db(opts.database), 'tasks', key_name='uuid')(*args, **kwargs)

    def resolve_vms(*args, **kwargs):
        return resolve_all_items_changes(GVM, r.db(opts.database), 'vms')(*args, **kwargs)

    def resolve_vm(*args, **kwargs):
        return resolve_item_by_key(GVM, r.db(opts.database), 'vms', key_name='uuid')(*args, **kwargs)

    def resolve_hosts(*args, **kwargs):
        return resolve_all_items_changes(GHost, r.db(opts.database), 'hosts')(*args, **kwargs)

    def resolve_host(*args, **kwargs):
        return resolve_item_by_key(GHost, r.db(opts.database), 'hosts', key_name='uuid')(*args, **kwargs)

    def resolve_pools(*args, **kwargs):
        return resolve_all_items_changes(GPool, r.db(opts.database), 'pools')(*args, **kwargs)

    def resolve_pool(*args, **kwargs):
        return resolve_item_by_key(GPool, r.db(opts.database), 'pools', key_name='uuid')(*args, **kwargs)

    def resolve_playbook_task(*args, **kwargs):
        # Playbook tasks are keyed by 'id', not 'uuid', unlike the Xen objects.
        return resolve_item_by_key(PlaybookTask, r.db(opts.database), 'tasks_playbooks', key_name='id')(*args, **kwargs)

    def resolve_playbook_tasks(*args, **kwargs):
        return resolve_all_items_changes(PlaybookTask, r.db(opts.database), 'tasks_playbooks')(*args, **kwargs)
schema = Schema(query=Query, mutation=Mutation, types=[GISO, GVDI], subscription=Subscription) | 0.354545 | 0.131452 |
import sys, os, rtf2xml.copy, tempfile
"""
States.
1. default
1. an open bracket ends this state.
2. Text: print out the text, preceded by any groups_in_waiting.
3. closed bracket. Close groups
2. after an open bracket
1. The lack of a control word ends this state.
2. paragraph end -- close out all tags
3. footnote beg -- close out all tags
"""
class Inline:
    """
    Make inline tags within lists.
    Logic:
        A two-state machine over the tokenized RTF stream.  An open bracket
        pushes a "group in waiting"; character-info control words fill the
        innermost group's dictionary; text flushes waiting groups as
        <inline> open tags; close brackets and paragraph/footnote boundaries
        emit the matching close tags.  Separate group stacks are kept for
        list text and for body text.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level = 1,):
        """
        Required:
            'file'--file to parse
            'bug_handler'--exception raised on internal errors (run_level > 3)
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level'--verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp is preferred.
        self.__write_to = tempfile.mktemp()

    def __initiate_values(self):
        """
        Initiate all values.
        """
        # state name => handler method
        self.__state_dict = {
            'default': self.__default_func,
            'after_open_bracket': self.__after_open_bracket_func,
        }
        # token => handler while in the 'default' state
        self.__default_dict = {
            'ob<nu<open-brack': self.__found_open_bracket_func,
            'tx<nu<__________' : self.__found_text_func,
            'tx<hx<__________' : self.__found_text_func,
            'tx<ut<__________' : self.__found_text_func,
            'mi<mk<inline-fld' : self.__found_text_func,
            'text' : self.__found_text_func,
            'cb<nu<clos-brack' : self.__close_bracket_func,
            'mi<mk<par-end___' : self.__end_para_func,
            'mi<mk<footnt-ope' : self.__end_para_func,
            'mi<mk<footnt-ind' : self.__end_para_func,
        }
        # token => handler while in the 'after_open_bracket' state
        self.__after_open_bracket_dict = {
            'cb<nu<clos-brack' : self.__close_bracket_func,
            'tx<nu<__________' : self.__found_text_func,
            'tx<hx<__________' : self.__found_text_func,
            'tx<ut<__________' : self.__found_text_func,
            'text' : self.__found_text_func,
            'mi<mk<inline-fld' : self.__found_text_func,
            'ob<nu<open-brack': self.__found_open_bracket_func,
            'mi<mk<par-end___' : self.__end_para_func,
            'mi<mk<footnt-ope' : self.__end_para_func,
            'mi<mk<footnt-ind' : self.__end_para_func,
            'cw<fd<field_____' : self.__found_field_func,
        }
        self.__state = 'default'
        self.__brac_count = 0 # do I need this?
        # separate inline-group stacks for list text vs body text
        self.__list_inline_list = []
        self.__body_inline_list = []
        # one-element lists so the counter is shared by reference when
        # self.__groups_in_waiting is re-pointed between list and body
        self.__groups_in_waiting_list = [0]
        self.__groups_in_waiting_body = [0]
        self.__groups_in_waiting = self.__groups_in_waiting_body
        self.__place = 'non_list'
        self.__inline_list = self.__body_inline_list
        self.__in_para = 0 # not in paragraph
        self.__char_dict = {
            # character info => ci
            'annotation' : 'annotation',
            'blue______' : 'blue',
            'bold______' : 'bold',
            'caps______' : 'caps',
            'char-style' : 'character-style',
            'dbl-strike' : 'double-strike-through',
            'emboss____' : 'emboss',
            'engrave___' : 'engrave',
            'font-color' : 'font-color',
            'font-down_' : 'subscript',
            'font-size_' : 'font-size',
            'font-style' : 'font-style',
            'font-up___' : 'superscript',
            'footnot-mk' : 'footnote-marker',
            'green_____' : 'green',
            'hidden____' : 'hidden',
            'italics___' : 'italics',
            'outline___' : 'outline',
            'red_______' : 'red',
            'shadow____' : 'shadow',
            'small-caps' : 'small-caps',
            'strike-thr' : 'strike-through',
            'subscript_' : 'subscript',
            'superscrip' : 'superscript',
            'underlined' : 'underlined',
        }
        self.__caps_list = ['false']

    def __set_list_func(self, line):
        """
        Requires:
            line--line of text
        Returns:
            nothing
        Logic:
            On the list-text begin/end markers, switch between the list and
            body inline-group stacks (and their waiting counters).
        """
        if self.__place == 'in_list':
            if self.__token_info == 'mi<mk<lst-tx-end':
                self.__place = 'not_in_list'
                self.__inline_list = self.__body_inline_list
                self.__groups_in_waiting = self.__groups_in_waiting_body
        else:
            if self.__token_info == 'mi<mk<lst-tx-beg':
                self.__place = 'in_list'
                self.__inline_list = self.__list_inline_list
                self.__groups_in_waiting = self.__groups_in_waiting_list

    def __default_func(self, line):
        """
        Requires:
            line-- line of text
        Returns:
            nothing
        Logic:
            Dispatch on the token, then always echo the line.
        """
        action = self.__default_dict.get(self.__token_info)
        if action:
            action(line)
        self.__write_obj.write(line)

    def __found_open_bracket_func(self, line):
        """
        Requires:
            line -- current line of text
        Returns:
            nothing
        Logic:
            Change the state to 'after_open_bracket'
        """
        self.__state = 'after_open_bracket'
        self.__brac_count += 1
        self.__groups_in_waiting[0] += 1
        # push a fresh, empty inline group for this bracket
        self.__inline_list.append({})
        self.__inline_list[-1]['contains_inline'] = 0

    def __after_open_bracket_func(self, line):
        """
        Requires:
            line --line of text
        Returns:
            nothing
        Logic:
            If the token is a control word for character info (cw<ci), use another
            method to add to the dictionary.
            Use the dictionary to get the approriate function.
            Always print out the line.
        """
        if line[0:2] == 'cw':
            self.__handle_control_word(line)
        else:
            action = self.__after_open_bracket_dict.get(self.__token_info)
            if action:
                self.__state = 'default' # a non control word?
                action(line)
        self.__write_obj.write(line)

    def __handle_control_word(self, line):
        """
        Required:
            line --line of text
        Returns:
            nothing
        Logic:
            Handle the control word for inline groups.
            Add each name - value to a dictionary.
            If the font style of Symbol, Wingdings, or Dingbats is found,
            always mark this. I need this later to convert the text to
            the right utf.
        """
        # cw<ci<shadow_____<nu<true
        # self.__char_dict = {
        # fixed-width token format: field name at [6:16], value at [20:-1]
        char_info = line[6:16]
        char_value = line[20:-1]
        name = self.__char_dict.get(char_info)
        if name:
            self.__inline_list[-1]['contains_inline'] = 1
            self.__inline_list[-1][name] = char_value
        """
        if name == 'font-style':
            if char_value == 'Symbol':
                self.__write_obj.write('mi<mk<font-symbo\n')
            elif char_value == 'Wingdings':
                self.__write_obj.write('mi<mk<font-wingd\n')
            elif char_value == 'Zapf Dingbats':
                self.__write_obj.write('mi<mk<font-dingb\n')
        """

    def __close_bracket_func(self, line):
        """
        Requires:
            line --line of text
        Returns:
            Nothing
        Logic:
            If there are no inline groups, do nothing.
            Get the keys of the last dictionary in the inline_groups.
            If 'contains_inline' in the keys, write a close tag.
            If the_dict contains font information, write a mk tag.
        """
        if len(self.__inline_list) == 0:
            # nothing to add
            return
        the_dict = self.__inline_list[-1]
        the_keys = the_dict.keys()
        # always close out
        if self.__place == 'in_list':
            # a group still "in waiting" never emitted its open tag, so
            # only close groups whose waiting counter has drained to 0
            if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
                and self.__groups_in_waiting[0] == 0:
                self.__write_obj.write('mi<tg<close_____<inline\n')
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
        else:
            # close out only if in a paragraph
            if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
                and self.__in_para and self.__groups_in_waiting[0] == 0:
                self.__write_obj.write('mi<tg<close_____<inline\n')
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
        self.__inline_list.pop()
        if self.__groups_in_waiting[0] != 0:
            self.__groups_in_waiting[0] -= 1

    def __found_text_func(self, line):
        """
        Required:
            line--line of text
        Return:
            nothing
        Logic:
            Two cases:
                1. in a list. Simply write inline
                2. Not in a list
                    Text can mark the start of a paragraph.
                    If already in a paragraph, check to see if any groups are waiting
                    to be added. If so, use another method to write these groups.
        """
        if self.__place == 'in_list':
            self.__write_inline()
        else:
            if not self.__in_para:
                self.__in_para = 1
                self.__start_para_func(line)
            else:
                if self.__groups_in_waiting[0] != 0:
                    self.__write_inline()

    def __write_inline(self):
        """
        Required:
            nothing
        Returns
            Nothing
        Logic:
            Method for writing inline when text is found.
            Only write those groups that are "waiting", or that have no
            tags yet.
            First, slice the list self.__inline list to get just the groups
            in waiting.
            Iterate through this slice, which contains only dictionaries.
            Get the keys in each dictionary. If 'font-style' is in the keys,
            write a marker tag. (I will use this marker tag later when conerting
            hext text to utf8.)
            Write a tag for the inline vaues.
        """
        if self.__groups_in_waiting[0] != 0:
            last_index = -1 * self.__groups_in_waiting[0]
            inline_list = self.__inline_list[last_index:]
            if len(inline_list) <= 0:
                # inconsistent counters: bail out, loudly at high run levels
                if self.__run_level > 3:
                    msg = 'self.__inline_list is %s\n' % self.__inline_list
                    raise self.__bug_handler, msg
                self.__write_obj.write('error\n')
                self.__groups_in_waiting[0] = 0
                return
            for the_dict in inline_list:
                if the_dict['contains_inline']:
                    the_keys = the_dict.keys()
                    if 'font-style' in the_keys:
                        face = the_dict['font-style']
                        self.__write_obj.write('mi<mk<font______<%s\n' % face)
                    if 'caps' in the_keys:
                        value = the_dict['caps']
                        self.__write_obj.write('mi<mk<caps______<%s\n' % value)
                    # open tag carries every attribute except the bookkeeping flag
                    self.__write_obj.write('mi<tg<open-att__<inline')
                    for the_key in the_keys:
                        if the_key != 'contains_inline':
                            self.__write_obj.write('<%s>%s' % (the_key, the_dict[the_key]))
                    self.__write_obj.write('\n')
            self.__groups_in_waiting[0] = 0

    def __end_para_func(self, line):
        """
        Requires:
            line -- line of text
        Returns:
            nothing
        Logic:
            Slice from the end the groups in waiting.
            Iterate through the list. If the dictionary contaings info, write
            a closing tag.
        """
        if not self.__in_para:
            return
        # groups still "in waiting" never opened, so exclude them from closing
        if self.__groups_in_waiting[0] == 0:
            inline_list = self.__inline_list
        else:
            last_index = -1 * self.__groups_in_waiting[0]
            inline_list = self.__inline_list[0:last_index]
        for the_dict in inline_list:
            contains_info = the_dict.get('contains_inline')
            if contains_info:
                the_keys = the_dict.keys()
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
                self.__write_obj.write('mi<tg<close_____<inline\n')
        self.__in_para = 0

    def __start_para_func(self, line):
        """
        Requires:
            line -- line of text
        Returns:
            nothing
        Logic:
            Iterate through the self.__inline_list to get each dict.
            If the dict containst inline info, get the keys.
            Iterate through the keys and print out the key and value.
        """
        for the_dict in self.__inline_list:
            contains_info = the_dict.get('contains_inline')
            if contains_info :
                the_keys = the_dict.keys()
                if 'font-style' in the_keys:
                    face = the_dict['font-style']
                    self.__write_obj.write('mi<mk<font______<%s\n' % face)
                if 'caps' in the_keys:
                    value = the_dict['caps']
                    self.__write_obj.write('mi<mk<caps______<%s\n' % value)
                self.__write_obj.write('mi<tg<open-att__<inline')
                for the_key in the_keys:
                    if the_key != 'contains_inline':
                        self.__write_obj.write('<%s>%s' % (the_key, the_dict[the_key]))
                self.__write_obj.write('\n')
        # every group has now been opened; none are waiting
        self.__groups_in_waiting[0] = 0

    def __found_field_func(self, line):
        """
        Just a default function to make sure I don't prematurely exit
        default state
        """
        pass

    def form_tags(self):
        """
        Requires:
            area--area to parse (list or non-list)
        Returns:
            nothing
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state.
        """
        self.__initiate_values()
        read_obj = open(self.__file, 'r')
        self.__write_obj = open(self.__write_to, 'w')
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            token = line[0:-1]
            self.__token_info = ''
            # collapse typographic special characters to a generic 'text' token
            if token == 'tx<mc<__________<rdblquote'\
                or token == 'tx<mc<__________<ldblquote'\
                or token == 'tx<mc<__________<lquote'\
                or token == 'tx<mc<__________<rquote'\
                or token == 'tx<mc<__________<emdash'\
                or token == 'tx<mc<__________<endash'\
                or token == 'tx<mc<__________<bullet':
                self.__token_info = 'text'
            else:
                self.__token_info = line[:16]
            self.__set_list_func(line)
            action = self.__state_dict.get(self.__state)
            if action == None:
                # NOTE(review): only warns — the unconditional call below would
                # still raise TypeError if action were ever None.  Confirm the
                # state is always present in __state_dict.
                sys.stderr.write('No matching state in module inline_for_lists.py\n')
                sys.stderr.write(self.__state + '\n')
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "inline.data")
        copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to) | WorkingDirectory/DaisyPipeline/transformers/ca_cnib_rtf2dtbook/rtf2xml-py/rtf2xml/inline.py | import sys, os, rtf2xml.copy, tempfile
"""
States.
1. default
1. an open bracket ends this state.
2. Text: print out the text, preceded by any groups_in_waiting.
3. closed bracket. Close groups
2. after an open bracket
1. The lack of a control word ends this state.
2. paragraph end -- close out all tags
3. footnote beg -- close out all tags
"""
class Inline:
"""
Make inline tags within lists.
Logic:
"""
def __init__(self,
in_file,
bug_handler,
copy=None,
run_level = 1,):
"""
Required:
'file'--file to parse
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = tempfile.mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
self.__state_dict = {
'default': self.__default_func,
'after_open_bracket': self.__after_open_bracket_func,
}
self.__default_dict = {
'ob<nu<open-brack': self.__found_open_bracket_func,
'tx<nu<__________' : self.__found_text_func,
'tx<hx<__________' : self.__found_text_func,
'tx<ut<__________' : self.__found_text_func,
'mi<mk<inline-fld' : self.__found_text_func,
'text' : self.__found_text_func,
'cb<nu<clos-brack' : self.__close_bracket_func,
'mi<mk<par-end___' : self.__end_para_func,
'mi<mk<footnt-ope' : self.__end_para_func,
'mi<mk<footnt-ind' : self.__end_para_func,
}
self.__after_open_bracket_dict = {
'cb<nu<clos-brack' : self.__close_bracket_func,
'tx<nu<__________' : self.__found_text_func,
'tx<hx<__________' : self.__found_text_func,
'tx<ut<__________' : self.__found_text_func,
'text' : self.__found_text_func,
'mi<mk<inline-fld' : self.__found_text_func,
'ob<nu<open-brack': self.__found_open_bracket_func,
'mi<mk<par-end___' : self.__end_para_func,
'mi<mk<footnt-ope' : self.__end_para_func,
'mi<mk<footnt-ind' : self.__end_para_func,
'cw<fd<field_____' : self.__found_field_func,
}
self.__state = 'default'
self.__brac_count = 0 # do I need this?
self.__list_inline_list = []
self.__body_inline_list = []
self.__groups_in_waiting_list = [0]
self.__groups_in_waiting_body = [0]
self.__groups_in_waiting = self.__groups_in_waiting_body
self.__place = 'non_list'
self.__inline_list = self.__body_inline_list
self.__in_para = 0 # not in paragraph
self.__char_dict = {
# character info => ci
'annotation' : 'annotation',
'blue______' : 'blue',
'bold______' : 'bold',
'caps______' : 'caps',
'char-style' : 'character-style',
'dbl-strike' : 'double-strike-through',
'emboss____' : 'emboss',
'engrave___' : 'engrave',
'font-color' : 'font-color',
'font-down_' : 'subscript',
'font-size_' : 'font-size',
'font-style' : 'font-style',
'font-up___' : 'superscript',
'footnot-mk' : 'footnote-marker',
'green_____' : 'green',
'hidden____' : 'hidden',
'italics___' : 'italics',
'outline___' : 'outline',
'red_______' : 'red',
'shadow____' : 'shadow',
'small-caps' : 'small-caps',
'strike-thr' : 'strike-through',
'subscript_' : 'subscript',
'superscrip' : 'superscript',
'underlined' : 'underlined',
}
self.__caps_list = ['false']
def __set_list_func(self, line):
"""
Requires:
line--line of text
Returns:
nothing
Logic:
"""
if self.__place == 'in_list':
if self.__token_info == 'mi<mk<lst-tx-end':
self.__place = 'not_in_list'
self.__inline_list = self.__body_inline_list
self.__groups_in_waiting = self.__groups_in_waiting_body
else:
if self.__token_info == 'mi<mk<lst-tx-beg':
self.__place = 'in_list'
self.__inline_list = self.__list_inline_list
self.__groups_in_waiting = self.__groups_in_waiting_list
def __default_func(self, line):
"""
Requires:
line-- line of text
Returns:
nothing
Logic:
"""
action = self.__default_dict.get(self.__token_info)
if action:
action(line)
self.__write_obj.write(line)
def __found_open_bracket_func(self, line):
"""
Requires:
line -- current line of text
Returns:
nothing
Logic:
Change the state to 'after_open_bracket'
"""
self.__state = 'after_open_bracket'
self.__brac_count += 1
self.__groups_in_waiting[0] += 1
self.__inline_list.append({})
self.__inline_list[-1]['contains_inline'] = 0
def __after_open_bracket_func(self, line):
"""
Requires:
line --line of text
Returns:
nothing
Logic:
If the token is a control word for character info (cw<ci), use another
method to add to the dictionary.
Use the dictionary to get the approriate function.
Always print out the line.
"""
if line[0:2] == 'cw':
self.__handle_control_word(line)
else:
action = self.__after_open_bracket_dict.get(self.__token_info)
if action:
self.__state = 'default' # a non control word?
action(line)
self.__write_obj.write(line)
def __handle_control_word(self, line):
"""
Required:
line --line of text
Returns:
nothing
Logic:
Handle the control word for inline groups.
Add each name - value to a dictionary.
If the font style of Symbol, Wingdings, or Dingbats is found,
always mark this. I need this later to convert the text to
the right utf.
"""
# cw<ci<shadow_____<nu<true
# self.__char_dict = {
char_info = line[6:16]
char_value = line[20:-1]
name = self.__char_dict.get(char_info)
if name:
self.__inline_list[-1]['contains_inline'] = 1
self.__inline_list[-1][name] = char_value
"""
if name == 'font-style':
if char_value == 'Symbol':
self.__write_obj.write('mi<mk<font-symbo\n')
elif char_value == 'Wingdings':
self.__write_obj.write('mi<mk<font-wingd\n')
elif char_value == 'Zapf Dingbats':
self.__write_obj.write('mi<mk<font-dingb\n')
"""
def __close_bracket_func(self, line):
"""
Requires:
line --line of text
Returns:
Nothing
Logic:
If there are no inline groups, do nothing.
Get the keys of the last dictionary in the inline_groups.
If 'contains_inline' in the keys, write a close tag.
If the_dict contains font information, write a mk tag.
"""
if len(self.__inline_list) == 0:
# nothing to add
return
the_dict = self.__inline_list[-1]
the_keys = the_dict.keys()
# always close out
if self.__place == 'in_list':
if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
and self.__groups_in_waiting[0] == 0:
self.__write_obj.write('mi<tg<close_____<inline\n')
if 'font-style' in the_keys:
self.__write_obj.write('mi<mk<font-end__\n')
if 'caps' in the_keys:
self.__write_obj.write('mi<mk<caps-end__\n')
else:
# close out only if in a paragraph
if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
and self.__in_para and self.__groups_in_waiting[0] == 0:
self.__write_obj.write('mi<tg<close_____<inline\n')
if 'font-style' in the_keys:
self.__write_obj.write('mi<mk<font-end__\n')
if 'caps' in the_keys:
self.__write_obj.write('mi<mk<caps-end__\n')
self.__inline_list.pop()
if self.__groups_in_waiting[0] != 0:
self.__groups_in_waiting[0] -= 1
def __found_text_func(self, line):
"""
Required:
line--line of text
Return:
nothing
Logic:
Two cases:
1. in a list. Simply write inline
2. Not in a list
Text can mark the start of a paragraph.
If already in a paragraph, check to see if any groups are waiting
to be added. If so, use another method to write these groups.
"""
if self.__place == 'in_list':
self.__write_inline()
else:
if not self.__in_para:
self.__in_para = 1
self.__start_para_func(line)
else:
if self.__groups_in_waiting[0] != 0:
self.__write_inline()
def __write_inline(self):
"""
Required:
nothing
Returns
Nothing
Logic:
Method for writing inline when text is found.
Only write those groups that are "waiting", or that have no
tags yet.
First, slice the list self.__inline list to get just the groups
in waiting.
Iterate through this slice, which contains only dictionaries.
Get the keys in each dictionary. If 'font-style' is in the keys,
write a marker tag. (I will use this marker tag later when conerting
hext text to utf8.)
Write a tag for the inline vaues.
"""
if self.__groups_in_waiting[0] != 0:
last_index = -1 * self.__groups_in_waiting[0]
inline_list = self.__inline_list[last_index:]
if len(inline_list) <= 0:
if self.__run_level > 3:
msg = 'self.__inline_list is %s\n' % self.__inline_list
raise self.__bug_handler, msg
self.__write_obj.write('error\n')
self.__groups_in_waiting[0] = 0
return
for the_dict in inline_list:
if the_dict['contains_inline']:
the_keys = the_dict.keys()
if 'font-style' in the_keys:
face = the_dict['font-style']
self.__write_obj.write('mi<mk<font______<%s\n' % face)
if 'caps' in the_keys:
value = the_dict['caps']
self.__write_obj.write('mi<mk<caps______<%s\n' % value)
self.__write_obj.write('mi<tg<open-att__<inline')
for the_key in the_keys:
if the_key != 'contains_inline':
self.__write_obj.write('<%s>%s' % (the_key, the_dict[the_key]))
self.__write_obj.write('\n')
self.__groups_in_waiting[0] = 0
def __end_para_func(self, line):
"""
Requires:
line -- line of text
Returns:
nothing
Logic:
Slice from the end the groups in waiting.
Iterate through the list. If the dictionary contaings info, write
a closing tag.
"""
if not self.__in_para:
return
if self.__groups_in_waiting[0] == 0:
inline_list = self.__inline_list
else:
last_index = -1 * self.__groups_in_waiting[0]
inline_list = self.__inline_list[0:last_index]
for the_dict in inline_list:
contains_info = the_dict.get('contains_inline')
if contains_info:
the_keys = the_dict.keys()
if 'font-style' in the_keys:
self.__write_obj.write('mi<mk<font-end__\n')
if 'caps' in the_keys:
self.__write_obj.write('mi<mk<caps-end__\n')
self.__write_obj.write('mi<tg<close_____<inline\n')
self.__in_para = 0
def __start_para_func(self, line):
"""
Requires:
line -- line of text
Returns:
nothing
Logic:
Iterate through the self.__inline_list to get each dict.
If the dict containst inline info, get the keys.
Iterate through the keys and print out the key and value.
"""
for the_dict in self.__inline_list:
contains_info = the_dict.get('contains_inline')
if contains_info :
the_keys = the_dict.keys()
if 'font-style' in the_keys:
face = the_dict['font-style']
self.__write_obj.write('mi<mk<font______<%s\n' % face)
if 'caps' in the_keys:
value = the_dict['caps']
self.__write_obj.write('mi<mk<caps______<%s\n' % value)
self.__write_obj.write('mi<tg<open-att__<inline')
for the_key in the_keys:
if the_key != 'contains_inline':
self.__write_obj.write('<%s>%s' % (the_key, the_dict[the_key]))
self.__write_obj.write('\n')
self.__groups_in_waiting[0] = 0
    def __found_field_func(self, line):
        """
        Just a default function to make sure I don't prematurely exit
        default state
        """
        # Intentional no-op: this handler only exists so the dispatch
        # table finds an entry for field tokens and stays in 'default'.
        pass
def form_tags(self):
"""
Requires:
area--area to parse (list or non-list)
Returns:
nothing
Logic:
Read one line in at a time. Determine what action to take based on
the state.
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
token = line[0:-1]
self.__token_info = ''
if token == 'tx<mc<__________<rdblquote'\
or token == 'tx<mc<__________<ldblquote'\
or token == 'tx<mc<__________<lquote'\
or token == 'tx<mc<__________<rquote'\
or token == 'tx<mc<__________<emdash'\
or token == 'tx<mc<__________<endash'\
or token == 'tx<mc<__________<bullet':
self.__token_info = 'text'
else:
self.__token_info = line[:16]
self.__set_list_func(line)
action = self.__state_dict.get(self.__state)
if action == None:
sys.stderr.write('No matching state in module inline_for_lists.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "inline.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to) | 0.296858 | 0.162148 |
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Bet, Event and Wallet tables."""
    # First migration of the app; nothing earlier to depend on.
    initial = True
    dependencies = [
        # The user model is swappable, so depend on whatever is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Bet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField()),
                ('odd', models.FloatField()),
                ('reward', models.FloatField()),
                # Callable default: evaluated per row at save time.
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('open', models.BooleanField(default=True)),
                ('won', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('home_name', models.CharField(max_length=50)),
                ('away_name', models.CharField(max_length=50)),
                ('home_odd', models.FloatField(default=1.0)),
                ('away_odd', models.FloatField(default=1.0)),
                ('draw_odd', models.FloatField(default=1.0)),
                ('closed', models.BooleanField(default=False)),
                # NOTE(review): blank=True without null=True on an IntegerField
                # still requires a value at the database level -- confirm intended.
                ('result', models.IntegerField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Wallet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('money', models.FloatField()),
                ('active', models.BooleanField(default=True)),
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Bet's FKs are added after all three models exist.
        migrations.AddField(
            model_name='bet',
            name='event',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
        ),
        migrations.AddField(
            model_name='bet',
            name='wallet',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Wallet'),
        ),
] | typer/core/migrations/0001_initial.py | from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Bet, Event and Wallet tables."""
    # First migration of the app; nothing earlier to depend on.
    initial = True
    dependencies = [
        # The user model is swappable, so depend on whatever is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Bet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField()),
                ('odd', models.FloatField()),
                ('reward', models.FloatField()),
                # Callable default: evaluated per row at save time.
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('open', models.BooleanField(default=True)),
                ('won', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('home_name', models.CharField(max_length=50)),
                ('away_name', models.CharField(max_length=50)),
                ('home_odd', models.FloatField(default=1.0)),
                ('away_odd', models.FloatField(default=1.0)),
                ('draw_odd', models.FloatField(default=1.0)),
                ('closed', models.BooleanField(default=False)),
                # NOTE(review): blank=True without null=True on an IntegerField
                # still requires a value at the database level -- confirm intended.
                ('result', models.IntegerField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Wallet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('money', models.FloatField()),
                ('active', models.BooleanField(default=True)),
                ('creation_time', models.DateTimeField(default=datetime.datetime.now)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Bet's FKs are added after all three models exist.
        migrations.AddField(
            model_name='bet',
            name='event',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
        ),
        migrations.AddField(
            model_name='bet',
            name='wallet',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Wallet'),
        ),
] | 0.554953 | 0.138782 |
import unittest
from . import mock_db as dbapi
from dbutils.steady_db import (
connect as SteadyDBconnect, SteadyDBConnection, SteadyDBCursor)
class TestSteadyDB(unittest.TestCase):
    """Unit tests for the hardened connections of dbutils.steady_db.

    The tests run against the mock DB-API 2 module ``mock_db`` (imported
    as ``dbapi``), whose connections count opens, uses, queries and
    pings; most assertions below check those counters after a scripted
    sequence of calls.  The exact counter arithmetic is part of the
    contract being tested -- do not reorder statements.
    """
    def test_version(self):
        # steady_db must report the same version as the package itself.
        from dbutils import __version__, steady_db
        self.assertEqual(steady_db.__version__, __version__)
        self.assertEqual(steady_db.SteadyDBConnection.version, __version__)
    def test_mocked_connection(self):
        # Sanity-check the mock DB-API module itself (no steady_db here).
        db = dbapi.connect(
            'SteadyDBTestDB', user='SteadyDBTestUser')
        db.__class__.has_ping = False
        db.__class__.num_pings = 0
        self.assertTrue(hasattr(db, 'database'))
        self.assertEqual(db.database, 'SteadyDBTestDB')
        self.assertTrue(hasattr(db, 'user'))
        self.assertEqual(db.user, 'SteadyDBTestUser')
        self.assertTrue(hasattr(db, 'cursor'))
        self.assertTrue(hasattr(db, 'close'))
        self.assertTrue(hasattr(db, 'open_cursors'))
        self.assertTrue(hasattr(db, 'num_uses'))
        self.assertTrue(hasattr(db, 'num_queries'))
        self.assertTrue(hasattr(db, 'session'))
        self.assertTrue(hasattr(db, 'valid'))
        self.assertTrue(db.valid)
        self.assertEqual(db.open_cursors, 0)
        for i in range(3):
            cursor = db.cursor()
            self.assertEqual(db.open_cursors, 1)
            cursor.close()
            self.assertEqual(db.open_cursors, 0)
        cursor = []
        for i in range(3):
            cursor.append(db.cursor())
            self.assertEqual(db.open_cursors, i + 1)
        # dropping the cursors must close them via their destructors
        del cursor
        self.assertEqual(db.open_cursors, 0)
        cursor = db.cursor()
        self.assertTrue(hasattr(cursor, 'execute'))
        self.assertTrue(hasattr(cursor, 'fetchone'))
        self.assertTrue(hasattr(cursor, 'callproc'))
        self.assertTrue(hasattr(cursor, 'close'))
        self.assertTrue(hasattr(cursor, 'valid'))
        self.assertTrue(cursor.valid)
        self.assertEqual(db.open_cursors, 1)
        for i in range(3):
            self.assertEqual(db.num_uses, i)
            self.assertEqual(db.num_queries, i)
            cursor.execute(f'select test{i}')
            self.assertEqual(cursor.fetchone(), f'test{i}')
        self.assertTrue(cursor.valid)
        self.assertEqual(db.open_cursors, 1)
        for i in range(4):
            cursor.callproc('test')
        cursor.close()
        self.assertFalse(cursor.valid)
        self.assertEqual(db.open_cursors, 0)
        # 3 queries + 4 procedure calls = 7 uses, but only 3 queries
        self.assertEqual(db.num_uses, 7)
        self.assertEqual(db.num_queries, 3)
        self.assertRaises(dbapi.InternalError, cursor.close)
        self.assertRaises(dbapi.InternalError, cursor.execute, 'select test')
        self.assertTrue(db.valid)
        self.assertFalse(db.__class__.has_ping)
        self.assertEqual(db.__class__.num_pings, 0)
        self.assertRaises(AttributeError, db.ping)
        self.assertEqual(db.__class__.num_pings, 1)
        db.__class__.has_ping = True
        self.assertIsNone(db.ping())
        self.assertEqual(db.__class__.num_pings, 2)
        db.close()
        self.assertFalse(db.valid)
        self.assertEqual(db.num_uses, 0)
        self.assertEqual(db.num_queries, 0)
        self.assertRaises(dbapi.InternalError, db.close)
        self.assertRaises(dbapi.InternalError, db.cursor)
        self.assertRaises(dbapi.OperationalError, db.ping)
        self.assertEqual(db.__class__.num_pings, 3)
        # restore class-level state for the other tests
        db.__class__.has_ping = False
        db.__class__.num_pings = 0
    def test_broken_connection(self):
        # Invalid constructor arguments and broken databases/cursors.
        self.assertRaises(TypeError, SteadyDBConnection, None)
        self.assertRaises(TypeError, SteadyDBCursor, None)
        db = SteadyDBconnect(dbapi, database='ok')
        for i in range(3):
            db.close()
        del db
        self.assertRaises(
            dbapi.OperationalError, SteadyDBconnect, dbapi, database='error')
        db = SteadyDBconnect(dbapi, database='ok')
        cursor = db.cursor()
        for i in range(3):
            cursor.close()
        cursor = db.cursor('ok')
        for i in range(3):
            cursor.close()
        self.assertRaises(dbapi.OperationalError, db.cursor, 'error')
    def test_close(self):
        # close() only really closes when the connection is closeable;
        # _close() always closes the underlying connection.
        for closeable in (False, True):
            db = SteadyDBconnect(dbapi, closeable=closeable)
            self.assertTrue(db._con.valid)
            db.close()
            self.assertTrue(closeable ^ db._con.valid)
            db.close()
            self.assertTrue(closeable ^ db._con.valid)
            db._close()
            self.assertFalse(db._con.valid)
            db._close()
            self.assertFalse(db._con.valid)
    def test_connection(self):
        # Full lifecycle of a steady connection, including transparent
        # reopening after close() and cursor/connection invalidation.
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertTrue(isinstance(db, SteadyDBConnection))
        self.assertTrue(hasattr(db, '_con'))
        self.assertTrue(hasattr(db, '_usage'))
        self.assertEqual(db._usage, 0)
        self.assertTrue(hasattr(db._con, 'valid'))
        self.assertTrue(db._con.valid)
        self.assertTrue(hasattr(db._con, 'cursor'))
        self.assertTrue(hasattr(db._con, 'close'))
        self.assertTrue(hasattr(db._con, 'open_cursors'))
        self.assertTrue(hasattr(db._con, 'num_uses'))
        self.assertTrue(hasattr(db._con, 'num_queries'))
        self.assertTrue(hasattr(db._con, 'session'))
        self.assertTrue(hasattr(db._con, 'database'))
        self.assertEqual(db._con.database, 'SteadyDBTestDB')
        self.assertTrue(hasattr(db._con, 'user'))
        self.assertEqual(db._con.user, 'SteadyDBTestUser')
        self.assertTrue(hasattr(db, 'cursor'))
        self.assertTrue(hasattr(db, 'close'))
        self.assertEqual(db._con.open_cursors, 0)
        for i in range(3):
            cursor = db.cursor()
            self.assertEqual(db._con.open_cursors, 1)
            cursor.close()
            self.assertEqual(db._con.open_cursors, 0)
        cursor = []
        for i in range(3):
            cursor.append(db.cursor())
            self.assertEqual(db._con.open_cursors, i + 1)
        del cursor
        self.assertEqual(db._con.open_cursors, 0)
        cursor = db.cursor()
        self.assertTrue(hasattr(cursor, 'execute'))
        self.assertTrue(hasattr(cursor, 'fetchone'))
        self.assertTrue(hasattr(cursor, 'callproc'))
        self.assertTrue(hasattr(cursor, 'close'))
        self.assertTrue(hasattr(cursor, 'valid'))
        self.assertTrue(cursor.valid)
        self.assertEqual(db._con.open_cursors, 1)
        for i in range(3):
            self.assertEqual(db._usage, i)
            self.assertEqual(db._con.num_uses, i)
            self.assertEqual(db._con.num_queries, i)
            cursor.execute(f'select test{i}')
            self.assertEqual(cursor.fetchone(), f'test{i}')
        self.assertTrue(cursor.valid)
        self.assertEqual(db._con.open_cursors, 1)
        for i in range(4):
            cursor.callproc('test')
        cursor.close()
        self.assertFalse(cursor.valid)
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 7)
        self.assertEqual(db._con.num_uses, 7)
        self.assertEqual(db._con.num_queries, 3)
        # a steady cursor tolerates double close and reopens on execute
        cursor.close()
        cursor.execute('select test8')
        self.assertTrue(cursor.valid)
        self.assertEqual(db._con.open_cursors, 1)
        self.assertEqual(cursor.fetchone(), 'test8')
        self.assertEqual(db._usage, 8)
        self.assertEqual(db._con.num_uses, 8)
        self.assertEqual(db._con.num_queries, 4)
        self.assertTrue(db._con.valid)
        db.close()
        self.assertFalse(db._con.valid)
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 8)
        self.assertEqual(db._con.num_uses, 0)
        self.assertEqual(db._con.num_queries, 0)
        self.assertRaises(dbapi.InternalError, db._con.close)
        db.close()
        self.assertRaises(dbapi.InternalError, db._con.cursor)
        # a steady connection silently reopens after close()
        cursor = db.cursor()
        self.assertTrue(db._con.valid)
        cursor.execute('select test11')
        self.assertEqual(cursor.fetchone(), 'test11')
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
        cursor.callproc('test')
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 2)
        cursor2 = db.cursor()
        self.assertEqual(db._con.open_cursors, 2)
        cursor2.execute('select test13')
        self.assertEqual(cursor2.fetchone(), 'test13')
        self.assertEqual(db._con.num_queries, 3)
        db.close()
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._con.num_queries, 0)
        cursor = db.cursor()
        self.assertTrue(cursor.valid)
        cursor.callproc('test')
        # invalidating the raw cursor makes the steady cursor recover
        cursor._cursor.valid = False
        self.assertFalse(cursor.valid)
        self.assertRaises(dbapi.InternalError, cursor._cursor.callproc, 'test')
        cursor.callproc('test')
        self.assertTrue(cursor.valid)
        cursor._cursor.callproc('test')
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 3)
        # invalidating both connection and cursor forces a full reopen
        db._con.valid = cursor._cursor.valid = False
        cursor.callproc('test')
        self.assertTrue(cursor.valid)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 1)
        cursor.execute('set this')
        db.commit()
        cursor.execute('set that')
        db.rollback()
        self.assertEqual(
            db._con.session, ['this', 'commit', 'that', 'rollback'])
    def test_connection_context_handler(self):
        # `with db:` commits on success and rolls back on error.
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db._con.session, [])
        with db as con:
            con.cursor().execute('select test')
        self.assertEqual(db._con.session, ['commit'])
        try:
            with db as con:
                con.cursor().execute('error')
        except dbapi.ProgrammingError:
            error = True
        else:
            error = False
        self.assertTrue(error)
        self.assertEqual(db._con.session, ['commit', 'rollback'])
    def test_cursor_context_handler(self):
        # `with db.cursor()` closes the cursor when the block exits.
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db._con.open_cursors, 0)
        with db.cursor() as cursor:
            self.assertEqual(db._con.open_cursors, 1)
            cursor.execute('select test')
            self.assertEqual(cursor.fetchone(), 'test')
        self.assertEqual(db._con.open_cursors, 0)
    def test_cursor_as_iterator_provided(self):
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db._con.open_cursors, 0)
        cursor = db.cursor()
        self.assertEqual(db._con.open_cursors, 1)
        cursor.execute('select test')
        _cursor = cursor._cursor
        try:
            # NOTE(review): 'iter' (not '__iter__') looks deliberate here,
            # but confirm against the mock cursor's attributes.
            assert not hasattr(_cursor, 'iter')
            _cursor.__iter__ = lambda: ['test-iter']
            # NOTE(review): appears to verify the provided iterator is
            # wrapped rather than used raw (result is the fetched row).
            assert list(iter(cursor)) == ['test']
        finally:
            del _cursor.__iter__
        cursor.close()
        self.assertEqual(db._con.open_cursors, 0)
    def test_cursor_as_iterator_created(self):
        # Without a provided __iter__, iteration falls back to fetching.
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db._con.open_cursors, 0)
        cursor = db.cursor()
        self.assertEqual(db._con.open_cursors, 1)
        cursor.execute('select test')
        assert list(iter(cursor)) == ['test']
        cursor.close()
        self.assertEqual(db._con.open_cursors, 0)
    def test_connection_creator_function(self):
        # Passing the module or its connect() function must be equivalent.
        db1 = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        db2 = SteadyDBconnect(
            dbapi.connect, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db1.dbapi(), db2.dbapi())
        self.assertEqual(db1.threadsafety(), db2.threadsafety())
        self.assertEqual(db1._creator, db2._creator)
        self.assertEqual(db1._args, db2._args)
        self.assertEqual(db1._kwargs, db2._kwargs)
        db2.close()
        db1.close()
    def test_connection_maxusage(self):
        # After maxusage (10) uses the connection is renewed, except in
        # the middle of an open transaction.
        db = SteadyDBconnect(dbapi, 10)
        cursor = db.cursor()
        for i in range(100):
            cursor.execute(f'select test{i}')
            r = cursor.fetchone()
            self.assertEqual(r, f'test{i}')
            self.assertTrue(db._con.valid)
            j = i % 10 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            self.assertEqual(db._con.num_queries, j)
        self.assertEqual(db._con.open_cursors, 1)
        db.begin()
        for i in range(100):
            cursor.callproc('test')
            self.assertTrue(db._con.valid)
            if i == 49:
                db.commit()
            # inside the transaction the renewal is deferred until commit
            j = i % 10 + 1 if i > 49 else i + 11
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            j = 0 if i > 49 else 10
            self.assertEqual(db._con.num_queries, j)
        for i in range(10):
            if i == 7:
                db._con.valid = cursor._cursor.valid = False
            cursor.execute(f'select test{i}')
            r = cursor.fetchone()
            self.assertEqual(r, f'test{i}')
            j = i % 7 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            self.assertEqual(db._con.num_queries, j)
        for i in range(10):
            if i == 5:
                db._con.valid = cursor._cursor.valid = False
            cursor.callproc('test')
            j = (i + (3 if i < 5 else -5)) % 10 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            j = 3 if i < 5 else 0
            self.assertEqual(db._con.num_queries, j)
        db.close()
        cursor.execute('select test1')
        self.assertEqual(cursor.fetchone(), 'test1')
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 1)
        self.assertEqual(db._con.num_queries, 1)
    def test_connection_setsession(self):
        # setsession commands are replayed whenever the underlying
        # connection is renewed (maxusage is 3 here).
        db = SteadyDBconnect(dbapi, 3, ('set time zone', 'set datestyle'))
        self.assertTrue(hasattr(db, '_usage'))
        self.assertEqual(db._usage, 0)
        self.assertTrue(hasattr(db._con, 'open_cursors'))
        self.assertEqual(db._con.open_cursors, 0)
        self.assertTrue(hasattr(db._con, 'num_uses'))
        self.assertEqual(db._con.num_uses, 2)
        self.assertTrue(hasattr(db._con, 'num_queries'))
        self.assertEqual(db._con.num_queries, 0)
        self.assertTrue(hasattr(db._con, 'session'))
        self.assertEqual(tuple(db._con.session), ('time zone', 'datestyle'))
        for i in range(11):
            db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 4)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 5)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 4)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 5)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.close()
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 0)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
    def test_connection_failures(self):
        # The `failures` parameter controls which exceptions trigger a
        # transparent reconnect instead of propagating.
        db = SteadyDBconnect(dbapi)
        db.close()
        db.cursor()
        db = SteadyDBconnect(dbapi, failures=dbapi.InternalError)
        db.close()
        db.cursor()
        db = SteadyDBconnect(dbapi, failures=dbapi.OperationalError)
        db.close()
        self.assertRaises(dbapi.InternalError, db.cursor)
        db = SteadyDBconnect(dbapi, failures=(
            dbapi.OperationalError, dbapi.InterfaceError))
        db.close()
        self.assertRaises(dbapi.InternalError, db.cursor)
        db = SteadyDBconnect(dbapi, failures=(
            dbapi.OperationalError, dbapi.InterfaceError, dbapi.InternalError))
        db.close()
        db.cursor()
    def test_connection_failure_error(self):
        # Programming errors must not be masked by the retry logic.
        db = SteadyDBconnect(dbapi)
        cursor = db.cursor()
        db.close()
        cursor.execute('select test')
        cursor = db.cursor()
        db.close()
        self.assertRaises(dbapi.ProgrammingError, cursor.execute, 'error')
    def test_connection_set_sizes(self):
        # Input/output sizes set before a reconnect are replayed on the
        # renewed raw cursor.
        db = SteadyDBconnect(dbapi)
        cursor = db.cursor()
        cursor.execute('get sizes')
        result = cursor.fetchone()
        self.assertEqual(result, ([], {}))
        cursor.setinputsizes([7, 42, 6])
        cursor.setoutputsize(9)
        cursor.setoutputsize(15, 3)
        cursor.setoutputsize(42, 7)
        cursor.execute('get sizes')
        result = cursor.fetchone()
        self.assertEqual(result, ([7, 42, 6], {None: 9, 3: 15, 7: 42}))
        cursor.execute('get sizes')
        result = cursor.fetchone()
        self.assertEqual(result, ([], {}))
        cursor.setinputsizes([6, 42, 7])
        cursor.setoutputsize(7)
        cursor.setoutputsize(15, 3)
        cursor.setoutputsize(42, 9)
        db.close()
        cursor.execute('get sizes')
        result = cursor.fetchone()
        self.assertEqual(result, ([6, 42, 7], {None: 7, 3: 15, 9: 42}))
    def test_connection_ping_check(self):
        # The `ping` bitmask controls when ping() is used to verify the
        # connection (1 = on request, 3 = on cursor, 5 = on execute, 7 = always).
        Connection = dbapi.Connection
        Connection.has_ping = False
        Connection.num_pings = 0
        db = SteadyDBconnect(dbapi)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 0)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 0)
        self.assertIsNone(db._ping_check())
        self.assertEqual(Connection.num_pings, 1)
        db = SteadyDBconnect(dbapi, ping=7)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        self.assertIsNone(db._ping_check())
        self.assertEqual(Connection.num_pings, 2)
        Connection.has_ping = True
        db = SteadyDBconnect(dbapi)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 3)
        db = SteadyDBconnect(dbapi, ping=1)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 3)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 3)
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 4)
        db.close()
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 5)
        db = SteadyDBconnect(dbapi, ping=7)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 7)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 9)
        db = SteadyDBconnect(dbapi, ping=3)
        self.assertEqual(Connection.num_pings, 9)
        db.cursor()
        self.assertEqual(Connection.num_pings, 10)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 11)
        db = SteadyDBconnect(dbapi, ping=5)
        self.assertEqual(Connection.num_pings, 11)
        db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 12)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 12)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 13)
        db = SteadyDBconnect(dbapi, ping=7)
        self.assertEqual(Connection.num_pings, 13)
        db.cursor()
        self.assertEqual(Connection.num_pings, 14)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 15)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 16)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 17)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 18)
        db.close()
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 20)
        # restore class-level state for the other tests
        Connection.has_ping = False
        Connection.num_pings = 0
    def test_begin_transaction(self):
        # Inside a transaction, closed cursors must not silently reopen.
        db = SteadyDBconnect(dbapi, database='ok')
        cursor = db.cursor()
        cursor.close()
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
        db.begin()
        cursor = db.cursor()
        cursor.close()
        self.assertRaises(dbapi.InternalError, cursor.execute, 'select test12')
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
        db.close()
        db.begin()
        self.assertRaises(dbapi.InternalError, cursor.execute, 'select test12')
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
        db.begin()
        self.assertRaises(dbapi.ProgrammingError, cursor.execute, 'error')
        cursor.close()
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
    def test_with_begin_extension(self):
        # begin() forwards extra arguments to the raw connection's begin.
        db = SteadyDBconnect(dbapi, database='ok')
        db._con._begin_called_with = None
        def begin(a, b=None, c=7):
            db._con._begin_called_with = (a, b, c)
        db._con.begin = begin
        db.begin(42, 6)
        cursor = db.cursor()
        cursor.execute('select test13')
        self.assertEqual(cursor.fetchone(), 'test13')
        self.assertEqual(db._con._begin_called_with, (42, 6, 7))
    def test_cancel_transaction(self):
        # cancel() resets the transaction state so use continues normally.
        db = SteadyDBconnect(dbapi, database='ok')
        cursor = db.cursor()
        db.begin()
        cursor.execute('select test14')
        self.assertEqual(cursor.fetchone(), 'test14')
        db.cancel()
        cursor.execute('select test14')
        self.assertEqual(cursor.fetchone(), 'test14')
    def test_with_cancel_extension(self):
        # cancel() forwards to the raw connection's cancel when provided.
        db = SteadyDBconnect(dbapi, database='ok')
        db._con._cancel_called = None
        def cancel():
            db._con._cancel_called = 'yes'
        db._con.cancel = cancel
        db.begin()
        cursor = db.cursor()
        cursor.execute('select test15')
        self.assertEqual(cursor.fetchone(), 'test15')
        db.cancel()
        self.assertEqual(db._con._cancel_called, 'yes')
    def test_reset_transaction(self):
        # A non-closeable connection rolls back instead of closing.
        db = SteadyDBconnect(dbapi, database='ok')
        db.begin()
        self.assertFalse(db._con.session)
        db.close()
        self.assertFalse(db._con.session)
        db = SteadyDBconnect(dbapi, database='ok', closeable=False)
        db.begin()
        self.assertFalse(db._con.session)
        db.close()
        self.assertEqual(db._con.session, ['rollback'])
    def test_commit_error(self):
        # A failing commit invalidates and replaces the raw connection.
        db = SteadyDBconnect(dbapi, database='ok')
        db.begin()
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        db.commit()
        self.assertEqual(db._con.session, ['commit'])
        self.assertTrue(db._con.valid)
        db.begin()
        db._con.valid = False
        con = db._con
        self.assertRaises(dbapi.InternalError, db.commit)
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        self.assertIsNot(con, db._con)
        db.begin()
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        db.commit()
        self.assertEqual(db._con.session, ['commit'])
        self.assertTrue(db._con.valid)
    def test_rollback_error(self):
        # A failing rollback invalidates and replaces the raw connection.
        db = SteadyDBconnect(dbapi, database='ok')
        db.begin()
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        db.rollback()
        self.assertEqual(db._con.session, ['rollback'])
        self.assertTrue(db._con.valid)
        db.begin()
        db._con.valid = False
        con = db._con
        self.assertRaises(dbapi.InternalError, db.rollback)
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        self.assertIsNot(con, db._con)
        db.begin()
        self.assertFalse(db._con.session)
        self.assertTrue(db._con.valid)
        db.rollback()
        self.assertEqual(db._con.session, ['rollback'])
        self.assertTrue(db._con.valid)
if __name__ == '__main__':
unittest.main() | tests/test_steady_db.py | import unittest
from . import mock_db as dbapi
from dbutils.steady_db import (
connect as SteadyDBconnect, SteadyDBConnection, SteadyDBCursor)
class TestSteadyDB(unittest.TestCase):
def test_version(self):
from dbutils import __version__, steady_db
self.assertEqual(steady_db.__version__, __version__)
self.assertEqual(steady_db.SteadyDBConnection.version, __version__)
def test_mocked_connection(self):
db = dbapi.connect(
'SteadyDBTestDB', user='SteadyDBTestUser')
db.__class__.has_ping = False
db.__class__.num_pings = 0
self.assertTrue(hasattr(db, 'database'))
self.assertEqual(db.database, 'SteadyDBTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'SteadyDBTestUser')
self.assertTrue(hasattr(db, 'cursor'))
self.assertTrue(hasattr(db, 'close'))
self.assertTrue(hasattr(db, 'open_cursors'))
self.assertTrue(hasattr(db, 'num_uses'))
self.assertTrue(hasattr(db, 'num_queries'))
self.assertTrue(hasattr(db, 'session'))
self.assertTrue(hasattr(db, 'valid'))
self.assertTrue(db.valid)
self.assertEqual(db.open_cursors, 0)
for i in range(3):
cursor = db.cursor()
self.assertEqual(db.open_cursors, 1)
cursor.close()
self.assertEqual(db.open_cursors, 0)
cursor = []
for i in range(3):
cursor.append(db.cursor())
self.assertEqual(db.open_cursors, i + 1)
del cursor
self.assertEqual(db.open_cursors, 0)
cursor = db.cursor()
self.assertTrue(hasattr(cursor, 'execute'))
self.assertTrue(hasattr(cursor, 'fetchone'))
self.assertTrue(hasattr(cursor, 'callproc'))
self.assertTrue(hasattr(cursor, 'close'))
self.assertTrue(hasattr(cursor, 'valid'))
self.assertTrue(cursor.valid)
self.assertEqual(db.open_cursors, 1)
for i in range(3):
self.assertEqual(db.num_uses, i)
self.assertEqual(db.num_queries, i)
cursor.execute(f'select test{i}')
self.assertEqual(cursor.fetchone(), f'test{i}')
self.assertTrue(cursor.valid)
self.assertEqual(db.open_cursors, 1)
for i in range(4):
cursor.callproc('test')
cursor.close()
self.assertFalse(cursor.valid)
self.assertEqual(db.open_cursors, 0)
self.assertEqual(db.num_uses, 7)
self.assertEqual(db.num_queries, 3)
self.assertRaises(dbapi.InternalError, cursor.close)
self.assertRaises(dbapi.InternalError, cursor.execute, 'select test')
self.assertTrue(db.valid)
self.assertFalse(db.__class__.has_ping)
self.assertEqual(db.__class__.num_pings, 0)
self.assertRaises(AttributeError, db.ping)
self.assertEqual(db.__class__.num_pings, 1)
db.__class__.has_ping = True
self.assertIsNone(db.ping())
self.assertEqual(db.__class__.num_pings, 2)
db.close()
self.assertFalse(db.valid)
self.assertEqual(db.num_uses, 0)
self.assertEqual(db.num_queries, 0)
self.assertRaises(dbapi.InternalError, db.close)
self.assertRaises(dbapi.InternalError, db.cursor)
self.assertRaises(dbapi.OperationalError, db.ping)
self.assertEqual(db.__class__.num_pings, 3)
db.__class__.has_ping = False
db.__class__.num_pings = 0
def test_broken_connection(self):
self.assertRaises(TypeError, SteadyDBConnection, None)
self.assertRaises(TypeError, SteadyDBCursor, None)
db = SteadyDBconnect(dbapi, database='ok')
for i in range(3):
db.close()
del db
self.assertRaises(
dbapi.OperationalError, SteadyDBconnect, dbapi, database='error')
db = SteadyDBconnect(dbapi, database='ok')
cursor = db.cursor()
for i in range(3):
cursor.close()
cursor = db.cursor('ok')
for i in range(3):
cursor.close()
self.assertRaises(dbapi.OperationalError, db.cursor, 'error')
def test_close(self):
for closeable in (False, True):
db = SteadyDBconnect(dbapi, closeable=closeable)
self.assertTrue(db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db._close()
self.assertFalse(db._con.valid)
db._close()
self.assertFalse(db._con.valid)
def test_connection(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertTrue(isinstance(db, SteadyDBConnection))
self.assertTrue(hasattr(db, '_con'))
self.assertTrue(hasattr(db, '_usage'))
self.assertEqual(db._usage, 0)
self.assertTrue(hasattr(db._con, 'valid'))
self.assertTrue(db._con.valid)
self.assertTrue(hasattr(db._con, 'cursor'))
self.assertTrue(hasattr(db._con, 'close'))
self.assertTrue(hasattr(db._con, 'open_cursors'))
self.assertTrue(hasattr(db._con, 'num_uses'))
self.assertTrue(hasattr(db._con, 'num_queries'))
self.assertTrue(hasattr(db._con, 'session'))
self.assertTrue(hasattr(db._con, 'database'))
self.assertEqual(db._con.database, 'SteadyDBTestDB')
self.assertTrue(hasattr(db._con, 'user'))
self.assertEqual(db._con.user, 'SteadyDBTestUser')
self.assertTrue(hasattr(db, 'cursor'))
self.assertTrue(hasattr(db, 'close'))
self.assertEqual(db._con.open_cursors, 0)
for i in range(3):
cursor = db.cursor()
self.assertEqual(db._con.open_cursors, 1)
cursor.close()
self.assertEqual(db._con.open_cursors, 0)
cursor = []
for i in range(3):
cursor.append(db.cursor())
self.assertEqual(db._con.open_cursors, i + 1)
del cursor
self.assertEqual(db._con.open_cursors, 0)
cursor = db.cursor()
self.assertTrue(hasattr(cursor, 'execute'))
self.assertTrue(hasattr(cursor, 'fetchone'))
self.assertTrue(hasattr(cursor, 'callproc'))
self.assertTrue(hasattr(cursor, 'close'))
self.assertTrue(hasattr(cursor, 'valid'))
self.assertTrue(cursor.valid)
self.assertEqual(db._con.open_cursors, 1)
for i in range(3):
self.assertEqual(db._usage, i)
self.assertEqual(db._con.num_uses, i)
self.assertEqual(db._con.num_queries, i)
cursor.execute(f'select test{i}')
self.assertEqual(cursor.fetchone(), f'test{i}')
self.assertTrue(cursor.valid)
self.assertEqual(db._con.open_cursors, 1)
for i in range(4):
cursor.callproc('test')
cursor.close()
self.assertFalse(cursor.valid)
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 7)
self.assertEqual(db._con.num_uses, 7)
self.assertEqual(db._con.num_queries, 3)
cursor.close()
cursor.execute('select test8')
self.assertTrue(cursor.valid)
self.assertEqual(db._con.open_cursors, 1)
self.assertEqual(cursor.fetchone(), 'test8')
self.assertEqual(db._usage, 8)
self.assertEqual(db._con.num_uses, 8)
self.assertEqual(db._con.num_queries, 4)
self.assertTrue(db._con.valid)
db.close()
self.assertFalse(db._con.valid)
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 8)
self.assertEqual(db._con.num_uses, 0)
self.assertEqual(db._con.num_queries, 0)
self.assertRaises(dbapi.InternalError, db._con.close)
db.close()
self.assertRaises(dbapi.InternalError, db._con.cursor)
cursor = db.cursor()
self.assertTrue(db._con.valid)
cursor.execute('select test11')
self.assertEqual(cursor.fetchone(), 'test11')
cursor.execute('select test12')
self.assertEqual(cursor.fetchone(), 'test12')
cursor.callproc('test')
self.assertEqual(db._usage, 3)
self.assertEqual(db._con.num_uses, 3)
self.assertEqual(db._con.num_queries, 2)
cursor2 = db.cursor()
self.assertEqual(db._con.open_cursors, 2)
cursor2.execute('select test13')
self.assertEqual(cursor2.fetchone(), 'test13')
self.assertEqual(db._con.num_queries, 3)
db.close()
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._con.num_queries, 0)
cursor = db.cursor()
self.assertTrue(cursor.valid)
cursor.callproc('test')
cursor._cursor.valid = False
self.assertFalse(cursor.valid)
self.assertRaises(dbapi.InternalError, cursor._cursor.callproc, 'test')
cursor.callproc('test')
self.assertTrue(cursor.valid)
cursor._cursor.callproc('test')
self.assertEqual(db._usage, 2)
self.assertEqual(db._con.num_uses, 3)
db._con.valid = cursor._cursor.valid = False
cursor.callproc('test')
self.assertTrue(cursor.valid)
self.assertEqual(db._usage, 1)
self.assertEqual(db._con.num_uses, 1)
cursor.execute('set this')
db.commit()
cursor.execute('set that')
db.rollback()
self.assertEqual(
db._con.session, ['this', 'commit', 'that', 'rollback'])
def test_connection_context_handler(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.session, [])
with db as con:
con.cursor().execute('select test')
self.assertEqual(db._con.session, ['commit'])
try:
with db as con:
con.cursor().execute('error')
except dbapi.ProgrammingError:
error = True
else:
error = False
self.assertTrue(error)
self.assertEqual(db._con.session, ['commit', 'rollback'])
def test_cursor_context_handler(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.open_cursors, 0)
with db.cursor() as cursor:
self.assertEqual(db._con.open_cursors, 1)
cursor.execute('select test')
self.assertEqual(cursor.fetchone(), 'test')
self.assertEqual(db._con.open_cursors, 0)
def test_cursor_as_iterator_provided(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.open_cursors, 0)
cursor = db.cursor()
self.assertEqual(db._con.open_cursors, 1)
cursor.execute('select test')
_cursor = cursor._cursor
try:
assert not hasattr(_cursor, 'iter')
_cursor.__iter__ = lambda: ['test-iter']
assert list(iter(cursor)) == ['test']
finally:
del _cursor.__iter__
cursor.close()
self.assertEqual(db._con.open_cursors, 0)
def test_cursor_as_iterator_created(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.open_cursors, 0)
cursor = db.cursor()
self.assertEqual(db._con.open_cursors, 1)
cursor.execute('select test')
assert list(iter(cursor)) == ['test']
cursor.close()
self.assertEqual(db._con.open_cursors, 0)
def test_connection_creator_function(self):
db1 = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
db2 = SteadyDBconnect(
dbapi.connect, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db1.dbapi(), db2.dbapi())
self.assertEqual(db1.threadsafety(), db2.threadsafety())
self.assertEqual(db1._creator, db2._creator)
self.assertEqual(db1._args, db2._args)
self.assertEqual(db1._kwargs, db2._kwargs)
db2.close()
db1.close()
def test_connection_maxusage(self):
db = SteadyDBconnect(dbapi, 10)
cursor = db.cursor()
for i in range(100):
cursor.execute(f'select test{i}')
r = cursor.fetchone()
self.assertEqual(r, f'test{i}')
self.assertTrue(db._con.valid)
j = i % 10 + 1
self.assertEqual(db._usage, j)
self.assertEqual(db._con.num_uses, j)
self.assertEqual(db._con.num_queries, j)
self.assertEqual(db._con.open_cursors, 1)
db.begin()
for i in range(100):
cursor.callproc('test')
self.assertTrue(db._con.valid)
if i == 49:
db.commit()
j = i % 10 + 1 if i > 49 else i + 11
self.assertEqual(db._usage, j)
self.assertEqual(db._con.num_uses, j)
j = 0 if i > 49 else 10
self.assertEqual(db._con.num_queries, j)
for i in range(10):
if i == 7:
db._con.valid = cursor._cursor.valid = False
cursor.execute(f'select test{i}')
r = cursor.fetchone()
self.assertEqual(r, f'test{i}')
j = i % 7 + 1
self.assertEqual(db._usage, j)
self.assertEqual(db._con.num_uses, j)
self.assertEqual(db._con.num_queries, j)
for i in range(10):
if i == 5:
db._con.valid = cursor._cursor.valid = False
cursor.callproc('test')
j = (i + (3 if i < 5 else -5)) % 10 + 1
self.assertEqual(db._usage, j)
self.assertEqual(db._con.num_uses, j)
j = 3 if i < 5 else 0
self.assertEqual(db._con.num_queries, j)
db.close()
cursor.execute('select test1')
self.assertEqual(cursor.fetchone(), 'test1')
self.assertEqual(db._usage, 1)
self.assertEqual(db._con.num_uses, 1)
self.assertEqual(db._con.num_queries, 1)
def test_connection_setsession(self):
db = SteadyDBconnect(dbapi, 3, ('set time zone', 'set datestyle'))
self.assertTrue(hasattr(db, '_usage'))
self.assertEqual(db._usage, 0)
self.assertTrue(hasattr(db._con, 'open_cursors'))
self.assertEqual(db._con.open_cursors, 0)
self.assertTrue(hasattr(db._con, 'num_uses'))
self.assertEqual(db._con.num_uses, 2)
self.assertTrue(hasattr(db._con, 'num_queries'))
self.assertEqual(db._con.num_queries, 0)
self.assertTrue(hasattr(db._con, 'session'))
self.assertEqual(tuple(db._con.session), ('time zone', 'datestyle'))
for i in range(11):
db.cursor().execute('select test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 2)
self.assertEqual(db._con.num_uses, 4)
self.assertEqual(db._con.num_queries, 2)
self.assertEqual(db._con.session, ['time zone', 'datestyle'])
db.cursor().execute('set test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 3)
self.assertEqual(db._con.num_uses, 5)
self.assertEqual(db._con.num_queries, 2)
self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
db.cursor().execute('select test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 1)
self.assertEqual(db._con.num_uses, 3)
self.assertEqual(db._con.num_queries, 1)
self.assertEqual(db._con.session, ['time zone', 'datestyle'])
db.cursor().execute('set test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 2)
self.assertEqual(db._con.num_uses, 4)
self.assertEqual(db._con.num_queries, 1)
self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
db.cursor().execute('select test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 3)
self.assertEqual(db._con.num_uses, 5)
self.assertEqual(db._con.num_queries, 2)
self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
db.close()
db.cursor().execute('set test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 1)
self.assertEqual(db._con.num_uses, 3)
self.assertEqual(db._con.num_queries, 0)
self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
db.close()
db.cursor().execute('select test')
self.assertEqual(db._con.open_cursors, 0)
self.assertEqual(db._usage, 1)
self.assertEqual(db._con.num_uses, 3)
self.assertEqual(db._con.num_queries, 1)
self.assertEqual(db._con.session, ['time zone', 'datestyle'])
def test_connection_failures(self):
db = SteadyDBconnect(dbapi)
db.close()
db.cursor()
db = SteadyDBconnect(dbapi, failures=dbapi.InternalError)
db.close()
db.cursor()
db = SteadyDBconnect(dbapi, failures=dbapi.OperationalError)
db.close()
self.assertRaises(dbapi.InternalError, db.cursor)
db = SteadyDBconnect(dbapi, failures=(
dbapi.OperationalError, dbapi.InterfaceError))
db.close()
self.assertRaises(dbapi.InternalError, db.cursor)
db = SteadyDBconnect(dbapi, failures=(
dbapi.OperationalError, dbapi.InterfaceError, dbapi.InternalError))
db.close()
db.cursor()
def test_connection_failure_error(self):
db = SteadyDBconnect(dbapi)
cursor = db.cursor()
db.close()
cursor.execute('select test')
cursor = db.cursor()
db.close()
self.assertRaises(dbapi.ProgrammingError, cursor.execute, 'error')
def test_connection_set_sizes(self):
db = SteadyDBconnect(dbapi)
cursor = db.cursor()
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([], {}))
cursor.setinputsizes([7, 42, 6])
cursor.setoutputsize(9)
cursor.setoutputsize(15, 3)
cursor.setoutputsize(42, 7)
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([7, 42, 6], {None: 9, 3: 15, 7: 42}))
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([], {}))
cursor.setinputsizes([6, 42, 7])
cursor.setoutputsize(7)
cursor.setoutputsize(15, 3)
cursor.setoutputsize(42, 9)
db.close()
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([6, 42, 7], {None: 7, 3: 15, 9: 42}))
def test_connection_ping_check(self):
Connection = dbapi.Connection
Connection.has_ping = False
Connection.num_pings = 0
db = SteadyDBconnect(dbapi)
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 0)
db.close()
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 0)
self.assertIsNone(db._ping_check())
self.assertEqual(Connection.num_pings, 1)
db = SteadyDBconnect(dbapi, ping=7)
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 2)
db.close()
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 2)
self.assertIsNone(db._ping_check())
self.assertEqual(Connection.num_pings, 2)
Connection.has_ping = True
db = SteadyDBconnect(dbapi)
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 2)
db.close()
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 2)
self.assertTrue(db._ping_check())
self.assertEqual(Connection.num_pings, 3)
db = SteadyDBconnect(dbapi, ping=1)
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 3)
db.close()
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 3)
self.assertTrue(db._ping_check())
self.assertEqual(Connection.num_pings, 4)
db.close()
self.assertTrue(db._ping_check())
self.assertEqual(Connection.num_pings, 5)
db = SteadyDBconnect(dbapi, ping=7)
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 7)
db.close()
db.cursor().execute('select test')
self.assertEqual(Connection.num_pings, 9)
db = SteadyDBconnect(dbapi, ping=3)
self.assertEqual(Connection.num_pings, 9)
db.cursor()
self.assertEqual(Connection.num_pings, 10)
db.close()
cursor = db.cursor()
self.assertEqual(Connection.num_pings, 11)
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 11)
db = SteadyDBconnect(dbapi, ping=5)
self.assertEqual(Connection.num_pings, 11)
db.cursor()
self.assertEqual(Connection.num_pings, 11)
db.close()
cursor = db.cursor()
self.assertEqual(Connection.num_pings, 11)
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 12)
db.close()
cursor = db.cursor()
self.assertEqual(Connection.num_pings, 12)
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 13)
db = SteadyDBconnect(dbapi, ping=7)
self.assertEqual(Connection.num_pings, 13)
db.cursor()
self.assertEqual(Connection.num_pings, 14)
db.close()
cursor = db.cursor()
self.assertEqual(Connection.num_pings, 15)
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 16)
db.close()
cursor = db.cursor()
self.assertEqual(Connection.num_pings, 17)
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 18)
db.close()
cursor.execute('select test')
self.assertEqual(Connection.num_pings, 20)
Connection.has_ping = False
Connection.num_pings = 0
def test_begin_transaction(self):
db = SteadyDBconnect(dbapi, database='ok')
cursor = db.cursor()
cursor.close()
cursor.execute('select test12')
self.assertEqual(cursor.fetchone(), 'test12')
db.begin()
cursor = db.cursor()
cursor.close()
self.assertRaises(dbapi.InternalError, cursor.execute, 'select test12')
cursor.execute('select test12')
self.assertEqual(cursor.fetchone(), 'test12')
db.close()
db.begin()
self.assertRaises(dbapi.InternalError, cursor.execute, 'select test12')
cursor.execute('select test12')
self.assertEqual(cursor.fetchone(), 'test12')
db.begin()
self.assertRaises(dbapi.ProgrammingError, cursor.execute, 'error')
cursor.close()
cursor.execute('select test12')
self.assertEqual(cursor.fetchone(), 'test12')
def test_with_begin_extension(self):
db = SteadyDBconnect(dbapi, database='ok')
db._con._begin_called_with = None
def begin(a, b=None, c=7):
db._con._begin_called_with = (a, b, c)
db._con.begin = begin
db.begin(42, 6)
cursor = db.cursor()
cursor.execute('select test13')
self.assertEqual(cursor.fetchone(), 'test13')
self.assertEqual(db._con._begin_called_with, (42, 6, 7))
def test_cancel_transaction(self):
db = SteadyDBconnect(dbapi, database='ok')
cursor = db.cursor()
db.begin()
cursor.execute('select test14')
self.assertEqual(cursor.fetchone(), 'test14')
db.cancel()
cursor.execute('select test14')
self.assertEqual(cursor.fetchone(), 'test14')
def test_with_cancel_extension(self):
db = SteadyDBconnect(dbapi, database='ok')
db._con._cancel_called = None
def cancel():
db._con._cancel_called = 'yes'
db._con.cancel = cancel
db.begin()
cursor = db.cursor()
cursor.execute('select test15')
self.assertEqual(cursor.fetchone(), 'test15')
db.cancel()
self.assertEqual(db._con._cancel_called, 'yes')
def test_reset_transaction(self):
db = SteadyDBconnect(dbapi, database='ok')
db.begin()
self.assertFalse(db._con.session)
db.close()
self.assertFalse(db._con.session)
db = SteadyDBconnect(dbapi, database='ok', closeable=False)
db.begin()
self.assertFalse(db._con.session)
db.close()
self.assertEqual(db._con.session, ['rollback'])
def test_commit_error(self):
db = SteadyDBconnect(dbapi, database='ok')
db.begin()
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
db.commit()
self.assertEqual(db._con.session, ['commit'])
self.assertTrue(db._con.valid)
db.begin()
db._con.valid = False
con = db._con
self.assertRaises(dbapi.InternalError, db.commit)
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
self.assertIsNot(con, db._con)
db.begin()
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
db.commit()
self.assertEqual(db._con.session, ['commit'])
self.assertTrue(db._con.valid)
def test_rollback_error(self):
db = SteadyDBconnect(dbapi, database='ok')
db.begin()
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
db.rollback()
self.assertEqual(db._con.session, ['rollback'])
self.assertTrue(db._con.valid)
db.begin()
db._con.valid = False
con = db._con
self.assertRaises(dbapi.InternalError, db.rollback)
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
self.assertIsNot(con, db._con)
db.begin()
self.assertFalse(db._con.session)
self.assertTrue(db._con.valid)
db.rollback()
self.assertEqual(db._con.session, ['rollback'])
self.assertTrue(db._con.valid)
if __name__ == '__main__':
unittest.main() | 0.512937 | 0.359055 |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units, Data
import numpy as np
from SUAVE.Components.Energy.Networks.Ramjet import Ramjet
from SUAVE.Methods.Propulsion.ramjet_sizing import ramjet_sizing
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# call the network function
energy_network()
return
# ----------------------------------------------------------------------
# Energy Network
# ----------------------------------------------------------------------
def energy_network():
# ------------------------------------------------------------------
# Evaluation Conditions
# ------------------------------------------------------------------
# --- Conditions
ones_1col = np.ones([1,1])
# setup conditions
conditions = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics()
# freestream conditions
free = conditions.freestream
free.mach_number = ones_1col*1.5
conditions.M = free.mach_number
free.altitude = ones_1col*10000.
atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
atmo_data = atmosphere.compute_values(free.altitude,0,True)
planet = SUAVE.Attributes.Planets.Earth()
working_fluid = SUAVE.Attributes.Gases.Air()
free.pressure = ones_1col*atmo_data.pressure
free.temperature = ones_1col*atmo_data.temperature
free.density = ones_1col*atmo_data.density
free.dynamic_viscosity = ones_1col*atmo_data.dynamic_viscosity
free.gravity = ones_1col*planet.compute_gravity(free.altitude)
free.isentropic_expansion_factor = working_fluid.compute_gamma(free.temperature,free.pressure)
free.Cp = working_fluid.compute_cp(free.temperature,free.pressure)
free.R = working_fluid.gas_specific_constant
free.speed_of_sound = ones_1col* atmo_data.speed_of_sound
free.velocity = conditions.M * free.speed_of_sound
conditions.velocity = conditions.M * free.speed_of_sound
conditions.q = 0.5*free.density*conditions.velocity**2
conditions.g0 = free.gravity
# propulsion conditions
conditions.propulsion.throttle = ones_1col*1.0
# ------------------------------------------------------------------
# Design/sizing conditions
# ------------------------------------------------------------------
# --- Conditions
ones_1col = np.ones([1,1])
# setup conditions
conditions_sizing = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics()
# freestream conditions
size = conditions_sizing.freestream
size.mach_number = ones_1col*2.5
conditions_sizing.M = size.mach_number
size.altitude = ones_1col*10000.
atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
atmo_data = atmosphere.compute_values(size.altitude,0,True)
working_fluid = SUAVE.Attributes.Gases.Air()
size.pressure = ones_1col*atmo_data.pressure
size.temperature = ones_1col*atmo_data.temperature
size.density = ones_1col*atmo_data.density
size.dynamic_viscosity = ones_1col*atmo_data.dynamic_viscosity
size.gravity = ones_1col*planet.compute_gravity(size.altitude)
size.isentropic_expansion_factor = working_fluid.compute_gamma(size.temperature,size.pressure)
size.Cp = working_fluid.compute_cp(size.temperature,size.pressure)
size.R = working_fluid.gas_specific_constant
size.speed_of_sound = ones_1col * atmo_data.speed_of_sound
size.velocity = conditions_sizing.M * size.speed_of_sound
conditions_sizing.velocity = conditions_sizing.M * size.speed_of_sound
conditions_sizing.q = 0.5*size.density*conditions_sizing.velocity**2
conditions_sizing.g0 = size.gravity
# propulsion conditions
conditions_sizing.propulsion.throttle = ones_1col*1.0
state_sizing = Data()
state_sizing.numerics = Data()
state_sizing.conditions = conditions_sizing
state_off_design=Data()
state_off_design.numerics=Data()
state_off_design.conditions=conditions
# ------------------------------------------------------------------
# Ramjet Network
# ------------------------------------------------------------------
# instantiate the ramjet network
ramjet = SUAVE.Components.Energy.Networks.Ramjet()
ramjet.tag = 'ramjet'
# setup
ramjet.number_of_engines = 2.0
ramjet.inlet_diameter = 1.1 * Units.meter
# working fluid
ramjet.working_fluid = SUAVE.Attributes.Gases.Air()
# ------------------------------------------------------------------
# Component 1 - Ram
# to convert freestream static to stagnation quantities
# instantiate
ram = SUAVE.Components.Energy.Converters.Ram()
ram.tag = 'ram'
# add to the network
ramjet.append(ram)
# ------------------------------------------------------------------
# Component 2 - Inlet Nozzle
# instantiate
inlet_nozzle = SUAVE.Components.Energy.Converters.Compression_Nozzle()
inlet_nozzle.tag = 'inlet_nozzle'
# setup
inlet_nozzle.polytropic_efficiency = 1.0
inlet_nozzle.pressure_ratio = 1.0
inlet_nozzle.compressibility_effects = True
# add to network
ramjet.append(inlet_nozzle)
# ------------------------------------------------------------------
# Component 3 - Combustor
# instantiate
combustor = SUAVE.Components.Energy.Converters.Combustor()
combustor.tag = 'combustor'
# setup
combustor.efficiency = 1.0
combustor.turbine_inlet_temperature = 2400.
combustor.pressure_ratio = 1.0
combustor.area_ratio = 2.0
combustor.fuel_data = SUAVE.Attributes.Propellants.Jet_A()
combustor.rayleigh_analyses = True
# add to network
ramjet.append(combustor)
# ------------------------------------------------------------------
# Component 4 - Core Nozzle
# instantiate
nozzle = SUAVE.Components.Energy.Converters.Supersonic_Nozzle()
nozzle.tag = 'core_nozzle'
# setup
nozzle.polytropic_efficiency = 1.0
nozzle.pressure_ratio = 1.0
# add to network
ramjet.append(nozzle)
# ------------------------------------------------------------------
# Component 5 - Thrust
# instantiate
thrust = SUAVE.Components.Energy.Processes.Thrust()
thrust.tag ='thrust'
# setup
thrust.total_design = ramjet.number_of_engines*169370.4652 * Units.N
# add to network
ramjet.thrust = thrust
#size the ramjet
ramjet_sizing(ramjet,2.5,10000.0)
print("Design thrust :",ramjet.design_thrust)
print("Sealevel static thrust :",ramjet.sealevel_static_thrust)
results_design = ramjet(state_sizing)
results_off_design = ramjet(state_off_design)
F = results_design.thrust_force_vector
mdot = results_design.vehicle_mass_rate
Isp = results_design.specific_impulse
F_off_design = results_off_design.thrust_force_vector
mdot_off_design = results_off_design.vehicle_mass_rate
Isp_off_design = results_off_design.specific_impulse
#Specify the expected values
expected = Data()
expected.thrust = 338740.9304
expected.mdot = 23.11172969
expected.Isp = 1499.25957118
#error data function
error = Data()
error.thrust_error = (F[0][0] - expected.thrust)/expected.thrust
error.mdot_error = (mdot[0][0] - expected.mdot)/expected.mdot
error.Isp_error = (Isp[0][0]- expected.Isp)/expected.Isp
print(error)
for k,v in list(error.items()):
assert(np.abs(v)<1e-6)
return
if __name__ == '__main__':
main() | SUAVE/SUAVE-2.5.0/regression/scripts/ramjet_network/ramjet_network.py | # ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units, Data
import numpy as np
from SUAVE.Components.Energy.Networks.Ramjet import Ramjet
from SUAVE.Methods.Propulsion.ramjet_sizing import ramjet_sizing
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# call the network function
energy_network()
return
# ----------------------------------------------------------------------
# Energy Network
# ----------------------------------------------------------------------
def _flight_conditions(mach, altitude):
    """Build a SUAVE Aerodynamics conditions object for one freestream point.

    Populates the freestream state from the 1976 US Standard Atmosphere at
    the given altitude, derives velocity and dynamic pressure from the Mach
    number, and sets full throttle.

    Args:
        mach (float): freestream Mach number.
        altitude (float): freestream altitude [m].

    Returns:
        A populated Aerodynamics conditions object.
    """
    ones_1col = np.ones([1, 1])
    conditions = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics()

    # Freestream state from the standard atmosphere.
    free = conditions.freestream
    free.mach_number = ones_1col * mach
    conditions.M = free.mach_number
    free.altitude = ones_1col * altitude
    atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
    atmo_data = atmosphere.compute_values(free.altitude, 0, True)
    planet = SUAVE.Attributes.Planets.Earth()
    working_fluid = SUAVE.Attributes.Gases.Air()
    free.pressure = ones_1col * atmo_data.pressure
    free.temperature = ones_1col * atmo_data.temperature
    free.density = ones_1col * atmo_data.density
    free.dynamic_viscosity = ones_1col * atmo_data.dynamic_viscosity
    free.gravity = ones_1col * planet.compute_gravity(free.altitude)
    free.isentropic_expansion_factor = working_fluid.compute_gamma(free.temperature, free.pressure)
    free.Cp = working_fluid.compute_cp(free.temperature, free.pressure)
    free.R = working_fluid.gas_specific_constant
    free.speed_of_sound = ones_1col * atmo_data.speed_of_sound
    free.velocity = conditions.M * free.speed_of_sound

    # Derived flow quantities used by the network evaluation.
    conditions.velocity = conditions.M * free.speed_of_sound
    conditions.q = 0.5 * free.density * conditions.velocity ** 2
    conditions.g0 = free.gravity

    # Full throttle.
    conditions.propulsion.throttle = ones_1col * 1.0
    return conditions


def energy_network():
    """Regression test for the SUAVE ramjet network.

    Builds a two-engine ramjet, sizes it at the design point (M=2.5, 10 km),
    evaluates both the design and an off-design point (M=1.5, 10 km), and
    compares the design-point thrust, fuel mass rate and Isp against stored
    regression values to a relative tolerance of 1e-6.

    Raises:
        AssertionError: if any design-point quantity drifts from the
            expected regression value.
    """
    # ------------------------------------------------------------------
    #   Evaluation conditions: off-design (M=1.5) and design (M=2.5)
    # ------------------------------------------------------------------
    conditions = _flight_conditions(1.5, 10000.)
    conditions_sizing = _flight_conditions(2.5, 10000.)

    state_sizing = Data()
    state_sizing.numerics = Data()
    state_sizing.conditions = conditions_sizing

    state_off_design = Data()
    state_off_design.numerics = Data()
    state_off_design.conditions = conditions

    # ------------------------------------------------------------------
    #   Ramjet network
    # ------------------------------------------------------------------
    ramjet = SUAVE.Components.Energy.Networks.Ramjet()
    ramjet.tag = 'ramjet'
    ramjet.number_of_engines = 2.0
    ramjet.inlet_diameter = 1.1 * Units.meter
    ramjet.working_fluid = SUAVE.Attributes.Gases.Air()

    # Component 1 - ram: converts freestream static to stagnation quantities.
    ram = SUAVE.Components.Energy.Converters.Ram()
    ram.tag = 'ram'
    ramjet.append(ram)

    # Component 2 - inlet nozzle (ideal, with compressibility effects).
    inlet_nozzle = SUAVE.Components.Energy.Converters.Compression_Nozzle()
    inlet_nozzle.tag = 'inlet_nozzle'
    inlet_nozzle.polytropic_efficiency = 1.0
    inlet_nozzle.pressure_ratio = 1.0
    inlet_nozzle.compressibility_effects = True
    ramjet.append(inlet_nozzle)

    # Component 3 - combustor, modeled with Rayleigh-flow analysis.
    combustor = SUAVE.Components.Energy.Converters.Combustor()
    combustor.tag = 'combustor'
    combustor.efficiency = 1.0
    combustor.turbine_inlet_temperature = 2400.
    combustor.pressure_ratio = 1.0
    combustor.area_ratio = 2.0
    combustor.fuel_data = SUAVE.Attributes.Propellants.Jet_A()
    combustor.rayleigh_analyses = True
    ramjet.append(combustor)

    # Component 4 - supersonic core nozzle.
    nozzle = SUAVE.Components.Energy.Converters.Supersonic_Nozzle()
    nozzle.tag = 'core_nozzle'
    nozzle.polytropic_efficiency = 1.0
    nozzle.pressure_ratio = 1.0
    ramjet.append(nozzle)

    # Component 5 - thrust bookkeeping; total design thrust covers both engines.
    thrust = SUAVE.Components.Energy.Processes.Thrust()
    thrust.tag = 'thrust'
    thrust.total_design = ramjet.number_of_engines * 169370.4652 * Units.N
    ramjet.thrust = thrust

    # Size the ramjet at the design point.
    ramjet_sizing(ramjet, 2.5, 10000.0)
    print("Design thrust :", ramjet.design_thrust)
    print("Sealevel static thrust :", ramjet.sealevel_static_thrust)

    results_design = ramjet(state_sizing)
    # Exercise the off-design point as well; its outputs are not asserted.
    ramjet(state_off_design)

    F = results_design.thrust_force_vector
    mdot = results_design.vehicle_mass_rate
    Isp = results_design.specific_impulse

    # Stored regression values for the design point.
    expected = Data()
    expected.thrust = 338740.9304
    expected.mdot = 23.11172969
    expected.Isp = 1499.25957118

    # Relative errors against the regression values.
    error = Data()
    error.thrust_error = (F[0][0] - expected.thrust) / expected.thrust
    error.mdot_error = (mdot[0][0] - expected.mdot) / expected.mdot
    error.Isp_error = (Isp[0][0] - expected.Isp) / expected.Isp
    print(error)
    for k, v in list(error.items()):
        assert np.abs(v) < 1e-6
    return
# Run the regression directly when executed as a script.
if __name__ == '__main__':
    main()
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
seed_num = 5
torch.manual_seed(seed_num)
random.seed(seed_num)
class linear(nn.Module):
    """Pointwise (kernel-size-1) 1-D convolution projecting ``in_chs`` channels to 1.

    ``out_chs`` and ``opt`` are accepted for interface compatibility but are
    not used: the output channel count is fixed at 1 in the code below.
    """

    def __init__(self, in_chs, out_chs, opt):
        super(linear, self).__init__()
        # Single 1x1 conv; Sequential is kept so state-dict keys stay 'conv1.0.*'.
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_chs, 1, kernel_size=1, stride=1, padding=0),
        )

    def forward(self, x):
        """Apply the 1x1 convolution; (N, in_chs, L) -> (N, 1, L)."""
        return self.conv1(x)
class UNet(nn.Module):
def __init__(self, in_channel, out_channel, opt):
super(UNet, self).__init__()
self.ec0 = self.encoder(in_channel, 32, bias=True, batchnorm=True)
self.ec1 = self.encoder(32, 64, bias=True, batchnorm=True)
self.ec2 = self.encoder(64, 64, bias=True, batchnorm=True)
self.ec3 = self.encoder(64, 128, bias=True, batchnorm=True)
self.ec4 = self.encoder(128, 128, bias=True, batchnorm=True)
self.ec5 = self.encoder(128, 256, bias=True, batchnorm=True)
self.ec6 = self.encoder(256, 256, bias=True, batchnorm=True)
self.ec7 = self.encoder(256, 512, bias=True, batchnorm=True)
self.pool0 = nn.MaxPool1d(2)
self.pool1 = nn.MaxPool1d(2)
self.pool2 = nn.MaxPool1d(2)
self.dc9 = self.decoder(512, 512, kernel_size=2, stride=2, bias=True)
self.dc8 = self.decoder(256 + 512, 256, kernel_size=3, stride=1, padding=1, bias=True)
self.dc7 = self.decoder(256, 256, kernel_size=3, stride=1, padding=1, bias=True)
self.dc6 = self.decoder(256, 256, kernel_size=2, stride=2, bias=True)
self.dc5 = self.decoder(128 + 256, 133, kernel_size=3, stride=1, padding=1, bias=True)
self.dc4 = self.decoder(133, 133, kernel_size=3, stride=1, padding=1, bias=True)
self.dc3 = self.decoder(133, 133, kernel_size=2, stride=2, bias=True)
self.dc2 = self.decoder(64 + 133, 133, kernel_size=3, stride=1, padding=1, bias=True)
self.dc1 = self.decoder(133, 133, kernel_size=3, stride=1, padding=1, bias=True)
self.dc0 = self.decoder(133, 1, kernel_size=1, stride=1, bias=True)
def encoder(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
bias=True, batchnorm=False):
if batchnorm:
layer = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
else:
layer = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
return layer
def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=0 ,
output_padding=0, bias=True):
layer = nn.Sequential(
nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, output_padding=output_padding, bias=bias))
return layer
def forward(self, x):
e0 = self.ec0(x)
syn0 = self.ec1(e0)
e1 = self.pool0(syn0)
e2 = self.ec2(e1)
syn1 = self.ec3(e2)
del e0, e1, e2
e3 = self.pool1(syn1)
e4 = self.ec4(e3)
syn2 = self.ec5(e4)
del e3, e4
e5 = self.pool2(syn2)
e6 = self.ec6(e5)
e7 = self.ec7(e6)
del e5, e6
# print("block e7 size = %s" % (str(e7.size())))
# print("block dc9 size = %s" % (str(self.dc9(e7).size())))
# print("block syn2 size = %s" % (str(syn2.size())))
d9 = torch.cat((self.dc9(e7), syn2), 1)
# print("block d9 size = %s" % (str(d9.size())))
del e7, syn2
d8 = self.dc8(d9)
d7 = self.dc7(d8)
# print("block d8 size = %s" % (str(d8.size())))
del d9, d8
# print("block d7 size = %s" % (str(d7.size())))
d6 = torch.cat((self.dc6(d7), syn1), 1)
del d7, syn1
d5 = self.dc5(d6)
d4 = self.dc4(d5)
# print("block d5 size = %s" % (str(d5.size())))
# print("block d4 size = %s" % (str(d4.size())))
del d6, d5
d3 = torch.cat((self.dc3(d4), syn0), 1)
del d4, syn0
# print("block d3 size = %s" % (str(d3.size())))
d2 = self.dc2(d3)
d1 = self.dc1(d2)
# print("block d2 size = %s" % (str(d2.size())))
del d3, d2
# print("block d1 size = %s" % (str(d1.size())))
d0 = self.dc0(d1)
# print("block d0 size = %s" % (str(d0.size())))
return d0
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM applied along the length axis of a (N, C, L) tensor.

    Input:  (batch, nIn, seq_len)
    Output: (batch, nOut, seq_len)
    """

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Bidirectional output concatenates forward/backward states -> 2*nHidden.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        # (batch, channels, seq) -> (seq, batch, channels): LSTM default layout.
        input = input.permute(2, 0, 1)
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        # Flatten time and batch so the Linear is applied per timestep.
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        # Back to the conv-style (batch, nOut, seq) layout.
        output = output.permute(1, 2, 0)
        return output
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
# Fix RNG seeds so weight initialization and Python-level sampling are
# reproducible across runs.
seed_num = 5
torch.manual_seed(seed_num)
random.seed(seed_num)
class linear(nn.Module):
    """Pointwise (kernel-size-1) 1-D convolution projecting ``in_chs`` channels to 1.

    ``out_chs`` and ``opt`` are accepted for interface compatibility but are
    not used: the output channel count is fixed at 1 in the code below.
    """

    def __init__(self, in_chs, out_chs, opt):
        super(linear, self).__init__()
        # Single 1x1 conv; Sequential is kept so state-dict keys stay 'conv1.0.*'.
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_chs, 1, kernel_size=1, stride=1, padding=0),
        )

    def forward(self, x):
        """Apply the 1x1 convolution; (N, in_chs, L) -> (N, 1, L)."""
        return self.conv1(x)
class UNet(nn.Module):
    """1-D U-Net: convolutional encoder with three max-pool downsamplings and
    a transposed-convolution decoder with skip connections (torch.cat).

    NOTE(review): ``out_channel`` and ``opt`` are accepted but never read --
    the final decoder layer (dc0) is hard-coded to 1 output channel. Confirm
    whether they were meant to drive the architecture.
    """
    def __init__(self, in_channel, out_channel, opt):
        super(UNet, self).__init__()
        # Encoder convolutions (channel widths 32 -> 512).
        self.ec0 = self.encoder(in_channel, 32, bias=True, batchnorm=True)
        self.ec1 = self.encoder(32, 64, bias=True, batchnorm=True)
        self.ec2 = self.encoder(64, 64, bias=True, batchnorm=True)
        self.ec3 = self.encoder(64, 128, bias=True, batchnorm=True)
        self.ec4 = self.encoder(128, 128, bias=True, batchnorm=True)
        self.ec5 = self.encoder(128, 256, bias=True, batchnorm=True)
        self.ec6 = self.encoder(256, 256, bias=True, batchnorm=True)
        self.ec7 = self.encoder(256, 512, bias=True, batchnorm=True)
        # Stride-2 downsampling between encoder stages.
        self.pool0 = nn.MaxPool1d(2)
        self.pool1 = nn.MaxPool1d(2)
        self.pool2 = nn.MaxPool1d(2)
        # Decoder: dc9/dc6/dc3 are stride-2 upsamplers; the input widths of
        # dc8/dc5/dc2 account for the concatenated skip tensors.
        self.dc9 = self.decoder(512, 512, kernel_size=2, stride=2, bias=True)
        self.dc8 = self.decoder(256 + 512, 256, kernel_size=3, stride=1, padding=1, bias=True)
        self.dc7 = self.decoder(256, 256, kernel_size=3, stride=1, padding=1, bias=True)
        self.dc6 = self.decoder(256, 256, kernel_size=2, stride=2, bias=True)
        self.dc5 = self.decoder(128 + 256, 133, kernel_size=3, stride=1, padding=1, bias=True)
        self.dc4 = self.decoder(133, 133, kernel_size=3, stride=1, padding=1, bias=True)
        self.dc3 = self.decoder(133, 133, kernel_size=2, stride=2, bias=True)
        self.dc2 = self.decoder(64 + 133, 133, kernel_size=3, stride=1, padding=1, bias=True)
        self.dc1 = self.decoder(133, 133, kernel_size=3, stride=1, padding=1, bias=True)
        # Final 1x1 projection down to a single channel.
        self.dc0 = self.decoder(133, 1, kernel_size=1, stride=1, bias=True)
    def encoder(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                bias=True, batchnorm=False):
        """Build one encoder stage (a single Conv1d wrapped in Sequential).

        NOTE(review): both branches are identical -- the ``batchnorm`` flag
        currently has no effect (no BatchNorm1d is added). Confirm intent.
        """
        if batchnorm:
            layer = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
        else:
            layer = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
        return layer
    def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                output_padding=0, bias=True):
        """Build one decoder stage (a single ConvTranspose1d in Sequential)."""
        layer = nn.Sequential(
            nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=stride,
                               padding=padding, output_padding=output_padding, bias=bias))
        return layer
    def forward(self, x):
        """Encode ``x`` and decode with skip connections.

        The ``del`` statements free intermediates early to lower peak memory.
        NOTE(review): the torch.cat calls require the upsampled and skip
        tensors to have equal lengths, so the input length is presumably a
        multiple of 8 -- confirm with callers.
        """
        # Encoder level 0; syn0 is kept as a skip connection.
        e0 = self.ec0(x)
        syn0 = self.ec1(e0)
        e1 = self.pool0(syn0)
        # Encoder level 1; syn1 is kept as a skip connection.
        e2 = self.ec2(e1)
        syn1 = self.ec3(e2)
        del e0, e1, e2
        e3 = self.pool1(syn1)
        # Encoder level 2; syn2 is kept as a skip connection.
        e4 = self.ec4(e3)
        syn2 = self.ec5(e4)
        del e3, e4
        # Bottleneck.
        e5 = self.pool2(syn2)
        e6 = self.ec6(e5)
        e7 = self.ec7(e6)
        del e5, e6
        # Decoder level 2: upsample, then concatenate the skip tensor.
        d9 = torch.cat((self.dc9(e7), syn2), 1)
        del e7, syn2
        d8 = self.dc8(d9)
        d7 = self.dc7(d8)
        del d9, d8
        # Decoder level 1.
        d6 = torch.cat((self.dc6(d7), syn1), 1)
        del d7, syn1
        d5 = self.dc5(d6)
        d4 = self.dc4(d5)
        del d6, d5
        # Decoder level 0.
        d3 = torch.cat((self.dc3(d4), syn0), 1)
        del d4, syn0
        d2 = self.dc2(d3)
        d1 = self.dc1(d2)
        del d3, d2
        # Final 1x1 projection to one channel.
        d0 = self.dc0(d1)
        return d0
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM applied along the length axis of a (N, C, L) tensor.

    Input:  (batch, nIn, seq_len)
    Output: (batch, nOut, seq_len)
    """

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Bidirectional output concatenates forward/backward states -> 2*nHidden.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        # (batch, channels, seq) -> (seq, batch, channels): LSTM default layout.
        input = input.permute(2, 0, 1)
        recurrent, _ = self.rnn(input)
        T, b, h = recurrent.size()
        # Flatten time and batch so the Linear is applied per timestep.
        t_rec = recurrent.view(T * b, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(T, b, -1)
        # Back to the conv-style (batch, nOut, seq) layout.
        output = output.permute(1, 2, 0)
        return output
from . import predicate
from . import path_predicate as pp
from .path_predicate_result import HasPathPredicateResult
class CardinalityResult(predicate.PredicateResult, HasPathPredicateResult):
"""Denotes a PredicateResult from a CardinalityPredicate.
In practice, this is a base class that is further refined for specific
types of events.
Attributes:
pred: The ValuePredicate genearting the result.
found: A list of JSON objects the predicate was applied to.
In practice these are the matched objects.
"""
@property
def path_predicate_result(self):
"""The result of mapping the underlying predicate over the source."""
return self.__collect_values_result
@property
def pred(self):
"""Returns the cardinality predicate used to generate this result."""
return self.cardinality_pred
@property
def path_pred(self):
"""The underlying path predicate used to collect values."""
return self.__collect_values_result.pred
@property
def filter_pred(self):
"""The filter to the underlying path predicate."""
return self.__collect_values_result.pred.pred
@property
def cardinality_pred(self):
"""The actual CardinalityPredicate used to generate this result."""
return self.__cardinality_pred
@property
def count(self):
"""The number of elements that satisfied the predicate."""
return len(self.__collect_values_result.path_values)
@property
def source(self):
"""The source value (collection) that we are mapping the predicateover."""
return self.__collect_values_result.source
def __init__(self, cardinality_pred, path_pred_result, **kwargs):
"""Constructor.
Args:
cardinality_pred: [CardinalityPredicate] The predicate we used to
generate this result.
pred_result: [CollectValuesResult]. The result of applying the
underlying PathPredicate bound to the |cardinality_pred|.
See the base class (PredicateResult) for additional kwargs.
"""
valid = kwargs.pop('valid', False)
super(CardinalityResult, self).__init__(valid=valid, **kwargs)
self.__cardinality_pred = cardinality_pred
self.__collect_values_result = path_pred_result
def __repr__(self):
return '{0} pred={1!r} result={2!r}'.format(
self.__class__.__name__,
self.__cardinality_pred, self.__collect_values_result)
def __str__(self):
return '{valid} count={count} of {min}...{max}'.format(
valid=self.valid, count=self.count,
min=self.__cardinality_pred.min, max=self.__cardinality_pred.max)
def __eq__(self, event):
return (self.__class__ == event.__class__
and self.__cardinality_pred == event.cardinality_pred
and self.__collect_values_result == event.path_predicate_result)
def export_to_json_snapshot(self, snapshot, entity):
"""Implements JsonSnapshotableEntity interface."""
builder = snapshot.edge_builder
count_relation = builder.determine_valid_relation(self)
result_relation = builder.determine_valid_relation(
self.__collect_values_result)
builder.make(entity, 'Count', self.count, relation=count_relation)
builder.make_mechanism(entity, 'Predicate', self.__cardinality_pred)
builder.make_input(entity, 'Source',
self.__collect_values_result.source, format='json')
builder.make(entity, 'Result',
self.__collect_values_result, relation=result_relation)
class ConfirmedCardinalityResult(CardinalityResult):
"""Denotes a CardinalityPredicate that was satisfied."""
def __init__(self, cardinality_pred, path_pred_result, **kwargs):
"""Constructor.
Args:
cardinality_pred: [CardinalityPredicate] The predicate we used to
generate this result.
pred_result: [CollectValuesResult]. The result of applying the
underlying PathPredicate bound to the |cardinality_pred|.
See the base class (CardinalityResult) for additional kwargs.
"""
valid = kwargs.pop('valid', True)
super(ConfirmedCardinalityResult, self).__init__(
valid=valid,
cardinality_pred=cardinality_pred, path_pred_result=path_pred_result,
**kwargs)
def __str__(self):
if not self.count:
return 'Confirmed no {pred}.'.format(pred=self.path_pred)
return 'Confirmed pred={pred} with count={count}'.format(
pred=self.cardinality_pred, count=self.count)
class FailedCardinalityResult(CardinalityResult):
"""Denotes a CardinalityPredicate that was not satisfied.
In practice, this is a base class used to detect failures.
It is further specialized for the particular reason for failure.
"""
pass
class UnexpectedValueCardinalityResult(FailedCardinalityResult):
"""Denotes a failure because a value existed where none were expected."""
def __str__(self):
return 'Found unexpected count={count} pred={pred}'.format(
count=self.count, pred=self.cardinality_pred)
class MissingValueCardinalityResult(FailedCardinalityResult):
  """Denotes a failure because a value did not exist where one was expected."""

  def __init__(self, source, cardinality_pred, path_pred_result, **kwargs):
    """Constructor.

    Args:
      source: [obj] The JSON object the predicate was applied to.
      cardinality_pred: [CardinalityPredicate] The predicate that produced
          this result.
      path_pred_result: [CollectValuesResult] Result of applying the
          predicate's underlying PathPredicate.

    See the base class (CardinalityResult) for additional kwargs.
    """
    valid = kwargs.pop('valid', False)
    # Forward remaining kwargs to the base class; previously they were
    # accepted but silently dropped, unlike in the sibling result classes.
    super(MissingValueCardinalityResult, self).__init__(
        valid=valid, cardinality_pred=cardinality_pred,
        path_pred_result=path_pred_result, **kwargs)
    self.__source = source

  def __str__(self):
    return 'Expected to find {pred}. No values found.'.format(
        pred=self.cardinality_pred)
class FailedCardinalityRangeResult(FailedCardinalityResult):
"""Denotes a failure because too few or too many values were found."""
def __str__(self):
# pred is a CardinalityPredicate
return ('Found {count} {criteria}'
' but expected {min}..{max}'.format(
count=self.count, criteria=self.path_pred,
min=self.cardinality_pred.min, max=self.cardinality_pred.max))
class CardinalityPredicate(predicate.ValuePredicate,
pp.ProducesPathPredicateResult):
"""Validates a JSON object value based on how many things are found within.
We implicitly wrap the predicate in a MapPredicate so that the results
coming back have a structure that makes sense. But we dont bother passing
the MapPredicate in because it is implicit. Instead we just pass in the
predicate to be mapped.
Attributes:
pred: jc.ValuePredicate to apply is implictly wrapped in a MapPredicate.
min: Minimum number of expected object matches we expect.
max: Maximum number of expected object matches we allow. < 0 indicates any.
"""
@property
def path_pred(self):
"""The underlying predicate that we are mapping."""
return self.__path_pred
@property
def filter_pred(self):
"""The filter, if any, for the underlying path predicate."""
return self.__path_pred.pred
@property
def min(self):
"""The minimum desired cardinality, or None for no lower bound."""
return self.__min
@property
def max(self):
"""The maximum desired cardinality, or None for no upper bound."""
return self.__max
def export_to_json_snapshot(self, snapshot, entity):
"""Implements JsonSnapshotableEntity interface."""
snapshot.edge_builder.make_mechanism(entity, 'Predicate', self.path_pred)
if self.__min is not None:
snapshot.edge_builder.make_control(entity, 'Min', self.__min)
if self.__max is not None:
snapshot.edge_builder.make_control(entity, 'Max',
'Any' if self.__max < 0 else self.__max)
def __init__(self, pred, min=0, max=None, **kwargs):
"""Constructor.
Args:
pred: The jc.ValuePredicate to apply.
min: The minimum number of path values we expect to find when applied.
max: The maximum number of path values we expect to find when applied.
"""
super(CardinalityPredicate, self).__init__(**kwargs)
if not isinstance(pred, predicate.ValuePredicate):
raise TypeError(
'Got {0}, expected jc.ValuePredicate'.format(pred.__class__))
self.__min = min
self.__max = max
if isinstance(pred, pp.PathPredicate):
self.__path_pred = pred
else:
self.__path_pred = pp.PathPredicate('', pred=pred)
def __eq__(self, pred):
return (self.__class__ == pred.__class__
and self.__min == pred.min
and self.__max == pred.max
and self.__path_pred == pred.path_pred)
def __str__(self):
return 'Cardinality({0}) {1}..{2}'.format(
self.__path_pred, self.__min, self.__max)
def __call__(self, context, obj):
"""Attempt to match object.
Args:
obj: JSON object to match.
Returns:
PredicateResponse
"""
collected_result = self.__path_pred(context, obj)
count = len(collected_result.path_values)
the_max = context.eval(self.__max)
the_min = context.eval(self.__min)
if not count:
if the_max != 0:
return MissingValueCardinalityResult(
obj, valid=False,
cardinality_pred=self, path_pred_result=collected_result)
else:
result_type = ConfirmedCardinalityResult
elif the_max == 0:
result_type = UnexpectedValueCardinalityResult
elif (count >= the_min
and (the_max is None or count <= the_max)):
result_type = ConfirmedCardinalityResult
else:
result_type = FailedCardinalityRangeResult
valid = result_type == ConfirmedCardinalityResult
return result_type(valid=valid, cardinality_pred=self,
path_pred_result=collected_result) | citest/json_predicate/cardinality_predicate.py | from . import predicate
from . import path_predicate as pp
from .path_predicate_result import HasPathPredicateResult
class CardinalityResult(predicate.PredicateResult, HasPathPredicateResult):
  """Denotes a PredicateResult from a CardinalityPredicate.

  In practice, this is a base class that is further refined for specific
  types of events.

  Attributes:
    pred: The ValuePredicate generating the result.
    found: A list of JSON objects the predicate was applied to.
        In practice these are the matched objects.
  """

  @property
  def path_predicate_result(self):
    """The result of mapping the underlying predicate over the source."""
    return self.__collect_values_result

  @property
  def pred(self):
    """Returns the cardinality predicate used to generate this result."""
    return self.cardinality_pred

  @property
  def path_pred(self):
    """The underlying path predicate used to collect values."""
    return self.__collect_values_result.pred

  @property
  def filter_pred(self):
    """The filter to the underlying path predicate."""
    return self.__collect_values_result.pred.pred

  @property
  def cardinality_pred(self):
    """The actual CardinalityPredicate used to generate this result."""
    return self.__cardinality_pred

  @property
  def count(self):
    """The number of elements that satisfied the predicate."""
    return len(self.__collect_values_result.path_values)

  @property
  def source(self):
    """The source value (collection) that we are mapping the predicate over."""
    return self.__collect_values_result.source

  def __init__(self, cardinality_pred, path_pred_result, **kwargs):
    """Constructor.

    Args:
      cardinality_pred: [CardinalityPredicate] The predicate we used to
          generate this result.
      path_pred_result: [CollectValuesResult] The result of applying the
          underlying PathPredicate bound to the |cardinality_pred|.

    See the base class (PredicateResult) for additional kwargs.
    """
    # Default to invalid; subclasses override via the 'valid' kwarg.
    valid = kwargs.pop('valid', False)
    super(CardinalityResult, self).__init__(valid=valid, **kwargs)
    self.__cardinality_pred = cardinality_pred
    self.__collect_values_result = path_pred_result

  def __repr__(self):
    return '{0} pred={1!r} result={2!r}'.format(
        self.__class__.__name__,
        self.__cardinality_pred, self.__collect_values_result)

  def __str__(self):
    return '{valid} count={count} of {min}...{max}'.format(
        valid=self.valid, count=self.count,
        min=self.__cardinality_pred.min, max=self.__cardinality_pred.max)

  def __eq__(self, event):
    # NOTE(review): __eq__ without __hash__ makes instances unhashable in
    # Python 3 -- confirm they are never used as dict keys or set members.
    return (self.__class__ == event.__class__
            and self.__cardinality_pred == event.cardinality_pred
            and self.__collect_values_result == event.path_predicate_result)

  def export_to_json_snapshot(self, snapshot, entity):
    """Implements JsonSnapshotableEntity interface."""
    builder = snapshot.edge_builder
    count_relation = builder.determine_valid_relation(self)
    result_relation = builder.determine_valid_relation(
        self.__collect_values_result)
    builder.make(entity, 'Count', self.count, relation=count_relation)
    builder.make_mechanism(entity, 'Predicate', self.__cardinality_pred)
    builder.make_input(entity, 'Source',
                       self.__collect_values_result.source, format='json')
    builder.make(entity, 'Result',
                 self.__collect_values_result, relation=result_relation)
class ConfirmedCardinalityResult(CardinalityResult):
  """A CardinalityResult whose cardinality bounds were satisfied."""

  def __init__(self, cardinality_pred, path_pred_result, **kwargs):
    """Constructor.

    Args:
      cardinality_pred: [CardinalityPredicate] The predicate that produced
          this result.
      path_pred_result: [CollectValuesResult] Result of applying the
          predicate's underlying PathPredicate.

    See the base class (CardinalityResult) for additional kwargs.
    """
    # Satisfied results default to valid unless the caller says otherwise.
    kwargs.setdefault('valid', True)
    super(ConfirmedCardinalityResult, self).__init__(
        cardinality_pred=cardinality_pred,
        path_pred_result=path_pred_result,
        **kwargs)

  def __str__(self):
    if self.count:
      return 'Confirmed pred={pred} with count={count}'.format(
          pred=self.cardinality_pred, count=self.count)
    return 'Confirmed no {pred}.'.format(pred=self.path_pred)
class FailedCardinalityResult(CardinalityResult):
  """A CardinalityResult whose cardinality bounds were not satisfied.

  Acts as a common base class for detecting failures; subclasses refine it
  with the specific reason the predicate failed.
  """
class UnexpectedValueCardinalityResult(FailedCardinalityResult):
  """Failure: values were present although none were expected (max == 0)."""

  def __str__(self):
    return 'Found unexpected count={count} pred={pred}'.format(
        pred=self.cardinality_pred, count=self.count)
class MissingValueCardinalityResult(FailedCardinalityResult):
  """Denotes a failure because a value did not exist where one was expected."""

  def __init__(self, source, cardinality_pred, path_pred_result, **kwargs):
    """Constructor.

    Args:
      source: [obj] The JSON object the predicate was applied to.
      cardinality_pred: [CardinalityPredicate] The predicate that produced
          this result.
      path_pred_result: [CollectValuesResult] Result of applying the
          predicate's underlying PathPredicate.

    See the base class (CardinalityResult) for additional kwargs.
    """
    valid = kwargs.pop('valid', False)
    # Forward remaining kwargs to the base class; previously they were
    # accepted but silently dropped, unlike in the sibling result classes.
    super(MissingValueCardinalityResult, self).__init__(
        valid=valid, cardinality_pred=cardinality_pred,
        path_pred_result=path_pred_result, **kwargs)
    self.__source = source

  def __str__(self):
    return 'Expected to find {pred}. No values found.'.format(
        pred=self.cardinality_pred)
class FailedCardinalityRangeResult(FailedCardinalityResult):
  """Failure: the number of values found fell outside the [min, max] range."""

  def __str__(self):
    # self.cardinality_pred is the CardinalityPredicate that was applied.
    return ('Found {count} {criteria}'
            ' but expected {min}..{max}'.format(
                count=self.count, criteria=self.path_pred,
                min=self.cardinality_pred.min, max=self.cardinality_pred.max))
class CardinalityPredicate(predicate.ValuePredicate,
pp.ProducesPathPredicateResult):
"""Validates a JSON object value based on how many things are found within.
We implicitly wrap the predicate in a MapPredicate so that the results
coming back have a structure that makes sense. But we dont bother passing
the MapPredicate in because it is implicit. Instead we just pass in the
predicate to be mapped.
Attributes:
pred: jc.ValuePredicate to apply is implictly wrapped in a MapPredicate.
min: Minimum number of expected object matches we expect.
max: Maximum number of expected object matches we allow. < 0 indicates any.
"""
@property
def path_pred(self):
"""The underlying predicate that we are mapping."""
return self.__path_pred
@property
def filter_pred(self):
"""The filter, if any, for the underlying path predicate."""
return self.__path_pred.pred
@property
def min(self):
"""The minimum desired cardinality, or None for no lower bound."""
return self.__min
@property
def max(self):
"""The maximum desired cardinality, or None for no upper bound."""
return self.__max
def export_to_json_snapshot(self, snapshot, entity):
"""Implements JsonSnapshotableEntity interface."""
snapshot.edge_builder.make_mechanism(entity, 'Predicate', self.path_pred)
if self.__min is not None:
snapshot.edge_builder.make_control(entity, 'Min', self.__min)
if self.__max is not None:
snapshot.edge_builder.make_control(entity, 'Max',
'Any' if self.__max < 0 else self.__max)
def __init__(self, pred, min=0, max=None, **kwargs):
"""Constructor.
Args:
pred: The jc.ValuePredicate to apply.
min: The minimum number of path values we expect to find when applied.
max: The maximum number of path values we expect to find when applied.
"""
super(CardinalityPredicate, self).__init__(**kwargs)
if not isinstance(pred, predicate.ValuePredicate):
raise TypeError(
'Got {0}, expected jc.ValuePredicate'.format(pred.__class__))
self.__min = min
self.__max = max
if isinstance(pred, pp.PathPredicate):
self.__path_pred = pred
else:
self.__path_pred = pp.PathPredicate('', pred=pred)
def __eq__(self, pred):
return (self.__class__ == pred.__class__
and self.__min == pred.min
and self.__max == pred.max
and self.__path_pred == pred.path_pred)
def __str__(self):
return 'Cardinality({0}) {1}..{2}'.format(
self.__path_pred, self.__min, self.__max)
def __call__(self, context, obj):
    """Attempt to match object.

    Args:
      context: Execution context; used to resolve the min/max bounds
          (via context.eval) and forwarded to the path predicate.
      obj: JSON object to match.

    Returns:
      PredicateResponse
    """
    collected_result = self.__path_pred(context, obj)
    count = len(collected_result.path_values)

    # Bounds may be late-bound values; resolve them against the context.
    the_max = context.eval(self.__max)
    the_min = context.eval(self.__min)

    if not count:
        if the_max != 0:
            # Found nothing, but at least one value was permitted/expected.
            return MissingValueCardinalityResult(
                obj, valid=False,
                cardinality_pred=self, path_pred_result=collected_result)
        else:
            result_type = ConfirmedCardinalityResult
    elif the_max == 0:
        # Found values, but expected none.
        result_type = UnexpectedValueCardinalityResult
    elif (count >= the_min
          and (the_max is None or count <= the_max)):
        result_type = ConfirmedCardinalityResult
    else:
        result_type = FailedCardinalityRangeResult

    valid = result_type == ConfirmedCardinalityResult
    return result_type(valid=valid, cardinality_pred=self,
                       path_pred_result=collected_result)
import filecmp
import os
import random
import shutil
import tempfile
import unittest

import Bio.Seq
import Bio.SeqIO.FastaIO
import Bio.SeqRecord
import obj_model
import obj_model.io
import wc_utils.workbook.io

from wc_kb import core, prokaryote_schema
from wc_kb import io
class TestIO(unittest.TestCase):
    """Round-trip and error-path tests for the ``wc_kb.io`` readers/writers.

    ``setUp`` builds a random prokaryote knowledge base (5 chromosomes with
    5 transcription units each) and writes its sequences to a FASTA file so
    each test can exercise write/read/convert round trips against it.
    """

    def setUp(self):
        """Create a temp dir, a random KB, and its FASTA sequence file."""
        self.dir = tempfile.mkdtemp()
        self.seq_path = os.path.join(self.dir, 'seq.fna')

        self.kb = kb = core.KnowledgeBase(id='genus_species', name='Genus species', version='0.0.1')
        cell = kb.cell = core.Cell(id='genus_species_cell')

        dna_seqs = []
        for i_chr in range(5):
            dna = core.DnaSpeciesType(id='chr_{}'.format(i_chr + 1), sequence_path=self.seq_path)
            cell.species_types.append(dna)

            seq_len = random.randint(100, 200)
            bases = 'ACGT'
            seq = ''
            for i_nt in range(seq_len):
                seq += bases[random.randint(0, 3)]
            dna_seqs.append(Bio.SeqRecord.SeqRecord(
                Bio.Seq.Seq(seq), dna.id))

            for i_trn in range(5):
                trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_{}_{}'.format(i_chr + 1, i_trn + 1))
                trn.cell = cell
                dna.loci.append(trn)
                trn.start = random.randint(100, 200)
                # Wrap the end coordinate so it stays within [1, seq_len].
                trn.end = ((trn.start + random.randint(1, 200) - 1) % seq_len) + 1
                trn.strand = core.PolymerStrand.positive

        with open(self.seq_path, 'w') as file:
            writer = Bio.SeqIO.FastaIO.FastaWriter(
                file, wrap=70, record2title=lambda record: record.id)
            writer.write_file(dna_seqs)

    def tearDown(self):
        """Remove the temporary directory and everything in it."""
        shutil.rmtree(self.dir)

    def test_write_read(self):
        """Writing then reading a KB preserves it and its sequences."""
        core_path = os.path.join(self.dir, 'core.xlsx')

        writer = io.Writer()
        writer.run(self.kb, core_path, set_repo_metadata_from_path=False)

        reader = io.Reader()
        kb = reader.run(core_path, self.seq_path)

        core_path = os.path.join(self.dir, 'core2.xlsx')
        seq_path = os.path.join(self.dir, 'seq2.fna')
        writer.run(kb, core_path, seq_path, set_repo_metadata_from_path=False)

        self.assertTrue(self.kb.is_equal(kb))
        self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))

    def test_read_write_prokaryote(self):
        """The prokaryote fixture KB survives a read/write round trip."""
        fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
        core_path = os.path.join(fixtures, 'core.xlsx')
        seq_path = os.path.join(fixtures, 'seq.fna')

        reader = io.Reader()
        kb = reader.run(core_path, seq_path)

        tmp_core_path = os.path.join(self.dir, 'tmp_core.xlsx')
        tmp_seq_path = os.path.join(self.dir, 'tmp_seq.fna')
        writer = io.Writer()
        writer.run(kb, tmp_core_path, tmp_seq_path, set_repo_metadata_from_path=False)

        tmp_kb = reader.run(tmp_core_path, seq_path)
        self.assertTrue(kb.is_equal(tmp_kb))
        self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))

    def test_read_write_eukaryote(self):
        """The eukaryote fixture KB round-trips with ``schema=False``."""
        fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
        core_path = os.path.join(fixtures, 'eukaryote_core.xlsx')
        seq_path = os.path.join(fixtures, 'eukaryote_seq.fna')

        reader = io.Reader()
        kb = reader.run(core_path, seq_path, schema=False)

        tmp_core_path = os.path.join(self.dir, 'tmp_eukaryote_core.xlsx')
        tmp_seq_path = os.path.join(self.dir, 'tmp_eukaryote_seq.fna')
        writer = io.Writer()
        writer.run(kb, tmp_core_path, tmp_seq_path, schema=False, set_repo_metadata_from_path=False)

        tmp_kb = reader.run(tmp_core_path, seq_path, schema=False)
        self.assertTrue(kb.is_equal(tmp_kb))
        self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))

    def test_rewrite_seq_path_in_read_write(self):
        """``rewrite_seq_path`` controls whether round-trips stay equal.

        Reading with the default rewriting yields a KB that differs from the
        original; disabling it preserves equality (as the assertions show).
        """
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        kb1 = io.Reader().run(path_core_1, path_seq_1)
        kb2 = io.Reader().run(path_core_1, path_seq_1, rewrite_seq_path=False)
        self.assertFalse(kb1.is_equal(self.kb))
        self.assertTrue(kb2.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        io.Writer().run(self.kb, path_core_2, path_seq_2, rewrite_seq_path=True, set_repo_metadata_from_path=False)
        kb3 = io.Reader().run(path_core_2, self.seq_path)
        kb4 = io.Reader().run(path_core_2, self.seq_path, rewrite_seq_path=False)
        self.assertFalse(kb3.is_equal(self.kb))
        self.assertTrue(kb4.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, self.seq_path, shallow=False))

    def test_write_with_repo_md(self):
        """``set_repo_metadata_from_path=True`` fills in the repo URL."""
        _, core_path = tempfile.mkstemp(suffix='.xlsx', dir='.')
        _, seq_path = tempfile.mkstemp(suffix='.fna', dir='.')

        self.assertEqual(self.kb.url, '')

        writer = io.Writer()
        writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=True)
        self.assertIn(self.kb.url, [
            'https://github.com/KarrLab/wc_kb.git',
            'ssh://git@github.com/KarrLab/wc_kb.git',
            'git@github.com:KarrLab/wc_kb.git',
        ])

        os.remove(core_path)
        os.remove(seq_path)

    def test_write_without_cell_relationships(self):
        """A locus whose ``cell`` is unset makes the writer fail."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            file.write('>chr_x\nACGT\n')

        dna = core.DnaSpeciesType(id='chr_x', sequence_path=seq_path)
        self.kb.cell.species_types.append(dna)

        trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_x_0')
        dna.loci.append(trn)
        trn.cell = None

        writer = io.Writer()
        with self.assertRaisesRegex(ValueError, 'must be set to the instance of `Cell`'):
            writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)

    def test_write_read_sloppy(self):
        """``strict=False`` tolerates out-of-order workbook rows."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        seq_path = os.path.join(self.dir, 'test_seq.fna')

        writer = io.Writer()
        writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)

        # Shuffle the first two rows of the 'Knowledge base' sheet.
        wb = wc_utils.workbook.io.read(core_path)
        row = wb['Knowledge base'].pop(0)
        wb['Knowledge base'].insert(1, row)
        wc_utils.workbook.io.write(core_path, wb)

        reader = io.Reader()
        with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
            kb = reader.run(core_path, self.seq_path)
        kb = reader.run(core_path, self.seq_path, strict=False)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))

    def test_reader_no_kb(self):
        """An empty workbook reads as None; a Cell without a KB is an error."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        kb = io.Reader().run(core_path, seq_path)
        self.assertEqual(kb, None)

        obj_model.io.WorkbookWriter().run(core_path, [core.Cell(id='cell')], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_multiple_kbs(self):
        """A workbook with two KnowledgeBase rows is rejected."""
        kb1 = core.KnowledgeBase(id='kb1', name='kb1', version='0.0.1')
        kb2 = core.KnowledgeBase(id='kb2', name='kb2', version='0.0.1')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb1, kb2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, ' should define one knowledge base'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_no_cell(self):
        """Content without an associated Cell is rejected."""
        kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
        dna = core.DnaSpeciesType(id='chr')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb, dna], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_multiple_cells(self):
        """A workbook with two Cell rows is rejected."""
        kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
        cell1 = core.Cell(id='cell1', name='cell1')
        cell2 = core.Cell(id='cell2', name='cell2')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb, cell1, cell2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, ' should define one cell'):
            io.Reader().run(core_path, seq_path)

    def test_convert(self):
        """xlsx -> csv -> xlsx conversion preserves the KB and sequences."""
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
        path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
        path_seq_3 = os.path.join(self.dir, 'seq_3.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
        kb = io.Reader().run(path_core_2, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))

        io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
        kb = io.Reader().run(path_core_3, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))

    def test_convert_sloppy(self):
        """Conversion honors ``strict=False`` for out-of-order rows."""
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
        path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
        path_seq_3 = os.path.join(self.dir, 'seq_3.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        # Shuffle the first two rows of the 'Knowledge base' sheet.
        wb = wc_utils.workbook.io.read(path_core_1)
        row = wb['Knowledge base'].pop(0)
        wb['Knowledge base'].insert(1, row)
        wc_utils.workbook.io.write(path_core_1, wb)

        with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
            io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
        io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2, strict=False)
        kb = io.Reader().run(path_core_2, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))

        io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
        kb = io.Reader().run(path_core_3, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))

    def test_create_template(self):
        """``create_template`` produces a workbook the reader can load."""
        path_core = os.path.join(self.dir, 'template.xlsx')
        path_seq = os.path.join(self.dir, 'template_seq.fna')
        io.create_template(path_core, path_seq, set_repo_metadata_from_path=False)
        kb = io.Reader().run(path_core, path_seq)

    def test_validate_implicit_relationships(self):
        """Each unsupported KB/Cell relationship shape raises an error."""

        class TestModel(obj_model.Model):
            id = obj_model.StringAttribute(primary=True, unique=True)

        try:
            core.KnowledgeBase.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='a')
            with self.assertRaisesRegex(Exception, 'Relationships from `KnowledgeBase` not supported:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.attributes.pop('test')

        try:
            core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(core.Cell, related_name='c')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `KnowledgeBase` that are not one-to-one are prohibited'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.related_attributes.pop('test')

        try:
            core.Cell.Meta.attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='c')
            with self.assertRaisesRegex(Exception,
                                        'Relationships from `Cell` to `KnowledgeBase` that are not one-to-one are prohibited:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.attributes.pop('test')

        try:
            core.Cell.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='d')
            with self.assertRaisesRegex(Exception,
                                        'Relationships from `Cell` to classes other than `KnowledgeBase` are prohibited:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.attributes.pop('test')

        try:
            core.Cell.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='d')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `Cell` that are not one-to-one or many-to-one are prohibited: '):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.related_attributes.pop('test')

        try:
            core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='b')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `KnowledgeBase` from classes other than `Cell` are prohibited'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.related_attributes.pop('test')
from wc_kb import io
import Bio.Seq
import Bio.SeqRecord
import filecmp
import obj_model.io
import os
import random
import shutil
import tempfile
import unittest
import wc_utils.workbook.io
class TestIO(unittest.TestCase):
    """Round-trip and error-path tests for the ``wc_kb.io`` readers/writers.

    ``setUp`` builds a random prokaryote knowledge base (5 chromosomes with
    5 transcription units each) and writes its sequences to a FASTA file so
    each test can exercise write/read/convert round trips against it.
    """

    def setUp(self):
        """Create a temp dir, a random KB, and its FASTA sequence file."""
        self.dir = tempfile.mkdtemp()
        self.seq_path = os.path.join(self.dir, 'seq.fna')

        self.kb = kb = core.KnowledgeBase(id='genus_species', name='Genus species', version='0.0.1')
        cell = kb.cell = core.Cell(id='genus_species_cell')

        dna_seqs = []
        for i_chr in range(5):
            dna = core.DnaSpeciesType(id='chr_{}'.format(i_chr + 1), sequence_path=self.seq_path)
            cell.species_types.append(dna)

            seq_len = random.randint(100, 200)
            bases = 'ACGT'
            seq = ''
            for i_nt in range(seq_len):
                seq += bases[random.randint(0, 3)]
            dna_seqs.append(Bio.SeqRecord.SeqRecord(
                Bio.Seq.Seq(seq), dna.id))

            for i_trn in range(5):
                trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_{}_{}'.format(i_chr + 1, i_trn + 1))
                trn.cell = cell
                dna.loci.append(trn)
                trn.start = random.randint(100, 200)
                # Wrap the end coordinate so it stays within [1, seq_len].
                trn.end = ((trn.start + random.randint(1, 200) - 1) % seq_len) + 1
                trn.strand = core.PolymerStrand.positive

        with open(self.seq_path, 'w') as file:
            writer = Bio.SeqIO.FastaIO.FastaWriter(
                file, wrap=70, record2title=lambda record: record.id)
            writer.write_file(dna_seqs)

    def tearDown(self):
        """Remove the temporary directory and everything in it."""
        shutil.rmtree(self.dir)

    def test_write_read(self):
        """Writing then reading a KB preserves it and its sequences."""
        core_path = os.path.join(self.dir, 'core.xlsx')

        writer = io.Writer()
        writer.run(self.kb, core_path, set_repo_metadata_from_path=False)

        reader = io.Reader()
        kb = reader.run(core_path, self.seq_path)

        core_path = os.path.join(self.dir, 'core2.xlsx')
        seq_path = os.path.join(self.dir, 'seq2.fna')
        writer.run(kb, core_path, seq_path, set_repo_metadata_from_path=False)

        self.assertTrue(self.kb.is_equal(kb))
        self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))

    def test_read_write_prokaryote(self):
        """The prokaryote fixture KB survives a read/write round trip."""
        fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
        core_path = os.path.join(fixtures, 'core.xlsx')
        seq_path = os.path.join(fixtures, 'seq.fna')

        reader = io.Reader()
        kb = reader.run(core_path, seq_path)

        tmp_core_path = os.path.join(self.dir, 'tmp_core.xlsx')
        tmp_seq_path = os.path.join(self.dir, 'tmp_seq.fna')
        writer = io.Writer()
        writer.run(kb, tmp_core_path, tmp_seq_path, set_repo_metadata_from_path=False)

        tmp_kb = reader.run(tmp_core_path, seq_path)
        self.assertTrue(kb.is_equal(tmp_kb))
        self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))

    def test_read_write_eukaryote(self):
        """The eukaryote fixture KB round-trips with ``schema=False``."""
        fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
        core_path = os.path.join(fixtures, 'eukaryote_core.xlsx')
        seq_path = os.path.join(fixtures, 'eukaryote_seq.fna')

        reader = io.Reader()
        kb = reader.run(core_path, seq_path, schema=False)

        tmp_core_path = os.path.join(self.dir, 'tmp_eukaryote_core.xlsx')
        tmp_seq_path = os.path.join(self.dir, 'tmp_eukaryote_seq.fna')
        writer = io.Writer()
        writer.run(kb, tmp_core_path, tmp_seq_path, schema=False, set_repo_metadata_from_path=False)

        tmp_kb = reader.run(tmp_core_path, seq_path, schema=False)
        self.assertTrue(kb.is_equal(tmp_kb))
        self.assertTrue(filecmp.cmp(tmp_seq_path, seq_path, shallow=False))

    def test_rewrite_seq_path_in_read_write(self):
        """``rewrite_seq_path`` controls whether round-trips stay equal.

        Reading with the default rewriting yields a KB that differs from the
        original; disabling it preserves equality (as the assertions show).
        """
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        kb1 = io.Reader().run(path_core_1, path_seq_1)
        kb2 = io.Reader().run(path_core_1, path_seq_1, rewrite_seq_path=False)
        self.assertFalse(kb1.is_equal(self.kb))
        self.assertTrue(kb2.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        io.Writer().run(self.kb, path_core_2, path_seq_2, rewrite_seq_path=True, set_repo_metadata_from_path=False)
        kb3 = io.Reader().run(path_core_2, self.seq_path)
        kb4 = io.Reader().run(path_core_2, self.seq_path, rewrite_seq_path=False)
        self.assertFalse(kb3.is_equal(self.kb))
        self.assertTrue(kb4.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, self.seq_path, shallow=False))

    def test_write_with_repo_md(self):
        """``set_repo_metadata_from_path=True`` fills in the repo URL."""
        _, core_path = tempfile.mkstemp(suffix='.xlsx', dir='.')
        _, seq_path = tempfile.mkstemp(suffix='.fna', dir='.')

        self.assertEqual(self.kb.url, '')

        writer = io.Writer()
        writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=True)
        self.assertIn(self.kb.url, [
            'https://github.com/KarrLab/wc_kb.git',
            'ssh://git@github.com/KarrLab/wc_kb.git',
            'git@github.com:KarrLab/wc_kb.git',
        ])

        os.remove(core_path)
        os.remove(seq_path)

    def test_write_without_cell_relationships(self):
        """A locus whose ``cell`` is unset makes the writer fail."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            file.write('>chr_x\nACGT\n')

        dna = core.DnaSpeciesType(id='chr_x', sequence_path=seq_path)
        self.kb.cell.species_types.append(dna)

        trn = prokaryote_schema.TranscriptionUnitLocus(id='tu_x_0')
        dna.loci.append(trn)
        trn.cell = None

        writer = io.Writer()
        with self.assertRaisesRegex(ValueError, 'must be set to the instance of `Cell`'):
            writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)

    def test_write_read_sloppy(self):
        """``strict=False`` tolerates out-of-order workbook rows."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        seq_path = os.path.join(self.dir, 'test_seq.fna')

        writer = io.Writer()
        writer.run(self.kb, core_path, seq_path, set_repo_metadata_from_path=False)

        # Shuffle the first two rows of the 'Knowledge base' sheet.
        wb = wc_utils.workbook.io.read(core_path)
        row = wb['Knowledge base'].pop(0)
        wb['Knowledge base'].insert(1, row)
        wc_utils.workbook.io.write(core_path, wb)

        reader = io.Reader()
        with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
            kb = reader.run(core_path, self.seq_path)
        kb = reader.run(core_path, self.seq_path, strict=False)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(self.seq_path, seq_path, shallow=False))

    def test_reader_no_kb(self):
        """An empty workbook reads as None; a Cell without a KB is an error."""
        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        kb = io.Reader().run(core_path, seq_path)
        self.assertEqual(kb, None)

        obj_model.io.WorkbookWriter().run(core_path, [core.Cell(id='cell')], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_multiple_kbs(self):
        """A workbook with two KnowledgeBase rows is rejected."""
        kb1 = core.KnowledgeBase(id='kb1', name='kb1', version='0.0.1')
        kb2 = core.KnowledgeBase(id='kb2', name='kb2', version='0.0.1')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb1, kb2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, ' should define one knowledge base'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_no_cell(self):
        """Content without an associated Cell is rejected."""
        kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
        dna = core.DnaSpeciesType(id='chr')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb, dna], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, 'cannot contain instances'):
            io.Reader().run(core_path, seq_path)

    def test_reader_error_multiple_cells(self):
        """A workbook with two Cell rows is rejected."""
        kb = core.KnowledgeBase(id='kb', name='kb1', version='0.0.1')
        cell1 = core.Cell(id='cell1', name='cell1')
        cell2 = core.Cell(id='cell2', name='cell2')

        core_path = os.path.join(self.dir, 'core.xlsx')
        obj_model.io.WorkbookWriter().run(core_path, [kb, cell1, cell2], io.PROKARYOTE_MODEL_ORDER, include_all_attributes=False)
        seq_path = os.path.join(self.dir, 'test_seq.fna')
        with open(seq_path, 'w') as file:
            pass

        with self.assertRaisesRegex(ValueError, ' should define one cell'):
            io.Reader().run(core_path, seq_path)

    def test_convert(self):
        """xlsx -> csv -> xlsx conversion preserves the KB and sequences."""
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
        path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
        path_seq_3 = os.path.join(self.dir, 'seq_3.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
        kb = io.Reader().run(path_core_2, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))

        io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
        kb = io.Reader().run(path_core_3, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))

    def test_convert_sloppy(self):
        """Conversion honors ``strict=False`` for out-of-order rows."""
        path_core_1 = os.path.join(self.dir, 'core_1.xlsx')
        path_core_2 = os.path.join(self.dir, 'core_2-*.csv')
        path_core_3 = os.path.join(self.dir, 'core_3.xlsx')
        path_seq_1 = os.path.join(self.dir, 'seq_1.fna')
        path_seq_2 = os.path.join(self.dir, 'seq_2.fna')
        path_seq_3 = os.path.join(self.dir, 'seq_3.fna')

        io.Writer().run(self.kb, path_core_1, path_seq_1, set_repo_metadata_from_path=False)
        self.assertTrue(filecmp.cmp(path_seq_1, self.seq_path, shallow=False))

        # Shuffle the first two rows of the 'Knowledge base' sheet.
        wb = wc_utils.workbook.io.read(path_core_1)
        row = wb['Knowledge base'].pop(0)
        wb['Knowledge base'].insert(1, row)
        wc_utils.workbook.io.write(path_core_1, wb)

        with self.assertRaisesRegex(ValueError, "The columns of worksheet 'Knowledge base' must be defined in this order"):
            io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2)
        io.convert(path_core_1, path_seq_1, path_core_2, path_seq_2, strict=False)
        kb = io.Reader().run(path_core_2, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_1, path_seq_2, shallow=False))

        io.convert(path_core_2, path_seq_2, path_core_3, path_seq_3)
        kb = io.Reader().run(path_core_3, self.seq_path)
        self.assertTrue(kb.is_equal(self.kb))
        self.assertTrue(filecmp.cmp(path_seq_2, path_seq_3, shallow=False))

    def test_create_template(self):
        """``create_template`` produces a workbook the reader can load."""
        path_core = os.path.join(self.dir, 'template.xlsx')
        path_seq = os.path.join(self.dir, 'template_seq.fna')
        io.create_template(path_core, path_seq, set_repo_metadata_from_path=False)
        kb = io.Reader().run(path_core, path_seq)

    def test_validate_implicit_relationships(self):
        """Each unsupported KB/Cell relationship shape raises an error."""

        class TestModel(obj_model.Model):
            id = obj_model.StringAttribute(primary=True, unique=True)

        try:
            core.KnowledgeBase.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='a')
            with self.assertRaisesRegex(Exception, 'Relationships from `KnowledgeBase` not supported:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.attributes.pop('test')

        try:
            core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(core.Cell, related_name='c')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `KnowledgeBase` that are not one-to-one are prohibited'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.related_attributes.pop('test')

        try:
            core.Cell.Meta.attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='c')
            with self.assertRaisesRegex(Exception,
                                        'Relationships from `Cell` to `KnowledgeBase` that are not one-to-one are prohibited:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.attributes.pop('test')

        try:
            core.Cell.Meta.attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='d')
            with self.assertRaisesRegex(Exception,
                                        'Relationships from `Cell` to classes other than `KnowledgeBase` are prohibited:'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.attributes.pop('test')

        try:
            core.Cell.Meta.related_attributes['test'] = obj_model.OneToManyAttribute(TestModel, related_name='d')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `Cell` that are not one-to-one or many-to-one are prohibited: '):
                io.Writer.validate_implicit_relationships()
        finally:
            core.Cell.Meta.related_attributes.pop('test')

        try:
            core.KnowledgeBase.Meta.related_attributes['test'] = obj_model.OneToOneAttribute(TestModel, related_name='b')
            with self.assertRaisesRegex(Exception,
                                        'Relationships to `KnowledgeBase` from classes other than `Cell` are prohibited'):
                io.Writer.validate_implicit_relationships()
        finally:
            core.KnowledgeBase.Meta.related_attributes.pop('test')
from http import HTTPStatus
from django import test as django_tests
from django.contrib.auth import get_user_model
from django.urls import reverse
from mytravelblog.main_app.models import VisitedCity
UserModel = get_user_model()
class VisitedCitiesViewTests(django_tests.TestCase):
    """Tests for the 'cities view' (visited cities list) view."""

    def setUp(self):
        """Create two users and authenticate the first one."""
        self.username = 'testuser'
        self.other_username = 'testuser2'
        # NOTE(review): '<PASSWORD>' is an anonymization placeholder left by
        # the dataset export; substitute a real test password.
        self.password1 = '<PASSWORD>'
        self.context_data = 'user_cities'
        self.city_name = 'sofia'
        self.other_city_name = 'shumen'
        self.country_name = 'bulgaria'
        self.user = UserModel.objects.create_user(
            username=self.username,
            password=self.password1,
        )
        self.user_two = UserModel.objects.create_user(
            username=self.other_username,
            password=self.password1,
        )
        self.client.login(username=self.username, password=self.password1)

    def test_cities_view_no_city_registered_page_url(self):
        """With no cities, the raw URL redirects to the dashboard."""
        response = self.client.get('/show-cities/')
        self.assertRedirects(response,
                             reverse('show dashboard'),
                             status_code=HTTPStatus.FOUND,
                             target_status_code=HTTPStatus.OK)

    def test_cities_view_no_city_registered_age_view_name(self):
        """With no cities, the named route redirects to the dashboard."""
        response = self.client.get(reverse('cities view'))
        self.assertRedirects(response,
                             reverse('show dashboard'),
                             status_code=HTTPStatus.FOUND,
                             target_status_code=HTTPStatus.OK)

    def test_cities_view_with_existing_city(self):
        """With a city registered, the view renders the cities template."""
        VisitedCity.objects.create(
            city_name=self.city_name,
            country_name=self.country_name,
            user=self.user,
        )
        self.assertEqual(1, VisitedCity.objects.count())
        response = self.client.get(reverse('cities view'))
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertTemplateUsed(response, 'main_app/generic/visited_cities.html')

    def test_cities_view_does_not_show_other_user_city(self):
        """Only the logged-in user's cities appear in the context."""
        visited_city_one = VisitedCity.objects.create(
            city_name=self.city_name,
            country_name=self.country_name,
            user=self.user,
        )
        VisitedCity.objects.create(
            city_name=self.other_city_name,
            country_name=self.country_name,
            user=self.user_two,
        )
        self.assertEqual(2, VisitedCity.objects.count())
        response = self.client.get(reverse('cities view'))
        self.assertEqual(1, len(response.context_data['cities']))
        self.assertEqual(visited_city_one, response.context_data['cities'][0])
from django import test as django_tests
from django.contrib.auth import get_user_model
from django.urls import reverse
from mytravelblog.main_app.models import VisitedCity
UserModel = get_user_model()
class VisitedCitiesViewTests(django_tests.TestCase):
    """Tests for the 'cities view' (visited cities list) view."""

    def setUp(self):
        """Create two users and authenticate the first one."""
        self.username = 'testuser'
        self.other_username = 'testuser2'
        # NOTE(review): '<PASSWORD>' is an anonymization placeholder left by
        # the dataset export; substitute a real test password.
        self.password1 = '<PASSWORD>'
        self.context_data = 'user_cities'
        self.city_name = 'sofia'
        self.other_city_name = 'shumen'
        self.country_name = 'bulgaria'
        self.user = UserModel.objects.create_user(
            username=self.username,
            password=self.password1,
        )
        self.user_two = UserModel.objects.create_user(
            username=self.other_username,
            password=self.password1,
        )
        self.client.login(username=self.username, password=self.password1)

    def test_cities_view_no_city_registered_page_url(self):
        """With no cities, the raw URL redirects to the dashboard."""
        response = self.client.get('/show-cities/')
        self.assertRedirects(response,
                             reverse('show dashboard'),
                             status_code=HTTPStatus.FOUND,
                             target_status_code=HTTPStatus.OK)

    def test_cities_view_no_city_registered_age_view_name(self):
        """With no cities, the named route redirects to the dashboard."""
        response = self.client.get(reverse('cities view'))
        self.assertRedirects(response,
                             reverse('show dashboard'),
                             status_code=HTTPStatus.FOUND,
                             target_status_code=HTTPStatus.OK)

    def test_cities_view_with_existing_city(self):
        """With a city registered, the view renders the cities template."""
        VisitedCity.objects.create(
            city_name=self.city_name,
            country_name=self.country_name,
            user=self.user,
        )
        self.assertEqual(1, VisitedCity.objects.count())
        response = self.client.get(reverse('cities view'))
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertTemplateUsed(response, 'main_app/generic/visited_cities.html')

    def test_cities_view_does_not_show_other_user_city(self):
        """Only the logged-in user's cities appear in the context."""
        visited_city_one = VisitedCity.objects.create(
            city_name=self.city_name,
            country_name=self.country_name,
            user=self.user,
        )
        VisitedCity.objects.create(
            city_name=self.other_city_name,
            country_name=self.country_name,
            user=self.user_two,
        )
        self.assertEqual(2, VisitedCity.objects.count())
        response = self.client.get(reverse('cities view'))
        self.assertEqual(1, len(response.context_data['cities']))
        self.assertEqual(visited_city_one, response.context_data['cities'][0])
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5      # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15      # Height of a brick (in pixels).
BRICK_ROWS = 10        # Number of rows of bricks.
BRICK_COLS = 10        # Number of columns of bricks.
BRICK_OFFSET = 50      # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10       # Radius of the ball (in pixels).
PADDLE_WIDTH = 75      # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15     # Height of the paddle (in pixels).
PADDLE_OFFSET = 50     # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7.0  # Initial vertical speed for the ball.
MAX_X_SPEED = 5        # Maximum initial horizontal speed for the ball.
LIVES = 3              # Number of times the user can restart the game.
class BreakoutGraphics:
def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
title='Breakout'):
# How many lives left.
self.lives_left = LIVES
# Create a graphical window, with some extra space.
self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=self.window_width, height=self.window_height, title=title)
# Create a paddle.
self.paddle = GRect(PADDLE_WIDTH, PADDLE_HEIGHT, x=(self.window_width - PADDLE_WIDTH) / 2,
y=self.window_height - PADDLE_OFFSET)
self.paddle.filled = True
self.paddle.color = 'lavenderblush'
self.paddle.fill_color = 'lavenderblush'
self.window.add(self.paddle)
# Center a filled ball in the graphical window.
self.ball = GOval(BALL_RADIUS * 2, BALL_RADIUS * 2, x=self.window_width / 2 - BALL_RADIUS,
y=self.window_height / 2 - BALL_RADIUS)
self.ball.filled = True
self.ball.color = 'lightpink'
self.ball.fill_color = 'lightpink'
self.window.add(self.ball)
# Default initial velocity for the ball.
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if random.random() > 0.5:
self.__dx = -self.__dx
# Determine whether game has been started.
self.start_or_not = False
# Initialize our mouse listeners.
onmouseclicked(self.start)
# Determine the game has started.
onmousemoved(self.drag)
# Draw bricks.
for i in range(BRICK_ROWS):
for j in range(BRICK_COLS):
self.brick = GRect(BRICK_WIDTH, BRICK_HEIGHT)
self.brick.filled = True
if j == 0 or j == 1:
self.brick.color = 'darkmagenta'
self.brick.fill_color = 'darkmagenta'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 2 or j == 3:
self.brick.color = 'mediumvioletred'
self.brick.fill_color = 'mediumvioletred'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 4 or j == 5:
self.brick.color = 'deeppink'
self.brick.fill_color = 'deeppink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 6 or j == 7:
self.brick.color = 'hotpink'
self.brick.fill_color = 'hotpink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 8 or j == 9:
self.brick.color = 'pink'
self.brick.fill_color = 'pink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
# Calculate how many bricks exist.
self.numbers_of_brick = BRICK_ROWS * BRICK_COLS
def check_for_collision(self):
ball_upper_left = self.window.get_object_at(self.ball.x, self.ball.y)
ball_upper_right = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y)
ball_lower_left = self.window.get_object_at(self.ball.x, self.ball.y + 2 * BALL_RADIUS)
ball_lower_right = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y + 2 * BALL_RADIUS)
# Check whether is right edge.
if self.ball.x + 2 * BALL_RADIUS > self.window.width:
self.__dx = -self.__dx
# Check whether is left edge.
if self.ball.x < 0:
self.__dx = -self.__dx
# Check whether is upper edge.
if self.ball.y < 0:
self.__dy = -self.__dy
# Check whether is paddle.
if ball_lower_left or ball_lower_right is self.paddle:
self.__dy = -INITIAL_Y_SPEED
# Check whether is brick.
if ball_upper_left is not None and ball_upper_left is not self.paddle:
self.window.remove(ball_upper_left)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_upper_right is not None and ball_upper_right is not self.paddle:
self.window.remove(ball_upper_right)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_lower_left is not None and ball_lower_left is not self.paddle:
self.window.remove(ball_lower_left)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_lower_right is not None and ball_lower_right is not self.paddle:
self.window.remove(ball_lower_right)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
def reset_ball(self):
# Check whether is lower edge.
if self.ball.y > self.window.height:
self.lives_left -= 1
self.start_or_not = False
self.window.add(self.ball, x=self.window_width / 2 - BALL_RADIUS,
y=self.window_height / 2 - BALL_RADIUS)
def drag(self, mouse):
if PADDLE_WIDTH / 2 <= mouse.x <= self.window.width - PADDLE_WIDTH / 2:
self.paddle.x = mouse.x - PADDLE_WIDTH / 2
def start(self, mouse):
self.start_or_not = True
def get_dx(self):
return self.__dx
def get_dy(self):
return self.__dy | stanCode project/Object-oriented Programming/breakoutgraphics.py | from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40 # Height of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7.0 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
LIVES = 3 # Control the times which user can restart the game.
class BreakoutGraphics:
def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
title='Breakout'):
# How many lives left.
self.lives_left = LIVES
# Create a graphical window, with some extra space.
self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=self.window_width, height=self.window_height, title=title)
# Create a paddle.
self.paddle = GRect(PADDLE_WIDTH, PADDLE_HEIGHT, x=(self.window_width - PADDLE_WIDTH) / 2,
y=self.window_height - PADDLE_OFFSET)
self.paddle.filled = True
self.paddle.color = 'lavenderblush'
self.paddle.fill_color = 'lavenderblush'
self.window.add(self.paddle)
# Center a filled ball in the graphical window.
self.ball = GOval(BALL_RADIUS * 2, BALL_RADIUS * 2, x=self.window_width / 2 - BALL_RADIUS,
y=self.window_height / 2 - BALL_RADIUS)
self.ball.filled = True
self.ball.color = 'lightpink'
self.ball.fill_color = 'lightpink'
self.window.add(self.ball)
# Default initial velocity for the ball.
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if random.random() > 0.5:
self.__dx = -self.__dx
# Determine whether game has been started.
self.start_or_not = False
# Initialize our mouse listeners.
onmouseclicked(self.start)
# Determine the game has started.
onmousemoved(self.drag)
# Draw bricks.
for i in range(BRICK_ROWS):
for j in range(BRICK_COLS):
self.brick = GRect(BRICK_WIDTH, BRICK_HEIGHT)
self.brick.filled = True
if j == 0 or j == 1:
self.brick.color = 'darkmagenta'
self.brick.fill_color = 'darkmagenta'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 2 or j == 3:
self.brick.color = 'mediumvioletred'
self.brick.fill_color = 'mediumvioletred'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 4 or j == 5:
self.brick.color = 'deeppink'
self.brick.fill_color = 'deeppink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 6 or j == 7:
self.brick.color = 'hotpink'
self.brick.fill_color = 'hotpink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
elif j == 8 or j == 9:
self.brick.color = 'pink'
self.brick.fill_color = 'pink'
self.window.add(self.brick, x=(BRICK_SPACING * i + BRICK_WIDTH * i),
y=(BRICK_SPACING * j + BRICK_HEIGHT * j + BRICK_OFFSET))
# Calculate how many bricks exist.
self.numbers_of_brick = BRICK_ROWS * BRICK_COLS
def check_for_collision(self):
ball_upper_left = self.window.get_object_at(self.ball.x, self.ball.y)
ball_upper_right = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y)
ball_lower_left = self.window.get_object_at(self.ball.x, self.ball.y + 2 * BALL_RADIUS)
ball_lower_right = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y + 2 * BALL_RADIUS)
# Check whether is right edge.
if self.ball.x + 2 * BALL_RADIUS > self.window.width:
self.__dx = -self.__dx
# Check whether is left edge.
if self.ball.x < 0:
self.__dx = -self.__dx
# Check whether is upper edge.
if self.ball.y < 0:
self.__dy = -self.__dy
# Check whether is paddle.
if ball_lower_left or ball_lower_right is self.paddle:
self.__dy = -INITIAL_Y_SPEED
# Check whether is brick.
if ball_upper_left is not None and ball_upper_left is not self.paddle:
self.window.remove(ball_upper_left)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_upper_right is not None and ball_upper_right is not self.paddle:
self.window.remove(ball_upper_right)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_lower_left is not None and ball_lower_left is not self.paddle:
self.window.remove(ball_lower_left)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
elif ball_lower_right is not None and ball_lower_right is not self.paddle:
self.window.remove(ball_lower_right)
self.__dy = -self.__dy
self.numbers_of_brick -= 1
def reset_ball(self):
# Check whether is lower edge.
if self.ball.y > self.window.height:
self.lives_left -= 1
self.start_or_not = False
self.window.add(self.ball, x=self.window_width / 2 - BALL_RADIUS,
y=self.window_height / 2 - BALL_RADIUS)
def drag(self, mouse):
if PADDLE_WIDTH / 2 <= mouse.x <= self.window.width - PADDLE_WIDTH / 2:
self.paddle.x = mouse.x - PADDLE_WIDTH / 2
def start(self, mouse):
self.start_or_not = True
def get_dx(self):
return self.__dx
def get_dy(self):
return self.__dy | 0.709523 | 0.270149 |
from reconstruct import iradon_centered
from skimage.filters import sobel_h, sobel_v
from scipy.interpolate import InterpolatedUnivariateSpline
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
def image_entropy(reco, eps=1e-12):
return np.sum(reco * np.log(reco + eps))
def inv_image_gradient(reco, eps=1e-12):
sobelx = sobel_v(reco)
sobely = sobel_h(reco)
grad_norm = (sobelx ** 2 + sobely ** 2) ** (1 / 2)
return grad_norm.sum()
def run_method_on_set(sino, angles, shift=15, use_spline_minima=True, method=inv_image_gradient, reconstructor=iradon_centered, verbose=True):
best_id = 0
temp_loss = []
for i in tqdm(range(-shift, shift), ):
reco = reconstructor(sino, angles, center=sino.shape[1] / 2. + i)
temp_sum = method(reco)
temp_loss.append(temp_sum)
if use_spline_minima:
x_axis = np.arange(-shift, shift)
f = InterpolatedUnivariateSpline(x_axis, np.array(temp_loss), k=4)
cr_pts = f.derivative().roots()
cr_pts = np.append(cr_pts, (x_axis[0], x_axis[-1])) # also check the endpoints of the interval
cr_vals = f(cr_pts)
min_index = np.argmin(cr_vals)
max_index = np.argmax(cr_vals)
min_point = cr_pts[min_index]
else:
min_point = np.argmin(temp_loss) - shift
if verbose:
print('predict:', min_point)
plt.plot(np.arange(-shift, shift), temp_loss)
plt.show()
return min_point
def run_method_diff(sino, angles, start_point=0., eps=0.25, iters=20, step=0.5, step_size='gradient', method=inv_image_gradient, reconstructor=iradon_centered, verbose=True):
shift = start_point
temp_loss = []
for i in range(iters):
print(sino.shape[1] / 2. + shift - eps, sino.shape[1] / 2. + shift + eps)
reco_left = reconstructor(sino.copy(), angles, center=sino.shape[1] / 2. + shift - eps)
reco_right = reconstructor(sino.copy(), angles, center=sino.shape[1] / 2. + shift + eps)
sum_left = method(reco_left)
sum_right = method(reco_right)
gradient = (sum_right - sum_left) / (2 * eps)
update = gradient * step
if step_size == 'fixed':
update = step * np.sign(gradient)
if verbose:
print('iter:', i, 'sums:', sum_left, sum_right, 'gradient:', gradient, 'new_shift:', shift, 'update:', update)
if update < eps:
break
shift += update
return shift | reco_based.py | from reconstruct import iradon_centered
from skimage.filters import sobel_h, sobel_v
from scipy.interpolate import InterpolatedUnivariateSpline
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
def image_entropy(reco, eps=1e-12):
return np.sum(reco * np.log(reco + eps))
def inv_image_gradient(reco, eps=1e-12):
sobelx = sobel_v(reco)
sobely = sobel_h(reco)
grad_norm = (sobelx ** 2 + sobely ** 2) ** (1 / 2)
return grad_norm.sum()
def run_method_on_set(sino, angles, shift=15, use_spline_minima=True, method=inv_image_gradient, reconstructor=iradon_centered, verbose=True):
best_id = 0
temp_loss = []
for i in tqdm(range(-shift, shift), ):
reco = reconstructor(sino, angles, center=sino.shape[1] / 2. + i)
temp_sum = method(reco)
temp_loss.append(temp_sum)
if use_spline_minima:
x_axis = np.arange(-shift, shift)
f = InterpolatedUnivariateSpline(x_axis, np.array(temp_loss), k=4)
cr_pts = f.derivative().roots()
cr_pts = np.append(cr_pts, (x_axis[0], x_axis[-1])) # also check the endpoints of the interval
cr_vals = f(cr_pts)
min_index = np.argmin(cr_vals)
max_index = np.argmax(cr_vals)
min_point = cr_pts[min_index]
else:
min_point = np.argmin(temp_loss) - shift
if verbose:
print('predict:', min_point)
plt.plot(np.arange(-shift, shift), temp_loss)
plt.show()
return min_point
def run_method_diff(sino, angles, start_point=0., eps=0.25, iters=20, step=0.5, step_size='gradient', method=inv_image_gradient, reconstructor=iradon_centered, verbose=True):
shift = start_point
temp_loss = []
for i in range(iters):
print(sino.shape[1] / 2. + shift - eps, sino.shape[1] / 2. + shift + eps)
reco_left = reconstructor(sino.copy(), angles, center=sino.shape[1] / 2. + shift - eps)
reco_right = reconstructor(sino.copy(), angles, center=sino.shape[1] / 2. + shift + eps)
sum_left = method(reco_left)
sum_right = method(reco_right)
gradient = (sum_right - sum_left) / (2 * eps)
update = gradient * step
if step_size == 'fixed':
update = step * np.sign(gradient)
if verbose:
print('iter:', i, 'sums:', sum_left, sum_right, 'gradient:', gradient, 'new_shift:', shift, 'update:', update)
if update < eps:
break
shift += update
return shift | 0.587233 | 0.393851 |
from django.shortcuts import render, redirect, reverse, HttpResponse
from django.views.generic import View
from django.http import JsonResponse
from django.contrib import auth
from django.contrib.auth.hashers import make_password
import logging
import random
import string
from apps.users.models import UserInfo
from django.core.cache import cache
from django.core.mail import send_mail, EmailMultiAlternatives
from qika.settings import EMAIL_FROM
from django.core.cache import cache
logger = logging.getLogger('account')
# Create your views here.
class Register(View):
def get(self, request):
return redirect(reverse('index'))
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
password2 = request.POST.get('<PASSWORD>')
nickname = request.POST.get('nickname')
email = request.POST.get('email')
email_captcha = request.POST.get('email_captcha')
if_username_exist = UserInfo.objects.filter(username=username)
if_nickname_exist = UserInfo.objects.filter(nickname=nickname)
if_email_exist = UserInfo.objects.filter(email=email)
email_captcha_redis = cache.get(email)
if not if_username_exist and not if_nickname_exist and not if_email_exist \
and (password == <PASSWORD>) and (email_captcha == email_captcha_redis):
user = UserInfo.objects.create(username=username, password=<PASSWORD>_password(password),
nickname=nickname, email=email)
user.save()
auth.login(request, user)
ret = {'status': 200, 'msg': '用户注册成功'}
elif if_username_exist:
ret = {'status': 402, 'msg': '账号已存在'}
elif if_nickname_exist:
ret = {'status': 402, 'msg': '昵称已存在'}
elif if_email_exist:
ret = {'status': 402, 'msg': '邮箱已存在'}
elif password != <PASSWORD>:
ret = {'status': 402, 'msg': '两次密码不一致'}
elif email_captcha != email_captcha_redis:
ret = {'status': 402, 'msg': '邮箱验证码不正确'}
elif not email_captcha_redis:
ret = {'status': 401, 'msg': '验证码错误或过期'}
else:
ret = {'status': 400, 'msg': '调用方式错误'}
return JsonResponse(ret)
class Login(View):
def get(self, request):
return redirect(reverse('index'))
# Form表单直接提交
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
captcha = request.POST.get('captcha')
session_captcha_code = request.session.get('captcha_code', '')
if captcha.lower() == session_captcha_code.lower():
user = auth.authenticate(username=username, password=password)
if user and user.is_active:
auth.login(request, user)
logger.info(f"{user.username}登录成功")
ret = {'status': 200, 'msg': '登录成功'}
else:
logger.error(f"{username}登录失败, 用户名或密码错误")
ret = {'status': 400, 'msg': '账号或密码错误'}
else:
ret = {'status': 400, 'msg': '验证码错误'}
logger.error(f'{username}登陆失败,验证码错误')
return JsonResponse(ret)
def logout(request):
auth.logout(request)
return redirect(reverse('index'))
class PasswordForget(View):
def get(self, request):
return render(request, 'index.html')
def post(self, request):
username = request.POST.get('username')
email = request.POST.get('email')
if username and email and UserInfo.objects.filter(
username=username, email=email):
verify_code = "".join(
random.choices(
string.ascii_lowercase +
string.digits,
k=128))
url = f"{request.scheme}://{request.META['HTTP_HOST']}/account/password/reset/{verify_code}?email={email}"
cache.set(verify_code,
{'username': username,
'email': email,
'verify_code': verify_code,
'url': url},
1800)
email_title = '【qikaACG】忘记密码验证'
email_body = f'<p>点击下面的链接进行验证,有效时间30分钟:</p></br><a href="{url}">{url}</a>'
msg = EmailMultiAlternatives(
email_title, email_body, EMAIL_FROM, [email])
msg.content_subtype = "html"
msg.send()
ret = {'status': 200, 'msg': '邮件发送成功,请登录邮箱查看!如果没收到,请到垃圾箱查看是否存在!'}
else:
ret = {'status': 400, 'msg': '输入的邮箱不存在!'}
return JsonResponse(ret)
class PasswordReset(View):
def get(self, request, verify_code):
message = cache.get(verify_code)
if verify_code and message:
return render(request, 'password_reset.html')
else:
return HttpResponse("链接失效或有误")
def post(self, request, verify_code):
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
if password1 == password2:
try:
message = cache.get(verify_code)
email = message.get('email')
user = UserInfo.objects.get(email=email)
user.set_password(<PASSWORD>)
user.save()
msg = "重置密码成功"
code = 200
except Exception as ex:
logger.error(ex)
code = 400
msg = "出错啦"
else:
code = 400
msg = '两次密码不一致'
return render(request, 'password_reset.html', {'code': code, 'msg': msg})
def page_not_found(request):
return render(request, '404.html') | qika/apps/account/views.py | from django.shortcuts import render, redirect, reverse, HttpResponse
from django.views.generic import View
from django.http import JsonResponse
from django.contrib import auth
from django.contrib.auth.hashers import make_password
import logging
import random
import string
from apps.users.models import UserInfo
from django.core.cache import cache
from django.core.mail import send_mail, EmailMultiAlternatives
from qika.settings import EMAIL_FROM
from django.core.cache import cache
logger = logging.getLogger('account')
# Create your views here.
class Register(View):
def get(self, request):
return redirect(reverse('index'))
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
password2 = request.POST.get('<PASSWORD>')
nickname = request.POST.get('nickname')
email = request.POST.get('email')
email_captcha = request.POST.get('email_captcha')
if_username_exist = UserInfo.objects.filter(username=username)
if_nickname_exist = UserInfo.objects.filter(nickname=nickname)
if_email_exist = UserInfo.objects.filter(email=email)
email_captcha_redis = cache.get(email)
if not if_username_exist and not if_nickname_exist and not if_email_exist \
and (password == <PASSWORD>) and (email_captcha == email_captcha_redis):
user = UserInfo.objects.create(username=username, password=<PASSWORD>_password(password),
nickname=nickname, email=email)
user.save()
auth.login(request, user)
ret = {'status': 200, 'msg': '用户注册成功'}
elif if_username_exist:
ret = {'status': 402, 'msg': '账号已存在'}
elif if_nickname_exist:
ret = {'status': 402, 'msg': '昵称已存在'}
elif if_email_exist:
ret = {'status': 402, 'msg': '邮箱已存在'}
elif password != <PASSWORD>:
ret = {'status': 402, 'msg': '两次密码不一致'}
elif email_captcha != email_captcha_redis:
ret = {'status': 402, 'msg': '邮箱验证码不正确'}
elif not email_captcha_redis:
ret = {'status': 401, 'msg': '验证码错误或过期'}
else:
ret = {'status': 400, 'msg': '调用方式错误'}
return JsonResponse(ret)
class Login(View):
def get(self, request):
return redirect(reverse('index'))
# Form表单直接提交
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
captcha = request.POST.get('captcha')
session_captcha_code = request.session.get('captcha_code', '')
if captcha.lower() == session_captcha_code.lower():
user = auth.authenticate(username=username, password=password)
if user and user.is_active:
auth.login(request, user)
logger.info(f"{user.username}登录成功")
ret = {'status': 200, 'msg': '登录成功'}
else:
logger.error(f"{username}登录失败, 用户名或密码错误")
ret = {'status': 400, 'msg': '账号或密码错误'}
else:
ret = {'status': 400, 'msg': '验证码错误'}
logger.error(f'{username}登陆失败,验证码错误')
return JsonResponse(ret)
def logout(request):
auth.logout(request)
return redirect(reverse('index'))
class PasswordForget(View):
def get(self, request):
return render(request, 'index.html')
def post(self, request):
username = request.POST.get('username')
email = request.POST.get('email')
if username and email and UserInfo.objects.filter(
username=username, email=email):
verify_code = "".join(
random.choices(
string.ascii_lowercase +
string.digits,
k=128))
url = f"{request.scheme}://{request.META['HTTP_HOST']}/account/password/reset/{verify_code}?email={email}"
cache.set(verify_code,
{'username': username,
'email': email,
'verify_code': verify_code,
'url': url},
1800)
email_title = '【qikaACG】忘记密码验证'
email_body = f'<p>点击下面的链接进行验证,有效时间30分钟:</p></br><a href="{url}">{url}</a>'
msg = EmailMultiAlternatives(
email_title, email_body, EMAIL_FROM, [email])
msg.content_subtype = "html"
msg.send()
ret = {'status': 200, 'msg': '邮件发送成功,请登录邮箱查看!如果没收到,请到垃圾箱查看是否存在!'}
else:
ret = {'status': 400, 'msg': '输入的邮箱不存在!'}
return JsonResponse(ret)
class PasswordReset(View):
def get(self, request, verify_code):
message = cache.get(verify_code)
if verify_code and message:
return render(request, 'password_reset.html')
else:
return HttpResponse("链接失效或有误")
def post(self, request, verify_code):
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
if password1 == password2:
try:
message = cache.get(verify_code)
email = message.get('email')
user = UserInfo.objects.get(email=email)
user.set_password(<PASSWORD>)
user.save()
msg = "重置密码成功"
code = 200
except Exception as ex:
logger.error(ex)
code = 400
msg = "出错啦"
else:
code = 400
msg = '两次密码不一致'
return render(request, 'password_reset.html', {'code': code, 'msg': msg})
def page_not_found(request):
return render(request, '404.html') | 0.286768 | 0.049497 |
from functools import partial
from os import environ
from string import Template
from constrictor.dpkg import LINK_PATH_KEY
# Key pointing at a parent config file to inherit from (resolved by the caller,
# never merged directly -- see ConstrictorConfiguration.update_configuration).
PARENT_KEY = "parent"
# Top-level section holding deb-constrictor's own (non-dpkg) settings.
DEB_CONSTRICTOR_KEY = "deb_constrictor"
# List of paths to exclude from the package (deduplicated when merging).
IGNORE_PATHS_KEY = "ignore_paths"
# Build-time variables and environment variables, each a list of [name, value]
# pairs (order matters: later entries may reference earlier ones).
VARIABLES_KEY = "variables"
ENVIRONMENT_VARIABLES_KEY = "environment_variables"
# Extra fields for the Debian control file; Depends/Provides are list-merged.
EXTRA_CONTROL_FIELDS_KEY = "extra_control_fields"
DEPENDS_KEY = "Depends"
PROVIDES_KEY = "Provides"
# Package content: directory copies and symlinks.
DIRECTORIES_KEY = "directories"
LINKS_KEY = "links"
# Commands are deliberately NOT interpolated at build time (see
# interpolate_dictionary) because some variables arrive at run time.
COMMANDS_KEY = "commands"
# preinst/postinst/prerm/postrm script paths; later configs override earlier.
MAINTAINER_SCRIPTS_KEY = "maintainer_scripts"
# dpkg conffiles list (appended to, unique against pre-existing entries).
CONFIG_FILES_KEY = "configuration_files"
# The path fields a directory entry is compared on.
DIRECTORY_PATH_KEYS = ('source', 'destination')
def ensure_trailing_slash(path):
    """Return *path* unchanged if it already ends with '/', else with '/' appended."""
    if path.endswith('/'):
        return path
    return path + '/'
def ensure_trailing_slashes_in_directory(directory):
    """Return a copy of the directory entry with every path field slash-terminated."""
    normalised = dict(directory)
    for path_key in DIRECTORY_PATH_KEYS:
        normalised[path_key] = ensure_trailing_slash(directory[path_key])
    return normalised
def extract_directory_paths(directory):
    """Return a dict holding only the path fields of a directory entry."""
    return {key: directory[key] for key in directory if key in DIRECTORY_PATH_KEYS}
def directory_entries_equal(dir1, dir2):
    """Return True when two directory entries match on their (slash-normalised) paths.

    Only the path fields are compared; any other keys in the entries are ignored.
    """
    paths1, paths2 = (
        extract_directory_paths(ensure_trailing_slashes_in_directory(entry))
        for entry in (dir1, dir2)
    )
    return paths1 == paths2
def directory_entries_not_equal(dir1, dir2):
    """Inverse of directory_entries_equal; convenient as a filter predicate."""
    return not directory_entries_equal(dir1, dir2)
def interpolate_value(v, context):
    """Substitute $-style template variables from *context* into *v*.

    Dicts and lists are interpolated recursively in place and None is
    returned; bools, ints and floats are left alone (also returning None);
    any other value is treated as a template string and the substituted
    string is returned.
    """
    if isinstance(v, dict):
        interpolate_dictionary(v, context)
    elif isinstance(v, list):
        interpolate_list(v, context)
    elif not isinstance(v, (bool, int, float)):
        return Template(v).substitute(context)
    return None
def interpolate_list(list_, context):
    """Interpolate template variables into every element of *list_*, in place.

    Elements for which interpolate_value returns None (containers handled
    in place, plain numbers/bools) are left untouched.
    """
    for index, element in enumerate(list_):
        replacement = interpolate_value(element, context)
        if replacement is not None:
            list_[index] = replacement
def interpolate_dictionary(d, context):
    """Interpolate template variables into every value of *d*, in place.

    The COMMANDS_KEY entry is skipped: commands may reference variables that
    are only supplied at run time, so they must not be expanded at build time.
    """
    for key, value in d.items():
        if key == COMMANDS_KEY:
            continue
        replacement = interpolate_value(value, context)
        if replacement is not None:
            d[key] = replacement
class ConstrictorConfiguration(object):
"""
Configuration for the DpkgBuilder. Should be instantiated with the root config dict, and then updated with child
    (overriding) configuration dictionaries by calling the update_configuration method with each child.
"""
def __init__(self, base_configuration):
self.configuration = {}
self.environment_variables = {}
self.variables = {}
self.update_configuration(base_configuration)
def update_configuration(self, configuration):
"""
Override the existing configuration with the new values. Is more advanced than just doing old[k] = new[k] as it
is also aware of what items are lists and should be appended to.
"""
for k, v in configuration.items():
if k == PARENT_KEY:
continue
elif k == DEB_CONSTRICTOR_KEY:
self.update_deb_constrictor_configuration(v)
elif k == EXTRA_CONTROL_FIELDS_KEY:
self.update_extra_control_fields(v)
elif k == DIRECTORIES_KEY:
self.update_directory_entries(v)
elif k == LINKS_KEY:
self.update_link_entries(v)
elif k == MAINTAINER_SCRIPTS_KEY:
self.update_maintainer_scripts(v)
elif k == CONFIG_FILES_KEY:
self.update_configuration_files(v)
else:
self.configuration[k] = v
def update_deb_constrictor_configuration(self, configuration):
"""
Updates items in the DEB_CONSTRICTOR_KEY value dict with new items. Will append items to lists where
appropriate.
"""
if DEB_CONSTRICTOR_KEY not in self.configuration:
self.configuration[DEB_CONSTRICTOR_KEY] = {}
for k, v in configuration.items():
if k == IGNORE_PATHS_KEY:
self.update_ignore_paths(v)
elif k in (VARIABLES_KEY, ENVIRONMENT_VARIABLES_KEY):
self.update_variables(k, v)
elif k == COMMANDS_KEY:
self.update_commands(v)
else:
self.configuration[DEB_CONSTRICTOR_KEY][k] = v
def update_ignore_paths(self, ignore_paths):
"""
Updates the IGNORE_PATHS_KEY list in the DEB_CONSTRICTOR_KEY dict with the passed in list. Will only add the
path if it does not exist in the list already (no duplicates).
"""
if IGNORE_PATHS_KEY not in self.configuration[DEB_CONSTRICTOR_KEY]:
self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY] = []
for ignore_path in ignore_paths:
if ignore_path not in self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY]:
self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY].append(ignore_path)
def update_extra_control_fields(self, control_fields):
"""
Updates existing EXTRA_CONTROL_FIELDS_KEY dictionary with items from the passed in dictionary. Appends
(uniquely) to list type items instead of overriding.
"""
if EXTRA_CONTROL_FIELDS_KEY not in self.configuration:
self.configuration[EXTRA_CONTROL_FIELDS_KEY] = {}
for k, v in control_fields.items():
if k in (DEPENDS_KEY, PROVIDES_KEY):
self.update_extra_control_field_list(k, v)
else:
self.configuration[EXTRA_CONTROL_FIELDS_KEY][k] = v
def update_extra_control_field_list(self, control_field_name, new_list):
"""
Appends items in new_list to the given key (control_field_name) in the EXTRA_CONTROL_FIELDS_KEY dictionary.
Makes sure items are unique.
"""
if control_field_name not in self.configuration[EXTRA_CONTROL_FIELDS_KEY]:
self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name] = []
for field_item in new_list:
if field_item not in self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name]:
self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name].append(field_item)
def update_directory_entries(self, directories_list):
"""
Append the given directory entries in directories_list to the existing directories in the configuration.
Existing directories are removed and replaced if they have the same source and destination as an incoming entry
(as there may be legitimate cases to source the same destination to multiple targets or multiple sources to the
same target [if they contain different files] so if either differ to an existing entry it will be added).
"""
if DIRECTORIES_KEY not in self.configuration:
self.configuration[DIRECTORIES_KEY] = []
for new_directory in directories_list:
self.configuration[DIRECTORIES_KEY] = list(filter(partial(directory_entries_not_equal, new_directory),
self.configuration[DIRECTORIES_KEY]))
self.configuration[DIRECTORIES_KEY].append(new_directory)
def update_link_entries(self, links_list):
"""
Append the given links in the links_list to the existing links in the configuration. Existing link entries are
removed and replaced with incoming if they match on the source field only (as it doesn't make sense to create
two links in the same place).
"""
if LINKS_KEY not in self.configuration:
self.configuration[LINKS_KEY] = []
for new_link in links_list:
self.configuration[LINKS_KEY] = list(filter(lambda link: link[LINK_PATH_KEY] != new_link[LINK_PATH_KEY],
self.configuration[LINKS_KEY]))
self.configuration[LINKS_KEY].append(new_link)
def update_maintainer_scripts(self, maintainer_scripts):
    """Override existing maintainer script keys with the new ones."""
    if MAINTAINER_SCRIPTS_KEY not in self.configuration:
        self.configuration[MAINTAINER_SCRIPTS_KEY] = {}
    self.configuration[MAINTAINER_SCRIPTS_KEY].update(maintainer_scripts)

def update_configuration_files(self, configuration_files):
    """Append configuration files to existing."""
    if CONFIG_FILES_KEY not in self.configuration:
        self.configuration[CONFIG_FILES_KEY] = []
    # Only add paths that are not already present (no duplicates).
    self.configuration[CONFIG_FILES_KEY] += list(
        filter(lambda path: path not in self.configuration[CONFIG_FILES_KEY], configuration_files))

def update_variables(self, variables_key, new_variables):
    """
    Append the list of variables to the given variables_key. Does not do de-duplication of the variable name as it
    might be good to have parent variables populate and the be able to be used in a child config.
    """
    if variables_key not in self.configuration[DEB_CONSTRICTOR_KEY]:
        self.configuration[DEB_CONSTRICTOR_KEY][variables_key] = []
    self.configuration[DEB_CONSTRICTOR_KEY][variables_key] += new_variables

def update_commands(self, commands):
    """Merge the new commands mapping over any existing commands."""
    if COMMANDS_KEY not in self.configuration[DEB_CONSTRICTOR_KEY]:
        self.configuration[DEB_CONSTRICTOR_KEY][COMMANDS_KEY] = {}
    self.configuration[DEB_CONSTRICTOR_KEY][COMMANDS_KEY].update(commands)
def interpolate_variables(self):
    """
    Should be called before variables are used, when we have finished updating all configs down the hierarchy, to
    interpolate the variables with variables before they can be used.
    """
    self.store_variable_list(VARIABLES_KEY, self.variables)
    self.store_variable_list(ENVIRONMENT_VARIABLES_KEY, self.environment_variables)

def store_variable_list(self, variables_list_key, variables_container):
    """
    Interpolate each variable in the variables list (a list of lists, each item is [key, value]) and store it in the
    dictionary for use later, i.e: [[k, v]] => {k: interpolate_value(v)}
    Because the items are processed in order, items further along the list might be interpolated with variables set
    by earlier elements.
    """
    for k, v in self.configuration[DEB_CONSTRICTOR_KEY].get(variables_list_key, []):
        variables_container[k] = self.interpolate_value(v)

def get_template_context(self):
    """
    Template context (for interpolating variables) is os.environ, which is overridden by self.environment_variables
    and then overridden by self.variables.
    If you wanted to be more performant you would cache the ctx and invalidate it when the var variables change, but
    I don't foresee this being an issue.
    """
    ctx = dict(environ)
    ctx.update(self.environment_variables)
    ctx.update(self.variables)
    return ctx

def interpolate_value(self, value):
    # Interpolate a single value against the current template context.
    return interpolate_value(value, self.get_template_context())

def interpolate_configuration_values(self):
    """
    Recurse through the configuration and interpolate all the values with the template context. This should be
    called after all the configurations have been loaded (parent hierarchy resolved and updated) and then
    interpolate_variables called.
    """
    interpolate_dictionary(self.configuration, self.get_template_context())

def __getitem__(self, item):
    # Dict-style access delegates to the merged configuration.
    return self.configuration[item]

def get(self, item, default=None):
    # dict.get-style access against the merged configuration.
    return self.configuration.get(item, default)
from os import environ
from string import Template
from constrictor.dpkg import LINK_PATH_KEY
# Top-level configuration keys.
PARENT_KEY = "parent"
DEB_CONSTRICTOR_KEY = "deb_constrictor"
# Keys nested under the deb_constrictor section.
IGNORE_PATHS_KEY = "ignore_paths"
VARIABLES_KEY = "variables"
ENVIRONMENT_VARIABLES_KEY = "environment_variables"
EXTRA_CONTROL_FIELDS_KEY = "extra_control_fields"
# Control fields that are merged as appendable lists rather than overridden.
DEPENDS_KEY = "Depends"
PROVIDES_KEY = "Provides"
DIRECTORIES_KEY = "directories"
LINKS_KEY = "links"
COMMANDS_KEY = "commands"
MAINTAINER_SCRIPTS_KEY = "maintainer_scripts"
CONFIG_FILES_KEY = "configuration_files"
# Keys within a directory entry that hold filesystem paths.
DIRECTORY_PATH_KEYS = ('source', 'destination')
def ensure_trailing_slash(path):
    """Return *path* unchanged if it already ends with '/', otherwise path + '/'."""
    if path.endswith('/'):
        return path
    return path + '/'
def ensure_trailing_slashes_in_directory(directory):
    """Return a copy of the directory entry whose path items all end with '/'.

    Non-path keys are carried over untouched; the input entry is not mutated.
    """
    normalised = dict(directory)
    normalised.update(
        {key: ensure_trailing_slash(directory[key]) for key in DIRECTORY_PATH_KEYS})
    return normalised
def extract_directory_paths(directory):
    """Return a dict containing only the path items of a directory entry."""
    paths = {}
    for key, value in directory.items():
        if key in DIRECTORY_PATH_KEYS:
            paths[key] = value
    return paths
def directory_entries_equal(dir1, dir2):
    """Compare two directory entries; equal when their (slash-normalised) path items match."""
    def canonical(entry):
        # Normalise trailing slashes first so 'a' and 'a/' compare equal.
        return extract_directory_paths(ensure_trailing_slashes_in_directory(entry))
    return canonical(dir1) == canonical(dir2)

def directory_entries_not_equal(dir1, dir2):
    """Negation of directory_entries_equal, for use as a filter() predicate."""
    return not directory_entries_equal(dir1, dir2)
def interpolate_value(v, context):
    """Interpolate ``$var`` template references in *v* using *context*.

    Dicts and lists are interpolated recursively in place and ``None`` is
    returned; booleans and numbers need no interpolation and also yield
    ``None``; any other value is treated as a template string and the
    substituted result is returned.
    """
    if isinstance(v, dict):
        interpolate_dictionary(v, context)
    elif isinstance(v, list):
        interpolate_list(v, context)
    elif not isinstance(v, (bool, int, float)):
        return Template(v).substitute(context)
    return None

def interpolate_list(list_, context):
    """Walk through the list and interpolate each element in place."""
    for index in range(len(list_)):
        replacement = interpolate_value(list_[index], context)
        if replacement is not None:
            list_[index] = replacement

def interpolate_dictionary(d, context):
    """Walk through the dictionary and interpolate each value in place."""
    for key, value in d.items():
        # Don't interpolate commands at build time: some of their variables
        # are only supplied at run time.
        if key == COMMANDS_KEY:
            continue
        replacement = interpolate_value(value, context)
        if replacement is not None:
            d[key] = replacement
class ConstrictorConfiguration(object):
    """
    Configuration for the DpkgBuilder. Should be instantiated with the root config dict, and then updated with child
    (overriding) configuration dictionaries by calling the update_configuration method with each child.
    """

    def __init__(self, base_configuration):
        # Merged configuration dict plus the resolved variable containers
        # (filled in later by interpolate_variables()).
        self.configuration = {}
        self.environment_variables = {}
        self.variables = {}
        self.update_configuration(base_configuration)

    def update_configuration(self, configuration):
        """
        Override the existing configuration with the new values. Is more advanced than just doing old[k] = new[k] as it
        is also aware of what items are lists and should be appended to.
        """
        for k, v in configuration.items():
            if k == PARENT_KEY:
                # Parent linkage is resolved by the caller; never merged in.
                continue
            elif k == DEB_CONSTRICTOR_KEY:
                self.update_deb_constrictor_configuration(v)
            elif k == EXTRA_CONTROL_FIELDS_KEY:
                self.update_extra_control_fields(v)
            elif k == DIRECTORIES_KEY:
                self.update_directory_entries(v)
            elif k == LINKS_KEY:
                self.update_link_entries(v)
            elif k == MAINTAINER_SCRIPTS_KEY:
                self.update_maintainer_scripts(v)
            elif k == CONFIG_FILES_KEY:
                self.update_configuration_files(v)
            else:
                # Any other key is a plain override.
                self.configuration[k] = v

    def update_deb_constrictor_configuration(self, configuration):
        """
        Updates items in the DEB_CONSTRICTOR_KEY value dict with new items. Will append items to lists where
        appropriate.
        """
        if DEB_CONSTRICTOR_KEY not in self.configuration:
            self.configuration[DEB_CONSTRICTOR_KEY] = {}
        for k, v in configuration.items():
            if k == IGNORE_PATHS_KEY:
                self.update_ignore_paths(v)
            elif k in (VARIABLES_KEY, ENVIRONMENT_VARIABLES_KEY):
                self.update_variables(k, v)
            elif k == COMMANDS_KEY:
                self.update_commands(v)
            else:
                self.configuration[DEB_CONSTRICTOR_KEY][k] = v

    def update_ignore_paths(self, ignore_paths):
        """
        Updates the IGNORE_PATHS_KEY list in the DEB_CONSTRICTOR_KEY dict with the passed in list. Will only add the
        path if it does not exist in the list already (no duplicates).
        """
        if IGNORE_PATHS_KEY not in self.configuration[DEB_CONSTRICTOR_KEY]:
            self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY] = []
        for ignore_path in ignore_paths:
            if ignore_path not in self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY]:
                self.configuration[DEB_CONSTRICTOR_KEY][IGNORE_PATHS_KEY].append(ignore_path)

    def update_extra_control_fields(self, control_fields):
        """
        Updates existing EXTRA_CONTROL_FIELDS_KEY dictionary with items from the passed in dictionary. Appends
        (uniquely) to list type items instead of overriding.
        """
        if EXTRA_CONTROL_FIELDS_KEY not in self.configuration:
            self.configuration[EXTRA_CONTROL_FIELDS_KEY] = {}
        for k, v in control_fields.items():
            if k in (DEPENDS_KEY, PROVIDES_KEY):
                # Depends/Provides accumulate instead of being replaced.
                self.update_extra_control_field_list(k, v)
            else:
                self.configuration[EXTRA_CONTROL_FIELDS_KEY][k] = v

    def update_extra_control_field_list(self, control_field_name, new_list):
        """
        Appends items in new_list to the given key (control_field_name) in the EXTRA_CONTROL_FIELDS_KEY dictionary.
        Makes sure items are unique.
        """
        if control_field_name not in self.configuration[EXTRA_CONTROL_FIELDS_KEY]:
            self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name] = []
        for field_item in new_list:
            if field_item not in self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name]:
                self.configuration[EXTRA_CONTROL_FIELDS_KEY][control_field_name].append(field_item)

    def update_directory_entries(self, directories_list):
        """
        Append the given directory entries in directories_list to the existing directories in the configuration.
        Existing directories are removed and replaced if they have the same source and destination as an incoming entry
        (as there may be legitimate cases to source the same destination to multiple targets or multiple sources to the
        same target [if they contain different files] so if either differ to an existing entry it will be added).
        """
        if DIRECTORIES_KEY not in self.configuration:
            self.configuration[DIRECTORIES_KEY] = []
        for new_directory in directories_list:
            # Keep only entries whose paths differ from the incoming one.
            self.configuration[DIRECTORIES_KEY] = list(filter(partial(directory_entries_not_equal, new_directory),
                                                              self.configuration[DIRECTORIES_KEY]))
            self.configuration[DIRECTORIES_KEY].append(new_directory)

    def update_link_entries(self, links_list):
        """
        Append the given links in the links_list to the existing links in the configuration. Existing link entries are
        removed and replaced with incoming if they match on the source field only (as it doesn't make sense to create
        two links in the same place).
        """
        if LINKS_KEY not in self.configuration:
            self.configuration[LINKS_KEY] = []
        for new_link in links_list:
            self.configuration[LINKS_KEY] = list(filter(lambda link: link[LINK_PATH_KEY] != new_link[LINK_PATH_KEY],
                                                        self.configuration[LINKS_KEY]))
            self.configuration[LINKS_KEY].append(new_link)

    def update_maintainer_scripts(self, maintainer_scripts):
        """Override existing maintainer script keys with the new ones."""
        if MAINTAINER_SCRIPTS_KEY not in self.configuration:
            self.configuration[MAINTAINER_SCRIPTS_KEY] = {}
        self.configuration[MAINTAINER_SCRIPTS_KEY].update(maintainer_scripts)

    def update_configuration_files(self, configuration_files):
        """Append configuration files to existing."""
        if CONFIG_FILES_KEY not in self.configuration:
            self.configuration[CONFIG_FILES_KEY] = []
        # Skip paths that are already present (no duplicates).
        self.configuration[CONFIG_FILES_KEY] += list(
            filter(lambda path: path not in self.configuration[CONFIG_FILES_KEY], configuration_files))

    def update_variables(self, variables_key, new_variables):
        """
        Append the list of variables to the given variables_key. Does not do de-duplication of the variable name as it
        might be good to have parent variables populate and the be able to be used in a child config.
        """
        if variables_key not in self.configuration[DEB_CONSTRICTOR_KEY]:
            self.configuration[DEB_CONSTRICTOR_KEY][variables_key] = []
        self.configuration[DEB_CONSTRICTOR_KEY][variables_key] += new_variables

    def update_commands(self, commands):
        """Merge the new commands mapping over any existing commands."""
        if COMMANDS_KEY not in self.configuration[DEB_CONSTRICTOR_KEY]:
            self.configuration[DEB_CONSTRICTOR_KEY][COMMANDS_KEY] = {}
        self.configuration[DEB_CONSTRICTOR_KEY][COMMANDS_KEY].update(commands)

    def interpolate_variables(self):
        """
        Should be called before variables are used, when we have finished updating all configs down the hierarchy, to
        interpolate the variables with variables before they can be used.
        """
        self.store_variable_list(VARIABLES_KEY, self.variables)
        self.store_variable_list(ENVIRONMENT_VARIABLES_KEY, self.environment_variables)

    def store_variable_list(self, variables_list_key, variables_container):
        """
        Interpolate each variable in the variables list (a list of lists, each item is [key, value]) and store it in the
        dictionary for use later, i.e: [[k, v]] => {k: interpolate_value(v)}
        Because the items are processed in order, items further along the list might be interpolated with variables set
        by earlier elements.
        """
        for k, v in self.configuration[DEB_CONSTRICTOR_KEY].get(variables_list_key, []):
            variables_container[k] = self.interpolate_value(v)

    def get_template_context(self):
        """
        Template context (for interpolating variables) is os.environ, which is overridden by self.environment_variables
        and then overridden by self.variables.
        If you wanted to be more performant you would cache the ctx and invalidate it when the var variables change, but
        I don't foresee this being an issue.
        """
        ctx = dict(environ)
        ctx.update(self.environment_variables)
        ctx.update(self.variables)
        return ctx

    def interpolate_value(self, value):
        # Interpolate a single value against the current template context.
        return interpolate_value(value, self.get_template_context())

    def interpolate_configuration_values(self):
        """
        Recurse through the configuration and interpolate all the values with the template context. This should be
        called after all the configurations have been loaded (parent hierarchy resolved and updated) and then
        interpolate_variables called.
        """
        interpolate_dictionary(self.configuration, self.get_template_context())

    def __getitem__(self, item):
        return self.configuration[item]

    def get(self, item, default=None):
        return self.configuration.get(item, default)
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
class Migration(migrations.Migration):
    """Create the KeyNote, KeyNotesSet, Proof and SocialProofs CMS plugin models."""
    # Django auto-generated migration: field definitions reflect the models'
    # historical state and should not be edited by hand.

    dependencies = [
        ('cms', '0020_old_tree_cleanup'),
        # The filer image model is swappable; depend on the configured one.
        migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
        ('pages', '0032_auto_20181114_1351'),
    ]

    operations = [
        migrations.CreateModel(
            name='KeyNotePlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_keynoteplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('button_text', models.CharField(blank=True, max_length=200)),
                ('uid', models.SlugField(max_length=8, unique=True)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='KeyNotesSetPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_keynotessetplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='ProofPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_proofplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
                ('proof_icon', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='proof_icon', to=settings.FILER_IMAGE_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='SocialProofsPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_socialproofsplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('post_description', djangocms_text_ckeditor.fields.HTMLField()),
                ('more_proofs_link', models.URLField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
class Migration(migrations.Migration):
    """Create the KeyNote, KeyNotesSet, Proof and SocialProofs CMS plugin models."""
    # Django auto-generated migration: field definitions reflect the models'
    # historical state and should not be edited by hand.

    dependencies = [
        ('cms', '0020_old_tree_cleanup'),
        # The filer image model is swappable; depend on the configured one.
        migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
        ('pages', '0032_auto_20181114_1351'),
    ]

    operations = [
        migrations.CreateModel(
            name='KeyNotePlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_keynoteplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('button_text', models.CharField(blank=True, max_length=200)),
                ('uid', models.SlugField(max_length=8, unique=True)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='KeyNotesSetPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_keynotessetplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='ProofPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_proofplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('description', djangocms_text_ckeditor.fields.HTMLField()),
                ('proof_icon', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='proof_icon', to=settings.FILER_IMAGE_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='SocialProofsPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='pages_socialproofsplugin', serialize=False, to='cms.CMSPlugin')),
                ('hidden', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('post_description', djangocms_text_ckeditor.fields.HTMLField()),
                ('more_proofs_link', models.URLField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
import time
import journal
from nisar.workflows import (bandpass_insar, crossmul, dense_offsets, geo2rdr,
geocode_insar, h5_prep, filter_interferogram,
rdr2geo, resample_slc, rubbersheet,
split_spectrum, unwrap)
from nisar.workflows.insar_runconfig import InsarRunConfig
from nisar.workflows.persistence import Persistence
from nisar.workflows.yaml_argparse import YamlArgparse
def run(cfg: dict, out_paths: dict, run_steps: dict):
    '''
    Run INSAR workflow with parameters in cfg dictionary

    cfg is the runconfig dictionary driving each step; out_paths maps product
    names (e.g. 'RIFG', 'RUNW', 'GUNW') to their output locations; run_steps
    maps step names to booleans (False entries were completed by a previous,
    restarted run and are skipped).
    '''
    info_channel = journal.info("insar.run")
    info_channel.log("starting INSAR")
    t_all = time.time()
    if run_steps['bandpass_insar']:
        bandpass_insar.run(cfg)
    if run_steps['h5_prep']:
        h5_prep.run(cfg)
    if run_steps['rdr2geo']:
        rdr2geo.run(cfg)
    if run_steps['geo2rdr']:
        geo2rdr.run(cfg)
    if run_steps['coarse_resample']:
        resample_slc.run(cfg, 'coarse')
    # Optional steps below are additionally gated by their 'enabled' flags in cfg.
    if (run_steps['dense_offsets']) and \
            (cfg['processing']['dense_offsets']['enabled']):
        dense_offsets.run(cfg)
    if run_steps['rubbersheet'] and \
            cfg['processing']['rubbersheet']['enabled']:
        rubbersheet.run(cfg, out_paths['RIFG'])
    # If enabled, run fine_resampling
    if run_steps['fine_resample'] and \
            cfg['processing']['fine_resample']['enabled']:
        resample_slc.run(cfg, 'fine')
    # If fine_resampling is enabled, use fine-coregistered SLC
    # to run crossmul
    if run_steps['crossmul']:
        if cfg['processing']['fine_resample']['enabled']:
            crossmul.run(cfg, out_paths['RIFG'], 'fine')
        else:
            crossmul.run(cfg, out_paths['RIFG'], 'coarse')
    # Run insar_filter only
    if run_steps['filter_interferogram'] and \
            cfg['processing']['filter_interferogram']['filter_type'] != 'no_filter':
        filter_interferogram.run(cfg, out_paths['RIFG'])
    # Unwrapping and geocoding only run when their products were requested.
    if run_steps['unwrap'] and 'RUNW' in out_paths:
        unwrap.run(cfg, out_paths['RIFG'], out_paths['RUNW'])
    if run_steps['ionosphere'] and \
            cfg['processing']['ionosphere_phase_correction']['enabled']:
        split_spectrum.run(cfg)
    if run_steps['geocode'] and 'GUNW' in out_paths:
        geocode_insar.run(cfg, out_paths['RUNW'], out_paths['GUNW'])
    t_all_elapsed = time.time() - t_all
    info_channel.log(f"successfully ran INSAR in {t_all_elapsed:.3f} seconds")
if __name__ == "__main__":
    # parse CLI input
    yaml_parser = YamlArgparse()
    args = yaml_parser.parse()
    # convert CLI input to run configuration
    insar_runcfg = InsarRunConfig(args)
    # determine what steps if any need to be rerun
    persist = Persistence(insar_runcfg.args.restart)
    # run InSAR workflow (skipped entirely when persistence says nothing to do)
    if persist.run:
        _, out_paths = h5_prep.get_products_and_paths(insar_runcfg.cfg)
        run(insar_runcfg.cfg, out_paths, persist.run_steps)
import journal
from nisar.workflows import (bandpass_insar, crossmul, dense_offsets, geo2rdr,
geocode_insar, h5_prep, filter_interferogram,
rdr2geo, resample_slc, rubbersheet,
split_spectrum, unwrap)
from nisar.workflows.insar_runconfig import InsarRunConfig
from nisar.workflows.persistence import Persistence
from nisar.workflows.yaml_argparse import YamlArgparse
def run(cfg: dict, out_paths: dict, run_steps: dict):
    '''
    Run INSAR workflow with parameters in cfg dictionary

    cfg is the runconfig dictionary driving each step; out_paths maps product
    names (e.g. 'RIFG', 'RUNW', 'GUNW') to their output locations; run_steps
    maps step names to booleans (False entries were completed by a previous,
    restarted run and are skipped).
    '''
    info_channel = journal.info("insar.run")
    info_channel.log("starting INSAR")
    t_all = time.time()
    if run_steps['bandpass_insar']:
        bandpass_insar.run(cfg)
    if run_steps['h5_prep']:
        h5_prep.run(cfg)
    if run_steps['rdr2geo']:
        rdr2geo.run(cfg)
    if run_steps['geo2rdr']:
        geo2rdr.run(cfg)
    if run_steps['coarse_resample']:
        resample_slc.run(cfg, 'coarse')
    # Optional steps below are additionally gated by their 'enabled' flags in cfg.
    if (run_steps['dense_offsets']) and \
            (cfg['processing']['dense_offsets']['enabled']):
        dense_offsets.run(cfg)
    if run_steps['rubbersheet'] and \
            cfg['processing']['rubbersheet']['enabled']:
        rubbersheet.run(cfg, out_paths['RIFG'])
    # If enabled, run fine_resampling
    if run_steps['fine_resample'] and \
            cfg['processing']['fine_resample']['enabled']:
        resample_slc.run(cfg, 'fine')
    # If fine_resampling is enabled, use fine-coregistered SLC
    # to run crossmul
    if run_steps['crossmul']:
        if cfg['processing']['fine_resample']['enabled']:
            crossmul.run(cfg, out_paths['RIFG'], 'fine')
        else:
            crossmul.run(cfg, out_paths['RIFG'], 'coarse')
    # Run insar_filter only
    if run_steps['filter_interferogram'] and \
            cfg['processing']['filter_interferogram']['filter_type'] != 'no_filter':
        filter_interferogram.run(cfg, out_paths['RIFG'])
    # Unwrapping and geocoding only run when their products were requested.
    if run_steps['unwrap'] and 'RUNW' in out_paths:
        unwrap.run(cfg, out_paths['RIFG'], out_paths['RUNW'])
    if run_steps['ionosphere'] and \
            cfg['processing']['ionosphere_phase_correction']['enabled']:
        split_spectrum.run(cfg)
    if run_steps['geocode'] and 'GUNW' in out_paths:
        geocode_insar.run(cfg, out_paths['RUNW'], out_paths['GUNW'])
    t_all_elapsed = time.time() - t_all
    info_channel.log(f"successfully ran INSAR in {t_all_elapsed:.3f} seconds")
if __name__ == "__main__":
    # parse CLI input
    yaml_parser = YamlArgparse()
    args = yaml_parser.parse()
    # convert CLI input to run configuration
    insar_runcfg = InsarRunConfig(args)
    # determine what steps if any need to be rerun
    persist = Persistence(insar_runcfg.args.restart)
    # run InSAR workflow (skipped entirely when persistence says nothing to do)
    if persist.run:
        _, out_paths = h5_prep.get_products_and_paths(insar_runcfg.cfg)
        run(insar_runcfg.cfg, out_paths, persist.run_steps)
from appJar import gui
# Single global appJar GUI instance shared by all handlers below.
app=gui()
import os
import csv
def weather_plot(btn):
    """Button handler: let the user pick a weather CSV and plot it.

    Adds a "Files" option box listing the CSVs in the current directory and
    an "OK" button whose handler parses the chosen file and shows plots of
    temperature, pressure and humidity over time.
    """
    # Plotting imports are local so they are only loaded on first use.
    import matplotlib.pyplot as plt
    import dateutil
    # NOTE(review): a bare `import dateutil` does not normally expose
    # `dateutil.parser` (used below) — confirm `from dateutil import parser`
    # isn't required.
    import numpy as np
    from matplotlib.dates import DateFormatter
    # Parsed samples; filled in by the nested ok() handler via closure.
    times=[]
    degrees_list=[]
    pressure_list=[]
    humidity_list=[]
    file_name=[]
    # Offer every CSV file in the current directory.
    for filename in os.listdir('.'):
        if filename.endswith(".csv"):
            file_name.append(os.path.join('.', filename))
    app.setFont(20)
    # NOTE(review): pressing "Plot Weather Data" more than once re-adds the
    # "Files" option box and "OK" button with the same names — appJar is
    # expected to reject duplicate widget titles; confirm intended usage.
    app.addOptionBox("Files",file_name)
    def ok(btn):
        # Parse the selected CSV; columns: timestamp, temperature(C),
        # pressure(hPa), humidity(%).
        user_file=app.getOptionBox("Files")
        results = csv.reader(open(user_file), delimiter=',')
        # NOTE(review): the file handle is never closed; `with open(...)`
        # would be safer.
        row_counter=0
        for r in results:
            if row_counter>0:  # skip the header row
                times.append(dateutil.parser.parse(r[0]))
                degrees_list.append(float(r[1]))
                pressure_list.append(float(r[2]))
                humidity_list.append(float(r[3]))
            row_counter+=1
        # Per-quantity means and standard deviations over blocks of
        # n_merge consecutive samples; any trailing partial block is dropped.
        temp_ave=[]
        temp_unc = []
        pressure_ave=[]
        pressure_unc=[]
        humidity_ave=[]
        humidity_unc=[]
        merge_times = []
        n_merge = 8
        ndata = len(degrees_list)
        nsum_data = int(ndata/n_merge)
        for i in range(nsum_data):
            itemp = degrees_list[i*n_merge:(i+1)*n_merge]
            itemp_array = np.asarray(itemp)
            temp_mean = np.mean(itemp_array)
            temp_sigma = np.sqrt(np.var(itemp_array))
            temp_ave.append(temp_mean)
            temp_unc.append(temp_sigma)
        for i in range(nsum_data):
            ipressure = pressure_list[i*n_merge:(i+1)*n_merge]
            ipressure_array = np.asarray(ipressure)
            pressure_mean = np.mean(ipressure_array)
            pressure_sigma = np.sqrt(np.var(ipressure_array))
            pressure_ave.append(pressure_mean)
            pressure_unc.append(pressure_sigma)
        for i in range(nsum_data):
            ihumid = humidity_list[i*n_merge:(i+1)*n_merge]
            ihumid_array = np.asarray(ihumid)
            humid_mean = np.mean(ihumid_array)
            humid_sigma = np.sqrt(np.var(ihumid_array))
            humidity_ave.append(humid_mean)
            humidity_unc.append(humid_sigma)
        # Represent each block by its middle timestamp.
        for i in range(nsum_data):
            itimes = times[i*n_merge:(i+1)*n_merge]
            itime = itimes[int(len(itimes)/2)]
            merge_times.append(itime)
        # One figure per quantity, x-axis formatted as HH:MM:SS.
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, temp_ave, "b.")
        plt.errorbar(merge_times, temp_ave, yerr = temp_unc)
        plt.title("Temperature")
        plt.xlabel("Time(s)")
        plt.ylabel("Temperature(C)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, pressure_ave,"g." )
        plt.errorbar(merge_times, pressure_ave, yerr = pressure_unc)
        plt.title("Pressure")
        plt.xlabel("Time(s)")
        plt.ylabel("Pressure(hPa)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, humidity_ave,"r." )
        plt.errorbar(merge_times, humidity_ave, yerr = humidity_unc)
        plt.title("Humidity")
        plt.xlabel("Time(s)")
        plt.ylabel("Humidity(%)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        plt.show()
    app.addButton("OK",ok)
    # NOTE(review): app.go() is also called at module level below; a second
    # call here from inside a handler looks suspicious — confirm whether
    # re-entering the appJar main loop is intended.
    app.go()
app.addButton("Plot Weather Data",weather_plot)
app.go()
# Single global appJar GUI instance shared by all handlers below.
app=gui()
import os
import csv
def weather_plot(btn):
    """Button handler: let the user pick a weather CSV and plot it.

    Adds a "Files" option box listing the CSVs in the current directory and
    an "OK" button whose handler parses the chosen file and shows plots of
    temperature, pressure and humidity over time.
    """
    # Plotting imports are local so they are only loaded on first use.
    import matplotlib.pyplot as plt
    import dateutil
    # NOTE(review): a bare `import dateutil` does not normally expose
    # `dateutil.parser` (used below) — confirm `from dateutil import parser`
    # isn't required.
    import numpy as np
    from matplotlib.dates import DateFormatter
    # Parsed samples; filled in by the nested ok() handler via closure.
    times=[]
    degrees_list=[]
    pressure_list=[]
    humidity_list=[]
    file_name=[]
    # Offer every CSV file in the current directory.
    for filename in os.listdir('.'):
        if filename.endswith(".csv"):
            file_name.append(os.path.join('.', filename))
    app.setFont(20)
    # NOTE(review): pressing "Plot Weather Data" more than once re-adds the
    # "Files" option box and "OK" button with the same names — appJar is
    # expected to reject duplicate widget titles; confirm intended usage.
    app.addOptionBox("Files",file_name)
    def ok(btn):
        # Parse the selected CSV; columns: timestamp, temperature(C),
        # pressure(hPa), humidity(%).
        user_file=app.getOptionBox("Files")
        results = csv.reader(open(user_file), delimiter=',')
        # NOTE(review): the file handle is never closed; `with open(...)`
        # would be safer.
        row_counter=0
        for r in results:
            if row_counter>0:  # skip the header row
                times.append(dateutil.parser.parse(r[0]))
                degrees_list.append(float(r[1]))
                pressure_list.append(float(r[2]))
                humidity_list.append(float(r[3]))
            row_counter+=1
        # Per-quantity means and standard deviations over blocks of
        # n_merge consecutive samples; any trailing partial block is dropped.
        temp_ave=[]
        temp_unc = []
        pressure_ave=[]
        pressure_unc=[]
        humidity_ave=[]
        humidity_unc=[]
        merge_times = []
        n_merge = 8
        ndata = len(degrees_list)
        nsum_data = int(ndata/n_merge)
        for i in range(nsum_data):
            itemp = degrees_list[i*n_merge:(i+1)*n_merge]
            itemp_array = np.asarray(itemp)
            temp_mean = np.mean(itemp_array)
            temp_sigma = np.sqrt(np.var(itemp_array))
            temp_ave.append(temp_mean)
            temp_unc.append(temp_sigma)
        for i in range(nsum_data):
            ipressure = pressure_list[i*n_merge:(i+1)*n_merge]
            ipressure_array = np.asarray(ipressure)
            pressure_mean = np.mean(ipressure_array)
            pressure_sigma = np.sqrt(np.var(ipressure_array))
            pressure_ave.append(pressure_mean)
            pressure_unc.append(pressure_sigma)
        for i in range(nsum_data):
            ihumid = humidity_list[i*n_merge:(i+1)*n_merge]
            ihumid_array = np.asarray(ihumid)
            humid_mean = np.mean(ihumid_array)
            humid_sigma = np.sqrt(np.var(ihumid_array))
            humidity_ave.append(humid_mean)
            humidity_unc.append(humid_sigma)
        # Represent each block by its middle timestamp.
        for i in range(nsum_data):
            itimes = times[i*n_merge:(i+1)*n_merge]
            itime = itimes[int(len(itimes)/2)]
            merge_times.append(itime)
        # One figure per quantity, x-axis formatted as HH:MM:SS.
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, temp_ave, "b.")
        plt.errorbar(merge_times, temp_ave, yerr = temp_unc)
        plt.title("Temperature")
        plt.xlabel("Time(s)")
        plt.ylabel("Temperature(C)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, pressure_ave,"g." )
        plt.errorbar(merge_times, pressure_ave, yerr = pressure_unc)
        plt.title("Pressure")
        plt.xlabel("Time(s)")
        plt.ylabel("Pressure(hPa)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        fig=plt.figure()
        ax=fig.add_subplot(111)
        plt.plot(merge_times, humidity_ave,"r." )
        plt.errorbar(merge_times, humidity_ave, yerr = humidity_unc)
        plt.title("Humidity")
        plt.xlabel("Time(s)")
        plt.ylabel("Humidity(%)")
        fig.autofmt_xdate()
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        plt.show()
    app.addButton("OK",ok)
    # NOTE(review): app.go() is also called at module level below; a second
    # call here from inside a handler looks suspicious — confirm whether
    # re-entering the appJar main loop is intended.
    app.go()
app.addButton("Plot Weather Data",weather_plot)
app.go()
import torch
from torch import nn
class AELoss(nn.Module):
    """Associative-embedding style pull/push loss.

    The pull term penalises the squared deviation of each location's tag from
    its (rounded, distance-quantised) per-box mean; the push term is a hinge
    that penalises pairs of box means closer than ``distance + margin_push``.
    """

    def __init__(self, pull_factor, push_factor, distance, margin_push):
        super(AELoss, self).__init__()
        self.pull_factor = pull_factor   # weight on the returned pull term
        self.push_factor = push_factor   # weight on the returned push term
        self.distance = distance         # quantisation step / target separation
        self.margin_push = margin_push   # extra margin added to the push hinge

    def forward(self, lof_tag_img, lof_tag_avg_img, lof_tag_avg_gather_img, mask, centerness_img=None):
        """Return (weighted pull loss, weighted push loss).

        lof_tag_img: per-location tags (selected 5level)
        lof_tag_avg_img: per-box mean tags (num_boxes)
        lof_tag_avg_gather_img: per-location copy of each location's box mean (selected 5level)
        mask: boolean (num_boxes, num_boxes) selecting which pairs feed the push term
        centerness_img: optional per-location weights for the pull term
        """
        # Snap every gathered box mean to the nearest multiple of `distance`.
        snapped = torch.round(lof_tag_avg_gather_img / self.distance) * self.distance
        # Pull: squared deviation of each tag from its (rounded) box mean.
        pull_terms = (lof_tag_img - torch.round(snapped)) ** 2
        # Push: hinge on the pairwise gap between box means.
        pairwise = torch.abs(lof_tag_avg_img.unsqueeze(0) - lof_tag_avg_img.unsqueeze(1))
        hinge = torch.clamp(self.distance + self.margin_push - pairwise, min=0)
        selected = hinge[mask]
        if centerness_img is None:
            pull = pull_terms.mean()
            push = selected.mean()
        else:
            pull = (pull_terms * centerness_img).sum() / centerness_img.sum()
            if mask.any():
                push = selected.sum() / mask.sum().float()
            else:
                push = torch.zeros_like(pull)
        return self.pull_factor * pull, self.push_factor * push
class AELossV2(nn.Module):
    """Binary (sigmoid) variant of the associative-embedding pull/push loss.

    The pull term is a BCE-with-logits loss between per-location tags and the
    rounded sigmoid of their per-box means; the push term is a hinge on the
    pairwise sigmoid distance between box means, with a fixed 0.5 target
    separation.
    """
    def __init__(self, pull_factor, push_factor, margin_push, num_lof):
        super(AELossV2, self).__init__()
        self.pull_factor = pull_factor   # weight on the returned pull term
        self.push_factor = push_factor   # weight on the returned push term
        self.distance = 0.5              # fixed separation in sigmoid space
        self.margin_push = margin_push   # extra margin added to the push hinge
        self.tag_loss = nn.BCEWithLogitsLoss(reduction='none')
        self.num_lof = num_lof           # number of embedding channels

    def forward(self, lof_tag_img, lof_tag_avg_img, lof_tag_avg_gather_img, mask, nmultinminus1mulnumlof, centerness_img=None):
        # lof_tag_img shape (selected 5level, num_lof)
        # lof_tag_avg_img shape (num_lof, num_boxes)
        # lof_tag_avg_gather_img shape (selected 5level, num_lof)
        # centerness_img shape (selected 5level, num_lof)
        # nmultinminus1mulnumlof: scalar normaliser for the push sum
        # (presumably n*(n-1)*num_lof — TODO confirm against caller).
        # Binarise each gathered box mean in sigmoid space (0 or 1 target).
        lof_tag_avg_gather_img = torch.round(torch.sigmoid(lof_tag_avg_gather_img))
        # tag = torch.pow(lof_tag_img - torch.round(lof_tag_avg_gather_img), 2)
        tag = self.tag_loss(lof_tag_img, lof_tag_avg_gather_img)
        # Pairwise sigmoid distance between box means, per channel.
        dist = torch.abs(torch.sigmoid(lof_tag_avg_img.unsqueeze(1))
                         - torch.sigmoid(lof_tag_avg_img.unsqueeze(2)))
        # Keep only pairs no channel has already pushed past the margin.
        dist_mask = ((dist > (0.5 + self.margin_push)).sum(0, keepdim=True) == 0).repeat((self.num_lof, 1, 1))
        mask = dist_mask & mask
        dist = (0.5 + self.margin_push) - dist
        dist = nn.functional.relu(dist, inplace=True)
        dist = dist[mask]
        if centerness_img is not None:
            # Centerness-weighted pull; push normalised by the fixed count.
            pull = (tag * centerness_img).sum() / centerness_img.sum()
            push = torch.zeros_like(pull)
            if mask.any():
                # centerness = (centerness_img.unsqueeze(0) * centerness_img.unsqueeze(1))[mask]
                # push = (dist * centerness).sum() / centerness.sum()
                push = dist.sum() / nmultinminus1mulnumlof.float()
        else:
            pull = tag.mean()
            push = dist.mean()
        return self.pull_factor*pull, self.push_factor*push
from torch import nn
class AELoss(nn.Module):
    """Associative-embedding loss.

    Pulls each per-location embedding tag toward the (quantized) average tag
    of its box, and pushes the averages of different boxes at least
    ``distance + margin_push`` apart.

    NOTE(review): this block was recovered from a whitespace-mangled dump;
    the indentation has been reconstructed.  The ``else`` branch must pair
    with ``if centerness_img is not None`` — pairing it with ``mask.any()``
    would leave ``pull``/``push`` unbound on the default
    ``centerness_img=None`` path.
    """

    def __init__(self, pull_factor, push_factor, distance, margin_push):
        """
        Args:
            pull_factor: weight applied to the pull (grouping) term.
            push_factor: weight applied to the push (separation) term.
            distance: quantization step for the per-box average tags.
            margin_push: extra margin on top of ``distance`` for the hinge.
        """
        super(AELoss, self).__init__()
        self.pull_factor = pull_factor
        self.push_factor = push_factor
        self.distance = distance
        self.margin_push = margin_push

    def forward(self, lof_tag_img, lof_tag_avg_img, lof_tag_avg_gather_img, mask, centerness_img=None):
        # lof_tag_img shape (selected 5level)
        # lof_tag_avg_img shape (num_boxes)
        # lof_tag_avg_gather_img shape (selected 5level)
        # Quantize each gathered per-box average to a multiple of `distance`.
        lof_tag_avg_gather_img = torch.round(lof_tag_avg_gather_img / self.distance) * self.distance
        # Pull term: squared distance of each tag to its rounded box target.
        tag = torch.pow(lof_tag_img - torch.round(lof_tag_avg_gather_img), 2)
        # Push term: hinge on the pairwise distance between box averages.
        dist = lof_tag_avg_img.unsqueeze(0) - lof_tag_avg_img.unsqueeze(1)
        dist = self.distance + self.margin_push - torch.abs(dist)
        dist = nn.functional.relu(dist, inplace=True)
        dist = dist[mask]
        if centerness_img is not None:
            # Centerness-weighted pull; push averaged over selected pairs.
            pull = (tag * centerness_img).sum() / centerness_img.sum()
            push = torch.zeros_like(pull)
            if mask.any():
                # centerness = (centerness_img.unsqueeze(0) * centerness_img.unsqueeze(1))[mask]
                # push = (dist * centerness).sum() / centerness.sum()
                push = dist.sum() / mask.sum().float()
        else:
            pull = tag.mean()
            push = dist.mean()
        return self.pull_factor*pull, self.push_factor*push
# NOTE(review): duplicate of AELossV2 above — this is the `parsed_code`
# column of the same dataset row; kept byte-identical (incl. the trailing
# row-metadata fragment on the last line).
class AELossV2(nn.Module):
def __init__(self, pull_factor, push_factor, margin_push, num_lof):
super(AELossV2, self).__init__()
self.pull_factor = pull_factor
self.push_factor = push_factor
self.distance = 0.5
self.margin_push = margin_push
self.tag_loss = nn.BCEWithLogitsLoss(reduction='none')
self.num_lof = num_lof
def forward(self, lof_tag_img, lof_tag_avg_img, lof_tag_avg_gather_img, mask, nmultinminus1mulnumlof, centerness_img=None):
# lof_tag_img shape (selected 5level, num_lof)
# lof_tag_avg_img shape (num_lof, num_boxes)
# lof_tag_avg_gather_img shape (selected 5level, num_lof)
# centerness_img shape (selected 5level, num_lof)
lof_tag_avg_gather_img = torch.round(torch.sigmoid(lof_tag_avg_gather_img))
# tag = torch.pow(lof_tag_img - torch.round(lof_tag_avg_gather_img), 2)
tag = self.tag_loss(lof_tag_img, lof_tag_avg_gather_img)
dist = torch.abs(torch.sigmoid(lof_tag_avg_img.unsqueeze(1))
- torch.sigmoid(lof_tag_avg_img.unsqueeze(2)))
dist_mask = ((dist > (0.5 + self.margin_push)).sum(0, keepdim=True) == 0).repeat((self.num_lof, 1, 1))
mask = dist_mask & mask
dist = (0.5 + self.margin_push) - dist
dist = nn.functional.relu(dist, inplace=True)
dist = dist[mask]
if centerness_img is not None:
pull = (tag * centerness_img).sum() / centerness_img.sum()
push = torch.zeros_like(pull)
if mask.any():
# centerness = (centerness_img.unsqueeze(0) * centerness_img.unsqueeze(1))[mask]
# push = (dist * centerness).sum() / centerness.sum()
push = dist.sum() / nmultinminus1mulnumlof.float()
else:
pull = tag.mean()
push = dist.mean()
return self.pull_factor*pull, self.push_factor*push | 0.889599 | 0.466724 |
# Django settings module for the `mysite` project.
# NOTE(review): recovered from a whitespace-mangled dump — blank lines were
# stripped; statements are kept byte-identical, only comments added.
import os
# Three levels up from this file (settings live in site/mysite/settings/).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# NOTE(review): `<KEY>"` is a redaction artifact from the dump, not valid
# Python — the real secret must be restored from a secure store, never
# committed to source control.
SECRET_KEY = <KEY>"
DEBUG = False
# NOTE(review): wildcard Host header acceptance — only safe behind a proxy
# that validates Host itself.
ALLOWED_HOSTS = ['*']
# COMPRESS_ENABLED = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'tradutor',
]
# Required by django.contrib.sites / sitemaps.
SITE_ID = 1
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.gzip.GZipMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
# SQLite database colocated with the project tree.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
# NOTE(review): absolute '/static' collect target — presumably a container
# path; verify against the deployment.
STATIC_ROOT = '/static'
STATIC_URL = '/static/' | site/mysite/settings/local.py | import os
# NOTE(review): duplicate of the settings module above — the `parsed_code`
# column of the same dataset row; kept byte-identical.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SECRET_KEY = <KEY>"
DEBUG = False
ALLOWED_HOSTS = ['*']
# COMPRESS_ENABLED = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'tradutor',
]
SITE_ID = 1
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.gzip.GZipMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
STATIC_ROOT = '/static'
STATIC_URL = '/static/' | 0.243373 | 0.056705 |
from docopt import docopt
import os
import pandas as pd
import json
import numpy as np
import yaml
from copy import copy
import cPickle as pkl
from sklearn.preprocessing import LabelBinarizer as Binarizer
from sklearn.decomposition import LatentDirichletAllocation
import pnr.config as CONFIG
def mold_baseline_vectors(annotations):
    """
    Encode each pnr annotation as a binary role/player vector.

    Every annotation contributes one vector: for each of the four role
    slots (ball_handler, ball_defender, screen_setter, screen_defender)
    a one-hot segment over all distinct players (first-seen, column by
    column) is appended, so the result has 4 * n_players columns.

    NOTE(review): recovered from a whitespace-mangled dump; indentation
    reconstructed (the flat original is a SyntaxError).

    Parameters
    ----------
    annotations: list of dict
        information on pnr annotations; each dict must carry the four
        role keys selected below.

    Returns
    -------
    vectors: np.ndarray of shape (n_annotations, 4 * n_players)
        binary role-occupancy vectors.
    """
    annotations = pd.DataFrame(annotations)
    annotations = annotations[['ball_handler', 'ball_defender', 'screen_setter', 'screen_defender']]
    # Collect distinct players column by column, preserving first-seen order.
    players = []
    for column in annotations.columns:
        role_players = annotations[column].drop_duplicates(inplace=False).values
        for player in role_players:
            if player not in players:
                players.append(player)
    # One-hot encode each role slot against the full player list.
    vectors = []
    for ind, annotation in annotations.iterrows():
        vector = []
        for annotation_player in annotation.values:
            vector.extend(1 if annotation_player == player else 0 for player in players)
        vectors.append(vector)
    return np.array(vectors)
# NOTE(review): recovered from a whitespace-mangled dump; code kept
# byte-identical.  The two trailing `append` calls plausibly sit at the
# per-vector_id loop level (same depth as `if len(sentence) == 8`) — TODO
# confirm against the upstream repo before reindenting.
# NOTE(review): `pd.DataFrame.append` was removed in pandas 2.0 — this
# function requires pandas < 2 (migrate to a row list + pd.concat).
def mold_sentences(vectors):
"""
Build pairwise-action "sentences" from per-annotation action vectors.

For each 8-action sentence, every ordered pair of distinct actions inside
the first half (before) and inside the second half (after) becomes one
row; rows are then one-hot encoded via get_dummies on the concatenated
pair string.
Parameters
----------
vectors: dict
information on pnr annotation, as well as actions identified
Returns
-------
sentences: dict
annotation information, as well as topics identified, but with context
"""
annotations = []
vector_ids = vectors.keys()
# NOTE(review): `vectorizer` is never used below — candidate for removal.
vectorizer = Binarizer()
vector_sentences = pd.DataFrame()
for vector_id in vector_ids:
vector = vectors[vector_id]
sentence = vector['actions']
annotation = vector['annotation']
# Seed the column order: id, player_1_action, player_2_action.
vector_sentence = pd.DataFrame()
vector_sentence['id'] = 0
vector_sentence['player_1_action'] = 0
vector_sentence['player_2_action'] = 0
if len(sentence) == 8:
before_actions = sentence[:4]
for ind, action in enumerate(before_actions):
player_vector = {}
player_vector['player_1_action'] = action
before_comparisons = [x for i,x in enumerate(before_actions) if i != ind]
for before_comparison in before_comparisons:
player_vector['player_2_action'] = before_comparison
player_vector['id'] = vector_id
vector_sentence = vector_sentence.append(player_vector, ignore_index=True)
after_actions = sentence[4:]
for ind, action in enumerate(after_actions):
player_vector = {}
player_vector['player_1_action'] = action
after_comparisons = [x for i,x in enumerate(after_actions) if i != ind]
for after_comparison in after_comparisons:
player_vector['player_2_action'] = after_comparison
player_vector['id'] = vector_id
vector_sentence = vector_sentence.append(player_vector, ignore_index=True)
vector_sentences = vector_sentences.append(vector_sentence)
annotations.append(annotation)
# Concatenate the two action ids into one categorical token, then one-hot.
vector_sentences['pairwise_actions'] = vector_sentences['player_1_action'].map(str) + vector_sentences['player_2_action'].map(str)
pairwise_actions = vector_sentences['pairwise_actions']
pairwise_actions = copy(pd.get_dummies(pairwise_actions))
pairwise_actions['id'] = vector_sentences['id'].values
return pairwise_actions, annotations
# NOTE(review): recovered from a whitespace-mangled dump; code kept
# byte-identical, only comments added.
def find_topics(sentences, annotations, exp_name, n_components=4):
"""
Fit an LDA topic model over bag-of-pairwise-action counts and attach a
hard topic id to each annotation, then pickle the result.
Parameters
----------
sentences: dict
information on pnr annotation, as well as actions identified
Returns
-------
topics: dict
annotation information, as well as topics identified
"""
vectors = []
# Every column except 'id' is one vocabulary entry.
n_actions = sentences.shape[1] - 1
vocab = list(range(0, n_actions))
sentences = sentences.groupby('id')
for sentence_id, sentence in sentences:
vocab_count = np.zeros(len(vocab))
for ind, action in sentence.iterrows():
# Safe to mutate: iterrows() yields a copy of each row.
action.drop('id', inplace=True)
action = action.values
action_id = np.argmax(action)
if action[action_id] > 0:
vocab_count[action_id] += 1
vectors.append(vocab_count)
vectors = np.array(vectors)
lda = LatentDirichletAllocation(
n_components=n_components,
max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0
)
lda.fit(vectors)
topic_probs = lda.transform(vectors)
# Hard-assign each document to its most probable topic.
# NOTE(review): pairing annotations[ind] with the ind-th group assumes
# annotation order matches groupby('id') order — TODO confirm.
for ind, prob in enumerate(topic_probs):
topic = np.argmax(prob)
annotations[ind]['topic'] = topic
# NOTE(review): relies on the module-global `pnr_dir` bound only in the
# __main__ block — confirm this is never imported and called elsewhere.
pkl.dump(annotations, open(os.path.join(pnr_dir, 'roles/%s.pkl' % exp_name), 'wb'))
# Script entry point: load action vectors, build pairwise sentences, and fit
# the topic model.  Python 2 era code (cPickle/docopt).
if __name__ == '__main__':
from pnr.data.constant import sportvu_dir, game_dir
pnr_dir = os.path.join(game_dir, 'pnr-annotations')
arguments = docopt(__doc__)
print ("...Docopt... ")
print(arguments)
print ("............\n")
f_data_config = '%s/%s' % (CONFIG.data.config.dir, arguments['<f_data_config>'])
# NOTE(review): yaml.load without an explicit Loader executes arbitrary
# tags on untrusted files — prefer yaml.safe_load.
data_config = yaml.load(open(f_data_config, 'rb'))
vectors = pkl.load(open(os.path.join(pnr_dir, 'roles/vectors.pkl'), 'rb'))
vectors, annotations = mold_sentences(vectors)
baseline_vectors = mold_baseline_vectors(annotations)
find_topics(sentences=vectors, annotations=annotations, exp_name='topics')
# find_topics(sentences=baseline_vectors, annotations=annotations, exp_name='topics-baseline') | pnr/model_topics.py | from docopt import docopt
import os
import pandas as pd
import json
import numpy as np
import yaml
from copy import copy
import cPickle as pkl
from sklearn.preprocessing import LabelBinarizer as Binarizer
from sklearn.decomposition import LatentDirichletAllocation
import pnr.config as CONFIG
# NOTE(review): duplicate of pnr/model_topics.py above — the `parsed_code`
# column of the same dataset row; kept byte-identical (incl. the trailing
# row-metadata fragment on the last line).
def mold_baseline_vectors(annotations):
"""
Use actions identified in clusters as vectors to represent as text document.
Use standard topic modelling techniques to find pnr topic.
Parameters
----------
annotations: list of dict
information on pnr annotations
Returns
-------
senctences: list of vectors
annotation information, as well as topics identified, but with context
"""
annotations = pd.DataFrame(annotations)
annotations = annotations[['ball_handler', 'ball_defender', 'screen_setter', 'screen_defender']]
players = []
for column in annotations.columns:
role_players = annotations[column].drop_duplicates(inplace=False).values
for player in role_players:
if player not in players:
players.append(player)
vectors = []
for ind, annotation in annotations.iterrows():
vector = []
for annotation_player in annotation.values:
for player in players:
if annotation_player == player:
vector.append(1)
else:
vector.append(0)
vectors.append(vector)
vectors = np.array(vectors)
return vectors
def mold_sentences(vectors):
"""
Use actions identified in clusters as vectors to represent as text document.
Use standard topic modelling techniques to find pnr topic.
Parameters
----------
vectors: dict
information on pnr annotation, as well as actions identified
Returns
-------
senctences: dict
annotation information, as well as topics identified, but with context
"""
annotations = []
vector_ids = vectors.keys()
vectorizer = Binarizer()
vector_sentences = pd.DataFrame()
for vector_id in vector_ids:
vector = vectors[vector_id]
sentence = vector['actions']
annotation = vector['annotation']
vector_sentence = pd.DataFrame()
vector_sentence['id'] = 0
vector_sentence['player_1_action'] = 0
vector_sentence['player_2_action'] = 0
if len(sentence) == 8:
before_actions = sentence[:4]
for ind, action in enumerate(before_actions):
player_vector = {}
player_vector['player_1_action'] = action
before_comparisons = [x for i,x in enumerate(before_actions) if i != ind]
for before_comparison in before_comparisons:
player_vector['player_2_action'] = before_comparison
player_vector['id'] = vector_id
vector_sentence = vector_sentence.append(player_vector, ignore_index=True)
after_actions = sentence[4:]
for ind, action in enumerate(after_actions):
player_vector = {}
player_vector['player_1_action'] = action
after_comparisons = [x for i,x in enumerate(after_actions) if i != ind]
for after_comparison in after_comparisons:
player_vector['player_2_action'] = after_comparison
player_vector['id'] = vector_id
vector_sentence = vector_sentence.append(player_vector, ignore_index=True)
vector_sentences = vector_sentences.append(vector_sentence)
annotations.append(annotation)
vector_sentences['pairwise_actions'] = vector_sentences['player_1_action'].map(str) + vector_sentences['player_2_action'].map(str)
pairwise_actions = vector_sentences['pairwise_actions']
pairwise_actions = copy(pd.get_dummies(pairwise_actions))
pairwise_actions['id'] = vector_sentences['id'].values
return pairwise_actions, annotations
def find_topics(sentences, annotations, exp_name, n_components=4):
"""
Use actions identified in clusters as vectors to represent as text document.
Use standard topic modelling techniques to find pnr topic.
Parameters
----------
sentences: dict
information on pnr annotation, as well as actions identified
Returns
-------
topics: dict
annotation information, as well as topics identified
"""
vectors = []
n_actions = sentences.shape[1] - 1
vocab = list(range(0, n_actions))
sentences = sentences.groupby('id')
for sentence_id, sentence in sentences:
vocab_count = np.zeros(len(vocab))
for ind, action in sentence.iterrows():
action.drop('id', inplace=True)
action = action.values
action_id = np.argmax(action)
if action[action_id] > 0:
vocab_count[action_id] += 1
vectors.append(vocab_count)
vectors = np.array(vectors)
lda = LatentDirichletAllocation(
n_components=n_components,
max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0
)
lda.fit(vectors)
topic_probs = lda.transform(vectors)
for ind, prob in enumerate(topic_probs):
topic = np.argmax(prob)
annotations[ind]['topic'] = topic
pkl.dump(annotations, open(os.path.join(pnr_dir, 'roles/%s.pkl' % exp_name), 'wb'))
if __name__ == '__main__':
from pnr.data.constant import sportvu_dir, game_dir
pnr_dir = os.path.join(game_dir, 'pnr-annotations')
arguments = docopt(__doc__)
print ("...Docopt... ")
print(arguments)
print ("............\n")
f_data_config = '%s/%s' % (CONFIG.data.config.dir, arguments['<f_data_config>'])
data_config = yaml.load(open(f_data_config, 'rb'))
vectors = pkl.load(open(os.path.join(pnr_dir, 'roles/vectors.pkl'), 'rb'))
vectors, annotations = mold_sentences(vectors)
baseline_vectors = mold_baseline_vectors(annotations)
find_topics(sentences=vectors, annotations=annotations, exp_name='topics')
# find_topics(sentences=baseline_vectors, annotations=annotations, exp_name='topics-baseline') | 0.705379 | 0.466846 |
import json
from asyncio import BaseEventLoop
from typing import Optional, List, Type
from asyncpg import create_pool
from asyncpg.connection import Connection
from asyncpg.pool import Pool
from postDB.model.meta import ModelMeta
from postDB.types import Serial
def format_missing(missing):
    """Render a human-readable list of missing column names.

    Produces ``'a'``, ``'a' and 'b'``, or ``'a', 'b' and 'c'`` style
    strings for the TypeError raised by ``Model.__init__``.

    NOTE(review): recovered from a whitespace-mangled dump; indentation
    reconstructed (the flat original is a SyntaxError).

    Args:
        missing: sequence of column objects exposing a ``name`` attribute.

    Returns:
        The quoted, comma/"and"-joined column names.
    """
    def fmt_single(name) -> str:
        # Wrap a single name in single quotes.
        return "'%s'" % name

    if len(missing) == 1:
        return fmt_single(missing[0].name)
    if len(missing) == 2:
        return " and ".join(fmt_single(col.name) for col in missing)
    # Three or more: comma-join all but the last, then append "and <last>".
    return ", ".join(
        fmt_single(col.name) for col in missing[:-1]
    ) + " and " + fmt_single(missing[-1].name)
# NOTE(review): recovered from a whitespace-mangled dump; code kept
# byte-identical, only comments added.  Behavior depends on the ModelMeta
# metaclass (supplies `columns` and `__tablename__`) defined elsewhere.
class Model(metaclass=ModelMeta):
"""Base class for all the models."""
# Shared asyncpg pool; populated once via `create_pool` for all models.
pool: Optional[Pool] = None
def __init__(self, **attrs):
# Assign every declared column from **attrs; collect required columns
# that were neither supplied nor defaulted (Serial is DB-generated).
missing = []
for col in self.columns:
try:
val = attrs[col.name]
except KeyError:
if (
col.default is None
and not col.nullable
and not isinstance(col.column_type, Serial)
):
missing.append(col)
continue
val = col.default
setattr(self, col.name, val)
if missing:
raise TypeError(
"__init__() missing {0} required positional arguments: {1}".format(
len(missing), format_missing(missing)
)
)
@classmethod
def create_table_sql(cls, *, exists_ok: bool = True) -> str:
"""Generates the ``CREATE TABLE`` SQL statement."""
statements = []
builder = ["CREATE TABLE"]
if exists_ok:
builder.append("IF NOT EXISTS")
builder.append(cls.__tablename__)
columns = []
pks = [col.name for col in cls.columns if col.primary_key]
for col in cls.columns:
# Trailing comma after every column except the last, unless a
# composite PRIMARY KEY clause follows.
columns.append(
col.generate_create_table_sql()
+ ("," if col != cls.columns[-1] or any(pks) else "")
)
if pks:
columns.append("PRIMARY KEY (%s)" % ", ".join(pks))
builder.append("(\n %s\n)" % "\n ".join(columns))
statements.append(" ".join(builder) + ";")
# Emit one CREATE INDEX statement per indexed column.
if any(col.index for col in cls.columns):
statements.append("")
for col in cls.columns:
if col.index:
fmt = "CREATE INDEX IF NOT EXISTS {1.index_name} ON {0} ({1.name});".format(
cls.__tablename__, col
)
statements.append(fmt)
return "\n".join(statements)
@classmethod
def drop_table_sql(cls, *, exists_ok: bool = True, cascade: bool = False) -> str:
"""Generates the ``DROP TABLE`` SQL statement."""
builder = ["DROP TABLE"]
if exists_ok:
builder.append("IF EXISTS")
to_cascade = "CASCADE" if cascade else "RESTRICT"
builder.append("%s %s;" % (cls.__tablename__, to_cascade))
return " ".join(builder)
@classmethod
async def create_pool(
cls,
uri: str,
*,
min_con: int = 1,
max_con: int = 10,
timeout: float = 10.0,
loop: BaseEventLoop = None,
**pool_kwargs,
) -> None:
"""Populate the internal pool keyword."""
# Replace any existing pool, closing it first.
if isinstance(cls.pool, Pool):
await cls.pool.close()
# Register a json codec on every new connection in the pool.
async def init(con: Connection) -> None:
await con.set_type_codec(
"json", schema="pg_catalog", encoder=json.dumps, decoder=json.loads
)
cls.pool = await create_pool(
dsn=uri,
init=init,
loop=loop,
timeout=timeout,
min_size=min_con,
max_size=max_con,
**pool_kwargs,
)
@classmethod
async def create_table(
cls,
*,
verbose: bool = False,
exists_ok: bool = True,
):
"""Create the PostgreSQL Table for this Model."""
if cls.pool is None:
raise TypeError("Unable to get Connection, please call `Model.create_pool` before using the coroutine.")
sql = cls.create_table_sql(exists_ok=exists_ok)
if verbose:
print(sql)
return await cls.pool.execute(sql)
@classmethod
async def drop_table(
cls,
*,
verbose: bool = False,
cascade: bool = True,
exists_ok: bool = True,
):
"""Drop the PostgreSQL Table for this Model."""
if cls.pool is None:
raise TypeError("Unable to get Connection, please call `Model.create_pool` before using the coroutine.")
sql = cls.drop_table_sql(exists_ok=exists_ok, cascade=cascade)
if verbose:
print(sql)
return await cls.pool.execute(sql)
@classmethod
def all_models(cls) -> List[Type["Model"]]:
"""Returns a list of all :class:`Model` subclasses."""
return cls.__subclasses__()
def as_dict(self, *columns) -> dict:
"""Returns a dict of attribute:value, only containing the columns specified."""
all_column_names = [col.name for col in self.columns]
if not columns:
columns = all_column_names
else:
# Reject names that are not declared columns of this model.
for col in columns:
if col not in all_column_names:
raise ValueError(
"%s is not a attribute of the %s Model."
% (col, type(self).__name__)
)
return {key: getattr(self, key, None) for key in columns} | postDB/model/model.py | import json
from asyncio import BaseEventLoop
from typing import Optional, List, Type
from asyncpg import create_pool
from asyncpg.connection import Connection
from asyncpg.pool import Pool
from postDB.model.meta import ModelMeta
from postDB.types import Serial
# NOTE(review): duplicate of format_missing/Model above — the `parsed_code`
# column of the same dataset row; kept byte-identical (incl. the trailing
# row-metadata fragment on the last line).
def format_missing(missing):
def fmt_single(name) -> str:
return "'%s'" % name
if len(missing) == 1:
return fmt_single(missing[0].name)
if len(missing) == 2:
return " and ".join(fmt_single(col.name) for col in missing)
return ", ".join(
fmt_single(col.name) for col in missing[:-1]
) + " and %s" % fmt_single(missing[-1].name)
class Model(metaclass=ModelMeta):
"""Base class for all the models."""
pool: Optional[Pool] = None
def __init__(self, **attrs):
missing = []
for col in self.columns:
try:
val = attrs[col.name]
except KeyError:
if (
col.default is None
and not col.nullable
and not isinstance(col.column_type, Serial)
):
missing.append(col)
continue
val = col.default
setattr(self, col.name, val)
if missing:
raise TypeError(
"__init__() missing {0} required positional arguments: {1}".format(
len(missing), format_missing(missing)
)
)
@classmethod
def create_table_sql(cls, *, exists_ok: bool = True) -> str:
"""Generates the ``CREATE TABLE`` SQL statement."""
statements = []
builder = ["CREATE TABLE"]
if exists_ok:
builder.append("IF NOT EXISTS")
builder.append(cls.__tablename__)
columns = []
pks = [col.name for col in cls.columns if col.primary_key]
for col in cls.columns:
columns.append(
col.generate_create_table_sql()
+ ("," if col != cls.columns[-1] or any(pks) else "")
)
if pks:
columns.append("PRIMARY KEY (%s)" % ", ".join(pks))
builder.append("(\n %s\n)" % "\n ".join(columns))
statements.append(" ".join(builder) + ";")
if any(col.index for col in cls.columns):
statements.append("")
for col in cls.columns:
if col.index:
fmt = "CREATE INDEX IF NOT EXISTS {1.index_name} ON {0} ({1.name});".format(
cls.__tablename__, col
)
statements.append(fmt)
return "\n".join(statements)
@classmethod
def drop_table_sql(cls, *, exists_ok: bool = True, cascade: bool = False) -> str:
"""Generates the ``DROP TABLE`` SQL statement."""
builder = ["DROP TABLE"]
if exists_ok:
builder.append("IF EXISTS")
to_cascade = "CASCADE" if cascade else "RESTRICT"
builder.append("%s %s;" % (cls.__tablename__, to_cascade))
return " ".join(builder)
@classmethod
async def create_pool(
cls,
uri: str,
*,
min_con: int = 1,
max_con: int = 10,
timeout: float = 10.0,
loop: BaseEventLoop = None,
**pool_kwargs,
) -> None:
"""Populate the internal pool keyword."""
if isinstance(cls.pool, Pool):
await cls.pool.close()
async def init(con: Connection) -> None:
await con.set_type_codec(
"json", schema="pg_catalog", encoder=json.dumps, decoder=json.loads
)
cls.pool = await create_pool(
dsn=uri,
init=init,
loop=loop,
timeout=timeout,
min_size=min_con,
max_size=max_con,
**pool_kwargs,
)
@classmethod
async def create_table(
cls,
*,
verbose: bool = False,
exists_ok: bool = True,
):
"""Create the PostgreSQL Table for this Model."""
if cls.pool is None:
raise TypeError("Unable to get Connection, please call `Model.create_pool` before using the coroutine.")
sql = cls.create_table_sql(exists_ok=exists_ok)
if verbose:
print(sql)
return await cls.pool.execute(sql)
@classmethod
async def drop_table(
cls,
*,
verbose: bool = False,
cascade: bool = True,
exists_ok: bool = True,
):
"""Drop the PostgreSQL Table for this Model."""
if cls.pool is None:
raise TypeError("Unable to get Connection, please call `Model.create_pool` before using the coroutine.")
sql = cls.drop_table_sql(exists_ok=exists_ok, cascade=cascade)
if verbose:
print(sql)
return await cls.pool.execute(sql)
@classmethod
def all_models(cls) -> List[Type["Model"]]:
"""Returns a list of all :class:`Model` subclasses."""
return cls.__subclasses__()
def as_dict(self, *columns) -> dict:
"""Returns a dict of attribute:value, only containing the columns specified."""
all_column_names = [col.name for col in self.columns]
if not columns:
columns = all_column_names
else:
for col in columns:
if col not in all_column_names:
raise ValueError(
"%s is not a attribute of the %s Model."
% (col, type(self).__name__)
)
return {key: getattr(self, key, None) for key in columns} | 0.830181 | 0.158728 |
import unittest
from mock import Mock, call
import photo
import json
from StringIO import StringIO
import requests
from download import FlickrApiDownloader
# Fake `requests` module that raises ConnectionError on the first two get()
# calls and succeeds on the third — exercises the retry-on-exception path.
# NOTE(review): recovered from a whitespace-mangled dump; kept byte-identical.
class ThrowsTwice:
def __init__(self, successful_response):
self.successful_response = successful_response
self.count = 0
def get(self, url):
self.count += 1
if self.count == 3:
resp = Mock(spec = requests.models.Response)
resp.status_code = 200
resp.content = self.successful_response
return resp
else:
raise requests.exceptions.ConnectionError('nope')
# Fake `requests` module that returns HTTP 500 on the first two get() calls
# and HTTP 200 with content on the third — exercises retry-on-bad-status.
# NOTE(review): recovered from a whitespace-mangled dump; kept byte-identical.
class ErrorsTwice:
def __init__(self, successful_response):
self.successful_response = successful_response
self.count = 0
def get(self, url):
self.count += 1
resp = Mock(spec = requests.models.Response)
if self.count == 3:
resp.status_code = 200
resp.content = self.successful_response
else:
resp.status_code = 500
return resp
# Raised by MockRequests when a test fetches a URL it did not register.
class UnmockedUrlException(Exception):
pass
# Fake `requests` module backed by a dict of url -> content; unknown URLs
# raise UnmockedUrlException so tests fail loudly on unexpected fetches.
# NOTE(review): recovered from a whitespace-mangled dump; kept byte-identical.
class MockRequests:
def __init__(self):
self.contents = {}
def get(self, url):
if url not in self.contents:
raise UnmockedUrlException('Un-mocked URL: ' + url)
return MockResponse(self.contents[url])
class MockResponse:
    """Minimal stand-in for ``requests.models.Response``.

    Exposes only the ``content`` attribute that the code under test reads.

    NOTE(review): recovered from a whitespace-mangled dump; indentation
    reconstructed (the flat original is a SyntaxError).
    """

    def __init__(self, content):
        # Raw body (str/bytes) this fake response should expose.
        self.content = content
# Top-level fake of the flickrapi client; only exposes the `.photos` facet.
class MockFlickrApi:
def __init__(self, photo_infos):
self.photos = MockFlickrPhotos(photo_infos)
# Fake `flickr.photos`: getInfo returns the canned info for kwargs['photo_id']
# serialized as JSON, matching the real API's string payloads.
class MockFlickrPhotos:
def __init__(self, photo_infos):
side_effect = lambda **kwargs: \
json.dumps(photo_infos[kwargs['photo_id']])
self.getInfo = Mock(side_effect=side_effect)
# Unit tests for photo.download_originals / photo.download_info.
# Python 2 era (xrange, StringIO, `mock` package).
# NOTE(review): recovered from a whitespace-mangled dump; kept byte-identical.
class TestPhoto(unittest.TestCase):
# Happy path: each photo's original is fetched and saved once.
def test_download_originals(self):
photos = [
{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
{'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
]
responses = [
'\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00',
'\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x01'
]
requests = MockRequests()
for i in xrange(0, len(photos)):
requests.contents[photos[i]['url_o']] = responses[i]
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', responses[0]),
call('originals/23793491473_o.jpg', responses[1])])
# Two ConnectionErrors then success: download should retry and save.
def test_download_originals_exception_retries(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = ThrowsTwice(response)
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', response)])
# Two HTTP 500s then success: download should retry and save.
def test_download_originals_bad_status_retries(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = ErrorsTwice(response)
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', response)])
# No response registered: the exception should propagate and nothing saves.
def test_download_originals_eventually_fails(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
requests = MockRequests() # And don't provide a response
file_store = Mock()
file_store.exists.return_value = False
threw = False
try:
photo.download_originals(photos, [], file_store, requests,
StringIO())
except UnmockedUrlException:
threw = True
self.assertTrue(threw)
file_store.save_image.assert_not_called()
# Already-stored originals should not trigger any network call.
def test_download_originals_skips_existing(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
requests = Mock()
file_store = Mock()
file_store.exists.return_value = True
photo.download_originals(photos, [], file_store, requests, StringIO())
self.assertEqual(requests.get.call_count, 0)
# Photos listed as modified are re-downloaded even when already stored.
def test_download_originals_downloads_modified(self):
photos = [
{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
{'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = MockRequests()
requests.contents[photos[0]['url_o']] = response
for i in xrange(0, len(photos)):
requests.contents[photos[i]['url_o']] = response
file_store = Mock()
file_store.exists.return_value = True
photo.download_originals(photos, ['25461030990'], file_store, requests,
StringIO())
file_store.save_image.assert_called_with(
'originals/25461030990_o.jpg', response)
# Photo info JSON is fetched per photo and the "photo" payload saved.
def test_download_info(self):
photos = [
{'id': '1'},
{'id': '2'}
]
responses = {
'1': { "photo": { "id": "1", "secret": "s1" }, "stat": "ok" },
'2': { "photo": { "id": "2", "secret": "s2" }, "stat": "ok" }
}
file_store = Mock()
file_store.exists.return_value = False
downloader = FlickrApiDownloader(file_store, Mock())
photo.download_info(photos, downloader, MockFlickrApi(responses),
StringIO())
file_store.save_json.assert_has_calls([
call('photo-info/1.json', responses['1']['photo']),
call('photo-info/2.json', responses['2']['photo'])
])
# Existing info files should not trigger API calls.
def test_download_infos_skips_existing(self):
photos = [{'id': '1'}]
file_store = Mock()
file_store.exists.return_value = True
flickr = MockFlickrApi({'1': {'photo': {}}})
downloader = FlickrApiDownloader(file_store, Mock())
photo.download_info(photos, downloader, flickr, StringIO())
self.assertEqual(flickr.photos.getInfo.call_count, 0) | test_photo.py | import unittest
from mock import Mock, call
import photo
import json
from StringIO import StringIO
import requests
from download import FlickrApiDownloader
class ThrowsTwice:
def __init__(self, successful_response):
self.successful_response = successful_response
self.count = 0
def get(self, url):
self.count += 1
if self.count == 3:
resp = Mock(spec = requests.models.Response)
resp.status_code = 200
resp.content = self.successful_response
return resp
else:
raise requests.exceptions.ConnectionError('nope')
class ErrorsTwice:
    """Fake HTTP client returning HTTP 500 for every get() call except the
    third, which returns 200 with successful_response as its body."""

    def __init__(self, successful_response):
        self.successful_response = successful_response
        self.count = 0

    def get(self, url):
        self.count += 1
        resp = Mock(spec = requests.models.Response)
        if self.count != 3:
            resp.status_code = 500
        else:
            resp.status_code = 200
            resp.content = self.successful_response
        return resp
class UnmockedUrlException(Exception):
    """Raised by MockRequests.get for a URL with no registered response."""
    pass
class MockRequests:
    """Dict-backed stand-in for the requests module.

    get(url) returns a MockResponse for a URL registered in self.contents
    and raises UnmockedUrlException for anything else.
    """

    def __init__(self):
        self.contents = {}

    def get(self, url):
        try:
            body = self.contents[url]
        except KeyError:
            raise UnmockedUrlException('Un-mocked URL: ' + url)
        return MockResponse(body)
class MockResponse:
    """Minimal stand-in for an HTTP response exposing only .content."""
    def __init__(self, content):
        self.content = content
class MockFlickrApi:
    """Stand-in for the Flickr API client; only the .photos namespace used
    by the code under test is modelled."""
    def __init__(self, photo_infos):
        # photo_infos: dict mapping photo id -> canned getInfo payload.
        self.photos = MockFlickrPhotos(photo_infos)
class MockFlickrPhotos:
    """Fake flickr 'photos' namespace.

    getInfo is a Mock whose side effect looks up the canned payload for the
    requested photo_id keyword argument and returns it as a JSON string, so
    tests can both consume the data and assert on call counts.
    """

    def __init__(self, photo_infos):
        def fake_get_info(**kwargs):
            return json.dumps(photo_infos[kwargs['photo_id']])
        self.getInfo = Mock(side_effect=fake_get_info)
class TestPhoto(unittest.TestCase):
def test_download_originals(self):
    """download_originals fetches each photo's original-size URL and saves
    the image bytes under originals/<id>_o.jpg."""
    photos = [
        {'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
        {'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
    ]
    # Canned JPEG/EXIF header bytes standing in for real image data.
    responses = [
        '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00',
        '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x01'
    ]
    requests = MockRequests()
    # Pair each photo with its canned body directly instead of the old
    # `for i in xrange(0, len(photos))` index loop.
    for p, body in zip(photos, responses):
        requests.contents[p['url_o']] = body
    file_store = Mock()
    file_store.exists.return_value = False  # nothing cached -> download all
    photo.download_originals(photos, [], file_store, requests, StringIO())
    file_store.save_image.assert_has_calls([
        call('originals/25461030990_o.jpg', responses[0]),
        call('originals/23793491473_o.jpg', responses[1])])
def test_download_originals_exception_retries(self):
    """Transient ConnectionErrors are retried: ThrowsTwice fails on its first
    two get() calls, then serves the image, which must still be saved."""
    photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
    # Canned JPEG/EXIF header bytes standing in for real image data.
    response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
    requests = ThrowsTwice(response)
    file_store = Mock()
    file_store.exists.return_value = False  # not cached -> must download
    photo.download_originals(photos, [], file_store, requests, StringIO())
    file_store.save_image.assert_has_calls([
        call('originals/25461030990_o.jpg', response)])
def test_download_originals_bad_status_retries(self):
    """HTTP 500 responses are retried: ErrorsTwice returns 500 twice, then a
    200 with the image, which must still be saved."""
    photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
    # Canned JPEG/EXIF header bytes standing in for real image data.
    response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
    requests = ErrorsTwice(response)
    file_store = Mock()
    file_store.exists.return_value = False  # not cached -> must download
    photo.download_originals(photos, [], file_store, requests, StringIO())
    file_store.save_image.assert_has_calls([
        call('originals/25461030990_o.jpg', response)])
def test_download_originals_eventually_fails(self):
    """When every request fails, the error propagates out of
    download_originals and nothing is written to the file store."""
    photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
    requests = MockRequests()  # no canned response -> every get() raises
    file_store = Mock()
    file_store.exists.return_value = False
    # assertRaises replaces the original manual try/except + boolean-flag
    # pattern; the assertion fails automatically if nothing is raised.
    with self.assertRaises(UnmockedUrlException):
        photo.download_originals(photos, [], file_store, requests,
            StringIO())
    file_store.save_image.assert_not_called()
def test_download_originals_skips_existing(self):
    """No HTTP request is made for a photo the file store already holds."""
    photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
    requests = Mock()
    file_store = Mock()
    file_store.exists.return_value = True  # original already stored
    photo.download_originals(photos, [], file_store, requests, StringIO())
    self.assertEqual(requests.get.call_count, 0)
def test_download_originals_downloads_modified(self):
    """A photo listed as modified is re-downloaded even though a stored copy
    exists; the unmodified photo is left alone."""
    photos = [
        {'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
        {'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
    ]
    # Canned JPEG/EXIF header bytes standing in for real image data.
    response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
    requests = MockRequests()
    # Register the same body for every URL.  (The original code also
    # pre-assigned photos[0] separately, which the loop immediately
    # repeated; that redundant assignment is removed, and the xrange
    # index loop is replaced with direct iteration.)
    for p in photos:
        requests.contents[p['url_o']] = response
    file_store = Mock()
    file_store.exists.return_value = True  # both already stored
    photo.download_originals(photos, ['25461030990'], file_store, requests,
        StringIO())
    # Only the modified photo is (re)saved.
    file_store.save_image.assert_called_with(
        'originals/25461030990_o.jpg', response)
def test_download_info(self):
    """Each photo's info is fetched via the mock Flickr API and the inner
    'photo' payload is persisted as photo-info/<id>.json."""
    photos = [{'id': '1'}, {'id': '2'}]
    responses = {
        '1': { "photo": { "id": "1", "secret": "s1" }, "stat": "ok" },
        '2': { "photo": { "id": "2", "secret": "s2" }, "stat": "ok" }
    }
    file_store = Mock()
    file_store.exists.return_value = False  # nothing cached yet
    downloader = FlickrApiDownloader(file_store, Mock())
    flickr = MockFlickrApi(responses)
    photo.download_info(photos, downloader, flickr, StringIO())
    file_store.save_json.assert_has_calls([
        call('photo-info/1.json', responses['1']['photo']),
        call('photo-info/2.json', responses['2']['photo'])
    ])
def test_download_infos_skips_existing(self):
photos = [{'id': '1'}]
file_store = Mock()
file_store.exists.return_value = True
flickr = MockFlickrApi({'1': {'photo': {}}})
downloader = FlickrApiDownloader(file_store, Mock())
photo.download_info(photos, downloader, flickr, StringIO())
self.assertEqual(flickr.photos.getInfo.call_count, 0) | 0.37319 | 0.174445 |
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtailmarkdown.blocks
class Migration(migrations.Migration):
    """Auto-generated migration adding per-language variants (ar/de/en/es/
    fr/ja/pt/zh) of the heading, sub_heading, lead_in, description and
    subsections fields to SetupGuideLandingPage and SetupGuidePage.
    NOTE(review): field suffixes suggest a modeltranslation-style tool
    generated this -- confirm before hand-editing."""

    # Must run after the lead_in field was added to the landing page.
    dependencies = [
        ('setup_guide', '0003_setupguidelandingpage_lead_in'),
    ]

    operations = [
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_ar',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_de',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_en',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_es',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_fr',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_ja',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_pt',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_zh',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_ar',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_de',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_en',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_es',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_fr',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_ja',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_pt',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_zh',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_ar',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_de',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_en',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_es',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_fr',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_ja',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_pt',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_zh',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
] | setup_guide/migrations/0004_auto_20180322_1443.py | from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtailmarkdown.blocks
class Migration(migrations.Migration):
    """Auto-generated migration adding per-language variants (ar/de/en/es/
    fr/ja/pt/zh) of the heading, sub_heading, lead_in, description and
    subsections fields to SetupGuideLandingPage and SetupGuidePage.
    NOTE(review): field suffixes suggest a modeltranslation-style tool
    generated this -- confirm before hand-editing."""

    # Must run after the lead_in field was added to the landing page.
    dependencies = [
        ('setup_guide', '0003_setupguidelandingpage_lead_in'),
    ]

    operations = [
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_ar',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_de',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_en',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_es',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_fr',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_ja',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_pt',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='lead_in_zh',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidelandingpage',
name='sub_heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_ar',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_de',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_en',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_es',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_fr',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_ja',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_pt',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='description_zh',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_ar',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_de',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_en',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_es',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_fr',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_ja',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_pt',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='sub_heading_zh',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_ar',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_de',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_en',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_es',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_fr',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_ja',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_pt',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
migrations.AddField(
model_name='setupguidepage',
name='subsections_zh',
field=wagtail.core.fields.StreamField((('markdown', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=255)), ('content', wagtailmarkdown.blocks.MarkdownBlock())))),), null=True),
),
] | 0.644337 | 0.10904 |