max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
appdaemon/apps/common/common.py | Mithras/ha | 3 | 17100 | <gh_stars>1-10
import hassapi as hass
import csv
from collections import namedtuple
# One light profile: a name plus CIE-xy color coordinates and a brightness
# level (assumed from the float/float/int conversions below — TODO confirm
# against the CSV producer).
Profile = namedtuple(
    "Profile", ["profile", "x_color", "y_color", "brightness"])
# Load every profile once at import time from the Home Assistant config volume.
with open("/config/light_profiles.csv") as profiles_file:
    profiles_reader = csv.reader(profiles_file)
    next(profiles_reader)  # skip the CSV header row
    # CSV columns: profile name, x color, y color, brightness.
    LIGHT_PROFILES = [Profile(row[0], float(row[1]), float(
        row[2]), int(row[3])) for row in profiles_reader]
class Common(hass.Hass):
    """Shared AppDaemon helpers used by the other apps.

    Caches the Telegram chat ids from the app configuration and exposes
    small wrappers around ``call_service`` for messaging and for turning
    entities and light profiles on/off.
    """

    async def initialize(self):
        """Cache Telegram chat ids and the external URL from the app config."""
        config = self.args["config"]
        self.telegram_mithras = config["telegram_mithras"]
        self.telegram_debug_chat = config["telegram_debug_chat"]
        self.telegram_state_chat_mithras = config["telegram_state_chat_mithras"]
        self.telegram_state_chat_diana = config["telegram_state_chat_diana"]
        self.telegram_alarm_chat = config["telegram_alarm_chat"]
        self.external_url = config["external_url"]

    async def is_sleep_async(self):
        """Return True when the household `sleep` toggle is on."""
        return await self.get_state("input_boolean.sleep") == "on"

    async def send_state_async(self, person: str, message: str, **kwargs):
        """Send *message* to the Telegram state chat belonging to *person*.

        Raises:
            ValueError: if *person* is not one of the known person entities.
        """
        if person == "person.mithras":
            target = self.telegram_state_chat_mithras
        elif person == "person.diana":
            target = self.telegram_state_chat_diana
        else:
            # Fix: an unknown person previously crashed with an
            # UnboundLocalError on `target`; fail with an explicit error.
            raise ValueError("Unknown person entity: %s" % person)
        await self.call_service("telegram_bot/send_message",
                                target=[target],
                                message=message,
                                **kwargs)

    async def send_alarm_async(self, message: str, **kwargs):
        """Send *message* to the alarm chat."""
        await self.call_service("telegram_bot/send_message",
                                target=[self.telegram_alarm_chat],
                                message=message,
                                **kwargs)

    async def send_debug_async(self, message: str, **kwargs):
        """Send *message* to the debug chat."""
        await self.call_service("telegram_bot/send_message",
                                target=[self.telegram_debug_chat],
                                message=message,
                                **kwargs)

    async def turn_on_async(self, entity: str):
        """Call ``<domain>/turn_on`` for *entity* (e.g. ``light.kitchen``)."""
        [domain, _] = entity.split(".")
        await self.call_service(f"{domain}/turn_on",
                                entity_id=entity)

    async def turn_off_async(self, entity: str):
        """Call ``<domain>/turn_off`` for *entity*."""
        [domain, _] = entity.split(".")
        await self.call_service(f"{domain}/turn_off",
                                entity_id=entity)

    async def light_turn_bright_async(self, light_group: str):
        """Apply the `bright` light profile to *light_group*."""
        await self.light_turn_profile_async(light_group, "bright")

    async def light_turn_dimmed_async(self, light_group: str):
        """Apply the `dimmed` light profile to *light_group*."""
        await self.light_turn_profile_async(light_group, "dimmed")

    async def light_turn_nightlight_async(self, light_group: str):
        """Apply the `nightlight` light profile to *light_group*."""
        await self.light_turn_profile_async(light_group, "nightlight")

    async def light_turn_profile_async(self, light_group: str, profile: str):
        """Apply a named light profile, treating "off" as turn-off."""
        if profile == "off":
            await self.turn_off_async(light_group)
        else:
            await self.call_service("light/turn_on",
                                    entity_id=light_group,
                                    profile=profile)

    # TODO: test
    async def light_flash(self, light_group: str, flash="short"):
        """Flash *light_group* ("short" or "long" per Home Assistant)."""
        await self.call_service("light/turn_on",
                                entity_id=light_group,
                                flash=flash)
| 2.671875 | 3 |
src/mongo_model.py | zxteloiv/curated-geokb-subsearcher | 0 | 17101 | # coding: utf-8
from pymongo import MongoClient
import conf
class MongoQuery(object):
    """Thin query layer over the `geokb` MongoDB database."""

    def __init__(self):
        self._conn = MongoClient(conf.mongodb_conn_str)
        self._db = self._conn.geokb

    @staticmethod
    def _sort_spec(sort_keys):
        """Build the pymongo sort specification: popularity first, then any
        requested `_sys_ranks` fields, all descending.

        `sort_keys` may be None or contain None entries; both are skipped.
        (Fix: `query` previously used bare strings for the `_sys_ranks`
        entries, which pymongo treats as *ascending*, while `coarse_query`
        used explicit descending pairs; and passing `sort_keys=None`
        crashed.  Both methods now share this helper.)
        """
        extra = [('_sys_ranks.%s' % x[0], -1)
                 for x in (sort_keys or []) if x is not None]
        return [('popularity', -1)] + extra

    def query(self, grounded, limit=15, sort_keys=None):
        """Run the grounded query and return the selected fields per doc."""
        col = self._db[grounded['from']]
        docs = col.find(grounded['where'],
                        limit=limit,
                        sort=self._sort_spec(sort_keys))
        if '*' in grounded['select']:
            # Select everything except the internal Mongo id.
            res = [dict((k, v) for k, v in doc.items() if k != '_id')
                   for doc in docs]
        else:
            res = []
            for doc in docs:
                selected = {}
                for k in grounded['select']:
                    if k in doc:
                        selected[k] = doc[k]
                res.append(selected)
        return res

    def coarse_query(self, grounded, limit=2000, sort_keys=None):
        """Fetch up to `limit` raw documents (minus `_id`) for re-ranking."""
        col = self._db[grounded['from']]
        docs = col.find(grounded['where'],
                        limit=limit,
                        sort=self._sort_spec(sort_keys))
        return [dict((k, v) for k, v in doc.items() if k != '_id')
                for doc in docs]

    def project(self, docs, grounded, limit=15):
        """Project re-ranked docs down to the selected fields.

        Keeps at most `limit` results and drops docs whose TimeRanker
        re-rank score is below 1 (docs without that score are kept).
        """
        res = []
        for doc in docs:
            # Fix: the cap previously used a hard-coded 15 instead of the
            # `limit` parameter.
            if len(res) >= limit:
                break
            try:
                score = doc['_rerank']['TimeRanker']
                if score < 1:
                    continue
            except KeyError:
                pass
            if '*' in grounded['select']:
                doc = dict((k, v) if type(v) != type([]) else (k, self._merge_obj_array(v))
                           for k, v in doc.items() if k != '_id')
                doc['src'] = 'geokb'
                doc['score'] = 2.0  # fixed high score for nginx blender, in another module
                res.append(doc)
            else:
                selected = {}
                for k in grounded['select']:
                    if k not in doc:
                        # Robustness: skip fields missing from the document
                        # (query() guards the same way).
                        continue
                    if type(doc[k]) == type([]):
                        selected[k] = self._merge_obj_array(doc[k])
                    else:
                        selected[k] = doc[k]
                selected['_sys_ranks'] = doc['_sys_ranks']
                selected['src'] = 'geokb'
                selected['score'] = 2.0  # fixed high score for nginx blender, in another module
                res.append(selected)
        return res

    @staticmethod
    def _merge_obj_array(arr):
        """Flatten a list of single-key objects into a list of their values,
        e.g. [{u'推荐菜': u'AA'}, {u'推荐菜': u'BB'}] -> [u'AA', u'BB'].
        Non-list inputs and lists of non-dicts are returned unchanged."""
        if len(arr) == 0 or type(arr) != type([]):
            return arr
        if type(arr[0]) != type(dict()):
            return arr
        # dict.items() works on both Python 2 and 3 (iteritems was py2-only).
        get_val_lst = lambda o: [v for _, v in o.items()]
        lst = []
        for obj in arr:
            lst += get_val_lst(obj)
        return lst
| 2.453125 | 2 |
src/dcm/agent/plugins/builtin/configure_server.py | JPWKU/unix-agent | 0 | 17102 | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import json
import logging
import os
import urllib.parse
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_utils
import dcm.agent.utils as utils
_g_logger = logging.getLogger(__name__)
class ConfigureServer(plugin_base.Plugin):
    """Plugin that runs configuration management (Chef or Puppet) on the
    server, driven by the protocol arguments received from DCM."""

    # Per protocol argument: (description, required?, converter, default).
    protocol_arguments = {
        "configType":
            ("Which configuration management software to use (chef or puppet)",
             True, str, None),
        "authId":
            ("", False, str, None),
        "configurationData":
            ("", False, plugin_utils.base64type_convertor, None),
        "encryptedConfigToken":
            ("", False, plugin_utils.base64type_convertor, None),
        "encryptedAuthSecret":
            ("", False, plugin_utils.base64type_convertor, None),
        "endpoint":
            ("", False, str, None),
        "providerRegionId":
            ("", False, str, None),
        "runAsUser":
            ("", False, str, None),
        "storageDelegate":
            ("", False, str, None),
        "storageEndpoint":
            ("", False, str, None),
        "storageAccount":
            ("", False, str, None),
        "scriptFiles":
            ("", False, list, None),
        "storagePublicKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "storagePrivateKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "environmentId":
            ("", False, str, None),
        "personalityFiles":
            ("", False, list, None),
        "configClientName":
            ("", False, str, None),
        "configCert":
            ("", False, plugin_utils.base64type_convertor, None),
        "configKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "runListIds":
            ("", False, list, None),
        "parameterList":
            ("", False, plugin_utils.base64type_convertor, None),
    }

    def __init__(self, conf, job_id, items_map, name, arguments):
        super(ConfigureServer, self).__init__(
            conf, job_id, items_map, name, arguments)
        # Fall back to the agent's system user when none was requested.
        if not self.args.runAsUser:
            self.args.runAsUser = self.conf.system_user

    def configure_server_with_chef(self):
        """Write the Chef run list and auth token to temp files, then run
        the runConfigurationManagement-CHEF script.

        Returns:
            The (stdout, stderr, rc) tuple from the helper script.
        """
        chef_dir = self.conf.get_temp_file("chefconf", isdir=True)
        run_list_file_name = os.path.join(chef_dir, "runList.cfg")
        token_file_path = self.conf.get_temp_file("token.pem")
        try:
            if self.args.encryptedAuthSecret:
                token = self.args.encryptedAuthSecret
            else:
                token = "NULL"
            # The helper script expects the literal string "NULL" for any
            # optional argument that was not supplied.
            authId = self.args.authId
            if authId is None:
                authId = "NULL"
            endpoint = self.args.endpoint
            if endpoint is None:
                endpoint = "NULL"
            environmentId = self.args.environmentId
            if environmentId is None:
                environmentId = "NULL"
            chef_json = {"run_list": self.args.runListIds}
            with open(run_list_file_name, "w") as fptr:
                fptr.write(json.dumps(chef_json))
            with open(token_file_path, "w") as fptr:
                fptr.write(token)
                fptr.write(os.linesep)
            exe = self.conf.get_script_location(
                "runConfigurationManagement-CHEF")
            cmd_list = [exe,
                        self.args.runAsUser,
                        self.args.configClientName,
                        token_file_path,
                        run_list_file_name,
                        authId,
                        endpoint,
                        environmentId,
                        self.conf.configuration_management_chef_client_version]
            return plugin_utils.run_command(self.conf, cmd_list)
        finally:
            # Never leave the run list or credentials on disk.
            plugin_utils.safe_delete(run_list_file_name)
            plugin_utils.safe_delete(token_file_path)

    def _edit_puppet_conf(self, template_path, new_location, endpoint):
        """Copy the puppet.conf template to *new_location*, filling in the
        agent certname and server."""
        # Fix: SafeConfigParser was deprecated since Python 3.2 and removed
        # in 3.12; ConfigParser is its direct replacement.
        parser = configparser.ConfigParser()
        parser.read(template_path)
        if not parser.has_section("agent"):
            parser.add_section("agent")
        parser.set("agent", "certname", self.args.configClientName)
        parser.set("agent", "server", endpoint)
        with open(new_location, "w") as fptr:
            parser.write(fptr)

    def configure_server_with_puppet(self):
        """Install the puppet extras, render puppet.conf, write the client
        cert/key to temp files, and run runConfigurationManagement-PUPPET.

        Returns:
            The (stdout, stderr, rc) tuple from the helper script.

        Raises:
            AgentOptionValueNotSetException: if no endpoint was supplied.
            AgentExtrasNotInstalledException: if the puppet extras are
                missing or incomplete after installation.
        """
        if self.args.endpoint is None:
            raise exceptions.AgentOptionValueNotSetException("endpoint")
        # XXX it will only work with the default port. There is no way for
        # the user to configure anything else in the console
        endpoint = urllib.parse.urlparse(self.args.endpoint).hostname
        puppet_extras_base_path = os.path.join(self.conf.extra_base_path,
                                               "puppetconf")
        puppet_extras_bin = os.path.join(self.conf.extra_base_path,
                                         "bin/puppet")
        try:
            utils.install_extras(
                self.conf, package=self.conf.extra_package_name)
        except exceptions.AgentExtrasNotInstalledException as ex:
            _g_logger.exception("An error occurred trying to install puppet. "
                                "Exception message is %s" % str(ex))
            raise
        template_puppet_conf_path = os.path.join(puppet_extras_base_path,
                                                 "puppet.conf.template")
        # Sanity-check that the extras package actually delivered puppet.
        if not os.path.exists(template_puppet_conf_path):
            raise exceptions.AgentExtrasNotInstalledException(
                "The puppet.conf template did not install properly.")
        if not os.path.exists(puppet_extras_bin):
            raise exceptions.AgentExtrasNotInstalledException(
                "The puppet binary did not install properly.")
        puppet_conf_path = self.conf.get_temp_file("puppet.conf")
        self._edit_puppet_conf(template_puppet_conf_path,
                               puppet_conf_path,
                               endpoint)
        cert_file_path = self.conf.get_temp_file("cert.pem")
        key_file_path = self.conf.get_temp_file("key.pem")
        try:
            with open(cert_file_path, "w") as fptr:
                fptr.write(self.args.configCert)
            with open(key_file_path, "w") as fptr:
                fptr.write(self.args.configKey)
            exe = self.conf.get_script_location(
                "runConfigurationManagement-PUPPET")
            cmd = [exe,
                   endpoint,
                   cert_file_path,
                   key_file_path,
                   self.args.configClientName,
                   self.conf.extra_base_path,
                   puppet_conf_path]
            return plugin_utils.run_command(self.conf, cmd)
        finally:
            # Never leave the client certificate/key on disk.
            plugin_utils.safe_delete(cert_file_path)
            plugin_utils.safe_delete(key_file_path)
            plugin_utils.safe_delete(puppet_conf_path)

    def run(self):
        """Dispatch to Chef or Puppet based on configType and forward the
        script's output/errors to the DCM console.

        Raises:
            AgentPluginParameterBadValueException: if configType is neither
                CHEF nor PUPPET.
        """
        _g_logger.info("Running configuration management of type " +
                       self.args.configType)
        if self.args.configType.upper() == "CHEF":
            (stdout, stderr, rc) = self.configure_server_with_chef()
        elif self.args.configType.upper() == "PUPPET":
            (stdout, stderr, rc) = self.configure_server_with_puppet()
        else:
            raise plugin_exceptions.AgentPluginParameterBadValueException(
                "configType", "CHEF or PUPPET")
        if stderr:
            dcm_logger.log_to_dcm_console_configuration_management_error(
                stderr=stderr)
        if stdout:
            dcm_logger.log_to_dcm_console_configuration_management_output(
                stdout=stdout)
        if rc != 0:
            return plugin_base.PluginReply(rc, message=stderr)
        else:
            return plugin_base.PluginReply(
                rc, reply_type="string", reply_object=stdout)
def load_plugin(conf, job_id, items_map, name, arguments):
    """Plugin entry point: return a ConfigureServer instance for this job."""
    return ConfigureServer(conf, job_id, items_map, name, arguments)
| 1.632813 | 2 |
phy/plot/interact.py | ycanerol/phy | 118 | 17103 | <filename>phy/plot/interact.py
# -*- coding: utf-8 -*-
"""Common layouts."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import numpy as np
from phylib.utils import emit
from phylib.utils.geometry import get_non_overlapping_boxes, get_closest_box
from .base import BaseLayout
from .transform import Scale, Range, Subplot, Clip, NDC
from .utils import _get_texture, _in_polygon
from .visuals import LineVisual, PolygonVisual
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Grid
#------------------------------------------------------------------------------
class Grid(BaseLayout):
    """Layout showing subplots arranged in a 2D grid.
    Constructor
    -----------
    shape : tuple or str
        Number of rows, cols in the grid.
    shape_var : str
        Name of the GLSL uniform variable that holds the shape, when it is variable.
    box_var : str
        Name of the GLSL variable with the box index.
    has_clip : boolean
        Whether subplots should be clipped.
    Note
    ----
    To be used in a grid, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """
    # Relative margin between subplots (fraction of NDC).
    margin = .075
    # A box index is a (row, col) pair.
    n_dims = 2
    # (row, col) of the currently-active subplot.
    active_box = (0, 0)
    # Global (x, y) scaling applied to the whole grid.
    _scaling = (1., 1.)
    def __init__(self, shape=(1, 1), shape_var='u_grid_shape', box_var=None, has_clip=True):
        super(Grid, self).__init__(box_var=box_var)
        self.shape_var = shape_var
        self._shape = shape
        ms = 1 - self.margin
        mc = 1 - self.margin
        # Define the GPU transforms of the Grid layout.
        # 1. Global scaling.
        self.gpu_transforms.add(Scale(self._scaling, gpu_var='u_grid_scaling'))
        # 2. Margin.
        self.gpu_transforms.add(Scale((ms, ms)))
        # 3. Clipping for the subplots.
        if has_clip:
            self.gpu_transforms.add(Clip([-mc, -mc, +mc, +mc]))
        # 4. Subplots.
        self.gpu_transforms.add(Subplot(
            # The parameters of the subplots are callable as they can be changed dynamically.
            shape=lambda: self._shape, index=lambda: self.active_box,
            shape_gpu_var=self.shape_var, index_gpu_var=self.box_var))
    def attach(self, canvas):
        """Attach the grid to a canvas."""
        super(Grid, self).attach(canvas)
        canvas.gpu_transforms += self.gpu_transforms
        # Declare the per-vertex box index and the grid uniforms in the
        # vertex shader header.
        canvas.inserter.insert_vert(
            """
            attribute vec2 {};
            uniform vec2 {};
            uniform vec2 u_grid_scaling;
            """.format(self.box_var, self.shape_var),
            'header', origin=self)
    def add_boxes(self, canvas, shape=None):
        """Show subplot boxes."""
        shape = shape or self.shape
        assert isinstance(shape, tuple)
        n, m = shape
        n_boxes = n * m
        # Slightly inside [-1, 1] so the border lines are not clipped away.
        a = 1 - .0001
        # Four line segments (x0, y0, x1, y1) forming one box outline.
        pos = np.array([[-a, -a, +a, -a],
                        [+a, -a, +a, +a],
                        [+a, +a, -a, +a],
                        [-a, +a, -a, -a],
                        ])
        pos = np.tile(pos, (n_boxes, 1))
        box_index = []
        for i in range(n):
            for j in range(m):
                box_index.append([i, j])
        box_index = np.vstack(box_index)
        # 8 vertices per box: 4 segments x 2 endpoints.
        box_index = np.repeat(box_index, 8, axis=0)
        boxes = LineVisual()
        # We exclude this interact when adding the visual.
        canvas.add_visual(boxes, clearable=False)
        boxes.set_data(pos=pos)
        boxes.set_box_index(box_index)
        canvas.update()
    def get_closest_box(self, pos):
        """Get the box index (i, j) closest to a given position in NDC coordinates."""
        x, y = pos
        rows, cols = self.shape
        # Map NDC [-1, 1] to column/row indices; note y is flipped (row 0 on top).
        j = np.clip(int(cols * (1. + x) / 2.), 0, cols - 1)
        i = np.clip(int(rows * (1. - y) / 2.), 0, rows - 1)
        return i, j
    def update_visual(self, visual):
        """Update a visual."""
        super(Grid, self).update_visual(visual)
        # Only push the uniforms if the visual's shader actually uses them.
        if self.shape_var in visual.program:
            visual.program[self.shape_var] = self._shape
            visual.program['u_grid_scaling'] = self._scaling
    @property
    def shape(self):
        """Return the grid shape."""
        return self._shape
    @shape.setter
    def shape(self, value):
        self._shape = value
        self.update()
    @property
    def scaling(self):
        """Return the grid scaling."""
        return self._scaling
    @scaling.setter
    def scaling(self, value):
        self._scaling = value
        self.update()
#------------------------------------------------------------------------------
# Boxed
#------------------------------------------------------------------------------
class Boxed(BaseLayout):
    """Layout showing plots in rectangles at arbitrary positions. Used by the waveform view.
    The boxes are specified via their center positions and optional sizes, in which case
    an iterative algorithm is used to find the largest box size that will not make them overlap.
    Constructor
    ----------
    box_pos : array-like (2D, shape[1] == 2)
        Position of the centers of the boxes.
    box_var : str
        Name of the GLSL variable with the box index.
    keep_aspect_ratio : boolean
        Whether to keep the aspect ratio of the bounds.
    Note
    ----
    To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """
    # Relative margin around the boxes.
    margin = .1
    # A box index is a single integer (unlike Grid's (row, col) pair).
    n_dims = 1
    # Index of the currently-active box.
    active_box = 0
    # (width, height) multipliers applied to each individual box.
    _box_scaling = (1., 1.)
    # (width, height) multipliers applied to the whole layout.
    _layout_scaling = (1., 1.)
    # Multiplicative step used by the expand_*/shrink_* methods.
    _scaling_param_increment = 1.1
    def __init__(self, box_pos=None, box_var=None, keep_aspect_ratio=False):
        super(Boxed, self).__init__(box_var=box_var)
        self._key_pressed = None
        self.keep_aspect_ratio = keep_aspect_ratio
        # Compute non-overlapping box positions and the common box size.
        self.update_boxes(box_pos)
        # GPU transform mapping NDC onto the bounds of the active box.
        self.gpu_transforms.add(Range(
            NDC, lambda: self.box_bounds[self.active_box],
            from_gpu_var='vec4(-1, -1, 1, 1)', to_gpu_var='box_bounds'))
    def attach(self, canvas):
        """Attach the boxed interact to a canvas."""
        super(Boxed, self).attach(canvas)
        canvas.gpu_transforms += self.gpu_transforms
        # Box centers are passed to the shader via a texture (u_box_pos).
        canvas.inserter.insert_vert("""
            #include "utils.glsl"
            attribute float {};
            uniform sampler2D u_box_pos;
            uniform float n_boxes;
            uniform vec2 u_box_size;
            uniform vec2 u_layout_scaling;
            """.format(self.box_var), 'header', origin=self)
        canvas.inserter.insert_vert("""
            // Fetch the box bounds for the current box (`box_var`).
            vec2 box_pos = fetch_texture({}, u_box_pos, n_boxes).xy;
            box_pos = (2 * box_pos - 1);  // from [0, 1] (texture) to [-1, 1] (NDC)
            box_pos = box_pos * u_layout_scaling;
            vec4 box_bounds = vec4(box_pos - u_box_size, box_pos + u_box_size);
            """.format(self.box_var), 'start', origin=self)
    def update_visual(self, visual):
        """Update a visual."""
        super(Boxed, self).update_visual(visual)
        # Encode the box centers into a texture readable by the shader.
        box_pos = _get_texture(self.box_pos, (0, 0), self.n_boxes, [-1, 1])
        box_pos = box_pos.astype(np.float32)
        if 'u_box_pos' in visual.program:
            logger.log(5, "Update visual with interact Boxed.")
            visual.program['u_box_pos'] = box_pos
            visual.program['n_boxes'] = self.n_boxes
            visual.program['u_box_size'] = np.array(self.box_size) * np.array(self._box_scaling)
            visual.program['u_layout_scaling'] = self._layout_scaling
    def update_boxes(self, box_pos):
        """Update the box positions and automatically-computed size."""
        self.box_pos, self.box_size = get_non_overlapping_boxes(box_pos)
    def add_boxes(self, canvas):
        """Show the boxes borders."""
        n_boxes = len(self.box_pos)
        # Slightly outside [-1, 1] so the borders surround the content.
        a = 1 + .05
        # Four line segments (x0, y0, x1, y1) forming one box outline.
        pos = np.array([[-a, -a, +a, -a],
                        [+a, -a, +a, +a],
                        [+a, +a, -a, +a],
                        [-a, +a, -a, -a],
                        ])
        pos = np.tile(pos, (n_boxes, 1))
        boxes = LineVisual()
        # 8 vertices per box: 4 segments x 2 endpoints.
        box_index = np.repeat(np.arange(n_boxes), 8)
        canvas.add_visual(boxes, clearable=False)
        boxes.set_data(pos=pos, color=(.5, .5, .5, 1))
        boxes.set_box_index(box_index)
        canvas.update()
    # Change the box bounds, positions, or size
    #--------------------------------------------------------------------------
    @property
    def n_boxes(self):
        """Total number of boxes."""
        return len(self.box_pos)
    @property
    def box_bounds(self):
        """Bounds of the boxes."""
        bs = np.array(self.box_size)
        # (x0, y0, x1, y1) per box: center -/+ half-extents.
        return np.c_[self.box_pos - bs, self.box_pos + bs]
    def get_closest_box(self, pos):
        """Get the box closest to some position."""
        return get_closest_box(pos, self.box_pos, self.box_size)
    # Box scaling
    #--------------------------------------------------------------------------
    def _increment_box_scaling(self, cw=1., ch=1.):
        # Multiply the per-box scaling by (cw, ch) and redraw.
        self._box_scaling = (self._box_scaling[0] * cw, self._box_scaling[1] * ch)
        self.update()
    @property
    def box_scaling(self):
        return self._box_scaling
    def expand_box_width(self):
        return self._increment_box_scaling(cw=self._scaling_param_increment)
    def shrink_box_width(self):
        return self._increment_box_scaling(cw=1. / self._scaling_param_increment)
    def expand_box_height(self):
        return self._increment_box_scaling(ch=self._scaling_param_increment)
    def shrink_box_height(self):
        return self._increment_box_scaling(ch=1. / self._scaling_param_increment)
    # Layout scaling
    #--------------------------------------------------------------------------
    def _increment_layout_scaling(self, cw=1., ch=1.):
        # Multiply the whole-layout scaling by (cw, ch) and redraw.
        self._layout_scaling = (self._layout_scaling[0] * cw, self._layout_scaling[1] * ch)
        self.update()
    @property
    def layout_scaling(self):
        return self._layout_scaling
    def expand_layout_width(self):
        return self._increment_layout_scaling(cw=self._scaling_param_increment)
    def shrink_layout_width(self):
        return self._increment_layout_scaling(cw=1. / self._scaling_param_increment)
    def expand_layout_height(self):
        return self._increment_layout_scaling(ch=self._scaling_param_increment)
    def shrink_layout_height(self):
        return self._increment_layout_scaling(ch=1. / self._scaling_param_increment)
class Stacked(Boxed):
    """Layout showing a number of subplots stacked vertically.
    Parameters
    ----------
    n_boxes : int
        Number of boxes to stack vertically.
    box_var : str
        Name of the GLSL variable with the box index.
    origin : str
        top or bottom
    Note
    ----
    To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """
    # Stacked subplots touch each other; the shader handles the small gap.
    margin = 0
    _origin = 'bottom'
    def __init__(self, n_boxes, box_var=None, origin=None):
        self._origin = origin or self._origin
        assert self._origin in ('top', 'bottom')
        box_pos = self.get_box_pos(n_boxes)
        super(Stacked, self).__init__(box_pos, box_var=box_var, keep_aspect_ratio=False)
    @property
    def n_boxes(self):
        """Number of boxes."""
        return len(self.box_pos)
    @n_boxes.setter
    def n_boxes(self, n_boxes):
        # Ignore non-positive values rather than producing an empty layout.
        if n_boxes >= 1:
            self.update_boxes(self.get_box_pos(n_boxes))
    def get_box_pos(self, n_boxes):
        """Return the box bounds for a given number of stacked boxes."""
        # Signal bounds.
        # Centers are all at x=0, evenly spaced in y over [-1, 1].
        b = np.zeros((n_boxes, 2))
        b[:, 1] = np.linspace(-1, 1, n_boxes)
        if self._origin == 'top':
            # Reverse the row order so box 0 ends up at the top.
            b = b[::-1, :]
        return b
    @property
    def origin(self):
        """Whether to show the channels from top to bottom (`top` option, the default), or from
        bottom to top (`bottom`)."""
        return self._origin
    @origin.setter
    def origin(self, value):
        self._origin = value
        self.update_boxes(self.get_box_pos(self.n_boxes))
        self.update()
    def attach(self, canvas):
        """Attach the stacked interact to a canvas."""
        # Note: deliberately skips Boxed.attach() — the stacked shader
        # computes box bounds from n_boxes instead of a position texture.
        BaseLayout.attach(self, canvas)
        canvas.gpu_transforms += self.gpu_transforms
        canvas.inserter.insert_vert("""
            #include "utils.glsl"
            attribute float {};
            uniform float n_boxes;
            uniform bool u_top_origin;
            uniform vec2 u_box_size;
            """.format(self.box_var), 'header', origin=self)
        canvas.inserter.insert_vert("""
            float margin = .1 / n_boxes;
            float a = 1 - 2. / n_boxes + margin;
            float b = -1 + 2. / n_boxes - margin;
            float u = (u_top_origin ? (n_boxes - 1. - {bv}) : {bv}) / max(1., n_boxes - 1.);
            float y0 = -1 + u * (a + 1);
            float y1 = b + u * (1 - b);
            float ym = .5 * (y0 + y1);
            float yh = u_box_size.y * (y1 - ym);
            y0 = ym - yh;
            y1 = ym + yh;
            vec4 box_bounds = vec4(-1., y0, +1., y1);
            """.format(bv=self.box_var), 'before_transforms', origin=self)
    def update_visual(self, visual):
        """Update a visual."""
        # Skips Boxed.update_visual(): no box-position texture is needed.
        BaseLayout.update_visual(self, visual)
        if 'n_boxes' in visual.program:
            visual.program['n_boxes'] = self.n_boxes
            visual.program['u_box_size'] = self._box_scaling
            visual.program['u_top_origin'] = self._origin == 'top'
#------------------------------------------------------------------------------
# Interactive tools
#------------------------------------------------------------------------------
class Lasso(object):
    """Draw a polygon with the mouse and find the points that belong to the inside of the
    polygon."""

    def __init__(self):
        self._points = []   # polygon vertices, in NDC coordinates
        self.canvas = None  # set by attach()
        self.visual = None  # PolygonVisual, created by attach()
        self.box = None     # box of the first click; owns the whole lasso

    def add(self, pos):
        """Add a point to the polygon."""
        x, y = pos.flat if isinstance(pos, np.ndarray) else pos
        self._points.append((x, y))
        logger.debug("Lasso has %d points.", len(self._points))
        self.update_lasso_visual()

    @property
    def polygon(self):
        """Coordinates of the polygon vertices, as an (n, 2) float array."""
        l = self._points
        # Close the polygon.
        # l = l + l[0] if len(l) else l
        out = np.array(l, dtype=np.float64)
        out = np.reshape(out, (out.size // 2, 2))
        assert out.ndim == 2
        assert out.shape[1] == 2
        return out

    def clear(self):
        """Reset the lasso."""
        self._points = []
        self.box = None
        self.update_lasso_visual()

    @property
    def count(self):
        """Number of vertices in the polygon."""
        return len(self._points)

    def in_polygon(self, pos):
        """Return which points belong to the polygon."""
        return _in_polygon(pos, self.polygon)

    def attach(self, canvas):
        """Attach the lasso to a canvas."""
        canvas.attach_events(self)
        self.canvas = canvas
        self.create_lasso_visual()

    def create_lasso_visual(self):
        """Create the lasso visual."""
        self.visual = PolygonVisual()
        self.canvas.add_visual(self.visual, clearable=False)

    def update_lasso_visual(self):
        """Update the lasso visual with the current polygon."""
        # Fix: the old guard (`if not self.visual and self.count > 0: return`)
        # fell through when there was no visual AND no points (e.g. clear()
        # before attach()), crashing with AttributeError on None.set_data.
        # Simply do nothing until a visual exists.
        if not self.visual:
            return
        # The following call updates a_box_index with the active box in BaseLayout.
        self.visual.set_data(pos=self.polygon)
        self.canvas.update()

    def on_mouse_click(self, e):
        """Add a polygon point with ctrl+click."""
        if 'Control' in e.modifiers:
            if e.button == 'Left':
                layout = getattr(self.canvas, 'layout', None)
                if hasattr(layout, 'box_map'):
                    box, pos = layout.box_map(e.pos)
                    # Only update the box for the first click, so that the box containing
                    # the lasso is determined by the first click only.
                    if self.box is None:
                        self.box = box
                    # Avoid clicks outside the active box (box of the first click).
                    if box != self.box:
                        return
                else:  # pragma: no cover
                    pos = self.canvas.window_to_ndc(e.pos)
                # Force the active box to be the box of the first click, not the box of the
                # current click.
                if layout:
                    layout.active_box = self.box
                self.add(pos)  # call update_lasso_visual
                emit("lasso_updated", self.canvas, self.polygon)
            else:
                self.clear()
                self.box = None

    def __repr__(self):
        return str(self.polygon)
| 2.4375 | 2 |
AutocompleteHandler.py | codeforamerica/sheltraustin | 0 | 17104 | <filename>AutocompleteHandler.py
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import simplejson
from QueryHandler import QueryHandler
class AutocompleteHandler(tornado.web.RequestHandler):
    """Serve address autocomplete results, or the index page when no
    address query is supplied."""

    @tornado.web.asynchronous
    def get(self):
        """Handle GET: look up `address` via QueryHandler and write the
        matches; fall back to rendering index.html when it is absent."""
        arguments = self.request.arguments
        # An absent or empty query string means there is nothing to look up.
        # (Merges the previous redundant `not arguments or arguments == {}`
        # check and the non-idiomatic `not 'address' in ...` test.)
        if not arguments or 'address' not in arguments:
            self.render('index.html')
            return
        address = arguments['address'][0]
        data = {
            'address': address
        }
        output = QueryHandler.get_addresses(data)
        self.write(output)
        self.flush()
        self.finish()
mirari/SV/migrations/0052_auto_20190428_1522.py | gcastellan0s/mirariapp | 0 | 17105 | <gh_stars>0
# Generated by Django 2.0.5 on 2019-04-28 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add barcode/tax/pricing fields to Product
    and make TicketProducts.ticket nullable."""

    dependencies = [
        ('SV', '0051_ticketproducts_offerprice'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='bar_code',
            field=models.CharField(blank=True, help_text='(sugerido)', max_length=250, null=True, verbose_name='Código de Barras '),
        ),
        migrations.AddField(
            model_name='product',
            name='ieps',
            field=models.BooleanField(default=True, help_text='Graba IEPS? (sugerido)', verbose_name='IEPS. '),
        ),
        migrations.AddField(
            model_name='product',
            name='is_dynamic',
            field=models.BooleanField(default=False, help_text='Este producto tiene precio variable? (sugerido)', verbose_name='Precio dinámico '),
        ),
        migrations.AddField(
            model_name='product',
            name='is_favorite',
            field=models.BooleanField(default=False, help_text='Se muestra siempre este producto? (sugerido)', verbose_name='Es favorito? '),
        ),
        migrations.AddField(
            model_name='product',
            name='iva',
            field=models.BooleanField(default=True, help_text='Graba IVA? (sugerido)', verbose_name='I.V.A. '),
        ),
        migrations.AddField(
            model_name='product',
            name='price',
            field=models.FloatField(default=0, help_text='Graba IVA? (sugerido)', verbose_name='Precio en esta sucursal '),
        ),
        # Relaxes the FK so tickets can be detached from their products.
        migrations.AlterField(
            model_name='ticketproducts',
            name='ticket',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='SV.Ticket'),
        ),
    ]
| 1.539063 | 2 |
svl/compiler/plot_validators.py | timothyrenner/svl | 8 | 17106 | from toolz import get
# Each validator is a tuple of:
#   (set of plot types it applies to,
#    predicate returning True when the plot is INVALID,
#    failure message to report).
# validate_plot() runs every applicable predicate and collects the messages.
PLOT_VALIDATORS = [
    (
        {"line", "scatter", "bar"},
        lambda x: ("x" not in x) or ("y" not in x),
        "XY plot does not have X and Y.",
    ),
    (
        {"histogram"},
        lambda x: ("step" in x) and ("bins" in x),
        "Histogram cannot have STEP and BINS.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: ("agg" in x["x"]) and ("agg" in x["y"]),
        "XY plot cannot have an aggregation on X and Y.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("agg" in get("x", x, {}))
        or ("agg" in get("y", x, {}))
        or ("agg" in get("axis", x, {})),
        "Histograms and pie charts cannot have aggregations.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("temporal" in get("x", x, {}))
        or ("temporal" in get("y", x, {}))
        or ("temporal" in get("axis", x, {})),
        "Histograms and pie charts cannot have temporal axes.",
    ),
    (
        {"histogram"},
        lambda x: ("x" in x) and ("y" in x),
        "Histograms can have X or Y, not both.",
    ),
    (
        {"histogram"},
        lambda x: ("x" not in x) and ("y" not in x),
        "Histograms must have an X or Y.",
    ),
    ({"pie"}, lambda x: "axis" not in x, "Pie charts must have an axis."),
    (
        {"line", "bar"},  # SORT is a no-op for scatter.
        lambda x: ("sort" in x["x"]) and ("sort" in x["y"]),
        "Cannot sort by two axes.",
    ),
    (
        {"pie"},
        lambda x: (get("hole", x, 0.0) < 0) or (get("hole", x, 0.0) > 1),
        "HOLE must be between zero and one.",
    ),
    (
        {"histogram"},
        lambda x: get("step", x, 1) <= 0,
        "STEP must be greater than zero.",
    ),
    (
        {"histogram"},
        lambda x: get("bins", x, 1) <= 0,
        "BINS must be greater than zero.",
    ),
    (
        {"histogram", "pie"},
        lambda x: "color_by" in x,
        "Histograms and pie charts cannot have COLOR BY.",
    ),
    ({"pie"}, lambda x: "split_by" in x, "Pie charts cannot have SPLIT BY."),
    (
        {"line", "scatter", "bar"},
        lambda x: ("split_by" in x) and ("color_by" in x),
        "Cannot have COLOR BY and SPLIT BY on same plot.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: (
            # If we don't include this it can throw exceptions for other
            # validators.
            ("x" in x)
            and ("y" in x)
        )
        and (("agg" in x["x"]) or ("agg" in x["y"]))
        and (("color_by" in x) and ("agg" not in x["color_by"])),
        "If there's an aggregation on X or Y, COLOR BY must also aggregate.",
    ),
]
def validate_plot(svl_plot):
    """Check an SVL plot specifier against all applicable validators.

    Parameters
    ----------
    svl_plot : dict
        The SVL plot specifier; must contain a "type" key.

    Returns
    -------
    Tuple[bool, str]
        True and an empty string when the plot passes every applicable
        validator; otherwise False and the failure messages joined with
        newlines.
    """
    plot_type = svl_plot["type"]
    failures = [
        message
        for applicable_types, is_invalid, message in PLOT_VALIDATORS
        if plot_type in applicable_types and is_invalid(svl_plot)
    ]
    return not failures, "\n".join(failures)
| 2.359375 | 2 |
decision_tree.py | cjbayron/ml-models | 1 | 17107 | <filename>decision_tree.py
'''
Building a Decision Tree using CART (from scratch)
Note: Code was tested only on dataset with numerical features.
Categorical features are not yet fully supported.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from scikitplot.metrics import plot_confusion_matrix
import common.utils as ut
# get data from:
# https://www.kaggle.com/c/otto-group-product-classification-challenge
TRN_DATA_PATH = 'datasets/otto-group-product-classification/train.csv'
NUM_SAMPLES = 5000
NUM_FEATS = 93
def visualize_data(feats, true_labels, preds):
    '''Display labeled data and clustered data

    Reduces `feats` to 2D with t-SNE and draws two side-by-side scatter
    plots: one colored by the true labels, one by the predictions.
    NOTE(review): assumes labels are integers 1..9 (used to index
    `label2col_map` via `label - 1`) — confirm against the dataset.
    '''
    print("Visualizing data...")
    # project features to two components for plotting
    red_feats = ut.reduce_to_2D_by_tsne(feats)
    label2col_map = ['red', 'orange', 'yellow', 'green', 'blue',
                     'violet', 'brown', 'gray', 'pink']
    label_list = np.unique(true_labels)
    _, ax = plt.subplots(ncols=2, figsize=(10, 5))
    # left axis gets true labels, right axis gets predictions
    graph_label_pair = zip(ax, [true_labels, preds])
    for graph, labels in graph_label_pair:
        for label in label_list:
            # get samples with label == label
            idxs = np.where(labels == label)
            # get components
            pc1, pc2 = red_feats['pc1'].values[idxs], red_feats['pc2'].values[idxs]
            # scatter plot w/ color based on labels
            graph.scatter(x=pc1, y=pc2, color=label2col_map[label-1],
                          alpha=0.5, label=label)
        graph.set_xlabel('PC1')
        graph.set_ylabel('PC2')
    ax[0].set_title('Labeled Products')
    ax[1].set_title('Predicted Labels')
    for graph in ax:
        graph.legend()  # show legend
        graph.grid(True)  # show gridlines
    plt.show()
def get_impurity(labels):
    '''Calculate Gini impurity.

    Parameters
    ----------
    labels : array-like
        Class labels of the samples in a node.

    Returns
    -------
    float
        Gini impurity, 1 - sum(p_i^2) over the class frequencies p_i.
        Returns 0.0 for an empty input (the original raised
        ZeroDivisionError in that case).
    '''
    num_labels = len(labels)
    if num_labels == 0:
        return 0.0
    _, cnts = np.unique(labels, return_counts=True)
    # sum p*(1-p) == 1 - sum p^2; computed vectorized instead of a loop
    probs = cnts / float(num_labels)
    return float(1.0 - np.sum(probs ** 2))
def get_best_split_along_column(data, labels, feat_idx, categorical=False):
    '''Get best split using features in a single column.

    Evaluates every unique value of the column as a candidate splitter
    and keeps the one minimizing the weighted Gini impurity.

    Parameters
    ----------
    data : np.ndarray
        2D array (samples x features).
    labels : np.ndarray
        1D array of class labels aligned with `data`.
    feat_idx : int
        Index of the feature column to split on.
    categorical : bool
        If True, split on equality (== / !=); otherwise threshold (>= / <).

    Returns
    -------
    (min_im, splitter, left_idxs, right_idxs)
        min_im : weighted average impurity of the best split
            (np.inf if no meaningful split exists).
        splitter : winning value, or None if no split was found.
        left_idxs / right_idxs : boolean masks for the two sides.
    '''
    feat_col = data[:, feat_idx]
    splitter_pool = np.unique(feat_col)  # get splitters
    num_samples = len(data)
    min_im = np.inf
    left_idxs = []
    right_idxs = []
    splitter = None
    for val in splitter_pool:
        # FIX: the original always recorded numerical (>=/<) masks for the
        # winner, even when `categorical` was True; compute the mask once
        # and honor the flag consistently.
        if categorical:
            left_mask = (feat_col == val)
        else:
            left_mask = (feat_col >= val)
        right_mask = ~left_mask
        left_labels = labels[left_mask]
        right_labels = labels[right_mask]
        # if all data is placed on only one side
        # then it is not a meaningful split so we skip
        if len(left_labels) == num_samples or len(right_labels) == num_samples:
            continue
        avg_im = len(left_labels) * get_impurity(left_labels) + \
            len(right_labels) * get_impurity(right_labels)
        if avg_im < min_im:
            min_im = avg_im
            left_idxs = left_mask
            right_idxs = right_mask
            splitter = val
    if splitter is not None:
        # FIX: normalize by the sample count.  The original divided by
        # len(left_idxs) + len(right_idxs), but both are full-length boolean
        # masks, i.e. it divided by 2n, halving the reported impurity and
        # inflating the gain computed by the caller.
        min_im /= num_samples
    return min_im, splitter, left_idxs, right_idxs
class TreeNode():
    '''Node for a Decision Tree.

    A node either stores a binary split (`splitter`) with two children,
    or is a leaf that predicts from the training labels that reached it.
    '''
    def __init__(self):
        self.labels = None        # training labels routed to this node
        self.left_node = None     # child for samples matching the splitter
        self.right_node = None    # child for the remaining samples
        self.is_leaf = False      # True once no impurity-reducing split exists
        self.categorical = False  # whether the split feature is categorical
        self.splitter = None      # {'col': feature index, 'val': split value}

    def build_tree(self, feats, labels):
        '''Build tree recursively.

        Greedily picks, over all feature columns, the split with the
        largest Gini-impurity gain; becomes a leaf when no split helps.
        '''
        self.labels = labels
        best_gain = 0
        best_left_idxs = []
        best_right_idxs = []
        best_splitter = None
        cur_imp = get_impurity(labels)
        for col in range(len(feats[0])):
            # Note: we assume all features are numerical instead of categorical
            imp, splitter, left_idxs, right_idxs = \
                get_best_split_along_column(feats, labels, col,
                                            categorical=False)
            gain = cur_imp - imp
            if gain > best_gain:
                best_gain = gain
                best_left_idxs = left_idxs
                best_right_idxs = right_idxs
                best_splitter = {'col': col, 'val': splitter}
        self.splitter = best_splitter
        if self.splitter is None:
            # no split improves purity: this node becomes a leaf
            self.is_leaf = True
        else:
            self.left_node = TreeNode()
            self.right_node = TreeNode()
            self.left_node.build_tree(feats[best_left_idxs], labels[best_left_idxs])
            self.right_node.build_tree(feats[best_right_idxs], labels[best_right_idxs])
        return

    def classify(self, feats):
        '''Classify a single sample according to the built tree.

        FIX: leaf prediction is now the deterministic majority label of
        the samples that reached the leaf; the original drew a *random*
        label (np.random.choice), making predictions non-deterministic
        for impure leaves.
        '''
        if self.is_leaf is False and self.splitter is None:
            raise Exception("Decision tree not built!")
        if self.is_leaf:
            vals, cnts = np.unique(self.labels, return_counts=True)
            return vals[np.argmax(cnts)]
        val = self.splitter['val']
        col = self.splitter['col']
        if self.categorical:
            go_left = (feats[col] == val)
        else:
            go_left = (feats[col] >= val)
        if go_left:
            return self.left_node.classify(feats)
        return self.right_node.classify(feats)
def main():
    '''Main

    Loads the Otto product dataset, trains a CART decision tree from
    scratch, reports classification accuracy on the training and test
    splits, and visualizes the predictions.
    '''
    # NOTE(review): `global` is unnecessary here — these module constants
    # are only read, never reassigned.
    global TRN_DATA_PATH, NUM_SAMPLES, NUM_FEATS
    # no need to rescale for decision tree
    feats, labels = ut.get_data_from_csv(TRN_DATA_PATH, rescale=False)
    # optionally subsample rows to keep tree building tractable
    if NUM_SAMPLES < len(feats):
        feats, labels = ut.sample(feats, labels, NUM_SAMPLES)
    feats = feats.values
    # optionally keep a random subset of the feature columns
    if NUM_FEATS < len(feats[0]):
        idxs = np.random.choice(range(len(feats[0])), NUM_FEATS, replace=False)
        feats = feats[:, idxs]
    # stratified 80/20 train/test split
    trn_feats, tst_feats, trn_labels, tst_labels = train_test_split(feats,
                                                                    labels,
                                                                    test_size=0.20,
                                                                    stratify=labels)
    # build tree
    print("Building decision tree...")
    decision_tree = TreeNode()
    decision_tree.build_tree(trn_feats, trn_labels.values)
    print("Done!")
    print("Checking accuracy on training set...")
    predictions = []
    for sample in trn_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    # for checking only. must be 100% accuracy on training set
    print("Training Set Results:\n", classification_report(trn_labels, predictions))
    print("Using tree to predict labels...")
    predictions = []
    for sample in tst_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    print("Test Set Results:\n", classification_report(tst_labels, predictions))
    visualize_data(pd.DataFrame(tst_feats), tst_labels, predictions)
    # display confusion matrix
    print("Plotting confusion matrix...")
    plot_confusion_matrix(tst_labels, predictions, normalize=True)
    plt.show()
    return 0
# Script entry point: train and evaluate the decision tree.
if __name__ == "__main__":
    main()
| 3.375 | 3 |
codegen/codegen/fblas_routine.py | spcl/fblas | 68 | 17108 | <gh_stars>10-100
"""
FBlas Routine class: it used to represent a routine definition, specified by the user using JSON file.
It is used by the Host and Module Codegen (specified by the _codegen variable). Accordingly,
some class members could be invalid.
"""
from codegen import fblas_types
from codegen import generator_definitions
class FBLASRoutine:
    """One user-defined FBLAS routine (parsed from a JSON definition).

    Used by both the Host and Modules code generators (selected by
    ``_codegen``); depending on the generator some members are unused.
    """

    # name of the routine according to blas (without indication of the precision)
    _blas_name = ""
    # user name for the routine
    _user_name = ""
    # spatial parallelism (vectorization width)
    _width = generator_definitions.DEFAULT_WIDTH
    # data type used in routine
    _type: fblas_types.RoutineType
    _type_str: str
    # if the routine has to use shift registers (e.g. double precision) or not
    # and in case how big they should be
    _uses_shift_registers = False
    _size_shift_registers = 0
    # The type of codegen: Host/Modules
    _codegen = None
    # incx/incy
    _incx = 1
    _incy = 1
    # Level 2/3: tile sizes
    _tile_n_size = generator_definitions.DEFAULT_TILE_SIZE
    _tile_m_size = generator_definitions.DEFAULT_TILE_SIZE
    # Matrix characteristics
    _order = None
    _diag = None
    _transposeA = None
    _transposeB = None
    _side = None
    _uplo = None
    # input/output channels (useful for Module Codegen)
    # these are instance member dictionaries "required_channel_name" -> "user_name"
    _input_channels = None
    _output_channels = None
    # Tiles and element order (for level2/3 that works with matrices)
    # The order is RowMajor if tiles/element are row streamed
    # otherwise it is ColumnMajor
    _tiles_A_order: fblas_types.FblasOrder = fblas_types.FblasOrder.FblasRowMajor
    _elements_A_order: fblas_types.FblasOrder = fblas_types.FblasOrder.FblasRowMajor
    # Indicates whether or not this routine has a 2D computational tile (e.g. GEMM)
    _has_2D_computational_tile = False
    # If yes, these are the two vectorization widths
    _width_x = 0
    _width_y = 0
    _tile_size = 0
    _systolic = False
    _vect_size = 0

    def __init__(self, blas_name: str, user_name: str, type: fblas_types.RoutineType, platform: fblas_types.Platform, codegen: fblas_types.FblasCodegen):
        self._blas_name = blas_name
        self._user_name = user_name
        self._type = type
        self._type_str = fblas_types.ROUTINE_TYPE_TO_TYPE_STR[type]
        self._platform = platform
        self._codegen = codegen
        self._width = generator_definitions.DEFAULT_WIDTH
        # Declare all the instance variables
        self._input_channels = {}
        self._output_channels = {}
        self._incx = 1
        self._incy = 1
        self._tile_n_size = generator_definitions.DEFAULT_TILE_SIZE
        self._tile_m_size = generator_definitions.DEFAULT_TILE_SIZE
        self._order = fblas_types.FblasOrder.FblasOrderUndef
        self._diag = fblas_types.FblasDiag.FblasDiagUndef
        self._transposeA = fblas_types.FblasTranspose.FblasTransUndef
        self._transposeB = fblas_types.FblasTranspose.FblasTransUndef
        self._side = fblas_types.FblasSide.FblasSideUndef
        self._uplo = fblas_types.FblasUpLo.FblasUpLoUndef
        if type == fblas_types.RoutineType.Double:
            # double precision needs shift registers sized per platform
            self._uses_shift_registers = True
            self._size_shift_registers = fblas_types.SHIFT_REGISTER_SIZES[(type, platform)]
        else:
            self._uses_shift_registers = False
        self._has_2D_computational_tile = False
        # BUG FIX: the original chained assignments
        # ("self._width_x = self._width = ...") silently overwrote
        # self._width (already set to DEFAULT_WIDTH above) with
        # DEFAULT_2D_CTILE_WIDTH.  Only the 2D widths get that default.
        self._width_x = generator_definitions.DEFAULT_2D_CTILE_WIDTH
        self._width_y = generator_definitions.DEFAULT_2D_CTILE_WIDTH
        self._tile_size = generator_definitions.DEFAULT_TILE_SIZE
        self._systolic = False
        self._vect_size = 4

    def __str__(self):
        return """Routine {} implements {} with type {}
Width: {} Incx: {} Incy: {}""".format(self._user_name, self._blas_name, self._type, self._width, self._incx, self._incy)

    # Getters/setters
    @property
    def blas_name(self):
        return self._blas_name

    @property
    def user_name(self):
        return self._user_name

    @property
    def type(self):
        return self._type

    @property
    def type_str(self):
        return self._type_str

    @property
    def uses_shift_registers(self):
        return self._uses_shift_registers

    @uses_shift_registers.setter
    def uses_shift_registers(self, value: bool):
        # if the routine uses shift registers, also set their size
        self._uses_shift_registers = value
        if value:
            self._size_shift_registers = fblas_types.SHIFT_REGISTER_SIZES[(self.type, self._platform)]

    @property
    def size_shift_registers(self):
        return self._size_shift_registers

    @property
    def width(self):
        return self._width

    @width.setter
    def width(self, width: int):
        self._width = width

    @property
    def incx(self):
        return self._incx

    @incx.setter
    def incx(self, incx: int):
        self._incx = incx

    @property
    def incy(self):
        return self._incy

    @incy.setter
    def incy(self, incy: int):
        self._incy = incy

    @property
    def tile_n_size(self):
        return self._tile_n_size

    @tile_n_size.setter
    def tile_n_size(self, tile_size: int):
        self._tile_n_size = tile_size

    @property
    def tile_m_size(self):
        return self._tile_m_size

    @tile_m_size.setter
    def tile_m_size(self, tile_size: int):
        self._tile_m_size = tile_size

    @property
    def tile_size(self):
        return self._tile_size

    @tile_size.setter
    def tile_size(self, tile_size: int):
        self._tile_size = tile_size

    @property
    def order(self):
        return self._order

    @order.setter
    def order(self, order: fblas_types.FblasOrder):
        self._order = order

    @property
    def uplo(self):
        return self._uplo

    @uplo.setter
    def uplo(self, uplo: fblas_types.FblasUpLo):
        self._uplo = uplo

    @property
    def transposedA(self):
        return self._transposeA

    @transposedA.setter
    def transposedA(self, trans: fblas_types.FblasTranspose):
        self._transposeA = trans

    @property
    def transposedB(self):
        return self._transposeB

    @transposedB.setter
    def transposedB(self, trans: fblas_types.FblasTranspose):
        self._transposeB = trans

    @property
    def input_channels(self):
        return self._input_channels

    @property
    def output_channels(self):
        return self._output_channels

    @property
    def tiles_A_order(self):
        return self._tiles_A_order

    @tiles_A_order.setter
    def tiles_A_order(self, order: fblas_types.FblasOrder):
        self._tiles_A_order = order

    @property
    def elements_A_order(self):
        return self._elements_A_order

    @elements_A_order.setter
    def elements_A_order(self, order: fblas_types.FblasOrder):
        self._elements_A_order = order

    @property
    def has_2D_computational_tile(self):
        return self._has_2D_computational_tile

    @has_2D_computational_tile.setter
    def has_2D_computational_tile(self, value: bool):
        self._has_2D_computational_tile = value

    @property
    def width_x(self):
        return self._width_x

    @width_x.setter
    def width_x(self, width: int):
        self._width_x = width

    @property
    def width_y(self):
        return self._width_y

    @width_y.setter
    def width_y(self, width: int):
        self._width_y = width

    @property
    def systolic(self):
        return self._systolic

    @systolic.setter
    def systolic(self, value: bool):
        self._systolic = value

    @property
    def vect_size(self):
        return self._vect_size

    @vect_size.setter
    def vect_size(self, value: int):
        self._vect_size = value

    def are_tiles_A_rowstreamed(self):
        """
        :return: True if the tiles of A are rowstreamed
        """
        return self._tiles_A_order == fblas_types.FblasOrder.FblasRowMajor

    def are_elements_A_rowstreamed(self):
        """
        :return: True if the elements of A are rowstreamed
        """
        return self._elements_A_order == fblas_types.FblasOrder.FblasRowMajor

    def add_input_channel(self, routine_channel_name, user_name):
        '''
        Add the channel to the dictionary of input channels.
        If already present, it will be overwritten.
        '''
        self._input_channels[routine_channel_name] = user_name

    def add_output_channel(self, routine_channel_name, user_name):
        '''
        Add the channel to the dictionary of output channels.
        If already present, it will be overwritten.
        '''
        self._output_channels[routine_channel_name] = user_name
| 2.640625 | 3 |
lib/prefab/errors.py | lexsca/docker-prefab | 1 | 17109 | <filename>lib/prefab/errors.py
class PrefabError(Exception):
    """Root of the prefab exception hierarchy."""
class HashAlgorithmNotFound(PrefabError):
    """Prefab error: a requested hash algorithm was not found."""
    pass
class ImageAccessError(PrefabError):
    """Prefab error: an image could not be accessed."""
    pass
class ImageBuildError(PrefabError):
    """Prefab error: an image build failed."""
    pass
class ImageNotFoundError(PrefabError):
    """Prefab error: an image was not found."""
    pass
class ImagePushError(PrefabError):
    """Prefab error: pushing an image failed."""
    pass
class ImageValidationError(PrefabError):
    """Prefab error: an image failed validation."""
    pass
class InvalidConfigError(PrefabError):
    """Prefab error: the configuration is invalid."""
    pass
class TargetCyclicError(PrefabError):
    """Prefab error: a cycle was detected between targets."""
    pass
class TargetNotFoundError(PrefabError):
    """Prefab error: a target was not found."""
    pass
| 1.859375 | 2 |
test_hoyolab.py | c3kay/hoyolab-json-feed | 1 | 17110 | from hoyolab import main
from os import environ
from os.path import exists
import atoma
def init_environ(d):
    """Populate the HOYOLAB_* environment variables used by the feed writer,
    pointing the output files into directory *d*."""
    environ.update({
        'HOYOLAB_JSON_PATH': '{}/hoyolab.json'.format(d),
        'HOYOLAB_ATOM_PATH': '{}/hoyolab.xml'.format(d),
        'HOYOLAB_JSON_URL': 'hoyolab.json',
        'HOYOLAB_ATOM_URL': 'hoyolab.xml',
        'HOYOLAB_ENTRIES': '1',
    })
def test_feeds(tmpdir):
    """End-to-end check: main() writes JSON and Atom feeds containing the
    expected number of entries."""
    init_environ(tmpdir)
    json_path = environ['HOYOLAB_JSON_PATH']
    atom_path = environ['HOYOLAB_ATOM_PATH']
    # entry count per feed is tripled — presumably one batch per game;
    # TODO(review): confirm against hoyolab.main
    num_entries = int(environ['HOYOLAB_ENTRIES']) * 3
    main()
    assert exists(json_path)
    assert exists(atom_path)
    json_feed = atoma.parse_json_feed_file(json_path)
    assert len(json_feed.items) == num_entries
    atom_feed = atoma.parse_atom_file(atom_path)
    assert len(atom_feed.entries) == num_entries
| 2.34375 | 2 |
sausage_grinder/urls.py | jesseerdmann/audiobonsai | 0 | 17111 | from django.urls import path
from . import views as sg
# URL routes for the sausage_grinder app; each path maps to a view in
# sausage_grinder.views (imported above as `sg`).
urlpatterns = [
    path('artist', sg.artist),
    path('genre', sg.genre),
    path('release', sg.release),
    path('track', sg.track),
    # empty path: the app's index page
    path('', sg.sausage_grinder_index),
]
| 1.515625 | 2 |
sails/ui/mmck/parameters/string.py | metrasynth/solar-sails | 6 | 17112 | <filename>sails/ui/mmck/parameters/string.py
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QLineEdit
from sf.mmck.parameters import String
from .manager import widget_class_for
from .widget import ParameterWidget
@widget_class_for(String)
class StringParameterWidget(ParameterWidget):
    """Editor widget for a String MMCK parameter.

    Shows an editable combo box when the parameter declares choices,
    otherwise a plain line edit; either control forwards edits through
    ``valueChanged`` together with the parameter name.
    """

    def setUp(self, ui):
        super().setUp(ui)
        if self.parameter.choices:
            # editable combo: preset choices plus free-form text entry
            self.combobox = QComboBox(self)
            self.combobox.setEditable(True)
            # set current text before inserting items so the initial value
            # is preserved even if it is not one of the choices
            self.combobox.setCurrentText(self.value)
            self.combobox.insertItems(0, self.parameter.choices)
            self.combobox.currentTextChanged.connect(self.on_combobox_currentTextChanged)
            self.layout.addWidget(self.combobox)
        else:
            self.lineedit = QLineEdit(self)
            self.lineedit.setText(self.value)
            self.lineedit.textChanged.connect(self.on_lineedit_textChanged)
            self.layout.addWidget(self.lineedit)

    @pyqtSlot(str)
    def on_combobox_currentTextChanged(self, value):
        # re-emit with the parameter name so listeners know which changed
        self.valueChanged.emit(value, self.name)

    @pyqtSlot(str)
    def on_lineedit_textChanged(self, value):
        self.valueChanged.emit(value, self.name)
| 2.171875 | 2 |
plot_top_performers.py | jmphil09/mario_rl | 0 | 17113 | from FitnessPlot import FitnessPlot
'''
for n in range(1,6):
    plot = FitnessPlot(folder_prefix='data_top{}'.format(n))
    plot.plot_all_workers()
    plot.plot_workers_as_average()
'''

# Summarize the latest fitness value reached by each worker of the top run.
plot = FitnessPlot(folder_prefix='data_top1', num_workers=16)
worker_dict = plot.create_worker_dict()
#plot.plot_all_workers()
#plot.plot_workers_as_average()
for key, value in worker_dict.items():
    print(key)
    # last recorded fitness entry for this worker
    # (idiom fix: value[-1] instead of value[len(value)-1]; also removed
    # the unused `dict_len` local and dead commented-out length checks)
    print(value[-1])
| 2.890625 | 3 |
test/PySrc/tests/test_code_tracer_width.py | lifubang/live-py-plugin | 224 | 17114 | <gh_stars>100-1000
from space_tracer.main import replace_input, TraceRunner
def test_source_width_positive():
    """A positive --source_width truncates the source column to that width."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
i = 1 + | i = 2"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_width', '8',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_source_width_negative():
    """A negative --source_width trims that many characters off the source column."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
i = 1 + | i = 2"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_width', '-2',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_source_indent():
    """--source_indent pads the source column with that many leading spaces."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
    i = 1 + 1 | i = 2"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_indent', '4',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_source_indent_small():
    """A small --source_indent still shifts the source column right."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
  i = 1 + 1 | i = 2"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_indent', '2',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_source_indent_negative():
    """A negative --source_indent cuts leading characters off the source lines."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
= 1 + 1 | i = 2"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_indent', '-2',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_trace_width():
    """--trace_width limits the total report width, truncating the trace column."""
    code = """\
i = 1 + 1
"""
    expected_report = """\
i = 1 + 1 | i ="""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--trace_width', '15',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_trace_width_negative():
    """A negative --trace_width trims that many characters off the full width."""
    code = """\
i = 1 + 1
s = 'a' * 10
"""
    expected_report = """\
i = 1 + 1    | i = 2
s = 'a' * 10 | s = 'aaaaaa"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--trace_width', '-5',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_trace_width_without_source():
    """With --source_width 0 the report contains only the (trimmed) trace column."""
    code = """\
i = 1 + 1
s = 'a' * 10
"""
    expected_report = """\
i = 2
s = 'aaaaaa"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--source_width', '0',
                                             '--trace_width', '-5',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
def test_trace_offset():
    """--trace_offset skips leading characters of each trace line."""
    code = """\
i = 1 + 1
s = 'a' * 10
"""
    expected_report = """\
i = 1 + 1    | 2
s = 'a' * 10 | 'aaaaaaaaaa'"""
    with replace_input(code):
        report = TraceRunner().trace_command(['space_tracer',
                                             '--trace_offset', '3',
                                             '--traced_file', 'foo.py'])
    assert report == expected_report
| 2.765625 | 3 |
benchmark/src/benchmark/bench_logging.py | lwanfuturewei/QFlock | 0 | 17115 |
import logging
def setup_logger():
    """Configure root logging at INFO level with millisecond timestamps."""
    # Install a default stream handler on the root logger
    # (basicConfig is a no-op if handlers already exist).
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    # Swap in a formatter without the padded level-name field.
    millis_formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname)s %(message)s',
        '%Y-%m-%d %H:%M:%S',
    )
    logging.getLogger().handlers[0].setFormatter(millis_formatter)
| 2.59375 | 3 |
tools/configure-gateway/threescale/proxies.py | jparsai/f8a-3scale-connect-api | 1 | 17116 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ThreeScale Proxies Rule interface for APIs."""
from .base import ThreeScale
import logging
import requests
import xmltodict
import json
logger = logging.getLogger(__name__)
class Proxies(ThreeScale):
    """ThreeScale Proxies create, update.

    Wraps the 3scale Account Management API endpoints for a service's
    proxy configuration: update, policy update, and config promotion.
    On failure each call logs the response and triggers
    ``tracker._rollback()``.
    """

    # last successful API response (parsed XML dict or raw Response)
    response = None

    def __init__(self):
        """Initialize object."""
        super().__init__()
        self.service_id = None

    def update(self,
               tracker,
               service_id,
               api_backend,
               credentials_location='query',
               auth_app_key='user_key',
               endpoint=None,
               auth_app_id=None,
               auth_user_key=None,
               error_auth_failed=None,
               error_status_auth_failed=None,
               error_headers_auth_failed=None,
               error_auth_missing=None,
               error_status_auth_missing=None,
               error_headers_auth_missing=None,
               error_no_match=None,
               error_status_no_match=None,
               error_headers_no_match=None,
               oidc_issuer_endpoint=None,
               sandbox_endpoint=None
               ):
        """Update the proxy configuration of a service.

        Remembers ``service_id`` on the instance for subsequent calls.
        Returns the parsed XML response dict on success; on failure rolls
        back via ``tracker`` and returns None.
        """
        self.service_id = service_id
        request_body = {
            'access_token': self._access_token,
            "api_backend": api_backend,
            "credentials_location": credentials_location,
            "auth_app_key": auth_app_key,
            "endpoint": endpoint,
            "auth_app_id": auth_app_id,
            "auth_user_key": auth_user_key,
            "error_auth_failed": error_auth_failed,
            "error_status_auth_failed": error_status_auth_failed,
            "error_headers_auth_failed": error_headers_auth_failed,
            "error_auth_missing": error_auth_missing,
            "error_status_auth_missing": error_status_auth_missing,
            "error_headers_auth_missing": error_headers_auth_missing,
            "error_no_match": error_no_match,
            "error_status_no_match": error_status_no_match,
            "error_headers_no_match": error_headers_no_match,
            "oidc_issuer_endpoint": oidc_issuer_endpoint,
            "sandbox_endpoint": sandbox_endpoint,
        }
        # Drop unset fields.  NOTE(review): this also drops legitimate
        # falsy values (0, "") — confirm that is acceptable for this API.
        request_body = {k: v for k, v in request_body.items() if v}
        _url = self._build_url(
            self._endpoints.proxy_update.format(service_id=service_id))
        _resp = requests.patch(_url, data=request_body)
        logger.info("[PATCH] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = xmltodict.parse(
                _resp.content, dict_constructor=dict)
            logger.info(
                "Successfully Updated Proxy: {}".format(api_backend))
            return self.response
        else:
            logger.error("Update Proxy FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def _get_highest_version(self, service_id=None, environment='sandbox'):
        """Return the highest proxy-config version for the service in the
        given environment; falls back to 2 when the list cannot be fetched
        or is empty."""
        service_id = service_id or self.service_id
        params = {
            'access_token': self._access_token,
        }
        _url = self._build_url(
            self._endpoints.proxy_config_list.format(service_id=service_id,
                                                     environment=environment))
        _resp = requests.get(_url, params=params)
        logger.info("[GET] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            output = _resp.json()
            if output:
                higest_version = max([conf.get('proxy_config', {}).get('version', 2)
                                      for conf in output.get('proxy_configs', {})])
                logger.info("HIGHEST Version: {}".format(higest_version))
                return higest_version
        else:
            logger.error("Unable to fetch the latest version.")
            return 2

    def policy_update(self, tracker, headers, service_id=None):
        """Update the Proxy Policy Configuration.

        Installs a builtin "headers" policy that applies ``headers`` to
        every request. Returns the raw response on success.
        """
        policies_config = [{
            "name": "headers",
            "configuration": {
                "response": [],
                "request": headers},
            "version": "builtin",
            "enabled": True
        }]
        service_id = service_id or self.service_id
        request_body = {
            'access_token': self._access_token,
            'service_id': service_id,
            'policies_config': json.dumps(policies_config)
        }
        _url = self._build_url(
            self._endpoints.proxy_policy_update.format(service_id=service_id))
        _resp = requests.put(_url, data=request_body)
        logger.info("[PUT] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = _resp
            logger.info("Successfully Updated Proxy Policy Config")
            return self.response
        else:
            logger.error("Update Proxy Policy Config FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def proxy_promote(self, tracker,
                      service_id=None,
                      environment='sandbox',
                      to='production'):
        """Promote the latest proxy configuration to another environment."""
        service_id = service_id or self.service_id
        version = self._get_highest_version()
        request_body = {
            'access_token': self._access_token,
            'to': to
        }
        _url = self._build_url(
            self._endpoints.proxy_config_promote.format(service_id=service_id,
                                                        environment=environment,
                                                        version=version))
        _resp = requests.post(_url, data=request_body)
        logger.info("[POST] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = _resp
            logger.info("Successfully Promoted Proxy to {}".format(to))
            return self.response
        else:
            logger.error("Promote Proxy FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def find(self):
        """Find the proxy (not implemented)."""
        raise NotImplementedError("Method find Not Implemented.")

    def __repr__(self):
        """Representation of class."""
        api_backend = self.response.get('proxy', {}).get('api_backend')
        # FIX: the original returned "Class Mappings(id={})" — a copy-paste
        # from a sibling Mappings class — which misrepresented this object.
        return "Class Proxies(api_backend={})".format(api_backend)
| 1.992188 | 2 |
src/__main__.py | Grox2006/Kayambot | 0 | 17117 | <gh_stars>0
import sys
from __init__ import Bot
# FIX: the usage string used a %s placeholder but was rendered with
# str.format(), so the program name was never substituted.
MESSAGE_USAGE = "Usage is python {} [name] [token]"

if __name__ == "__main__":
    # Expect exactly: <script> <name> <token>
    if len(sys.argv) == 3:
        Bot(sys.argv[1], sys.argv[2])
    else:
        print(MESSAGE_USAGE.format(sys.argv[0]))
| 2.6875 | 3 |
app/__init__.py | logicalicy/flask-react-boilerplate | 2 | 17118 | # Created with tutorials:
# https://www.digitalocean.com/community/tutorials/how-to-structure-large-flask-applications
# http://flask.pocoo.org/docs/0.12/tutorial
from flask import Flask, g, render_template
from flask_sqlalchemy import SQLAlchemy
import sqlite3
# Define WSGI application object.
app = Flask(__name__)

# Configurations
app.config.from_object('config')
# Optional environment-specific override file, named by the CONFIG env var;
# silent=True means a missing/unset CONFIG is ignored.
app.config.from_envvar('CONFIG', silent=True)

# Define database object.
db = SQLAlchemy(app)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for unknown routes."""
    return render_template('404.html'), 404
# Import a module / component using its blueprint handler variable (mod_auth)
from app.api.entries.controllers import mod as entries_module
from app.site.controllers import mod as site_module

# Register blueprint(s)
app.register_blueprint(entries_module)
app.register_blueprint(site_module)
# app.register_blueprint(xyz_module)
# ..

# Build the database:
# This will create the database file using SQLAlchemy
# NOTE(review): this runs at import time as a module side effect.
db.create_all()
| 2.59375 | 3 |
dumpcode/cpiter.py | gkfthddk/keras | 0 | 17119 | <reponame>gkfthddk/keras
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import subprocess
import numpy as np
import datetime
import random
import warnings
import ROOT as rt
import math
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import Callback
from array import array
from sklearn.metrics import roc_auc_score, auc, roc_curve
class AddVal(Callback):
    """Keras callback that evaluates extra validation sets each epoch.

    For every ``(data, name)`` pair in ``valid_sets`` it records loss,
    metrics, and ROC-AUC under ``<name>_*`` keys in ``self.history`` and
    dumps the accumulated history to ``<savename>/history`` as text.
    """

    def __init__(self, valid_sets, savename):
        # valid_sets: iterable of ((X, Y), name) validation pairs
        self.valid_sets = valid_sets
        self.epoch = []
        self.history = {}
        self.savename = savename

    def on_train_begin(self, logs=None):
        # reset accumulated history at the start of each training run
        self.epoch = []
        self.history = {}

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epoch.append(epoch)
        print("validation")
        # fold Keras' own epoch logs into the history
        for i, j in logs.items():
            self.history.setdefault(i, []).append(j)
        for valid_set in self.valid_sets:
            valid, val_name = valid_set
            #valid.reset()
            #gen=valid.next()
            #tar_set=[]
            #pre_set=[]
            atar_set = []
            apre_set = []
            X, Y = valid
            #X=X[0]
            """for j in range(valid.totalnum()):
                data,target=next(gen)
                #print(target)
                #tar_set=np.append(tar_set,target[:,0])
                #pre_set=np.append(pre_set,self.model.predict(data,verbose=0)[:,0])
                try:atar_set.extend(target[:,0])
                except:print(np.array(target).shape)
                apre_set.extend(self.model.predict(data,verbose=0)[:,0])
            valid.reset()"""
            #tar_set=np.array(tar_set)
            #pre_set=np.array(pre_set)
            # first column of targets/predictions is used as the
            # positive-class score for ROC-AUC
            atar_set = np.array(Y)[:, 0]
            apre_set = np.array(self.model.predict(X, verbose=0)[:, 0])
            #print(valid.totalnum(),valid.batch_size)
            #print("############")
            #print(tar_set)
            #print("AAAAAAAAAAAAAAAAAAAA")
            #print(atar_set)
            auc_val = roc_auc_score(atar_set, apre_set)
            results = self.model.evaluate(X, Y)
            print(results, auc_val)
            self.history.setdefault(val_name + "_auc", []).append(auc_val)
            # results[0] is the loss; the rest align with model.metrics
            # (keyed by the first three letters of each metric name)
            for i, result in enumerate(results):
                if(i == 0):
                    name = val_name + "_loss"
                else:
                    name = val_name + "_" + self.model.metrics[i - 1][:3]
                self.history.setdefault(name, []).append(result)
            # persist history after every validation set
            f = open(self.savename + '/history', 'w')
            f.write(str(self.history))
            f.close()
class wkiter(object):
def __init__(self,data_path,data_names=['data'],label_names=['softmax_label'],batch_size=100,begin=0.0,end=1.0,rat=0.7,endcut=1,arnum=16,maxx=0.4,maxy=0.4,istrain=0, varbs=0,rc="rc",onehot=0,channel=64,order=1,eta=0.,etabin=2.4,pt=None,ptmin=0.,ptmax=2.,unscale=0):
    """Load quark/gluon jet samples from two ROOT files and pre-build arrays.

    data_path: pair of ROOT file paths, (quark_file, gluon_file).
    begin/end: entry range, as an absolute index (>1) or a fraction (<=1).
    rc: which representations to build -- "c" adds 33x33 image channels,
        "r" adds per-daughter-particle sequences for RNN input.
    eta/etabin, pt/ptmin/ptmax: kinematic selection cuts applied per jet.
    unscale: when 1, skip per-jet max-normalisation of the channels.

    NOTE(review): mutable default arguments (data_names, label_names) are
    shared across calls -- confirm they are never mutated.
    """
    self.eta=eta
    self.pt=pt
    self.ptmin=ptmin
    self.ptmax=ptmax
    self.etabin=etabin
    self.channel=channel
    self.istrain=istrain
    self.unscale=unscale
    #if(batch_size<100):
    self.rand=0.5
    # print("batch_size is small it might cause error")
    self.count=0
    self.rc=rc
    self.onehot=onehot
    # NOTE(review): the `order` argument is ignored; self.order is
    # hard-coded to 1 -- confirm this is intended.
    self.order=1
    #self.file=rt.TFile(data_path,'read')
    # Open the two input files: index 0 = quark sample, index 1 = gluon sample.
    dataname1=data_path[0]
    dataname2=data_path[1]
    self.qfile=rt.TFile(dataname1,'read')
    self.gfile=rt.TFile(dataname2,'read')
    print(dataname2)
    # --- Gluon tree: resolve the [begin, end) entry window. ---
    self.gjet=self.gfile.Get("jetAnalyser")
    self.gEntries=self.gjet.GetEntriesFast()
    if(begin>1):
        self.gBegin=int(begin)
    else:
        self.gBegin=int(begin*self.gEntries)
    if(end>1):
        self.gEnd=int(end)
    else:
        self.gEnd=int(self.gEntries*end)
    self.a=self.gBegin
    # --- Quark tree: same window logic. ---
    self.qjet=self.qfile.Get("jetAnalyser")
    self.qEntries=self.qjet.GetEntriesFast()
    if(begin>1):
        self.qBegin=int(begin)
    else:
        self.qBegin=int(begin*self.qEntries)
    if(end>1):
        self.qEnd=int(end)
    else:
        self.qEnd=int(self.qEntries*end)
    self.b=self.qBegin
    self.ratt=rat
    self.rat=sorted([1-rat,rat])
    self.batch_size = batch_size
    # NOTE(review): zip() returns a one-shot iterator on Python 3; callers
    # of provide_data can only consume it once -- confirm that is acceptable.
    if(varbs==0):
        self._provide_data = zip(data_names, [(self.batch_size, 3, 33, 33)])
    else:
        data_names=['images','variables']
        self._provide_data = zip(data_names, [(self.batch_size, 3, 33, 33),(self.batch_size,5)])
    self.varbs=varbs
    self._provide_label = zip(label_names, [(self.batch_size,)])
    self.arnum=arnum
    self.maxx=maxx
    self.maxy=maxy
    self.endfile=0
    self.endcut=endcut
    # Accumulators for per-jet features; q* = quark sample, g* = gluon sample.
    qjetset=[]
    gjetset=[]
    qrnnset=[]
    grnnset=[]
    qptset=[]
    gptset=[]
    qetaset=[]
    getaset=[]
    qchadmultset=[]
    gchadmultset=[]
    qnhadmultset=[]
    gnhadmultset=[]
    qelectronmultset=[]
    gelectronmultset=[]
    qmuonmultset=[]
    gmuonmultset=[]
    qphotonmultset=[]
    gphotonmultset=[]
    qcmultset=[]
    gcmultset=[]
    qnmultset=[]
    gnmultset=[]
    qptdset=[]
    gptdset=[]
    qmajorset=[]
    gmajorset=[]
    qminorset=[]
    gminorset=[]
    # ---------------- Gluon loop: read, select, normalise. ----------------
    for i in range(self.gEntries):
        if(self.a>=self.gEnd):
            self.a=self.gBegin
            break
        #if((self.a-self.gBegin)%int((self.gEnd-self.gBegin)/10)==0):print('.')
        self.gjet.GetEntry(self.a)
        ##label q=1 g=0
        self.a+=1
        # Kinematic selection: keep jets inside the [eta, eta+etabin] band
        # and (optionally) inside the [pt*ptmin, pt*ptmax] window.
        if(self.eta>abs(self.gjet.eta) or self.eta+self.etabin<abs(self.gjet.eta)):
            continue
        if(self.pt!=None):
            if(self.pt*self.ptmin>self.gjet.pt or self.pt*self.ptmax<self.gjet.pt):
                continue
        # Scalar per-jet observables.
        gptset.append(self.gjet.pt)
        getaset.append(self.gjet.eta)
        gchadmultset.append(self.gjet.chad_mult)
        gnhadmultset.append(self.gjet.nhad_mult)
        gelectronmultset.append(self.gjet.electron_mult)
        gmuonmultset.append(self.gjet.muon_mult)
        gphotonmultset.append(self.gjet.photon_mult)
        gcmultset.append(self.gjet.chad_mult+self.gjet.electron_mult+self.gjet.muon_mult)
        gnmultset.append(self.gjet.nhad_mult+self.gjet.photon_mult)
        gptdset.append(self.gjet.ptd)
        gmajorset.append(self.gjet.major_axis)
        gminorset.append(self.gjet.minor_axis)
        if("c" in self.rc):
            # Per-channel maxima for max-normalisation of the 33x33 images;
            # forced to 1. when unscale is requested or the channel is empty
            # (avoids division by zero).
            maxchadpt=1.*max(self.gjet.image_chad_pt_33)
            maxnhadpt=1.*max(self.gjet.image_nhad_pt_33)
            maxelecpt=1.*max(self.gjet.image_electron_pt_33)
            maxmuonpt=1.*max(self.gjet.image_muon_pt_33)
            maxphotonpt=1.*max(self.gjet.image_photon_pt_33)
            maxchadmult=1.*max(self.gjet.image_chad_mult_33)
            maxnhadmult=1.*max(self.gjet.image_nhad_mult_33)
            maxelecmult=1.*max(self.gjet.image_electron_mult_33)
            maxmuonmult=1.*max(self.gjet.image_muon_mult_33)
            maxphotonmult=1.*max(self.gjet.image_photon_mult_33)
            if(self.unscale==1 or maxchadpt==0):maxchadpt=1.
            if(self.unscale==1 or maxnhadpt==0):maxnhadpt=1.
            if(self.unscale==1 or maxelecpt==0):maxelecpt=1.
            if(self.unscale==1 or maxmuonpt==0):maxmuonpt=1.
            if(self.unscale==1 or maxphotonpt==0):maxphotonpt=1.
            if(self.unscale==1 or maxchadmult==0):maxchadmult=1.
            if(self.unscale==1 or maxnhadmult==0):maxnhadmult=1.
            if(self.unscale==1 or maxelecmult==0):maxelecmult=1.
            if(self.unscale==1 or maxmuonmult==0):maxmuonmult=1.
            if(self.unscale==1 or maxphotonmult==0):maxphotonmult=1.
            # Ten (2*arnum+1)x(2*arnum+1) image channels per jet.
            gjetset.append([(np.array(self.gjet.image_chad_pt_33)/maxchadpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_nhad_pt_33)/maxnhadpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_electron_pt_33)/maxelecpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_muon_pt_33)/maxmuonpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_photon_pt_33)/maxphotonpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_chad_mult_33)/maxchadmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_nhad_mult_33)/maxnhadmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_electron_mult_33)/maxelecmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_muon_mult_33)/maxmuonmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.gjet.image_photon_mult_33)/maxphotonmult).reshape(2*arnum+1,2*arnum+1)])
        if("r" in self.rc):
            # Per-daughter sequences for RNN input; one-hot PDG id flags.
            dau_pt=self.gjet.dau_pt
            dau_deta=self.gjet.dau_deta
            dau_dphi=self.gjet.dau_dphi
            dau_charge=self.gjet.dau_charge
            dau_pid=self.gjet.dau_pid
            dau_is_e=np.zeros(len(dau_pid))
            dau_is_mu=np.zeros(len(dau_pid))
            dau_is_r=np.zeros(len(dau_pid))
            dau_is_chad=np.zeros(len(dau_pid))
            dau_is_nhad=np.zeros(len(dau_pid))
            for t in range(len(dau_pid)):
                if(abs(dau_pid[t])==11):dau_is_e[t]=1.
                elif(abs(dau_pid[t])==13):dau_is_mu[t]=1.
                elif(abs(dau_pid[t])==22):dau_is_r[t]=1.
                elif(dau_charge[t]==0):dau_is_nhad[t]=1.
                else:dau_is_chad[t]=1.
            # Sort daughters by descending pt.
            dausort=sorted(range(len(dau_pt)),key=lambda k: dau_pt[k],reverse=True)
            # NOTE(review): the max* values below are only defined when
            # self.order is truthy; self.order is hard-coded to 1 above, so
            # the append that uses them never sees them undefined.
            if(self.order):
                maxdaupt=1.*max(dau_pt)
                maxdaudeta=1.*max(dau_deta)
                maxdaudphi=1.*max(dau_dphi)
                maxdaucharge=1.*max(dau_charge)
                maxdauc=1.*max(dau_is_chad)
                maxdaun=1.*max(dau_is_nhad)
                maxdaue=1.*max(dau_is_e)
                maxdaum=1.*max(dau_is_mu)
                maxdaup=1.*max(dau_is_r)
                if(self.unscale==1 or maxdaupt==0):maxdaupt=1.
                if(self.unscale==1 or maxdaudeta==0):maxdaudeta=1.
                if(self.unscale==1 or maxdaudphi==0):maxdaudphi=1.
                if(self.unscale==1 or maxdaucharge==0):maxdaucharge=1.
                if(self.unscale==1 or maxdauc==0):maxdauc=1.
                if(self.unscale==1 or maxdaun==0):maxdaun=1.
                if(self.unscale==1 or maxdaue==0):maxdaue=1.
                if(self.unscale==1 or maxdaum==0):maxdaum=1.
                if(self.unscale==1 or maxdaup==0):maxdaup=1.
            # Fixed-length sequence of self.channel daughters, zero-padded.
            grnnset.append([[dau_pt[dausort[i]]/maxdaupt, dau_deta[dausort[i]]/maxdaudeta, dau_dphi[dausort[i]]/maxdaudphi, dau_charge[dausort[i]]/maxdaucharge, dau_is_e[dausort[i]]/maxdaue, dau_is_mu[dausort[i]]/maxdaum, dau_is_r[dausort[i]]/maxdaup, dau_is_chad[dausort[i]]/maxdauc, dau_is_nhad[dausort[i]]/maxdaun] if len(dau_pt)>i else [0.,0.,0.,0.,0.,0.,0.,0.,0.] for i in range(self.channel)])
    # Freeze the gluon accumulators as numpy arrays; free the list copies.
    self.gjetset=np.array(gjetset)
    del gjetset
    self.grnnset=np.array(grnnset)
    del grnnset
    self.gptset=np.array(gptset)
    del gptset
    self.getaset=np.array(getaset)
    del getaset
    self.gptdset=np.array(gptdset)
    del gptdset
    self.gchadmultset=np.array(gchadmultset)
    del gchadmultset
    self.gnhadmultset=np.array(gnhadmultset)
    del gnhadmultset
    self.gcmultset=np.array(gcmultset)
    del gcmultset
    self.gnmultset=np.array(gnmultset)
    del gnmultset
    self.gelectronmultset=np.array(gelectronmultset)
    del gelectronmultset
    self.gmuonmultset=np.array(gmuonmultset)
    del gmuonmultset
    self.gphotonmultset=np.array(gphotonmultset)
    del gphotonmultset
    self.gmajorset=np.array(gmajorset)
    del gmajorset
    self.gminorset=np.array(gminorset)
    del gminorset
    # ---------------- Quark loop: identical processing on the quark tree. ----------------
    for i in range(self.qEntries):
        if(self.b>=self.qEnd):
            self.b=self.qBegin
            break
        #if((self.b-self.qBegin)%int((self.qEnd-self.qBegin)/10)==0):print(',')
        self.qjet.GetEntry(self.b)
        ##label q=1 g=0
        self.b+=1
        if(self.eta>abs(self.qjet.eta) or self.eta+self.etabin<abs(self.qjet.eta)):
            continue
        if(self.pt!=None):
            if(self.pt*self.ptmin>self.qjet.pt or self.pt*self.ptmax<self.qjet.pt):
                continue
        qptset.append(self.qjet.pt)
        qetaset.append(self.qjet.eta)
        qchadmultset.append(self.qjet.chad_mult)
        qnhadmultset.append(self.qjet.nhad_mult)
        qelectronmultset.append(self.qjet.electron_mult)
        qmuonmultset.append(self.qjet.muon_mult)
        qphotonmultset.append(self.qjet.photon_mult)
        qcmultset.append(self.qjet.chad_mult+self.qjet.electron_mult+self.qjet.muon_mult)
        qnmultset.append(self.qjet.nhad_mult+self.qjet.photon_mult)
        qptdset.append(self.qjet.ptd)
        qmajorset.append(self.qjet.major_axis)
        qminorset.append(self.qjet.minor_axis)
        if("c" in self.rc):
            maxchadpt=1.*max(self.qjet.image_chad_pt_33)
            maxnhadpt=1.*max(self.qjet.image_nhad_pt_33)
            maxelecpt=1.*max(self.qjet.image_electron_pt_33)
            maxmuonpt=1.*max(self.qjet.image_muon_pt_33)
            maxphotonpt=1.*max(self.qjet.image_photon_pt_33)
            maxchadmult=1.*max(self.qjet.image_chad_mult_33)
            maxnhadmult=1.*max(self.qjet.image_nhad_mult_33)
            maxelecmult=1.*max(self.qjet.image_electron_mult_33)
            maxmuonmult=1.*max(self.qjet.image_muon_mult_33)
            maxphotonmult=1.*max(self.qjet.image_photon_mult_33)
            if(self.unscale==1 or maxchadpt==0):maxchadpt=1.
            if(self.unscale==1 or maxnhadpt==0):maxnhadpt=1.
            if(self.unscale==1 or maxelecpt==0):maxelecpt=1.
            if(self.unscale==1 or maxmuonpt==0):maxmuonpt=1.
            if(self.unscale==1 or maxphotonpt==0):maxphotonpt=1.
            if(self.unscale==1 or maxchadmult==0):maxchadmult=1.
            if(self.unscale==1 or maxnhadmult==0):maxnhadmult=1.
            if(self.unscale==1 or maxelecmult==0):maxelecmult=1.
            if(self.unscale==1 or maxmuonmult==0):maxmuonmult=1.
            if(self.unscale==1 or maxphotonmult==0):maxphotonmult=1.
            qjetset.append([(np.array(self.qjet.image_chad_pt_33)/maxchadpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_nhad_pt_33)/maxnhadpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_electron_pt_33)/maxelecpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_muon_pt_33)/maxmuonpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_photon_pt_33)/maxphotonpt).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_chad_mult_33)/maxchadmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_nhad_mult_33)/maxnhadmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_electron_mult_33)/maxelecmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_muon_mult_33)/maxmuonmult).reshape(2*arnum+1,2*arnum+1),(np.array(self.qjet.image_photon_mult_33)/maxphotonmult).reshape(2*arnum+1,2*arnum+1)])
        if("r" in self.rc):
            dau_pt=self.qjet.dau_pt
            dau_deta=self.qjet.dau_deta
            dau_dphi=self.qjet.dau_dphi
            dau_charge=self.qjet.dau_charge
            dau_pid=self.qjet.dau_pid
            dau_is_e=np.zeros(len(dau_pid))
            dau_is_mu=np.zeros(len(dau_pid))
            dau_is_r=np.zeros(len(dau_pid))
            dau_is_chad=np.zeros(len(dau_pid))
            dau_is_nhad=np.zeros(len(dau_pid))
            for t in range(len(dau_pid)):
                if(abs(dau_pid[t])==11):dau_is_e[t]=1.
                elif(abs(dau_pid[t])==13):dau_is_mu[t]=1.
                elif(abs(dau_pid[t])==22):dau_is_r[t]=1.
                elif(dau_charge[t]==0):dau_is_nhad[t]=1.
                else:dau_is_chad[t]=1.
            dausort=sorted(range(len(dau_pt)),key=lambda k: dau_pt[k],reverse=True)
            #dauset.append([[dau_pt[dausort[i]], dau_deta[dausort[i]], dau_dphi[dausort[i]], dau_charge[dausort[i]]] if len(dau_pt)>i else [0.,0.,0.,0.] for i in range(20)])
            if(self.order):
                maxdaupt=1.*max(dau_pt)
                maxdaudeta=1.*max(dau_deta)
                maxdaudphi=1.*max(dau_dphi)
                maxdaucharge=1.*max(dau_charge)
                maxdauc=1.*max(dau_is_chad)
                maxdaun=1.*max(dau_is_nhad)
                maxdaue=1.*max(dau_is_e)
                maxdaum=1.*max(dau_is_mu)
                maxdaup=1.*max(dau_is_r)
                if(self.unscale==1 or maxdaupt==0):maxdaupt=1.
                if(self.unscale==1 or maxdaudeta==0):maxdaudeta=1.
                if(self.unscale==1 or maxdaudphi==0):maxdaudphi=1.
                if(self.unscale==1 or maxdaucharge==0):maxdaucharge=1.
                if(self.unscale==1 or maxdauc==0):maxdauc=1.
                if(self.unscale==1 or maxdaun==0):maxdaun=1.
                if(self.unscale==1 or maxdaue==0):maxdaue=1.
                if(self.unscale==1 or maxdaum==0):maxdaum=1.
                if(self.unscale==1 or maxdaup==0):maxdaup=1.
            qrnnset.append([[dau_pt[dausort[i]]/maxdaupt, dau_deta[dausort[i]]/maxdaudeta, dau_dphi[dausort[i]]/maxdaudphi, dau_charge[dausort[i]]/maxdaucharge, dau_is_e[dausort[i]]/maxdaue, dau_is_mu[dausort[i]]/maxdaum, dau_is_r[dausort[i]]/maxdaup, dau_is_chad[dausort[i]]/maxdauc, dau_is_nhad[dausort[i]]/maxdaun] if len(dau_pt)>i else [0.,0.,0.,0.,0.,0.,0.,0.,0.] for i in range(self.channel)])
    # Freeze the quark accumulators.
    self.qjetset=np.array(qjetset)
    del qjetset
    self.qrnnset=np.array(qrnnset)
    del qrnnset
    self.qptset=np.array(qptset)
    del qptset
    self.qetaset=np.array(qetaset)
    del qetaset
    self.qptdset=np.array(qptdset)
    del qptdset
    self.qchadmultset=np.array(qchadmultset)
    del qchadmultset
    self.qnhadmultset=np.array(qnhadmultset)
    del qnhadmultset
    self.qcmultset=np.array(qcmultset)
    del qcmultset
    self.qnmultset=np.array(qnmultset)
    del qnmultset
    self.qelectronmultset=np.array(qelectronmultset)
    del qelectronmultset
    self.qmuonmultset=np.array(qmuonmultset)
    del qmuonmultset
    self.qphotonmultset=np.array(qphotonmultset)
    del qphotonmultset
    self.qmajorset=np.array(qmajorset)
    del qmajorset
    self.qminorset=np.array(qminorset)
    del qminorset
    """if("r" in self.rc):
        for c in range(channel):
            for i in range(3):
                #std=np.std(abs(np.append(self.qjetset[:,c,i],self.gjetset[:,c,i])))
                #mean=np.mean(abs(np.append(self.qjetset[:,c,i],self.gjetset[:,c,i])))
                self.qjetset[:,c,i]=(self.qjetset[:,c,i])#/mean
                self.gjetset[:,c,i]=(self.gjetset[:,c,i])#/mean
    """
    self.reset()
    #print("length ",len(self.gjetset),len(self.qjetset))
def __iter__(self):
    # Iterator protocol: the object is its own iterator.
    return self
def reset(self):
    """Rewind both samples to the start of their entry windows (new epoch)."""
    # Epoch bookkeeping.
    self.rand = 0.5
    self.count = 0
    self.endfile = 0
    # Point each tree back at the first selected entry and reset cursors.
    self.gjet.GetEntry(self.gBegin)
    self.a = self.gBegin
    self.qjet.GetEntry(self.qBegin)
    self.b = self.qBegin
def __next__(self):
    # Python 3 iterator protocol; delegates to the py2-style next().
    # NOTE(review): next() below is a generator function (it contains
    # `yield`), so this returns a generator object rather than a batch --
    # confirm callers expect that.
    return self.next()
@property
def provide_data(self):
    # (name, shape) pairs describing the input arrays, as built in __init__.
    # NOTE(review): this is a zip object on Python 3 and can be consumed
    # only once -- verify against callers.
    return self._provide_data
@property
def provide_label(self):
    # (name, shape) pairs describing the label arrays, as built in __init__.
    return self._provide_label
def close(self):
    """Close both input ROOT files.

    Bug fix: the original called ``self.file.Close()``, but ``self.file``
    is never assigned in ``__init__`` (the line creating it is commented
    out); only ``self.qfile`` and ``self.gfile`` are opened. The original
    therefore raised AttributeError on every call.
    """
    self.qfile.Close()
    self.gfile.Close()
def sampleallnum(self):
    """Return the total number of entries across both input trees.

    Bug fix: the original returned ``self.Entries``, an attribute that is
    never set (``__init__`` defines ``gEntries`` and ``qEntries``), so the
    call raised AttributeError.
    """
    return self.gEntries + self.qEntries
def trainnum(self):
    """Return the number of entries inside the selected windows.

    Bug fix: the original returned ``self.End - self.Begin``; neither
    attribute exists (``__init__`` defines the per-sample ``gBegin``/
    ``gEnd`` and ``qBegin``/``qEnd``), so the call raised AttributeError.
    """
    return (self.gEnd - self.gBegin) + (self.qEnd - self.qBegin)
def totalnum(self):
    """Number of batches needed for one full pass over both samples."""
    total_entries = (self.gEnd - self.gBegin) + (self.qEnd - self.qBegin)
    return int(math.ceil(total_entries / float(self.batch_size)))
def next(self):
    """Generator yielding (data, label) batches mixed 50/50 from the two samples.

    Each batch draws jets at random from the gluon (label [0,1]) and quark
    (label [1,0]) arrays until batch_size is reached or either sample is
    exhausted, at which point the iterator resets for the next epoch.
    NOTE(review): `variables` is built but never used -- confirm dead code.
    """
    while self.endfile==0:
        self.count+=1
        arnum=self.arnum
        jetset=[]
        variables=[]
        labels=[]
        for i in range(self.batch_size):
            # Coin flip decides which sample the next jet comes from.
            if(random.random()<0.5):
                if(self.a-self.gBegin>=len(self.gjetset)):
                    # Gluon sample exhausted: mark end of epoch.
                    self.a=self.gBegin
                    self.endfile=1
                    break
                labels.append([0,1])
                jetset.append(self.gjetset[self.a-self.gBegin])
                self.a+=1
            else:
                if(self.b-self.qBegin>=len(self.qjetset)):
                    # Quark sample exhausted: mark end of epoch.
                    self.b=self.qBegin
                    self.endfile=1
                    break
                labels.append([1,0])
                jetset.append(self.qjetset[self.b-self.qBegin])
                self.b+=1
        data=[]
        data.append(np.array(jetset))
        label=np.array(labels)
        #if(self.totalnum()<=self.count):
        #    if(self.istrain==1):print "\nreset\n"
        #    self.reset()
        if(self.endfile==1):
            #print "\nendd\n"
            self.reset()
        #print "\n",self.count,self.istrain,"\n"
        # NOTE(review): the final (possibly short) batch is still yielded
        # after reset() -- confirm consumers tolerate a partial batch.
        yield data, label
        #else:
        #if(self.istrain==1):
        #    print "\n",datetime.datetime.now()
        #raise StopIteration
| 2.0625 | 2 |
tools/opt.py | hmtrii/tirg | 0 | 17120 | <gh_stars>0
class Opt:
    """Default run configuration for the Fashion200k retrieval experiment."""

    #: Default settings applied to every new instance.
    _DEFAULTS = {
        'dataset': "fashion200k",
        'dataset_path': "./dataset/Fashion200k",
        'batch_size': 32,
        'embed_dim': 512,
        'hashing': False,
        'retrieve_by_random': True,
    }

    def __init__(self):
        # Materialise every default as an instance attribute.
        for option_name, option_value in self._DEFAULTS.items():
            setattr(self, option_name, option_value)
addons/iap_crm/models/crm_lead.py | SHIVJITH/Odoo_Machine_Test | 0 | 17121 | <filename>addons/iap_crm/models/crm_lead.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Lead(models.Model):
    # Extend the CRM lead model with the technical id returned by the
    # IAP "reveal" service, so a lead can be correlated with its request.
    _inherit = 'crm.lead'

    reveal_id = fields.Char(string='Reveal ID', help="Technical ID of reveal request done by IAP.")
| 1.273438 | 1 |
vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py | openstack/vitrage | 89 | 17122 | <filename>vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py
# Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from testtools import matchers
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.kubernetes.properties import KUBERNETES_DATASOURCE
from vitrage.datasources.kubernetes.properties import KubernetesProperties \
as kubProp
from vitrage.datasources.kubernetes.transformer import KubernetesTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests import base
from vitrage.tests.mocks import mock_driver as mock_sync
from vitrage.tests.mocks import utils
LOG = logging.getLogger(__name__)
cluster_name = 'kubernetes'
class KubernetesTransformerTest(base.BaseTest):
    """Unit tests for the Kubernetes datasource transformer."""

    # Config options registered for the kubernetes datasource under test.
    OPTS = [
        cfg.StrOpt(DSOpts.UPDATE_METHOD,
                   default=UpdateMethod.PULL),
        cfg.StrOpt(DSOpts.CONFIG_FILE,
                   default=utils.get_resources_dir() +
                   '/kubernetes/kubernetes_config.yaml'),
    ]

    # noinspection PyAttributeOutsideInit,PyPep8Naming
    @classmethod
    def setUpClass(cls):
        super(KubernetesTransformerTest, cls).setUpClass()
        # Shared transformer registry: the k8s transformer needs the nova
        # instance transformer to build neighbor placeholders.
        cls.transformers = {}
        cls.transformers[KUBERNETES_DATASOURCE] = KubernetesTransformer(
            cls.transformers)
        cls.transformers[NOVA_INSTANCE_DATASOURCE] = \
            InstanceTransformer(cls.transformers)

    def setUp(self):
        super(KubernetesTransformerTest, self).setUp()
        self.conf_reregister_opts(self.OPTS, group=KUBERNETES_DATASOURCE)

    def test_snapshot_event_transform(self):
        """Transform snapshot / init-snapshot events into cluster vertices."""
        LOG.debug('Test tactual transform action for '
                  'snapshot and snapshot init events')

        # Generate mock node events for both snapshot types.
        k8s_spec_list = \
            mock_sync.simple_k8s_nodes_generators(nodes_num=2,
                                                  snapshot_events=1)
        nodes_events = mock_sync.generate_random_events_list(k8s_spec_list)

        for event in nodes_events:
            k8s_wrapper = self.transformers[KUBERNETES_DATASOURCE].transform(
                event)

            # Test assertions
            self.assertEqual(cluster_name, k8s_wrapper.vertex[VProps.NAME])
            n_length = str(len(k8s_wrapper.neighbors))
            self.assertThat(n_length, matchers.HasLength(1),
                            'Cluster vertex has one neighbor')
            self._validate_cluster_neighbors(k8s_wrapper.neighbors, event)

            # The graph action must match the snapshot kind of the event.
            datasource_action = event[DSProps.DATASOURCE_ACTION]
            if datasource_action == DatasourceAction.INIT_SNAPSHOT:
                self.assertEqual(GraphAction.CREATE_ENTITY, k8s_wrapper.action)
            elif datasource_action == DatasourceAction.SNAPSHOT:
                self.assertEqual(GraphAction.UPDATE_ENTITY, k8s_wrapper.action)

    def test_build_cluster_key(self):
        """The cluster key is CATEGORY:datasource:cluster_name."""
        LOG.debug('Test build cluster key')

        # Test setup
        expected_key = 'RESOURCE:kubernetes:kubernetes'
        instance_transformer = self.transformers[NOVA_INSTANCE_DATASOURCE]

        # Test action
        key_fields = instance_transformer._key_values(
            KUBERNETES_DATASOURCE,
            cluster_name)

        # Test assertions
        observed_key = tbase.build_key(key_fields)
        self.assertEqual(expected_key, observed_key)

    def _validate_cluster_neighbors(self, neighbor, event):
        """Check the cluster's single neighbor (a nova instance placeholder)."""
        # Create expected neighbor
        time = event[DSProps.SAMPLE_DATE]
        external_id = event['resources'][0][kubProp.EXTERNALID]
        properties = {
            VProps.ID: external_id,
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE,
            VProps.VITRAGE_SAMPLE_TIMESTAMP: time
        }
        nova_instance_tran = self.transformers[NOVA_INSTANCE_DATASOURCE]
        expected_neighbor = \
            nova_instance_tran.create_neighbor_placeholder_vertex(**properties)
        self.assertEqual(expected_neighbor, neighbor[0].vertex)

        # Validate neighbor edge: it must point from the cluster entity to
        # the placeholder vertex.
        edge = neighbor[0].edge
        entity_key = \
            self.transformers[KUBERNETES_DATASOURCE]._create_entity_key(event)
        entity_uuid = \
            TransformerBase.uuid_from_deprecated_vitrage_id(entity_key)
        self.assertEqual(edge.source_id, entity_uuid)
        self.assertEqual(edge.target_id, neighbor[0].vertex.vertex_id)

    def test_create_entity_key(self):
        """Entity keys are CATEGORY:datasource:... for every node event."""
        LOG.debug('Test get key from kubernetes transformer')

        # Test setup
        spec_list = mock_sync.simple_k8s_nodes_generators(nodes_num=1,
                                                          snapshot_events=1)
        nodes_events = mock_sync.generate_random_events_list(spec_list)
        kubernetes_transformer = self.transformers[KUBERNETES_DATASOURCE]
        for event in nodes_events:
            # Test action
            observed_key = kubernetes_transformer._create_entity_key(event)

            # Test assertions
            observed_key_fields = observed_key.split(
                TransformerBase.KEY_SEPARATOR)
            self.assertEqual(EntityCategory.RESOURCE, observed_key_fields[0])
            self.assertEqual(
                KUBERNETES_DATASOURCE,
                observed_key_fields[1]
            )
            key_values = kubernetes_transformer._key_values(
                KUBERNETES_DATASOURCE,
                cluster_name)
            expected_key = tbase.build_key(key_values)
            self.assertEqual(expected_key, observed_key)
| 1.398438 | 1 |
two_children.py | daniel2019-max/HackerRank-preparation-month | 0 | 17123 | <reponame>daniel2019-max/HackerRank-preparation-month<filename>two_children.py<gh_stars>0
# Two children, Lily and Ron, want to share a chocolate bar. Each of the squares has an integer on it.
# Lily decides to share a contiguous segment of the bar selected such that:
# The length of the segment matches Ron's birth month, and,
# The sum of the integers on the squares is equal to his birth day.
# Determine how many ways she can divide the chocolate.
# int s[n]: the numbers on each of the squares of chocolate
# int d: Ron's birth day
# int m: Ron's birth month
# Two children
def birthday(s, d, m):
    """Count the contiguous segments of exactly m squares whose sum is d.

    HackerRank "Birthday Chocolate": Lily shares a contiguous piece whose
    length equals Ron's birth month (m) and whose integers sum to his
    birth day (d).

    Bug fix: the original forced one iteration when len(s) == m - 1
    (``numberIteration == 0``), which counted a segment *shorter* than m
    when its sum happened to equal d. Only windows of exactly length m
    are valid, so short inputs must yield 0.

    :param s: list[int] -- the numbers on the chocolate squares
    :param d: int -- Ron's birth day (target segment sum)
    :param m: int -- Ron's birth month (required segment length)
    :return: int -- number of valid segments
    """
    # Slide a window of width m over s; range is empty when len(s) < m.
    return sum(1 for k in range(len(s) - m + 1) if sum(s[k:k + m]) == d)
# Sample input in the HackerRank format: first line lists the squares,
# second line gives Ron's birth day and month.
s = '2 5 1 3 4 4 3 5 1 1 2 1 4 1 3 3 4 2 1'
caracteres = '18 7'
array = [int(token) for token in s.split()]
caracteresList = [int(token) for token in caracteres.split()]
day, month = caracteresList[0], caracteresList[1]
print(birthday(array, day, month))
| 3.984375 | 4 |
sdk/python/pulumi_sonarqube/get_users.py | jshield/pulumi-sonarqube | 0 | 17124 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
'get_users_output',
]
@pulumi.output_type
class GetUsersResult:
"""
A collection of values returned by getUsers.
"""
def __init__(__self__, email=None, id=None, is_local=None, login_name=None, name=None):
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
pulumi.set(__self__, "email", email)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_local and not isinstance(is_local, bool):
raise TypeError("Expected argument 'is_local' to be a bool")
pulumi.set(__self__, "is_local", is_local)
if login_name and not isinstance(login_name, str):
raise TypeError("Expected argument 'login_name' to be a str")
pulumi.set(__self__, "login_name", login_name)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def email(self) -> str:
return pulumi.get(self, "email")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isLocal")
def is_local(self) -> bool:
return pulumi.get(self, "is_local")
@property
@pulumi.getter(name="loginName")
def login_name(self) -> str:
return pulumi.get(self, "login_name")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
class AwaitableGetUsersResult(GetUsersResult):
    """Awaitable wrapper so getUsers results can be used with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The result is already resolved; the generator protocol only needs
        # a (never-executed) yield to make this object awaitable.
        if False:
            yield self
        return GetUsersResult(
            email=self.email,
            id=self.id,
            is_local=self.is_local,
            login_name=self.login_name,
            name=self.name)
def get_users(login_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
    """
    Look up an existing SonarQube user by login name.

    :param login_name: the SonarQube login of the user to fetch.
    :param opts: options that control this invoke (provider, version, ...).
    """
    __args__ = dict()
    __args__['loginName'] = login_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the sonarqube provider.
    __ret__ = pulumi.runtime.invoke('sonarqube:index/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value

    return AwaitableGetUsersResult(
        email=__ret__.email,
        id=__ret__.id,
        is_local=__ret__.is_local,
        login_name=__ret__.login_name,
        name=__ret__.name)
@_utilities.lift_output_func(get_users)
def get_users_output(login_name: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUsersResult]:
    """
    Output-typed variant of ``get_users``; the decorator lifts the plain
    invoke so that ``login_name`` may be a pulumi Input/Output value.
    """
    ...
| 1.992188 | 2 |
rl/valuefunction/FeatureExtractor.py | nickswalker/counterpoint-reinforcement-learning | 1 | 17125 | from abc import abstractmethod
from typing import List
from rl.action import Action
from rl.state import State
class StateActionFeatureExtractor(ABC):
    """Maps a (state, action) pair to a fixed-length numeric feature vector.

    Fix: the original class used ``@abstractmethod`` without deriving from
    ``ABC`` (or setting ``metaclass=ABCMeta``), so the decorator had no
    effect -- the "abstract" class could be instantiated and its methods
    silently returned ``None``. Deriving from ``ABC`` enforces the
    contract on subclasses.
    """

    @abstractmethod
    def num_features(self) -> int:
        """Return the dimensionality of the extracted feature vector."""

    @abstractmethod
    def extract(self, state: State, action: Action) -> List[float]:
        """Return the feature vector for the given state/action pair."""
class StateFeatureExtractor(ABC):
    """Maps a state to a fixed-length numeric feature vector.

    Fix: as with ``StateActionFeatureExtractor``, ``@abstractmethod`` only
    takes effect when the class uses the ABC metaclass, so the base now
    derives from ``ABC``.
    """

    @abstractmethod
    def num_features(self) -> int:
        """Return the dimensionality of the extracted feature vector."""

    @abstractmethod
    def extract(self, state: State) -> List[float]:
        """Return the feature vector for the given state."""
| 3.15625 | 3 |
yxf_utils/jsonx.py | yanyaming/yxf_utils | 0 | 17126 | <reponame>yanyaming/yxf_utils<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
通用json处理接口
"""
import ast
import json
# 输入单引号为边界的类json字符串(内部可能还有双引号),返回单引号为边界的python字典or列表对象。
def singleQuoteJsonStr_to_PythonObj(strr):
jsonObj = eval(strr) # 不能用内置函数解析。只能模拟执行。
return jsonObj # dict or list
# 输入完全正规的json字符串(键-值边界为双引号),返回单引号为边界的python字典or列表对象。
def jsonStr_to_PythonObj(strr):
jsonObj = json.loads(strr)
return jsonObj # dict or list
# 输入python列表或字典(边界为单引号的类json对象),返回边界为双引号的json字符串且双引号加斜杠转义。
def pythonObj_to_jsonStr(obj):
jsonStr = json.dumps(obj)
return jsonStr # str
| 2.53125 | 3 |
ggshield/scan/scannable_errors.py | rgajason/gg-shield | 0 | 17127 | <reponame>rgajason/gg-shield
from ast import literal_eval
from typing import Dict, List
import click
from pygitguardian.models import Detail
from ggshield.text_utils import STYLE, display_error, format_text, pluralize
def handle_scan_error(detail: Detail, chunk: List[Dict[str, str]]) -> None:
    """Report a GitGuardian scan API error for one chunk of files.

    :param detail: error payload returned by the API.
    :param chunk: the files that were submitted in this request; indices
        are assumed to line up with the per-file errors in ``detail``.
    :raises click.UsageError: on HTTP 401 (authentication failure).
    """
    if detail.status_code == 401:
        raise click.UsageError(detail.detail)
    display_error("Error scanning. Results may be incomplete.")
    try:
        # detail.detail may hold a Python-literal list with one entry per
        # submitted file (e.g. size-limit errors); surface those as
        # paths-ignore suggestions.
        details = literal_eval(detail.detail)
        if isinstance(details, list) and details:
            display_error(
                f"Add the following {pluralize('file', len(details))}"
                " to your paths-ignore:"
            )
            for i, inner_detail in enumerate(details):
                if inner_detail:
                    click.echo(
                        f"- {format_text(chunk[i]['filename'], STYLE['filename'])}:"
                        f" {str(inner_detail)}",
                        err=True,
                    )
            return
    except Exception:
        # Best effort: payload was not a literal list -- dump it raw.
        click.echo(f"Error {str(detail)}", err=True)
| 2.3125 | 2 |
Packs/Base/Scripts/DBotPreprocessTextData/DBotPreprocessTextData.py | matan-xmcyber/content | 1 | 17128 | <gh_stars>1-10
# pylint: disable=no-member
from CommonServerUserPython import *
from CommonServerPython import *
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
import uuid
import spacy
import string
from html.parser import HTMLParser
from html import unescape
from re import compile as _Re
import pandas as pd
def hash_word(word, hash_seed):
    """Return the seeded djb2 hash of ``word`` as a decimal string."""
    seed = int(hash_seed)
    return str(hash_djb2(word, seed))
def create_text_result(original_text, tokenized_text, original_words_to_tokens, hash_seed=None):
    """Build the per-text result dict, optionally adding hashed variants.

    When ``hash_seed`` is given, every token is replaced by its seeded
    djb2 hash in the ``hashedTokenizedText`` / ``wordsToHashedTokens``
    entries.
    """
    result = {
        'originalText': original_text,
        'tokenizedText': tokenized_text,
        'originalWordsToTokens': original_words_to_tokens,
    }
    if hash_seed is None:
        return result
    hashed_tokens = [hash_word(token, hash_seed) for token in tokenized_text.split()]
    result['hashedTokenizedText'] = ' '.join(hashed_tokens)
    result['wordsToHashedTokens'] = {
        word: [hash_word(token, hash_seed) for token in tokens]
        for word, tokens in original_words_to_tokens.items()
    }
    return result
class Tokenizer:
    """Configurable text pre-processor: HTML cleanup, tokenization (spaCy or
    naive), optional filtering (stop words, punctuation, non-alpha,
    non-English), pattern replacement (emails/URLs/numbers), lemmatization
    and optional token hashing.
    """

    def __init__(self, clean_html=True, remove_new_lines=True, hash_seed=None, remove_non_english=True,
                 remove_stop_words=True, remove_punct=True, remove_non_alpha=True, replace_emails=True,
                 replace_numbers=True, lemma=True, replace_urls=True, language='English',
                 tokenization_method='byWords'):
        # Sentinel tokens substituted for numbers / urls / emails.
        self.number_pattern = "NUMBER_PATTERN"
        self.url_pattern = "URL_PATTERN"
        self.email_pattern = "EMAIL_PATTERN"
        self.reserved_tokens = set([self.number_pattern, self.url_pattern, self.email_pattern])
        self.clean_html = clean_html
        self.remove_new_lines = remove_new_lines
        self.hash_seed = hash_seed
        self.remove_non_english = remove_non_english
        self.remove_stop_words = remove_stop_words
        self.remove_punct = remove_punct
        self.remove_non_alpha = remove_non_alpha
        self.replace_emails = replace_emails
        self.replace_urls = replace_urls
        self.replace_numbers = replace_numbers
        self.lemma = lemma
        self.language = language
        self.tokenization_method = tokenization_method
        # Texts longer than this are skipped (see handle_long_text).
        self.max_text_length = 10 ** 5
        # Patterns for stripping scripts/styles, comments and tags.
        # NOTE(review): the 4th pattern looks like a decoded '&nbsp;'
        # entity (shown here as a space) -- confirm against upstream.
        self.html_patterns = [
            re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
            re.compile(r"(?s)<!--(.*?)-->[\n]?"),
            re.compile(r"(?s)<.*?>"),
            re.compile(r" "),
            re.compile(r" +")
        ]
        # spaCy model is loaded lazily (and periodically re-loaded).
        self.nlp = None
        self.html_parser = HTMLParser()
        # Splits a string into characters, keeping surrogate pairs intact.
        self._unicode_chr_splitter = _Re('(?s)((?:[\ud800-\udbff][\udc00-\udfff])|.)').split
        self.languages_to_model_names = {'English': 'en_core_web_sm',
                                         'German': 'de_core_news_sm',
                                         'French': 'fr_core_news_sm',
                                         'Spanish': 'es_core_news_sm',
                                         'Portuguese': 'pt_core_news_sm',
                                         'Italian': 'it_core_news_sm',
                                         'Dutch': 'nl_core_news_sm'
                                         }
        # The spaCy pipeline is re-created every spacy_reset_count texts,
        # presumably to bound memory growth -- confirm.
        self.spacy_count = 0
        self.spacy_reset_count = 500

    def handle_long_text(self):
        # Over-long texts produce an empty tokenization.
        return '', ''

    def map_indices_to_words(self, text):
        """Map every character index of ``text`` to the whitespace-delimited
        word containing it (used to recover original words from spaCy
        token offsets)."""
        original_text_indices_to_words = {}
        word_start = 0
        while word_start < len(text) and text[word_start].isspace():
            word_start += 1
        for word in text.split():
            for char_idx, char in enumerate(word):
                original_text_indices_to_words[word_start + char_idx] = word
            # find beginning of next word
            word_start += len(word)
            while word_start < len(text) and text[word_start].isspace():
                word_start += 1
        return original_text_indices_to_words

    def remove_line_breaks(self, text):
        # Replace CR/LF with plain spaces.
        return text.replace("\r", " ").replace("\n", " ")

    def remove_multiple_whitespaces(self, text):
        # Collapse whitespace runs to a single space and trim.
        return re.sub(r"\s+", " ", text).strip()

    def clean_html_from_text(self, text):
        # Strip tags per html_patterns, then decode HTML entities.
        cleaned = text
        for pattern in self.html_patterns:
            cleaned = pattern.sub(" ", cleaned)
        return unescape(cleaned).strip()

    def handle_tokenizaion_method(self, text):
        """Dispatch to spaCy tokenization for supported languages, or to the
        naive by-words/by-letters tokenizer otherwise. Returns
        (tokenized_text, original_words_to_tokens)."""
        language = self.language
        if language in self.languages_to_model_names:
            tokens_list, original_words_to_tokens = self.tokenize_text_spacy(text)
        else:
            tokens_list, original_words_to_tokens = self.tokenize_text_other(text)
        tokenized_text = ' '.join(tokens_list).strip()
        return tokenized_text, original_words_to_tokens

    def tokenize_text_other(self, text):
        """Naive tokenizer for unsupported languages: split by words (with
        punctuation stripped) or into single characters."""
        tokens_list = []
        tokenization_method = self.tokenization_method
        if tokenization_method == 'byWords':
            original_words_to_tokens = {}
            for t in text.split():
                token_without_punct = ''.join([c for c in t if c not in string.punctuation])
                if len(token_without_punct) > 0:
                    tokens_list.append(token_without_punct)
                    original_words_to_tokens[token_without_punct] = t
        elif tokenization_method == 'byLetters':
            for t in text:
                tokens_list += [chr for chr in self._unicode_chr_splitter(t) if chr and chr != ' ']
            # NOTE(review): this maps every character to the final value of
            # ``t`` (the last character iterated) -- looks suspicious;
            # confirm intended behavior.
            original_words_to_tokens = {c: t for c in tokens_list}
        else:
            return_error('Unsupported tokenization method: when language is "Other" ({})'.format(tokenization_method))
        return tokens_list, original_words_to_tokens

    def tokenize_text_spacy(self, text):
        """Tokenize with spaCy, applying the configured filters and
        replacements; also tracks which original word produced each token."""
        # Lazy-load (and periodically re-load) the language model.
        if self.nlp is None or self.spacy_count % self.spacy_reset_count == 0:
            self.init_spacy_model(self.language)
        doc = self.nlp(text)  # type: ignore
        self.spacy_count += 1
        original_text_indices_to_words = self.map_indices_to_words(text)
        tokens_list = []
        original_words_to_tokens = {}  # type: ignore
        for word in doc:
            if word.is_space:
                continue
            elif self.remove_stop_words and word.is_stop:
                continue
            elif self.remove_punct and word.is_punct:
                continue
            elif self.replace_emails and '@' in word.text:
                tokens_list.append(self.email_pattern)
            elif self.replace_urls and word.like_url:
                tokens_list.append(self.url_pattern)
            elif self.replace_numbers and (word.like_num or word.pos_ == 'NUM'):
                tokens_list.append(self.number_pattern)
            elif self.remove_non_alpha and not word.is_alpha:
                continue
            elif self.remove_non_english and word.text not in self.nlp.vocab:  # type: ignore
                continue
            else:
                # Keep the token, lemmatized or lower-cased.
                if self.lemma and word.lemma_ != '-PRON-':
                    token_to_add = word.lemma_
                else:
                    token_to_add = word.lower_
                tokens_list.append(token_to_add)
                original_word = original_text_indices_to_words[word.idx]
                if original_word not in original_words_to_tokens:
                    original_words_to_tokens[original_word] = []
                original_words_to_tokens[original_word].append(token_to_add)
        return tokens_list, original_words_to_tokens

    def init_spacy_model(self, language):
        # Load only the tokenizer/lemmatizer parts of the pipeline; the
        # heavy components are disabled for speed.
        try:
            self.nlp = spacy.load(self.languages_to_model_names[language],
                                  disable=['tagger', 'parser', 'ner', 'textcat'])
        except Exception:
            return_error("The specified language is not supported in this docker. In order to pre-process text "
                         "using this language, it's required to change this docker. Please check at the documentation "
                         "or contact us for help.")

    def word_tokenize(self, text):
        """Pre-process one text or a list of texts; returns one result dict
        per input (or a single dict when the input was a single text)."""
        if not isinstance(text, list):
            text = [text]
        result = []
        for t in text:
            original_text = t
            if self.remove_new_lines:
                t = self.remove_line_breaks(t)
            if self.clean_html:
                t = self.clean_html_from_text(t)
            t = self.remove_multiple_whitespaces(t)
            if len(t) < self.max_text_length:
                tokenized_text, original_words_to_tokens = self.handle_tokenizaion_method(t)
            else:
                tokenized_text, original_words_to_tokens = self.handle_long_text()
            text_result = create_text_result(original_text, tokenized_text, original_words_to_tokens,
                                             hash_seed=self.hash_seed)
            result.append(text_result)
        if len(result) == 1:
            result = result[0]  # type: ignore
        return result
# define global parsers
# Field names used on each incident/document dict.
DBOT_TEXT_FIELD = 'dbot_text'
DBOT_PROCESSED_TEXT_FIELD = 'dbot_processed_text'
CONTEXT_KEY = 'DBotPreProcessTextData'
# Module-level HTML-stripping patterns (same shape as Tokenizer.html_patterns).
# NOTE(review): the 4th pattern looks like a decoded '&nbsp;' entity (shown
# here as a space) -- confirm against upstream.
HTML_PATTERNS = [
    re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
    re.compile(r"(?s)<!--(.*?)-->[\n]?"),
    re.compile(r"(?s)<.*?>"),
    re.compile(r" "),
    re.compile(r" +")
]
html_parser = HTMLParser()
# Lazily created Tokenizer instance (set elsewhere).
tokenizer = None
def read_file(input_data, input_type):
    """Read the script input into Python data.

    *input_type* selects the source: values ending in "string" treat
    *input_data* as the content itself (base64-decoded first when the type
    contains "b64"); otherwise *input_data* is a war-room entry id resolved
    via demisto.getFilePath. Supported formats are json, csv and pickle.
    Empty input yields an empty list.
    """
    if not input_data:
        return []  # type: ignore
    file_content = None
    if input_type.endswith("string"):
        raw = input_data
        if 'b64' in input_type:
            raw = base64.b64decode(raw)
            file_content = raw.decode("utf-8")
        else:
            file_content = raw
    else:
        res = demisto.getFilePath(input_data)
        if not res:
            return_error("Entry {} not found".format(input_data))
        file_path = res['path']
        if input_type.startswith('json'):
            with open(file_path, 'r') as f:
                file_content = f.read()
    if input_type.startswith('csv'):
        return pd.read_csv(file_path).fillna('').to_dict(orient='records')
    elif input_type.startswith('json'):
        return json.loads(file_content)
    elif input_type.startswith('pickle'):
        return pd.read_pickle(file_path, compression=None)
    else:
        return_error("Unsupported file type %s" % input_type)
def concat_text_fields(data, target_field, text_fields):
for d in data:
text = ''
for fields in text_fields:
for field in fields.strip().split("|"):
field = field.strip()
if "." in field:
value = demisto.dt(d, field)
if type(value) is list and len(value) > 0:
value = value[0]
else:
value = d.get(field) or d.get(field.lower(), '')
if value and isinstance(value, str):
text += value
text += ' '
break
text = text.strip()
d[target_field] = text
return data
def clean_html(text):
    """Strip HTML markup from *text* and unescape HTML entities.

    Applies the module-level HTML_PATTERNS in order, replacing each match
    with a space, then unescapes entities and trims surrounding whitespace.
    """
    stripped = text
    for html_pattern in HTML_PATTERNS:
        stripped = html_pattern.sub(" ", stripped)
    return unescape(stripped).strip()
def remove_line_breaks(text):
    """Flatten *text* to one line: CR/LF become spaces, whitespace runs collapse."""
    flattened = text.replace("\r", " ").replace("\n", " ")
    return re.sub(r"\s+", " ", flattened).strip()
def pre_process_batch(data, source_text_field, target_text_field, remove_html_tags, pre_process_type, hash_seed):
    """Clean and tokenize the text of every sample in *data* in place.

    The raw text is taken from *source_text_field*, optionally stripped of
    HTML, flattened to one line, then tokenized via pre_process_single_text.
    The (hashed, when *hash_seed* is set) token string is written to
    *target_text_field*. Returns the mutated list.
    """
    cleaned_texts = []
    for sample in data:
        text = sample[source_text_field]
        if remove_html_tags:
            text = clean_html(text)
        cleaned_texts.append(remove_line_breaks(text))
    result_key = 'tokenizedText' if hash_seed is None else 'hashedTokenizedText'
    for sample, cleaned in zip(data, cleaned_texts):
        processed = pre_process_single_text(cleaned, hash_seed, pre_process_type)
        sample[target_text_field] = processed[result_key]
    return data
def pre_process_single_text(raw_text, hash_seed, pre_process_type):
    """Dispatch *raw_text* to the pre-processing function registered for *pre_process_type*."""
    return PRE_PROCESS_TYPES[pre_process_type](raw_text, hash_seed)
def pre_process_tokenizer(text, seed):
    """Tokenize *text* with a lazily created, module-level Tokenizer.

    The Tokenizer is configured once from the script arguments
    (tokenizationMethod, language) and reused for subsequent calls.
    """
    global tokenizer
    if tokenizer is None:
        script_args = demisto.args()
        tokenizer = Tokenizer(tokenization_method=script_args['tokenizationMethod'],
                              language=script_args['language'], hash_seed=seed)
    return tokenizer.word_tokenize(text)
def pre_process_none(text, seed):
    """Pass-through pre-processing: each whitespace-separated word maps to itself."""
    identity_mapping = {word: word for word in text.split()}
    return create_text_result(text, text, identity_mapping, seed)
# Maps the 'preProcessType' script argument to its implementation.
PRE_PROCESS_TYPES = {
    'none': pre_process_none,
    'nlp': pre_process_tokenizer,
}
def remove_short_text(data, text_field, target_text_field, remove_short_threshold):
    """Drop samples whose raw text is too short or whose processed text is empty.

    A sample is kept when its *text_field* has more than
    *remove_short_threshold* space-separated words AND its
    *target_text_field* is non-empty. Returns (filtered_list, description).
    """
    def _keep(sample):
        return (len(sample[text_field].split(" ")) > remove_short_threshold
                and len(sample[target_text_field]) > 0)

    kept = [sample for sample in data if _keep(sample)]
    dropped_count = len(data) - len(kept)
    description = ""
    if dropped_count > 0:
        description = "Dropped %d samples shorter than %d words" % (dropped_count, remove_short_threshold) + "\n"
    return kept, description
def get_tf_idf_similarity_arr(documents):
    """Return the dense pairwise cosine-similarity matrix of TF-IDF vectors.

    English stop words are removed; every remaining term is kept (min_df=1).
    """
    tfidf_matrix = TfidfVectorizer(stop_words="english", min_df=1).fit_transform(documents)
    return (tfidf_matrix * tfidf_matrix.T).toarray()
def find_duplicate_indices(texts, dedup_threshold):
    """Return the set of indices of texts that near-duplicate an earlier text.

    Two texts are duplicates when their TF-IDF cosine similarity exceeds
    *dedup_threshold*; only the later index of each duplicate pair is marked
    for removal, so the first occurrence is always kept.
    """
    similarity_arr = get_tf_idf_similarity_arr(texts)
    n = similarity_arr.shape[0]
    indices_to_remove = set()
    # The similarity matrix is symmetric, so only the upper triangle (j > i)
    # needs scanning; the original scanned the full matrix and filtered with
    # `if j > i`, doing twice the work for the same result.
    for i in range(n):
        for j in range(i + 1, n):
            if similarity_arr[i][j] > dedup_threshold:
                indices_to_remove.add(j)
    return indices_to_remove
def remove_duplicate_by_indices(data, duplicate_indices):
    """Drop the samples whose positions appear in *duplicate_indices*.

    Returns (filtered_list, description); the description is empty when
    nothing was dropped.
    """
    kept = [sample for index, sample in enumerate(data) if index not in duplicate_indices]
    dropped_count = len(duplicate_indices)
    if dropped_count > 0:
        description = "Dropped %d samples duplicate to other samples" % dropped_count + "\n"
    else:
        description = ""
    return kept, description
def whitelist_dict_fields(data, fields):
    """Keep only whitelisted keys in every sample dict.

    A key survives when it matches a stripped field name exactly or its
    lower-cased form. Returns a new list of new dicts; input is not mutated.
    """
    allowed = [f.strip() for f in fields] + [f.strip().lower() for f in fields]
    return [{key: value for key, value in sample.items() if key in allowed}
            for sample in data]
def main():
    """Entry point: read samples, concatenate and pre-process text fields,
    filter short/duplicate samples, and return a file-result entry.

    For inputType == 'string' the single input text is pre-processed and the
    result dict is returned directly (used by DbotPredictPhishingWords).
    """
    text_fields = demisto.args()['textFields'].split(",")
    input_data = demisto.args().get('input')
    input_type = demisto.args()['inputType']
    hash_seed = int(demisto.args().get('hashSeed')) if demisto.args().get('hashSeed') else None
    remove_short_threshold = int(demisto.args().get('removeShortTextThreshold', 1))
    de_dup_threshold = float(demisto.args()['dedupThreshold'])
    pre_process_type = demisto.args()['preProcessType']
    remove_html_tags = demisto.args()['cleanHTML'] == 'true'
    whitelist_fields = demisto.args().get('whitelistFields').split(",") if demisto.args().get(
        'whitelistFields') else None
    # if input is a single string (from DbotPredictPhishingWords):
    if input_type == 'string':
        res = pre_process_single_text(raw_text=demisto.args().get('input'),
                                      hash_seed=hash_seed, pre_process_type=pre_process_type)
        return res
    output_original_text_fields = demisto.args().get('outputOriginalTextFields', 'false') == 'true'
    description = ""
    # read data
    data = read_file(input_data, input_type)
    # concat text fields
    concat_text_fields(data, DBOT_TEXT_FIELD, text_fields)
    description += "Read initial %d samples" % len(data) + "\n"
    # clean text
    if pre_process_type not in PRE_PROCESS_TYPES:
        return_error('Pre-process type {} is not supported'.format(pre_process_type))
    data = pre_process_batch(data, DBOT_TEXT_FIELD, DBOT_PROCESSED_TEXT_FIELD, remove_html_tags, pre_process_type,
                             hash_seed)
    # remove short emails
    data, desc = remove_short_text(data, DBOT_TEXT_FIELD, DBOT_PROCESSED_TEXT_FIELD, remove_short_threshold)
    description += desc
    # remove duplicates
    try:
        if 0 < de_dup_threshold < 1:
            duplicate_indices = find_duplicate_indices([x[DBOT_PROCESSED_TEXT_FIELD] for x in data], de_dup_threshold)
            data, desc = remove_duplicate_by_indices(data, duplicate_indices)
            description += desc
    except Exception:
        # De-duplication is best-effort; keep all samples if TF-IDF fails.
        pass
    if output_original_text_fields:
        # BUG FIX: whitelist_fields may be None here; the original crashed
        # with a TypeError ("None += list") when no whitelist was supplied
        # but outputOriginalTextFields was 'true'.
        if whitelist_fields is None:
            whitelist_fields = []
        for field in text_fields:
            whitelist_fields += [x.strip() for x in field.split('|')]
    if whitelist_fields and len(whitelist_fields) > 0:
        whitelist_fields.append(DBOT_PROCESSED_TEXT_FIELD)
        data = whitelist_dict_fields(data, whitelist_fields)
    description += "Done processing: %d samples" % len(data) + "\n"
    # output
    file_name = str(uuid.uuid4())
    output_format = demisto.args()['outputFormat']
    if output_format == 'pickle':
        data_encoded = pickle.dumps(data, protocol=2)
    elif output_format == 'json':
        data_encoded = json.dumps(data, default=str)  # type: ignore
    else:
        return_error("Invalid output format: %s" % output_format)
    entry = fileResult(file_name, data_encoded)
    entry['Contents'] = data
    entry['HumanReadable'] = description
    entry['EntryContext'] = {
        CONTEXT_KEY: {
            'Filename': file_name,
            'FileFormat': output_format,
            'TextField': DBOT_TEXT_FIELD,
            'TextFieldProcessed': DBOT_PROCESSED_TEXT_FIELD,
        }
    }
    return entry
# XSOAR executes scripts with __name__ == 'builtins'; '__main__' covers direct runs.
if __name__ in ['builtins', '__main__']:
    entry = main()
    demisto.results(entry)
| 2.5 | 2 |
punc_recover/tester/punc_tester.py | Z-yq/audioSamples.github.io | 1 | 17129 | import logging
import os
import tensorflow as tf
from punc_recover.models.punc_transformer import PuncTransformer
from punc_recover.tester.base_tester import BaseTester
from utils.text_featurizers import TextFeaturizer
class PuncTester(BaseTester):
    """Evaluates a punctuation-restoration Transformer on a test set.

    NOTE(review): the original docstring said "Trainer for CTC Models",
    which does not match this class — it only restores a checkpoint and
    reports masked accuracy. Confirm and keep this corrected description.
    """

    def __init__(self,
                 config,
                 ):
        """Build featurizers and metrics from the full config dict."""
        super(PuncTester, self).__init__(config['running_config'])
        self.model_config = config['model_config']
        self.vocab_featurizer = TextFeaturizer(config['punc_vocab'])  # input-token vocabulary
        self.bd_featurizer = TextFeaturizer(config['punc_biaodian'])  # punctuation-class vocabulary
        self.opt_config = config['optimizer_config']
        self.eval_metrics = {
            # Running mean of per-batch masked accuracy.
            "acc": tf.keras.metrics.Mean(),
        }

    def _eval_step(self, batch):
        """Run inference on one (inputs, labels) batch and accumulate accuracy."""
        x, labels = batch
        mask = self.creat_mask(x)
        pred_bd = self.model.inference(x, mask)
        acc=self.classes_acc(labels,pred_bd)
        self.eval_metrics["acc"].update_state(acc)

    def creat_mask(self, seq):
        """Build an attention padding mask: 1.0 where *seq* holds the padding id (0)."""
        seq_pad = tf.cast(tf.equal(seq, 0), tf.float32)
        return seq_pad[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)

    def classes_acc(self, real, pred):
        """Sparse categorical accuracy averaged over non-padding positions only."""
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        accs = tf.keras.metrics.sparse_categorical_accuracy(real,pred)
        mask = tf.cast(mask, dtype=accs.dtype)
        accs *= mask
        # Per-sequence accuracy over valid positions, then mean over the batch.
        final=tf.reduce_sum(accs,-1)/tf.reduce_sum(mask,-1)
        return tf.reduce_mean(final)

    def compile(self, ):
        """Instantiate the PuncTransformer from config and restore the newest checkpoint."""
        self.model = PuncTransformer(num_layers=self.model_config['num_layers'],
                                     d_model=self.model_config['d_model'],
                                     enc_embedding_dim=self.model_config['enc_embedding_dim'],
                                     num_heads=self.model_config['num_heads'],
                                     dff=self.model_config['dff'],
                                     input_vocab_size=self.vocab_featurizer.num_classes,
                                     bd_vocab_size=self.bd_featurizer.num_classes,
                                     pe_input=self.model_config['pe_input'],
                                     rate=self.model_config['rate'])
        self.model._build()
        self.load_checkpoint()
        # NOTE(review): this logs unconditionally, even after a successful
        # restore — the message appears misleading; confirm intended behaviour.
        logging.info('trainer resume failed')
        self.model.summary(line_length=100)

    def run(self, ):
        """Evaluate all batches (loop implemented by BaseTester._eval_batches)."""
        self._eval_batches()

    def load_checkpoint(self, ):
        """Load checkpoint.

        Restores the newest checkpoint in <outdir>/checkpoints, assuming
        file names end with "_<step>.h5" so they sort numerically by step.
        """
        self.checkpoint_dir = os.path.join(self.running_config["outdir"], "checkpoints")
        files = os.listdir(self.checkpoint_dir)
        files.sort(key=lambda x: int(x.split('_')[-1].replace('.h5', '')))
        self.model.load_weights(os.path.join(self.checkpoint_dir, files[-1]))
| 2.171875 | 2 |
moztrap/model/core/migrations/0003_auto__add_field_productversion_cc_version__add_field_product.py | mbeko/moztrap | 0 | 17130 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add an integer ``cc_version`` column
    (optimistic-concurrency counter, default 0) to Product and ProductVersion."""

    def forwards(self, orm):
        """Apply the migration: add ``cc_version`` to both tables."""
        # Adding field 'ProductVersion.cc_version'
        db.add_column('core_productversion', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'Product.cc_version'
        db.add_column('core_product', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop ``cc_version`` from both tables."""
        # Deleting field 'ProductVersion.cc_version'
        db.delete_column('core_productversion', 'cc_version')
        # Deleting field 'Product.cc_version'
        db.delete_column('core_product', 'cc_version')

    # Frozen ORM state auto-generated by South at migration time.
    # It describes the models as of this migration; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.product': {
            'Meta': {'ordering': "['name']", 'object_name': 'Product'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558711)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558895)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'core.productversion': {
            'Meta': {'ordering': "['product', 'order']", 'object_name': 'ProductVersion'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 559819)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
            'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 560004)'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'environments.category': {
            'Meta': {'ordering': "['name']", 'object_name': 'Category'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562776)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562967)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'environments.element': {
            'Meta': {'ordering': "['name']", 'object_name': 'Element'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 561818)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562003)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'environments.environment': {
            'Meta': {'object_name': 'Environment'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555711)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'elements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'environments'", 'symmetrical': 'False', 'to': "orm['environments.Element']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555910)'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
        },
        'environments.profile': {
            'Meta': {'object_name': 'Profile'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 557817)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558002)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['core']
| 2.078125 | 2 |
tests/downloader_test.py | jkawamoto/roadie-gcp | 1 | 17131 | <gh_stars>1-10
#! /usr/bin/env python
#
# downloader_test.py
#
# Copyright (c) 2015-2016 <NAME>
#
# This software is released under the MIT License.
#
# http://opensource.org/licenses/mit-license.php
#
""" Test for downloader module.
"""
import logging
import shutil
import sys
import unittest
import os
from os import path
import downloader # pylint: disable=import-error
# Path (relative to the repo root) of the file compared in every test.
TARGET_FILE = "bin/entrypoint.sh"
# Remote copy of TARGET_FILE used as the download sample.
SAMPLE_FILE = "https://raw.githubusercontent.com/jkawamoto/roadie-gcp/master/bin/entrypoint.sh"
# Local reference copy used as ground truth for content comparison.
ORIGINAL_FILE = path.normpath(
    path.join(path.dirname(__file__), "..", TARGET_FILE))
# Directory name produced when the v20160618 release archives are extracted.
ARCHIVE_ROOT = "./roadie-gcp-20160618"
ZIP_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.zip"
TAR_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.tar.gz"
class TestDownload(unittest.TestCase):
    """ Test case for download module.

    NOTE(review): every test fetches a live URL from GitHub, so this suite
    requires network access and the referenced release artifacts to exist.
    """

    def test_download(self):
        """ Test downloading a file.
        """
        downloader.download(SAMPLE_FILE)
        basename = path.basename(SAMPLE_FILE)
        self.evaluate_file(basename, ORIGINAL_FILE)
        os.remove(basename)  # clean up the downloaded file

    def test_set_destination(self):
        """ Test downloading a file to another directory.
        """
        # A trailing slash after the colon means "directory, keep basename".
        downloader.download(SAMPLE_FILE + ":/tmp/")
        target = "/tmp/" + path.basename(SAMPLE_FILE)
        self.evaluate_file(target, ORIGINAL_FILE)
        os.remove(target)

    def test_rename(self):
        """ Test downloading a file and renaming it.
        """
        target = "test.md"
        downloader.download(SAMPLE_FILE + ":" + target)
        self.evaluate_file(target, ORIGINAL_FILE)
        os.remove(target)

    def test_set_destination_and_rename(self):
        """ Test downloading a file to a directory and renaming it.
        """
        target = "/tmp/test.md"
        downloader.download(SAMPLE_FILE + ":" + target)
        self.evaluate_file(target, ORIGINAL_FILE)
        os.remove(target)

    def test_download_zip(self):
        """ Test downloading a zip file.
        """
        # Archives are expected to be extracted in place after download.
        downloader.download(ZIP_FILE)
        target = path.join(ARCHIVE_ROOT, TARGET_FILE)
        self.evaluate_file(target, ORIGINAL_FILE)
        shutil.rmtree(ARCHIVE_ROOT)

    def test_set_destination_zip(self):
        """ Test downloading a zip file to a specified path.
        """
        downloader.download(ZIP_FILE + ":/tmp/")
        target = path.join("/tmp/", ARCHIVE_ROOT, TARGET_FILE)
        self.evaluate_file(target, ORIGINAL_FILE)
        shutil.rmtree(path.join("/tmp/", ARCHIVE_ROOT))

    def test_download_tarball(self):
        """ Test downloading a tarball file.
        """
        downloader.download(TAR_FILE)
        target = path.join(ARCHIVE_ROOT, TARGET_FILE)
        self.evaluate_file(target, ORIGINAL_FILE)
        shutil.rmtree(ARCHIVE_ROOT)

    def test_set_destination_taball(self):
        """ Test downloading a tarball file to a specified path.
        """
        downloader.download(TAR_FILE + ":/tmp/")
        target = path.join("/tmp/", ARCHIVE_ROOT, TARGET_FILE)
        self.evaluate_file(target, ORIGINAL_FILE)
        shutil.rmtree(path.join("/tmp/", ARCHIVE_ROOT))

    def evaluate_file(self, target, original):
        """ Evaluate existence and contents of the target file.

        Args:
          target: target file to be checked.
          original: original file of which contetns will be compared of the ones
            of target.
        """
        self.assertTrue(path.exists(target))
        self.assertEqual(
            self.read_file(target),
            self.read_file(original))

    @staticmethod
    def read_file(fpath):
        """ Open a file and read it.

        Args:
          fpath: Path for a file.

        Returns:
          Contents of the file.
        """
        with open(fpath) as f:
            return f.read()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
unittest.main()
| 2.3125 | 2 |
ticle/plotters/plot_phase.py | muma7490/TICLE | 0 | 17132 | import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData,load_file
from ticle.analysis.analysis import get_phases,normalize_phase
# Generate phase plots and light curves for every star in data/dataList.txt,
# folding each light curve once on the literature period and once on the
# period found by the analysis. PDFs go to results/phase_plots/<star>/.

# Global matplotlib styling for all plots produced below.
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})
# NOTE(review): tight_layout() here acts on an implicit empty figure and has
# no effect on the figures created below; kept for behavioural compatibility.
pl.tight_layout()

path = os.getcwd()
phase_dir = f"{path}/results/phase_plots"
os.makedirs(phase_dir, exist_ok=True)  # idiomatic replacement for try/except FileExistsError

data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
# Assumed row layout: star id, analysis period (days), literature period (days)
# — inferred from the usage below; confirm against dataList.txt.
data_list = np.loadtxt(data_list_file)

for data in data_list:
    star = f"0{int(data[0])}"
    file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
    res_dir = f"{phase_dir}/{star}"
    os.makedirs(res_dir, exist_ok=True)

    t_series = load_file(file_name)
    t_series = normalizeData(t_series)

    # (plot title, filename tag, folding period)
    p = [(f"Phaseplot {star} - literature", "literature", data[2]),
         (f"Phaseplot {star} - P={data[1]} days", "result", data[1])]
    for title, save_text, period in p:
        masks = get_phases(t_series, period)

        fig_phase = pl.figure(figsize=(10, 7))
        for i in masks:
            plot_data = normalize_phase(np.array((t_series[0][i], t_series[1][i])))
            pl.plot(plot_data[0], plot_data[1], linewidth=1)
        pl.xlabel("Phase")
        pl.ylabel("Flux")
        pl.title(title)
        fig_phase.savefig(f"{res_dir}/{star}_{save_text}_phase_.pdf")
        # BUG FIX: figures were never closed, so matplotlib accumulated open
        # figures (and memory) across the whole star loop.
        pl.close(fig_phase)

        fig_lightcurve = pl.figure(figsize=(10, 7))
        for i in masks:
            pl.plot(t_series[0][i], t_series[1][i], linewidth=1)
        pl.xlabel("Period(days)")
        pl.ylabel("Flux")
        pl.title(f"{star} Lightcurve {save_text}")
        fig_lightcurve.savefig(f"{res_dir}/{star}_{save_text}_lightcurve.pdf")
        pl.close(fig_lightcurve)
src/putil/rabbitmq/rabbit_util.py | scionrep/scioncc_new | 2 | 17133 | #!/usr/bin/python
import shlex
import simplejson
from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE
class RabbitManagementUtil(object):
def __init__(self, config, options=None, sysname=None):
"""
Given a config object (system CFG or rabbit mgmt config), extracts the correct config
and prepares util for subsequent calls to RabbitMQ via management plugin REST API.
"""
self.mgmt_cfg = self.get_mgmt_config(config, sysname)
self.connect_str = self.build_connect_str(self.mgmt_cfg)
self.options = options
self.sysname = sysname
self.call_args = self.connect_str
if self.options:
self.call_args += "_" + self.options
self.parser = make_parser()
@staticmethod
def get_mgmt_config(config, sysname=None):
""" Returns the RabbitMq management config dict from indirect reference in container CFG
or from given config dict. """
if not config:
raise RuntimeError("Bad config argument")
if "container" in config and hasattr(config, "get_safe"):
mgmt_cfg_key = config.get_safe("container.messaging.management.server", "rabbit_manage")
mgmt_cfg = config.get_safe("server." + mgmt_cfg_key)
elif "host" in config:
mgmt_cfg = config
else:
raise RuntimeError("Bad RabbitMQ management config")
sysname = sysname or "scioncc"
mgmt_cfg = mgmt_cfg.copy()
mgmt_cfg["host"] = mgmt_cfg.get("host", None) or "localhost"
mgmt_cfg["port"] = mgmt_cfg.get("port", None) or "15672"
mgmt_cfg["username"] = mgmt_cfg.get("username", None) or "guest"
mgmt_cfg["password"] = mgmt_cfg.get("password", None) or "<PASSWORD>"
mgmt_cfg["vhost"] = mgmt_cfg.get("vhost", None) or "/"
mgmt_cfg["system_exchange"] = mgmt_cfg.get("system_exchange", None)
if not mgmt_cfg["system_exchange"] and "exchange" in config and hasattr(config, "get_safe"):
mgmt_cfg["system_exchange"] = "%s.%s" % (sysname, config.get_safe('exchange.core.system_xs', 'system'))
mgmt_cfg["events_xp"] = mgmt_cfg.get("events_xp", None)
if not mgmt_cfg["events_xp"] and "exchange" in config and hasattr(config, "get_safe"):
mgmt_cfg["events_xp"] = "%s.%s" % (mgmt_cfg["system_exchange"], config.get_safe('exchange.core.events', 'events'))
return mgmt_cfg
@staticmethod
def build_connect_str(mgmt_cfg):
connect_str = "-q -H {0} -P {1} -u {2} -p {3} -V {4}".format(
mgmt_cfg["host"], mgmt_cfg["port"], mgmt_cfg["username"], mgmt_cfg["password"], mgmt_cfg["vhost"])
return connect_str
@staticmethod
def get_mgmt_url(config, feats=None):
mgmt_cfg = RabbitManagementUtil.get_mgmt_config(config)
feats = feats or []
url = "http://%s:%s/api/%s" % (mgmt_cfg["host"], mgmt_cfg["port"], "/".join(feats))
return url
# -------------------------------------------------------------------------
# Util methods
def clean_by_prefix(self, prefix):
"""
Utility method to clean (sysname) prefixed exchanges and queues on a broker.
@param prefix The sysname / prefix to use to select exchanges and queues to delete.
Must be the prefix to the exchange or queue or this will not be deleted.
@returns A 2-tuple of (list of exchanges deleted, list of queues deleted).
"""
exchanges = self.list_names('exchanges')
deleted_exchanges = self.delete_names_with_prefix('exchange', exchanges, prefix)
queues = self.list_names('queues')
deleted_queues = self.delete_names_with_prefix('queue', queues, prefix)
return deleted_exchanges, deleted_queues
def clean_by_sysname(self, sysname=None):
sysname = sysname or self.sysname
if not sysname:
raise RuntimeError("Must provide sysname")
return self.clean_by_prefix(sysname or self.sysname)
def declare_exchange(self, xp):
if xp == "events":
ex_name = self.mgmt_cfg["events_xp"]
else:
ex_name = self.mgmt_cfg["system_exchange"]
cmd_str = '{0} declare exchange name="{1}" durable=false auto_delete=true type=topic'.format(self.call_args, ex_name)
(options, args) = self.parser.parse_args(shlex.split(cmd_str))
mgmt = Management(options, args[1:])
mgmt.invoke_declare()
def declare_queue(self, xp, queue_name):
if xp == "events":
ex_name = self.mgmt_cfg["events_xp"]
else:
ex_name = self.mgmt_cfg["system_exchange"]
if queue_name.startswith(self.sysname):
qqueue_name = queue_name
else:
qqueue_name = ".".join([ex_name, queue_name])
cmd_str = '{0} declare queue name="{1}" durable=false auto_delete=false'.format(self.call_args, qqueue_name)
(options, args) = self.parser.parse_args(shlex.split(cmd_str))
mgmt = Management(options, args[1:])
mgmt.invoke_declare()
def bind_queue(self, xp, queue_name, binding):
if xp == "events":
ex_name = self.mgmt_cfg["events_xp"]
else:
ex_name = self.mgmt_cfg["system_exchange"]
if queue_name.startswith(self.sysname):
qqueue_name = queue_name
else:
qqueue_name = ".".join([ex_name, queue_name])
cmd_str = '{0} declare binding source="{1}" destination="{2}" destination_type=queue routing_key="{3}"'.format(
self.call_args, ex_name, qqueue_name, binding)
(options, args) = self.parser.parse_args(shlex.split(cmd_str))
mgmt = Management(options, args[1:])
mgmt.invoke_declare()
# TODO: Move the management calls from pyon.ion.exchange here
# -------------------------------------------------------------------------
# Helpers
def list_names(self, listable_type):
list_str = '%s list %s name' % (self.call_args, listable_type)
(options, args) = self.parser.parse_args(shlex.split(list_str))
mgmt = Management(options, args[1:])
uri = mgmt.list_show_uri(LISTABLE, 'list', mgmt.args[1:])
output_json = mgmt.get(uri)
listables = simplejson.loads(output_json)
return listables
def list_names_with_prefix(self, listables, name_prefix):
return [l['name'] for l in listables if l['name'].startswith(name_prefix)]
# This function works on exchange, queue, vhost, user
    def delete_names_with_prefix(self, deletable_type, deleteable, name_prefix):
        """Delete every broker object in ``deleteable`` whose name starts with ``name_prefix``.

        ``deletable_type`` is the management object type (works on exchange,
        queue, vhost and user).  Returns the list of names actually deleted.
        """
        deleted = []
        for d in deleteable:
            try:
                if d['name'].startswith(name_prefix):
                    # Build and invoke a "delete <type> name=..." management command.
                    delete_cmd = '%s delete %s name="%s"' % (self.call_args, deletable_type, d['name'])
                    (options, args) = self.parser.parse_args(shlex.split(delete_cmd))
                    mgmt = Management(options, args[1:])
                    mgmt.invoke_delete()
                    deleted.append(d['name'])
            except KeyError:
                # Some entries have no 'name' key; skip them silently.
                pass
        return deleted
| 2.296875 | 2 |
gaternet/main.py | gunpowder78/google-research | 1 | 17134 | <filename>gaternet/main.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads a GaterNet checkpoint and tests on Cifar-10 test set."""
import argparse
import io
import os
from backbone_resnet import Network as Backbone
from gater_resnet import Gater
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
def load_from_state(state_dict, model):
  """Loads the state dict of a checkpoint into model.

  Strips any DataParallel 'module.' prefix from checkpoint keys, reports the
  key differences between checkpoint and model, then loads strictly.
  """
  stripped = {key.replace('module.', ''): value
              for key, value in state_dict.items()}
  ckpt_keys = set(stripped)
  model_keys = set(model.state_dict())
  print('Keys not in current model: {}\n'.format(ckpt_keys - model_keys))
  print('Keys not in checkpoint: {}\n'.format(model_keys - ckpt_keys))
  model.load_state_dict(stripped, strict=True)
  print('Successfully reload from state.')
  return model
def test(backbone, gater, device, test_loader):
"""Tests the model on a test set."""
backbone.eval()
gater.eval()
loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
gate = gater(data)
output = backbone(data, gate)
loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
loss /= len(test_loader.dataset)
acy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
loss, correct, len(test_loader.dataset), acy))
return acy
def run(args, device, test_loader):
  """Loads checkpoint into GaterNet and runs test on the test data.

  The checkpoint file must contain 'backbone_state_dict' and
  'gater_state_dict' entries.  Both subnetworks are wrapped in
  nn.DataParallel before evaluation, so checkpoint keys are expected
  without (or with stripped) 'module.' prefixes.
  """
  # Read the whole file into memory, then let torch.load parse the buffer
  # on CPU; weights are moved to `device` after wrapping below.
  with open(args.checkpoint_file, 'rb') as fin:
    inbuffer = io.BytesIO(fin.read())
    state_dict = torch.load(inbuffer, map_location='cpu')
  print('Successfully load checkpoint file.\n')
  backbone = Backbone(depth=args.backbone_depth, num_classes=10)
  print('Loading checkpoint weights into backbone.')
  backbone = load_from_state(state_dict['backbone_state_dict'], backbone)
  backbone = nn.DataParallel(backbone).to(device)
  print('Backbone is ready after loading checkpoint and moving to device:')
  print(backbone)
  n_params_b = sum(
      [param.view(-1).size()[0] for param in backbone.parameters()])
  print('Number of parameters in backbone: {}\n'.format(n_params_b))
  # The gater's output size must match the number of gates the backbone
  # expects; gate_size lives on the unwrapped module (hence .module).
  gater = Gater(depth=20,
                bottleneck_size=8,
                gate_size=backbone.module.gate_size)
  print('Loading checkpoint weights into gater.')
  gater = load_from_state(state_dict['gater_state_dict'], gater)
  gater = nn.DataParallel(gater).to(device)
  print('Gater is ready after loading checkpoint and moving to device:')
  print(gater)
  n_params_g = sum(
      [param.view(-1).size()[0] for param in gater.parameters()])
  print('Number of parameters in gater: {}'.format(n_params_g))
  print('Total number of parameters: {}\n'.format(n_params_b + n_params_g))
  print('Running test on test data.')
  test(backbone, gater, device, test_loader)
def parse_flags():
  """Parses command-line flags for the GaterNet evaluation script."""
  flag_parser = argparse.ArgumentParser(description='GaterNet')
  flag_parser.add_argument('--no-cuda', action='store_true', default=False,
                           help='disables CUDA training')
  flag_parser.add_argument('--backbone-depth', type=int, default=20,
                           help='resnet depth of the backbone subnetwork')
  flag_parser.add_argument('--checkpoint-file', type=str, default=None,
                           help='checkpoint file to run test')
  flag_parser.add_argument('--data-dir', type=str, default=None,
                           help='the directory for storing data')
  return flag_parser.parse_args()
def main(args):
  """Entry point: sets up the device, builds the CIFAR-10 test loader and
  evaluates the GaterNet checkpoint given in ``args``.
  """
  print('Input arguments:\n{}\n'.format(args))
  use_cuda = not args.no_cuda and torch.cuda.is_available()
  print('use_cuda: {}'.format(use_cuda))
  device = torch.device('cuda' if use_cuda else 'cpu')
  torch.backends.cudnn.benchmark = True
  print('device: {}'.format(device))
  # makedirs creates intermediate directories and is race-free with
  # exist_ok=True; the original isdir+mkdir pair failed for nested or
  # concurrently created paths.
  os.makedirs(args.data_dir, exist_ok=True)
  kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
  # CIFAR-10 per-channel mean/std used for input normalization.
  normalize_mean = [0.4914, 0.4822, 0.4465]
  normalize_std = [0.2470, 0.2435, 0.2616]
  test_loader = torch.utils.data.DataLoader(
      datasets.CIFAR10(
          args.data_dir,
          train=False,
          download=True,
          transform=transforms.Compose([
              transforms.ToTensor(),
              transforms.Normalize(normalize_mean, normalize_std)])
      ),
      batch_size=1000, shuffle=False, drop_last=False, **kwargs)
  print('Successfully get data loader.')
  run(args, device, test_loader)
if __name__ == '__main__':
  # Script entry point: parse CLI flags and run the evaluation.
  main(parse_flags())
| 2.1875 | 2 |
client.pyw | thatfuckingbird/hydrus-websocket-server | 1,417 | 17135 | <filename>client.pyw
#!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
from hydrus import hydrus_client
if __name__ == '__main__':
    # Launch the Hydrus client application (GUI boot sequence lives in
    # hydrus.hydrus_client).
    hydrus_client.boot()
| 1.625 | 2 |
fixEngine/fixEngine.py | HNGlez/ExchangeConnector | 0 | 17136 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ExchangeConnector fixEngine
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import simplefix
import threading
import logging
import time
import sys
import configparser
from fixClientMessages import FixClientMessages
from connectionHandler import FIXConnectionHandler, SocketConnectionState
class FixEngine(FIXConnectionHandler):
    """FIX session engine: drives logon, heartbeats and session-level
    message handling over an open socket connection."""

    def __init__(self, config, reader, writer, messageListener):
        FIXConnectionHandler.__init__(self, config, reader, writer, messageListener)
        self._config = config
        self._logout = False
        self._engineLogger.info(f"Socket Connection Open to {config['SocketHost']}:{config['SocketPort']}")
        # Message factory bound to this session's comp IDs and credentials.
        self.clientMessage = FixClientMessages(config['SenderCompID'], config['TargetCompID'], config['SenderPassword'], config['BeginString'], config.getint('HeartBeatInterval'))
        # Kick off the main engine loop on the running event loop.
        asyncio.ensure_future(self._handleEngine())

    def getConnectionState(self):
        """Return the current SocketConnectionState of this session."""
        return self._connectionState

    async def _sessionMessageHandler(self, message: simplefix.FixMessage) -> bool:
        """ Handle Session Message.

        Returns True when the message was consumed as a session-level
        (admin) message, False otherwise.
        """
        assert isinstance(message, simplefix.FixMessage)
        # NEED TO ADD HANDLING OF BUSINESS REJECTS
        msgType = message.get(simplefix.TAG_MSGTYPE)
        if msgType == simplefix.MSGTYPE_LOGON: # Handle logon
            if self._connectionState == SocketConnectionState.LOGGED_IN:
                # NOTE(review): "looged" is a typo in this log message.
                self._engineLogger.warning(f"{self._config['SenderCompID']} already looged in -> Ignoring Login Request.")
            else:
                self._connectionState = SocketConnectionState.LOGGED_IN
                self._engineLogger.info(f"{self._config['SenderCompID']} session -> LOGON")
                # Adopt the heartbeat interval negotiated by the counterparty.
                self._config['HeartBeatInterval'] = str(message.get(simplefix.TAG_HEARTBTINT).decode())
            return True
        elif self._connectionState == SocketConnectionState.LOGGED_IN:
            if msgType == simplefix.MSGTYPE_TEST_REQUEST: # Send test heartbeat when requested
                msg = self.clientMessage.sendHeartbeat()
                # Echo the TestReqID so the peer can correlate the response.
                msg.append_pair(simplefix.TAG_TESTREQID, message.get(simplefix.TAG_TESTREQID))
                await self.sendMessage(msg)
                return True
            elif msgType == simplefix.MSGTYPE_LOGOUT: # Handle Logout
                self._connectionState = SocketConnectionState.LOGGED_OUT
                self._engineLogger.info(f"{self._config['SenderCompID']} session -> LOGOUT")
                self.handleClose()
                return True
            elif msgType == simplefix.MSGTYPE_HEARTBEAT:
                # Respond to a peer heartbeat with one of our own.
                msg = self.clientMessage.sendHeartbeat()
                msg.append_pair(simplefix.TAG_TESTREQID, message.get(simplefix.TAG_TESTREQID))
                await self.sendMessage(msg)
                return True
            elif message.get(simplefix.TAG_RESETSEQNUMFLAG) == simplefix.RESETSEQNUMFLAG_YES: # If ResetSeqNum = Y Then Reset sequence
                self._session.resetSeqNo()
                self._engineLogger.info("Resetting Sequence Number to 1")
                return True
            else:
                return False
        else:
            self._engineLogger.warning(f"Cannot process message. {self._config['SenderCompID']} is not logged in.")
            return False

    async def _handleEngine(self):
        """Main session loop: log on, then read messages and enforce the
        heartbeat schedule until the socket disconnects; re-log-on after a
        logout while the socket is still connected."""
        await self.logon()
        while self._connectionState != SocketConnectionState.DISCONNECTED:
            if self._connectionState != SocketConnectionState.LOGGED_OUT:
                await self.readMessage()
                await self.expectedHeartbeat(self._config.getint('HeartBeatInterval'))
            else:
                await self.logon()
class FIXClient:
    """Owns the socket connection and the FixEngine session for one gateway.

    Gateway-specific settings are read from an INI configuration file; on
    startClient() the TCP connection is opened and the streams are handed
    to a FixEngine instance.
    """

    def __init__(self, configFile, gateway, listener):
        self._config = self.loadConfig(configFile, gateway)
        self._reader = None
        self._writer = None
        self._client = None
        self._messageListener = listener

    async def startClient(self, loop):
        """ Creates Socket Connection and Runs Main Loop."""
        self._reader, self._writer = await asyncio.open_connection(self._config["SocketHost"], self._config["SocketPort"], loop=loop)
        self._connectionState = SocketConnectionState.CONNECTED
        self._client = FixEngine(self._config, self._reader, self._writer, self._messageListener)

    def loadConfig(self, filePath, gateway):
        """Parse the INI file at ``filePath`` and return the section for ``gateway``.

        Raises Exception if the section is missing.
        """
        # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
        # ConfigParser is the drop-in replacement.
        parser = configparser.ConfigParser()
        parser.read(filePath)
        if parser.has_section(gateway):
            return parser[gateway]
        else:
            raise Exception(f"{gateway} section not found in configuration file {filePath}")

    def getClient(self):
        """Return the active FixEngine instance (None before startClient)."""
        return self._client
plugins/session_list/views.py | farazkhanfk7/ajenti | 1 | 17137 | from jadi import component
from aj.api.http import url, HttpPlugin
from aj.auth import authorize
from aj.api.endpoint import endpoint, EndpointError
import aj
import gevent
@component(HttpPlugin)
class Handler(HttpPlugin):
    """Ajenti HTTP plugin exposing the list of active sessions."""
    def __init__(self, context):
        # AppContext; gives access to the worker and shared state.
        self.context = context
    @url(r'/api/session_list/list')
    @endpoint(api=True)
    def handle_api_list_sessions(self, http_context):
        """GET /api/session_list/list -> the global session registry.

        Asks the worker to refresh the session list, then waits briefly so
        the refresh can land before reading aj.sessions.
        """
        if http_context.method == 'GET':
            self.context.worker.update_sessionlist()
            # NOTE(review): fixed 1s sleep assumes the refresh completes in
            # time -- confirm there is no explicit completion signal to await.
            gevent.sleep(1)
            return aj.sessions
now/collection/prov_execution/argument_captors.py | CrystalMei/Prov_Build | 2 | 17138 | <filename>now/collection/prov_execution/argument_captors.py<gh_stars>1-10
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# Copyright (c) 2018, 2019, 2020 President and Fellows of Harvard College.
# This file is part of ProvBuild.
"""Capture arguments from calls"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import weakref
import itertools
import inspect
from future.utils import viewitems
from ...utils.functions import abstract
from ..prov_definition.utils import ClassDef, Assert, With, Decorator
WITHOUT_PARAMS = (ClassDef, Assert, With)
class ArgumentCaptor(object):  # pylint: disable=too-few-public-methods
    """Collect arguments during calls.

    Abstract base: subclasses implement capture() to record the argument
    values of the frame that was just entered.
    """
    def __init__(self, provider):
        # Keep only a weak proxy to avoid a reference cycle with the
        # provider that owns this captor.
        self.provider = weakref.proxy(provider)
    def capture(self, frame, activation):  # pylint: disable=unused-argument, no-self-use
        """Abstract method for capture"""
        abstract()
class ProfilerArgumentCaptor(ArgumentCaptor):  # pylint: disable=too-few-public-methods
    """Collect arguments for profiler"""
    def __init__(self, *args, **kwargs):
        super(ProfilerArgumentCaptor, self).__init__(*args, **kwargs)
        # Locals of the most recently captured frame.
        self.f_locals = {}
    def capture(self, frame, activation):
        """Store argument object values
        Arguments:
        frame -- current frame, after trace call
        activation -- current activation
        """
        provider = self.provider
        self.f_locals = values = frame.f_locals
        code = frame.f_code
        names = code.co_varnames
        nargs = code.co_argcount
        # Capture args: the first co_argcount varnames are the positional
        # (and keyword) parameters, in declaration order.
        for var in itertools.islice(names, 0, nargs):
            try:
                provider.object_values.add(
                    var,
                    provider.serialize(values[var]), "ARGUMENT", activation.id)
                activation.args.append(var)
            except Exception:  # pylint: disable=broad-except
                # ignoring any exception during capture
                pass
        # Capture *args: present only if the CO_VARARGS flag is set; its
        # name follows the regular parameters in co_varnames.
        if code.co_flags & inspect.CO_VARARGS:  # pylint: disable=no-member
            varargs = names[nargs]
            provider.object_values.add(
                varargs,
                provider.serialize(values[varargs]), "ARGUMENT", activation.id)
            activation.starargs.append(varargs)
            nargs += 1
        # Capture **kwargs: each passed keyword is recorded individually.
        if code.co_flags & inspect.CO_VARKEYWORDS:  # pylint: disable=no-member
            kwargs = values[names[nargs]]
            for key in kwargs:
                provider.object_values.add(
                    key, provider.serialize(kwargs[key]), "ARGUMENT",
                    activation.id)
            activation.kwargs.append(names[nargs])
class InspectProfilerArgumentCaptor(ArgumentCaptor):  # pylint: disable=too-few-public-methods
    """This Argument Captor uses the inspect.getargvalues that is slower
    because it considers the existence of anonymous tuple
    """
    def capture(self, frame, activation):
        """Store argument object values
        Arguments:
        frame -- current frame, after trace call
        activation -- current activation
        """
        provider = self.provider
        # ToDo #75: inspect.getargvalues was deprecated on Python 3.5
        # ToDo #75: use inspect.signature instead
        (args, varargs, keywords, values) = inspect.getargvalues(frame)
        for arg in args:
            try:
                provider.object_values.add(
                    arg, provider.serialize(values[arg]), "ARGUMENT",
                    activation.id)
                activation.args.append(arg)
            except Exception:  # ignoring any exception during capture  # pylint: disable=broad-except
                pass
        # *args parameter, if declared.
        if varargs:
            provider.object_values.add(
                varargs, provider.serialize(values[varargs]), "ARGUMENT",
                activation.id)
            activation.starargs.append(varargs)
        # **kwargs parameter: record each passed keyword individually.
        if keywords:
            for key, value in viewitems(values[keywords]):
                provider.object_values.add(
                    key, provider.serialize(value), "ARGUMENT", activation.id)
                activation.kwargs.append(key)
class SlicingArgumentCaptor(ProfilerArgumentCaptor):
    """Create Slicing Variables for Arguments and dependencies between
    Parameters and Arguments"""
    def __init__(self, *args, **kwargs):
        super(SlicingArgumentCaptor, self).__init__(*args, **kwargs)
        # State of the call currently being matched (set in capture()).
        self.caller, self.activation = None, None
        self.filename, self.line = "", 0
        self.frame = None
    def match_arg(self, passed, arg):
        """Match passed arguments with param
        Arguments:
        passed -- Call Variable name
        arg -- Argument name
        """
        provider = self.provider
        activation = self.activation
        context = activation.context
        # Reuse the slicing variable for this parameter if one was already
        # created for the activation; otherwise create it now.
        if arg in context:
            act_var = context[arg]
        else:
            vid = provider.add_variable(activation.id, arg,
                                        self.line, self.f_locals, "param")
            act_var = provider.variables[vid]
            context[arg] = act_var
        if passed:
            # Link the callee's parameter variable to the caller-side
            # variable that was passed in.
            caller = self.caller
            target = provider.find_variable(caller, passed, self.filename)
            if target is not None:
                provider.dependencies.add(
                    act_var.activation_id, act_var.id,
                    target.activation_id, target.id, "parameter"
                )
    def match_args(self, params, arg):
        """Match passed argument with param
        Arguments:
        params -- Call Variable names
        arg -- Argument name
        """
        for param in params:
            self.match_arg(param, arg)
    def _defined_call(self, activation):
        """Return a call extracted from AST if it has arguments
        or None, otherwise
        Arguments:
        activation -- current activation
        """
        if not activation.with_definition or activation.is_main:
            return
        if activation.is_comprehension():
            return
        provider = self.provider
        lineno, lasti = activation.line, activation.lasti
        filename = activation.filename
        function_name = activation.name
        # Implicit __enter__/__exit__ calls from `with` blocks and implicit
        # iterator calls carry no user-written parameters.
        if (function_name == "__enter__" and
                lasti in provider.with_enter_by_lasti[filename][lineno]):
            activation.has_parameters = False
            return
        if (function_name == "__exit__" and
                lasti in provider.with_exit_by_lasti[filename][lineno]):
            activation.has_parameters = False
            return
        if lasti in provider.iters[filename][lineno]:
            activation.has_parameters = False
            provider.next_is_iter = True
            return
        try:
            call = provider.call_by_lasti[filename][lineno][lasti]
        except (IndexError, KeyError):
            # call not found
            # ToDo: show in dev-mode
            return
        if (isinstance(call, WITHOUT_PARAMS) or
                (isinstance(call, Decorator) and not call.is_fn)):
            activation.has_parameters = False
            return
        return call
    def capture(self, frame, activation):  # pylint: disable=too-many-locals
        """Match call parameters to function arguments
        Arguments:
        frame -- current frame, after trace call
        activation -- current activation
        """
        super(SlicingArgumentCaptor, self).capture(frame, activation)
        provider = self.provider
        self.frame = frame
        call = self._defined_call(activation)
        if not call:
            return
        self.filename = activation.filename
        self.line = frame.f_lineno
        self.caller, self.activation = provider.current_activation, activation
        match_args, match_arg = self.match_args, self.match_arg
        act_args_index = activation.args.index
        # Check if it has starargs and kwargs
        # sub is a negative offset so overflowing positionals map onto the
        # *args / **kwargs slots at the end of `order`.
        sub = -[bool(activation.starargs), bool(activation.kwargs)].count(True)
        order = activation.args + activation.starargs + activation.kwargs
        activation_arguments = len(order) + sub
        used = [0 for _ in order]
        j = 0
        # Match positional arguments
        for i, call_arg in enumerate(call.args):
            if call_arg:
                j = i if i < activation_arguments else sub
                act_arg = order[j]
                match_args(call_arg, act_arg)
                used[j] += 1
        # Match keyword arguments
        for act_arg, call_arg in viewitems(call.keywords):
            try:
                i = act_args_index(act_arg)
                match_args(call_arg, act_arg)
                used[i] += 1
            except ValueError:
                # Keyword not a declared parameter: it lands in **kwargs.
                for kwargs in activation.kwargs:
                    match_args(call_arg, kwargs)
        # Match kwargs, starargs
        # ToDo #75: Python 3.5 supports multiple keyword arguments and starargs
        # ToDo #75: improve matching
        # Ignore default params
        # Do not match f(**kwargs) with def(*args)
        args = [(k, order[k]) for k in range(len(used)) if not used[k]]
        for star in call.kwargs + call.starargs:
            for i, act_arg in args:
                match_args(star, act_arg)
                used[i] += 1
        # Create variables for unmatched arguments
        args = [(k, order[k]) for k in range(len(used)) if not used[k]]
        for i, act_arg in args:
            match_arg(None, act_arg)
        # Create dependencies between all parameters
        # ToDo #35: improve dependencies to use references.
        # Do not create dependencies between all parameters
        all_args = list(provider.find_variables(
            self.caller, call.all_args(), activation.filename))
        if all_args:
            graybox = provider.create_func_graybox(activation.id, activation.line)
            provider.add_dependencies(graybox, all_args)
            provider.add_inter_dependencies(frame.f_back.f_locals, all_args,
                                            self.caller, activation.line,
                                            [(graybox, graybox.name)])
# HackerRank "Dictionaries and Maps": read N "<name> <number>" entries,
# then answer name-lookup queries until stdin is exhausted.
N = int(input())
entry = [input().split() for _ in range(N)]
phoneBook = {name: number for name, number in entry}
while True:
    try:
        name = input()
    except EOFError:
        # End of queries; a bare `except` here would also have swallowed
        # KeyboardInterrupt/SystemExit.
        break
    if name in phoneBook:
        print(f"{name}={phoneBook[name]}")
    else:
        print("Not found")
| 3.90625 | 4 |
10. Recurrent Neural Network/10-1) Recurrent Neural Network, RNN.py | choijiwoong/-ROKA-torch-tutorial-files | 0 | 17140 | <gh_stars>0
# Sequence model (not to be confused with a Recursive Neural Network).
# Key concepts: memory cell (RNN cell) and hidden state.
# Architectures: one-to-many (image captioning), many-to-one (sentiment
# classification / spam detection), many-to-many (chat bot).
# 2) create RNN in python
import numpy as np
timesteps=10  # number of timesteps (sentence length)
input_size=4  # input dimension (word-vector size)
hidden_size=8  # memory-cell capacity (hidden-state size)
inputs=np.random.random((timesteps, input_size))  # 2D tensor holding the inputs
hidden_state_t=np.zeros((hidden_size,))  # hidden state of size hidden_size, zero-initialized
print(hidden_state_t)
Wx=np.random.random((hidden_size, input_size))  # input weights
Wh=np.random.random((hidden_size, hidden_size))  # hidden-state weights
b=np.random.random((hidden_size,))
print(np.shape(Wx))
print(np.shape(Wh))
print(np.shape(b))
total_hidden_states=[]
# memory cell work
for input_t in inputs:
    output_t=np.tanh(np.dot(Wx,input_t)+np.dot(Wh,hidden_state_t)+b)
    total_hidden_states.append(list(output_t))  # accumulate the hidden state of every timestep
    print(np.shape(total_hidden_states))
    hidden_state_t=output_t
total_hidden_states=np.stack(total_hidden_states, axis=0)  # stack for clean output
print(total_hidden_states)
# 3) nn.RNN() in pytorch
import torch
import torch.nn as nn
input_size=5  # input size
hidden_size=8  # hidden-state size
inputs=torch.Tensor(1, 10, 5)  # batch size 1, 10 timesteps, 5-dim input vectors
cell=nn.RNN(input_size, hidden_size, batch_first=True)  # first dimension of the input tensor is the batch size
outputs, _status=cell(inputs)  # returns two outputs: hidden states of all timesteps, and the final hidden state
print(outputs.shape)
# 4) Deep Recurrent Neural Network
inputs=torch.Tensor(1, 10, 5)
cell=nn.RNN(input_size=5, hidden_size=8, num_layers=2, batch_first=True)  # two stacked RNN layers (cells)
# NOTE(review): cell(inputs) is never invoked in this section, so the two
# prints below show the stale shapes from section 3 -- likely a tutorial bug.
print(outputs.shape)
print(_status.shape)  # (num layers, batch size, hidden size)
# 5) Bidirectional Recurrent Neural Network
inputs=torch.Tensor(1, 10, 5)
cell=nn.RNN(input_size=5, hidden_size=8, num_layers=2, batch_first=True, bidirectional=True)  # bidirectional RNN
outputs, _status=cell(inputs)
print(outputs.shape)  # hidden size doubled because both directions are concatenated
print(_status.shape)  # number of layers doubled (x2 directions)
| 2.796875 | 3 |
WeLearn/M3-Python/L3-Python_Object/pet.py | munoz196/moonyosCSSIrep | 0 | 17141 | <filename>WeLearn/M3-Python/L3-Python_Object/pet.py
# Example pet record as a plain dict (not used below; the Pet class models
# the same data with behaviour).
pet = {
    "name":"Doggo",
    "animal":"dog",
    "species":"labrador",
    "age":"5"
}
class Pet(object):
    """A simple pet with a name, age, species, hunger state and mood."""

    def __init__(self, name, age, animal):
        self.name = name
        self.age = age
        self.animal = animal
        # Use is_hungry consistently: eat() and the calling script read and
        # write self.is_hungry, but the original set self.hungry here,
        # leaving is_hungry undefined until a caller assigned it.
        self.is_hungry = False
        self.mood = "happy"

    def eat(self):
        """Feed the pet; eating while not hungry makes it lethargic."""
        print("> %s is eating..." % self.name)
        if self.is_hungry:
            self.is_hungry = False
        else:
            print("> %s may have eaten too much." % self.name)
            # Original had a stray trailing space in "lethargic ".
            self.mood = "lethargic"
# Demo: create a pet, mark it hungry, feed it twice and watch the mood.
my_pet= Pet("Fido", 3, "dog")
my_pet.is_hungry= True
print("is my pet hungry? %s"% my_pet.is_hungry)
my_pet.eat()
print("how about now? %s" % my_pet.is_hungry)
print ("My pet is feeling %s" % my_pet.mood)
| 4.28125 | 4 |
src/repositories/example_repo.py | pybokeh/dagster-examples | 0 | 17142 | <filename>src/repositories/example_repo.py
from dagster import job, repository
from ops.sklearn_ops import (
fetch_freehand_text_to_generic_data,
separate_features_from_target_label,
label_encode_target,
count_tfid_transform_train,
count_tfid_transform_test,
create_sgd_classifier_model,
predict
)
@ job(
    description="Scikit-Learn multi-class text classification: classify free-hand text computer skills descriptions to generic descriptions"
)
def text_classify():
    """Dagster job wiring: fetch train/test data, split features from the
    target label, fit count+TF-IDF vectorizers and a label encoder on the
    training set, train an SGD classifier, and predict on the test set."""
    X_train, y_train = separate_features_from_target_label.alias("separate_features_from_target_train")(
        fetch_freehand_text_to_generic_data.alias("fetch_training_data")()
    )
    df_test = fetch_freehand_text_to_generic_data.alias("fetch_test_data")()
    y_encoded_train, label_encoder_train = label_encode_target.alias("label_encode_train")(y_train)
    X_encoded_train, count_vect, tfid_vect = count_tfid_transform_train.alias("count_tfid_transform_train")(X_train)
    clf = create_sgd_classifier_model(X_encoded_train, y_encoded_train)
    # Test data must be transformed with the vectorizers fit on training data.
    X_encoded_test = count_tfid_transform_test(df_test, count_vect, tfid_vect)
    predict(X_encoded_test, clf, label_encoder_train)
@repository
def examples_repo():
    """Dagster repository exposing the jobs defined in this module."""
    return [
        text_classify,
    ]
| 2.3125 | 2 |
exercicios/Maior_e_Menor_Valores.py | jeversonneves/Python | 0 | 17143 | <filename>exercicios/Maior_e_Menor_Valores.py
# Read numbers until the user answers something other than 'S' (sim/yes),
# then report how many were read, their sum and average, and the largest
# and smallest values.  User-facing strings are in Portuguese.
resposta = 'S'
soma = quant = media = maior = menor = 0
while resposta in 'Ss':
    n = int(input('Digite um número: '))
    soma += n
    quant += 1
    if quant == 1:
        # The first number initializes both extremes.
        maior = menor = n
    else:
        if n > maior:
            maior = n
        elif n < menor:
            menor = n
    # Only the first letter of the answer matters; NOTE(review): empty
    # input raises IndexError here -- confirm that is acceptable.
    resposta = str(input('Quer continuar? [S/N]: ')).upper().strip()[0]
media = soma / quant
print('Você digitou {} números e a soma foi de {} e media de {}.'.format(quant, soma, media))
print('O maior número {} e o menor número {}.'.format(maior, menor))
| 3.984375 | 4 |
rbc/opening/opening.py | rebuildingcode/hardware | 0 | 17144 | <gh_stars>0
from shapely.geometry import Polygon
from ..point import Point
class Opening(Polygon):
    """
    Openings are rectangular only.

    A width x height axis-aligned rectangle anchored at the origin,
    represented as a shapely Polygon.

    NOTE(review): shapely 2.x makes geometries immutable, so subclassing
    Polygon and assigning instance attributes may fail there -- confirm
    the pinned shapely version is < 2.0.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Counter-clockwise corners: origin, up, across, down.
        points = [
            Point(0, 0), Point(0, height), Point(width, height), Point(width, 0)
        ]
        super().__init__(shell=[(pt.x, pt.y) for pt in points])
    def plot(self):
        """
        - [ ] plot plan view
        - [ ] plot elevation view
        """
        pass  # pragma: no cover
AI/Housing Prices Prediction/HousePricesNN.py | n0rel/self | 0 | 17145 | <filename>AI/Housing Prices Prediction/HousePricesNN.py
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from numpy.random import uniform
import matplotlib.pyplot as plt
def relu(x):
    """Rectified linear unit: x where x > 0, else 0 (elementwise via masking)."""
    positive_mask = x > 0
    return x * positive_mask
def relu_deriv(x):
    """Derivative of ReLU: 1 where x > 0, else 0."""
    positive_mask = x > 0
    return positive_mask * 1
class NeuralNetwork:
    """
    Our NN will predict a housing price given 6 parameters, meaning:
    * Input Size: 6
    * Error Function: MSE
    Hidden Layer:
    * Neurons: Param
    * Input: 6
    * Output: Param
    * Activation: ReLU
    Output Layer:
    * Neurons: 1
    * Input: Param
    * Output: 1
    * Activation: Linear
    """
    def __init__(self, hidden_neurons, alpha):
        # alpha is the learning rate.
        self.hidden_neurons = hidden_neurons
        self.alpha = alpha
        # He-style uniform init for the ReLU hidden layer (bound sqrt(2/fan_in),
        # fan_in = 6 inputs); small uniform init for the linear output layer.
        self.hidden_weights = uniform(low=(-1) * np.sqrt(2 / 6), high=np.sqrt(2 / 6), size=(hidden_neurons, 6))
        self.output_weights = uniform(low=-0.1, high=0.1, size=(1, hidden_neurons))
    def f_propagate(self, x):
        # Forward pass; returns [hidden activations g, prediction, pre-activation z1].
        z1 = x.dot(self.hidden_weights.transpose())
        g = relu(z1)
        output = g.dot(self.output_weights.transpose())
        return [g, output, z1]
    def fit(self, x, y, epochs):
        """Full-batch gradient descent on MSE; plots mean absolute error per epoch."""
        # ERROR CHECKING
        error = []
        for epoch in range(epochs):
            print(epoch)
            # ERROR CHECKING
            error_sum = 0
            # Arrays used to store incrementing weight changes
            hidden_w_sum = np.zeros(shape=self.hidden_weights.shape)
            output_w_sum = np.zeros(shape=self.output_weights.shape)
            for sample_x, sample_y in zip(x, y):
                forward_values = self.f_propagate(sample_x)
                # Output-layer gradient: (prediction - target) * hidden activations.
                output_delta = (forward_values[1] - sample_y) * forward_values[0]
                output_delta1 = forward_values[1] - sample_y
                # hidden_delta1 = (output_delta1 * self.output_weights).dot(np.outer(relu_deriv(forward_values[2]), sample_x)) # Shape: (Neurons,6)
                # Hidden-layer gradient via backprop through the ReLU.
                hidden_delta1 = np.outer(output_delta1 * self.output_weights * relu_deriv(forward_values[2]), sample_x)
                output_w_sum += output_delta
                hidden_w_sum += hidden_delta1
                # ERROR CHECKING
                error_sum += abs(sample_y - forward_values[1])
            # ERROR CHECKING
            error.append(error_sum / len(x))
            # Apply the averaged gradients once per epoch (full-batch update).
            self.output_weights -= self.alpha * output_w_sum / len(x)
            self.hidden_weights -= self.alpha * hidden_w_sum / len(x)
        # NOTE: plt.show() blocks until the error plot window is closed.
        plt.plot(error)
        plt.show()
# Import Data
# Scale inputs and targets to [-1, 1], split into train/test, train the
# network and report prediction error on a sample of the test set.
training_amount = 4000
input_scaler = MinMaxScaler((-1, 1))
output_scaler = MinMaxScaler((-1, 1))
data = pd.read_csv('USA_Housing.csv').drop(columns=['Address'])
# Prepend a bias column of ones to every sample.
data = np.insert(data.to_numpy(), 0, np.ones((1, len(data))), axis=1)
x_scaled, y_scaled = input_scaler.fit_transform(data[:, :6]), output_scaler.fit_transform(data[:, 6:7])
x_train, y_train = x_scaled[:training_amount], y_scaled[:training_amount]
x_test, y_test = x_scaled[training_amount:], y_scaled[training_amount:]
hidden_neurons = 10
# Create NN & train it
nn = NeuralNetwork(hidden_neurons, 0.7)
nn.fit(x_train, y_train, epochs=75)
error = 0
amount_to_check = 20
for x, y in zip(x_test[:amount_to_check, :], y_test[:amount_to_check]):
    error += abs(output_scaler.inverse_transform(y.reshape(-1, 1))[0][0] -
                 output_scaler.inverse_transform(nn.f_propagate(x)[1].reshape(-1, 1))[0][0])
    print(
        f"{output_scaler.inverse_transform(nn.f_propagate(x)[1].reshape(-1, 1))[0][0]} -> {output_scaler.inverse_transform(y.reshape(-1, 1))[0][0]}")
# NOTE(review): only amount_to_check samples contribute to `error`, but the
# average divides by len(x_test) -- confirm whether this is intentional.
print(f"{(error / len(x_test)):.9f}")
# Dead reference implementation kept below as a string literal.
"""
# Keras Version of NN
model = keras.models.Sequential()
model.add(keras.layers.Dense(hidden_neurons, input_dim=5, activation='relu', kernel_initializer='he_normal'))
model.add(keras.layers.Dense(1, input_dim=hidden_neurons, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
history = model.fit(x_train, y_train, epochs=10, batch_size=10)
plt.plot(history.history['mse'])
plt.show()
for x, y in zip(model.predict(x_test), y_test):
    print(f"{output_scaler.inverse_transform(y.reshape(-1, 1))[0][0]} -> {output_scaler.inverse_transform(x.reshape(-1, 1))[0][0]}")
"""
| 4.125 | 4 |
tests/test_export_keyword_template_catalina_10_15_4.py | PabloKohan/osxphotos | 0 | 17146 | <filename>tests/test_export_keyword_template_catalina_10_15_4.py
import pytest
from osxphotos._constants import _UNKNOWN_PERSON
# Fixture data describing the expected contents of the bundled Photos 10.15.4
# test library: folder/album hierarchy, album memberships and photo UUIDs.
PHOTOS_DB = "./tests/Test-10.15.4.photoslibrary/database/photos.db"
TOP_LEVEL_FOLDERS = ["Folder1"]
TOP_LEVEL_CHILDREN = ["SubFolder1", "SubFolder2"]
FOLDER_ALBUM_DICT = {"Folder1": [], "SubFolder1": [], "SubFolder2": ["AlbumInFolder"]}
ALBUM_NAMES = ["Pumpkin Farm", "AlbumInFolder", "Test Album", "Test Album"]
# Parent folder of each album (None = top level).
ALBUM_PARENT_DICT = {
    "Pumpkin Farm": None,
    "AlbumInFolder": "SubFolder2",
    "Test Album": None,
}
# Full folder path (outermost first) for each album.
ALBUM_FOLDER_NAMES_DICT = {
    "Pumpkin Farm": [],
    "AlbumInFolder": ["Folder1", "SubFolder2"],
    "Test Album": [],
}
ALBUM_LEN_DICT = {"Pumpkin Farm": 3, "AlbumInFolder": 2, "Test Album": 1}
ALBUM_PHOTO_UUID_DICT = {
    "Pumpkin Farm": [
        "F12384F6-CD17-4151-ACBA-AE0E3688539E",
        "D79B8D77-BFFC-460B-9312-034F2877D35B",
        "1EB2B765-0765-43BA-A90C-0D0580E6172C",
    ],
    "Test Album": [
        "F12384F6-CD17-4151-ACBA-AE0E3688539E",
        "D79B8D77-BFFC-460B-9312-034F2877D35B",
    ],
    "AlbumInFolder": [
        "3DD2C897-F19E-4CA6-8C22-B027D5A71907",
        "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    ],
}
# Photos referenced individually by the tests below.
UUID_DICT = {
    "two_albums": "F12384F6-CD17-4151-ACBA-AE0E3688539E",
    "in_album": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
    "xmp": "F12384F6-CD17-4151-ACBA-AE0E3688539E",
}
def test_exiftool_json_sidecar_keyword_template_long(caplog):
    """Keyword templates longer than _MAX_IPTC_KEYWORD_LEN are kept in the
    sidecar but trigger a warning log."""
    import osxphotos
    from osxphotos._constants import _MAX_IPTC_KEYWORD_LEN
    import json

    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos = photosdb.photos(uuid=[UUID_DICT["in_album"]])

    json_expected = json.loads(
        """
    [{"_CreatedBy": "osxphotos, https://github.com/RhetTbull/osxphotos",
    "EXIF:ImageDescription": "Bride Wedding day",
    "XMP:Description": "Bride Wedding day",
    "XMP:TagsList": ["wedding", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"],
    "IPTC:Keywords": ["wedding", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"],
    "XMP:PersonInImage": ["Maria"],
    "XMP:Subject": ["wedding", "Maria"],
    "EXIF:DateTimeOriginal": "2019:04:15 14:40:24",
    "EXIF:OffsetTimeOriginal": "-04:00", "EXIF:ModifyDate": "2019:11:24 13:09:17"}]
    """
    )[0]

    long_str = "x" * (_MAX_IPTC_KEYWORD_LEN + 1)
    json_got = photos[0]._exiftool_json_sidecar(keyword_template=[long_str])
    json_got = json.loads(json_got)[0]

    assert "Some keywords exceed max IPTC Keyword length" in caplog.text

    # Compare in both directions, sorting list values because element order
    # differs across Python versions.  (The original repeated the
    # expected->got loop twice; the duplicate was removed.)
    for k, v in json_got.items():
        if type(v) in (list, tuple):
            assert sorted(json_expected[k]) == sorted(v)
        else:
            assert json_expected[k] == v
    for k, v in json_expected.items():
        if type(v) in (list, tuple):
            assert sorted(json_got[k]) == sorted(v)
        else:
            assert json_got[k] == v
def test_exiftool_json_sidecar_keyword_template():
    """The {folder_album} keyword template renders the album's folder path
    into keywords/tags in the exiftool JSON sidecar."""
    import osxphotos
    import json

    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos = photosdb.photos(uuid=[UUID_DICT["in_album"]])

    json_expected = json.loads(
        """
    [{"_CreatedBy": "osxphotos, https://github.com/RhetTbull/osxphotos",
    "EXIF:ImageDescription": "Bride Wedding day",
    "XMP:Description": "Bride Wedding day",
    "XMP:TagsList": ["wedding", "Folder1/SubFolder2/AlbumInFolder"],
    "IPTC:Keywords": ["wedding", "Folder1/SubFolder2/AlbumInFolder"],
    "XMP:PersonInImage": ["Maria"],
    "XMP:Subject": ["wedding", "Maria"],
    "EXIF:DateTimeOriginal": "2019:04:15 14:40:24",
    "EXIF:OffsetTimeOriginal": "-04:00", "EXIF:ModifyDate": "2019:11:24 13:09:17"}]
    """
    )[0]

    json_got = photos[0]._exiftool_json_sidecar(keyword_template=["{folder_album}"])
    json_got = json.loads(json_got)[0]

    # some gymnastics to account for different sort order in different pythons
    # (The original repeated this got/expected loop pair three times over;
    # the duplicates were removed.)
    for k, v in json_got.items():
        if type(v) in (list, tuple):
            assert sorted(json_expected[k]) == sorted(v)
        else:
            assert json_expected[k] == v
    for k, v in json_expected.items():
        if type(v) in (list, tuple):
            assert sorted(json_got[k]) == sorted(v)
        else:
            assert json_got[k] == v
def test_xmp_sidecar_keyword_template():
    """Verify the XMP sidecar when keyword templates add {created.year} and {folder_album}.

    The expected digiKam:TagsList contains the photo's albums plus the year
    ("2018") rendered by the templates.
    """
    import osxphotos

    photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)
    photos = photosdb.photos(uuid=[UUID_DICT["xmp"]])
    xmp_expected = """<!-- Created with osxphotos https://github.com/RhetTbull/osxphotos -->
<x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="XMP Core 5.4.0">
<!-- mirrors Photos 5 "Export IPTC as XMP" option -->
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<rdf:Description rdf:about=""
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:photoshop="http://ns.adobe.com/photoshop/1.0/">
<dc:description>Girls with pumpkins</dc:description>
<dc:title>Can we carry this?</dc:title>
<!-- keywords and persons listed in <dc:subject> as Photos does -->
<dc:subject>
<rdf:Seq>
<rdf:li>Kids</rdf:li>
<rdf:li>Suzy</rdf:li>
<rdf:li>Katie</rdf:li>
</rdf:Seq>
</dc:subject>
<photoshop:DateCreated>2018-09-28T15:35:49.063000-04:00</photoshop:DateCreated>
</rdf:Description>
<rdf:Description rdf:about=""
xmlns:Iptc4xmpExt='http://iptc.org/std/Iptc4xmpExt/2008-02-29/'>
<Iptc4xmpExt:PersonInImage>
<rdf:Bag>
<rdf:li>Suzy</rdf:li>
<rdf:li>Katie</rdf:li>
</rdf:Bag>
</Iptc4xmpExt:PersonInImage>
</rdf:Description>
<rdf:Description rdf:about=""
xmlns:digiKam='http://www.digikam.org/ns/1.0/'>
<digiKam:TagsList>
<rdf:Seq>
<rdf:li>Kids</rdf:li>
<rdf:li>Pumpkin Farm</rdf:li>
<rdf:li>Test Album</rdf:li>
<rdf:li>2018</rdf:li>
</rdf:Seq>
</digiKam:TagsList>
</rdf:Description>
<rdf:Description rdf:about=""
xmlns:xmp='http://ns.adobe.com/xap/1.0/'>
<xmp:CreateDate>2018-09-28T15:35:49</xmp:CreateDate>
<xmp:ModifyDate>2018-09-28T15:35:49</xmp:ModifyDate>
</rdf:Description>
<rdf:Description rdf:about=""
xmlns:exif='http://ns.adobe.com/exif/1.0/'>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>"""
    # Lines are stripped and sorted so the comparison ignores indentation and
    # element ordering.
    xmp_expected_lines = [line.strip() for line in xmp_expected.split("\n")]
    xmp_got = photos[0]._xmp_sidecar(
        keyword_template=["{created.year}", "{folder_album}"]
    )
    xmp_got_lines = [line.strip() for line in xmp_got.split("\n")]
    # NOTE(review): zip() truncates to the shorter sequence, so extra lines in
    # the generated sidecar would go unchecked -- consider asserting equal
    # lengths (or comparing the sorted lists directly); confirm intent.
    for line_expected, line_got in zip(
        sorted(xmp_expected_lines), sorted(xmp_got_lines)
    ):
        assert line_expected == line_got
| 1.804688 | 2 |
video_level_models.py | pomonam/youtube-8m | 43 | 17147 | <reponame>pomonam/youtube-8m
# Copyright 2018 Deep Topology All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
# noinspection PyUnresolvedReferences
import pathmagic
from tensorflow import flags
import attention_modules
import tensorflow as tf
import tensorflow.contrib.slim as slim
import models
import math
FLAGS = flags.FLAGS
# Command-line flags consumed by the MoE-style models below. Note the flag
# values override the corresponding create_model() keyword defaults.
flags.DEFINE_integer(
    "moe_num_mixtures", 2,
    "The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
###############################################################################
# Baseline (Benchmark) models #################################################
###############################################################################
flags.DEFINE_float(
    "moe_l2", 1e-8,
    "L2 penalty for MoeModel.")
flags.DEFINE_integer(
    "moe_low_rank_gating", -1,
    "Low rank gating for MoeModel.")
flags.DEFINE_bool(
    "moe_prob_gating", False,
    "Prob gating for MoeModel.")
flags.DEFINE_string(
    "moe_prob_gating_input", "prob",
    "input Prob gating for MoeModel.")
class MoeModel(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Flag values override the keyword defaults above.
        num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
        low_rank_gating = FLAGS.moe_low_rank_gating
        l2_penalty = FLAGS.moe_l2
        gating_probabilities = FLAGS.moe_prob_gating
        gating_input = FLAGS.moe_prob_gating_input
        input_size = model_input.get_shape().as_list()[1]
        # NOTE(review): gating_remove_diag is not defined in this file --
        # presumably declared in another module of the project; confirm.
        remove_diag = FLAGS.gating_remove_diag
        # Gate network: either a single FC layer or a low-rank factorization
        # (two FC layers through a `low_rank_gating`-dim bottleneck).
        if low_rank_gating == -1:
            gate_activations = slim.fully_connected(
                model_input,
                vocab_size * (num_mixtures + 1),
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates")
        else:
            gate_activations1 = slim.fully_connected(
                model_input,
                low_rank_gating,
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates1")
            gate_activations = slim.fully_connected(
                gate_activations1,
                vocab_size * (num_mixtures + 1),
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates2")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        # Gate-weighted sum of expert probabilities; the (num_mixtures+1)-th
        # gate weight belongs to the implicit dummy expert predicting 0.
        probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities = tf.reshape(probabilities_by_class_and_batch,
                                   [-1, vocab_size])
        # Optional second-stage gating: modulate the per-class probabilities by
        # a learned sigmoid gate computed from either the probabilities
        # themselves ('prob') or the raw model input.
        if gating_probabilities:
            if gating_input == 'prob':
                gating_weights = tf.get_variable("gating_prob_weights",
                                                 [vocab_size, vocab_size],
                                                 initializer=tf.random_normal_initializer(
                                                     stddev=1 / math.sqrt(vocab_size)))
                gates = tf.matmul(probabilities, gating_weights)
            else:
                gating_weights = tf.get_variable("gating_prob_weights",
                                                 [input_size, vocab_size],
                                                 initializer=tf.random_normal_initializer(
                                                     stddev=1 / math.sqrt(vocab_size)))
                gates = tf.matmul(model_input, gating_weights)
            if remove_diag:
                # removes diagonals coefficients (a class does not gate itself)
                diagonals = tf.matrix_diag_part(gating_weights)
                gates = gates - tf.multiply(diagonals, probabilities)
            gates = slim.batch_norm(
                gates,
                center=True,
                scale=True,
                is_training=is_training,
                scope="gating_prob_bn")
            gates = tf.sigmoid(gates)
            probabilities = tf.multiply(probabilities, gates)
        return {"predictions": probabilities}
class FishMoeModel(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     filter_size=2,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Flag values override the keyword defaults.
        num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
        l2_penalty = FLAGS.moe_l2
        # Standard MoE head: per-class softmax gate over num_mixtures+1 experts
        # (the extra one is the implicit dummy expert predicting 0).
        gate_activations = slim.fully_connected(
            model_input,
            vocab_size * (num_mixtures + 1),
            activation_fn=None,
            biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="gates")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities = tf.reshape(probabilities_by_class_and_batch,
                                   [-1, vocab_size])
        probabilities = tf.layers.batch_normalization(probabilities, training=is_training)
        # NOTE(review): `fish_modules` is never imported in this file (only
        # `attention_modules` is) -- as written this raises NameError at graph
        # construction; confirm the intended module/import.
        fish_gate = fish_modules.FishGate(hidden_size=vocab_size,
                                          k=2,
                                          dropout_rate=0.9,
                                          is_training=is_training)
        probabilities = fish_gate.forward(probabilities)
        probabilities = tf.contrib.layers.layer_norm(probabilities)
        probabilities = tf.layers.dense(probabilities, vocab_size, use_bias=True, activation=tf.nn.softmax)
        return {"predictions": probabilities}
class FishMoeModel2(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     filter_size=2,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Flag values override the keyword defaults.
        num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
        l2_penalty = FLAGS.moe_l2
        # Same MoE head as FishMoeModel, but without the batch-norm /
        # layer-norm / final dense stages after the fish gate.
        gate_activations = slim.fully_connected(
            model_input,
            vocab_size * (num_mixtures + 1),
            activation_fn=None,
            biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="gates")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities = tf.reshape(probabilities_by_class_and_batch,
                                   [-1, vocab_size])
        # NOTE(review): `fish_modules` is never imported in this file (only
        # `attention_modules` is) -- this will raise NameError; confirm the
        # intended module/import.
        fish_gate = fish_modules.FishGate(hidden_size=vocab_size,
                                          k=filter_size,
                                          dropout_rate=0.8,
                                          is_training=is_training)
        probabilities = fish_gate.forward(probabilities)
        # probabilities = tf.layers.dense(probabilities, vocab_size, use_bias=True, activation=tf.nn.softmax)
        return {"predictions": probabilities}
class FishMoeModel4(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     filter_size=2,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # NOTE(review): despite the docstring, this variant is a plain 3-layer
        # fully-connected stack (no gate/expert mixture); `num_mixtures` is unused.
        num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
        l2_penalty = FLAGS.moe_l2
        # FC -> BN -> dropout (keep_prob=0.9), repeated three times; final
        # layer uses a sigmoid to produce per-class probabilities.
        fc1 = tf.layers.dense(model_input, vocab_size, activation=tf.nn.relu,
                              kernel_regularizer=slim.l2_regularizer(l2_penalty))
        fc1 = tf.layers.batch_normalization(fc1, training=is_training)
        if is_training:
            fc1 = tf.nn.dropout(fc1, keep_prob=0.9)
        fc2 = tf.layers.dense(fc1, vocab_size, activation=tf.nn.relu,
                              kernel_regularizer=slim.l2_regularizer(l2_penalty))
        fc2 = tf.layers.batch_normalization(fc2, training=is_training)
        if is_training:
            fc2 = tf.nn.dropout(fc2, keep_prob=0.9)
        fc3 = tf.layers.dense(fc2, vocab_size, activation=tf.nn.sigmoid,
                              kernel_regularizer=slim.l2_regularizer(l2_penalty))
        fc3 = tf.layers.batch_normalization(fc3, training=is_training)
        if is_training:
            fc3 = tf.nn.dropout(fc3, keep_prob=0.9)
        # NOTE(review): `fish_modules` is never imported in this file (only
        # `attention_modules` is) -- this will raise NameError; confirm the
        # intended module/import.
        fish_gate = fish_modules.FishGate(hidden_size=vocab_size,
                                          k=filter_size,
                                          dropout_rate=0.9,
                                          is_training=is_training)
        probabilities = fish_gate.forward(fc3)
        # probabilities = tf.layers.dense(probabilities, vocab_size, use_bias=True, activation=tf.nn.softmax)
        return {"predictions": probabilities}
class FishMoeModel3(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-6,
                     filter_size=2,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0. The MoE output is then
        refined by a residual feed-forward block (expand by `filter_size`,
        project back, add skip connection) and a final softmax layer.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Flag values override the keyword defaults.
        num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
        l2_penalty = FLAGS.moe_l2
        # Standard MoE head: per-class softmax gate over num_mixtures+1 experts
        # (the extra one is the implicit dummy expert predicting 0).
        gate_activations = slim.fully_connected(
            model_input,
            vocab_size * (num_mixtures + 1),
            activation_fn=None,
            biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="gates")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities0 = tf.reshape(probabilities_by_class_and_batch,
                                    [-1, vocab_size])
        probabilities0 = tf.layers.batch_normalization(probabilities0, training=is_training)
        # Residual feed-forward refinement block.
        r_activation0 = tf.layers.dense(probabilities0, vocab_size * filter_size, use_bias=True, activation=tf.nn.relu)
        r_activation0 = tf.layers.batch_normalization(r_activation0, training=is_training)
        if is_training:
            # Fix: the previous tf.layers.dropout(x, 0.9) call was a no-op
            # (its `training` argument defaults to False) and, had it been
            # active, 0.9 would be a *drop* rate rather than a keep
            # probability. Use keep_prob=0.9 for consistency with the other
            # models in this file.
            r_activation0 = tf.nn.dropout(r_activation0, keep_prob=0.9)
        r_activation1 = tf.layers.dense(r_activation0, vocab_size, use_bias=True, activation=None)
        probabilities1 = probabilities0 + r_activation1  # skip connection
        probabilities1 = tf.contrib.layers.layer_norm(probabilities1)
        probabilities1 = tf.layers.batch_normalization(probabilities1, training=is_training)
        probabilities2 = tf.layers.dense(probabilities1, vocab_size, use_bias=True, activation=tf.nn.softmax)
        return {"predictions": probabilities2}
class MoeModel2(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        It also includes the possibility of gating the probabilities
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          is_training: Is this the training phase ?
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Hard-coded to 3 mixtures: both the num_mixtures parameter and the
        # moe_num_mixtures flag are ignored here.
        num_mixtures = 3
        low_rank_gating = FLAGS.moe_low_rank_gating
        l2_penalty = FLAGS.moe_l2
        gating_probabilities = FLAGS.moe_prob_gating
        gating_input = FLAGS.moe_prob_gating_input
        # Gate network: single FC layer, or low-rank two-layer factorization.
        if low_rank_gating == -1:
            gate_activations = slim.fully_connected(
                model_input,
                vocab_size * (num_mixtures + 1),
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates")
        else:
            gate_activations1 = slim.fully_connected(
                model_input,
                low_rank_gating,
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates1")
            gate_activations = slim.fully_connected(
                gate_activations1,
                vocab_size * (num_mixtures + 1),
                activation_fn=None,
                biases_initializer=None,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="gates2")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities = tf.reshape(probabilities_by_class_and_batch,
                                   [-1, vocab_size])
        # Residual refinement: expand to 2*vocab_size with ReLU, project back,
        # add skip connection, then a final sigmoid output layer.
        filter1 = tf.layers.dense(probabilities,
                                  vocab_size * 2,
                                  use_bias=True,
                                  activation=tf.nn.relu,
                                  name="v-filter1")
        filter1 = tf.layers.batch_normalization(filter1, training=is_training)
        if is_training:
            filter1 = tf.nn.dropout(filter1, 0.8)  # keep_prob = 0.8
        filter2 = tf.layers.dense(filter1,
                                  vocab_size,
                                  use_bias=False,
                                  activation=None,
                                  name="v-filter2")
        probabilities = probabilities + filter2
        probabilities = tf.nn.relu(probabilities)
        probabilities = tf.layers.batch_normalization(probabilities, training=is_training)
        probabilities = tf.layers.dense(probabilities, vocab_size, use_bias=True,
                                        activation=tf.nn.sigmoid, name="v-final_output")
        return {"predictions": probabilities}
class JuhanMoeModel(models.BaseModel):
    """A softmax over a mixture of logistic models (with L2 regularization)."""

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     num_mixtures=None,
                     l2_penalty=1e-8,
                     **unused_params):
        """Creates a Mixture of (Logistic) Experts model.
        The model consists of a per-class softmax distribution over a
        configurable number of logistic classifiers. One of the classifiers in the
        mixture is not trained, and always predicts 0.
        Args:
          model_input: 'batch_size' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.
          num_mixtures: The number of mixtures (excluding a dummy 'expert' that
            always predicts the non-existence of an entity).
          l2_penalty: How much to penalize the squared magnitudes of parameter
            values.
        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes.
        """
        # Hard-coded to 3 mixtures; the num_mixtures parameter is ignored.
        num_mixtures = 3
        gate_activations = slim.fully_connected(
            model_input,
            vocab_size * (num_mixtures + 1),
            activation_fn=None,
            biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="gates")
        expert_activations = slim.fully_connected(
            model_input,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")
        gating_distribution = tf.nn.softmax(tf.reshape(
            gate_activations,
            [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(tf.reshape(
            expert_activations,
            [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
        final_probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                   [-1, vocab_size])
        # Like MoeModel2 but with leaky_relu activations and dropout applied
        # to the MoE probabilities before the residual refinement block.
        if is_training:
            probabilities = tf.nn.dropout(probabilities, 0.8)  # keep_prob = 0.8
        filter1 = tf.layers.dense(probabilities,
                                  vocab_size * 2,
                                  use_bias=True,
                                  activation=tf.nn.leaky_relu,
                                  name="v-filter1")
        filter1 = tf.layers.batch_normalization(filter1, training=is_training)
        if is_training:
            filter1 = tf.nn.dropout(filter1, 0.8)
        filter2 = tf.layers.dense(filter1,
                                  vocab_size,
                                  use_bias=False,
                                  activation=None,
                                  name="v-filter2")
        probabilities = probabilities + filter2  # skip connection
        probabilities = tf.nn.leaky_relu(probabilities)
        probabilities = tf.layers.batch_normalization(probabilities, training=is_training)
        probabilities = tf.layers.dense(probabilities, vocab_size, use_bias=True,
                                        activation=tf.nn.sigmoid, name="v-final_output")
        return {"predictions": probabilities}
class FourLayerBatchNeuralModel(models.BaseModel):
    """Four fully-connected layers with batch norm after each hidden layer.

    Layers 1-3: matmul -> ReLU -> batch norm (no bias terms). Layer 4:
    matmul + bias -> sigmoid, producing per-class probabilities.
    Weight histograms are written to TensorBoard summaries.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     l2_penalty=1e-7,
                     **unused_params):
        # Note: l2_penalty is accepted but not applied to any weights here.
        model_input_dim = model_input.get_shape().as_list()[1]
        fc1_weights = tf.get_variable("fc1_weights",
                                      [model_input_dim, vocab_size],
                                      initializer=tf.contrib.layers.xavier_initializer())
        tf.summary.histogram("fc1_weights", fc1_weights)
        fc1_activation = tf.matmul(model_input, fc1_weights)
        fc1_activation = tf.nn.relu(fc1_activation)
        fc1_activation = slim.batch_norm(
            fc1_activation,
            center=True,
            scale=True,
            is_training=is_training,
            scope="fc1_activation_bn")
        fc2_weights = tf.get_variable("fc2_weights",
                                      [vocab_size, vocab_size],
                                      initializer=tf.contrib.layers.xavier_initializer())
        tf.summary.histogram("fc2_weights", fc2_weights)
        fc2_activation = tf.matmul(fc1_activation, fc2_weights)
        fc2_activation = tf.nn.relu(fc2_activation)
        fc2_activation = slim.batch_norm(
            fc2_activation,
            center=True,
            scale=True,
            is_training=is_training,
            scope="fc2_activation_bn")
        fc3_weights = tf.get_variable("fc3_weights",
                                      [vocab_size, vocab_size],
                                      initializer=tf.contrib.layers.xavier_initializer())
        tf.summary.histogram("fc3_weights", fc3_weights)
        fc3_activation = tf.matmul(fc2_activation, fc3_weights)
        fc3_activation = tf.nn.relu(fc3_activation)
        fc3_activation = slim.batch_norm(
            fc3_activation,
            center=True,
            scale=True,
            is_training=is_training,
            scope="fc3_activation_bn")
        # Output layer: the only layer with a bias term; sigmoid output.
        fc4_weights = tf.get_variable("fc4_weights",
                                      [vocab_size, vocab_size],
                                      initializer=tf.contrib.layers.xavier_initializer())
        fc4_activation = tf.matmul(fc3_activation, fc4_weights)
        cluster_biases = tf.get_variable("fc4_bias",
                                         [vocab_size],
                                         initializer=tf.constant_initializer(0.01))
        tf.summary.histogram("fc4_bias", cluster_biases)
        fc4_activation += cluster_biases
        fc4_activation = tf.sigmoid(fc4_activation)
        return {"predictions": fc4_activation}
class ClassLearningThreeNnModel(models.BaseModel):
    """Three fully-connected layers with layer norm + leaky ReLU and dropout.

    Hidden layers are bias-free FC -> layer norm (leaky ReLU) -> dropout
    (keep_prob=0.5, training only); the output layer is FC -> sigmoid.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     l2_penalty=1e-8,
                     ortho_reg=0,
                     **unused_params):
        # ortho_reg is passed through unchanged as "regularization_loss".
        fc1 = slim.fully_connected(
            model_input, vocab_size, activation_fn=None, biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        fc1 = tf.contrib.layers.layer_norm(inputs=fc1, center=True, scale=True, activation_fn=tf.nn.leaky_relu)
        if is_training:
            fc1 = tf.nn.dropout(fc1, keep_prob=0.5)
        fc2 = slim.fully_connected(
            fc1, vocab_size, activation_fn=None, biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        fc2 = tf.contrib.layers.layer_norm(inputs=fc2, center=True, scale=True, activation_fn=tf.nn.leaky_relu)
        if is_training:
            fc2 = tf.nn.dropout(fc2, keep_prob=0.5)
        fc3 = slim.fully_connected(
            fc2, vocab_size, activation_fn=tf.nn.sigmoid, biases_initializer=tf.constant_initializer(0.1),
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        return {"predictions": fc3,
                "regularization_loss": ortho_reg}
class ClassLearningFourNnModel(models.BaseModel):
    """Four fully-connected layers with layer norm + leaky ReLU (no dropout).

    Same structure as ClassLearningThreeNnModel with one extra hidden layer;
    the dropout calls are intentionally commented out.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     is_training,
                     l2_penalty=1e-8,
                     ortho_reg=0,
                     **unused_params):
        # ortho_reg is passed through unchanged as "regularization_loss".
        fc1 = slim.fully_connected(
            model_input, vocab_size, activation_fn=None, biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        fc1 = tf.contrib.layers.layer_norm(inputs=fc1, center=True, scale=True, activation_fn=tf.nn.leaky_relu)
        # if is_training:
        #     fc1 = tf.nn.dropout(fc1, keep_prob=0.5)
        fc2 = slim.fully_connected(
            fc1, vocab_size, activation_fn=None, biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        fc2 = tf.contrib.layers.layer_norm(inputs=fc2, center=True, scale=True, activation_fn=tf.nn.leaky_relu)
        # if is_training:
        #     fc2 = tf.nn.dropout(fc2, keep_prob=0.5)
        fc3 = slim.fully_connected(
            fc2, vocab_size, activation_fn=None, biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        fc3 = tf.contrib.layers.layer_norm(inputs=fc3, center=True, scale=True, activation_fn=tf.nn.leaky_relu)
        fc4 = slim.fully_connected(
            fc3, vocab_size, activation_fn=tf.nn.sigmoid, biases_initializer=tf.constant_initializer(0.1),
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        return {"predictions": fc4,
                "regularization_loss": ortho_reg}
tests/test_refinement.py | qfardet/Pandora2D | 4 | 17148 | <reponame>qfardet/Pandora2D
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA2D
#
# https://github.com/CNES/Pandora2D
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Test refinement step
"""
import unittest
import numpy as np
import xarray as xr
import pytest
from pandora2d import refinement, common
class TestRefinement(unittest.TestCase):
    """
    TestRefinement class allows to test the refinement module
    """

    @staticmethod
    def _make_cost_volumes(cv):
        """Wrap a (row, col, disp_col, disp_row) cost array in the cost-volumes
        dataset expected by the refinement step (3x3 image, disparities in
        [-2, 2], zncc max measure). Factored out of the two tests below, which
        previously duplicated this construction verbatim."""
        row = np.arange(3)
        col = np.arange(3)
        disparity_range_col = np.arange(-2, 2 + 1)
        disparity_range_row = np.arange(-2, 2 + 1)
        cost_volumes = xr.Dataset(
            {"cost_volumes": (["row", "col", "disp_col", "disp_row"], cv)},
            coords={"row": row, "col": col, "disp_col": disparity_range_col, "disp_row": disparity_range_row},
        )
        cost_volumes.attrs["measure"] = "zncc"
        cost_volumes.attrs["window_size"] = 1
        cost_volumes.attrs["type_measure"] = "max"
        return cost_volumes

    @staticmethod
    def test_check_conf():
        """
        Test that the configuration check accepts the interpolation method and
        rejects an unknown refinement method.
        """
        refinement.AbstractRefinement(**{"refinement_method": "interpolation"})  # type: ignore
        with pytest.raises(KeyError):
            refinement.AbstractRefinement(**{"refinement_method": "wta"})  # type: ignore

    @staticmethod
    def test_refinement_method_subpixel():
        """
        Test the interpolation refinement on a plateau of four equal maxima:
        the refined disparities should land between them (sub-pixel values).
        """
        # Four adjacent equal maxima around disparity indices (2, 3).
        cv = np.zeros((3, 3, 5, 5))
        cv[:, :, 2, 2] = np.ones([3, 3])
        cv[:, :, 2, 3] = np.ones([3, 3])
        cv[:, :, 3, 2] = np.ones([3, 3])
        cv[:, :, 3, 3] = np.ones([3, 3])
        cost_volumes_test = TestRefinement._make_cost_volumes(cv)
        data = np.array(
            ([[0.4833878, 0.4833878, 0.4833878], [0.4833878, 0.4833878, 0.4833878], [0.4833878, 0.4833878, 0.4833878]]),
            dtype=np.float64,
        )
        dataset_disp_map = common.dataset_disp_maps(data, data)
        test = refinement.AbstractRefinement(**{"refinement_method": "interpolation"})  # type: ignore
        delta_x, delta_y = test.refinement_method(cost_volumes_test, dataset_disp_map)
        np.testing.assert_allclose(data, delta_y, rtol=1e-06)
        np.testing.assert_allclose(data, delta_x, rtol=1e-06)

    @staticmethod
    def test_refinement_method_pixel():
        """
        Test the interpolation refinement on a single sharp maximum: the
        refined disparities should stay at the integer disparity.
        """
        # Single maximum at disparity indices (1, 3) -> disparities (-1, +1).
        cv = np.zeros((3, 3, 5, 5))
        cv[:, :, 1, 3] = np.ones([3, 3])
        cost_volumes_test = TestRefinement._make_cost_volumes(cv)
        gt_delta_y = np.array(
            ([[-1, -1, -1], [-1, -1, -1], [-1, -1, -1]]),
            dtype=np.float64,
        )
        gt_delta_x = np.array(
            ([[1, 1, 1], [1, 1, 1], [1, 1, 1]]),
            dtype=np.float64,
        )
        dataset_disp_map = common.dataset_disp_maps(gt_delta_y, gt_delta_x)
        test = refinement.AbstractRefinement(**{"refinement_method": "interpolation"})  # type: ignore
        delta_x, delta_y = test.refinement_method(cost_volumes_test, dataset_disp_map)
        np.testing.assert_allclose(gt_delta_y, delta_y, rtol=1e-06)
        np.testing.assert_allclose(gt_delta_x, delta_x, rtol=1e-06)
| 2.390625 | 2 |
goblet/tests/test_scheduler.py | Aaron-Gill/goblet | 0 | 17149 | <filename>goblet/tests/test_scheduler.py
from unittest.mock import Mock
from goblet import Goblet
from goblet.resources.scheduler import Scheduler
from goblet.test_utils import (
get_responses,
get_response,
mock_dummy_function,
dummy_function,
)
class TestScheduler:
    """Unit tests for goblet's Cloud Scheduler handler.

    The registration tests work purely in memory.  The deploy/destroy/sync
    tests run with GOBLET_HTTP_TEST=REPLAY, i.e. they replay pre-recorded
    HTTP fixtures (loaded via get_responses/get_response) instead of
    contacting Google Cloud.
    """

    def test_add_schedule(self, monkeypatch):
        """One @schedule decoration registers one job with the expected JSON."""
        app = Goblet(function_name="goblet_example")
        monkeypatch.setenv("GOOGLE_PROJECT", "TEST_PROJECT")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        app.schedule("* * * * *", description="test")(dummy_function)
        scheduler = app.handlers["schedule"]
        assert len(scheduler.resources) == 1
        # NOTE(review): "scheule_json" is a typo for "schedule_json" (kept as-is).
        scheule_json = {
            "name": "projects/TEST_PROJECT/locations/us-central1/jobs/goblet_example-dummy_function",
            "schedule": "* * * * *",
            "timeZone": "UTC",
            "description": "test",
            "attemptDeadline": None,
            "retry_config": None,
            "httpTarget": {
                "body": None,
                "headers": {
                    "X-Goblet-Type": "schedule",
                    "X-Goblet-Name": "dummy_function",
                },
                "httpMethod": "GET",
                "oidcToken": {},
            },
        }
        assert scheduler.resources["dummy_function"]["job_json"] == scheule_json
        assert scheduler.resources["dummy_function"]["func"] == dummy_function

    def test_multiple_schedules(self, monkeypatch):
        """Repeat schedules on one function get -2/-3 suffixed job names."""
        app = Goblet(function_name="goblet_example")
        monkeypatch.setenv("GOOGLE_PROJECT", "TEST_PROJECT")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        app.schedule("1 * * * *", description="test")(dummy_function)
        app.schedule("2 * * * *", headers={"test": "header"})(dummy_function)
        app.schedule("3 * * * *", httpMethod="POST")(dummy_function)
        scheduler = app.handlers["schedule"]
        assert len(scheduler.resources) == 3
        scheule_json = {
            "name": "projects/TEST_PROJECT/locations/us-central1/jobs/goblet_example-dummy_function",
            "schedule": "1 * * * *",
            "timeZone": "UTC",
            "description": "test",
            "attemptDeadline": None,
            "retry_config": None,
            "httpTarget": {
                "body": None,
                "headers": {
                    "X-Goblet-Type": "schedule",
                    "X-Goblet-Name": "dummy_function",
                },
                "httpMethod": "GET",
                "oidcToken": {},
            },
        }
        assert scheduler.resources["dummy_function"]["job_json"] == scheule_json
        # Extra kwargs (headers / httpMethod) flow into the suffixed jobs.
        assert (
            scheduler.resources["dummy_function-2"]["job_json"]["httpTarget"][
                "headers"
            ]["test"]
            == "header"
        )
        assert (
            scheduler.resources["dummy_function-2"]["job_json"]["httpTarget"][
                "headers"
            ]["X-Goblet-Name"]
            == "dummy_function-2"
        )
        assert (
            scheduler.resources["dummy_function-3"]["job_json"]["httpTarget"][
                "headers"
            ]["X-Goblet-Name"]
            == "dummy_function-3"
        )
        assert (
            scheduler.resources["dummy_function-3"]["job_json"]["httpTarget"][
                "httpMethod"
            ]
            == "POST"
        )

    def test_call_scheduler(self, monkeypatch):
        """Dispatching an event with scheduler headers invokes the job function."""
        app = Goblet(function_name="goblet_example")
        monkeypatch.setenv("GOOGLE_PROJECT", "TEST_PROJECT")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        mock = Mock()
        app.schedule("* * * * *", description="test")(mock_dummy_function(mock))
        # Headers Cloud Scheduler would attach to the HTTP trigger request.
        headers = {
            "X-Goblet-Name": "dummy_function",
            "X-Goblet-Type": "schedule",
            "X-Cloudscheduler": True,
        }
        mock_event = Mock()
        mock_event.headers = headers
        app(mock_event, None)
        assert mock.call_count == 1

    def test_deploy_schedule(self, monkeypatch):
        """Deploy (replayed) sends the job creation request with correct fields."""
        monkeypatch.setenv("GOOGLE_PROJECT", "goblet")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        monkeypatch.setenv("GOBLET_TEST_NAME", "schedule-deploy")
        monkeypatch.setenv("GOBLET_HTTP_TEST", "REPLAY")
        goblet_name = "goblet_example"
        scheduler = Scheduler(goblet_name)
        scheduler.register_job(
            "test-job", None, kwargs={"schedule": "* * * * *", "kwargs": {}}
        )
        scheduler.deploy()
        responses = get_responses("schedule-deploy")
        assert goblet_name in responses[0]["body"]["name"]
        assert (
            responses[1]["body"]["httpTarget"]["headers"]["X-Goblet-Name"] == "test-job"
        )
        assert (
            responses[1]["body"]["httpTarget"]["headers"]["X-Goblet-Type"] == "schedule"
        )
        assert responses[1]["body"]["schedule"] == "* * * * *"

    def test_deploy_schedule_cloudrun(self, monkeypatch):
        """Cloudrun backend deploys with an OIDC token targeting the run URL."""
        monkeypatch.setenv("GOOGLE_PROJECT", "goblet")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        monkeypatch.setenv("GOBLET_TEST_NAME", "schedule-deploy-cloudrun")
        monkeypatch.setenv("GOBLET_HTTP_TEST", "REPLAY")
        scheduler = Scheduler("goblet", backend="cloudrun")
        cloudrun_url = "https://goblet-12345.a.run.app"
        # NOTE(review): placeholder left by dataset anonymization; the replay
        # fixture must contain the matching service account email.
        service_account = "<EMAIL>"
        scheduler.register_job(
            "test-job", None, kwargs={"schedule": "* * * * *", "kwargs": {}}
        )
        scheduler._deploy(config={"scheduler": {"serviceAccount": service_account}})
        responses = get_responses("schedule-deploy-cloudrun")
        assert responses[0]["body"]["status"]["url"] == cloudrun_url
        assert (
            responses[1]["body"]["httpTarget"]["oidcToken"]["serviceAccountEmail"]
            == service_account
        )
        assert (
            responses[1]["body"]["httpTarget"]["oidcToken"]["audience"] == cloudrun_url
        )
        assert responses[1]["body"]["schedule"] == "* * * * *"

    def test_deploy_multiple_schedule(self, monkeypatch):
        """Each registered job variant is POSTed with its own name and kwargs."""
        monkeypatch.setenv("GOOGLE_PROJECT", "goblet")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        monkeypatch.setenv("GOBLET_TEST_NAME", "schedule-deploy-multiple")
        monkeypatch.setenv("GOBLET_HTTP_TEST", "REPLAY")
        goblet_name = "goblet-test-schedule"
        scheduler = Scheduler(goblet_name)
        scheduler.register_job(
            "test-job", None, kwargs={"schedule": "* * 1 * *", "kwargs": {}}
        )
        scheduler.register_job(
            "test-job",
            None,
            kwargs={"schedule": "* * 2 * *", "kwargs": {"httpMethod": "POST"}},
        )
        scheduler.register_job(
            "test-job",
            None,
            kwargs={
                "schedule": "* * 3 * *",
                "kwargs": {"headers": {"X-HEADER": "header"}},
            },
        )
        scheduler.deploy()
        post_job_1 = get_response(
            "schedule-deploy-multiple",
            "post-v1-projects-goblet-locations-us-central1-jobs_1.json",
        )
        post_job_2 = get_response(
            "schedule-deploy-multiple",
            "post-v1-projects-goblet-locations-us-central1-jobs_2.json",
        )
        post_job_3 = get_response(
            "schedule-deploy-multiple",
            "post-v1-projects-goblet-locations-us-central1-jobs_3.json",
        )
        assert (
            post_job_1["body"]["httpTarget"]["headers"]["X-Goblet-Name"] == "test-job"
        )
        assert (
            post_job_2["body"]["httpTarget"]["headers"]["X-Goblet-Name"] == "test-job-2"
        )
        assert post_job_2["body"]["httpTarget"]["httpMethod"] == "POST"
        assert (
            post_job_3["body"]["httpTarget"]["headers"]["X-Goblet-Name"] == "test-job-3"
        )
        assert post_job_3["body"]["httpTarget"]["headers"]["X-HEADER"] == "header"

    def test_destroy_schedule(self, monkeypatch):
        """Destroy (replayed) issues exactly one delete request."""
        monkeypatch.setenv("GOOGLE_PROJECT", "goblet")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        monkeypatch.setenv("GOBLET_TEST_NAME", "schedule-destroy")
        monkeypatch.setenv("GOBLET_HTTP_TEST", "REPLAY")
        goblet_name = "goblet_example"
        scheduler = Scheduler(goblet_name)
        scheduler.register_job(
            "test-job", None, kwargs={"schedule": "* * * * *", "kwargs": {}}
        )
        scheduler.destroy()
        responses = get_responses("schedule-destroy")
        assert len(responses) == 1
        assert responses[0]["body"] == {}

    def test_sync_schedule(self, monkeypatch):
        """Sync removes stale jobs; dryrun performs the same lookups without deleting."""
        monkeypatch.setenv("GOOGLE_PROJECT", "goblet")
        monkeypatch.setenv("GOOGLE_LOCATION", "us-central1")
        monkeypatch.setenv("GOBLET_TEST_NAME", "schedule-sync")
        monkeypatch.setenv("GOBLET_HTTP_TEST", "REPLAY")
        goblet_name = "goblet"
        scheduler = Scheduler(goblet_name)
        scheduler.register_job(
            "scheduled_job", None, kwargs={"schedule": "* * * * *", "kwargs": {}}
        )
        scheduler.sync(dryrun=True)
        scheduler.sync(dryrun=False)
        responses = get_responses("schedule-sync")
        assert len(responses) == 3
        assert responses[1] == responses[2]
        assert responses[0]["body"] == {}
| 2.421875 | 2 |
arachnado/rpc/sites.py | wigginzz/arachnado | 2 | 17150 | <gh_stars>1-10
import logging
from functools import partial
from arachnado.storages.mongotail import MongoTailStorage
class Sites(object):
    """JSON-RPC facade over the 'known sites' storage.

    A thin delegation layer: the CRUD methods forward straight to the
    underlying MongoTailStorage, while subscribe/_publish bridge storage
    events onto the connected JSON-RPC handler.
    """
    logger = logging.getLogger(__name__)

    def __init__(self, handler, site_storage, **kwargs):
        # The handler owns the wire protocol; the storage owns persistence.
        self.handler = handler
        self.storage = site_storage  # type: MongoTailStorage

    def list(self):
        """Return all known sites."""
        return self.storage.fetch()

    def post(self, site):
        """Create a new site record."""
        self.storage.create(site)

    def patch(self, site):
        """Update an existing site record."""
        self.storage.update(site)

    def delete(self, site):
        """Remove a site record."""
        self.storage.delete(site)

    def subscribe(self):
        """Start forwarding every available storage event to the client."""
        for name in self.storage.available_events:
            callback = partial(self._publish, event=name)
            self.storage.subscribe(name, callback)

    def _on_close(self):
        # Connection closed: stop receiving storage notifications.
        self.storage.unsubscribe(self.storage.available_events)

    def _publish(self, event, data):
        # Prefix events with the RPC namespace before writing them out.
        self.handler.write_event('sites.{}'.format(event), data)
| 2.453125 | 2 |
src/players.py | deacona/the-ball-is-round | 0 | 17151 | <filename>src/players.py
"""players module.
Used for players data processes
"""
import numpy as np
import pandas as pd
import src.config as config
import src.utilities as utilities
from src.utilities import logging
pd.set_option("display.max_columns", 500)
pd.set_option("display.expand_frame_repr", False)
# master_file = config.MASTER_FILES["ftb_players"]
# distance_columns = ["Age", "ChancesInvolved", "DefensiveActions", "FoulsCommited", "FoulsSuffered", "Height", "Minutes", "NPG+A", "Points", "Weight", "SuccessfulPasses"]
def get_outfile(source_name):
    """Map a raw data-source name to the master-file stub used when saving.

    INPUT:
        source_name: String containing name of the data source
                     ("tmk_cnt" = contract data, "tmk_psm" = per-season
                     performance data)

    OUTPUT:
        outfile_stub: Stub to use when saving output

    RAISES:
        ValueError: if source_name is not a recognised source
    """
    logging.info("Mapping {0} to outfile".format(source_name))

    # Bug fix: an unknown source previously raised a confusing NameError
    # because outfile_stub was never assigned; fail fast with a clear error.
    stubs = {
        "tmk_cnt": "players_contract",
        "tmk_psm": "players_performance",
    }
    try:
        outfile_stub = stubs[source_name]
    except KeyError:
        raise ValueError("Unknown data source: {0}".format(source_name))
    logging.debug(outfile_stub)
    return outfile_stub
def _backfill_by_player(df, col):
    """Fill missing *col* values from another season of the same player.

    For any player that has *col* populated in at least one season and
    missing in another, sort that player's rows by (Name, Season) and
    back-fill the missing values in place.
    """
    mask = df.Name.isin(df[df[col].notna()].Name.values) & df.Name.isin(
        df[df[col].isna()].Name.values
    )
    df.loc[mask, col] = (
        df.loc[mask].sort_values(by=["Name", "Season"])[col].fillna(method="bfill")
    )


def clean_data(source_name, directory=config.MASTER_DIR):
    """Clean raw player data and save processed version.

    INPUT:
        source_name: String containing name of the data source
                     ("tmk_cnt" = contract data, "tmk_psm" = per-season
                     performance data)
        directory: Directory to save output to

    OUTPUT:
        df: Dataframe containing the cleaned data

    RAISES:
        ValueError: if source_name is not a recognised source
    """
    logging.info("Loading {0} data".format(source_name))
    if source_name == "tmk_cnt":
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Date of birth",
            "Nationality",
            "Height",
            "Foot",
            "Joined",
            "Signed from",
            "Contract expires",
            "Market value",
        ]
        drop_cols = ["Nationality", "Signed from", "Competition"]
        notna_cols = ["Market value"]
    elif source_name == "tmk_psm":
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Age",
            "Nationality",
            "In squad",
            "Games started",
            "Goals",
            "Assists",
            "Yellow cards",
            "Second yellow cards",
            "Red cards",
            "Substitutions on",
            "Substitutions off",
            "PPG",
            "Minutes played",
        ]
        drop_cols = ["Nationality"]
        notna_cols = ["In squad"]
    else:
        # Bug fix: an unknown source previously crashed later with a
        # confusing NameError (source_header never assigned).
        raise ValueError("Unknown data source: {0}".format(source_name))
    df = utilities.folder_loader(
        source_name[:3], source_name, "comp_season", source_header=source_header
    )
    ## Name and Position are mis-aligned in the source files
    df["Name"].fillna(method="bfill", inplace=True)
    df["Position"] = df.Name.shift(-1)
    df.loc[df.Position == df.Name, "Position"] = df.Name.shift(-2)
    df.drop(axis=1, columns=drop_cols, inplace=True)
    df.dropna(subset=notna_cols, inplace=True)
    # Strip whitespace on string columns, then normalise the various
    # "no data" placeholders used by the source into NaN.
    df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
    df = df.replace("-", np.nan)
    df = df.replace("Was not used during this season", np.nan)
    df = df.replace("Not in squad during this season", np.nan)
    df = df.replace("Not used during this season", np.nan)
    df["Shirt number"] = pd.to_numeric(df["Shirt number"], downcast="integer")
    # Map the free-text positions onto coarse groups: G/D/M/F.
    df["Position group"] = None
    df.loc[
        (df.Position.str.upper().str.contains("KEEPER"))
        | (df.Position.str.upper().str.contains("GOAL")),
        "Position group",
    ] = "G"
    df.loc[
        (df.Position.str.upper().str.contains("BACK"))
        | (df.Position.str.upper().str.contains("DEF")),
        "Position group",
    ] = "D"
    df.loc[
        (df.Position.str.upper().str.contains("MID"))
        | (df.Position.str.upper().str.contains("MIT"))
        | (df.Position.str.upper().str.contains("WING")),
        "Position group",
    ] = "M"
    df.loc[
        (df.Position.str.upper().str.contains("STRIKER"))
        | (df.Position.str.upper().str.contains("FORW")),
        "Position group",
    ] = "F"
    if source_name == "tmk_cnt":
        # Age is embedded in "Date of birth", e.g. "Apr 1, 1990 (30)".
        df["Age"] = (
            df["Date of birth"].str.extract(r".*([0-9]{2})", expand=False).astype("int")
        )
        df["Date of birth"] = pd.to_datetime(
            df["Date of birth"].str.extract(r"(.*) \([0-9]{2}\)", expand=False),
            format="%b %d, %Y",
        )
        df["Joined"] = pd.to_datetime(df.Joined, format="%b %d, %Y")
        df["Contract expires"] = pd.to_datetime(
            df["Contract expires"], format="%d.%m.%Y"
        )
        # Height arrives as e.g. "1,85 m"; strip formatting to a number.
        df["Height"] = (
            df["Height"]
            .str.strip()
            .str.replace(" ", "")
            .str.replace(",", "")
            .str.replace("m", "")
            .replace({"-": np.nan, "": np.nan})
            .astype(float)
        )
        # Some seasons are missing Height/Foot; borrow the value recorded in
        # another season for the same player (was duplicated inline before).
        _backfill_by_player(df, "Height")
        _backfill_by_player(df, "Foot")
        # Market value strings like "£1.5m" / "£900k" / "£900Th." -> float
        # millions: numeric part times the unit multiplier, divided by 1e6.
        df["Market value"] = (
            df["Market value"]
            .str.strip()
            .replace({"-": np.nan})
            .replace(r"[£kmTh\.]", "", regex=True)
            .astype(float)
            * df["Market value"]
            .str.extract(r"[\d\.]+([kmTh\.]+)", expand=False)
            .fillna(1)
            .replace(["k", "Th.", "m"], [10 ** 3, 10 ** 3, 10 ** 6])
            .astype(int)
            / 10 ** 6
        )
    elif source_name == "tmk_psm":
        # Continental decimal comma ("1,5") -> float; minutes like "1.234'".
        df["PPG"] = df["PPG"].str.strip().replace(r"[,]", ".", regex=True).astype(float)
        df["Minutes played"] = (
            df["Minutes played"]
            .str.strip()
            .replace(r"[.\']", "", regex=True)
            .astype(float)
        )
        # De-duplicated: the original repeated this 11-column list four times.
        stat_cols = [
            "In squad",
            "Games started",
            "Goals",
            "Assists",
            "Yellow cards",
            "Second yellow cards",
            "Red cards",
            "Substitutions on",
            "Substitutions off",
            "PPG",
            "Minutes played",
        ]
        # Missing stats mean "did not feature": zero-fill, then make numeric.
        df[stat_cols] = df[stat_cols].fillna(0).astype(float)
    logging.debug(df.describe(include="all"))
    logging.info("Saving processed data to ")
    utilities.save_master(df, get_outfile(source_name), directory=directory)
    return df
# def get_players():
# """
# INPUT:
# None
# OUTPUT:
# df - Dataframe of aggregated player data
# """
# logging.info("Fetching aggregated player data")
# # fetch from master csv
# # df = pd.read_csv(master_file, sep='|', encoding="ISO-8859-1")
# df = utilities.get_master("players")
# # filter unwanted records
# df = df[(df["Season"] >= "s1314") & (df["Competition"].isin(["chm", "cpo", "prm"]))]
# df.dropna(subset=["Name"], inplace=True)
# # select columns
# group_key = "Name"
# max_cols = ["Age", "Height", "Weight"]
# # p90_cols = ["AerialsWon", "ChancesInvolved", "DefensiveActions", "Dispossesed", "Dribbles", "FoulsCommited", "FoulsSuffered", "NPG+A", "SuccessfulPasses"]
# p90_cols = [
# "AerialsWon",
# "Assists",
# "BadControl",
# "Blocks",
# "CalledOffside",
# "Clearances",
# "Crosses",
# "Dispossesed",
# "Dribbles",
# "DribblesAgainst",
# "FirstYellowCards",
# "FoulsCommited",
# "FoulsSuffered",
# "GoalsConceded",
# "Interceptions",
# "KeyPasses",
# "LongBalls",
# "NonPenaltyGoals",
# "OffsidesWon",
# "OwnGoals",
# "Passes",
# "PenaltyGoals",
# "RedCards",
# "Saves",
# "Shots",
# "ShotsFaced",
# "ShotsOnTarget",
# "Tackles",
# "ThroughBalls",
# "YellowCards",
# ]
# pGm_cols = ["Appearances", "Minutes", "Points"]
# sum_cols = p90_cols + pGm_cols
# selected_columns = [group_key] + max_cols + sum_cols
# df = df[selected_columns]
# # aggregate to player level
# df_max = df[[group_key] + max_cols].groupby(group_key).max()
# df_sum = df[[group_key] + sum_cols].groupby(group_key).sum()
# df = pd.concat([df_max, df_sum], axis=1)
# df = df[(df["Minutes"] >= 900)]
# # convert action totals to per90
# for col in p90_cols:
# df[col + "P90"] = 90 * df[col] / df["Minutes"]
# for col in pGm_cols:
# df[col + "PGm"] = df[col] / df["Appearances"]
# for col in sum_cols:
# del df[col]
# del df["AppearancesPGm"]
# logging.debug(df.describe(include="all"))
# return df
# def find_similar():
# players = get_players()
# # print players
# print("\nNumber of players included: " + str(len(players)))
# # Normalize all of the numeric columns
# players_normalized = (players - players.mean()) / players.std()
# players_normalized.fillna(0, inplace=True)
# # players_normalized.info()
# # print players_normalized.describe(include="all")
# # print players_normalized.index.values
# for (
# name
# ) in (
# players_normalized.index.values
# ): # ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]:
# # print "\n###############################"
# print("\n" + name, end=" ")
# # selected_player = players.loc[name]
# # print selected_player.name
# # print selected_player.to_frame().T #.name
# # Normalize all of the numeric columns
# selected_normalized = players_normalized.loc[name]
# # print selected_normalized
# # Find the distance between select player and everyone else.
# euclidean_distances = players_normalized.apply(
# lambda row: distance.euclidean(row, selected_normalized), axis=1
# )
# # Create a new dataframe with distances.
# distance_frame = pd.DataFrame(
# data={"dist": euclidean_distances, "idx": euclidean_distances.index}
# )
# distance_frame.sort_values("dist", inplace=True)
# most_similar_players = distance_frame.iloc[1:4]["idx"]
# # most_similar_players = players.loc[nearest_neighbours] #["Name"]
# # print most_similar_players
# print("... is similar to... ", end=" ")
# print(list(most_similar_players.index.values))
# def make_prediction():
# players = get_players()
# pred_col = "AssistsP90"
# x_columns = list(players.columns.values)
# x_columns.remove(pred_col)
# y_column = [pred_col]
# # # The columns that we will be making predictions with.
# # x_columns = ['Age', 'Height', 'Weight', 'AerialsWonP90', 'AssistsP90', 'BadControlP90', 'BlocksP90', 'CalledOffsideP90', 'ClearancesP90', 'CrossesP90', 'DispossesedP90', 'DribblesP90', 'DribblesAgainstP90', 'FirstYellowCardsP90', 'FoulsCommitedP90', 'FoulsSufferedP90', 'GoalsConcededP90', 'InterceptionsP90', 'KeyPassesP90', 'LongBallsP90', 'NonPenaltyGoalsP90', 'OffsidesWonP90', 'OwnGoalsP90', 'PassesP90', 'PenaltyGoalsP90', 'RedCardsP90', 'SavesP90', 'ShotsP90', 'ShotsFacedP90', 'ShotsOnTargetP90', 'TacklesP90', 'ThroughBallsP90', 'YellowCardsP90', 'MinutesPGm']
# # print x_columns
# # # The column that we want to predict.
# # y_column = [pred_col]
# # print y_column
# ###Generating training and testing sets
# # Randomly shuffle the index of nba.
# random_indices = permutation(players.index)
# # Set a cutoff for how many items we want in the test set (in this case 1/3 of the items)
# test_cutoff = math.floor(len(players) / 3)
# # Generate the test set by taking the first 1/3 of the randomly shuffled indices.
# test = players.loc[random_indices[1:test_cutoff]]
# test.fillna(0, inplace=True)
# # test.info()
# # print test.describe(include="all")
# # Generate the train set with the rest of the data.
# train = players.loc[random_indices[test_cutoff:]]
# train.fillna(0, inplace=True)
# # train.info()
# # print train.describe(include="all")
# ###Using sklearn for k nearest neighbors
# # print "Using sklearn for k nearest neighbors..."
# from sklearn.neighbors import KNeighborsRegressor
# # Create the knn model.
# # Look at the five closest neighbors.
# knn = KNeighborsRegressor(n_neighbors=5)
# # print knn
# # Fit the model on the training data.
# knn.fit(train[x_columns], train[y_column])
# # print knn
# # Make point predictions on the test set using the fit model.
# predictions = knn.predict(test[x_columns])
# # print "\nPredicted PointsPGm:"
# # print predictions.shape
# ###Computing error
# # Get the actual values for the test set.
# actual = test[y_column].copy()
# # Compute the mean squared error of our predictions.
# mse = (((predictions - actual) ** 2).sum()) / len(predictions)
# print("\nMean Squared Error:")
# print(mse)
# actual["Predicted" + pred_col] = predictions
# actual["Diff"] = actual[pred_col] - actual["Predicted" + pred_col]
# print("\nActual and Predicted " + pred_col + ":")
# print(actual.sort_values(["Diff"], ascending=False))
# def test_opinions():
# players = get_players()
# players = players.reset_index()
# players = players[
# players["Name"].isin(
# [
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# ]
# )
# ]
# # df_info(players)
# players["ShotAccuracy"] = players["ShotsOnTargetP90"] / players["ShotsP90"]
# players["ShotEfficiency"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsP90"]
# players["ShotPercentage"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsOnTargetP90"]
# players = players[
# [
# "Name",
# "NonPenaltyGoalsP90",
# "PenaltyGoalsP90",
# "ShotsP90",
# "ShotsOnTargetP90",
# "ShotAccuracy",
# "ShotEfficiency",
# "ShotPercentage",
# ]
# ]
# # df_info(players)
# print(players.describe())
# print(players)
def main():
    """CLI entry point: clean every supported raw player data source."""
    logging.info("Executing players module")
    for source in ("tmk_cnt", "tmk_psm"):
        clean_data(source)
    # Legacy analysis steps, currently disabled:
    # get_players()
    # find_similar()
    # make_prediction()
    # test_opinions()


if __name__ == "__main__":
    main()
| 2.890625 | 3 |
Z_ALL_FILE/Py/code_qry.py | omikabir/omEngin | 0 | 17152 | import pandas as pd
import os
#opt = itertools.islice(ls, len(ls))
#st = map(lambda x : )
def parsecode(txt):
    """Extract known incident codes from free text.

    Loads the list of known codes from OMDB.csv (column "Code") in the
    current working directory and scans *txt* for each one.  For every code
    found, the 7-character token starting at the match position is collected
    and the matched code is removed from the working text so it is not
    reported twice.

    INPUT:
        txt: string to scan.

    OUTPUT:
        '' when no code matches (legacy sentinel kept for existing callers),
        otherwise a list of 7-character code strings.
    """
    # os.path.join instead of the original Windows-only "\\" concatenation.
    df = pd.read_csv(os.path.join(os.getcwd(), "OMDB.csv"))
    codes = df["Code"].to_list()

    found = []
    remaining = txt
    for code in codes:
        # Bug fix: the original returned on the FIRST code that was not
        # present (skipping every code after it in the list) and fell off
        # the loop returning None when all codes matched.  Every known
        # code is now checked before deciding the result.
        n = remaining.find(code)
        if n == -1:
            continue
        found.append(remaining[n:n + 7])
        remaining = remaining.replace(code, '')

    return found if found else ''
def qry_by_code(code, tbl=None, col=None):
    """Build the incident-history SQL for one site/link *code*.

    Only the default form (tbl and col both unset) is implemented; any
    other combination yields the empty string.

    WARNING: *code* is interpolated into the SQL verbatim (no
    parameterization), so callers must only pass validated codes.
    """
    if tbl is not None or col is not None:
        return ""
    like = "'%" + code + "%'"
    return (
        "select Incident_Notification,Down_Time,Up_Time,Major_Cause,"
        "Action_Taken,Link_ID_Site_ID,Incident_ID"
        " from incident_tracker_v2 where ("
        " No_of_2G_Impacted_sites Like {0} or"
        " No_of_3G_Impacted_sites like {0} or"
        " No_of_4G_Impacted_Sites like {0} or"
        " Incident_Notification Like {0})"
        " order by Down_Time desc"
    ).format(like)
def codechk(txt):
    """Look up recent incidents for a single code mentioned in *txt*.

    Returns 0 when no single code is found (or on any lookup failure
    before a summary was built), otherwise a newline-separated summary
    string covering up to the last three incidents.
    """
    rs = parsecode(txt.upper())
    st = 0
    print('ret val', rs)
    if len(rs) != 1:
        # Zero or multiple candidate codes: nothing unambiguous to query.
        return st
    code = rs[0]
    rn = 0
    try:
        int(code[6:7])  # validate: 7th character must be a digit
        qry = qry_by_code(code)
        # NOTE(review): `pyodbc` is not imported in this module and `soc`
        # (the connection string) is not defined here -- presumably they
        # are provided globally elsewhere; confirm before relying on this.
        conn = pyodbc.connect(soc)
        # Bug fix: pandas has no `pd.read`; `pd.read_sql` was intended.
        df = pd.read_sql(qry, con=conn)
        if df.shape[0] != 0:
            if df.shape[0] > 3:
                # Bug fix: concatenating the int row count to a str raised
                # TypeError; stringify explicitly.
                st = "last 3 incident out of " + str(df.shape[0])
                rn = 3
            else:
                st = "incident found " + str(df.shape[0]) + chr(10)
                rn = df.shape[0]
            for i in range(rn):
                tmp = chr(10)
                for col in df:
                    # Bug fix: cell values (timestamps, numbers) must be
                    # stringified before concatenation.
                    tmp = tmp + chr(10) + str(df.loc[i, col])
                st = st + chr(10) + str(i) + tmp
    except Exception:
        # Narrowed from a bare except; keeps the original best-effort
        # behaviour of returning whatever was built so far.
        print('not code')
    # Bug fix: the success path previously fell through without a return,
    # yielding None instead of the summary.
    return st
| 2.84375 | 3 |
eats/tests/common/base_test_setup.py | Etiqa/eats | 0 | 17153 | import socket
import unittest
from eats.webdriver import PytractorWebDriver
from eats.tests.common import SimpleWebServerProcess as SimpleServer
def _get_local_ip_addr():
    """Return the IPv4 address of the interface used for outbound traffic.

    "Connects" a UDP socket to a public host (no packets are actually sent
    for a UDP connect) and reads back the local address the OS selected for
    that route.
    """
    # Bug fix: the original leaked the socket fd if connect() raised
    # (e.g. on DNS failure); `with` guarantees the socket is closed.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("gmail.com", 80))
        return s.getsockname()[0]
class PytractorTestBaseSetup(unittest.TestCase):
    """Shared fixture: one SimpleServer per test class, a fresh driver per test.

    Subclasses mix in a get_driver() implementation (Firefox/Chrome below).
    """

    @classmethod
    def setUpClass(cls):
        server = SimpleServer()
        server.run()
        cls.process = server

    @classmethod
    def tearDownClass(cls):
        cls.process.stop()

    def setUp(self):
        host = _get_local_ip_addr()
        self.base_url = "http://{}:{}".format(host, SimpleServer.PORT)
        self.driver = self.get_driver()
        # Synchronize with Angular by default.
        self.driver.ignore_synchronization = False

    def tearDown(self):
        self.driver.quit()
class FirefoxRemoteWebDriverTest(object):
    """Mixin supplying a remote Firefox driver to the base fixture."""

    def get_driver(self):
        hub = 'http://{}:4444/wd/hub'.format(_get_local_ip_addr())
        capabilities = {'browserName': 'firefox', 'version': '', 'platform': 'ANY'}
        return PytractorWebDriver(
            test_timeout=3000,
            command_executor=hub,
            desired_capabilities=capabilities,
        )
class ChromeRemoteWebDriverTest(object):
    """Mixin supplying a remote Chrome driver to the base fixture."""

    def get_driver(self):
        hub = 'http://{}:4444/wd/hub'.format(_get_local_ip_addr())
        capabilities = {'browserName': 'chrome', 'version': '', 'platform': 'ANY'}
        return PytractorWebDriver(
            test_timeout=3000,
            command_executor=hub,
            desired_capabilities=capabilities,
        )
autoprep/service/sqlite_project_service.py | haginot/auto-prep | 0 | 17154 | <reponame>haginot/auto-prep
from autoprep.service.project_service import ProjectService
class SQLiteProjectService(ProjectService):
    """SQLite-backed implementation of ProjectService.

    All methods are currently unimplemented stubs that return None;
    callers should not rely on them until they are filled in.
    """

    def get_projects(self):
        """Return all projects. Not yet implemented (returns None)."""
        pass

    def get_project(self):
        """Return a single project. Not yet implemented (returns None)."""
        pass

    def save_project(self):
        """Persist a project. Not yet implemented (no-op)."""
        pass
| 1.476563 | 1 |
p1_navigation/model.py | Alexandr0s93/deep-reinforcement-learning | 0 | 17155 | import torch
import torch.nn as nn
class QNetwork(nn.Module):
    """Actor (Policy) Model: a plain (non-dueling) DQN.

    Maps a state vector to one Q-value per action through a
    128 -> 64 -> 32 fully-connected ReLU stack.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(QNetwork, self).__init__()
        # Seed BEFORE layer construction so weight init is reproducible.
        self.seed = torch.manual_seed(seed)

        # Layers must be created in this exact order so the seeded
        # initialization matches the reference implementation.
        widths = [state_size, 128, 64, 32]
        modules = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            modules.append(nn.Linear(n_in, n_out))
            modules.append(nn.ReLU())
        modules.append(nn.Linear(widths[-1], action_size))
        self.dqn_layers = nn.Sequential(*modules)

    def forward(self, state):
        """Map a batch of states to per-action Q-values."""
        return self.dqn_layers(state)
class DuelQNetwork(nn.Module):
    """Actor (Policy) Model: a dueling DQN.

    A shared 128 -> 64 -> 32 feature trunk feeds two heads: a scalar
    state-value V(s) and a per-action advantage A(s, a), combined as
    Q(s, a) = V(s) + (A(s, a) - mean(A)).
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(DuelQNetwork, self).__init__()
        # Seed BEFORE layer construction so weight init is reproducible.
        self.seed = torch.manual_seed(seed)

        # Construction order is fixed (trunk, value head, advantage head)
        # so the seeded initialization matches the reference implementation.
        trunk = []
        for n_in, n_out in ((state_size, 128), (128, 64), (64, 32)):
            trunk += [nn.Linear(n_in, n_out), nn.ReLU()]
        self.feature_layers = nn.Sequential(*trunk)

        # State-value head V(s): one scalar per state.
        self.value_stream = nn.Sequential(nn.Linear(32, 1))
        # Advantage head A(s, a): one value per action.
        self.advantage_stream = nn.Sequential(nn.Linear(32, action_size))

    def forward(self, state):
        """Combine the two heads: Q = V + (A - mean(A))."""
        features = self.feature_layers(state)
        value = self.value_stream(features)
        advantage = self.advantage_stream(features)
        # Subtracting the mean advantage keeps V/A identifiable.
        return value + (advantage - advantage.mean())
const.py | TakosukeGH/pmx_bone_importer | 0 | 17156 | ADDON_NAME = "pmx_bone_importer"
LOG_FILE_NAME = "pmx_bone_importer.log"
| 1.265625 | 1 |
pox/lib/interfaceio/__init__.py | korrigans84/pox_network | 416 | 17157 | # Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input and output from network interfaces.
This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative
interface to network interfaces.
Currently limited to Linux.
"""
from pox.lib.pxpcap import PCap
from queue import Queue
from pox.lib.revent import Event, EventMixin
from pox.lib.ioworker.io_loop import ReadLoop
from pox.core import core
import struct
from fcntl import ioctl
import socket
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.addresses import parse_cidr, cidr_to_netmask
import os
import ctypes
IFNAMESIZ = 16
IFREQ_SIZE = 40
# from linux/if_tun.h
TUNSETIFF = 0x400454ca
TUNGETIFF = 0x800454d2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
IFF_TUN_EXCL = 0x8000
IFF_MULTI_QUEUE = 0x0100
IFF_ATTACH_QUEUE = 0x0200
IFF_DETACH_QUEUE = 0x0400
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000
#from linux/if.h (flags)
IFF_UP = 1<<0
IFF_BROADCAST = 1<<1
IFF_DEBUG = 1<<2
IFF_LOOPBACK = 1<<3
IFF_POINTOPOINT = 1<<4
IFF_NOTRAILERS = 1<<5
IFF_RUNNING = 1<<6
IFF_NOARP = 1<<7
IFF_PROMISC = 1<<8
IFF_ALLMULTI = 1<<9
IFF_MASTER = 1<<10
IFF_SLAVE = 1<<11
IFF_MULTICAST = 1<<12
IFF_PORTSEL = 1<<13
IFF_AUTOMEDIA = 1<<14
IFF_DYNAMIC = 1<<15
IFF_LOWER_UP = 1<<16
IFF_DORMANT = 1<<17
IFF_ECHO = 1<<18
# Unless IFF_NO_PI, there's a header on packets:
# 16 bits of flags
# 16 bits (big endian?) protocol number
# from /usr/include/linux/sockios.h
SIOCGIFHWADDR = 0x8927
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCSIFNAME = 0x8923
SIOCADDRT = 0x890B # rtentry (route.h) for IPv4, in6_rtmsg for IPv6
SIOCDELRT = 0x890C
# from /usr/include/linux/if_arp.h
ARPHRD_ETHER = 1
ARPHRD_IEEE802 = 1
ARPHRD_IEEE1394 = 24
ARPHRD_EUI64 = 27
ARPHRD_LOOPBACK = 772
ARPHRD_IPGRE = 778
ARPHRD_IEE802_TR = 800
ARPHRD_IEE80211 = 801
ARPHRD_IEE80211_PRISM = 802
ARPHRD_IEE80211_RADIOTAP = 803
ARPHRD_IP6GRE = 823
class rtentry (object):
  """
  Wrapper for Linux rtentry

  Only tries to capture IPv4 usage.
  Possibly better done with ctypes.

  Instances are packed into the binary layout expected by the
  SIOCADDRT/SIOCDELRT route ioctls (see struct rtentry in Linux's
  route.h).  Field-by-field comments below mirror the kernel header.
  """
  # flags
  RTF_UP = 0x0001        # usable
  RTF_GATEWAY = 0x0002   # dst is gateway
  RTF_HOST = 0x0004      # host route
  RTF_REINSTATE = 0x0008 # reinstate after timeout
  RTF_DYNAMIC = 0x0010   # created dynamically (by redirect)
  RTF_MODIFIED = 0x0020  # modified dynamically (by redirect)
  RTF_MSS = 0x0040       # use specific MSS for this route
  RTF_WINDOW = 0x0080    # use per-route window clamping
  RTF_IRTT = 0x0100      # use initial RTT
  RTF_REJECT = 0x0200    # reject route

  # fields
  rt_hash = 0
  rt_dst = IPAddr("0.0.0.0")
  rt_gateway = IPAddr("0.0.0.0")
  rt_genmask = IPAddr("0.0.0.0")
  rt_flags = 0
  rt_refcnt = 0
  rt_use = 0
  rt_ifp = 0 # ptr to struct ifnet
  rt_metric = 0
  rt_dev = None # device name
  rt_mss = 0
  rt_window = 0 # window clamping
  rt_irtt = 0 # initial RTT

  def pack (self):
    # Serialize to the kernel's struct rtentry layout.  The three address
    # fields are embedded 16-byte sockaddr_in blobs; rt_dev is passed as a
    # raw pointer ("P") to a NUL-terminated name buffer.
    # NOTE(review): ctypes.c_char_p expects bytes on Python 3, so a str
    # rt_dev would fail there -- this code predates Python 3; confirm
    # before reuse.
    if self.rt_dev:
      s = ctypes.c_char_p(self.rt_dev + "\0") # Null terminator necessary?
      dev = ctypes.cast(s, ctypes.c_void_p).value
      self._buf = s # You must use the resulting packed string before changing
                    # rt_dev!  (Keeps the ctypes buffer -- and thus the
                    # pointer packed below -- alive.)
    else:
      dev = 0
    return struct.pack("L16s16s16shhLPhPLLH",
        self.rt_hash,
        sockaddr_in(self.rt_dst).pack(),
        sockaddr_in(self.rt_gateway).pack(),
        sockaddr_in(self.rt_genmask).pack(),
        self.rt_flags,
        self.rt_refcnt,
        self.rt_use,
        self.rt_ifp,
        self.rt_metric,
        dev,
        self.rt_mss,
        self.rt_window,
        self.rt_irtt)
class sockaddr_in (object):
  """
  Wrapper for sockaddr_in

  Packs to the 16-byte binary layout of a Linux struct sockaddr_in
  (2-byte family, 2-byte port, 4-byte IPv4 address, 8 bytes padding),
  for embedding inside rtentry ioctl payloads.
  """
  sin_family = socket.AF_INET
  sin_port = 0
  sin_addr = IPAddr("0.0.0.0")

  def __init__ (self, addr=None, port=None):
    # Both arguments are optional; class-level defaults apply otherwise.
    if addr is not None:
      self.sin_addr = IPAddr(addr)
    if port is not None:
      self.sin_port = port

  def pack (self):
    # "hH" uses native byte order; NOTE(review): sin_port is not converted
    # to network byte order here -- presumably fine because the route
    # ioctls ignore the port field; confirm before other uses.
    r = struct.pack("hH", self.sin_family, self.sin_port)
    r += self.sin_addr.raw
    r += ("\0" * 8)
    return r
class Interface (object):
  """
  Simple interface to tun/tap driver

  Currently only for Linux. IIRC, shouldn't be too hard to adapt for BSD.
  Other OSes will probably need a fair amount of work.

  Every property below issues the corresponding ioctl (or reads /proc) on
  each access; only the interface name is cached on the instance.
  NOTE(review): this code relies on Python 2 str/bytes semantics (the
  `file` builtin, str concatenation onto struct.pack results).
  """
  #TODO: Setters

  def __init__ (self, name):
    # Only the name is stored; all other attributes are queried on demand.
    self._name = name

  def __str__ (self):
    return "%s('%s')" % (type(self).__name__, self.name)

  @property
  def name (self):
    # The kernel NUL-pads interface names; strip the padding on the way out.
    return self._name.rstrip("\0")

  @name.setter
  def name (self, value):
    # Rename the interface.  The ifreq starts with the current (padded)
    # name, followed by the new name, zero-padded up to IFREQ_SIZE.
    if len(value) > IFNAMESIZ: raise RuntimeError("Name too long")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += value
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFNAME, ifr)
    self._name = value

  @property
  def ipv6_enabled (self):
    # `file` is the Python 2 builtin (equivalent to open()).
    f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "r")
    with f:
      return f.read()[0] == "0" # Note inversion!

  @ipv6_enabled.setter
  def ipv6_enabled (self, value):
    f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "w")
    with f:
      f.write("0" if value else "1") # Note inversion!

  @property
  def ip_forwarding (self):
    f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "r")
    with f:
      return f.read()[0] == "1"

  @ip_forwarding.setter
  def ip_forwarding (self, value):
    f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "w")
    with f:
      f.write("1" if value else "0")

  @property
  def mtu (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFMTU, ifr)
    # The MTU is the first 4 bytes of the union following the name.
    return struct.unpack("I", ret[IFNAMESIZ:][:4])[0]

  @mtu.setter
  def mtu (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sI", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFMTU, ifr)

  @property
  def flags (self):
    # Interface flags are a 16-bit word (IFF_UP, IFF_PROMISC, ...).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFFLAGS, ifr)
    return struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]

  @flags.setter
  def flags (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFFLAGS, ifr)

  def set_flags (self, flags, on=True):
    # Set (or, with on=False, clear) the given flag bits.
    if on:
      self.flags |= flags
    else:
      self.unset_flags(flags)

  def unset_flags (self, flags):
    # `flags ^ 0xffFF` is the 16-bit complement mask of the bits to clear.
    self.flags = self.flags & (flags ^ 0xffFF)

  @property
  def promiscuous (self):
    return bool(self.flags & IFF_PROMISC)

  @promiscuous.setter
  def promiscuous (self, value):
    self.set_flags(IFF_PROMISC, value)

  @property
  def is_up (self):
    return (self.flags & IFF_UP) != 0

  @is_up.setter
  def is_up (self, value):
    self.set_flags(IFF_UP, value)

  @property
  def is_running (self):
    return (self.flags & IFF_RUNNING) != 0

  @property
  def arp_enabled (self):
    # Inverted sense: the kernel flag is "no ARP".
    return (self.flags & IFF_NOARP) == 0

  @arp_enabled.setter
  def arp_enabled (self, value):
    self.set_flags(IFF_NOARP, not value)

  @property
  def ip_addr (self):
    # errno 99 (EADDRNOTAVAIL) means no address is assigned -> None.
    try:
      return self._ioctl_get_ipv4(SIOCGIFADDR)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @ip_addr.setter
  def ip_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFADDR, value)

  @property
  def netmask (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFNETMASK)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @netmask.setter
  def netmask (self, value):
    return self._ioctl_set_ipv4(SIOCSIFNETMASK, value)

  @property
  def broadcast_addr (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFBRDADDR)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @broadcast_addr.setter
  def broadcast_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFBRDADDR, value)

  @property
  def eth_addr (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFHWADDR, ifr)
    sa = ret[IFNAMESIZ:] # sockaddr
    return self._get_eth(sa)

  @eth_addr.setter
  def eth_addr (self, value):
    value = EthAddr(value).raw
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # sockaddr for hardware addresses: family (ARPHRD_ETHER) then raw MAC.
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, ARPHRD_ETHER)
    ifr += value # Append to sockaddr
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFHWADDR, ifr)

  def _ioctl_get_ipv4 (self, which):
    # Issue a SIOCGIF* ioctl and decode the sockaddr_in that comes back.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)
    return self._get_ipv4(ret[IFNAMESIZ:])

  def _ioctl_set_ipv4 (self, which, value):
    # Issue a SIOCSIF* ioctl carrying a sockaddr_in (family, port=0, addr).
    value = IPAddr(value)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sHHI", self.name, socket.AF_INET, 0,
                      value.toUnsigned(networkOrder=True))
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)

  @staticmethod
  def _get_ipv4 (sa):
    # Decode an IPv4 address out of a packed sockaddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == socket.AF_INET:
      return IPAddr(sa[4:8])
    else:
      raise RuntimeError("Unsupported hardware type %s for %s (expected %s)"
                         % (sa_family, self, socket.AF_INET))

  @staticmethod
  def _get_eth (sa):
    # Decode a MAC address out of a packed sockaddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == ARPHRD_ETHER:
      return EthAddr(sa[2:8])
    else:
      raise RuntimeError("Unsupported hardware type %s (expected %s)"
                         % (sa_family, ARPHRD_ETHER))

  def add_default_route (self, *args, **kw):
    return self.add_route("0.0.0.0/0", *args, **kw)

  def add_route (self, network, gateway=None, dev=(), metric=0):
    """
    Add routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCADDRT)

  def del_route (self, network, gateway=None, dev=(), metric=0):
    """
    Remove a routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCDELRT)

  def _add_del_route (self, network, gateway=None, dev=(), metric=0,
                      command=None):
    """
    Add or remove a routing table entry

    If dev is unspecified, it defaults to this device

    `network` may be an IPAddr, a "net/mask" string, or an (addr, mask)
    tuple where mask is a prefix length or dotted netmask.
    """
    r = rtentry()
    # Normalize an (addr, mask) tuple into "addr/mask" string form.
    if isinstance(network, tuple):
      addr,mask = network
      addr = str(addr)
      if isinstance(mask, int):
        mask = cidr_to_netmask(mask)
      mask = str(mask)
      network = "%s/%s" % (addr,mask)
    # A bare address (no "/") is treated as a host route.
    host = False
    if isinstance(network, IPAddr) or (isinstance(network, str)
                                       and "/" not in network):
      host = True
    network,bits = parse_cidr(network)
    r.rt_dst = network
    r.rt_genmask = cidr_to_netmask(bits)
    if gateway is not None:
      r.rt_gateway = IPAddr(gateway)
      r.rt_flags |= r.RTF_GATEWAY
    r.rt_metric = metric
    # NOTE(review): `is ()` compares by identity with the empty-tuple
    # default; fragile (CPython interning detail) -- `dev == ()` or a None
    # sentinel would be safer.  Left as-is to preserve behavior.
    if dev is (): dev = self
    if isinstance(dev, Interface): dev = dev.name
    if dev: r.rt_dev = dev
    if host: r.rt_flags |= r.RTF_HOST
    r.rt_flags |= r.RTF_UP
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rv = ioctl(sock, command, r.pack())
class TunTap (object):
  """
  Simple wrapper for tun/tap interfaces

  Looks like a file-like object.  You should be able to read/write it,
  select on it, etc.
  """
  def __init__ (self, name=None, tun=False, raw=False):
    """
    Create tun or tap

    By default, it creates a new tun or tap with a default name.  If you
    specify a name, it will either try to create it (if it doesn't exist),
    or try to use an existing interface (for which you must have permission).
    Defaults to tap (Ethernet) mode.  Specify tun=True for tun (IP) mode.
    Specify raw=True to skip the 32 bits of flag/protocol metadata.
    """
    if name is None: name = ""
    openflags = os.O_RDWR
    try:
      # O_BINARY only exists on some platforms (e.g. Windows).
      # BUGFIX: this previously assigned to a misspelled "openflow"
      # variable, so the flag was never actually applied (the NameError
      # was swallowed by a bare except).
      openflags |= os.O_BINARY
    except AttributeError:
      pass
    self._f = os.open("/dev/net/tun", openflags)

    # an ifreq is IFREQ_SIZE bytes long, starting with an interface name
    # (IFNAMESIZ bytes) followed by a big union.

    self.is_tun = tun
    self.is_tap = not tun
    self.is_raw = raw

    # Build the TUNSETIFF request: name + mode flags.
    flags = 0
    if tun: flags |= IFF_TUN
    else: flags |= IFF_TAP
    if raw: flags |= IFF_NO_PI
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, flags)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNSETIFF, ifr)
    # The kernel returns the (possibly auto-assigned, NUL-padded) name.
    self.name = ret[:IFNAMESIZ]

    # Read the flags back to learn the actual mode we got.
    iflags = flags
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, 0)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNGETIFF, ifr)
    flags = struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
    self.is_tun = (flags & IFF_TUN) == IFF_TUN
    self.is_tap = not self.is_tun
    #self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI

  def fileno (self):
    return self._f

  def write (self, data):
    return os.write(self.fileno(), data)

  def read (self, n):
    return os.read(self.fileno(), n)

  def close (self):
    return os.close(self.fileno())

  @property
  def eth_addr (self):
    return Interface(self.name).eth_addr
class RXData (Event):
  """
  Event raised whenever an interface receives a chunk of data.
  """
  def __init__ (self, interface, data):
    # Record the received payload and the interface that produced it.
    self.data = data
    self.interface = interface
class PCapInterface (Interface, EventMixin):
  """
  An Interface that captures incoming packets via pcap and re-raises them
  as RXData events on the cooperative thread.
  """
  _eventMixin_events = set([
    RXData,
  ])

  def __init__ (self, name):
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self._q = Queue()
    p = PCap(name, callback=self._pcap_cb, start=False)
    p.set_direction(True, False) # Incoming, not outgoing
    p.start()
    self.pcap = p
    core.add_listener(self._handle_GoingDownEvent)

  def _handle_GoingDownEvent (self, event):
    self.close()

  def send (self, data):
    # Silently drop if already closed.
    if self.pcap is None: return
    self.pcap.inject(data)

  def _pcap_cb (self, obj, data, sec, usec, length):
    """
    Handles incoming data from pcap

    This may not be on the right thread, so we just push it to a thread-safe
    queue and poke the cooperative thread, which will pop it later.
    """
    do_read = self._q.empty()
    self._q.put((obj,data))
    if do_read: core.callLater(self._queue_read)

  def _queue_read (self):
    anything = False
    for _ in range(10): # at most 10 at once
      try:
        data = self._q.get(False)
        self._q.task_done()
        anything = True
      except Exception:
        # Queue.Empty -- nothing left to drain right now.
        # BUGFIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        break
      pcap,data = data
      self.raiseEventNoErrors(RXData, self, data)
    if anything:
      # Check for remainders later
      core.callLater(self._queue_read)

  def __del__ (self):
    self.close()

  def close (self):
    if self.pcap:
      self.pcap.close()
      self.pcap = None
class TapInterface (Interface, EventMixin):
  """
  An Interface backed by a TunTap device.

  Registers with the shared ReadLoop and raises RXData events whenever a
  packet is read from the device.
  """
  _eventMixin_events = set([
    RXData,
  ])

  # ReadLoop instance that polls our file descriptor
  io_loop = None
  # Maximum number of bytes fetched per read from the device
  max_read_size = 1600
  # Protocol value used by send() when none is given (see FIXME in send)
  default_send_protocol = None

  def __init__ (self, name="", tun=False, raw=False, protocol=None):
    self.tap = None
    # Flags/protocol from the most recent 4-byte packet-info header
    self.last_flags = None
    self.last_protocol = None
    if protocol: self.default_send_protocol = protocol
    self.io_loop = ReadLoop.singleton
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self.tap = TunTap(name, raw=raw, tun=tun)
    # If no name was given, adopt the one the kernel assigned.
    if not name: self._name = self.tap.name
    self.io_loop.add(self)

  @property
  def is_tap (self):
    return self.tap.is_tap

  @property
  def is_tun (self):
    return self.tap.is_tun

  def send (self, data, flags=0, protocol=None):
    # In non-raw mode, prepend the 4-byte packet-info header
    # (16-bit flags followed by a 16-bit protocol).
    if not self.tap.is_raw:
      if protocol is None: protocol = self.default_send_protocol or 0
      #FIXME: In the "0" case above, should we fall back to using the Etherype
      #       in the packet?
      if flags or protocol:
        flags = struct.pack("!HH", flags, protocol) # Flags reversed?
      else:
        flags = "\0\0\0\0"
      data = flags + data
    self.tap.write(data)

  def _do_rx (self):
    # Called by the IO loop when our fd is readable.
    data = self.tap.read(self.max_read_size)
    if not self.tap.is_raw:
      flags,proto = struct.unpack("!HH", data[:4])
      #FIXME: This may invert the flags...
      self.last_flags = flags
      self.last_protocol = proto
      data = data[4:] # Cut off header
    self.raiseEvent(RXData, self, data)

  def fileno (self):
    # Support fileno so that this can be used in IO loop directly
    return self.tap.fileno()

  def close (self):
    if self.tap:
      self.tap.close()
      self.tap = None
      self.io_loop.remove(self)

  def __del__ (self):
    self.close()
| 1.710938 | 2 |
icarus_simulator/strategies/atk_geo_constraint/geo_constr_strat.py | RubenFr/ICARUS-framework | 5 | 17158 | # 2020 <NAME> and <NAME>
import os
import json
import numpy as np
from typing import Set, List
from geopy.distance import great_circle
from scipy.spatial.ckdtree import cKDTree
from shapely.geometry import Polygon, shape, Point
from icarus_simulator.sat_core.coordinate_util import geo2cart
from icarus_simulator.strategies.atk_geo_constraint.base_geo_constraint_strat import (
BaseGeoConstraintStrat,
)
from icarus_simulator.structure_definitions import GridPos
dirname = os.path.dirname(__file__)
strategies_dirname = os.path.split(dirname)[0]
library_dirname = os.path.split(strategies_dirname)[0]
data_dirname = os.path.join(library_dirname, "data")
COUNTRIES_FILE: str = os.path.join(data_dirname, "natural_earth_world_small.geo.json")
class GeoConstrStrat(BaseGeoConstraintStrat):
    """Geographic attack constraint: allows grid points lying in any of a
    set of named countries, subregions or continents."""

    def __init__(self, geo_names: List[str], **kwargs):
        super().__init__()
        self.geo_names = geo_names
        if kwargs:
            pass  # Appease the unused param inspection

    @property
    def name(self) -> str:
        return "geo"

    @property
    def param_description(self) -> str:
        return ",".join(self.geo_names)

    def compute(self, grid_pos: GridPos) -> Set[int]:
        # Union of the allowed grid point ids over every named region.
        geo_data = load_country_geojson()
        allowed: Set[int] = set()
        for region in self.geo_names:
            allowed |= get_allowed_gridpoints(region, grid_pos, geo_data)
        return allowed
# noinspection PyTypeChecker
def get_allowed_gridpoints(geo_location: str, grid_pos: GridPos, geo_data) -> Set[int]:
    """
    Return the ids of the grid points allowed for a geographic constraint.

    `geo_location` may be an ISO-A3 country code, a subregion name or a
    continent name (as indexed by load_country_geojson).  A point is
    allowed if it lies within the union of the region's shapes, or if it
    is the grid point closest to a border vertex and within 300 km of it
    (keeps coastal points that fall slightly into the sea).

    :raises ValueError: if geo_location matches no known region.
    """
    # Resolve the constraint name to a list of geometry indices.
    if geo_location in geo_data["countries"]:
        indices = [geo_data["countries"][geo_location]]
    elif geo_location in geo_data["subregions"]:
        indices = geo_data["subregions"][geo_location]
    elif geo_location in geo_data["continents"]:
        indices = geo_data["continents"][geo_location]
    else:
        raise ValueError("Invalid geographic constraint")
    geometries = [geo_data["geometries"][index] for index in indices]

    allowed_points = set()
    # Create a unique shape, union of all shapes in the region, and take the
    # points included within it.
    shp = Polygon()
    for geo in geometries:
        shp = shp.union(shape(geo))
    for idx, pos in grid_pos.items():
        # Geometries are stored as (lat, lon) -- see load_country_geojson.
        if Point(pos.lat, pos.lon).within(shp):
            allowed_points.add(idx)

    # Extract the border (exterior) vertices of every polygon.
    # (Removed a leftover dead "if True:" wrapper and unused enumerate
    # indices from the original.)
    x, y = [], []
    if shp.geom_type == "MultiPolygon":
        for shap in shp.geoms:
            x1, y1 = shap.exterior.xy
            x.extend(x1)
            y.extend(y1)
    else:
        x1, y1 = shp.exterior.xy
        x.extend(x1)
        y.extend(y1)

    # Cartesian copy of the grid for the KD-tree, with a map back to grid ids.
    grid_cart = np.zeros((len(grid_pos), 3))
    grid_map = {}
    i = 0
    for idx, pos in grid_pos.items():
        grid_map[i] = idx
        grid_cart[i] = geo2cart({"elev": 0, "lon": pos.lon, "lat": pos.lat})
        i += 1

    # Put the homogeneous grid into a KD-tree and query the border points to
    # include also points slightly in the sea.
    kd = cKDTree(grid_cart)
    for idx in range(len(x)):
        _, closest_grid_idx = kd.query(
            geo2cart({"elev": 0, "lon": y[idx], "lat": x[idx]}), k=1
        )
        grid_id = grid_map[closest_grid_idx]
        if (
            great_circle(
                (grid_pos[grid_id].lat, grid_pos[grid_id].lon), (x[idx], y[idx])
            ).meters
            < 300000
        ):
            # 300000 -> number elaborated to keep the out-of-coast values
            # without including wrong points
            allowed_points.add(grid_map[closest_grid_idx])
    return allowed_points
# noinspection PyTypeChecker
def load_country_geojson():
    """
    Parse the bundled Natural Earth GeoJSON and index it for lookups.

    Returns a dict with:
      - "geometries": geometry per feature (index-aligned with the file)
      - "countries":  ISO-A3 code -> feature index
      - "continents"/"subregions": name -> list of feature indices
    Coordinate pairs are flipped in place from GeoJSON's (lon, lat) order
    to (lat, lon).
    """
    new_data = {"geometries": [], "countries": {}, "continents": {}, "subregions": {}}
    with open(COUNTRIES_FILE, encoding="utf-8") as f:
        data = json.load(f)
    new_data["geometries"] = [""] * len(data["features"])
    for idx, feature in enumerate(data["features"]):
        props = feature["properties"]
        code = props["iso_a3"]
        if code == "-99":
            # Features without a proper ISO code are skipped entirely.
            continue
        # Index this feature under its continent and both subregion namings.
        new_data["continents"].setdefault(props["continent"], []).append(idx)
        new_data["subregions"].setdefault(props["region_wb"], []).append(idx)
        new_data["subregions"].setdefault(props["subregion"], []).append(idx)
        new_data["countries"][code] = idx
        geom = feature["geometry"]
        new_data["geometries"][idx] = geom
        # Collect every linear ring of the geometry, then flip each
        # (lon, lat) coordinate pair to (lat, lon) in place.
        if geom["type"] == "MultiPolygon":
            rings = [ring for polygon in geom["coordinates"] for ring in polygon]
        elif geom["type"] == "Polygon":
            rings = list(geom["coordinates"])
        else:
            rings = []
        for ring in rings:
            for i in range(len(ring)):
                ring[i] = ring[i][::-1]
    print(f"Available subregions: {list(new_data['subregions'].keys())}")
    return new_data
| 2.203125 | 2 |
grafeas/models/deployable_deployment_details.py | nyc/client-python | 0 | 17159 | # coding: utf-8
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts. # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from grafeas.models.deployment_details_platform import DeploymentDetailsPlatform # noqa: F401,E501
class DeployableDeploymentDetails(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'user_email': 'str',
        'deploy_time': 'datetime',
        'undeploy_time': 'datetime',
        'config': 'str',
        'address': 'str',
        'resource_uri': 'list[str]',
        'platform': 'DeploymentDetailsPlatform'
    }

    attribute_map = {
        'user_email': 'user_email',
        'deploy_time': 'deploy_time',
        'undeploy_time': 'undeploy_time',
        'config': 'config',
        'address': 'address',
        'resource_uri': 'resource_uri',
        'platform': 'platform'
    }

    def __init__(self, user_email=None, deploy_time=None, undeploy_time=None, config=None, address=None, resource_uri=None, platform=None):  # noqa: E501
        """DeployableDeploymentDetails - a model defined in Swagger"""  # noqa: E501
        self._user_email = None
        self._deploy_time = None
        self._undeploy_time = None
        self._config = None
        self._address = None
        self._resource_uri = None
        self._platform = None
        # Discriminator is unused by this model; kept for generator compatibility.
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if user_email is not None:
            self.user_email = user_email
        if deploy_time is not None:
            self.deploy_time = deploy_time
        if undeploy_time is not None:
            self.undeploy_time = undeploy_time
        if config is not None:
            self.config = config
        if address is not None:
            self.address = address
        if resource_uri is not None:
            self.resource_uri = resource_uri
        if platform is not None:
            self.platform = platform

    @property
    def user_email(self):
        """Gets the user_email of this DeployableDeploymentDetails.  # noqa: E501

        Identity of the user that triggered this deployment.  # noqa: E501

        :return: The user_email of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._user_email

    @user_email.setter
    def user_email(self, user_email):
        """Sets the user_email of this DeployableDeploymentDetails.

        Identity of the user that triggered this deployment.  # noqa: E501

        :param user_email: The user_email of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """

        self._user_email = user_email

    @property
    def deploy_time(self):
        """Gets the deploy_time of this DeployableDeploymentDetails.  # noqa: E501

        Beginning of the lifetime of this deployment.  # noqa: E501

        :return: The deploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._deploy_time

    @deploy_time.setter
    def deploy_time(self, deploy_time):
        """Sets the deploy_time of this DeployableDeploymentDetails.

        Beginning of the lifetime of this deployment.  # noqa: E501

        :param deploy_time: The deploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :type: datetime
        """

        self._deploy_time = deploy_time

    @property
    def undeploy_time(self):
        """Gets the undeploy_time of this DeployableDeploymentDetails.  # noqa: E501

        End of the lifetime of this deployment.  # noqa: E501

        :return: The undeploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._undeploy_time

    @undeploy_time.setter
    def undeploy_time(self, undeploy_time):
        """Sets the undeploy_time of this DeployableDeploymentDetails.

        End of the lifetime of this deployment.  # noqa: E501

        :param undeploy_time: The undeploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :type: datetime
        """

        self._undeploy_time = undeploy_time

    @property
    def config(self):
        """Gets the config of this DeployableDeploymentDetails.  # noqa: E501

        Configuration used to create this deployment.  # noqa: E501

        :return: The config of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._config

    @config.setter
    def config(self, config):
        """Sets the config of this DeployableDeploymentDetails.

        Configuration used to create this deployment.  # noqa: E501

        :param config: The config of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """

        self._config = config

    @property
    def address(self):
        """Gets the address of this DeployableDeploymentDetails.  # noqa: E501

        Address of the runtime element hosting this deployment.  # noqa: E501

        :return: The address of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._address

    @address.setter
    def address(self, address):
        """Sets the address of this DeployableDeploymentDetails.

        Address of the runtime element hosting this deployment.  # noqa: E501

        :param address: The address of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """

        self._address = address

    @property
    def resource_uri(self):
        """Gets the resource_uri of this DeployableDeploymentDetails.  # noqa: E501

        Output only. Resource URI for the artifact being deployed taken from the deployable field with the same name.  # noqa: E501

        :return: The resource_uri of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: list[str]
        """
        return self._resource_uri

    @resource_uri.setter
    def resource_uri(self, resource_uri):
        """Sets the resource_uri of this DeployableDeploymentDetails.

        Output only. Resource URI for the artifact being deployed taken from the deployable field with the same name.  # noqa: E501

        :param resource_uri: The resource_uri of this DeployableDeploymentDetails.  # noqa: E501
        :type: list[str]
        """

        self._resource_uri = resource_uri

    @property
    def platform(self):
        """Gets the platform of this DeployableDeploymentDetails.  # noqa: E501

        Platform hosting this deployment.  # noqa: E501

        :return: The platform of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: DeploymentDetailsPlatform
        """
        return self._platform

    @platform.setter
    def platform(self, platform):
        """Sets the platform of this DeployableDeploymentDetails.

        Platform hosting this deployment.  # noqa: E501

        :param platform: The platform of this DeployableDeploymentDetails.  # noqa: E501
        :type: DeploymentDetailsPlatform
        """

        self._platform = platform

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 (standard for generated mutable models).
        if not isinstance(other, DeployableDeploymentDetails):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 1.84375 | 2 |
lib/python/test/test_trans.py | qxo/cat | 5 | 17160 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: stdrickforce (<NAME>)
# Email: <<EMAIL>> <<EMAIL>>
import cat
import time
def ignore_exception(func):
    """Decorator that swallows any exception raised by ``func``.

    The wrapped call returns ``func``'s result on success and ``None``
    when an exception was raised (deliberate best-effort semantics for
    the demo below).
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            pass
    return wrapper
@ignore_exception
@cat.transaction("Trans", "T1")
def test1():
    '''
    Use via decorator

    The cat.transaction decorator wraps the call in a transaction; the
    deliberate ZeroDivisionError below propagates out of it and is
    swallowed by the outer ignore_exception decorator, so the demo loop
    keeps running.
    '''
    print(1 / 0)  # NOTE will cause ZeroDivisionException
def test2():
    '''
    Use via context manager
    '''
    def flaky_operation():
        # Fails roughly one time in ten.
        import random
        if random.random() < 0.1:
            raise Exception("error occured!")

    with cat.Transaction("Trans", "T2") as trans:
        cat.log_event("Event", "E2")
        try:
            flaky_operation()
        except Exception:
            # Mark the transaction as failed instead of letting it bubble up.
            trans.set_status(cat.CAT_ERROR)
        trans.add_data("context-manager")
        trans.add_data("foo", "bar")
def test3():
    '''
    Use the raw Transaction API, completing it explicitly.
    '''
    # BUGFIX: create the transaction *before* entering the try block.  In
    # the original, cat.Transaction(...) was inside the try; if the
    # constructor itself raised, the finally clause hit a NameError on the
    # undefined `trans`, masking the real error.
    trans = cat.Transaction("Trans", "T3")
    try:
        trans.add_data("content")
        trans.add_data("key", "val")
        trans.set_status("error")
        trans.set_duration(500)
        trans.set_duration_start(time.time() * 1000 - 30 * 1000)
        trans.set_timestamp(time.time() * 1000 - 30 * 1000)
    finally:
        # NOTE don't forget to complete the transaction!
        trans.complete()
if __name__ == '__main__':
    # Initialize the cat client (debug logging on, logview reporting off).
    cat.init("pycat", debug=True, logview=False)
    # Fire a burst of sample transactions, lightly paced.
    for i in range(100):
        test1()
        test2()
        test3()
        time.sleep(0.01)
    # Pause before exiting -- presumably to let the client's background
    # sender flush pending messages; confirm against the cat client docs.
    time.sleep(1)
| 2.65625 | 3 |
src/ui/ui_hw_recovery_wdg.py | frosted97/dash-masternode-tool | 75 | 17161 | <reponame>frosted97/dash-masternode-tool<filename>src/ui/ui_hw_recovery_wdg.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file ui_hw_recovery_wdg.ui
#
# Created by: PyQt5 UI code generator
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_WdgRecoverHw(object):
def setupUi(self, WdgRecoverHw):
WdgRecoverHw.setObjectName("WdgRecoverHw")
WdgRecoverHw.resize(587, 352)
self.verticalLayout_4 = QtWidgets.QVBoxLayout(WdgRecoverHw)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.pages = QtWidgets.QStackedWidget(WdgRecoverHw)
self.pages.setObjectName("pages")
self.page0 = QtWidgets.QWidget()
self.page0.setObjectName("page0")
self.verticalLayout = QtWidgets.QVBoxLayout(self.page0)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.label_2 = QtWidgets.QLabel(self.page0)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.gbSeedSource = QtWidgets.QGroupBox(self.page0)
self.gbSeedSource.setTitle("")
self.gbSeedSource.setFlat(False)
self.gbSeedSource.setObjectName("gbSeedSource")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.gbSeedSource)
self.verticalLayout_5.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_5.setSpacing(8)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.rbSeedSourceHwScreen = QtWidgets.QRadioButton(self.gbSeedSource)
self.rbSeedSourceHwScreen.setChecked(False)
self.rbSeedSourceHwScreen.setObjectName("rbSeedSourceHwScreen")
self.verticalLayout_5.addWidget(self.rbSeedSourceHwScreen)
self.rbSeedSourceAppWords = QtWidgets.QRadioButton(self.gbSeedSource)
self.rbSeedSourceAppWords.setChecked(False)
self.rbSeedSourceAppWords.setObjectName("rbSeedSourceAppWords")
self.verticalLayout_5.addWidget(self.rbSeedSourceAppWords)
self.rbSeedSourceAppEntropy = QtWidgets.QRadioButton(self.gbSeedSource)
self.rbSeedSourceAppEntropy.setObjectName("rbSeedSourceAppEntropy")
self.verticalLayout_5.addWidget(self.rbSeedSourceAppEntropy)
self.verticalLayout.addWidget(self.gbSeedSource)
self.lblActionTypeMessage = QtWidgets.QLabel(self.page0)
self.lblActionTypeMessage.setWordWrap(True)
self.lblActionTypeMessage.setObjectName("lblActionTypeMessage")
self.verticalLayout.addWidget(self.lblActionTypeMessage)
spacerItem = QtWidgets.QSpacerItem(20, 288, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.pages.addWidget(self.page0)
self.page1 = QtWidgets.QWidget()
self.page1.setObjectName("page1")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.page1)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.page1)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.gbNumberOfMnemonicWords = QtWidgets.QGroupBox(self.page1)
self.gbNumberOfMnemonicWords.setTitle("")
self.gbNumberOfMnemonicWords.setObjectName("gbNumberOfMnemonicWords")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.gbNumberOfMnemonicWords)
self.verticalLayout_8.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_8.setSpacing(8)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.rbWordsCount24 = QtWidgets.QRadioButton(self.gbNumberOfMnemonicWords)
self.rbWordsCount24.setChecked(True)
self.rbWordsCount24.setObjectName("rbWordsCount24")
self.verticalLayout_8.addWidget(self.rbWordsCount24)
self.rbWordsCount18 = QtWidgets.QRadioButton(self.gbNumberOfMnemonicWords)
self.rbWordsCount18.setObjectName("rbWordsCount18")
self.verticalLayout_8.addWidget(self.rbWordsCount18)
self.rbWordsCount12 = QtWidgets.QRadioButton(self.gbNumberOfMnemonicWords)
self.rbWordsCount12.setObjectName("rbWordsCount12")
self.verticalLayout_8.addWidget(self.rbWordsCount12)
self.verticalLayout_2.addWidget(self.gbNumberOfMnemonicWords)
self.lblPage1Message = QtWidgets.QLabel(self.page1)
self.lblPage1Message.setText("")
self.lblPage1Message.setWordWrap(True)
self.lblPage1Message.setObjectName("lblPage1Message")
self.verticalLayout_2.addWidget(self.lblPage1Message)
spacerItem1 = QtWidgets.QSpacerItem(20, 310, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.pages.addWidget(self.page1)
self.page2 = QtWidgets.QWidget()
self.page2.setObjectName("page2")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.page2)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(6)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.lblStep1HexEntropy = QtWidgets.QLabel(self.page2)
self.lblStep1HexEntropy.setObjectName("lblStep1HexEntropy")
self.verticalLayout_6.addWidget(self.lblStep1HexEntropy)
self.edtHexEntropy = QtWidgets.QLineEdit(self.page2)
self.edtHexEntropy.setObjectName("edtHexEntropy")
self.verticalLayout_6.addWidget(self.edtHexEntropy)
spacerItem2 = QtWidgets.QSpacerItem(20, 365, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem2)
self.pages.addWidget(self.page2)
self.page3 = QtWidgets.QWidget()
self.page3.setObjectName("page3")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.page3)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(6)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.lblStepWordListTitle = QtWidgets.QLabel(self.page3)
self.lblStepWordListTitle.setWordWrap(True)
self.lblStepWordListTitle.setOpenExternalLinks(True)
self.lblStepWordListTitle.setObjectName("lblStepWordListTitle")
self.verticalLayout_7.addWidget(self.lblStepWordListTitle)
self.pages.addWidget(self.page3)
self.page4 = QtWidgets.QWidget()
self.page4.setObjectName("page4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.page4)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(6)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.lblStep1HexEntropy_2 = QtWidgets.QLabel(self.page4)
self.lblStep1HexEntropy_2.setObjectName("lblStep1HexEntropy_2")
self.verticalLayout_3.addWidget(self.lblStep1HexEntropy_2)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.lblPinMessage = QtWidgets.QLabel(self.page4)
self.lblPinMessage.setText("")
self.lblPinMessage.setWordWrap(False)
self.lblPinMessage.setObjectName("lblPinMessage")
self.horizontalLayout_3.addWidget(self.lblPinMessage)
self.edtPrimaryPIN = QtWidgets.QLineEdit(self.page4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edtPrimaryPIN.sizePolicy().hasHeightForWidth())
self.edtPrimaryPIN.setSizePolicy(sizePolicy)
self.edtPrimaryPIN.setLayoutDirection(QtCore.Qt.LeftToRight)
self.edtPrimaryPIN.setEchoMode(QtWidgets.QLineEdit.Password)
self.edtPrimaryPIN.setObjectName("edtPrimaryPIN")
self.horizontalLayout_3.addWidget(self.edtPrimaryPIN)
self.btnShowPIN = QtWidgets.QToolButton(self.page4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnShowPIN.sizePolicy().hasHeightForWidth())
self.btnShowPIN.setSizePolicy(sizePolicy)
self.btnShowPIN.setMinimumSize(QtCore.QSize(21, 21))
self.btnShowPIN.setMaximumSize(QtCore.QSize(21, 21))
self.btnShowPIN.setText("")
self.btnShowPIN.setObjectName("btnShowPIN")
self.horizontalLayout_3.addWidget(self.btnShowPIN)
self.edtSecondaryPIN = QtWidgets.QLineEdit(self.page4)
self.edtSecondaryPIN.setEchoMode(QtWidgets.QLineEdit.Password)
self.edtSecondaryPIN.setObjectName("edtSecondaryPIN")
self.horizontalLayout_3.addWidget(self.edtSecondaryPIN)
self.btnShowSecondaryPIN = QtWidgets.QToolButton(self.page4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnShowSecondaryPIN.sizePolicy().hasHeightForWidth())
self.btnShowSecondaryPIN.setSizePolicy(sizePolicy)
self.btnShowSecondaryPIN.setMinimumSize(QtCore.QSize(21, 21))
self.btnShowSecondaryPIN.setMaximumSize(QtCore.QSize(21, 21))
self.btnShowSecondaryPIN.setText("")
self.btnShowSecondaryPIN.setObjectName("btnShowSecondaryPIN")
self.horizontalLayout_3.addWidget(self.btnShowSecondaryPIN)
self.gridLayout_2.addLayout(self.horizontalLayout_3, 2, 1, 1, 1)
self.chbUsePassphrase = QtWidgets.QCheckBox(self.page4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.chbUsePassphrase.sizePolicy().hasHeightForWidth())
self.chbUsePassphrase.setSizePolicy(sizePolicy)
self.chbUsePassphrase.setLayoutDirection(QtCore.Qt.RightToLeft)
self.chbUsePassphrase.setText("Use passphrase")
self.chbUsePassphrase.setObjectName("chbUsePassphrase")
self.gridLayout_2.addWidget(self.chbUsePassphrase, 3, 0, 1, 1)
self.chbUsePIN = QtWidgets.QCheckBox(self.page4)
self.chbUsePIN.setLayoutDirection(QtCore.Qt.RightToLeft)
self.chbUsePIN.setChecked(True)
self.chbUsePIN.setObjectName("chbUsePIN")
self.gridLayout_2.addWidget(self.chbUsePIN, 2, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.edtDeviceLabel = QtWidgets.QLineEdit(self.page4)
self.edtDeviceLabel.setPlaceholderText("")
self.edtDeviceLabel.setObjectName("edtDeviceLabel")
self.horizontalLayout.addWidget(self.edtDeviceLabel)
self.gridLayout_2.addLayout(self.horizontalLayout, 1, 1, 1, 1)
self.lblDeviceLabel = QtWidgets.QLabel(self.page4)
self.lblDeviceLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lblDeviceLabel.setObjectName("lblDeviceLabel")
self.gridLayout_2.addWidget(self.lblDeviceLabel, 1, 0, 1, 1)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.lblPassphraseMessage = QtWidgets.QLabel(self.page4)
self.lblPassphraseMessage.setText("")
self.lblPassphraseMessage.setWordWrap(False)
self.lblPassphraseMessage.setObjectName("lblPassphraseMessage")
self.horizontalLayout_4.addWidget(self.lblPassphraseMessage)
self.edtPassphrase = QtWidgets.QLineEdit(self.page4)
self.edtPassphrase.setEchoMode(QtWidgets.QLineEdit.Password)
self.edtPassphrase.setObjectName("edtPassphrase")
self.horizontalLayout_4.addWidget(self.edtPassphrase)
self.btnShowPassphrase = QtWidgets.QToolButton(self.page4)
self.btnShowPassphrase.setMinimumSize(QtCore.QSize(21, 21))
self.btnShowPassphrase.setMaximumSize(QtCore.QSize(21, 21))
self.btnShowPassphrase.setText("")
self.btnShowPassphrase.setObjectName("btnShowPassphrase")
self.horizontalLayout_4.addWidget(self.btnShowPassphrase)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.gridLayout_2.addLayout(self.horizontalLayout_4, 3, 1, 1, 1)
self.lblDeviceWordsInputType = QtWidgets.QLabel(self.page4)
self.lblDeviceWordsInputType.setAlignment(QtCore.Qt.AlignCenter)
self.lblDeviceWordsInputType.setObjectName("lblDeviceWordsInputType")
self.gridLayout_2.addWidget(self.lblDeviceWordsInputType, 0, 0, 1, 1)
self.gbDeviceWordsInputType = QtWidgets.QGroupBox(self.page4)
self.gbDeviceWordsInputType.setObjectName("gbDeviceWordsInputType")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.gbDeviceWordsInputType)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.rbScrambledWords = QtWidgets.QRadioButton(self.gbDeviceWordsInputType)
self.rbScrambledWords.setLayoutDirection(QtCore.Qt.LeftToRight)
self.rbScrambledWords.setChecked(True)
self.rbScrambledWords.setObjectName("rbScrambledWords")
self.horizontalLayout_2.addWidget(self.rbScrambledWords)
self.rbWordsMatrix = QtWidgets.QRadioButton(self.gbDeviceWordsInputType)
self.rbWordsMatrix.setLayoutDirection(QtCore.Qt.LeftToRight)
self.rbWordsMatrix.setChecked(False)
self.rbWordsMatrix.setObjectName("rbWordsMatrix")
self.horizontalLayout_2.addWidget(self.rbWordsMatrix)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem4)
self.gridLayout_2.addWidget(self.gbDeviceWordsInputType, 0, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
self.lblOptionsPageMessage = QtWidgets.QLabel(self.page4)
self.lblOptionsPageMessage.setText("")
self.lblOptionsPageMessage.setObjectName("lblOptionsPageMessage")
self.verticalLayout_3.addWidget(self.lblOptionsPageMessage)
spacerItem5 = QtWidgets.QSpacerItem(20, 293, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem5)
self.lblOptionsEntropy = QtWidgets.QLabel(self.page4)
self.lblOptionsEntropy.setStyleSheet("font-size:11px")
self.lblOptionsEntropy.setWordWrap(True)
self.lblOptionsEntropy.setOpenExternalLinks(True)
self.lblOptionsEntropy.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.lblOptionsEntropy.setObjectName("lblOptionsEntropy")
self.verticalLayout_3.addWidget(self.lblOptionsEntropy)
self.btnPreviewAddresses = QtWidgets.QPushButton(self.page4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnPreviewAddresses.sizePolicy().hasHeightForWidth())
self.btnPreviewAddresses.setSizePolicy(sizePolicy)
self.btnPreviewAddresses.setLayoutDirection(QtCore.Qt.LeftToRight)
self.btnPreviewAddresses.setAutoDefault(False)
self.btnPreviewAddresses.setObjectName("btnPreviewAddresses")
self.verticalLayout_3.addWidget(self.btnPreviewAddresses)
self.pages.addWidget(self.page4)
self.verticalLayout_4.addWidget(self.pages)
self.retranslateUi(WdgRecoverHw)
self.pages.setCurrentIndex(4)
QtCore.QMetaObject.connectSlotsByName(WdgRecoverHw)
def retranslateUi(self, WdgRecoverHw):
    """Install all user-visible strings for the widget.

    This method is pyuic-generated from a Qt Designer .ui file. Two defects
    are fixed here: the chbUsePassphrase WhatsThis HTML contained unescaped
    inner double quotes (a Python syntax error), and the
    lblDeviceWordsInputType label had the typo "Tnput".
    """
    _translate = QtCore.QCoreApplication.translate
    WdgRecoverHw.setWindowTitle(_translate("WdgRecoverHw", "Form"))
    self.label_2.setText(_translate("WdgRecoverHw", "<b>Source of the recovery seed</b>"))
    self.rbSeedSourceHwScreen.setText(_translate("WdgRecoverHw", "Recover from seed words using hardware wallet screen (secure)"))
    self.rbSeedSourceAppWords.setText(_translate("WdgRecoverHw", "Recover from seed words using in-app editor (convenient but insecure)"))
    self.rbSeedSourceAppEntropy.setText(_translate("WdgRecoverHw", "Recover from hexadecimal entropy (insecure)"))
    self.lblActionTypeMessage.setText(_translate("WdgRecoverHw", "..."))
    self.label.setText(_translate("WdgRecoverHw", "<b>Number of words of the recovery seed</b>"))
    self.rbWordsCount24.setText(_translate("WdgRecoverHw", "24"))
    self.rbWordsCount18.setText(_translate("WdgRecoverHw", "18"))
    self.rbWordsCount12.setText(_translate("WdgRecoverHw", "12"))
    self.lblStep1HexEntropy.setText(_translate("WdgRecoverHw", "<b>Enter the hexadecimal entropy of the recovery seed</b>"))
    self.edtHexEntropy.setPlaceholderText(_translate("WdgRecoverHw", "32/24/16-byte hexadecimal string"))
    self.lblStepWordListTitle.setText(_translate("WdgRecoverHw", "<b>Enter the words of your recovery seed</b>"))
    self.lblStep1HexEntropy_2.setText(_translate("WdgRecoverHw", "<b>Tune hardware wallet options as needed</b>"))
    self.edtPrimaryPIN.setPlaceholderText(_translate("WdgRecoverHw", "PIN"))
    self.btnShowPIN.setToolTip(_translate("WdgRecoverHw", "Show PIN"))
    self.edtSecondaryPIN.setToolTip(_translate("WdgRecoverHw", "<html><head/><body><p>This PIN will be used to activate passphrase saved in your Ledger Nano S.</p></body></html>"))
    self.edtSecondaryPIN.setPlaceholderText(_translate("WdgRecoverHw", "Secondary PIN"))
    self.btnShowSecondaryPIN.setToolTip(_translate("WdgRecoverHw", "Show secondary PIN"))
    # Bug fix: the inner quotes around the href value were unescaped, which
    # made this line a syntax error in the original source.
    self.chbUsePassphrase.setWhatsThis(_translate("WdgRecoverHw", "<html><head/><body><p>Check the link attached <a href=\"dash.org\">dash.org</a></p></body></html>"))
    self.chbUsePIN.setText(_translate("WdgRecoverHw", "Use PIN"))
    self.lblDeviceLabel.setText(_translate("WdgRecoverHw", "Device label"))
    self.edtPassphrase.setToolTip(_translate("WdgRecoverHw", "<html><head/><body><p>This passphrase (if used) will be saved in your Ledger Nano S device and will be secured with the secondary PIN .</p></body></html>"))
    self.edtPassphrase.setPlaceholderText(_translate("WdgRecoverHw", "Passphrase"))
    self.btnShowPassphrase.setToolTip(_translate("WdgRecoverHw", "Show passphrase"))
    # Bug fix: "Tnput" -> "Input" (user-facing label typo).
    self.lblDeviceWordsInputType.setText(_translate("WdgRecoverHw", "Input type on device"))
    self.rbScrambledWords.setText(_translate("WdgRecoverHw", "Scrambled words"))
    self.rbWordsMatrix.setText(_translate("WdgRecoverHw", "Word matrix"))
    self.lblOptionsEntropy.setText(_translate("WdgRecoverHw", "Entropy:"))
    self.btnPreviewAddresses.setText(_translate("WdgRecoverHw", "Show preview"))
| 1.835938 | 2 |
datahandler/analyser.py | ameliecordier/IIK | 0 | 17162 | # -*- coding: utf-8 -*-
import csv
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def isValid(p, ep):
    """Return True when pattern *p* is among *ep*'s known patterns."""
    known_patterns = ep.patterns
    return p in known_patterns
# CLASS ANALYSER
class Analyser:
    """Accumulates analysis result rows and serializes them to a CSV file.

    Each result is a dict keyed by the CSV field names used in toFile().
    """

    def __init__(self):
        """Initialize with an empty result list."""
        # List of dict rows, in insertion order.
        self.results = []

    def addResult(self, result):
        """Append one result row to the accumulated results.

        :param result: a dict of result values (keys should match the
            field names written by toFile)
        """
        self.results.append(result)

    def __str__(self):
        """Return a human-readable summary of the results (French label kept)."""
        return "Résultats : %r" % self.results

    def toFile(self, filename):
        """Write the accumulated rows to *filename* as a ';'-separated CSV.

        Bug fix: the file is opened with newline='' as required by the csv
        module; without it, extra blank rows appear on Windows.
        """
        fieldnames = ['idxExpert', 'idxMining', 'pattern expert', 'pattern mining', 'full pattern']
        with open(filename, "w", newline='') as outfile:
            w = csv.DictWriter(outfile, delimiter=";", fieldnames=fieldnames)
            w.writeheader()
            w.writerows(self.results)
| 3.15625 | 3 |
binary_trees/next_right.py | xxaxdxcxx/miscellaneous-code | 0 | 17163 | <filename>binary_trees/next_right.py
# Definition for binary tree with next pointer.
class TreeLinkNode:
    """Binary-tree node with an extra `next` link to its right neighbour."""

    def __init__(self, x):
        """Store the value; all three links start unset."""
        self.val = x
        self.left = self.right = self.next = None
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        """Populate each node's `next` pointer with its nearest right
        neighbour on the same level (None for the rightmost node).

        The already-linked `next` pointers of the current level are used to
        walk it while wiring up the children level, so only O(1) extra space
        is used. `next_start` remembers the leftmost child, i.e. where the
        next level's walk begins.
        """
        node = root          # cursor walking the current (parent) level
        current = None       # last child whose `next` still needs assigning
        candidate = None     # the child that `current.next` should point at
        next_start = None    # leftmost node of the children level
        if node is None:
            return
        while node is not None:
            # loop through nodes in this level, assigning nexts
            # assumption: previous level (node's level)
            # has all nexts assigned correctly
            # assign left's next to right if applicable
            if node.left is not None:
                # tells loop where to start for next level
                if next_start is None:
                    next_start = node.left
                if node.right is not None:
                    node.left.next = node.right
                    current = node.right
                else:
                    current = node.left
            else:
                if node.right is not None:
                    if next_start is None:
                        next_start = node.right
                    current = node.right
                else:
                    # childless parent: move right along the current level
                    node = node.next
                    continue
            # Scan right along the parent level for the first node with a
            # child; that child is `current`'s next neighbour.
            while candidate is None:
                node = node.next
                if node is None:
                    break
                if node.left is None:
                    if node.right is None:
                        continue
                    else:
                        candidate = node.right
                else:
                    candidate = node.left
            # candidate stays None at the end of a level, correctly
            # terminating `current`'s chain.
            current.next = candidate
            candidate = None
            # end of inner loop, through nodes in a level
            if node is None:
                # Parent level exhausted: descend to the children level.
                node = next_start
                next_start = None
| 4.125 | 4 |
6.all_species/species_data/merge_species_data.py | oaxiom/episcan | 0 | 17164 | #!/usr/bin/env python3
import sys, os, glob
from glbase3 import *
# Load the previously saved species annotation table (glbase3 serialized list).
all_species = glload('species_annotations/species.glb')
newl = []
# Each pep_counts/*.txt file holds the peptide count for one assembly; the
# first whitespace-separated token of its first line is the count.
for file in glob.glob('pep_counts/*.txt'):
    oh = open(file, 'rt')
    count = int(oh.readline().split()[0])
    oh.close()
    # NOTE(review): species/assembly names are derived purely from the file
    # name ("seems a simple rule" per the original author) — confirm this
    # holds for every assembly naming scheme in pep_counts/.
    species_name = os.path.split(file)[1].split('.')[0].lower() # seems a simple rule
    assembly_name = os.path.split(file)[1].replace('.txt', '')
    # Skip assemblies with implausibly small peptide sets.
    if count < 5000:
        continue
    newl.append({'species': species_name, 'assembly_name': assembly_name, 'num_pep': count})
# Wrap the counts in a glbase3 genelist so they can be joined onto the
# annotation table by the shared 'species' key.
pep_counts = genelist()
pep_counts.load_list(newl)
all_species = all_species.map(genelist=pep_counts, key='species')
all_species = all_species.removeDuplicates('name')
print(all_species)
all_species = all_species.getColumns(['name', 'species', 'division' ,'num_pep', 'assembly_name'])
all_species.sort('name')
# Persist both a TSV (human-readable) and a glb (glbase3 binary) copy.
all_species.saveTSV('all_species.tsv')
all_species.save('all_species.glb')
# and add the peptide counts for all species
| 2.625 | 3 |
modules/lex_managers/lex_intent_manager.py | adamhamden/lex-bot | 0 | 17165 | <filename>modules/lex_managers/lex_intent_manager.py<gh_stars>0
import boto3
from prettytable import PrettyTable
class LexIntentManager:
    """Thin wrapper around the AWS Lex model-building API for managing intents."""

    def __init__(self):
        # boto3 resolves credentials and region from the environment.
        self.client = boto3.client('lex-models')

    def create_new_intent(self, intent_name, description="n/a", sample_utterances=None, slot_types=None):
        """Create or update a Lex intent.

        :param intent_name: name of the intent to create/update
        :param description: free-text description
        :param sample_utterances: list of utterance strings (default: none)
        :param slot_types: list of slot type names; slot details are gathered
            interactively via stdin prompts
        Bug fix: the mutable default arguments ([]) were replaced with None
        sentinels to avoid state shared across calls.
        """
        intent_info = {'name': intent_name,
                       'description': description,
                       'sampleUtterances': [] if sample_utterances is None else sample_utterances,
                       'fulfillmentActivity': {'type': 'ReturnIntent'}}
        try:
            # An existing resource's checksum is required for updates.
            # NOTE(review): this calls get_slot_type with the *intent* name —
            # presumably get_intent was meant; confirm against the Lex API.
            response = self.client.get_slot_type(name=intent_name, version='$LATEST')
            intent_info['checksum'] = response['checksum']
        except self.client.exceptions.NotFoundException:
            # First-time creation: no checksum needed.
            pass
        slots_info = self._slot_type_constructor([] if slot_types is None else slot_types)
        intent_info['slots'] = slots_info
        self.client.put_intent(**intent_info)
        print("Successfully created intent {}".format(intent_name))

    def get_intent_list(self):
        """Return the names of all intents.

        Bug fix: the original built the list but never returned it, so every
        call yielded None.
        """
        response = self.client.get_intents()
        return [intent['name'] for intent in response['intents']]

    def print_intents(self):
        """Print a table of intent name, description and version to stdout."""
        response = self.client.get_intents()
        table = PrettyTable()
        table.field_names = ['intent_name', 'description', 'version']
        for intent in response['intents']:
            # 'description' is optional in the API response.
            table.add_row([intent['name'], intent.get('description', 'n/a'), intent['version']])
        print(table)

    @staticmethod
    def _slot_type_constructor(slot_types):
        """Interactively build the `slots` payload for put_intent.

        Prompts on stdin for each slot's constraint, elicitation prompt, max
        attempts and sample utterances; returns a list of slot dicts in the
        shape expected by the Lex put_intent API.
        """
        slots_info = []
        for slot_type in slot_types:
            slot_name = "sample_" + slot_type
            slot_required = input("Will the slot {} be required [Required / Optional]: ".format(slot_type))
            slot_version = '$LATEST'
            slot_prompt = str(input("Provide an elicitation prompt for slot {}: ".format(slot_type)))
            slot_max_attempts = int(input("What is the max attempts to allow when filling slot {}: ".format(slot_type)))
            slot_sample_utterances = []
            while True:
                # The literal word "this" in an utterance is replaced with the
                # slot placeholder, e.g. "book this" -> "book {sample_X}".
                slot_sample_utterances.append(
                    str(input("Please enter a sample utterance for slot {}: ".format(slot_type))).replace(
                        "this", "{" + slot_name + "}"))
                if input("Would you like to add another utterance [True / False]: ") == "False":
                    break
            print("{} - req: {} - prompt: {} - max_attempt: {} - sampleUtterances {}".format(
                slot_type, slot_required, slot_prompt, slot_max_attempts, slot_sample_utterances))
            slot_info = {'name': slot_name,
                         'slotConstraint': slot_required,
                         'slotType': slot_type,
                         'slotTypeVersion': slot_version,
                         'valueElicitationPrompt': {
                             'messages': [
                                 {
                                     'contentType': 'PlainText',
                                     'content': slot_prompt,
                                 },
                             ],
                             'maxAttempts': slot_max_attempts,
                         },
                         'sampleUtterances': slot_sample_utterances
                         }
            slots_info.append(slot_info)
        return slots_info
| 2.1875 | 2 |
src/__init__.py | Victorpc98/CE888-Project | 1 | 17166 | <filename>src/__init__.py
import sys
# NOTE(review): mutates the interpreter-wide import path so modules in the
# parent directory become importable; a proper package layout or editable
# install would be preferable.
sys.path.append("..")  # Adds higher directory to python modules path.
wxtbx/wx4_compatibility.py | dperl-sol/cctbx_project | 155 | 17167 | <filename>wxtbx/wx4_compatibility.py
from __future__ import absolute_import, division, print_function
'''
Author : Lyubimov, A.Y.
Created : 04/14/2014
Last Changed: 11/05/2018
Description : wxPython 3-4 compatibility tools
The context managers, classes, and other tools below can be used to make the
GUI code compatible with wxPython 3 and 4. Mostly, the tools convert the
functions, enumerations, and classes which have been renamed in wxPython 4;
the name mismatches result in exceptions.
Use case 1: subclassing wx.PyControl or wx.Control:
from wxtbx import wx4_compatibility as wx4c
WxCtrl = wx4c.get_wx_mod(wx, wx.Control)
class MyCustomControl(WxCtrl): ...
Use case 2: brush style (NOTE: you can do that with fonts as well, but it
doesn't seem to be necessary):
from wxtbx import wx4_compatibility as wx4c
bkgrd = self.GetBackgroundColour()
with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle:
brush = wx.Brush(bkgrd, bstyle)
Use case 3: Toolbars
from wxtbx import wx4_compatibility as wx4c, bitmaps
class MyFrame(wx.Frame):
def __init__(self, parent, id, title, *args, **kwargs):
wx.Frame.__init__(self, parent, id, title, *args, **kwargs)
self.toolbar = wx4c.ToolBar(self, style=wx.TB_TEXT)
self.quit_button = self.toolbar.AddTool(toolId=wx.ID_ANY,
label='Quit',
kind=wx.ITEM_NORMAL,
bitmap=bitmaps.fetch_icon_bitmap('actions', 'exit')
shortHelp='Exit program')
...
self.SetToolBar(self.toolbar)
self.toolbar.Realize()
'''
import wx
from contextlib import contextmanager
import importlib
# True when the running wxPython is the 4.x ("Phoenix") series.
wx4 = wx.__version__[0] == '4'
# Pairs of (wxPython 3 class name, wxPython 4 class name); find_module()
# selects the right member for the running wx version.
# Bug fix: the last entry was corrupted to ('PyWindow'', Window'), which is a
# syntax error; restored to ('PyWindow', 'Window') matching every other pair.
modnames = [
    ('PyControl', 'Control'),
    ('PyDataObjectSimple', 'DataObjectSimple'),
    ('PyDropTarget', 'DropTarget'),
    ('PyEvtHandler', 'EvtHandler'),
    ('PyImageHandler', 'ImageHandler'),
    ('PyLocale', 'Locale'),
    ('PyLog', 'Log'),
    ('PyPanel', 'Panel'),
    ('PyPickerBase', 'PickerBase'),
    ('PyPreviewControlBar', 'PreviewControlBar'),
    ('PyPreviewFrame', 'PreviewFrame'),
    ('PyPrintPreview', 'PrintPreview'),
    ('PyScrolledWindow', 'ScrolledWindow'),
    ('PySimpleApp', 'App'),
    ('PyTextDataObject', 'TextDataObject'),
    ('PyTimer', 'Timer'),
    ('PyTipProvider', 'adv.TipProvider'),
    ('PyValidator', 'Validator'),
    ('PyWindow', 'Window')
]
# Each list below pairs a deprecated wx3 enum constant with its wx4
# replacement; find_enum() picks the appropriate member at runtime.
font_families = [
    (wx.DEFAULT, wx.FONTFAMILY_DEFAULT),
    (wx.DECORATIVE, wx.FONTFAMILY_DECORATIVE),
    (wx.ROMAN, wx.FONTFAMILY_ROMAN),
    (wx.SCRIPT, wx.FONTFAMILY_SCRIPT),
    (wx.SWISS, wx.FONTFAMILY_SWISS),
    (wx.MODERN, wx.FONTFAMILY_MODERN),
    (wx.TELETYPE, wx.FONTFAMILY_TELETYPE)
]
font_weights = [
    (wx.NORMAL, wx.FONTWEIGHT_NORMAL),
    (wx.LIGHT, wx.FONTWEIGHT_LIGHT),
    (wx.BOLD, wx.FONTWEIGHT_BOLD)
]
font_styles = [
    (wx.NORMAL, wx.FONTSTYLE_NORMAL),
    (wx.ITALIC, wx.FONTSTYLE_ITALIC),
    (wx.SLANT, wx.FONTSTYLE_SLANT)
]
pen_styles = [
    (wx.SOLID, wx.PENSTYLE_SOLID),
    (wx.DOT, wx.PENSTYLE_DOT),
    (wx.LONG_DASH, wx.PENSTYLE_LONG_DASH),
    (wx.SHORT_DASH, wx.PENSTYLE_SHORT_DASH),
    (wx.DOT_DASH, wx.PENSTYLE_DOT_DASH),
    (wx.USER_DASH, wx.PENSTYLE_USER_DASH),
    (wx.TRANSPARENT, wx.PENSTYLE_TRANSPARENT)
]
brush_styles = [
    (wx.SOLID, wx.BRUSHSTYLE_SOLID),
    (wx.TRANSPARENT, wx.BRUSHSTYLE_TRANSPARENT),
    (wx.STIPPLE_MASK_OPAQUE, wx.BRUSHSTYLE_STIPPLE_MASK_OPAQUE),
    (wx.STIPPLE_MASK, wx.BRUSHSTYLE_STIPPLE_MASK),
    (wx.STIPPLE, wx.BRUSHSTYLE_STIPPLE),
    (wx.BDIAGONAL_HATCH, wx.BRUSHSTYLE_BDIAGONAL_HATCH),
    (wx.CROSSDIAG_HATCH, wx.BRUSHSTYLE_CROSSDIAG_HATCH),
    (wx.FDIAGONAL_HATCH, wx.BRUSHSTYLE_FDIAGONAL_HATCH),
    (wx.CROSS_HATCH, wx.BRUSHSTYLE_CROSS_HATCH),
    (wx.HORIZONTAL_HATCH, wx.BRUSHSTYLE_HORIZONTAL_HATCH),
    (wx.VERTICAL_HATCH, wx.BRUSHSTYLE_VERTICAL_HATCH),
]
def find_module(module):
    """Return the (wx3_name, wx4_name) pair whose tuple contains *module*'s
    name, or None when no entry matches."""
    target = module.__name__
    for entry in modnames:
        if target in entry:
            return entry
    return None
def find_enum(enums, item):
    """Return the wx-version-appropriate member of the first pair in *enums*
    containing *item* (wx4 member when running wx4, wx3 member otherwise);
    None when no pair matches."""
    for old_new in enums:
        if item not in old_new:
            continue
        return old_new[1] if wx4 else old_new[0]
def get_wx_mod(base, module):
    """Resolve *module*'s version-appropriate replacement class from package
    *base* (normally the top-level wx package).

    The name comes from the modnames table; a dotted name such as
    'adv.TipProvider' requires importing the intermediate submodule first.
    """
    mname = find_module(module)[1] if wx4 else find_module(module)[0]
    bname = base.__name__
    if '.' in mname:
        # Drop the base package name from the dotted path, import the
        # intermediate module, then fetch the final attribute (the class).
        spl = [i for i in mname.split('.') if i != bname]
        modname = '.'.join(spl[:-1])
        mod = importlib.import_module('{}.{}'.format(bname, modname))
        return getattr(mod, spl[-1])
    else:
        # Plain name: the class lives directly on the base package.
        return getattr(base, mname)
@contextmanager
def wx_mod(base, module):
    ''' Identify and import the appropriate wxPython module '''
    yield get_wx_mod(base, module)

@contextmanager
def set_font_style(style):
    """Yield the wx-version-appropriate font style constant for *style*."""
    yield find_enum(font_styles, style)

@contextmanager
def set_font_weight(weight):
    """Yield the wx-version-appropriate font weight constant for *weight*."""
    yield find_enum(font_weights, weight)

@contextmanager
def set_font_family(family):
    """Yield the wx-version-appropriate font family constant for *family*."""
    yield find_enum(font_families, family)

@contextmanager
def set_pen_style(style):
    """Yield the wx-version-appropriate pen style constant for *style*."""
    yield find_enum(pen_styles, style)

@contextmanager
def set_brush_style(style):
    """Yield the wx-version-appropriate brush style constant for *style*."""
    yield find_enum(brush_styles, style)

@contextmanager
def create_measuring_context():
    """Yield a measuring GraphicsContext (the factory name differs between
    wxPython 3 and 4)."""
    dc = wx.GraphicsContext.Create() if wx4 else \
        wx.GraphicsContext.CreateMeasuringContext()
    yield dc
class Wx3ToolBar(wx.ToolBar):
    ''' Special toolbar class that accepts wxPython 4-style AddTool command and
    converts it to a wxPython 3-style AddLabelTool command '''

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.TB_HORIZONTAL, name='toolbar'):
        # Pass-through constructor; signature mirrors wx.ToolBar.
        wx.ToolBar.__init__(self, parent, id, pos, size, style, name)

    def AddTool(self, toolId, label, bitmap, bmpDisabled=wx.NullBitmap,
                kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
                clientData=None):
        ''' Override to make this a very thin wrapper for AddLabelTool, which in
        wxPython 3 is the same as AddTool in wxPython 4 '''
        return self.AddLabelTool(id=toolId, label=label, bitmap=bitmap,
                                 bmpDisabled=bmpDisabled, kind=kind,
                                 shortHelp=shortHelp, longHelp=longHelp,
                                 clientData=clientData)
class Wx4ToolBar(wx.ToolBar):
    ''' Special toolbar class that accepts wxPython 3-style AddLabelTool command
    and converts it to a wxPython 4-style AddTool command '''

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.TB_HORIZONTAL, name='toolbar'):
        # Pass-through constructor; signature mirrors wx.ToolBar.
        wx.ToolBar.__init__(self, parent, id, pos, size, style, name)

    def AddLabelTool(self, id, label, bitmap, bmpDisabled=wx.NullBitmap,
                     kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
                     clientData=None):
        ''' Override to make this a very thin wrapper for AddTool, which in
        wxPython 4 is the same as AddLabelTool in wxPython 3 '''
        return self.AddTool(toolId=id, label=label, bitmap=bitmap,
                            bmpDisabled=bmpDisabled, kind=kind,
                            shortHelp=shortHelp, longHelp=longHelp,
                            clientData=clientData)
# Use this ToolBar class to create toolbars in frames: under wx4 it also
# accepts the wx3-style AddLabelTool, and under wx3 the wx4-style AddTool.
ToolBar = Wx4ToolBar if wx4 else Wx3ToolBar
| 2.375 | 2 |
BMVC_version/utils.py | ZhengyuZhao/ACE | 19 | 17168 | import torch
import torch.nn as nn
import csv
#image quantization
def quantization(x):
    """Snap tensor *x* onto the 256-level 8-bit grid.

    Equivalent to round(x * 255) / 255; intended for image tensors whose
    values lie in [0, 1].
    """
    scaled = x.mul(255)
    return scaled.round().div(255)
#picecwise-linear color filter
def CF(img, param, pieces):
    """Apply a piecewise-linear color filter to *img*.

    *param* holds per-channel weights for each of *pieces* equal intensity
    segments; the output is the weighted sum of the clamped segments,
    renormalized so that all-equal weights act as the identity filter.
    """
    weights = param[:, :, None, None]                 # broadcastable over H, W
    curve_total = torch.sum(weights, 4) + 1e-30       # avoid division by zero
    filtered = img * 0
    for k in range(pieces):
        segment = torch.clamp(img - 1.0 * k / pieces, 0, 1.0 / pieces)
        filtered = filtered + segment * weights[:, :, :, :, k]
    return filtered * (pieces / curve_total)
#parsing the data annotation
def load_ground_truth(csv_filename):
    """Parse the attack annotation CSV.

    Expects columns ImageId, TrueLabel, TargetClass; returns three parallel
    lists: image ids (str), true labels (int), target labels (int).
    """
    image_ids, true_labels, target_labels = [], [], []
    with open(csv_filename) as handle:
        for record in csv.DictReader(handle, delimiter=','):
            image_ids.append(record['ImageId'])
            true_labels.append(int(record['TrueLabel']))
            target_labels.append(int(record['TargetClass']))
    return image_ids, true_labels, target_labels
# simple Module to normalize an image
class Normalize(nn.Module):
    """Channel-wise input normalization module: (x - mean) / std.

    Expects NCHW input; *mean* and *std* are per-channel sequences.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.mean = torch.Tensor(mean)
        self.std = torch.Tensor(std)

    def forward(self, x):
        # Reshape to (1, C, 1, 1) and match x's dtype/device for broadcasting.
        mean = self.mean.type_as(x)[None, :, None, None]
        std = self.std.type_as(x)[None, :, None, None]
        return (x - mean) / std
# values are standard normalization for ImageNet images,
# from https://github.com/pytorch/examples/blob/master/imagenet/main.py
# Module-level convenience instance shared by importers of this module.
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
| 2.953125 | 3 |
lemonpie/_nbdev.py | corazonlabs/ehr_preprocessing | 3 | 17169 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"get_device": "00_basics.ipynb",
"settings_template": "00_basics.ipynb",
"read_settings": "00_basics.ipynb",
"DEVICE": "00_basics.ipynb",
"settings": "00_basics.ipynb",
"DATA_STORE": "00_basics.ipynb",
"LOG_STORE": "00_basics.ipynb",
"MODEL_STORE": "00_basics.ipynb",
"EXPERIMENT_STORE": "00_basics.ipynb",
"PATH_1K": "00_basics.ipynb",
"PATH_10K": "00_basics.ipynb",
"PATH_20K": "00_basics.ipynb",
"PATH_100K": "00_basics.ipynb",
"FILENAMES": "00_basics.ipynb",
"SYNTHEA_DATAGEN_DATES": "00_basics.ipynb",
"CONDITIONS": "00_basics.ipynb",
"LOG_NUMERICALIZE_EXCEP": "00_basics.ipynb",
"read_raw_ehrdata": "01_preprocessing_clean.ipynb",
"split_patients": "01_preprocessing_clean.ipynb",
"split_ehr_dataset": "01_preprocessing_clean.ipynb",
"cleanup_pts": "01_preprocessing_clean.ipynb",
"cleanup_obs": "01_preprocessing_clean.ipynb",
"cleanup_algs": "01_preprocessing_clean.ipynb",
"cleanup_crpls": "01_preprocessing_clean.ipynb",
"cleanup_meds": "01_preprocessing_clean.ipynb",
"cleanup_img": "01_preprocessing_clean.ipynb",
"cleanup_procs": "01_preprocessing_clean.ipynb",
"cleanup_cnds": "01_preprocessing_clean.ipynb",
"cleanup_immns": "01_preprocessing_clean.ipynb",
"cleanup_dataset": "01_preprocessing_clean.ipynb",
"extract_ys": "01_preprocessing_clean.ipynb",
"insert_age": "01_preprocessing_clean.ipynb",
"clean_raw_ehrdata": "01_preprocessing_clean.ipynb",
"load_cleaned_ehrdata": "01_preprocessing_clean.ipynb",
"load_ehr_vocabcodes": "01_preprocessing_clean.ipynb",
"EhrVocab": "02_preprocessing_vocab.ipynb",
"ObsVocab": "02_preprocessing_vocab.ipynb",
"EhrVocabList": "02_preprocessing_vocab.ipynb",
"get_all_emb_dims": "02_preprocessing_vocab.ipynb",
"collate_codes_offsts": "03_preprocessing_transform.ipynb",
"get_codenums_offsts": "03_preprocessing_transform.ipynb",
"get_demographics": "03_preprocessing_transform.ipynb",
"Patient": "03_preprocessing_transform.ipynb",
"get_pckl_dir": "03_preprocessing_transform.ipynb",
"PatientList": "03_preprocessing_transform.ipynb",
"cpu_cnt": "03_preprocessing_transform.ipynb",
"create_all_ptlists": "03_preprocessing_transform.ipynb",
"preprocess_ehr_dataset": "03_preprocessing_transform.ipynb",
"EHRDataSplits": "04_data.ipynb",
"LabelEHRData": "04_data.ipynb",
"EHRDataset": "04_data.ipynb",
"EHRData": "04_data.ipynb",
"accuracy": "05_metrics.ipynb",
"null_accuracy": "05_metrics.ipynb",
"ROC": "05_metrics.ipynb",
"MultiLabelROC": "05_metrics.ipynb",
"plot_rocs": "05_metrics.ipynb",
"plot_train_valid_rocs": "05_metrics.ipynb",
"auroc_score": "05_metrics.ipynb",
"auroc_ci": "05_metrics.ipynb",
"save_to_checkpoint": "06_learn.ipynb",
"load_from_checkpoint": "06_learn.ipynb",
"get_loss_fn": "06_learn.ipynb",
"RunHistory": "06_learn.ipynb",
"train": "06_learn.ipynb",
"evaluate": "06_learn.ipynb",
"fit": "06_learn.ipynb",
"predict": "06_learn.ipynb",
"plot_loss": "06_learn.ipynb",
"plot_losses": "06_learn.ipynb",
"plot_aurocs": "06_learn.ipynb",
"plot_train_valid_aurocs": "06_learn.ipynb",
"plot_fit_results": "06_learn.ipynb",
"summarize_prediction": "06_learn.ipynb",
"count_parameters": "06_learn.ipynb",
"dropout_mask": "07_models.ipynb",
"InputDropout": "07_models.ipynb",
"linear_layer": "07_models.ipynb",
"create_linear_layers": "07_models.ipynb",
"init_lstm": "07_models.ipynb",
"EHR_LSTM": "07_models.ipynb",
"init_cnn": "07_models.ipynb",
"conv_layer": "07_models.ipynb",
"EHR_CNN": "07_models.ipynb",
"get_data": "08_experiment.ipynb",
"get_optimizer": "08_experiment.ipynb",
"get_model": "08_experiment.ipynb",
"Experiment": "08_experiment.ipynb"}
modules = ["basics.py",
"preprocessing/clean.py",
"preprocessing/vocab.py",
"preprocessing/transform.py",
"data.py",
"metrics.py",
"learn.py",
"models.py",
"experiment.py"]
# Base URLs nbdev uses to build documentation and source links.
doc_url = "https://corazonlabs.github.io/lemonpie/"
git_url = "https://github.com/corazonlabs/lemonpie/tree/main/"
def custom_doc_links(name):
    """nbdev hook for per-symbol documentation links; none are customized."""
    return None
| 1.0625 | 1 |
tensorboard/plugins/graph_edit/c2graph_util.py | qzhong0605/tensorboardplugins | 0 | 17170 | <filename>tensorboard/plugins/graph_edit/c2graph_util.py
# Convert the caffe2 model into tensorboard GraphDef
#
# The details of caffe2 model is on the compat/proto/caffe2/caffe2.proto
# And the details of GraphDef model is on the compat/proto/graph.proto
#
################################################################################
from tensorboard.compat.proto import graph_pb2
from tensorboard.compat.proto import attr_value_pb2
from tensorboard.compat.proto import node_def_pb2
from tensorboard.compat.proto import tensor_shape_pb2
from tensorboard.compat.proto import tensor_pb2
from tensorboard.compat.proto import types_pb2
from tensorboard.compat.proto.caffe2 import caffe2_pb2
from tensorboard.util import tb_logging
from tensorboard.plugins.graph_edit import tbgraph_base
from google.protobuf import text_format
logger = tb_logging.get_logger()
class C2Graph(tbgraph_base.TBGraph):
""" In order to visualize the caffe2 model graph, it converts the caffe2
format model graph into the tensoboard-format model graph.
The information about caffe2 model is on the proto
`compat/proto/caffe2/caffe2.proto`. And the tensorboard model is on the
proto `compat/proto/graph.proto`
In order to avoid the same tensor name and they are built from the different
operators, we adopt the SSA form, which is used to differentiate different tensor
"""
def __init__(self, predict_net, init_net, predict_net_type="pb"):
super(C2Graph, self).__init__()
self._predict_net = caffe2_pb2.NetDef()
if predict_net_type == "pb":
with open(predict_net, "rb") as predict_stream:
self._predict_net.ParseFromString(predict_stream.read())
logger.info("parse caffe2 predict net {} with protobuf format".format(predict_net))
elif predict_net_type == "txt":
with open(predict_net, "r") as predict_stream:
text_format.Parse(predict_stream.read(), self._predict_net)
logger.info("parse caffe2 predict net {} with text format".format(predict_net))
else:
raise NotImplementedError("The predict net type: {} doesn't support".format(predict_net_type))
self._init_net = caffe2_pb2.NetDef()
with open(init_net, "rb") as init_stream:
self._init_net.ParseFromString(init_stream.read())
logger.info("load caffe2 init net {} with protobuf format".format(init_net))
# a map from node key to node, where the node key is globaly unique
self.nodes = {}
# a map from caffe2 operator to output, which is a SSA-format
self.c2_op_out = {}
# record the blob version for inplace-change
self.blob_version = {}
# a map from node name to shape info
self.shapes = {}
# a map from node name to dtype
self.types = {}
def _build_nodes_shapetype(self):
""" Build an inner node shape information given the weights information for network """
# add shape information
if self._init_net is None:
return
for init_op in self._init_net.op:
for init_arg in init_op.arg:
if init_arg.name == "shape":
self.shapes[init_op.output[0]] = init_arg.ints
elif init_arg.name == "values":
if len(init_arg.floats):
self.types[init_op.output[0]] = types_pb2.DT_FLOAT
elif len(init_arg.ints):
self.types[init_op.output[0]] = types_pb2.DT_INT64
elif len(init_arg.strings):
self.types[init_op.output[0]] = types_pb2.DT_STRING
else:
raise NotImplementedError("Not Supported Field: {}".format(init_arg))
def _add_node_shapetype(self, node, shape_name):
""" build an internal node shape map if given the weights information """
if shape_name in self.shapes:
tensor_shape = tensor_shape_pb2.TensorShapeProto()
for shape_i in self.shapes[shape_name]:
shape_dim = tensor_shape_pb2.TensorShapeProto.Dim()
shape_dim.size = shape_i
tensor_shape.dim.extend([shape_dim])
attr_value = attr_value_pb2.AttrValue()
attr_value.shape.CopyFrom(tensor_shape)
node.attr['shape'].CopyFrom(attr_value)
# add optional dtype
if shape_name in self.types:
attr_value = attr_value_pb2.AttrValue()
attr_value.type = self.types[shape_name]
node.attr['dtype'].CopyFrom(attr_value)
def _MakeSSAName(self, name):
""" It's used to make a unique name through a ssa-based format for `name`
"""
if name not in self.blob_version:
self.blob_version[name] = 0
else:
self.blob_version[name] += 1
ret_name = "{}_{}".format(name, self.blob_version[name])
return ret_name
def convert_to_nodes(self, c2_op):
""" Convert a caffe2 OperatorDef into TB nodes
The nodes for TensorBoard have only inputs and don't have outputs. Therefore
a caffe2 operator maybe converted into muliple nodes
Arg:
c2_op: a caffe2 OperatorDef
"""
new_node = node_def_pb2.NodeDef()
new_node.op = c2_op.type
for c2_input in c2_op.input:
if c2_input not in self.blob_version:
# These inputs are weights or input data for current
# tensorboard node. Therefore, the `op` is set to
# `Initialization`
in_node = node_def_pb2.NodeDef()
self._add_node_shapetype(in_node, c2_input)
self.blob_version[c2_input] = 0
in_node.name = '{}_{}'.format(c2_input, self.blob_version[c2_input])
in_node.op = "Initialization"
self.nodes["{}_{}".format(c2_input, 0)] = in_node
self._tb_graph.node.extend([in_node])
new_node.input.append('{}_{}'.format(c2_input, self.blob_version[c2_input]))
if len(c2_op.output) == 0:
# There are no outputs for current C2 operator. Therefore, the node
# name is set to C2 operation type
new_node.name = self._MakeSSAName(c2_op.type)
else:
new_node.name = self._MakeSSAName(c2_op.output[0])
# If more than one output, we build `Sibling` tensorboard node for
# other outpouts
for c2_output in c2_op.output[1:]:
sibling_node = node_def_pb2.NodeDef()
sibling_node.op = 'Sibling'
sibling_node.name = self._MakeSSAName(c2_output)
sibling_node.input.extend([new_node.name])
self._add_node_shapetype(sibling_node, c2_output)
self.nodes[sibling_node.name] = sibling_node
self._tb_graph.node.extend([sibling_node])
# add argument
for c2_arg in c2_op.arg:
attr = attr_value_pb2.AttrValue()
if c2_arg.HasField('i'):
attr.i = c2_arg.i
elif c2_arg.HasField('f'):
attr.f = c2_arg.f
elif c2_arg.HasField('s'):
attr.s = c2_arg.s
elif len(c2_arg.floats):
list_value = attr_value_pb2.AttrValue.ListValue()
list_value.f.extend(c2_args.floats)
attr.list = list_value
elif len(c2_arg.ints):
list_value = attr_value_pb2.AttrValue.ListValue()
list_value.i.extend(c2_arg.ints)
attr.list.CopyFrom(list_value)
elif len(c2_arg.strings):
list_value = attr_value_pb2.AttrValue.ListValue()
list_value.s.extend(c2_arg.strings)
attr.list.CopyFrom(list_value)
new_node.attr[c2_arg.name].CopyFrom(attr)
self._add_node_shapetype(new_node, c2_op.output[0])
self.nodes[new_node.name] = new_node
self._tb_graph.node.extend([new_node])
def ConvertNet(self):
""" Convert the full network of caffe2 into TB network """
self._build_nodes_shapetype()
for c2_op in self._predict_net.op:
self.convert_to_nodes(c2_op)
| 2.1875 | 2 |
src/Homework2_1.py | alexaquino/TUM-AUTONAVx | 0 | 17171 | #!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# Technische Universität München (TUM)
# Autonomous Navigation for Flying Robots
# Homework 2.1
from plot import plot
class UserCode:
    """Track maximum roll/pitch angles and yaw rate from quadrotor telemetry."""

    def __init__(self):
        # State kept between measurement callbacks: the previous yaw reading
        # (for finite differencing) and the running maxima.
        self.previous_yaw = 0
        self.max_roll_angle = 0
        self.max_pitch_angle = 0
        self.max_yaw_velocity = 0

    def measurement_callback(self, t, dt, navdata):
        '''
        :param t: time since simulation start
        :param dt: time since last call to measurement_callback
        :param navdata: measurements of the quadrotor
        '''
        roll = abs(navdata.rotX)
        pitch = abs(navdata.rotY)
        # yaw rate is approximated by the backward difference of the yaw angle
        yaw_rate = abs((navdata.rotZ - self.previous_yaw) / dt)
        self.previous_yaw = navdata.rotZ
        if roll > self.max_roll_angle:
            self.max_roll_angle = roll
        if pitch > self.max_pitch_angle:
            self.max_pitch_angle = pitch
        if yaw_rate > self.max_yaw_velocity:
            self.max_yaw_velocity = yaw_rate
        plot("max_roll_angle", self.max_roll_angle)
        plot("max_pitch_angle", self.max_pitch_angle)
        plot("max_yaw_velocity", self.max_yaw_velocity)
| 3.515625 | 4 |
src_tf/templates/tf_estimator_template/model/example.py | ashishpatel26/finch | 1 | 17172 | from configs import args
import tensorflow as tf
def forward(x, mode):
    """Text-CNN: embed token ids, run parallel conv/max-pool branches with
    kernel sizes 3/4/5, concatenate the pooled features and project to the
    class logits.  Dropout is active only in TRAIN mode."""
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    x = tf.contrib.layers.embed_sequence(x, args.vocab_size, args.embed_dim)
    x = tf.layers.dropout(x, 0.2, training=is_training)
    pooled_outputs = []
    for kernel_size in [3, 4, 5]:
        conv = tf.layers.conv1d(x, args.filters, kernel_size, activation=tf.nn.relu)
        # pool over the full temporal extent, then flatten to (batch, filters)
        pooled = tf.layers.max_pooling1d(conv, conv.get_shape().as_list()[1], 1)
        pooled = tf.reshape(pooled, (tf.shape(x)[0], args.filters))
        pooled_outputs.append(pooled)
    x = tf.concat(pooled_outputs, -1)
    x = tf.layers.dense(x, args.filters, tf.nn.relu)
    return tf.layers.dense(x, args.n_class)
def model_fn(features, labels, mode):
    """Estimator model_fn handling PREDICT, EVAL and TRAIN modes.

    :param features: batch of token-id sequences
    :param labels: integer class labels (ignored in PREDICT mode)
    :param mode: one of tf.estimator.ModeKeys
    :return: an EstimatorSpec appropriate for `mode`
    """
    logits = forward(features, mode)
    if mode == tf.estimator.ModeKeys.PREDICT:
        preds = tf.argmax(logits, -1)
        return tf.estimator.EstimatorSpec(mode, predictions=preds)
    # Loss is shared by TRAIN and EVAL.
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels))
    if mode == tf.estimator.ModeKeys.EVAL:
        # Fix: EVAL was previously unhandled, so estimator.evaluate()
        # received None and failed.
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss_op)
    # TRAIN: exponentially decayed Adam learning rate, logged every 100 steps.
    global_step = tf.train.get_global_step()
    LR = {'start': 5e-3, 'end': 5e-4, 'steps': 1500}
    lr_op = tf.train.exponential_decay(
        LR['start'], global_step, LR['steps'], LR['end']/LR['start'])
    train_op = tf.train.AdamOptimizer(lr_op).minimize(
        loss_op, global_step=global_step)
    lth = tf.train.LoggingTensorHook({'lr': lr_op}, every_n_iter=100)
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss_op, train_op=train_op, training_hooks=[lth])
| 2.265625 | 2 |
{{cookiecutter.project_slug}}/core/management/commands/snippets/fastapi_project/core/security.py | claysllanxavier/django-cookiecutter | 8 | 17173 | <filename>{{cookiecutter.project_slug}}/core/management/commands/snippets/fastapi_project/core/security.py
from datetime import datetime, timedelta, timezone
from typing import Any, Union

from jose import jwt
from passlib.context import CryptContext

from .config import settings
pwd_context = CryptContext(
default="django_pbkdf2_sha256",
schemes=["django_argon2", "django_bcrypt", "django_bcrypt_sha256",
"django_pbkdf2_sha256", "django_pbkdf2_sha1",
"django_disabled"])
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 8 days
'''
Arquivo de configuração de segurança dos tokens JWT
- Métodos de verificação e criação de hash de senha
- Método para criar o token jwt válido
'''
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check a plaintext password against its stored hash."""
    matches = pwd_context.verify(plain_password, hashed_password)
    return matches
def get_password_hash(password: str) -> str:
    """Hash a plaintext password with the configured default scheme."""
    hashed = pwd_context.hash(password)
    return hashed
def create_access_token(
    subject: Union[str, Any], expires_delta: timedelta = None
) -> str:
    """Create a signed JWT access token.

    :param subject: value stored in the token's ``sub`` claim (stringified)
    :param expires_delta: optional custom lifetime; falls back to
        ACCESS_TOKEN_EXPIRE_MINUTES when falsy
    :return: the encoded JWT string
    """
    # Use timezone-aware UTC timestamps: naive datetime.utcnow() is
    # deprecated and error-prone when mixed with aware datetimes.
    if expires_delta:
        lifetime = expires_delta
    else:
        lifetime = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    expire = datetime.now(timezone.utc) + lifetime
    to_encode = {"exp": expire, "sub": str(subject)}
    encoded_jwt = jwt.encode(to_encode, settings.app_secret, algorithm=ALGORITHM)
    return encoded_jwt
return encoded_jwt | 2.0625 | 2 |
margarita/main.py | w0de/margarita | 3 | 17174 | <reponame>w0de/margarita
#!/usr/bin/env python
import os
import sys

from flask import Flask, Response, abort, jsonify, redirect, render_template, request

from saml_auth import BaseAuth, SamlAuth
try:
import json
except ImportError:
# couldn't find json, try simplejson library
import simplejson as json
import getopt
from operator import itemgetter
from distutils.version import LooseVersion
from reposadolib import reposadocommon
# Map from the basename of an Apple software-update catalog URL to the
# newest OS X version that catalog serves (used to label products with the
# OS versions they apply to).
apple_catalog_version_map = {
    'index-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.14',
    'index-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.13',
    'index-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.12',
    'index-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.11',
    'index-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.10',
    'index-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.9',
    'index-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.8',
    'index-lion-snowleopard-leopard.merged-1.sucatalog': '10.7',
    'index-leopard-snowleopard.merged-1.sucatalog': '10.6',
    'index-leopard.merged-1.sucatalog': '10.5',
    'index-1.sucatalog': '10.4',
    'index.sucatalog': '10.4',
}
# Auth implementation used when SAML auth is disabled (see build_app()).
BASE_AUTH_CLASS = BaseAuth
def build_app():
    """Create the Flask application and its auth handler.

    :return: (app, auth) tuple.  When SAML_AUTH_ENABLED is set in the
        environment, SAML endpoints are mounted at /saml2; otherwise a
        permissive BaseAuth is used.
    """
    app = Flask(__name__)
    app.config.update(
        {
            # NOTE(review): env values are strings, so DEBUG="false" is still
            # truthy; set/unset the variable rather than assigning "false".
            "DEBUG": os.environ.get('DEBUG', False),
            "LOCAL_DEBUG": os.environ.get('LOCAL_DEBUG', False),
            "SECRET_KEY": os.environ.get("SECRET_KEY", "insecure"),
            "SAML_PATH": os.environ.get(
                "SAML_PATH",
                os.path.join(os.path.dirname(os.path.dirname(__file__)), "saml"),
            ),
            "SAML_AUTH_ENABLED": bool(os.environ.get("SAML_AUTH_ENABLED", False)),
        }
    )
    if app.config["SAML_AUTH_ENABLED"]:
        auth = SamlAuth(app, auth_path="saml2", exemptions=["/<name>", "/test", "/status"])
    else:
        # Fix: the original read an undefined global LOCAL_DEBUG, raising
        # NameError whenever is_admin was evaluated; use the config value.
        auth = BASE_AUTH_CLASS(app, is_admin=(lambda: app.config["LOCAL_DEBUG"]),
                               is_auth=(lambda: True))
    return app, auth
# Module-level application and auth objects, built at import time.
app, auth = build_app()

# cache the keys of the catalog version map dict
# (under Python 2, .keys() returns a plain list)
apple_catalog_suffixes = apple_catalog_version_map.keys()
def versions_from_catalogs(cats):
    '''Given an iterable of catalogs return the corresponding OS X versions'''
    found = set()
    for catalog_url in cats:
        # only the final path component identifies the catalog
        basename = catalog_url.split('/')[-1]
        if basename in apple_catalog_suffixes:
            found.add(apple_catalog_version_map[basename])
    return found
def json_response(r):
    '''Glue for wrapping raw JSON responses'''
    body = json.dumps(r)
    return Response(body, status=200, mimetype='application/json')
@app.route('/')
def index():
    """Serve the single-page Margarita UI."""
    return render_template('margarita.html')
@app.route('/branches', methods=['GET'])
def list_branches():
    """Return the names of all catalog branches as a JSON list."""
    branch_names = reposadocommon.getCatalogBranches().keys()
    return json_response(branch_names)
def get_description_content(html):
    """Extract the description snippet from an update's HTML blob.

    Returns the first <p>...</p> region (tags included), or the inner text of
    <body>...</body> when no <p> exists (tags excluded).  Case-insensitive
    tag matching; the original casing of the content is preserved.  Returns
    None for empty input or when neither tag is found.
    """
    if not html:
        return None
    # search case-insensitively, but slice the original string
    lowered = html.lower()
    tag = 'p'
    start = lowered.find('<' + tag + '>')
    if start == -1:
        start = lowered.find('<' + tag + ' ')
    if start == -1:
        tag = 'body'
        start = lowered.find('<' + tag)
        if start != -1:
            start += 6  # skip past '<body>'
    if start == -1:
        return None
    end = lowered.rfind('</' + tag + '>')
    if end == -1:
        end = len(html)
    elif tag != 'body':
        # keep the closing tag for <p>; a stray </body> would be ignored by
        # DOM parsing anyway
        end += len(tag) + 3
    return html[start:end]
def product_urls(cat_entry):
    '''Retreive package URLs for a given reposado product CatalogEntry.
    Will rewrite URLs to be served from local reposado repo if necessary.'''
    return [
        {
            'url': reposadocommon.rewriteOneURL(pkg['URL']),
            'size': pkg['Size'],
        }
        for pkg in cat_entry.get('Packages', [])
    ]
@app.route('/products', methods=['GET'])
def products():
    """Return every known product plus the branch list as JSON.

    Products missing any of title/version/PostDate are skipped (logged to
    stdout).  Results are sorted newest-first by PostDate.
    """
    products = reposadocommon.getProductInfo()
    catalog_branches = reposadocommon.getCatalogBranches()
    prodlist = []
    for prodid in products.keys():
        if 'title' in products[prodid] and 'version' in products[prodid] and 'PostDate' in products[prodid]:
            prod = {
                'title': products[prodid]['title'],
                'version': products[prodid]['version'],
                'PostDate': products[prodid]['PostDate'].strftime('%Y-%m-%d'),
                'description': get_description_content(products[prodid]['description']),
                'id': prodid,
                # a product absent from every current Apple catalog is
                # considered deprecated
                'depr': len(products[prodid].get('AppleCatalogs', [])) < 1,
                'branches': [],
                'oscatalogs': sorted(versions_from_catalogs(products[prodid].get('OriginalAppleCatalogs')), key=LooseVersion, reverse=True),
                'packages': product_urls(products[prodid]['CatalogEntry']),
            }
            # record which local branches list this product
            for branch in catalog_branches.keys():
                if prodid in catalog_branches[branch]:
                    prod['branches'].append(branch)
            prodlist.append(prod)
        else:
            print 'Invalid update!'
    sprodlist = sorted(prodlist, key=itemgetter('PostDate'), reverse=True)
    return json_response({'products': sprodlist, 'branches': catalog_branches.keys()})
@app.route('/new_branch/<branchname>', methods=['POST'])
def new_branch(branchname):
    """Create a new, empty catalog branch.

    Responds 401 when the branch already exists.
    """
    catalog_branches = reposadocommon.getCatalogBranches()
    if branchname in catalog_branches:
        reposadocommon.print_stderr('Branch %s already exists!', branchname)
        # Fix: abort() was never imported from flask, so this error path
        # raised NameError (a 500) instead of the intended 401.
        abort(401)
    catalog_branches[branchname] = []
    reposadocommon.writeCatalogBranches(catalog_branches)
    return jsonify(result='success')
@app.route('/delete_branch/<branchname>', methods=['POST'])
def delete_branch(branchname):
    """Delete a catalog branch and remove its generated .sucatalog files."""
    catalog_branches = reposadocommon.getCatalogBranches()
    if not branchname in catalog_branches:
        reposadocommon.print_stderr('Branch %s does not exist!', branchname)
        # Fix: a bare `return` handed Flask a None response (an error);
        # report failure as JSON instead.
        return jsonify(result=False)
    del catalog_branches[branchname]
    # this is not in the common library, so we have to duplicate code
    # from repoutil
    for catalog_URL in reposadocommon.pref('AppleCatalogURLs'):
        localcatalogpath = reposadocommon.getLocalPathNameFromURL(catalog_URL)
        # now strip the '.sucatalog' bit from the name
        if localcatalogpath.endswith('.sucatalog'):
            localcatalogpath = localcatalogpath[0:-10]
        branchcatalogpath = localcatalogpath + '_' + branchname + '.sucatalog'
        if os.path.exists(branchcatalogpath):
            reposadocommon.print_stdout(
                'Removing %s', os.path.basename(branchcatalogpath))
            os.remove(branchcatalogpath)
    reposadocommon.writeCatalogBranches(catalog_branches)
    return jsonify(result=True)
@app.route('/add_all/<branchname>', methods=['POST'])
def add_all(branchname):
    """List every known product in `branchname` and rebuild its catalogs."""
    all_product_ids = reposadocommon.getProductInfo().keys()
    branches = reposadocommon.getCatalogBranches()
    branches[branchname] = all_product_ids
    reposadocommon.writeCatalogBranches(branches)
    reposadocommon.writeAllBranchCatalogs()
    return jsonify(result=True)
@app.route('/process_queue', methods=['POST'])
def process_queue():
    """Apply a batch of listing toggles posted as JSON.

    Each change carries productId, branch and the product's *current* listed
    state; the handler flips that state, then rewrites all branch catalogs.
    """
    catalog_branches = reposadocommon.getCatalogBranches()
    for change in request.json:
        prodId = change['productId']
        branch = change['branch']
        if branch not in catalog_branches.keys():
            print 'No such catalog'
            continue
        if change['listed']:
            # if this change /was/ listed, then unlist it
            if prodId in catalog_branches[branch]:
                print 'Removing product %s from branch %s' % (prodId, branch, )
                catalog_branches[branch].remove(prodId)
        else:
            # if this change /was not/ listed, then list it
            if prodId not in catalog_branches[branch]:
                print 'Adding product %s to branch %s' % (prodId, branch, )
                catalog_branches[branch].append(prodId)
    print 'Writing catalogs'
    reposadocommon.writeCatalogBranches(catalog_branches)
    reposadocommon.writeAllBranchCatalogs()
    return jsonify(result=True)
@app.route('/dup_apple/<branchname>', methods=['POST'])
def dup_apple(branchname):
    """Replace `branchname`'s contents with all non-deprecated Apple updates."""
    catalog_branches = reposadocommon.getCatalogBranches()
    if branchname not in catalog_branches.keys():
        print 'No branch ' + branchname
        return jsonify(result=False)
    # generate list of (non-deprecated) updates
    products = reposadocommon.getProductInfo()
    prodlist = []
    for prodid in products.keys():
        # a product still present in at least one Apple catalog is current
        if len(products[prodid].get('AppleCatalogs', [])) >= 1:
            prodlist.append(prodid)
    catalog_branches[branchname] = prodlist
    print 'Writing catalogs'
    reposadocommon.writeCatalogBranches(catalog_branches)
    reposadocommon.writeAllBranchCatalogs()
    return jsonify(result=True)
@app.route('/dup/<frombranch>/<tobranch>', methods=['POST'])
def dup(frombranch, tobranch):
catalog_branches = reposadocommon.getCatalogBranches()
if frombranch not in catalog_branches.keys() or tobranch not in catalog_branches.keys():
print 'No branch ' + branchname
return jsonify(result=False)
catalog_branches[tobranch] = catalog_branches[frombranch]
print 'Writing catalogs'
reposadocommon.writeCatalogBranches(catalog_branches)
reposadocommon.writeAllBranchCatalogs()
return jsonify(result=True)
@app.route('/config_data', methods=['POST'])
def config_data():
    """Report which of the posted product ids carry the config-data attribute.

    Request body: JSON list of product ids.  Response: {id: bool} map.
    """
    # catalog_branches = reposadocommon.getCatalogBranches()
    check_prods = request.json
    if len(check_prods) > 0:
        # check-only call (no removal) thanks to the default remove_attr=False
        cd_prods = reposadocommon.check_or_remove_config_data_attribute(check_prods, suppress_output=True)
    else:
        cd_prods = []
    response_prods = {}
    for prod_id in check_prods:
        response_prods.update({prod_id: True if prod_id in cd_prods else False})
    print response_prods
    return json_response(response_prods)
@app.route('/remove_config_data/<product>', methods=['POST'])
def remove_config_data(product):
    """Strip the config-data attribute from a single product.

    :param product: product id taken from the URL (the request body is unused)
    :return: JSON list of products still carrying the attribute
    """
    # Removed: an unused `check_prods = request.json` read; the product id
    # comes from the URL, so the body is not needed.
    products = reposadocommon.check_or_remove_config_data_attribute(
        [product, ], remove_attr=True, suppress_output=True)
    return json_response(products)
@app.route('/status')
def status():
    """Lightweight liveness endpoint (exempt from auth)."""
    return jsonify(state='calmer than you')
| 1.960938 | 2 |
python/pato/transport/uart.py | kloper/pato | 0 | 17175 | # -*- python -*-
"""@file
@brief pyserial transport for pato
Copyright (c) 2014-2015 <NAME> <<EMAIL>>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import serial
from util.protocol import ProtocolException
class Uart(object):
    """
    @brief Communication transport using any UART TTL cable (FTDI)

    A simple transport that allows python code running on PC to talk
    with Pato via UART (using any UART cable or dongle e.g. FTDI),
    while Pato is compiled with UART interface.

    This requires python pyserial package to be installed.
    """

    def __init__(self, *args, **kwargs):
        """
        @brief Constructor

        @param[in] args arguments forwarded to serial.Serial
        @param[in] kwargs keyword arguments forwarded to serial.Serial
        """
        self.serial = serial.Serial(*args, **kwargs)

    def query(self, request):
        """
        @brief Send a request packet and wait for the fixed-size reply.

        @param[in] request list of bytes representing the packet to send
        @returns the 5-byte reply packet as a list of integers
        @throws ProtocolException when the write is short or the reply is
                incomplete
        """
        sent = self.serial.write(bytes(request))
        if sent != len(request):
            raise ProtocolException("Failed to send request")
        expected_len = 5
        raw_reply = self.serial.read(expected_len)
        if len(raw_reply) != expected_len:
            raise ProtocolException("Failed to receive reply")
        return [ord(ch) for ch in raw_reply]

    def close(self):
        """
        @brief Close the underlying serial port.
        """
        self.serial.close()
| 1.890625 | 2 |
PYQT5/Games/RockPapperScissorsGame.py | Amara-Manikanta/Python-GUI | 0 | 17176 | <reponame>Amara-Manikanta/Python-GUI<gh_stars>0
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtCore import QTimer
from random import randint
# Shared UI fonts for labels and buttons.
font = QFont("Times", 14)
buttonFont = QFont("Arial", 12)
# Global running scores for the match (first to 5 ends the game).
computerScore = 0
playerScore = 0
class Windows(QWidget):
    """Main window for a rock-paper-scissors game against the computer.

    Start spins both hands via a 50 ms QTimer; Stop freezes them and scores
    the round (1=rock, 2=paper, 3=scissors).  First side to reach 5 points
    ends the game.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Using Spinboxes")
        self.setGeometry(350, 150, 550, 500)
        self.UI()

    def UI(self):
        """Build all widgets: score labels, hand images, buttons and timer."""
        ############################Score Borad#############################
        self.scorecomputerText = QLabel("Computer Score : ", self)
        self.scorecomputerText.move(30, 20)
        self.scorecomputerText.setFont(font)

        self.scorePlayerText = QLabel("Your Score : ", self)
        self.scorePlayerText.setFont(font)
        self.scorePlayerText.move(330, 20)
        ##########################Images###################################
        self.imageComputer = QLabel(self)
        self.imageComputer.setPixmap(QPixmap("Images/rock.png"))
        self.imageComputer.move(50, 100)

        self.imagePlayer = QLabel(self)
        self.imagePlayer.setPixmap(QPixmap("Images/rock.png"))
        self.imagePlayer.move(330, 100)

        self.imagegame = QLabel(self)
        self.imagegame.setPixmap(QPixmap("Images/game.png"))
        self.imagegame.move(230, 145)
        ##################Buttons#########################
        startButton = QPushButton("Start", self)
        startButton.setFont(buttonFont)
        startButton.move(90, 250)
        startButton.clicked.connect(self.funcstart)

        stopButton = QPushButton("Stop", self)
        stopButton.setFont(buttonFont)
        stopButton.move(350, 250)
        stopButton.clicked.connect(self.funcstop)
        ######################Timer##########################
        self.timer = QTimer(self)
        self.timer.setInterval(50)
        self.timer.timeout.connect(self.playGame)

        self.show()

    def playGame(self):
        """Timer tick: roll a random hand for both sides and show its image."""
        self.rndcomputer = randint(1, 3)
        if self.rndcomputer == 1:
            self.imageComputer.setPixmap(QPixmap("Images/rock.png"))
        elif self.rndcomputer == 2:
            self.imageComputer.setPixmap(QPixmap("Images/paper.png"))
        else:
            self.imageComputer.setPixmap(QPixmap("Images/scissors.png"))

        self.rndplayer = randint(1, 3)
        if self.rndplayer == 1:
            self.imagePlayer.setPixmap(QPixmap("Images/rock.png"))
        elif self.rndplayer == 2:
            self.imagePlayer.setPixmap(QPixmap("Images/paper.png"))
        else:
            self.imagePlayer.setPixmap(QPixmap("Images/scissors.png"))

    def funcstart(self):
        """Start spinning the hands."""
        self.timer.start()

    def funcstop(self):
        """Stop the spin, score the round and end the game at 5 points."""
        global computerScore
        global playerScore
        self.timer.stop()
        # Fix: pressing Stop before Start used to raise AttributeError
        # because no hands had been rolled yet; ignore such clicks.
        if not hasattr(self, 'rndcomputer') or not hasattr(self, 'rndplayer'):
            return
        if self.rndcomputer == self.rndplayer:
            QMessageBox.information(self, "Information", "Draw Game")
        elif (self.rndcomputer, self.rndplayer) in ((1, 2), (2, 3), (3, 1)):
            # paper beats rock, scissors beat paper, rock beats scissors
            QMessageBox.information(self, "Information", "you win!")
            playerScore += 1
            self.scorePlayerText.setText("Your Score:" + str(playerScore))
        else:
            QMessageBox.information(self, "Information", "Computer wins!")
            computerScore += 1
            self.scorecomputerText.setText("Computer Score:" + str(computerScore))

        if computerScore == 5 or playerScore == 5:
            QMessageBox.information(self, "Information", "Game Over")
            sys.exit()
def main():
    """Application entry point: start Qt and show the game window."""
    qt_app = QApplication(sys.argv)
    game_window = Windows()  # keep a reference so the window is not collected
    sys.exit(qt_app.exec_())


if __name__ == '__main__':
    main()
| 3.140625 | 3 |
services/neural/traindatabase.py | vitorecomp/hackaton-deep-learn | 0 | 17177 | <filename>services/neural/traindatabase.py<gh_stars>0
from os import walk
import h5py
import numpy as np
from config.Database import Base
from config.Database import engine
from config.Database import Session
from models.Music import Music
from kmeans.kmeans import Kmeans
mypath = './dataset/datatr/'
def main():
    """Create the DB schema, load all stored musics and cluster them.

    Commits the session after clustering so any changes made by
    Kmeans.split on the Music rows are persisted.
    """
    # 1 - make sure the schema exists before querying
    Base.metadata.create_all(engine)
    # 2 - open a session and fetch every stored music
    session = Session()
    musics = session.query(Music).all()
    # 3 - cluster the musics; `distances` is currently unused downstream
    musics, distances = Kmeans.split(musics)
    session.commit()
    # Removed: an unused `files = []` local and a redundant trailing return.


if __name__ == "__main__":
    main()
| 2.296875 | 2 |
projects/TGS_salt/binary_classifier/model.py | liaopeiyuan/ml-arsenal-public | 280 | 17178 | <filename>projects/TGS_salt/binary_classifier/model.py
import torch.nn as nn
import pretrainedmodels
class classifier(nn.Module):
    """ImageNet-pretrained backbone fine-tuned as a single-logit classifier."""

    def __init__(self, model_name='resnet32'):
        super(classifier, self).__init__()
        # Load pretrained ImageNet model
        self.model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
        print(model_name + ' model settings:')
        for var in pretrainedmodels.pretrained_settings[model_name]['imagenet']:
            print('\t' + var + ': '+ str(pretrainedmodels.pretrained_settings[model_name]['imagenet'][var]))

        # Replace the final layer for fine-tuning.
        # Fix: the original wrapped an nn.Linear *module* in F.dropout2d at
        # construction time (and `F` was never imported), which raises at
        # runtime.  Dropout must be a layer applied to activations, so
        # compose it with the linear head in an nn.Sequential instead.
        dim_feats = self.model.last_linear.in_features
        nb_classes = 1
        self.model.last_linear = nn.Sequential(
            nn.Dropout(p=0.50),
            nn.Linear(dim_feats, nb_classes),
        )

    def forward(self, input):
        """Run a forward pass; returns the raw logit tensor."""
        return self.model(input)

    def set_mode(self, mode):
        """Switch train/eval state from a mode string (e.g. 'train', 'test')."""
        self.mode = mode
        if 'validation' in mode or 'test' in mode:
            self.eval()
        elif 'train' in mode:
            self.train()
        else:
            raise NotImplementedError
| 2.46875 | 2 |
code/algorithm/assr.py | ShuhuaGao/bcn_opt_dc | 0 | 17179 | <filename>code/algorithm/assr.py
"""
Given a Boolean function/network, get its algebraic state-space representation.
A logical vector `\delta_n^i` is represented by an integer `i` for space efficiency. Consequently, a logical matrix
is represented by a list, each element for one column, (also known as the "condensed form").
[1] Conversion from an infix expression to a postfix one:
https://runestone.academy/runestone/books/published/pythonds/BasicDS/InfixPrefixandPostfixExpressions.html
[2] Logical connectives: https://en.wikipedia.org/wiki/Logical_connective
Author: <NAME>
"""
import operator
import os
from typing import List, Union, Tuple, Iterable, Dict
from .bcn import BooleanNetwork, BooleanControlNetwork
_COMMENT = '#'
_STATES = '[STATES]'
_CONTROLS = '[CONTROLS]'
class LogicalConnective:
"""
Represent a logical connective. https://en.wikipedia.org/wiki/Logical_connective
"""
def __init__(self, id: str, description: str, arity: int, precedence: int, function):
"""
Initialize a logical connective.
:param id: a unique description
:param description: a description text
:param arity: number of operands
:param precedence: operator precedence
:param function: callable, the underlying operation which accepts *arity* argments
"""
self.id = id
self.description = description
self.arity = arity
self.precedence = precedence # a smaller number means a higher precedence
self.function = function
def __str__(self):
return self.id
def __call__(self, *args):
return self.function(*args)
def _imply(a, b):
if a:
return b
return 1
def _xnor(a, b):
return a == b
LOGICAL_CONNECTIVES = {
'NOT': LogicalConnective('NOT', 'not', 1, 0, operator.not_),
'XOR': LogicalConnective('XOR', 'exclusive disjunction', 2, 1, operator.xor),
'AND': LogicalConnective('AND', 'and', 2, 2, operator.and_),
'OR': LogicalConnective('OR', 'or', 2, 3, operator.or_),
'IMPLY': LogicalConnective('IMPLY', 'implication', 2, 4, _imply),
'EQUIV': LogicalConnective('EQUIV', 'equivalent', 2, 5, _xnor)
}
def _infix_to_postfix(expression: str) -> List[Union[LogicalConnective, str]]:
"""
Convert an infix expression to its postfix form.
:param expression: infix, separated by spaces
:return: postfix expression, a list, whose element is an operator (LogicalConnective) or a variable (str)
"""
# parse tokens: handle ( and ) specially, which may not be separated by spaces, e.g., 'A OR (B AND C)'
items = expression.split()
tokens = []
for item in items:
token = ''
for c in item:
if c in '()':
if token:
tokens.append(token)
token = ''
tokens.append(c)
else:
token = token + c
if token:
tokens.append(token)
# conversion
op_stack = []
output = []
for token in tokens:
if token.upper() in LOGICAL_CONNECTIVES: # an operator
connective = LOGICAL_CONNECTIVES[token.upper()]
while op_stack and isinstance(op_stack[-1], LogicalConnective) and \
op_stack[-1].precedence < connective.precedence:
output.append(op_stack.pop())
op_stack.append(connective)
elif token == '(':
op_stack.append(token)
elif token == ')':
left_parenthesis_found = False
while op_stack:
top = op_stack.pop()
if top == '(':
left_parenthesis_found = True
break
else:
output.append(top)
if not left_parenthesis_found:
raise RuntimeError("Unmatched parentheses are encountered: an extra ')'!")
elif token.upper() in ['1', 'TRUE']:
output.append('TRUE')
elif token.upper() in ['0', 'FALSE']:
output.append('FALSE')
else: # a variable
output.append(token)
while op_stack:
top = op_stack.pop()
if top == '(':
raise RuntimeError("Unmatched parentheses are encountered: an extra '('!")
output.append(top)
return output
def _evaluate_postfix(expression, values: {}):
"""
Evaluate a postfix expression with the given parameter values.
:param expression: postfix
:param values: a dict: variable --> value (0/1 or False/True)
:return: a Boolean variable, or 0/1
"""
operand_stack = []
for token in expression:
if isinstance(token, str): # a variable
if token in values:
val = values[token]
operand_stack.append(val)
elif token == 'TRUE':
operand_stack.append(True)
elif token == 'FALSE':
operand_stack.append(False)
else:
raise RuntimeError(f"Unrecognized variable: '{token}'")
else: # a logical connective
arguments = []
for _ in range(token.arity):
arguments.append(operand_stack.pop())
result = token(*arguments[::-1])
operand_stack.append(result)
return operand_stack.pop()
def _assr_function(pf_expr: List[Union[LogicalConnective, str]], states: List[str], controls: List[str]) -> List[int]:
"""
Compute the ASSR for a Boolean function.
:param pf_expr: the postfix expression of a Boolean function
:param states: the state variables
:param controls: the control inputs. If `None`, then no inputs.
:return: the structure matrix, a list of length MN
"""
n = len(states)
m = len(controls)
N = 2 ** n
M = 2 ** m
MN = M * N
all_variables = controls + states
structure_matrix = [None] * MN
# enumerate the binary sequences to get the truth table
for h in range(MN):
bh = f'{h:0{m+n}b}'
values = {var: int(val) for var, val in zip(all_variables, bh)}
output = _evaluate_postfix(pf_expr, values)
k = MN - h
if output: # 1 (True)
structure_matrix[k - 1] = 1
else:
structure_matrix[k - 1] = 2
return structure_matrix
def _tokenize(state_to_expr: Dict[str, str], controls: Iterable[str]=None) -> Tuple[Dict[str, List[Union[LogicalConnective, str]]], List[str]]:
"""
(1) Parse the `exprs` into postfix forms
(2) Infer the control inputs, if `controls` is `None`
:return: the tokenized expressions and the controls
"""
state_to_pf_expr = {s: _infix_to_postfix(e) for s, e in state_to_expr.items()}
if controls is None:
# infer controls
controls = []
for pf_expr in state_to_pf_expr.values():
for t in pf_expr:
if isinstance(t, str): # t is a variable, or 'TRUE' or 'FALSE'
if t not in ['TRUE', 'FALSE'] and t not in state_to_pf_expr: # a control
if t not in controls:
controls.append(t)
else:
controls = list(controls)
# validate
for s, pf_expr in state_to_pf_expr.items():
for t in pf_expr:
if isinstance(t, str):
assert t in state_to_pf_expr or t in controls, f"Unrecognized variable: '{t}' in equation of {s}"
return state_to_pf_expr, controls
def _assr_network(state_to_pf_expr: Dict[str, List[Union[LogicalConnective, str]]], states: List[str],
controls: List[str], verbose: bool=True) -> List[int]:
"""
Get the ASSR of a Boolean (control) network.
:param state_to_pf_expr: state -> its postfix expression
:param states: state variables
:param controls: control inputs.
:return: network transition matrix, each column is represented by an integer
"""
assert len(state_to_pf_expr) == len(states), 'The number of Boolean functions must be equal to the number of state states'
# get the structure matrix of each state (i.e., its Boolean equation)
state_to_sms = {}
for s, pf_expr in state_to_pf_expr.items():
if verbose:
print(f'\tComputing the structure matrix for state {s} ...')
state_to_sms[s] = _assr_function(pf_expr, states, controls)
n = len(states)
m = len(controls)
transition_matrix = [None] * (2 ** m * 2 ** n)
stp = lambda i, j: (i - 1) * 2 + j
if verbose:
print('\tComposing the complete network transition matrix...')
for k in range(len(transition_matrix)): # k-th column
r = 1
for s in states:
sm = state_to_sms[s]
r = stp(r, sm[k])
transition_matrix[k] = r
return transition_matrix
def build_ASSR(source: Union[str, Iterable[str]], states: List[str]=None,
               controls: List[str]=None, verbose: bool=True) -> Union[BooleanNetwork, BooleanControlNetwork]:
    """
    Build the ASSR for a given Boolean network in a string form.
    Each Boolean function is given by the form: state = f(states, controls).
    If a text file is given, each Boolean function is provided per line, and '#' starts a comment line
    :param source: str or a list of str. (1) str: a single Boolean function or a text file, which contains one or more
        Boolean functions (i.e., a network), each per line; (2) a list of str: multiple Boolean functions
    :param states: state variables. If `None`, then inferred automatically.
    :param controls: control inputs. If this a Boolean network with no inputs, then give it an empty List.
        If `None`, then inferred automatically.
    :param verbose: whether to print more information
    :return: a Boolean network if there are no inputs; otherwise, a Boolean control network
    .. note::
        If the states and controls are inferred, the order of states corresponds to the line order, whereas the order
        of controls depend on their appearance order in the equations. To precisely control the order (especially for
        controls), two additional lines may be appended after the state equations that begin with "[STATES]" or "[CONTROLS]".
        For example, line "[STATES] AKT MKK EGFR" specifies the state order (AKT, MKK, EGFR).
        Of course, both "[STATES]" and "[CONTROLS]" lines are optional.
        The non-None arguments `states` and `controls` have higher precedence than "[STATES]" and "[CONTROLS]" lines respectively.
    """
    # get the strings of a network
    net = []
    if isinstance(source, str):
        if os.path.isfile(source):
            if verbose:
                print(f'User provided a network file: {source}\nParsing...')
            with open(source, 'r') as f:
                for line in f:
                    line = line.strip()
                    # _COMMENT / _STATES / _CONTROLS are module-level marker
                    # prefixes defined elsewhere in this file (presumably '#',
                    # '[STATES]' and '[CONTROLS]' per the docstring).
                    if line.startswith(_COMMENT):
                        continue
                    elif line.startswith(_STATES):
                        # explicit "[STATES] ..." line; the `states` argument
                        # (if given) takes precedence over it
                        if states is None:
                            words = line.split()
                            states = [w.strip() for w in words[1:]]
                    elif line.startswith(_CONTROLS):
                        # explicit "[CONTROLS] ..." line; same precedence rule
                        if controls is None:
                            words = line.split()
                            controls = [w.strip() for w in words[1:]]
                    else:
                        if line:  # skip empty lines if any
                            net.append(line)
        else:
            # A plain string that is not a file path: a single equation.
            if verbose:
                print(f'User provided a single Boolean equation.')
            net.append(source)
    else:
        # Any non-string iterable: one equation per element.
        if verbose:
            print(f'User provided a list of Boolean equations.')
        net = list(source)
    # extract the states and equations; each equation is "state = expression"
    state_to_expr = {}
    inferred_states = []
    for eq in net:
        state, expr = eq.split('=')
        state = state.strip()
        expr = expr.strip()
        if states is not None:
            # states were given explicitly: every equation must match one
            assert state in states, f'Unexpected state {state} is encountered!'
        else:
            # states inferred: line order defines the state order
            inferred_states.append(state)
        assert state not in state_to_expr, f'More than one equation is provided for state {state}'
        state_to_expr[state] = expr
    if states is not None:
        # every declared state needs exactly one equation
        for s in states:
            assert s in state_to_expr, f'The equation for state {s} is missing'
    else:
        states = inferred_states
    if verbose:
        print('Tokenizing...')
    # tokenize (also infers controls when they were not supplied)
    state_to_pf_expr, controls = _tokenize(state_to_expr, controls)
    assert set(states).isdisjoint(controls), 'States and controls should be disjoint'
    if verbose:
        print(f'States are {states}')
        print(f'Controls are {controls}')
        print('Computing...')
    # get the ASSR the network
    L = _assr_network(state_to_pf_expr, states, controls, verbose)
    # wrap them into a Boolean (control) network
    m = len(controls)
    n = len(states)
    if m == 0:
        # no inputs: a plain Boolean network
        return BooleanNetwork(n, L, states)
    return BooleanControlNetwork(n, m, L, states, controls)
datasets/voc_dataset.py | ming71/DAL | 206 | 17180 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Extended by <NAME>
# --------------------------------------------------------
import os
import cv2
import numpy as np
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from utils.bbox import quad_2_rbox
class VOCDataset(data.Dataset):
    """PASCAL-VOC-format detection dataset yielding rotated ground-truth boxes.

    ``__getitem__`` returns a dict with:
      * ``'image'``: the RGB image as a numpy array
      * ``'boxes'``: an ``(N, 6)`` float32 array, one row per ground-truth
        object: the 5 rotated-box values produced by ``quad_2_rbox`` followed
        by the class id.
    """

    def __init__(self,
                 dataset='trainval.txt',
                 augment = False,
                 level = 1,
                 random_flip=True):
        # `dataset` is the path to an image-set file, conventionally located
        # at <root>/ImageSets/Main/<split>.txt, so the VOC root directory is
        # three levels up from it.
        self.image_set = dataset
        # BUGFIX: the original used
        #     self.image_set.strip('/ImageSets/Main/trainval.txt')
        # but str.strip() removes a *set of characters* from both ends, not a
        # suffix, so it could also eat leading characters of the root path
        # (e.g. '/root/VOC/...' became 'oot/VOC'). Derive the root properly.
        self.data_path = os.path.dirname(
            os.path.dirname(os.path.dirname(self.image_set)))
        self.image_ext = [".jpg"]
        self.image_list = self._load_image_names()
        self.classes = ('__background__', 'aeroplane','bicycle','bird','boat',
                        'bottle','bus','car','cat','chair','cow','diningtable',
                        'dog','horse','motorbike','person','pottedplant',
                        'sheep','sofa','train','tvmonitor')
        self.num_classes = len(self.classes)
        self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        self.random_flip = random_flip

    def __len__(self):
        """Number of images listed in the image-set file."""
        return len(self.image_list)

    def __getitem__(self, index):
        """Load image `index` and its (optionally flipped) rotated GT boxes."""
        im_path = self._image_path_from_index(self.image_list[index])
        im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        roidb = self._load_pascal_annotation(self.image_list[index])
        gt_inds = np.where(roidb['gt_classes'] != 0)[0]  # drop background rows
        bboxes = roidb['boxes'][gt_inds, :]
        classes = roidb['gt_classes'][gt_inds]
        if self.random_flip and np.random.rand() >= 0.5:
            # horizontal flip: mirror the image and every x coordinate
            im = cv2.flip(im, 1, None)
            oldxs = bboxes[:, 0::2].copy()
            bboxes[:, 0::2] = im.shape[1] - oldxs - 1
        gt_boxes = np.empty((len(gt_inds), 6), dtype=np.float32)
        for i, bbox in enumerate(bboxes):
            gt_boxes[i, :5] = quad_2_rbox(np.array(bbox))  # quad -> rotated box
            gt_boxes[i, 5] = classes[i]
        return {'image': im, 'boxes': gt_boxes}

    def _load_image_names(self):
        """
        Load the names listed in this dataset's image set file.
        Returns an empty list (with a warning) when the file does not exist.
        """
        image_set_file = self.image_set
        if not os.path.exists(image_set_file):
            # BUGFIX: the original built this message as a bare string
            # expression that had no effect; actually emit it.
            print('Path does not exist: {}'.format(image_set_file))
            image_names = []
        else:
            with open(image_set_file) as f:
                image_names = [x.strip() for x in f.readlines()]
        return image_names

    def _image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        Tries every extension in `self.image_ext`; raises if none exists.
        """
        image_path = None
        image_exist = False
        for image_ext in self.image_ext:
            image_path = os.path.join(self.data_path, 'JPEGImages', index + image_ext)
            if os.path.exists(image_path):
                image_exist = True
                break
        if not image_exist:
            raise Exception('Image path does not exist: {}'.format(
                os.path.join(self.data_path, 'JPEGImages', index))
            )
        return image_path

    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC format.
        Objects marked 'difficult' are skipped. Each returned box is an
        8-value quadrilateral (TL, TR, BR, BL corners of the axis-aligned bndbox).
        """
        filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        boxes, gt_classes = [], []
        for obj in objs:
            difficult = int(obj.find('difficult').text)
            is_latin = obj.find('language') is None or obj.find('language').text == 'Latin'
            bnd_box = obj.find('bndbox')
            # clockwise corners of the axis-aligned box as a quadrilateral
            box = [
                float(bnd_box.find('xmin').text),
                float(bnd_box.find('ymin').text),
                float(bnd_box.find('xmax').text),
                float(bnd_box.find('ymin').text),
                float(bnd_box.find('xmax').text),
                float(bnd_box.find('ymax').text),
                float(bnd_box.find('xmin').text),
                float(bnd_box.find('ymax').text),
            ]
            label = self.class_to_ind[obj.find('name').text.lower().strip()]
            if difficult:
                continue
            # if self.only_latin and not is_latin:
            #     continue
            boxes.append(box)
            gt_classes.append(label)
        return {'boxes': np.array(boxes, dtype=np.int32), 'gt_classes': np.array(gt_classes)}

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self._image_path_from_index(self.image_list[i])

    def return_class(self, id):
        """Map a numeric class id back to its class name."""
        id = int(id)
        return self.classes[id]
# Import-only module: no standalone behavior when executed directly.
# (Removed the dataset-score artifact that was fused onto the `pass` line
# and made it a syntax error.)
if __name__ == '__main__':
    pass
neploid.py | GravityI/neploid | 0 | 17181 | import discord
import random
import asyncio
import logging
import urllib.request
from discord.ext import commands
bot = commands.Bot(command_prefix='nep ', description= "Nep Nep")

# Shared state for the countdown feature.
counter = 0        # current countdown value
countTask = None   # asyncio.Task running `countdown`, or None when idle


@bot.event
async def on_ready():
    """Log the bot's identity once the gateway connection is ready."""
    print('Logged in as')
    print(bot.user.name)
    # print(bot.user.id)
    print('------')


@bot.command()
async def nep(ctx):
    """Reply with the bot's catchphrase."""
    await ctx.send("NEP NEP")


@bot.command(pass_context = True)
async def guessWhat(ctx):
    """Congratulate the invoking user by display name."""
    await ctx.send(str(ctx.message.author.display_name) + " officially learned how to code a Discord bot")


async def countdown(channel):
    """Increment the global counter every 3 seconds and post it to `channel`."""
    global counter
    while not bot.is_closed():
        counter += 1
        await channel.send("Count is at " + str(counter))
        await asyncio.sleep(3)


@bot.command(pass_context = True, aliases = ["collect"])
async def sc(ctx):
    """Start the countdown task in the invoking channel."""
    global countTask
    await ctx.send("Countdown Started!")
    countTask = bot.loop.create_task(countdown(ctx.message.channel))


@bot.command(pass_context = True, aliases = ["cancel", "stop"])
async def cc(ctx):
    """Cancel a running countdown task.

    BUGFIX: the original called `countTask.cancel()` unconditionally, which
    raised AttributeError when `cc` was invoked before any countdown started.
    """
    global countTask
    if countTask is None:
        await ctx.send("No countdown is running!")
        return
    await ctx.send("Countdown Cancelled!")
    countTask.cancel()
    countTask = None


@bot.command(pass_context = True)
async def pm(ctx, *content):
    """DM the invoking user with the words passed to the command.

    BUGFIX: the original sent the raw argument tuple when a DM channel
    already existed, but a joined string when it had to create one. Both
    paths now send the same space-joined message.
    """
    if ctx.author.dm_channel is None:
        await ctx.author.create_dm()
    await ctx.author.dm_channel.send(' '.join(content))


@bot.command(aliases = ['nh'])
async def nhentai(ctx):
    """Reply with the URL that nhentai's /random/ endpoint redirects to."""
    rurl = "https://nhentai.net/random/"
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    accessHurl = urllib.request.urlopen(urllib.request.Request(rurl, headers = headers))
    await ctx.send(accessHurl.geturl())


# NOTE(review): the token should be loaded from an environment variable or a
# config file instead of being hard-coded in source.
token = "insert token here"
bot.run(token)
odoo/base-addons/l10n_tr/__manifest__.py | LucasBorges-Santos/docker-odoo | 0 | 17182 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Turkey - Accounting',
'version': '1.0',
'category': 'Localization',
'description': """
Türkiye için Tek düzen hesap planı şablonu Odoo Modülü.
==========================================================
Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır
* Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket, banka hesap
bilgileriniz, ilgili para birimi gibi bilgiler isteyecek.
""",
'author': '<NAME>, <NAME>',
'maintainer':'https://launchpad.net/~openerp-turkey, http://www.cantecim.com',
'depends': [
'account',
],
'data': [
'data/l10n_tr_chart_data.xml',
'data/account.account.template.csv',
'data/l10n_tr_chart_post_data.xml',
'data/account_data.xml',
'data/account_tax_template_data.xml',
'data/account_chart_template_data.xml',
],
'license': 'LGPL-3',
}
Source Codes/SMF_Python/smf_main.py | mmaher22/iCV-SBR | 20 | 17183 | import os
import time
import argparse
import pandas as pd
from smf import SessionMF
# ---------------------------------------------------------------------------
# Command-line arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--K', type=int, default=20, help="K items to be used in Recall@K and MRR@K")
parser.add_argument('--factors', type=int, default=100, help="Number of latent factors.")
parser.add_argument('--batch', type=int, default=32, help="Batch size for the training process")
parser.add_argument('--momentum', type=float, default=0.0, help="Momentum of the optimizer adagrad_sub")
parser.add_argument('--regularization', type=float, default=0.0001, help="Regularization Amount of the objective function")
parser.add_argument('--dropout', type=float, default=0.0, help="Share of items that are randomly discarded from the current session while training")
parser.add_argument('--skip', type=float, default=0.0, help="Probability that an item is skiped and the next one is used as the positive example")
parser.add_argument('--neg_samples', type=int, default=2048, help="Number of items that are sampled as negative examples")
parser.add_argument('--activation', type=str, default='linear', help="Final activation function (linear, sigmoid, uf_sigmoid, hard_sigmoid, relu, softmax, softsign, softplus, tanh)")
parser.add_argument('--objective', type=str, default='bpr_max', help="Loss Function (bpr_max, top1_max, bpr, top1)")
parser.add_argument('--epochs', type=int, default=10, help="Number of Epochs")
parser.add_argument('--lr', type=float, default=0.001, help="Learning Rate")
parser.add_argument('--itemid', default='ItemID', type=str)
parser.add_argument('--sessionid', default='SessionID', type=str)
parser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)
parser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)
parser.add_argument('--data_folder', default='/home/icvuser/Desktop/Recsys cleaned data/RecSys15 Dataset Splits', type=str)
# Get the arguments
args = parser.parse_args()

# ---------------------------------------------------------------------------
# Load and prepare the data (sorted by session so events stay grouped)
# ---------------------------------------------------------------------------
train_data = os.path.join(args.data_folder, args.train_data)
x_train = pd.read_csv(train_data)
x_train.sort_values(args.sessionid, inplace=True)
x_train = x_train.iloc[-int(len(x_train) / 64):]  # just take 1/64 last instances
valid_data = os.path.join(args.data_folder, args.valid_data)
x_valid = pd.read_csv(valid_data)
x_valid.sort_values(args.sessionid, inplace=True)
print('Finished Reading Data \nStart Model Fitting...')

# ---------------------------------------------------------------------------
# Fit the session-based matrix-factorization model
# ---------------------------------------------------------------------------
t1 = time.time()
model = SessionMF(factors = args.factors, session_key = args.sessionid, item_key = args.itemid,
                  batch = args.batch, momentum = args.momentum, regularization = args.regularization,
                  dropout = args.dropout, skip = args.skip, samples = args.neg_samples,
                  activation = args.activation, objective = args.objective, epochs = args.epochs, learning_rate = args.lr)
model.fit(x_train)
t2 = time.time()
print('End Model Fitting with total time =', t2 - t1, '\n Start Predictions...')

# ---------------------------------------------------------------------------
# Evaluate Recall@K and MRR@K on the validation set, session by session
# ---------------------------------------------------------------------------
test_size = 0.0
hit = 0.0
MRR = 0.0
cur_length = 0       # number of predictions already made in the current session
cur_session = -1     # id of the session currently being replayed
last_items = []      # items observed so far in the current session
t1 = time.time()
index_item = x_valid.columns.get_loc(args.itemid)
index_session = x_valid.columns.get_loc(args.sessionid)
train_items = model.unique_items  # NOTE(review): unused below; kept for parity
counter = 0
for row in x_valid.itertuples(index=False):
    counter += 1
    if counter % 10000 == 0:
        print('Finished Prediction for ', counter, 'items.')
    session_id, item_id = row[index_session], row[index_item]
    if session_id != cur_session:
        # new session: reset the running history
        cur_session = session_id
        last_items = []
        cur_length = 0
    # only items seen during training can be scored or used as history
    if item_id in model.item_map:
        if len(last_items) > cur_length:  # need at least one preceding known item
            cur_length += 1
            test_size += 1
            # Predict the most similar items to the session history
            predictions = model.predict_next(last_items, K=args.K)
            # Recall@K: hit if the true next item is anywhere in the top K;
            # MRR@K: reciprocal of its 1-based rank.
            for rank, predicted_item in enumerate(predictions, start=1):
                if int(predicted_item) == item_id:
                    hit += 1.0
                    MRR += 1 / rank
                    break
        last_items.append(item_id)
t2 = time.time()
# BUGFIX: guard against ZeroDivisionError when nothing could be evaluated,
# and drop the extraction artifact that was fused onto the final print line.
if test_size > 0:
    print('Recall: {}'.format(hit / test_size))
    print('\nMRR: {}'.format(MRR / test_size))
else:
    print('No evaluable events found in the validation set.')
print('End Model Predictions with total time =', t2 - t1)
sdks/python/appcenter_sdk/models/Device.py | Brantone/appcenter-sdks | 0 | 17184 | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: <EMAIL>
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class Device(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'sdk_name': 'string',
'sdk_version': 'string',
'wrapper_sdk_version': 'string',
'wrapper_sdk_name': 'string',
'model': 'string',
'oem_name': 'string',
'os_name': 'string',
'os_version': 'string',
'os_build': 'string',
'os_api_level': 'integer',
'locale': 'string',
'time_zone_offset': 'integer',
'screen_size': 'string',
'app_version': 'string',
'carrier_name': 'string',
'carrier_code': 'string',
'carrier_country': 'string',
'app_build': 'string',
'app_namespace': 'string',
'live_update_release_label': 'string',
'live_update_deployment_key': 'string',
'live_update_package_hash': 'string',
'wrapper_runtime_version': 'string'
}
attribute_map = {
'sdk_name': 'sdk_name',
'sdk_version': 'sdk_version',
'wrapper_sdk_version': 'wrapper_sdk_version',
'wrapper_sdk_name': 'wrapper_sdk_name',
'model': 'model',
'oem_name': 'oem_name',
'os_name': 'os_name',
'os_version': 'os_version',
'os_build': 'os_build',
'os_api_level': 'os_api_level',
'locale': 'locale',
'time_zone_offset': 'time_zone_offset',
'screen_size': 'screen_size',
'app_version': 'app_version',
'carrier_name': 'carrier_name',
'carrier_code': 'carrier_code',
'carrier_country': 'carrier_country',
'app_build': 'app_build',
'app_namespace': 'app_namespace',
'live_update_release_label': 'live_update_release_label',
'live_update_deployment_key': 'live_update_deployment_key',
'live_update_package_hash': 'live_update_package_hash',
'wrapper_runtime_version': 'wrapper_runtime_version'
}
def __init__(self, sdk_name=None, sdk_version=None, wrapper_sdk_version=None, wrapper_sdk_name=None, model=None, oem_name=None, os_name=None, os_version=None, os_build=None, os_api_level=None, locale=None, time_zone_offset=None, screen_size=None, app_version=None, carrier_name=None, carrier_code=None, carrier_country=None, app_build=None, app_namespace=None, live_update_release_label=None, live_update_deployment_key=None, live_update_package_hash=None, wrapper_runtime_version=None): # noqa: E501
"""Device - a model defined in Swagger""" # noqa: E501
self._sdk_name = None
self._sdk_version = None
self._wrapper_sdk_version = None
self._wrapper_sdk_name = None
self._model = None
self._oem_name = None
self._os_name = None
self._os_version = None
self._os_build = None
self._os_api_level = None
self._locale = None
self._time_zone_offset = None
self._screen_size = None
self._app_version = None
self._carrier_name = None
self._carrier_code = None
self._carrier_country = None
self._app_build = None
self._app_namespace = None
self._live_update_release_label = None
self._live_update_deployment_key = None
self._live_update_package_hash = None
self._wrapper_runtime_version = None
self.discriminator = None
self.sdk_name = sdk_name
self.sdk_version = sdk_version
if wrapper_sdk_version is not None:
self.wrapper_sdk_version = wrapper_sdk_version
if wrapper_sdk_name is not None:
self.wrapper_sdk_name = wrapper_sdk_name
if model is not None:
self.model = model
if oem_name is not None:
self.oem_name = oem_name
self.os_name = os_name
self.os_version = os_version
if os_build is not None:
self.os_build = os_build
if os_api_level is not None:
self.os_api_level = os_api_level
self.locale = locale
self.time_zone_offset = time_zone_offset
if screen_size is not None:
self.screen_size = screen_size
self.app_version = app_version
if carrier_name is not None:
self.carrier_name = carrier_name
if carrier_code is not None:
self.carrier_code = carrier_code
if carrier_country is not None:
self.carrier_country = carrier_country
self.app_build = app_build
if app_namespace is not None:
self.app_namespace = app_namespace
if live_update_release_label is not None:
self.live_update_release_label = live_update_release_label
if live_update_deployment_key is not None:
self.live_update_deployment_key = live_update_deployment_key
if live_update_package_hash is not None:
self.live_update_package_hash = live_update_package_hash
if wrapper_runtime_version is not None:
self.wrapper_runtime_version = wrapper_runtime_version
@property
def sdk_name(self):
"""Gets the sdk_name of this Device. # noqa: E501
Name of the SDK. Consists of the name of the SDK and the platform, e.g. "appcenter.ios", "hockeysdk.android".
# noqa: E501
:return: The sdk_name of this Device. # noqa: E501
:rtype: string
"""
return self._sdk_name
@sdk_name.setter
def sdk_name(self, sdk_name):
"""Sets the sdk_name of this Device.
Name of the SDK. Consists of the name of the SDK and the platform, e.g. "appcenter.ios", "hockeysdk.android".
# noqa: E501
:param sdk_name: The sdk_name of this Device. # noqa: E501
:type: string
"""
if sdk_name is None:
raise ValueError("Invalid value for `sdk_name`, must not be `None`") # noqa: E501
self._sdk_name = sdk_name
@property
def sdk_version(self):
"""Gets the sdk_version of this Device. # noqa: E501
Version of the SDK in semver format, e.g. "1.2.0" or "0.12.3-alpha.1".
# noqa: E501
:return: The sdk_version of this Device. # noqa: E501
:rtype: string
"""
return self._sdk_version
@sdk_version.setter
def sdk_version(self, sdk_version):
"""Sets the sdk_version of this Device.
Version of the SDK in semver format, e.g. "1.2.0" or "0.12.3-alpha.1".
# noqa: E501
:param sdk_version: The sdk_version of this Device. # noqa: E501
:type: string
"""
if sdk_version is None:
raise ValueError("Invalid value for `sdk_version`, must not be `None`") # noqa: E501
self._sdk_version = sdk_version
@property
def wrapper_sdk_version(self):
"""Gets the wrapper_sdk_version of this Device. # noqa: E501
Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.
# noqa: E501
:return: The wrapper_sdk_version of this Device. # noqa: E501
:rtype: string
"""
return self._wrapper_sdk_version
@wrapper_sdk_version.setter
def wrapper_sdk_version(self, wrapper_sdk_version):
"""Sets the wrapper_sdk_version of this Device.
Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.
# noqa: E501
:param wrapper_sdk_version: The wrapper_sdk_version of this Device. # noqa: E501
:type: string
"""
self._wrapper_sdk_version = wrapper_sdk_version
@property
def wrapper_sdk_name(self):
"""Gets the wrapper_sdk_name of this Device. # noqa: E501
Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. "appcenter.xamarin", "hockeysdk.cordova".
# noqa: E501
:return: The wrapper_sdk_name of this Device. # noqa: E501
:rtype: string
"""
return self._wrapper_sdk_name
@wrapper_sdk_name.setter
def wrapper_sdk_name(self, wrapper_sdk_name):
"""Sets the wrapper_sdk_name of this Device.
Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. "appcenter.xamarin", "hockeysdk.cordova".
# noqa: E501
:param wrapper_sdk_name: The wrapper_sdk_name of this Device. # noqa: E501
:type: string
"""
self._wrapper_sdk_name = wrapper_sdk_name
@property
def model(self):
"""Gets the model of this Device. # noqa: E501
Device model (example: iPad2,3).
# noqa: E501
:return: The model of this Device. # noqa: E501
:rtype: string
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this Device.
Device model (example: iPad2,3).
# noqa: E501
:param model: The model of this Device. # noqa: E501
:type: string
"""
self._model = model
@property
def oem_name(self):
"""Gets the oem_name of this Device. # noqa: E501
Device manufacturer (example: HTC).
# noqa: E501
:return: The oem_name of this Device. # noqa: E501
:rtype: string
"""
return self._oem_name
@oem_name.setter
def oem_name(self, oem_name):
"""Sets the oem_name of this Device.
Device manufacturer (example: HTC).
# noqa: E501
:param oem_name: The oem_name of this Device. # noqa: E501
:type: string
"""
self._oem_name = oem_name
@property
def os_name(self):
"""Gets the os_name of this Device. # noqa: E501
OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.
# noqa: E501
:return: The os_name of this Device. # noqa: E501
:rtype: string
"""
return self._os_name
@os_name.setter
def os_name(self, os_name):
"""Sets the os_name of this Device.
OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.
# noqa: E501
:param os_name: The os_name of this Device. # noqa: E501
:type: string
"""
if os_name is None:
raise ValueError("Invalid value for `os_name`, must not be `None`") # noqa: E501
self._os_name = os_name
@property
def os_version(self):
"""Gets the os_version of this Device. # noqa: E501
OS version (example: 9.3.0).
# noqa: E501
:return: The os_version of this Device. # noqa: E501
:rtype: string
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""Sets the os_version of this Device.
OS version (example: 9.3.0).
# noqa: E501
:param os_version: The os_version of this Device. # noqa: E501
:type: string
"""
if os_version is None:
raise ValueError("Invalid value for `os_version`, must not be `None`") # noqa: E501
self._os_version = os_version
@property
def os_build(self):
"""Gets the os_build of this Device. # noqa: E501
OS build code (example: LMY47X).
# noqa: E501
:return: The os_build of this Device. # noqa: E501
:rtype: string
"""
return self._os_build
@os_build.setter
def os_build(self, os_build):
"""Sets the os_build of this Device.
OS build code (example: LMY47X).
# noqa: E501
:param os_build: The os_build of this Device. # noqa: E501
:type: string
"""
self._os_build = os_build
@property
def os_api_level(self):
"""Gets the os_api_level of this Device. # noqa: E501
API level when applicable like in Android (example: 15).
# noqa: E501
:return: The os_api_level of this Device. # noqa: E501
:rtype: integer
"""
return self._os_api_level
@os_api_level.setter
def os_api_level(self, os_api_level):
"""Sets the os_api_level of this Device.
API level when applicable like in Android (example: 15).
# noqa: E501
:param os_api_level: The os_api_level of this Device. # noqa: E501
:type: integer
"""
self._os_api_level = os_api_level
@property
def locale(self):
"""Gets the locale of this Device. # noqa: E501
Language code (example: en_US).
# noqa: E501
:return: The locale of this Device. # noqa: E501
:rtype: string
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this Device.
Language code (example: en_US).
# noqa: E501
:param locale: The locale of this Device. # noqa: E501
:type: string
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def time_zone_offset(self):
"""Gets the time_zone_offset of this Device. # noqa: E501
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:return: The time_zone_offset of this Device. # noqa: E501
:rtype: integer
"""
return self._time_zone_offset
@time_zone_offset.setter
def time_zone_offset(self, time_zone_offset):
"""Sets the time_zone_offset of this Device.
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:param time_zone_offset: The time_zone_offset of this Device. # noqa: E501
:type: integer
"""
if time_zone_offset is None:
raise ValueError("Invalid value for `time_zone_offset`, must not be `None`") # noqa: E501
self._time_zone_offset = time_zone_offset
@property
def screen_size(self):
"""Gets the screen_size of this Device. # noqa: E501
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:return: The screen_size of this Device. # noqa: E501
:rtype: string
"""
return self._screen_size
@screen_size.setter
def screen_size(self, screen_size):
"""Sets the screen_size of this Device.
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:param screen_size: The screen_size of this Device. # noqa: E501
:type: string
"""
self._screen_size = screen_size
@property
def app_version(self):
"""Gets the app_version of this Device. # noqa: E501
Application version name, e.g. 1.1.0
# noqa: E501
:return: The app_version of this Device. # noqa: E501
:rtype: string
"""
return self._app_version
@app_version.setter
def app_version(self, app_version):
"""Sets the app_version of this Device.
Application version name, e.g. 1.1.0
# noqa: E501
:param app_version: The app_version of this Device. # noqa: E501
:type: string
"""
if app_version is None:
raise ValueError("Invalid value for `app_version`, must not be `None`") # noqa: E501
self._app_version = app_version
@property
def carrier_name(self):
"""Gets the carrier_name of this Device. # noqa: E501
Carrier name (for mobile devices).
# noqa: E501
:return: The carrier_name of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_name
@carrier_name.setter
def carrier_name(self, carrier_name):
"""Sets the carrier_name of this Device.
Carrier name (for mobile devices).
# noqa: E501
:param carrier_name: The carrier_name of this Device. # noqa: E501
:type: string
"""
self._carrier_name = carrier_name
@property
def carrier_code(self):
"""Gets the carrier_code of this Device. # noqa: E501
Carrier country code (for mobile devices).
# noqa: E501
:return: The carrier_code of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_code
@carrier_code.setter
def carrier_code(self, carrier_code):
"""Sets the carrier_code of this Device.
Carrier country code (for mobile devices).
# noqa: E501
:param carrier_code: The carrier_code of this Device. # noqa: E501
:type: string
"""
self._carrier_code = carrier_code
@property
def carrier_country(self):
"""Gets the carrier_country of this Device. # noqa: E501
Carrier country.
# noqa: E501
:return: The carrier_country of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_country
@carrier_country.setter
def carrier_country(self, carrier_country):
"""Sets the carrier_country of this Device.
Carrier country.
# noqa: E501
:param carrier_country: The carrier_country of this Device. # noqa: E501
:type: string
"""
self._carrier_country = carrier_country
@property
def app_build(self):
"""Gets the app_build of this Device. # noqa: E501
The app's build number, e.g. 42.
# noqa: E501
:return: The app_build of this Device. # noqa: E501
:rtype: string
"""
return self._app_build
@app_build.setter
def app_build(self, app_build):
"""Sets the app_build of this Device.
The app's build number, e.g. 42.
# noqa: E501
:param app_build: The app_build of this Device. # noqa: E501
:type: string
"""
if app_build is None:
raise ValueError("Invalid value for `app_build`, must not be `None`") # noqa: E501
self._app_build = app_build
@property
def app_namespace(self):
"""Gets the app_namespace of this Device. # noqa: E501
The bundle identifier, package identifier, or namespace, depending on what the individual plattforms use, .e.g com.microsoft.example.
# noqa: E501
:return: The app_namespace of this Device. # noqa: E501
:rtype: string
"""
return self._app_namespace
@app_namespace.setter
def app_namespace(self, app_namespace):
"""Sets the app_namespace of this Device.
The bundle identifier, package identifier, or namespace, depending on what the individual plattforms use, .e.g com.microsoft.example.
# noqa: E501
:param app_namespace: The app_namespace of this Device. # noqa: E501
:type: string
"""
self._app_namespace = app_namespace
@property
def live_update_release_label(self):
"""Gets the live_update_release_label of this Device. # noqa: E501
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:return: The live_update_release_label of this Device. # noqa: E501
:rtype: string
"""
return self._live_update_release_label
@live_update_release_label.setter
def live_update_release_label(self, live_update_release_label):
"""Sets the live_update_release_label of this Device.
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:param live_update_release_label: The live_update_release_label of this Device. # noqa: E501
:type: string
"""
self._live_update_release_label = live_update_release_label
@property
def live_update_deployment_key(self):
"""Gets the live_update_deployment_key of this Device. # noqa: E501
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:return: The live_update_deployment_key of this Device. # noqa: E501
:rtype: string
"""
return self._live_update_deployment_key
@live_update_deployment_key.setter
def live_update_deployment_key(self, live_update_deployment_key):
"""Sets the live_update_deployment_key of this Device.
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:param live_update_deployment_key: The live_update_deployment_key of this Device. # noqa: E501
:type: string
"""
self._live_update_deployment_key = live_update_deployment_key
    @property
    def live_update_package_hash(self):
        """Gets the live_update_package_hash of this Device.  # noqa: E501

        Hash of all files (ReactNative or Cordova) deployed to the device via
        the LiveUpdate beacon. Helps identify the release version on the
        device, or whether updates need to be downloaded in future.
        # noqa: E501

        :return: The live_update_package_hash of this Device.  # noqa: E501
        :rtype: string
        """
        # Simple accessor over the private backing attribute.
        return self._live_update_package_hash
    @live_update_package_hash.setter
    def live_update_package_hash(self, live_update_package_hash):
        """Sets the live_update_package_hash of this Device.

        Hash of all files (ReactNative or Cordova) deployed to the device via
        the LiveUpdate beacon. Helps identify the release version on the
        device, or whether updates need to be downloaded in future.
        # noqa: E501

        :param live_update_package_hash: The live_update_package_hash of this Device.  # noqa: E501
        :type: string
        """
        # Plain attribute write; no validation is performed (swagger-generated).
        self._live_update_package_hash = live_update_package_hash
    @property
    def wrapper_runtime_version(self):
        """Gets the wrapper_runtime_version of this Device.  # noqa: E501

        Version of the wrapper technology framework (Xamarin runtime version
        or ReactNative or Cordova etc...). See wrapper_sdk_name to see whether
        this version refers to Xamarin, ReactNative or another framework.
        # noqa: E501

        :return: The wrapper_runtime_version of this Device.  # noqa: E501
        :rtype: string
        """
        # Simple accessor over the private backing attribute.
        return self._wrapper_runtime_version
    @wrapper_runtime_version.setter
    def wrapper_runtime_version(self, wrapper_runtime_version):
        """Sets the wrapper_runtime_version of this Device.

        Version of the wrapper technology framework (Xamarin runtime version
        or ReactNative or Cordova etc...). See wrapper_sdk_name to see whether
        this version refers to Xamarin, ReactNative or another framework.
        # noqa: E501

        :param wrapper_runtime_version: The wrapper_runtime_version of this Device.  # noqa: E501
        :type: string
        """
        # Plain attribute write; no validation is performed (swagger-generated).
        self._wrapper_runtime_version = wrapper_runtime_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegates to to_str() so repr() and str() output are identical.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Device):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Defined explicitly as the negation of __eq__; required for
        # Python 2 compatibility in swagger-generated models.
        return not self == other
| 1.414063 | 1 |
src/providers/snmp.py | tcuthbert/napi | 0 | 17185 | <filename>src/providers/snmp.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author : <NAME>
import os, sys
from providers.provider import Provider
from config.config import Config
sys.path.append('../')
def _reverse_dict(d):
ret = {}
for key, val in d.items():
if ret.has_key(val):
ret[val].append(key)
else:
ret[val] = [key]
return ret
def _parse_routes(routing_table):
ret = {}
for key, value in routing_table.items():
ret[key] = {}
routes = [i.split('.') for i in value]
for index, route in enumerate(routes):
subnet = ".".join(route[0:4])
ret[key][subnet] = {
"mask": ".".join(route[4:8]),
"next_hop": ".".join(route[9:])
}
return ret
def _strip_oid_from_list(oids, strip):
"""Iterates through list of oids and strips snmp tree off index.
Returns sorted list of indexes.
Keyword Arguments:
self --
oid -- Regular numeric oid index
strip -- Value to be stripped off index
"""
sorted_oids = []
for index in oids:
s = index[0].replace(strip, "")
sorted_oids.append((s, index[1]))
return sorted(sorted_oids)
def _get_snmp(oid, hostname, community):
    """Perform a single SNMP GET against *hostname*.

    :param oid: numeric oid to query
    :param hostname: target device
    :param community: SNMP community string
    :return: (oid, value) pair of pretty-printed strings, or None if the
        request produced an error (which is printed)
    """
    from pysnmp.entity.rfc3413.oneliner import cmdgen
    generator = cmdgen.CommandGenerator()
    err_indication, err_status, err_index, var_bind = generator.getCmd(
        cmdgen.CommunityData(community),
        cmdgen.UdpTransportTarget((hostname, 161)),
        oid)
    if err_indication:
        print(err_indication)
        return None
    if err_status:
        print('%s at %s' % (
            err_status.prettyPrint(),
            err_index and var_bind[int(err_index) - 1] or '?'))
        return None
    # Only the first binding is of interest for a single GET.
    for name, value in var_bind:
        return (name.prettyPrint(), value.prettyPrint())
def _walk_snmp(oid, hostname, community):
    """Walk the SNMP subtree under *oid*, yielding (oid, value) pairs.

    Generator: errors are printed and simply terminate the iteration
    without yielding anything.

    :param oid: numeric oid to walk
    :param hostname: target device
    :param community: SNMP community string
    """
    from pysnmp.entity.rfc3413.oneliner import cmdgen
    generator = cmdgen.CommandGenerator()
    err_indication, err_status, err_index, bind_table = generator.nextCmd(
        cmdgen.CommunityData(community),
        cmdgen.UdpTransportTarget((hostname, 161)),
        oid)
    if err_indication:
        print(err_indication)
    elif err_status:
        print('%s at %s' % (
            err_status.prettyPrint(),
            err_index and bind_table[int(err_index) - 1] or '?'))
    else:
        for bind_row in bind_table:
            for name, val in bind_row:
                yield name.prettyPrint(), val.prettyPrint()
class SNMP(Provider):
    """SNMP implementation of :class:`Provider`.

    Gathers routing tables, interface inventories and (stub) CAM tables
    from a network device using the community strings and OIDs defined in
    the netlib configuration file.
    """

    def __init__(self, *args, **kwargs):
        """Load SNMP connection parameters and OID names from the config file."""
        self.snmp_params = Config.config_section_map("SNMP_PARAMS")
        self.snmp_oids = Config.config_section_map("OIDS")
        super(SNMP, self).__init__(*args, **kwargs)

    def __resolve_community_string(self):
        """Return the SNMP community string matching the device type."""
        # Core devices use a different community than remote ones.
        if self._device.device_type == "core":
            return self.snmp_params["community_core"]
        return self.snmp_params["community_remote"]

    def walk_tree_from_oid(self, oid):
        """Walk the SNMP tree rooted at *oid*.

        *oid* must exist in the netlib configuration file, otherwise a
        ``KeyError`` is raised.

        :type oid: string
        :param oid: symbolic name of an SNMP oid index
        :return: {stripped_index: value} for every row under the oid
        """
        try:
            index = self.snmp_oids[oid]
        except KeyError as e:
            # TODO: use proper logging instead of print.
            # Fixed: the original used a Python 2 print *statement*, which
            # is a syntax error under Python 3.
            print("oid not present in config file")
            raise e
        return dict(_strip_oid_from_list(
            list(_walk_snmp(index, self._device.hostname,
                            self.__resolve_community_string())),
            index + "."))

    def __get_ipcidrrouteifindex(self):
        """Get routing table for use by Layer 3 object.

        This method gets the ipcidrrouteifindex routing table.
        """
        return self.walk_tree_from_oid("ipcidrrouteifindex")

    def _build_layer3_prop_routing_table(self):
        """Build routing table from device."""
        return _parse_routes(_reverse_dict(self.__get_ipcidrrouteifindex()))

    def _build_layer2_prop_cam_table(self):
        """Build cam table from device (stub implementation)."""
        return "ff-ff-ff-ff"

    def _build_device_prop_interfaces(self):
        """Return {ifIndex: {intf_name, intf_desc, intf_speed}} for the device."""
        intfs = self.__get_index("ifname")
        # Walk the description and speed tables once, instead of issuing
        # two fresh SNMP walks per interface as the original did.
        descs = self.__get_index("ifdesc")
        speeds = self.__get_index("ifspeed")
        for key in intfs:
            intfs[key] = {
                "intf_name": intfs[key],
                "intf_desc": descs[key],
                "intf_speed": speeds[key]
            }
        return intfs

    def _wrapper_layer3_device_prop_interfaces(self, func):
        """Augment *func*'s interface map with IP addresses and the
        internal pseudo-interface (index "0")."""
        res = func()
        res.update({
            "0": {"intf_name": "INTERNAL"}
        })
        for key, value in _reverse_dict(self.walk_tree_from_oid("ipaddressifindex")).items():
            res[key].update({"intf_ip": value.pop()})
        return res

    def __get_index(self, index):
        """Walk the table named *index* and return {stripped_oid: value}."""
        oid = self.snmp_oids[index]
        hostname = self._device.hostname
        return dict(_strip_oid_from_list(
            list(_walk_snmp(oid, hostname, self.__resolve_community_string())),
            oid + "."))
| 2.265625 | 2 |
visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 0 | 17186 | <reponame>joeljosephjin/gvgai-rl<gh_stars>0
import time
import os
import pickle
import warnings
from typing import Union, Type, Optional, Dict, Any, Callable
import gym
import torch as th
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.utils import safe_mean
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.common.type_aliases import GymEnv, RolloutReturn
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.buffers import ReplayBuffer
class OffPolicyAlgorithm(BaseAlgorithm):
    """
    The base for Off-Policy algorithms (ex: SAC/TD3)

    :param policy: Policy object
    :param env: The environment to learn from
        (if registered in Gym, can be str. Can be None for loading trained models)
    :param policy_base: The base policy used by this method
    :param learning_rate: (float or callable) learning rate for the optimizer,
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: (int) size of the replay buffer
    :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param batch_size: (int) Minibatch size for each gradient update
    :param policy_kwargs: Additional arguments to be passed to the policy on creation
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param verbose: The verbosity level: 0 none, 1 training information, 2 debug
    :param device: Device on which the code should run.
        By default, it will try to use a Cuda compatible device and fallback to cpu
        if it is not possible.
    :param support_multi_env: Whether the algorithm supports training
        with multiple environments (as in A2C)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param seed: Seed for the pseudo random generators
    :param use_sde: Whether to use State Dependent Exploration (SDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param use_sde_at_warmup: (bool) Whether to use gSDE instead of uniform sampling
        during the warm up phase (before learning starts)
    :param sde_support: (bool) Whether the model support gSDE or not
    """
    def __init__(self,
                 policy: Type[BasePolicy],
                 env: Union[GymEnv, str],
                 policy_base: Type[BasePolicy],
                 learning_rate: Union[float, Callable],
                 buffer_size: int = int(1e6),
                 learning_starts: int = 100,
                 batch_size: int = 256,
                 policy_kwargs: Dict[str, Any] = None,
                 tensorboard_log: Optional[str] = None,
                 verbose: int = 0,
                 device: Union[th.device, str] = 'auto',
                 support_multi_env: bool = False,
                 create_eval_env: bool = False,
                 monitor_wrapper: bool = True,
                 seed: Optional[int] = None,
                 use_sde: bool = False,
                 sde_sample_freq: int = -1,
                 use_sde_at_warmup: bool = False,
                 sde_support: bool = True):
        super(OffPolicyAlgorithm, self).__init__(policy=policy, env=env, policy_base=policy_base,
                                                 learning_rate=learning_rate, policy_kwargs=policy_kwargs,
                                                 tensorboard_log=tensorboard_log, verbose=verbose,
                                                 device=device, support_multi_env=support_multi_env,
                                                 create_eval_env=create_eval_env, monitor_wrapper=monitor_wrapper,
                                                 seed=seed, use_sde=use_sde, sde_sample_freq=sde_sample_freq)
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        # Populated by subclasses / _setup_model.
        self.actor = None  # type: Optional[th.nn.Module]
        self.replay_buffer = None  # type: Optional[ReplayBuffer]
        # Update policy keyword arguments
        if sde_support:
            self.policy_kwargs['use_sde'] = self.use_sde
        self.policy_kwargs['device'] = self.device
        # For gSDE only
        self.use_sde_at_warmup = use_sde_at_warmup

    def _setup_model(self):
        """Create the learning-rate schedule, replay buffer and policy network."""
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(self.buffer_size, self.observation_space,
                                          self.action_space, self.device)
        self.policy = self.policy_class(self.observation_space, self.action_space,
                                        self.lr_schedule, **self.policy_kwargs)
        self.policy = self.policy.to(self.device)

    def save_replay_buffer(self, path: str):
        """
        Save the replay buffer as a pickle file.

        :param path: (str) Path to a log folder
        """
        assert self.replay_buffer is not None, "The replay buffer is not defined"
        with open(os.path.join(path, 'replay_buffer.pkl'), 'wb') as file_handler:
            pickle.dump(self.replay_buffer, file_handler)

    def load_replay_buffer(self, path: str):
        """
        Load a replay buffer from a pickle file.

        :param path: (str) Path to the pickled replay buffer.
        """
        # SECURITY NOTE: pickle.load executes arbitrary code from the file;
        # only load replay buffers from trusted sources.
        with open(path, 'rb') as file_handler:
            self.replay_buffer = pickle.load(file_handler)
        assert isinstance(self.replay_buffer, ReplayBuffer), 'The replay buffer must inherit from ReplayBuffer class'

    def collect_rollouts(self,  # noqa: C901
                         env: VecEnv,
                         # Type hint as string to avoid circular import
                         callback: 'BaseCallback',
                         n_episodes: int = 1,
                         n_steps: int = -1,
                         action_noise: Optional[ActionNoise] = None,
                         learning_starts: int = 0,
                         replay_buffer: Optional[ReplayBuffer] = None,
                         log_interval: Optional[int] = None) -> RolloutReturn:
        """
        Collect experiences and store them into a ReplayBuffer.

        :param env: (VecEnv) The training environment
        :param callback: (BaseCallback) Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param n_episodes: (int) Number of episodes to use to collect rollout data
            You can also specify a ``n_steps`` instead
        :param n_steps: (int) Number of steps to use to collect rollout data
            You can also specify a ``n_episodes`` instead.
        :param action_noise: (Optional[ActionNoise]) Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: (int) Number of steps before learning for the warm-up phase.
        :param replay_buffer: (ReplayBuffer)
        :param log_interval: (int) Log data every ``log_interval`` episodes
        :return: (RolloutReturn)
        """
        episode_rewards, total_timesteps = [], []
        total_steps, total_episodes = 0, 0
        assert isinstance(env, VecEnv), "You must pass a VecEnv"
        assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment"
        if n_episodes > 0 and n_steps > 0:
            # Note we are refering to the constructor arguments
            # that are named `train_freq` and `n_episodes_rollout`
            # but correspond to `n_steps` and `n_episodes` here
            warnings.warn("You passed a positive value for `train_freq` and `n_episodes_rollout`."
                          "Please make sure this is intended. "
                          "The agent will collect data by stepping in the environment "
                          "until both conditions are true: "
                          "`number of steps in the env` >= `train_freq` and "
                          "`number of episodes` > `n_episodes_rollout`")
        if self.use_sde:
            self.actor.reset_noise()
        callback.on_rollout_start()
        continue_training = True
        # Outer loop: one iteration per episode (or until the step budget is hit).
        while total_steps < n_steps or total_episodes < n_episodes:
            done = False
            episode_reward, episode_timesteps = 0.0, 0
            while not done:
                if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0:
                    # Sample a new noise matrix
                    self.actor.reset_noise()
                # Select action randomly or according to policy
                if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
                    # Warmup phase
                    unscaled_action = np.array([self.action_space.sample()])
                else:
                    # Note: we assume that the policy uses tanh to scale the action
                    # We use non-deterministic action in the case of SAC, for TD3, it does not matter
                    unscaled_action, _ = self.predict(self._last_obs, deterministic=False)
                # Rescale the action from [low, high] to [-1, 1]
                if isinstance(self.action_space, gym.spaces.Box):
                    scaled_action = self.policy.scale_action(unscaled_action)
                    # Add noise to the action (improve exploration)
                    if action_noise is not None:
                        # NOTE: in the original implementation of TD3, the noise was applied to the unscaled action
                        # Update(October 2019): Not anymore
                        scaled_action = np.clip(scaled_action + action_noise(), -1, 1)
                    # We store the scaled action in the buffer
                    buffer_action = scaled_action
                    action = self.policy.unscale_action(scaled_action)
                else:
                    # Discrete case, no need to normalize or clip
                    buffer_action = unscaled_action
                    action = buffer_action
                # Rescale and perform action
                new_obs, reward, done, infos = env.step(action)
                # Only stop training if return value is False, not when it is None.
                if callback.on_step() is False:
                    return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False)
                episode_reward += reward
                # Retrieve reward and episode length if using Monitor wrapper
                self._update_info_buffer(infos, done)
                # Store data in replay buffer
                if replay_buffer is not None:
                    # Store only the unnormalized version
                    if self._vec_normalize_env is not None:
                        new_obs_ = self._vec_normalize_env.get_original_obs()
                        reward_ = self._vec_normalize_env.get_original_reward()
                    else:
                        # Avoid changing the original ones
                        self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward
                    replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done)
                self._last_obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    self._last_original_obs = new_obs_
                self.num_timesteps += 1
                episode_timesteps += 1
                total_steps += 1
                if 0 < n_steps <= total_steps:
                    break
            if done:
                total_episodes += 1
                self._episode_num += 1
                episode_rewards.append(episode_reward)
                total_timesteps.append(episode_timesteps)
                if action_noise is not None:
                    action_noise.reset()
                # Log training infos
                if log_interval is not None and self._episode_num % log_interval == 0:
                    fps = int(self.num_timesteps / (time.time() - self.start_time))
                    logger.record("time/episodes", self._episode_num, exclude="tensorboard")
                    if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
                        logger.record('rollout/ep_rew_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buffer]))
                        logger.record('rollout/ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buffer]))
                    logger.record("time/fps", fps)
                    logger.record('time/time_elapsed', int(time.time() - self.start_time), exclude="tensorboard")
                    # NOTE(review): these keys contain spaces ("time/total timesteps",
                    # "rollout/success rate"); upstream stable-baselines3 uses
                    # underscores - confirm whether this difference is intended.
                    logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard")
                    if self.use_sde:
                        logger.record("train/std", (self.actor.get_std()).mean().item())
                    if len(self.ep_success_buffer) > 0:
                        logger.record('rollout/success rate', safe_mean(self.ep_success_buffer))
                    # Pass the number of timesteps for tensorboard
                    logger.dump(step=self.num_timesteps)
        mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0
        callback.on_rollout_end()
        return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training)
| 1.921875 | 2 |
src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 0 | 17187 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
from oscar.core.application import (
DashboardApplication as BaseDashboardApplication)
from oscar.core.loading import get_class
class DashboardApplication(BaseDashboardApplication):
    """Root application for the Oscar dashboard.

    Aggregates every dashboard sub-application and wires them, together
    with the index/login/logout views, into a single URL configuration.
    """
    name = 'dashboard'

    # Access to the index view requires either staff status or the
    # partner dashboard-access permission.
    permissions_map = {
        'index': (['is_staff'], ['partner.dashboard_access']),
    }

    # Views and sub-apps are resolved through get_class so that projects
    # can override them without forking this module.
    index_view = get_class('dashboard.views', 'IndexView')
    reports_app = get_class('dashboard.reports.app', 'application')
    orders_app = get_class('dashboard.orders.app', 'application')
    users_app = get_class('dashboard.users.app', 'application')
    catalogue_app = get_class('dashboard.catalogue.app', 'application')
    promotions_app = get_class('dashboard.promotions.app', 'application')
    pages_app = get_class('dashboard.pages.app', 'application')
    partners_app = get_class('dashboard.partners.app', 'application')
    offers_app = get_class('dashboard.offers.app', 'application')
    ranges_app = get_class('dashboard.ranges.app', 'application')
    reviews_app = get_class('dashboard.reviews.app', 'application')
    vouchers_app = get_class('dashboard.vouchers.app', 'application')
    comms_app = get_class('dashboard.communications.app', 'application')
    shipping_app = get_class('dashboard.shipping.app', 'application')
    system_app = get_class('dashboard.system.app', 'application')

    def get_urls(self):
        """Return the URL patterns for the dashboard and all sub-apps."""
        urls = [
            url(r'^$', self.index_view.as_view(), name='index'),
            url(r'^catalogue/', self.catalogue_app.urls),
            url(r'^reports/', self.reports_app.urls),
            url(r'^orders/', self.orders_app.urls),
            url(r'^users/', self.users_app.urls),
            url(r'^content-blocks/', self.promotions_app.urls),
            url(r'^pages/', self.pages_app.urls),
            url(r'^partners/', self.partners_app.urls),
            url(r'^offers/', self.offers_app.urls),
            url(r'^ranges/', self.ranges_app.urls),
            url(r'^reviews/', self.reviews_app.urls),
            url(r'^vouchers/', self.vouchers_app.urls),
            url(r'^comms/', self.comms_app.urls),
            url(r'^shipping/', self.shipping_app.urls),
            url(r'^system/', self.system_app.urls),
            url(r'^login/$',
                auth_views.LoginView.as_view(template_name='dashboard/login.html',
                                             authentication_form=AuthenticationForm),
                name='login'),
            url(r'^logout/$', auth_views.LogoutView.as_view(next_page='/'), name='logout'),
        ]
        return self.post_process_urls(urls)


# Module-level singleton imported by the project's URL configuration.
application = DashboardApplication()
| 1.8125 | 2 |
wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 0 | 17188 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-07 20:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4 (makemigrations). Introduces the
    # gift-registry models (Cart, CartItem, Gift, GiftOrder, GiftOrderItem),
    # adds a custom view permission to RSVP, and links CartItem to Gift.
    # Auto-generated migrations should normally not be edited by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wedding', '0003_auto_20170214_1543'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('amount', models.PositiveIntegerField(verbose_name='Item count')),
                ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Gift',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=300, verbose_name='Name')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Description')),
                ('link', models.TextField(blank=True, null=True, verbose_name='Link')),
                ('price', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Price')),
                ('gift_is_part', models.BooleanField(default=False, verbose_name='Gift is part')),
                ('max_parts', models.PositiveIntegerField(verbose_name='Maximum number of parts')),
                ('taken_parts', models.PositiveIntegerField(default=0, verbose_name='Number of parts taken')),
                ('img', models.ImageField(blank=True, null=True, upload_to='')),
            ],
            options={
                'verbose_name': 'Gift',
                'verbose_name_plural': 'Gifts',
            },
        ),
        migrations.CreateModel(
            name='GiftOrder',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('voucher_from', models.CharField(max_length=300, verbose_name='Voucher is from')),
                ('voucher_greeting', models.TextField(blank=True, null=True, verbose_name='Voucher Greeting')),
                ('voucher_senddirect', models.BooleanField(default=False, verbose_name='Send voucher directly')),
                ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GiftOrderItem',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('quantity', models.PositiveIntegerField(verbose_name='Item count')),
                ('gift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.Gift')),
                ('giftorder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.GiftOrder')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AlterModelOptions(
            name='rsvp',
            options={'permissions': (('view_list', 'Can see the RSVP list'),), 'verbose_name': 'RSVP', 'verbose_name_plural': 'RSVPs'},
        ),
        migrations.AddField(
            model_name='cartitem',
            name='gift',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.Gift'),
        ),
    ]
| 1.695313 | 2 |
taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | 0 | 17189 | import pandas as pd
import pickle
def read_metric_logs(bucket_type):
    """Collect per-image test statistics for every (source, target) type pair.

    Reads the list of tissue types selected for *bucket_type* and, for each
    ordered pair of types, unpickles the corresponding ``per_image_stat.pkl``
    produced by the second-order transfer experiments.

    :param bucket_type: name of the PanNuke bucket whose logs to read
    :return: (metrics DataFrame with columns source_type/target_type/stats,
              the Series of selected types)
    """
    type_list_path = f'/l/users/shikhar.srivastava/data/pannuke/{bucket_type}/selected_types.csv'
    type_list = pd.read_csv(type_list_path)['0']
    # Accumulate rows in a list and build the DataFrame once:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and calling it per row is quadratic.
    records = []
    for source_type in type_list:
        for target_type in type_list:
            logs_path = f'/l/users/shikhar.srivastava/workspace/hover_net/logs/test/second_order/{bucket_type}/ckpts/{source_type}-{target_type}/per_image_stat.pkl'
            # Read pickle file (trusted, locally produced logs).
            with open(logs_path, 'rb') as f:
                stats = pickle.load(f)
            records.append({'source_type': source_type,
                            'target_type': target_type,
                            'stats': stats})
    metrics = pd.DataFrame(records, columns=['source_type', 'target_type', 'stats'])
    return metrics, type_list
train_DEU.py | JosephineRabbit/MLMSNet | 61 | 17190 | <filename>train_DEU.py
from D_E_U import *

# Saliency encoder (DSS network on a VGG-16 base) and the mask-refinement
# decoder U, both moved to the GPU.
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),config.BATCH_SIZE).cuda()
U = D_U().cuda()
U.cuda()

# (image directory, mask directory) pairs for training and testing.
data_dirs = [
    ("/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Image",
     "/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Mask"),
]
test_dirs = [("/home/rabbit/Datasets/SED1/SED1-Image",
              "/home/rabbit/Datasets/SED1/SED1-Mask")]

# Initialise the VGG feature extractor from pretrained weights; the
# refinement network U starts from random initialisation.
D_E.base.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/weights/vgg16_feat.pth'))
initialize_weights(U)

# One Adam optimizer per network, with GAN-style betas.
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE, betas=(0.5, 0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
BCE_loss = torch.nn.BCELoss().cuda()
def process_data_dir(data_dir):
    """Return the sorted list of full paths of every entry in *data_dir*."""
    entries = os.listdir(data_dir)
    full_paths = (os.path.join(data_dir, entry) for entry in entries)
    return sorted(full_paths)
batch_size =BATCH_SIZE
DATA_DICT = {}

# Flat lists of image/ground-truth paths collected from every data dir.
IMG_FILES = []
GT_FILES = []
IMG_FILES_TEST = []
GT_FILES_TEST = []
for dir_pair in data_dirs:
    X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
    IMG_FILES.extend(X)
    GT_FILES.extend(y)
for dir_pair in test_dirs:
    X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
    IMG_FILES_TEST.extend(X)
    GT_FILES_TEST.extend(y)
IMGS_train, GT_train = IMG_FILES, GT_FILES

# Training loader shuffles and drops the last incomplete batch; the test
# loader evaluates one image at a time in a fixed order.
train_folder = DataFolder(IMGS_train, GT_train, True)
train_data = DataLoader(train_folder, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True,
                        drop_last=True)
test_folder = DataFolder(IMG_FILES_TEST, GT_FILES_TEST, trainable=False)
test_data = DataLoader(test_folder, batch_size=1, num_workers=NUM_WORKERS, shuffle=False)
def cal_DLoss(out_m, out_e, mask, edge):
    """Sum the BCE loss of every side output against its ground truth.

    Generalized from the original hard-coded ``range(6)``: it now iterates
    over however many side outputs are actually supplied, which is
    backward compatible with the 6-output DSS network.

    :param out_m: sequence of predicted saliency maps (one per side output)
    :param out_e: sequence of predicted edge maps (one per side output)
    :param mask: ground-truth saliency mask
    :param edge: ground-truth edge map
    :return: (total mask loss, total edge loss)
    """
    D_masks_loss = sum(F.binary_cross_entropy(pred, mask) for pred in out_m)
    D_edges_loss = sum(F.binary_cross_entropy(pred, edge) for pred in out_e)
    return (D_masks_loss, D_edges_loss)
best_eval = None
x = 0   # global step counter
ma = 1  # running MAE of the current batch
for epoch in range(1, config.NUM_EPOCHS + 1):
    sum_train_mae = 0
    sum_train_loss = 0
    sum_train_gan = 0
    ##train
    for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(train_data):
        D_E.train()
        x = x + 1
        # print(img_batch.size())
        label_batch = Variable(label_batch).cuda()
        # print(torch.typename(label_batch))
        print('training start!!')
        # for iter, (x_, _) in enumerate(train_data):
        img_batch = Variable(img_batch.cuda())  # ,Variable(z_.cuda())
        edges = Variable(edges).cuda()
        ##########DSS#########################
        ######train dis
        ##fake
        # Step 1: update the encoder D_E on its own side-output
        # mask/edge losses.
        f,y1,y2 = D_E(img_batch)
        m_l_1,e_l_1 = cal_DLoss(y1,y2,label_batch,edges)
        DE_optimizer.zero_grad()
        DE_l_1 = m_l_1 +e_l_1
        DE_l_1.backward()
        DE_optimizer.step()
        # Per-scale weights for the refinement losses.
        w = [2,2,3,3]
        # Step 2: re-run the (updated) encoder, refine with U, and update
        # D_E again on the refined predictions (masks[4] is the fused map).
        f, y1, y2 = D_E(img_batch)
        masks,DIC = U(f)
        pre_ms_l = 0
        ma = torch.abs(label_batch-masks[4]).mean()
        pre_m_l = F.binary_cross_entropy(masks[4],label_batch)
        for i in range(4):
            pre_ms_l +=w[i] * F.binary_cross_entropy(masks[i],label_batch)
        DE_optimizer.zero_grad()
        DE_l_1 = pre_ms_l/20+30*pre_m_l
        DE_l_1.backward()
        DE_optimizer.step()
        # Step 3: same refined loss, but this time updating only U.
        f, y1, y2 = D_E(img_batch)
        masks,DIC = U(f)
        pre_ms_l = 0
        ma = torch.abs(label_batch-masks[4]).mean()
        pre_m_l = F.binary_cross_entropy(masks[4], label_batch)
        for i in range(4):
            pre_ms_l += w[i] * F.binary_cross_entropy(masks[i], label_batch)
        U_optimizer.zero_grad()
        U_l_1 = pre_ms_l/20+30*pre_m_l
        U_l_1.backward()
        U_optimizer.step()
        sum_train_mae += ma.data.cpu()
        print("Epoch:{}\t {}/{}\ \t mae:{}".format(epoch, iter_cnt + 1,
                                                   len(train_folder) / config.BATCH_SIZE,
                                                   sum_train_mae / (iter_cnt + 1)))
    ##########save model
    # torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
    # The encoder is checkpointed per epoch; U is overwritten every epoch.
    torch.save(D_E.state_dict(), './checkpoint/DSS/with_e_2/D_Eepoch%d.pkl' % epoch)
    torch.save(U.state_dict(), './checkpoint/DSS/with_e_2/Uis.pkl')
    print('model saved')
    ###############test
    eval1 = 0
    eval2 = 0
    t_mae = 0
    for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(test_data):
        D_E.eval()
        U.eval()
        label_batch = Variable(label_batch).cuda()
        print('val!!')
        # for iter, (x_, _) in enumerate(train_data):
        img_batch = Variable(img_batch.cuda())  # ,Variable(z_.cuda())
        f,y1,y2 = D_E(img_batch)
        masks, DIC = U(f)
        # NOTE(review): `.data[0]` only works on very old PyTorch; on >=0.4
        # a 0-dim tensor requires `.item()` - confirm the targeted version.
        mae_v2 = torch.abs(label_batch - masks[4]).mean().data[0]
        # eval1 += mae_v1
        eval2 += mae_v2
    # m_eval1 = eval1 / (iter_cnt + 1)
    # Mean test MAE for this epoch, appended to a plain-text results log.
    m_eval2 = eval2 / (iter_cnt + 1)
    print("test mae", m_eval2)
    with open('results1.txt', 'a+') as f:
        f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
| 2 | 2 |
solutions/1497_check_if_array_pairs_are_divisible_by_k.py | YiqunPeng/leetcode_pro | 0 | 17191 | <reponame>YiqunPeng/leetcode_pro<gh_stars>0
class Solution:
    def canArrange(self, arr: List[int], k: int) -> bool:
        """Check whether *arr* can be split into pairs whose sums are all
        divisible by *k*.

        Counts residues modulo k: residue r must be matched by an equal
        count of residue k - r, while the self-pairing residues (0 and,
        for even k, k/2) must occur an even number of times.

        Running time: O(n) where n == len(arr).
        """
        counts = collections.defaultdict(int)
        for a in arr:
            counts[a % k] += 1
        for r, cnt in counts.items():
            if r == 0 or 2 * r == k:
                # Self-pairing residues need an even count. The original
                # code compared counts[k/2] against itself, which always
                # passed even for odd counts.
                if cnt % 2 == 1:
                    return False
            elif cnt != counts.get(k - r, 0):
                # .get avoids defaultdict inserting a new key while the
                # dict is being iterated.
                return False
        return True
| 2.890625 | 3 |
machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | 0 | 17192 | # Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt

from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *

# Interactive plotting, so figures do not block script execution.
plt.ion()

# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')

# Debug output demonstrating numpy slicing on the first data row.
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))

X = data[:, 0:2]  # feature matrix: the two exam scores
y = data[:, 2]    # labels: admitted (1) / not admitted (0)

# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')

plot_data(X, y)

plt.axis([30, 100, 30, 100])
# Specified in plot order.
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')

input('Program paused. Press ENTER to continue')

# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py

# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape

# Add intercept term
X = np.c_[np.ones(m), X]

# Initialize fitting parameters (the weight vector theta) to zeros
initial_theta = np.zeros(n + 1)

# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)

np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')

# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)

print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')

input('Program paused. Press ENTER to continue')

# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta


def cost_func(t):
    """Logistic-regression cost at theta ``t`` (closes over the module-level X, y)."""
    return cf.cost_function(t, X, y)[0]


def grad_func(t):
    """Gradient of the logistic-regression cost at theta ``t`` (closes over X, y)."""
    return cf.cost_function(t, X, y)[1]


# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)

print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')

# Plot the decision boundary
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')

input('Program paused. Press ENTER to continue')

# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll like to use it to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py

# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')

# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')

input('ex2 Finished. Press ENTER to exit')
| 4.09375 | 4 |
tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 0 | 17193 | <filename>tests/test_apiFunc.py
# -*- coding: utf-8 -*-
import pytest
import goldsberry
# Each entry is (converter function, valid input key, expected API string).
test_data = [
    (goldsberry._nbaLeague, 'NBA', '00'),
    (goldsberry._nbaLeague, 'WNBA', '10'),
    (goldsberry._nbaLeague, 'NBADL', '20'),
    (goldsberry._nbaSeason, 1999, '1999-00'),
    (goldsberry._nbaSeason, 2000, '2000-01'),
    (goldsberry._seasonID, 1999, '21999'),
    (goldsberry._measureType, 1, 'Base'),
    (goldsberry._measureType, 2, 'Advanced'),
    (goldsberry._Scope, 1, ''),
    (goldsberry._PerModeSmall48, 1, 'Totals'),
    (goldsberry._PerModeSmall36, 1, 'Totals'),
    (goldsberry._PerModeMini, 1, 'Totals'),
    (goldsberry._PerModeLarge, 1, 'Totals'),
    (goldsberry._AheadBehind, 1, 'Ahead or Behind'),
    (goldsberry._ClutchTime, 1, 'Last 5 Minutes'),
    (goldsberry._GameScope, 2, 'Yesterday'),
    (goldsberry._PlayerExperience, 2, 'Rookie'),
    (goldsberry._PlayerPosition, 2, 'F'),
    (goldsberry._StarterBench, 2, 'Starters'),
    (goldsberry._PlusMinus, 2, 'Y'),
    (goldsberry._PaceAdjust, 2, 'Y'),
    (goldsberry._Rank, 2, 'Y'),
    (goldsberry._SeasonType, 1, 'Regular Season'),
    (goldsberry._SeasonType4, 1, 'Regular Season'),
    (goldsberry._Outcome, 2, 'W'),
    (goldsberry._Location, 2, 'Home'),
    (goldsberry._SeasonSegment, 2, 'Post All-Star'),
    (goldsberry._VsConference, 2, 'East'),
    (goldsberry._VsDivision, 2, 'Atlantic'),
    (goldsberry._GameSegment, 2, 'First Half'),
    (goldsberry._DistanceRange, 1, '5ft Range'),
    (goldsberry._valiDate, '', ''),
    (goldsberry._valiDate, '2015-01-02', '2015-01-02'),
    (goldsberry._ContextMeasure, 1, 'FGM'),
    (goldsberry._Position, 2, 'Guard'),
    (goldsberry._StatCategory, 1, 'MIN'),
]


@pytest.mark.parametrize("func,key,response", test_data)
def test_api_func(func, key, response):
    """Each converter maps a valid key to the expected API parameter string."""
    assert func(key) == response


# Invalid / out-of-range keys must be rejected with a ValueError.
@pytest.mark.parametrize('func,key', [
    (goldsberry._nbaLeague, 'BAD VALUE'),
    (goldsberry._nbaSeason, -1),
    (goldsberry._seasonID, -1),
    (goldsberry._measureType, -1),
    (goldsberry._Scope, -1),
    (goldsberry._PerModeSmall48, -1),
    (goldsberry._PerModeSmall36, -1),
    (goldsberry._PerModeMini, -1),
    (goldsberry._PerModeLarge, -1),
    (goldsberry._AheadBehind, -1),
    (goldsberry._ClutchTime, -1),
    (goldsberry._GameScope, -1),
    (goldsberry._PlayerExperience, -1),
    (goldsberry._PlayerPosition, -1),
    (goldsberry._StarterBench, -1),
    (goldsberry._PlusMinus, 0),
    (goldsberry._PaceAdjust, 0),
    (goldsberry._Rank, 0),
    (goldsberry._SeasonType, 0),
    (goldsberry._SeasonType4, 0),
    (goldsberry._Outcome, 0),
    (goldsberry._Location, 0),
    (goldsberry._SeasonSegment, 0),
    (goldsberry._VsConference, 0),
    (goldsberry._VsDivision, 0),
    (goldsberry._GameSegment, 0),
    (goldsberry._DistanceRange, 0),
    (goldsberry._valiDate, 'date'),
    (goldsberry._ContextMeasure, 0),
    (goldsberry._Position, 0),
    (goldsberry._StatCategory, 0)
])
def test_api_func_raises_valueerror(func, key):
    """Each converter raises ValueError for an unknown or out-of-range key."""
    with pytest.raises(ValueError):
        func(key)
| 1.890625 | 2 |
sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | 0 | 17194 | #poly_gauss_coil model
#conversion of Poly_GaussCoil.py
#converted by <NAME>, Mar 2016
r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.
To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
Z &= [(q R_g)^2] / (1 + 2U) \\
U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$, is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*, Academic Press, (1982) Page 404
.. [#] <NAME>, <NAME>, *Polymers and Neutron Scattering*, Oxford Science Publications, (1996)
.. [#] <NAME>, *Small Angle Neutron Scattering* in *Modern Techniques for Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, expm1, power

# sasmodels model metadata
name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"

description = """
Evaluates the scattering from
polydisperse polymer chains.
"""
category = "shape-independent"

# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
    ["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
    ["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
    ["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
def Iq(q, i_zero, rg, polydispersity):
    """Polydisperse Gaussian-coil scattering intensity.

    Args:
        q: array of scattering vector magnitudes (1/Ang).
        i_zero: forward scattering intensity I(0) (1/cm).
        rg: radius of gyration (Ang).
        polydispersity: Mw/Mn ratio; exactly 1.0 selects the
            monodisperse Debye form.

    Returns:
        Array of I(q) values, same shape as ``q``.
    """
    u_ratio = polydispersity - 1.0
    zed = q * q * (rg * rg / (1.0 + 2.0 * u_ratio))
    if polydispersity == 1.0:
        # Monodisperse limit: Debye function 2*(exp(-z) - 1 + z) / z^2.
        form = 2.0 * (expm1(-zed) + zed)
        nonzero = q != 0.
        form[nonzero] /= zed[nonzero] ** 2
        form[~nonzero] = 1.0
    else:
        # Schulz-Zimm distribution; a Taylor expansion around z = 0 avoids
        # catastrophic cancellation in (1 + u z)^(-1/u) + z - 1 for small z.
        taylor_coeffs = [
            (1.0 + 5.0 * u_ratio + 6.0 * u_ratio ** 2) / 12.,
            (-1.0 - 2.0 * u_ratio) / 3.,
            1.0,
        ]
        form = 2.0 * (power(1.0 + u_ratio * zed, -1.0 / u_ratio) + zed - 1.0) / (1.0 + u_ratio)
        large = zed > 1e-4
        form[large] /= zed[large] ** 2
        form[~large] = np.polyval(taylor_coeffs, zed[~large])
    return i_zero * form
Iq.vectorized = True  # Iq accepts an array of q values
def random():
    """Draw a random parameter set for the model.

    ``rg`` is log-uniform over [1, 1e4] Ang, ``polydispersity`` is
    log-uniform over [1, 1e3], and ``i_zero`` is a fixed simple scale.
    """
    gyration_radius = 10.0 ** np.random.uniform(0, 4)
    mw_over_mn = 10.0 ** np.random.uniform(0, 3)
    return {
        # i_zero is a simple scale
        'i_zero': 1e7,
        'rg': gyration_radius,
        'polydispersity': mw_over_mn,
    }
# Default parameter values for the interactive sasview demo.
demo = dict(scale=1.0,
            i_zero=70.0,
            rg=75.0,
            polydispersity=2.0,
            background=0.0)

# Regression tests: [{parameters}, [q values], [expected I(q)]].
# these unit test values taken from SasView 3.1.2
tests = [
    [{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
      'polydispersity': 2.0, 'background': 0.0},
     [0.0106939, 0.469418], [57.6405, 0.169016]],
]
| 2.515625 | 3 |
examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | 1 | 17195 | <filename>examples/information_extraction/msra_ner/eval.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import ast
import random
import time
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
from paddlenlp.metrics import ChunkEvaluator
# Command-line arguments for the evaluation script.
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(list(BertTokenizer.pretrained_init_configuration.keys())))
parser.add_argument("--init_checkpoint_path", default=None, type=str, required=True, help="The model checkpoint path.", )
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", )
parser.add_argument("--batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", )
parser.add_argument("--device", default="gpu", type=str, choices=["cpu", "gpu", "xpu"] ,help="The device to select to train the model, is must be cpu/gpu/xpu.")
# yapf: enable
def tokenize_and_align_labels(example, tokenizer, no_entity_id,
                              max_seq_len=512):
    """Tokenize one example and align its token-level NER labels.

    Args:
        example (dict): holds 'tokens' (list of str) and 'labels'
            (list of int), one label per token.
        tokenizer: a paddlenlp tokenizer, called with
            ``is_split_into_words=True`` so tokens map onto input ids.
        no_entity_id (int): label id used for [CLS]/[SEP] and padding
            positions (the "no entity" tag).
        max_seq_len (int): maximum sequence length passed to the tokenizer.

    Returns:
        dict: the tokenizer output with an aligned 'labels' list added,
        the same length as 'input_ids'.
    """
    labels = example['labels']
    example = example['tokens']
    tokenized_input = tokenizer(
        example,
        return_length=True,
        is_split_into_words=True,
        max_seq_len=max_seq_len)

    # -2 for [CLS] and [SEP]: if the tokenizer truncated the sequence,
    # truncate the labels to match.
    if len(tokenized_input['input_ids']) - 2 < len(labels):
        labels = labels[:len(tokenized_input['input_ids']) - 2]
    # Surround the labels with the no-entity tag for [CLS]/[SEP] ...
    tokenized_input['labels'] = [no_entity_id] + labels + [no_entity_id]
    # ... and pad the tail so labels and input_ids have equal length.
    tokenized_input['labels'] += [no_entity_id] * (
        len(tokenized_input['input_ids']) - len(tokenized_input['labels']))
    return tokenized_input
def do_eval(args):
    """Evaluate a BERT token-classification model on the MSRA NER test set.

    Loads the msra_ner dataset, restores model weights from
    ``args.init_checkpoint_path`` (if given) and prints the average loss
    plus chunk-level precision, recall and F1.
    """
    paddle.set_device(args.device)

    # Create dataset, tokenizer and dataloader.
    train_ds, eval_ds = load_dataset(
        'msra_ner', splits=('train', 'test'), lazy=False)

    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)

    label_list = train_ds.label_list
    label_num = len(label_list)
    # by convention the last label id is the "no entity" (O) tag
    no_entity_id = label_num - 1
    trans_func = partial(
        tokenize_and_align_labels,
        tokenizer=tokenizer,
        no_entity_id=no_entity_id,
        max_seq_len=args.max_seq_length)

    # positions padded with -100 are ignored by the cross-entropy loss below
    ignore_label = -100
    batchify_fn = lambda samples, fn=Dict({
        'input_ids': Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
        'token_type_ids': Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # segment
        'seq_len': Stack(),
        'labels': Pad(axis=0, pad_val=ignore_label)  # label
    }): fn(samples)
    eval_ds = eval_ds.map(trans_func)
    eval_data_loader = DataLoader(
        dataset=eval_ds,
        collate_fn=batchify_fn,
        num_workers=0,
        batch_size=args.batch_size,
        return_list=True)

    # Define the model network and its loss
    model = BertForTokenClassification.from_pretrained(
        args.model_name_or_path, num_classes=label_num)
    if args.init_checkpoint_path:
        model_dict = paddle.load(args.init_checkpoint_path)
        model.set_dict(model_dict)
    loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)

    metric = ChunkEvaluator(label_list=label_list)

    model.eval()
    metric.reset()
    # accumulate chunk statistics over the whole test set
    for step, batch in enumerate(eval_data_loader):
        input_ids, token_type_ids, length, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = loss_fct(logits, labels)
        avg_loss = paddle.mean(loss)
        preds = logits.argmax(axis=2)
        num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
            length, preds, labels)
        metric.update(num_infer_chunks.numpy(),
                      num_label_chunks.numpy(), num_correct_chunks.numpy())
        precision, recall, f1_score = metric.accumulate()
    # NOTE(review): avg_loss printed here is only the loss of the final
    # batch, not a running average -- confirm this is intended.
    print("eval loss: %f, precision: %f, recall: %f, f1: %f" %
          (avg_loss, precision, recall, f1_score))
# Script entry point: parse CLI arguments and run the evaluation.
if __name__ == "__main__":
    args = parser.parse_args()
    do_eval(args)
| 2.15625 | 2 |
src/python/module/z5py/util.py | constantinpape/z5 | 82 | 17196 | import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def product1d(inrange):
    """Lazy 1d stand-in for ``itertools.product(*inrange)``.

    ``itertools.product`` materializes its input iterators as tuples, which
    can exhaust memory for very large 1d ranges; this generator yields the
    same 1-tuples lazily instead.

    Args:
        inrange (sequence): sequence holding exactly one iterable of indices.

    Yields:
        tuple: 1-tuples ``(i,)``, exactly like ``itertools.product(*inrange)``.
    """
    # bug fix: the previous implementation iterated ``inrange`` itself,
    # yielding the whole inner range object once instead of one 1-tuple per
    # index -- so the MemoryError fallback in ``blocking`` produced a single
    # wrong block instead of one block per start index.
    for ii in inrange[0]:
        yield (ii,)
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
    """ Generator for nd blocking.

    Args:
        shape (tuple): nd shape
        block_shape (tuple): nd block shape
        roi (tuple[slice]): region of interest (default: None)
        center_blocks_at_roi (bool): if given a roi,
            whether to center the blocks being generated
            at the roi's origin (default: False)

    Yields:
        tuple[slice]: per-dimension slices of one (non-empty) block,
            clipped to the shape / roi.
    """
    assert len(shape) == len(block_shape), "Invalid number of dimensions."

    if roi is None:
        # compute the ranges for the full shape
        ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
                  for sha, bsha in zip(shape, block_shape)]
        min_coords = [0] * len(shape)
        max_coords = shape
    else:
        # make sure that the roi is valid
        roi, _ = normalize_slices(roi, shape)
        ranges = [range(rr.start // bsha,
                        rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
                  for rr, bsha in zip(roi, block_shape)]
        min_coords = [rr.start for rr in roi]
        max_coords = [rr.stop for rr in roi]

    need_shift = False
    if roi is not None and center_blocks_at_roi:
        shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
        need_shift = sum(shift) > 0

    # itertools.product raises MemoryError for too large ranges, because the
    # input iterators are cast to tuple. So far this has only been observed
    # for 1d "open-ended" datasets, hence only a 1d fallback is implemented:
    # a generator expression that lazily yields the same 1-tuples.
    # (bug fix: the old fallback passed the whole ``ranges`` list to
    # ``product1d``, which yielded the range object itself once instead of
    # one start point per index.)
    try:
        start_points = product(*ranges)
    except MemoryError:
        assert len(ranges) == 1
        start_points = ((ii,) for ii in ranges[0])

    for start_point in start_points:
        positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
        # if we shift blocks to center them w.r.t. the roi, we may get
        # out-of-range blocks; skip them
        if need_shift:
            positions = [pos + sh for pos, sh in zip(positions, shift)]
        # bug fix: use >= so that shifted blocks starting exactly at the
        # upper bound (which would be empty slices) are skipped as well
        if any(pos >= maxc for pos, maxc in zip(positions, max_coords)):
            continue
        yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
                    for pos, bsha, minc, maxc in zip(positions, block_shape,
                                                     min_coords, max_coords))
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
                      n_threads, chunks=None, block_shape=None, dtype=None,
                      roi=None, fit_to_roi=False, **new_compression):
    """ Implementation of copy dataset.

    Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
    Can also be used for more flexible use cases, like copying from a zarr/n5
    cloud dataset to a filesytem dataset.

    Args:
        f_in (File): input file object.
        f_out (File): output file object.
        in_path_in_file (str): name of input dataset.
        out_path_in_file (str): name of output dataset.
        n_threads (int): number of threads used for copying.
        chunks (tuple): chunks of the output dataset.
            By default same as input dataset's chunks. (default: None)
        block_shape (tuple): block shape used for copying. Must be a multiple
            of ``chunks``, which are used by default (default: None)
        dtype (str): datatype of the output dataset, default does not change datatype (default: None).
        roi (tuple[slice]): region of interest that will be copied. (default: None)
        fit_to_roi (bool): if given a roi, whether to set the shape of
            the output dataset to the roi's shape
            and align chunks with the roi's origin. (default: False)
        **new_compression: compression library and options for output dataset. If not given,
            the same compression as in the input is used.
    """
    ds_in = f_in[in_path_in_file]

    # check if we can copy chunk by chunk: only possible if both sides are
    # z5 files, the chunk layout is unchanged and no roi restricts the copy
    in_is_z5 = isinstance(f_in, (File, S3File))
    out_is_z5 = isinstance(f_out, (File, S3File))
    copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)

    # get dataset metadata from input dataset if defaults were given
    chunks = ds_in.chunks if chunks is None else chunks
    dtype = ds_in.dtype if dtype is None else dtype

    # zarr objects may not have compression attribute. if so set it to the settings sent to this function
    if not hasattr(ds_in, "compression"):
        ds_in.compression = new_compression
    compression = new_compression.pop("compression", ds_in.compression)
    compression_opts = new_compression
    same_lib = in_is_z5 == out_is_z5
    # reuse the input's compression options when the compression is unchanged
    # within the same library
    if same_lib and compression == ds_in.compression:
        compression_opts = compression_opts if compression_opts else ds_in.compression_opts
    if out_is_z5:
        # z5 denotes uncompressed storage with ``None`` instead of 'raw'
        compression = None if compression == 'raw' else compression
        compression_opts = {} if compression_opts is None else compression_opts
    else:
        compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts

    # if we don't have block-shape explicitly given, use chunk size
    # otherwise check that it's a multiple of chunks
    if block_shape is None:
        block_shape = chunks
    else:
        assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
            "block_shape must be a multiple of chunks"

    shape = ds_in.shape
    # we need to create the blocking here, before the shape is potentially altered
    # if fit_to_roi == True
    blocks = blocking(shape, block_shape, roi, fit_to_roi)
    if roi is not None:
        roi, _ = normalize_slices(roi, shape)
        if fit_to_roi:
            shape = tuple(rr.stop - rr.start for rr in roi)

    ds_out = f_out.require_dataset(out_path_in_file,
                                   dtype=dtype,
                                   shape=shape,
                                   chunks=chunks,
                                   compression=compression,
                                   **compression_opts)

    def write_single_block(bb):
        # generic block-wise copy through numpy slicing
        data_in = ds_in[bb].astype(dtype, copy=False)
        # skip all-zero blocks (assumes 0 is the fill value -- writing them
        # would only create trivial chunks on disk)
        if np.sum(data_in) == 0:
            return
        if fit_to_roi and roi is not None:
            # shift block coordinates so the output starts at the roi origin
            bb = tuple(slice(b.start - rr.start, b.stop - rr.start)
                       for b, rr in zip(bb, roi))
        ds_out[bb] = data_in

    def write_single_chunk(bb):
        # fast path: copy the raw chunk without decompress/recompress logic
        # at the numpy level
        chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
        chunk_in = ds_in.read_chunk(chunk_id)
        if chunk_in is None:
            return
        # check if this is a varlen chunk (stored shape differs from the
        # regular block shape)
        varlen = tuple(chunk_in.shape) != tuple(b.stop - b.start for b in bb)
        ds_out.write_chunk(chunk_id, chunk_in.astype(dtype, copy=False), varlen)

    write_single = write_single_chunk if copy_chunks else write_single_block

    # copy all blocks in parallel
    with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
        tasks = [tp.submit(write_single, bb) for bb in blocks]
        [t.result() for t in tasks]

    # copy attributes
    in_attrs = ds_in.attrs
    out_attrs = ds_out.attrs
    for key, val in in_attrs.items():
        out_attrs[key] = val
def copy_dataset(in_path, out_path,
                 in_path_in_file, out_path_in_file,
                 n_threads, chunks=None,
                 block_shape=None, dtype=None,
                 use_zarr_format=None, roi=None,
                 fit_to_roi=False, **new_compression):
    """ Copy a dataset from one file to another, optionally changing metadata.

    The input dataset is copied to the output dataset chunk by chunk.
    Chunk layout, datatype, file format and compression may be changed on
    the way, and the copy can be restricted to a region of interest.

    Args:
        in_path (str): path to the input file.
        out_path (str): path to the output file.
        in_path_in_file (str): name of the input dataset.
        out_path_in_file (str): name of the output dataset.
        n_threads (int): number of threads used for copying.
        chunks (tuple): chunks of the output dataset,
            defaults to the input dataset's chunks (default: None).
        block_shape (tuple): block shape used for copying; must be a
            multiple of ``chunks``, which are used by default (default: None).
        dtype (str): datatype of the output dataset,
            defaults to the input datatype (default: None).
        use_zarr_format (bool): file format of the output file,
            defaults to the input file's format (default: None).
        roi (tuple[slice]): region of interest to copy (default: None).
        fit_to_roi (bool): if given a roi, whether to shape the output
            dataset to the roi and align chunks with its origin
            (default: False).
        **new_compression: compression library and options for the output
            dataset; defaults to the input compression.
    """
    src_file = File(in_path)
    # keep the input file's format unless the caller explicitly chose one
    if use_zarr_format is None:
        out_format_is_zarr = src_file.is_zarr
    else:
        out_format_is_zarr = use_zarr_format
    dst_file = File(out_path, use_zarr_format=out_format_is_zarr)
    copy_dataset_impl(src_file, dst_file,
                      in_path_in_file, out_path_in_file, n_threads,
                      chunks=chunks, block_shape=block_shape, dtype=dtype,
                      roi=roi, fit_to_roi=fit_to_roi, **new_compression)
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
    """ Copy a group recursively.

    Datasets encountered in the group are copied with ``copy_dataset``
    using default settings, i.e. their metadata cannot be changed and no
    roi can be applied; sub-group attributes are copied as well.

    Args:
        in_path (str): path to the input file.
        out_path (str): path to the output file.
        in_path_in_file (str): name of the input group.
        out_path_in_file (str): name of the output group.
        n_threads (int): number of threads used to copy datasets.
    """
    src = File(in_path)
    dst = File(out_path)

    def _transfer_attrs(from_obj, to_obj):
        # copy all attributes key by key
        target = to_obj.attrs
        for key, val in from_obj.attrs.items():
            target[key] = val

    root_in = src[in_path_in_file]
    root_out = dst.require_group(out_path_in_file)
    _transfer_attrs(root_in, root_out)

    def _visit(name, node):
        key_in = os.path.join(in_path_in_file, name)
        key_out = os.path.join(out_path_in_file, name)
        if isinstance(node, Dataset):
            copy_dataset(in_path, out_path, key_in, key_out, n_threads)
        else:
            # sub-group: create it and carry over its attributes
            _transfer_attrs(node, dst.require_group(key_out))

    root_in.visititems(_visit)
class Timer:
    """Measure wall-clock durations; also usable as a context manager."""

    def __init__(self):
        # UTC timestamps recorded by start() / stop()
        self.start_time = None
        self.stop_time = None

    def start(self):
        """Record the start timestamp."""
        self.start_time = datetime.utcnow()

    def stop(self):
        """Record the stop timestamp and return the elapsed seconds."""
        self.stop_time = datetime.utcnow()
        return self.elapsed

    @property
    def elapsed(self):
        """Seconds between start() and stop().

        Raises:
            RuntimeError: if the timer was not both started and stopped.
        """
        try:
            return (self.stop_time - self.start_time).total_seconds()
        except TypeError as err:
            # subtracting None signals a missing start/stop call
            if "'NoneType'" in str(err):
                raise RuntimeError("{} either not started, or not stopped".format(self))

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
def fetch_test_data_stent():
    """Load imageio's bundled stent sample volume, rescaled to uint8."""
    from imageio import volread
    volume = volread('imageio:stent.npz')
    # normalize to [0, 255] and cast down from the original integer range
    return (volume / volume.max() * 255).astype(np.uint8)
def fetch_test_data():
    """Download the ImageJ t1-head example volume and return it as uint8.

    Fetches a zip archive from imagej.nih.gov, extracts the contained tif
    stack and reads it with imageio. Requires network access.
    """
    # py2/py3 compatible imports for url handling and in-memory buffers
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    try:
        from io import BytesIO as Buffer
    except ImportError:
        from StringIO import StringIO as Buffer
    import zipfile
    from imageio import volread

    im_url = "https://imagej.nih.gov/ij/images/t1-head-raw.zip"
    with closing(urlopen(im_url)) as response:
        # NOTE(review): ``response.status`` only exists on py3's
        # HTTPResponse; the py2 fallback above would need ``getcode()`` --
        # confirm whether py2 support is still intended.
        if response.status != 200:
            raise RuntimeError("Test data could not be found at {}, status code {}".format(
                im_url, response.status
            ))
        zip_buffer = Buffer(response.read())
        with zipfile.ZipFile(zip_buffer) as zf:
            tif_buffer = Buffer(zf.read('JeffT1_le.tif'))
            return np.asarray(volread(tif_buffer, format='tif'), dtype=np.uint8)
def remove_trivial_chunks(dataset, n_threads,
                          remove_specific_value=None):
    """ Remove chunks that contain only a single value.

    Args:
        dataset (z5py.Dataset)
        n_threads (int): number of threads
        remove_specific_value (int or float): if given, only chunks filled
            with exactly this value are removed (default: None)
    """
    # dispatch to the dtype-specialized native implementation
    impl = getattr(_z5py, 'remove_trivial_chunks_%s' % dataset.dtype)
    if remove_specific_value is None:
        impl(dataset._impl, n_threads, False, 0)
    else:
        impl(dataset._impl, n_threads, True, remove_specific_value)
def remove_dataset(dataset, n_threads):
    """ Remove a dataset multi-threaded.

    Args:
        dataset (z5py.Dataset): dataset to remove
        n_threads (int): number of threads used for removal
    """
    _z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
    """ Remove a single chunk.

    Args:
        dataset (z5py.Dataset): dataset to remove the chunk from
        chunk_id (tuple): nd index of the chunk
    """
    # bug fix: ``remove_chunk`` is accessed as a bound method of the
    # implementation object, so the previous code passed ``dataset._impl``
    # twice (once implicitly as the receiver, once explicitly).
    dataset._impl.remove_chunk(chunk_id)
def remove_chunks(dataset, bounding_box):
    """ Remove all chunks overlapping the bounding box.

    Args:
        dataset (z5py.Dataset)
        bounding_box (tuple[slice]): region whose overlapping chunks
            are removed
    """
    chunk_shape = dataset.chunks
    # iterate over chunk-aligned blocks inside the bounding box and drop
    # the chunk that each block belongs to
    for block in blocking(dataset.shape, chunk_shape, roi=bounding_box):
        chunk_index = tuple(b.start // ch for b, ch in zip(block, chunk_shape))
        remove_chunk(dataset, chunk_index)
def unique(dataset, n_threads, return_counts=False):
    """ Find the unique values in a dataset.

    Args:
        dataset (z5py.Dataset)
        n_threads (int): number of threads
        return_counts (bool): whether to also return the count of each
            unique value (default: False)
    """
    # select the dtype-specialized native kernel, with or without counts
    template = 'unique_with_counts_%s' if return_counts else 'unique_%s'
    kernel = getattr(_z5py, template % dataset.dtype)
    return kernel(dataset._impl, n_threads)
| 2.28125 | 2 |
custom_components/waste_collection_schedule/sensor.py | trstns/hacs_waste_collection_schedule | 0 | 17197 | <reponame>trstns/hacs_waste_collection_schedule
"""Sensor platform support for Waste Collection Schedule."""
import collections
import datetime
import logging
from enum import Enum
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_VALUE_TEMPLATE, STATE_UNKNOWN
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
_LOGGER = logging.getLogger(__name__)
# Configuration keys accepted by this sensor platform.
CONF_SOURCE_INDEX = "source_index"      # index of the scraper source to use
CONF_DETAILS_FORMAT = "details_format"  # layout of the state attributes
CONF_COUNT = "count"                    # max number of upcoming events
CONF_LEADTIME = "leadtime"              # look-ahead window in days
CONF_DATE_TEMPLATE = "date_template"    # template used to render dates
CONF_APPOINTMENT_TYPES = "types"        # restrict to these appointment types
class DetailsFormat(Enum):
    """Values for CONF_DETAILS_FORMAT."""

    upcoming = "upcoming"  # list of "<date> <type1, type2, ...>"
    appointment_types = "appointment_types"  # list of "<type> <date>"
    generic = "generic"  # all values in separate attributes
# Extend the base sensor platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_SOURCE_INDEX, default=0): cv.positive_int,
        vol.Optional(CONF_DETAILS_FORMAT, default="upcoming"): cv.enum(DetailsFormat),
        vol.Optional(CONF_COUNT): cv.positive_int,
        vol.Optional(CONF_LEADTIME): cv.positive_int,
        vol.Optional(CONF_APPOINTMENT_TYPES): cv.ensure_list,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_DATE_TEMPLATE): cv.template,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the waste collection schedule sensor platform from yaml config."""
    value_template = config.get(CONF_VALUE_TEMPLATE)
    if value_template is not None:
        value_template.hass = hass

    date_template = config.get(CONF_DATE_TEMPLATE)
    if date_template is not None:
        date_template.hass = hass

    entities = []

    # one sensor entity per platform configuration entry
    entities.append(
        ScheduleSensor(
            hass=hass,
            api=hass.data[DOMAIN],
            name=config[CONF_NAME],
            source_index=config[CONF_SOURCE_INDEX],
            details_format=config[CONF_DETAILS_FORMAT],
            count=config.get(CONF_COUNT),
            leadtime=config.get(CONF_LEADTIME),
            appointment_types=config.get(CONF_APPOINTMENT_TYPES),
            value_template=value_template,
            date_template=date_template,
        )
    )

    async_add_entities(entities)
class ScheduleSensor(Entity):
"""Base for sensors."""
    def __init__(
        self,
        hass,
        api,
        name,
        source_index,
        details_format,
        count,
        leadtime,
        appointment_types,
        value_template,
        date_template,
    ):
        """Initialize the entity."""
        self._api = api
        self._name = name
        self._source_index = source_index  # index into the api's scraper sources
        self._details_format = details_format  # DetailsFormat for the attributes
        self._count = count  # max number of upcoming events shown
        self._leadtime = leadtime  # look-ahead window in days
        self._appointment_types = appointment_types  # optional type filter
        self._value_template = value_template  # template rendering the state
        self._date_template = date_template  # template rendering dates
        self._state = STATE_UNKNOWN
        self._icon = None
        self._picture = None
        self._attributes = []  # replaced by an OrderedDict on first update

        # refresh this entity whenever new data has been fetched
        async_dispatcher_connect(hass, UPDATE_SENSORS_SIGNAL, self._update_sensor)
    @property
    def name(self):
        """Return the configured entity name."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique id; the configured name is used directly."""
        return self._name
    @property
    def should_poll(self):
        """Return False: updates are pushed via the dispatcher signal."""
        return False
    @property
    def icon(self):
        """Return the appointment icon, falling back to a trash-can icon."""
        return "mdi:trash-can" if self._icon is None else self._icon
    @property
    def entity_picture(self):
        """Return the picture of the current appointment, if any."""
        return self._picture
    @property
    def state(self):
        """Return the state of the entity."""
        return self._state
    @property
    def device_state_attributes(self):
        """Return attributes for the entity."""
        return self._attributes
    async def async_added_to_hass(self):
        """Entities have been added to hass; populate state immediately."""
        self._update_sensor()
    @property
    def _scraper(self):
        """Return the scraper selected by the configured source index."""
        return self._api.get_scraper(self._source_index)
    @property
    def _separator(self):
        """Return separator string used to join waste types."""
        return self._api.separator
@property
def _include_today(self):
"""Return true if appointments for today shall be included in the results."""
return datetime.datetime.now().time() < self._api._day_switch_time
def _add_refreshtime(self):
"""Add refresh-time (= last fetch time) to device-state-attributes."""
refreshtime = ""
if self._scraper.refreshtime is not None:
refreshtime = self._scraper.refreshtime.strftime("%x %X")
self._attributes["attribution"] = f"Last update: {refreshtime}"
def _set_state(self, upcoming):
"""Set entity state with default format."""
if len(upcoming) == 0:
self._state = ""
self._icon = None
self._picture = None
return
appointment = upcoming[0]
# appointment::=CollectionAppointmentGroup{date=2020-04-01, types=['Type1', 'Type2']}
if self._value_template is not None:
self._state = self._value_template.async_render_with_possible_json_value(
appointment, None
)
else:
self._state = f"{self._separator.join(appointment.types)} in {appointment.daysTo} days"
self._icon = appointment.icon
self._picture = appointment.picture
def _render_date(self, appointment):
if self._date_template is not None:
return self._date_template.async_render_with_possible_json_value(
appointment, None
)
else:
return appointment.date.isoformat()
@callback
def _update_sensor(self):
"""Update the state and the device-state-attributes of the entity.
Called if a new data has been fetched from the scraper source.
"""
if self._scraper is None:
_LOGGER.error(f"source_index {self._source_index} out of range")
return None
self._set_state(
self._scraper.get_upcoming_group_by_day(
count=1,
types=self._appointment_types,
include_today=self._include_today,
)
)
attributes = collections.OrderedDict()
appointment_types = (
sorted(self._scraper.get_types())
if self._appointment_types is None
else self._appointment_types
)
if self._details_format == DetailsFormat.upcoming:
# show upcoming events list in details
upcoming = self._scraper.get_upcoming_group_by_day(
count=self._count,
leadtime=self._leadtime,
types=self._appointment_types,
include_today=self._include_today,
)
for appointment in upcoming:
attributes[self._render_date(appointment)] = self._separator.join(
appointment.types
)
elif self._details_format == DetailsFormat.appointment_types:
# show list of appointments in details
for t in appointment_types:
appointments = self._scraper.get_upcoming(
count=1, types=[t], include_today=self._include_today
)
date = (
"" if len(appointments) == 0 else self._render_date(appointments[0])
)
attributes[t] = date
elif self._details_format == DetailsFormat.generic:
# insert generic attributes into details
attributes["types"] = appointment_types
attributes["upcoming"] = self._scraper.get_upcoming(
count=self._count,
leadtime=self._leadtime,
types=self._appointment_types,
include_today=self._include_today,
)
refreshtime = ""
if self._scraper.refreshtime is not None:
refreshtime = self._scraper.refreshtime.isoformat(timespec="seconds")
attributes["last_update"] = refreshtime
self._attributes = attributes
self._add_refreshtime()
if self.hass is not None:
self.async_schedule_update_ha_state()
| 2.015625 | 2 |
qidian.py | kivson/qidian-dl | 0 | 17198 | from concurrent.futures import ThreadPoolExecutor
from functools import partial
from json import JSONDecodeError
import requests
from funcy.calc import cache
from funcy.debug import print_calls
from funcy.simple_funcs import curry
# Default HTTP headers for every webnovel.com request; X-Requested-With marks
# the calls as AJAX so the server answers with JSON instead of HTML.
HEADERS = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/58.0.3029.110 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest"
}

# Site root; fetched once per minute to obtain the _csrfToken cookie.
HOME_URL = "https://www.webnovel.com/"
class QidianException(Exception):
    """Raised when the webnovel.com API answers with an unexpected payload."""
    pass
@cache(60)
def _get_csrftoken():
    """Fetch the ``_csrfToken`` cookie required by the AJAX API.

    The value is cached for 60 seconds by funcy's ``cache`` decorator.
    Returns None if the site did not set the cookie.
    """
    # Robustness fix: a timeout keeps the whole crawl from hanging forever
    # on a stalled connection (requests has no default timeout).
    response = requests.get(HOME_URL, timeout=30)
    return response.cookies.get('_csrfToken', None)
def novels():
    """Yield every novel entry of the 'popular' listing, page by page.

    Iteration stops after the page flagged ``isLast`` by the API.

    :raises QidianException: if expected keys are missing from the payload.
    """
    listing_url = "https://www.webnovel.com/apiajax/listing/popularAjax"
    for page_index in range(1, 10000):
        payload = _response_to_json(
            requests.get(
                listing_url,
                headers=HEADERS,
                params={
                    '_csrfToken': _get_csrftoken(),
                    'category': '',
                    'pageIndex': page_index,
                },
            )
        )
        if 'data' not in payload:
            raise QidianException('Expected data not found')
        body = payload['data']
        if 'items' not in body or 'isLast' not in body:
            raise QidianException('Expected data not found')
        yield from body['items']
        if body['isLast'] == 1:
            break
def _response_to_json(response):
try:
data = response.json()
except JSONDecodeError:
raise QidianException('Json parse Error')
return data
def charpters_list(bookId):
    """Yield the chapter descriptors of the book identified by *bookId*.

    :raises QidianException: if expected keys are missing from the payload.
    """
    payload = _response_to_json(
        requests.get(
            'https://www.webnovel.com/apiajax/chapter/GetChapterList',
            headers=HEADERS,
            params={'_csrfToken': _get_csrftoken(), 'bookId': bookId},
        )
    )
    if 'data' not in payload or 'chapterItems' not in payload['data']:
        raise QidianException('Expected data not found')
    yield from payload['data']['chapterItems']
def chapter(bookId, chapterId):
    """Return the full content record of one chapter of book *bookId*.

    :raises QidianException: if expected keys are missing from the payload.
    """
    payload = _response_to_json(
        requests.get(
            'https://www.webnovel.com/apiajax/chapter/GetContent',
            headers=HEADERS,
            params={
                '_csrfToken': _get_csrftoken(),
                'bookId': bookId,
                'chapterId': chapterId,
            },
        )
    )
    if 'data' not in payload or 'chapterInfo' not in payload['data']:
        raise QidianException('Expected data not found')
    return payload['data']['chapterInfo']
def all_chapters(bookId, poolsize=10):
    """Yield the full content of every chapter of *bookId*.

    Chapter bodies are fetched concurrently by a pool of *poolsize*
    worker threads; results keep the original chapter order.
    """
    chapter_ids = (entry['chapterId'] for entry in charpters_list(bookId=bookId))
    with ThreadPoolExecutor(max_workers=poolsize) as executor:
        fetch = partial(chapter, bookId)
        yield from executor.map(fetch, chapter_ids)
| 2.640625 | 3 |
srcWatteco/TICs/_poubelle/TIC_ICEp.py | OStephan29/Codec-Python | 1 | 17199 | # -*- coding: utf-8 -*-
# To convert TICDataXXXFromBitfields into TICDataBatchXXXFromFieldIndex,
# use the following Notepad++ regular expressions:
# Find : TICDataSelectorIfBit\( ([0-9]*), Struct\("([^\"]*)"\/([^\)]*).*
# Replace: \1 : \3, # \2
from ._TIC_Tools import *
from ._TIC_Types import *
# Bitfield-driven decoder for the ICE "p" (previous period) TIC frame.
# Bit N of the frame's leading descriptor selects whether field N is present
# in the payload; the order below must therefore stay exactly as listed.
# NOTE(review): field groups appear to be DEBUTp/FINp period bounds, EAp*
# energy indexes and ERPp*/ERNp* counters — confirm against the Watteco TIC
# documentation before relying on these semantics.
TICDataICEpFromBitfields = Struct(
	TICDataSelectorIfBit( 0, Struct("DEBUTp"/TYPE_DMYhms) ),
	TICDataSelectorIfBit( 1, Struct("FINp"/TYPE_DMYhms)),
	TICDataSelectorIfBit( 2, Struct("CAFp"/Int16ub) ),
	TICDataSelectorIfBit( 3, Struct("DATE_EAp"/TYPE_DMYhms) ),
	TICDataSelectorIfBit( 4, Struct("EApP"/Int24ub) ),
	TICDataSelectorIfBit( 5, Struct("EApPM"/Int24ub) ),
	TICDataSelectorIfBit( 6, Struct("EApHCE"/Int24ub) ),
	TICDataSelectorIfBit( 7, Struct("EApHCH"/Int24ub) ),
	TICDataSelectorIfBit( 8, Struct("EApHH"/Int24ub) ),
	TICDataSelectorIfBit( 9, Struct("EApHCD"/Int24ub) ),
	TICDataSelectorIfBit( 10, Struct("EApHD"/Int24ub) ),
	TICDataSelectorIfBit( 11, Struct("EApJA"/Int24ub) ),
	TICDataSelectorIfBit( 12, Struct("EApHPE"/Int24ub) ),
	TICDataSelectorIfBit( 13, Struct("EApHPH"/Int24ub) ),
	TICDataSelectorIfBit( 14, Struct("EApHPD"/Int24ub) ),
	TICDataSelectorIfBit( 15, Struct("EApSCM"/Int24ub) ),
	TICDataSelectorIfBit( 16, Struct("EApHM"/Int24ub) ),
	TICDataSelectorIfBit( 17, Struct("EApDSM"/Int24ub) ),
	TICDataSelectorIfBit( 18, Struct("DATE_ERPp"/TYPE_DMYhms) ),
	TICDataSelectorIfBit( 19, Struct("ERPpP"/Int24ub) ),
	TICDataSelectorIfBit( 20, Struct("ERPpPM"/Int24ub) ),
	TICDataSelectorIfBit( 21, Struct("ERPpHCE"/Int24ub) ),
	TICDataSelectorIfBit( 22, Struct("ERPpHCH"/Int24ub) ),
	TICDataSelectorIfBit( 23, Struct("ERPpHH"/Int24ub) ),
	TICDataSelectorIfBit( 24, Struct("ERPpHCD"/Int24ub) ),
	TICDataSelectorIfBit( 25, Struct("ERPpHD"/Int24ub) ),
	TICDataSelectorIfBit( 26, Struct("ERPpJA"/Int24ub) ),
	TICDataSelectorIfBit( 27, Struct("ERPpHPE"/Int24ub) ),
	TICDataSelectorIfBit( 28, Struct("ERPpHPH"/Int24ub) ),
	TICDataSelectorIfBit( 29, Struct("ERPpHPD"/Int24ub) ),
	TICDataSelectorIfBit( 30, Struct("ERPpSCM"/Int24ub) ),
	TICDataSelectorIfBit( 31, Struct("ERPpHM"/Int24ub) ),
	TICDataSelectorIfBit( 32, Struct("ERPpDSM"/Int24ub) ),
	TICDataSelectorIfBit( 33, Struct("DATE_ERNp"/TYPE_DMYhms) ),
	TICDataSelectorIfBit( 34, Struct("ERNpP"/Int24ub) ),
	TICDataSelectorIfBit( 35, Struct("ERNpPM"/Int24ub) ),
	TICDataSelectorIfBit( 36, Struct("ERNpHCE"/Int24ub) ),
	TICDataSelectorIfBit( 37, Struct("ERNpHCH"/Int24ub) ),
	TICDataSelectorIfBit( 38, Struct("ERNpHH"/Int24ub) ),
	TICDataSelectorIfBit( 39, Struct("ERNpHCD"/Int24ub) ),
	TICDataSelectorIfBit( 40, Struct("ERNpHD"/Int24ub) ),
	TICDataSelectorIfBit( 41, Struct("ERNpJA"/Int24ub) ),
	TICDataSelectorIfBit( 42, Struct("ERNpHPE"/Int24ub) ),
	TICDataSelectorIfBit( 43, Struct("ERNpHPH"/Int24ub) ),
	TICDataSelectorIfBit( 44, Struct("ERNpHPD"/Int24ub) ),
	TICDataSelectorIfBit( 45, Struct("ERNpSCM"/Int24ub) ),
	TICDataSelectorIfBit( 46, Struct("ERNpHM"/Int24ub) ),
	TICDataSelectorIfBit( 47, Struct("ERNpDSM"/Int24ub) )
)
# Batch-report decoder: maps a field index (same numbering as the bitfield
# decoder above) to the parser for that field's scalar value.
# NOTE: For Batch only scalar/numeric values are accepted, so the
# TYPE_DMYhms date fields are deliberately commented out; selecting one
# falls through to TICUnbatchableFieldError.
TICDataBatchICEpFromFieldIndex = Switch( FindFieldIndex,
	{
		#0 : TYPE_DMYhms, # DEBUTp
		#1 : TYPE_DMYhms, # FINp
		2 : Int16ub, # CAFp
		#3 : TYPE_DMYhms, # DATE_EAp
		4 : Int24ub, # EApP
		5 : Int24ub, # EApPM
		6 : Int24ub, # EApHCE
		7 : Int24ub, # EApHCH
		8 : Int24ub, # EApHH
		9 : Int24ub, # EApHCD
		10 : Int24ub, # EApHD
		11 : Int24ub, # EApJA
		12 : Int24ub, # EApHPE
		13 : Int24ub, # EApHPH
		14 : Int24ub, # EApHPD
		15 : Int24ub, # EApSCM
		16 : Int24ub, # EApHM
		17 : Int24ub, # EApDSM
		#18 : TYPE_DMYhms, # DATE_ERPp
		19 : Int24ub, # ERPpP
		20 : Int24ub, # ERPpPM
		21 : Int24ub, # ERPpHCE
		22 : Int24ub, # ERPpHCH
		23 : Int24ub, # ERPpHH
		24 : Int24ub, # ERPpHCD
		25 : Int24ub, # ERPpHD
		26 : Int24ub, # ERPpJA
		27 : Int24ub, # ERPpHPE
		28 : Int24ub, # ERPpHPH
		29 : Int24ub, # ERPpHPD
		30 : Int24ub, # ERPpSCM
		31 : Int24ub, # ERPpHM
		32 : Int24ub, # ERPpDSM
		#33 : TYPE_DMYhms, # DATE_ERNp
		34 : Int24ub, # ERNpP
		35 : Int24ub, # ERNpPM
		36 : Int24ub, # ERNpHCE
		37 : Int24ub, # ERNpHCH
		38 : Int24ub, # ERNpHH
		39 : Int24ub, # ERNpHCD
		40 : Int24ub, # ERNpHD
		41 : Int24ub, # ERNpJA
		42 : Int24ub, # ERNpHPE
		43 : Int24ub, # ERNpHPH
		44 : Int24ub, # ERNpHPD
		45 : Int24ub, # ERNpSCM
		46 : Int24ub, # ERNpHM
		47 : Int24ub, # ERNpDSM
	}, default = TICUnbatchableFieldError()
)
| 2.140625 | 2 |