code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
import colander
from grano.core import db
from grano.model import Permission
from grano.logic.references import ProjectRef, AccountRef
class PermissionValidator(colander.MappingSchema):
    """Validation schema for permission records.

    ``project`` and ``account`` are resolved through reference schema types;
    the three role flags default to False when omitted from the input.
    """
    project = colander.SchemaNode(ProjectRef())
    account = colander.SchemaNode(AccountRef())
    reader = colander.SchemaNode(colander.Boolean(), missing=False)
    editor = colander.SchemaNode(colander.Boolean(), missing=False)
    admin = colander.SchemaNode(colander.Boolean(), missing=False)
def save(data, permission=None):
    """Validate ``data`` and create or update a Permission record.

    When ``permission`` is not supplied, an existing record for the same
    project/account pair is looked up before a new one is created.  The
    role flags are stored hierarchically: admin implies editor, and
    editor implies reader.  The session is flushed but not committed.
    """
    clean = PermissionValidator().deserialize(data)
    if permission is None:
        query = Permission.all() \
            .filter(Permission.project == clean['project']) \
            .filter(Permission.account == clean['account'])
        permission = query.first()
    if permission is None:
        permission = Permission()
    permission.project = clean.get('project')
    permission.account = clean.get('account')
    # Higher roles imply the lower ones.
    is_admin = clean['admin']
    is_editor = clean['editor'] or is_admin
    permission.reader = clean['reader'] or is_editor
    permission.editor = is_editor
    permission.admin = is_admin
    db.session.add(permission)
    db.session.flush()
    return permission
def delete(permission):
    """Remove ``permission`` from the database session (flushed, not committed)."""
    db.session.delete(permission)
    db.session.flush()
|
CodeForAfrica/grano
|
grano/logic/permissions.py
|
Python
|
mit
| 1,296
|
#!/usr/bin/python
## @file
## @brief Main file
import ConfigParser
import atexit
import inspect
import logging
import logging.config
import os
import sys
from time import sleep
import pydevd
from pubsub import pub
from schedule import Schedule
@atexit.register
## @fn def clean_exit():
## @brief Cleanup function
## @details Close all debug/log session and perform clean exit
def clean_exit():
    global _config
    print 'Exit'
    try:
        # Stop the pydevd remote-debug session if one was configured.
        # NOTE(review): _config is assigned locally inside aria_start(), so
        # at exit time this global is presumably unset and the resulting
        # NameError is swallowed by the bare except below -- TODO confirm.
        if _config.getboolean('Debug', 'Debug'):
            print 'Stoping debug session debug session.'
            pydevd.stoptrace()
    except:
        pass
## @fn def aria_start():
## @brief Startup function
## @details Start all services and load runners
def aria_start():
    # Start reading _config file
    _config = ConfigParser.SafeConfigParser(allow_no_value=True)
    # TODO load 'default' _config
    _config.read('Aria.cfg')
    # Setting up debug session: optional remote pydevd trace controlled by
    # the [Debug] section of the config file.
    try:
        if _config.getboolean('Debug', 'Debug'):
            print 'Trying to start debug session.'
            sys.path.append(r'/home/pi/pysrc')
            _debug_host = _config.get('Debug', 'host').strip()
            _debug_port = _config.getint('Debug', 'port')
            print 'Remote host - %s on port %i' % (_debug_host, _debug_port)
            pydevd.settrace(_debug_host, port=_debug_port, stdoutToServer=True, stderrToServer=True, suspend=False)
            print '#################################################'
            print '######## Remote debug session started ##########'
            print '#################################################'
        else:
            print 'Start in normal mode.'
    except ConfigParser.NoSectionError:
        print 'No debug section found.Starting in normal mode'
        print 'Missing debug parameters.Please refer manual.Starting in normal mode'
    # setting up logger from logging.conf; a missing/broken config is fatal
    try:
        logging.config.fileConfig('logging.conf')
        _logger = logging.getLogger('root')
    except ConfigParser.NoSectionError as e:
        print 'Fatal error - fail to set _logger.Error: %s ' % e.message
        exit(-1)
    _logger.debug('Logger started')
    # starting schedule service
    _schedule_service = Schedule(_config)
    _schedule_service.start_schedule()
    sleep(1)
    # Loading modules
    # Storing loaded modules
    active_modules = []
    try:
        plugin_dir = _config.get('Modules', 'Path').strip()
        _logger.info('Searching modules in: %s' % plugin_dir)
    except IOError:
        _logger.info('Error getting plugin dir using default - plugins')
        plugin_dir = 'plugins'
    # Comma-separated list of module names that must not be loaded.
    try:
        disable_modules = _config.get('Modules', 'Disabled')
        disable_modules = disable_modules.strip().split(',')
    except:
        disable_modules = []
    _logger.info('Disabled modules : %s' % disable_modules)
    if not os.path.exists(plugin_dir):
        _logger.critical('Plugins folder not exist')
        exit(-1)
    # Searching .py files in folder 'plugins'
    for fname in os.listdir(plugin_dir):
        # Look only for py files
        if fname.endswith('.py'):
            # Cut .py from path
            module_name = fname[: -3]
            # Skip base,__init__ and disabled files
            if module_name != 'base' and module_name != '__init__' and not (module_name in disable_modules):
                _logger.info('Found module %s' % module_name)
                # Load module and add it to list of loaded modules
                # NOTE(review): __import__ returns the top-level package;
                # package_obj stays bound to the last imported one and is
                # only defined if at least one module was found.
                package_obj = __import__(plugin_dir + '.' + module_name)
                active_modules.append(module_name)
            else:
                _logger.info('Skiping %s' % fname)
    # Retrieving modules: instantiate every class found in each module file,
    # passing the parsed config to its constructor.
    _loaded_modules = []
    for modulename in active_modules:
        module_obj = getattr(package_obj, modulename)
        # Looking for classes in file
        for elem in dir(module_obj):
            obj = getattr(module_obj, elem)
            # If this a class ?
            if inspect.isclass(obj):
                # Creating object
                try:
                    _logger.info('Loading module %s from %s' % (elem, modulename))
                    _loaded_modules.append(obj(_config))
                except ConfigParser.NoSectionError:
                    _logger.warning('Incorrect logger settings')
                except ImportWarning:
                    _logger.warning('Failed to load %s from %s' % (elem, modulename))
                sleep(10)
    _logger.info('All modules loaded')
    pub.sendMessage('SpeakCMD', cmd2say="welcome")
    # Test - audio record
    # threading.Thread(target=avg_calc, args=(_config,)).start()
    # Idle until interrupted, then shut everything down in order.
    while True:
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            print "Quit"
            break
    _schedule_service.stop_schedule()
    for _module in _loaded_modules:
        try:
            _logger.info('Unloading module %s' % _module)
            _module.stop()
        except:
            pass
    print "Bay"
if __name__ == '__main__':
    aria_start()
|
Merlin71/Aria
|
Aria.py
|
Python
|
gpl-3.0
| 5,038
|
import random
from templates.text import TextTemplate
def process(input):
    """Answer a "how are you" query with a randomly picked canned reply."""
    replies = (
        'I\'m amazing, dude!',
        'I\'m awesome, dude!',
    )
    chosen = random.choice(replies)
    return {
        'input': input,
        'output': TextTemplate(chosen).get_message(),
        'success': True,
    }
|
manparvesh/BotDude
|
modules/src/howareyou.py
|
Python
|
mit
| 320
|
import sys
import re
from collections import namedtuple
from ascii2graph import graph
from .color import get_color
from .svg import draw_box, draw_line, draw_arrow, embed_svg, get_box_dims
def get_box_center(scale, text, origin):
    """Return the (x, y) center of the box drawn for ``text`` at ``origin``."""
    box_w, box_h = get_box_dims(scale, text)
    return origin[0] + box_w / 2.0, origin[1] + box_h / 2.0
def parse_graph(file_name):
    """Read the ASCII-art file ``file_name`` and parse it into a graph."""
    with open(file_name, 'r') as handle:
        contents = handle.read()
    return graph(contents)
def move_children_behind_current_commit(scale,
                                        time_direction,
                                        min_separation,
                                        commit,
                                        commits,
                                        placement):
    """Recursively push each child of ``commit`` at least ``min_separation``
    ahead of it along the time axis, and shift children sideways according
    to the angle of the edge that connects them.

    ``placement`` maps (line, column, text) keys to mutable [x, y] lists and
    is both mutated in place and returned.
    """
    (_, _, text) = commit
    width, height = get_box_dims(scale, text)
    # Pick which coordinate index advances with time and which one
    # separates siblings, based on the time arrow's orientation.
    if time_direction in [90, 270]:
        _time_dim = 0
        _sep_dim = 1
    else:
        _time_dim = 1
        _sep_dim = 0
    children = commits[commit]
    for child in children:
        (line_number, character_position, text, angle) = child
        k = (line_number, character_position, text)
        # in our definition the angle difference cannot be larger than 90
        # unless the time_direction is 0
        if abs(angle - time_direction) > 90:
            angle -= 360
        # time direction | smaller | larger
        # 0              | left    | right
        # 90             | up      | down
        # 180            | right   | left
        # 270            | down    | up
        if time_direction in [0, 90]:
            if (angle - time_direction) < 0:
                shift = 'min'
            elif (angle - time_direction) > 0:
                shift = 'max'
            else:
                shift = None
        else:
            if (angle - time_direction) < 0:
                shift = 'max'
            elif (angle - time_direction) > 0:
                shift = 'min'
            else:
                shift = None
        # Apply the lateral shift; a straight edge (shift is None) keeps the
        # child aligned with its parent on the separation axis.
        if shift == 'min':
            placement[k][_sep_dim] = min(placement[k][_sep_dim], placement[commit][_sep_dim] - min_separation)
        elif shift == 'max':
            placement[k][_sep_dim] = max(placement[k][_sep_dim], placement[commit][_sep_dim] + min_separation)
        else:
            placement[k][_sep_dim] = placement[commit][_sep_dim]
        # displace children in the direction of the time arrow
        # make sure children are placed behind parents
        if time_direction in [90, 180]:
            placement[k][_time_dim] = max(placement[k][_time_dim], placement[commit][_time_dim] + min_separation)
        else:
            placement[k][_time_dim] = min(placement[k][_time_dim], placement[commit][_time_dim] - min_separation)
        # Recurse so grandchildren end up behind this child as well.
        placement = move_children_behind_current_commit(scale,
                                                        time_direction,
                                                        min_separation,
                                                        k,
                                                        commits,
                                                        placement)
    return placement
def point_inside_box(point_xy,
                     box_center_xy,
                     box_width,
                     box_height):
    """Return True if ``point_xy`` lies within (or exactly on the edge of)
    the axis-aligned box centered at ``box_center_xy``."""
    half_w = 0.5 * box_width
    half_h = 0.5 * box_height
    within_x = box_center_xy[0] - half_w <= point_xy[0] <= box_center_xy[0] + half_w
    within_y = box_center_xy[1] - half_h <= point_xy[1] <= box_center_xy[1] + half_h
    return within_x and within_y
def arrow_head(arrow_origin_xy,
               box_center_xy,
               box_width,
               box_height):
    '''
    Locates the arrow head so that it is just barely outside the target box.
    '''
    dx = arrow_origin_xy[0] - box_center_xy[0]
    dy = arrow_origin_xy[1] - box_center_xy[1]
    # Sample points from the box center toward the arrow origin and stop at
    # the first one that falls outside the box (a linear scan; coarse but
    # simple).
    num_steps = 100
    for step in range(num_steps + 1):
        fraction = step / num_steps
        px = box_center_xy[0] + fraction * dx
        py = box_center_xy[1] + fraction * dy
        if not point_inside_box((px, py),
                                box_center_xy,
                                box_width,
                                box_height):
            return px, py
    sys.stderr.write('ERROR: head not found in arrow_head\n')
    sys.exit(1)
def main(scale, in_file, time_direction):
    """Render the ASCII-art git graph in ``in_file`` as an SVG string.

    scale          -- global scaling factor applied to all drawn elements
    in_file        -- path to the ASCII graph file
    time_direction -- angle (0, 90, 180 or 270) in which time advances
    """
    assert time_direction in [0, 90, 180, 270]
    # Edges within 45 degrees of the time arrow point to children; all
    # other edges point "backwards" and are discarded below.
    if time_direction == 0:
        forward_angles = [315, 0, 45]
    elif time_direction == 90:
        forward_angles = [45, 90, 135]
    elif time_direction == 180:
        forward_angles = [135, 180, 225]
    elif time_direction == 270:
        forward_angles = [225, 270, 315]
    file_name = in_file
    _graph = parse_graph(file_name)
    # filter out all branches and tags (their text starts with '[')
    _commits = {node: _graph[node] for node in _graph if not node[2].startswith('[')}
    # only keep pointers (branches, tags)
    pointers = {node: _graph[node] for node in _graph if node[2].startswith('[')}
    # make sure edges point to children only
    # we do this by filtering out all angles that point "backwards"
    commits = {}
    for node in _commits:
        commits[node] = []
    for node in _commits:
        for child in _commits[node]:
            angle = child[3]
            if angle in forward_angles:
                commits[node].append(child)
    # invert the edges to build a child -> parents map
    parents = {}
    for node in commits:
        for child in commits[node]:
            (line_number, character_position, text, _) = child
            k = (line_number, character_position, text)
            if k not in parents:
                parents[k] = []
            parents[k].append(node)
    # find root commit, this is the commit without any parents
    commits_without_parents = [commit for commit in commits if commit not in parents]
    assert len(commits_without_parents) == 1
    root_commit = commits_without_parents[0]
    placement = {}
    for (line_number, character_position, name) in commits:
        placement[(line_number, character_position, name)] = [0.0, 0.0]
    placement = move_children_behind_current_commit(scale=scale,
                                                    time_direction=time_direction,
                                                    min_separation=scale * 60.0,
                                                    commit=root_commit,
                                                    commits=commits,
                                                    placement=placement)
    s_svg = ''
    # first we place the edges
    for node in commits:
        # BUG FIX: the parent box center must be computed from this node's
        # own text; previously the stale loop variable "name" (left over
        # from the placement-initialisation loop above) was used, so edge
        # start points were computed with the wrong box dimensions.
        (_, _, parent_text) = node
        center_parent = get_box_center(scale, parent_text, placement[node])
        for child in commits[node]:
            (line_number, character_position, text, _) = child
            k = (line_number, character_position, text)
            center_child = get_box_center(scale, text, placement[k])
            s_svg += draw_line(x1=center_parent[0],
                               y1=center_parent[1],
                               x2=center_child[0],
                               y2=center_child[1],
                               scale=scale,
                               color='#000000',
                               opacity=0.8)
    # track the bounding box of everything drawn
    x_min = sys.float_info.max
    y_min = sys.float_info.max
    x_max = -sys.float_info.max
    y_max = -sys.float_info.max
    # later we place the nodes so that they show up "on top"
    for node in commits:
        (_, _, name) = node
        (x, y) = placement[node]
        width, height = get_box_dims(scale, name)
        x_min = min(x_min, x)
        y_min = min(y_min, y)
        x_max = max(x_max, x + width)
        y_max = max(y_max, y + height)
        stroke_color, fill_color = get_color(name)
        s_svg += draw_box(text=name,
                          x=x,
                          y=y,
                          scale=scale,
                          stroke_color=stroke_color,
                          fill_color=fill_color,
                          opacity=1.0,
                          rounded=True)
    # finally place branches and tags
    for node in pointers:
        (line_number, character_position, text, angle) = pointers[node][0]
        (x, y) = placement[(line_number, character_position, text)]
        (x_target, y_target) = get_box_center(scale, text, (x, y))
        (_, _, tag_text) = node
        target_width, target_height = get_box_dims(scale, text)
        # remove the starting [ and ending ]
        tag_text = tag_text[1:-1]
        # tags can be a comma separated list; they are chained so that each
        # tag's arrow points at the previous box in the chain
        for tag in tag_text.split(','):
            tag_width, tag_height = get_box_dims(scale, tag)
            # position the tag box relative to its target by edge angle
            if angle == 0:
                x = x + 0.5 * target_width - 0.5 * tag_width
                y = y + target_height + scale * 35.0
            elif angle == 180:
                x = x + 0.5 * target_width - 0.5 * tag_width
                y = y - tag_height - scale * 35.0
            if angle == 90:
                x = x - tag_width - scale * 35.0
            elif angle == 270:
                x = x + target_width + scale * 35.0
            x_min = min(x_min, x)
            y_min = min(y_min, y)
            x_max = max(x_max, x + tag_width)
            y_max = max(y_max, y + tag_height)
            center_tag = get_box_center(scale, tag, (x, y))
            x2, y2 = arrow_head(arrow_origin_xy=center_tag,
                                box_center_xy=(x_target, y_target),
                                box_width=target_width,
                                box_height=target_height)
            # update target so the next tag in the list points at this one
            target_width, target_height = tag_width, tag_height
            (x_target, y_target) = center_tag
            # tags prefixed with '_' are drawn as translucent "ghosts"
            if tag.startswith('_'):
                tag_opacity = 0.2
                fill_color = '#ffffff'
                _tag = tag[1:]
                ghost = True
            else:
                tag_opacity = 1.0
                fill_color = '#dddddd'
                _tag = tag
                ghost = False
            s_svg += draw_arrow(x1=center_tag[0],
                                y1=center_tag[1],
                                x2=x2,
                                y2=y2,
                                scale=scale,
                                color='#000000',
                                ghost=ghost)
            s_svg += draw_box(text=_tag,
                              x=x,
                              y=y,
                              scale=scale,
                              stroke_color='#000000',
                              fill_color=fill_color,
                              opacity=tag_opacity,
                              rounded=False)
    bbox = (x_min, y_min, x_max, y_max)
    s_svg = embed_svg(text=s_svg, bbox=bbox)
    return s_svg
|
bast/gitink
|
gitink/main.py
|
Python
|
mpl-2.0
| 10,896
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Theme definition: per-model property overrides for a dark minimal look.
# NOTE(review): the name "json" shadows the stdlib module but is presumably
# the attribute name the theme loader expects -- renaming would change the
# module's public interface, so it is left as is.
json = {
    "attrs": {
        "Figure" : {
            "background_fill_color": "#20262B",
            "border_fill_color": "#15191C",
            "outline_line_color": "#E0E0E0",
            "outline_line_alpha": 0.25
        },
        "Grid": {
            "grid_line_color": "#E0E0E0",
            "grid_line_alpha": 0.25
        },
        "Axis": {
            "major_tick_line_alpha": 0,
            "major_tick_line_color": "#E0E0E0",
            "minor_tick_line_alpha": 0,
            "minor_tick_line_color": "#E0E0E0",
            "axis_line_alpha": 0,
            "axis_line_color": "#E0E0E0",
            "major_label_text_color": "#E0E0E0",
            "major_label_text_font": "Helvetica",
            "major_label_text_font_size": "1.025em",
            "axis_label_standoff": 10,
            "axis_label_text_color": "#E0E0E0",
            "axis_label_text_font": "Helvetica",
            "axis_label_text_font_size": "1.25em",
            "axis_label_text_font_style": "normal"
        },
        "Legend": {
            "spacing": 8,
            "glyph_width": 15,
            "label_standoff": 8,
            "label_text_color": "#E0E0E0",
            "label_text_font": "Helvetica",
            "label_text_font_size": "1.025em",
            "border_line_alpha": 0,
            "background_fill_alpha": 0.25,
            "background_fill_color": "#20262B"
        },
        "ColorBar": {
            "title_text_color": "#E0E0E0",
            "title_text_font": "Helvetica",
            "title_text_font_size": "1.025em",
            "title_text_font_style": "normal",
            "major_label_text_color": "#E0E0E0",
            "major_label_text_font": "Helvetica",
            "major_label_text_font_size": "1.025em",
            "background_fill_color": "#15191C",
            "major_tick_line_alpha": 0,
            "bar_line_alpha": 0
        },
        "Title": {
            "text_color": "#E0E0E0",
            "text_font": "Helvetica",
            "text_font_size": "1.15em"
        }
    }
}
|
dennisobrien/bokeh
|
bokeh/themes/_dark_minimal.py
|
Python
|
bsd-3-clause
| 2,468
|
"""
Some Python 2 & 3 compatibility code.
"""
import sys
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2,6)
if PY3:
stringtypes = (bytes, str, )
def int2byte(i):
"""Converts int (0 through 255) into b'...' character."""
return bytes((i,))
def byte2int(b):
"""Converts b'...' character into int (0 through 255)."""
return ord(b)
def str2bytes(s):
"""Converts '...' str into b'...' bytes. On PY2 they are equivalent."""
return s.encode("utf8")
def bytes2str(b):
"""Converts b'...' bytes into str. On PY2 they are equivalent."""
return b.decode("utf8")
def str2unicode(s):
"""Converts '...' str into u'...' unicode string. On PY3 they are equivalent."""
return s
def unicode2str(s):
"""Converts u'...' string into '...' str. On PY3 they are equivalent."""
return s
def unknownstring2bytes(s):
return unicode2str(s) if isinstance(s,str) else s
def iteratebytes(s):
"""Itarates though b'...' string yielding characters as ints. On PY3 iter is the same."""
return s
else:
stringtypes = (str, unicode, )
def int2byte(i):
"""Converts int (0 through 255) into b'...' character."""
return chr(i)
def byte2int(s):
"""Converts b'...' character into int (0 through 255)."""
return ord(s)
def str2bytes(s):
"""Converts '...' str into b'...' bytes. On PY2 they are equivalent."""
return s
def bytes2str(b):
"""Converts b'...' bytes into str. On PY2 they are equivalent."""
return b
def str2unicode(b):
"""Converts '...' str into u'...' unicode string. On PY3 they are equivalent."""
return b.encode("utf8")
def unicode2str(s):
"""Converts u'...' string into '...' str. On PY3 they are equivalent."""
return s.decode("utf8")
def unknownstring2bytes(s):
return unicode2str(s) if isinstance(s,unicode) else s
def iteratebytes(s):
"""Itarates though b'...' string yielding characters as ints. On PY3 iter is the same."""
for c in s:
yield byte2int(c)
|
gkonstantyno/construct
|
construct/lib/py3compat.py
|
Python
|
mit
| 2,194
|
# -*- coding: utf-8 -*-
import pygame
import random
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
class Board(gd.BoardGame):
    """Memory game board: a subset of numbered squares is highlighted
    briefly, then the player must click exactly the squares that were
    shown.  The number of highlighted squares grows each round."""
    def __init__(self, mainloop, speaker, config, screen_w, screen_h):
        # 99 games per level, 17 levels; grid is 13 x 9 cells
        self.level = lc.Level(self, mainloop, 99, 17)
        gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 13, 9)
    def create_game_objects(self, level=1):
        """Build the board for the current level: pick colors, compute the
        grid of squares from the level data, and lay out the obstacles."""
        self.allow_unit_animations = False
        self.board.decolorable = False
        self.vis_buttons = [0, 1, 1, 1, 1, 0, 1, 1, 0]
        self.mainloop.info.hide_buttonsa(self.vis_buttons)
        self.ai_enabled = True
        self.board.draw_grid = False
        # random hue shared by the square color, highlight and font color
        h = random.randrange(0, 255, 5)
        color0 = ex.hsv_to_rgb(h, 30, 230)  # highlight 1
        self.color = color0
        self.highlight_color = ex.hsv_to_rgb(h, 230, 150)
        font_color = ex.hsv_to_rgb(h, 70, 230)
        white = (255, 255, 255)
        if self.mainloop.scheme is not None:
            if self.mainloop.scheme.dark:
                white = (0, 0, 0)
        self.level.game_step = 1
        self.start_sequence = True
        self.completed_mode = False
        self.game_over_mode = False
        self.disp_counter = 0
        self.disp_len = 3
        self.found = set()
        # setting level variable
        # data = [x_count, y_count, number_count, top_limit, ordered]
        if self.level.lvl == 1:
            data = [13, 9, 5, 3, 3]
        elif self.level.lvl == 2:
            data = [13, 9, 8, 3, 4]
        elif self.level.lvl == 3:
            data = [12, 9, 8, 4, 4]
        elif self.level.lvl == 4:
            data = [12, 9, 7, 4, 5]
        elif self.level.lvl == 5:
            data = [13, 9, 7, 5, 5]
        elif self.level.lvl == 6:
            data = [13, 9, 11, 5, 6]
        elif self.level.lvl == 7:
            data = [12, 9, 11, 6, 6]
        elif self.level.lvl == 8:
            data = [12, 9, 15, 6, 7]
        elif self.level.lvl == 9:
            data = [13, 9, 15, 7, 7]
        elif self.level.lvl == 10:
            data = [13, 9, 9, 7, 8]
        elif self.level.lvl == 11:
            data = [12, 9, 9, 8, 8]
        elif self.level.lvl == 12:
            data = [12, 9, 14, 8, 9]
        elif self.level.lvl == 13:
            data = [13, 9, 14, 9, 9]
        elif self.level.lvl == 14:
            data = [12, 9, 14, 10, 9]
        elif self.level.lvl == 15:
            data = [13, 9, 14, 11, 9]
        elif self.level.lvl == 16:
            data = [12, 9, 14, 12, 9]
        elif self.level.lvl == 17:
            data = [13, 9, 14, 13, 9]
        self.chapters = [1, 3, 5, 7, 9, 11, 13, 15, 17]
        # rescale the number of squares horizontally to better match the screen width
        m = data[0] % 2
        if m == 0:
            data[0] = self.get_x_count(data[1], even=True)
        else:
            data[0] = self.get_x_count(data[1], even=False)
        self.data = data
        # active play area is data[3] x data[4] squares
        self.square_count = self.data[3] * self.data[4]
        if self.square_count % 2 == 0:
            a = 0
        else:
            a = 1
        self.max_games = self.square_count // 2 + a
        self.level.games_per_lvl = self.max_games
        self.layout.update_layout(data[0], data[1])
        self.board.level_start(data[0], data[1], self.layout.scale)
        self.current_count = 1
        self.choice_list = [x for x in range(1, data[2] + 1)]
        self.shuffled = self.choice_list[:]
        random.shuffle(self.shuffled)
        h1 = (data[1] - data[4]) // 2  # height of the top margin
        h2 = data[1] - h1 - data[4]  # -1 #height of the bottom margin minus 1 (game label)
        w2 = (data[0] - data[3]) // 2  # side margin width
        self.board.add_door(w2, h1, data[3], data[4], classes.board.Door, "", white, "")
        # create the clickable numbered squares, filling row by row
        x = w2
        y = h1
        for i in range(self.square_count):
            caption = str(i + 1)
            self.board.add_unit(x, y, 1, 1, classes.board.Letter, caption, color0, "", 3)
            self.board.ships[i].highlight = False
            self.board.ships[i].readable = False
            self.board.ships[i].font_color = font_color
            if x >= w2 + data[3] - 1:
                x = w2
                y += 1
            else:
                x += 1
        self.outline_all(0, 1)
        # horizontal
        if data[4] < 8:
            self.board.add_unit(0, 0, data[0], h1, classes.board.Obstacle, "", white, "", 7)  # top
        if data[4] < 9:
            self.board.add_unit(0, h1 + data[4], data[0], h2, classes.board.Obstacle, "", white, "", 7)  # bottom 1
        # side obstacles
        if data[3] < 12:
            self.board.add_unit(0, h1, w2, data[4], classes.board.Obstacle, "", white, "", 7)  # left
            self.board.add_unit(w2 + data[3], h1, w2, data[4], classes.board.Obstacle, "", white, "", 7)  # right
        self.board.all_sprites_list.move_to_front(self.board.units[0])
        self.draw_nums()
    def handle(self, event):
        """React to a click on a square: correct picks are highlighted and
        counted, a wrong pick turns red and ends the round."""
        gd.BoardGame.handle(self, event)
        if event.type == pygame.MOUSEBUTTONDOWN and not self.show_msg and not self.start_sequence:
            if 0 <= self.board.active_ship < self.square_count:
                active = self.board.ships[self.board.active_ship]
                if active.unit_id in self.chosen:
                    active.initcolor = self.highlight_color
                    active.color = active.initcolor
                    self.found.add(active.unit_id)
                    if len(self.found) == self.current_count:
                        self.completed_mode = True
                        self.ai_enabled = True
                else:
                    active.initcolor = (255, 0, 0)
                    active.color = active.initcolor
                    self.game_over_mode = True  # self.game_over()
                    self.ai_enabled = True
    def update(self, game):
        """Redraw the board on a white background."""
        game.fill((255, 255, 255))
        gd.BoardGame.update(self, game)
    def draw_nums(self):
        """Pick a fresh random set of squares for the player to memorize."""
        choice = [x for x in range(self.square_count)]
        shuffled = choice[:]
        random.shuffle(shuffled)
        self.chosen = shuffled[0:self.current_count]
    def next_level(self):
        """Advance to the next round (one more square) or, when the round
        budget is exhausted, record completion and level up."""
        self.current_step = 0
        self.current_count += 1
        self.found = set()
        self.level.game_step = self.current_count
        self.mainloop.redraw_needed[1] = True
        if self.current_count <= self.max_games:
            self.draw_nums()
            self.reset_colors()
            self.start_sequence = True
            self.ai_enabled = True
        else:
            # self.update_score(self.level.lvl + self.max_games)
            self.mainloop.db.update_completion(self.mainloop.userid, self.active_game.dbgameid, self.level.lvl)
            self.level.levelup()
    def game_over(self):
        """Restart the level from round one after a wrong pick."""
        self.level.game_step = 0
        self.current_count = 0
        self.next_level()
    def highlight_colors(self):
        """Color the chosen squares with the highlight color."""
        for each in self.board.ships:
            if each.unit_id in self.chosen:
                each.initcolor = self.highlight_color
                each.color = each.initcolor
                each.update_me = True
        self.mainloop.redraw_needed[0] = True
        self.mainloop.redraw_needed[1] = True
    def reset_colors(self):
        """Restore every square to the neutral board color."""
        for each in self.board.ships:
            each.initcolor = self.color
            each.color = each.initcolor
            each.update_me = True
        self.mainloop.redraw_needed[0] = True
    def ai_walk(self):
        """Timer-driven state machine: shows the memorize sequence, then
        handles the 'round completed' and 'game over' delays."""
        if self.start_sequence:
            if self.disp_counter < self.disp_len:
                if self.disp_counter == 0:
                    self.highlight_colors()
                self.disp_counter += 1
            else:
                self.reset_colors()
                self.start_sequence = False
                self.ai_enabled = False
                self.disp_counter = 0
        elif self.completed_mode:
            self.disp_counter += 1
            if self.disp_counter > 1:  # self.disp_len:
                self.completed_mode = False
                self.disp_counter = 0
                self.next_level()
        elif self.game_over_mode:
            self.disp_counter += 1
            self.highlight_colors()
            if self.disp_counter > 2:  # self.disp_len:
                self.game_over_mode = False
                self.disp_counter = 0
                self.game_over()
    def check_result(self):
        # No post-round result check is needed for this game.
        pass
|
imiolek-ireneusz/eduActiv8
|
game_boards/game006.py
|
Python
|
gpl-3.0
| 8,486
|
#!/usr/bin/env python3
##### ##### ##### ##### #### ####
# # # # # # # # # # #### #### # # #
##### #### ##### ##### ##### # # # # ####
# # # # # # # # # # # # #
# # # # # # # # #### # #### # ####
#finds the password of a desired rar or zip file using a brute-force algorithm
##will fail to find the password if the password has a character that isnt in
##the english alphabet or isnt a number (you can change the char. list though)
#now using itertools!
#importing needed modules
import time,os,sys,shutil,itertools
#checking if the user has unrar/p7zip installed
for which in ["unrar","p7zip"]:
if not shutil.which(which):
print("ERROR:",which,"isn't installed.\nExiting...")
sys.exit(-1)
#defining the function
def rc(rf):
    """Brute-force the password of the RAR/ZIP/7z archive at path ``rf``.

    Tries every combination of ASCII letters and digits, shortest first,
    and exits the interpreter as soon as a password tests OK.  Returns
    None (after printing an error) for unsupported file types.
    """
    # BUG FIX: the extension check used to live inside the innermost loop,
    # so an unsupported file printed the error once per candidate password
    # and the loop never terminated.  Reject unsupported files up front.
    is_rar = rf[-4:] == ".rar"
    if not (is_rar or rf[-4:] == ".zip" or rf[-3:] == ".7z"):
        print("ERROR: File isnt a RAR, ZIP or 7z file.\nExiting...")
        return
    alphabet="aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ1234567890"
    start=time.time()
    tryn=0
    for length in range(1, len(alphabet) + 1):
        for combo in itertools.product(alphabet, repeat=length):
            k = "".join(combo)
            print("Trying:", k)
            tryn += 1
            # SECURITY NOTE: k and rf are interpolated straight into a
            # shell command line -- a crafted file name can inject shell
            # code.  Kept for compatibility; subprocess.run with a list
            # would be the safe replacement.
            if is_rar:
                kf = os.popen("unrar t -y -p%s %s 2>&1|grep 'All OK'" % (k, rf))
                ok_line = "All OK\n"
            else:
                kf = os.popen("7za t -p%s %s 2>&1|grep 'Everything is Ok'" % (k, rf))
                ok_line = "Everything is Ok\n"
            for rkf in kf.readlines():
                if rkf == ok_line:
                    print("Found password:", repr(k))
                    print("Tried combination count:", tryn)
                    print("It took", round(time.time() - start, 3), "seconds")
                    print("Exiting...")
                    time.sleep(2)
                    sys.exit(1)
#checking if the file exists/running the function
# Usage: script takes exactly one argument, the archive to crack.
if len(sys.argv)==2:
    if os.path.exists(sys.argv[1]):
        rc(sys.argv[1])
    else:
        print("ERROR: File doesn't exist.\nExiting...")
else:
    print("Usage:",os.path.basename(__file__),"[rar file]")
    print("Example:",os.path.basename(__file__),"foobar.rar")
|
jonDel/pyrarcr
|
old/pyrarcr-0.2.py
|
Python
|
mit
| 2,210
|
import os
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg import DDPG
from baselines.ddpg.util import mpi_mean, mpi_std, mpi_max, mpi_sum
import baselines.common.tf_util as U
from baselines import logger
import numpy as np
import tensorflow as tf
from mpi4py import MPI
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,
    normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, logdir,
    popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
    tau=0.01, eval_env=None, param_noise_adaption_interval=50):
    """Run the DDPG training loop: for each epoch, alternate environment
    rollouts, gradient updates and (optionally) evaluation episodes, then
    log aggregated statistics across MPI workers."""
    rank = MPI.COMM_WORLD.Get_rank()
    assert (np.abs(env.action_space.low) == env.action_space.high).all()  # we assume symmetric actions.
    max_action = env.action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))
    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
        gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
        batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
        actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
        reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))
    # Set up logging stuff only for a single worker.
    if rank == 0:
        saver = tf.train.Saver()
    else:
        saver = None
    step = 0
    episode = 0
    eval_episode_rewards_history = deque(maxlen=100)
    episode_rewards_history = deque(maxlen=100)
    with U.single_threaded_session() as sess:
        # Prepare everything.
        agent.initialize(sess)
        sess.graph.finalize()
        agent.reset()
        obs = env.reset()
        if eval_env is not None:
            eval_obs = eval_env.reset()
        done = False
        episode_reward = 0.
        episode_step = 0
        episodes = 0
        t = 0
        epoch = 0
        start_time = time.time()
        epoch_episode_rewards = []
        epoch_episode_steps = []
        epoch_episode_eval_rewards = []
        epoch_episode_eval_steps = []
        epoch_start_time = time.time()
        epoch_actions = []
        epoch_qs = []
        epoch_episodes = 0
        for epoch in range(nb_epochs):
            for cycle in range(nb_epoch_cycles):
                # Perform rollouts.
                for t_rollout in range(nb_rollout_steps):
                    # Predict next action.
                    action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
                    assert action.shape == env.action_space.shape
                    # Execute next action.
                    if rank == 0 and render:
                        env.render()
                    assert max_action.shape == action.shape
                    new_obs, r, done, info = env.step(max_action * action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                    t += 1
                    if rank == 0 and render:
                        env.render()
                    episode_reward += r
                    episode_step += 1
                    # Book-keeping.
                    epoch_actions.append(action)
                    epoch_qs.append(q)
                    agent.store_transition(obs, action, r, new_obs, done)
                    obs = new_obs
                    if done:
                        # Episode done.
                        epoch_episode_rewards.append(episode_reward)
                        episode_rewards_history.append(episode_reward)
                        epoch_episode_steps.append(episode_step)
                        episode_reward = 0.
                        episode_step = 0
                        epoch_episodes += 1
                        episodes += 1
                        agent.reset()
                        obs = env.reset()
                # Train.
                epoch_actor_losses = []
                epoch_critic_losses = []
                epoch_adaptive_distances = []
                for t_train in range(nb_train_steps):
                    # Adapt param noise, if necessary.
                    if memory.nb_entries >= batch_size and t % param_noise_adaption_interval == 0:
                        distance = agent.adapt_param_noise()
                        epoch_adaptive_distances.append(distance)
                    cl, al = agent.train()
                    epoch_critic_losses.append(cl)
                    epoch_actor_losses.append(al)
                    agent.update_target_net()
                # Evaluate.
                eval_episode_rewards = []
                eval_qs = []
                if eval_env is not None:
                    eval_episode_reward = 0.
                    for t_rollout in range(nb_eval_steps):
                        eval_action, eval_q = agent.pi(eval_obs, apply_noise=False, compute_Q=True)
                        eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                        if render_eval:
                            eval_env.render()
                        eval_episode_reward += eval_r
                        eval_qs.append(eval_q)
                        if eval_done:
                            eval_obs = eval_env.reset()
                            eval_episode_rewards.append(eval_episode_reward)
                            eval_episode_rewards_history.append(eval_episode_reward)
                            eval_episode_reward = 0.
            # Log stats.
            epoch_train_duration = time.time() - epoch_start_time
            duration = time.time() - start_time
            stats = agent.get_stats()
            combined_stats = {}
            for key in sorted(stats.keys()):
                combined_stats[key] = mpi_mean(stats[key])
            # Rollout statistics.
            combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards)
            combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history))
            combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps)
            combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes)
            combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions)
            combined_stats['rollout/actions_std'] = mpi_std(epoch_actions)
            combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs)
            # Train statistics.
            combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses)
            combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses)
            combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances)
            # Evaluation statistics.
            if eval_env is not None:
                combined_stats['eval/return'] = mpi_mean(eval_episode_rewards)
                combined_stats['eval/return_history'] = mpi_mean(np.mean(eval_episode_rewards_history))
                combined_stats['eval/Q'] = mpi_mean(eval_qs)
                combined_stats['eval/episodes'] = mpi_mean(len(eval_episode_rewards))
            # Total statistics.
            combined_stats['total/duration'] = mpi_mean(duration)
            combined_stats['total/steps_per_second'] = mpi_mean(float(t) / float(duration))
            combined_stats['total/episodes'] = mpi_mean(episodes)
            combined_stats['total/epochs'] = epoch + 1
            combined_stats['total/steps'] = t
            for key in sorted(combined_stats.keys()):
                logger.record_tabular(key, combined_stats[key])
            logger.dump_tabular()
            logger.info('')
            # Persist environment state (rank 0 only) so runs can resume.
            if rank == 0 and logdir:
                if hasattr(env, 'get_state'):
                    with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
                        pickle.dump(env.get_state(), f)
                if eval_env and hasattr(eval_env, 'get_state'):
                    with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
                        pickle.dump(eval_env.get_state(), f)
|
learnercys/baselines
|
baselines/ddpg/training.py
|
Python
|
mit
| 8,411
|
# Publish the generated site: regenerate content, then force-add, commit
# and push everything to the master branch of the GitHub Pages repo.
import os
# os.chdir('./ClericPy.github.io')
os.system('python main.py')  # regenerate the static site
os.system(r'git add * -f')  # force-add all generated output
# NOTE: the commit message is Chinese for "routine update" (runtime string,
# left untranslated on purpose).
os.system(r'git commit -m "常规更新"')
os.system(r'git push origin master')
os.system('pause')  # Windows cmd builtin -- presumably run on Windows; keeps the console open
|
ClericPy/ClericPy.github.io
|
upload.py
|
Python
|
gpl-2.0
| 199
|
from django.contrib import admin
from manager.song.models import Song
from .models import Playlist
class SongInline(admin.StackedInline):
    """Stacked inline editor for a playlist's songs."""
    model = Song
    # Show five empty song forms on the playlist change page.
    extra = 5


@admin.register(Playlist)
class PlaylistAdmin(admin.ModelAdmin):
    """Admin for playlists with inline song editing."""
    inlines = [SongInline]
    list_display = ['name', 'user', 'created_at', 'updated_at']
|
chaocodes/playlist-manager-django
|
manager/playlist/admin.py
|
Python
|
mit
| 348
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import re
from dragon.common import exception
# Keys that may appear in a template parameter definition.  The double
# tuple-unpacking assignment binds each key string to a module constant.
PARAMETER_KEYS = (
    TYPE, DEFAULT, NO_ECHO, ALLOWED_VALUES, ALLOWED_PATTERN,
    MAX_LENGTH, MIN_LENGTH, MAX_VALUE, MIN_VALUE,
    DESCRIPTION, CONSTRAINT_DESCRIPTION
) = (
    'Type', 'Default', 'NoEcho', 'AllowedValues', 'AllowedPattern',
    'MaxLength', 'MinLength', 'MaxValue', 'MinValue',
    'Description', 'ConstraintDescription'
)
# Supported values for a parameter's 'Type' key.
PARAMETER_TYPES = (
    STRING, NUMBER, COMMA_DELIMITED_LIST, JSON
) = (
    'String', 'Number', 'CommaDelimitedList', 'Json'
)
# Pseudo parameters implicitly defined for every stack.
PSEUDO_PARAMETERS = (
    PARAM_STACK_ID, PARAM_STACK_NAME, PARAM_REGION
) = (
    'AWS::StackId', 'AWS::StackName', 'AWS::Region'
)
class ParamSchema(dict):
    '''Parameter schema.

    A dict subclass describing one template parameter (Type, Default,
    constraints, ...) with helpers to validate values against the
    declared constraints.
    '''

    def __init__(self, schema):
        super(ParamSchema, self).__init__(schema)

    def do_check(self, name, value, keys):
        '''Run every applicable constraint in *keys* against *value*.'''
        for k in keys:
            check = self.check(k)
            const = self.get(k)
            if check is None or const is None:
                # Either an unknown constraint key, or the constraint is
                # simply not declared in this schema -- skip it.
                continue
            check(name, value, const)

    def constraints(self):
        '''Return (scalar_keys, per_item_keys) applicable to this Type.'''
        ptype = self[TYPE]
        keys = {
            STRING: [ALLOWED_VALUES, ALLOWED_PATTERN, MAX_LENGTH, MIN_LENGTH],
            NUMBER: [ALLOWED_VALUES, MAX_VALUE, MIN_VALUE],
            JSON: [MAX_LENGTH, MIN_LENGTH]
        }.get(ptype)
        list_keys = {
            COMMA_DELIMITED_LIST: [ALLOWED_VALUES],
            JSON: [ALLOWED_VALUES]
        }.get(ptype)
        return (keys, list_keys)

    def validate(self, name, value):
        '''Validate *value* against this schema; raises ValueError on failure.'''
        (keys, list_keys) = self.constraints()
        if keys:
            self.do_check(name, value, keys)
        if list_keys:
            # List-like types check every element individually.
            values = value
            for value in values:
                self.do_check(name, value, list_keys)

    def raise_error(self, name, message, desc=True):
        '''Raise ValueError, preferring the schema's ConstraintDescription.'''
        if desc:
            message = self.get(CONSTRAINT_DESCRIPTION) or message
        raise ValueError('%s %s' % (name, message))

    def check_allowed_values(self, name, val, const, desc=None):
        vals = list(const)
        if val not in vals:
            err = '"%s" not in %s "%s"' % (val, ALLOWED_VALUES, vals)
            self.raise_error(name, desc or err)

    def check_allowed_pattern(self, name, val, p, desc=None):
        # The pattern must match the entire value, not just a prefix.
        m = re.match(p, val)
        if m is None or m.end() != len(val):
            err = '"%s" does not match %s "%s"' % (val, ALLOWED_PATTERN, p)
            self.raise_error(name, desc or err)

    def check_max_length(self, name, val, const, desc=None):
        max_len = int(const)
        val_len = len(val)
        if val_len > max_len:
            err = 'length (%d) overflows %s (%d)' % (val_len,
                                                     MAX_LENGTH, max_len)
            self.raise_error(name, desc or err)

    def check_min_length(self, name, val, const, desc=None):
        min_len = int(const)
        val_len = len(val)
        if val_len < min_len:
            err = 'length (%d) underflows %s (%d)' % (val_len,
                                                      MIN_LENGTH, min_len)
            self.raise_error(name, desc or err)

    def check_max_value(self, name, val, const, desc=None):
        max_val = float(const)
        val = float(val)
        if val > max_val:
            # Bug fix: use %s rather than %d so fractional values are not
            # silently truncated in the error message (val is a float here).
            err = '%s overflows %s %s' % (val, MAX_VALUE, max_val)
            self.raise_error(name, desc or err)

    def check_min_value(self, name, val, const, desc=None):
        min_val = float(const)
        val = float(val)
        if val < min_val:
            # Bug fix: %s instead of %d, as in check_max_value above.
            err = '%s underflows %s %s' % (val, MIN_VALUE, min_val)
            self.raise_error(name, desc or err)

    def check(self, const_key):
        '''Map a constraint key to its check method (None if unknown).'''
        return {ALLOWED_VALUES: self.check_allowed_values,
                ALLOWED_PATTERN: self.check_allowed_pattern,
                MAX_LENGTH: self.check_max_length,
                MIN_LENGTH: self.check_min_length,
                MAX_VALUE: self.check_max_value,
                MIN_VALUE: self.check_min_value}.get(const_key)
class Parameter(object):
    '''A template parameter.'''

    def __new__(cls, name, schema, value=None, validate_value=True):
        '''Create a new Parameter of the appropriate type.'''
        if cls is not Parameter:
            # A concrete subclass was requested directly; construct normally.
            return super(Parameter, cls).__new__(cls)

        # Dispatch on the schema's declared Type.
        param_type = schema[TYPE]
        dispatch = {
            STRING: StringParam,
            NUMBER: NumberParam,
            COMMA_DELIMITED_LIST: CommaDelimitedListParam,
            JSON: JsonParam,
        }
        try:
            param_class = dispatch[param_type]
        except KeyError:
            raise ValueError('Invalid Parameter type "%s"' % param_type)
        return param_class(name, schema, value, validate_value)

    def __init__(self, name, schema, value=None, validate_value=True):
        '''
        Initialise the Parameter with a name, schema and optional
        user-supplied value.
        '''
        self.name = name
        self.schema = schema
        self.user_value = value

        if not validate_value:
            return
        # Validate the default (if declared) and the user value (if given);
        # a parameter with neither is a hard error.
        if self.has_default():
            self.validate(self.default())
        if self.user_value is not None:
            self.validate(self.user_value)
        elif not self.has_default():
            raise exception.UserParameterMissing(key=self.name)

    def value(self):
        '''Get the parameter value, optionally sanitising it for output.'''
        if self.user_value is not None:
            return self.user_value
        if self.has_default():
            return self.default()
        raise KeyError('Missing parameter %s' % self.name)

    def no_echo(self):
        '''
        Return whether the parameter should be sanitised in any output to
        the user.
        '''
        flag = self.schema.get(NO_ECHO, 'false')
        return str(flag).lower() == 'true'

    def description(self):
        '''Return the description of the parameter.'''
        return self.schema.get(DESCRIPTION, '')

    def has_default(self):
        '''Return whether the parameter has a default value.'''
        return DEFAULT in self.schema

    def default(self):
        '''Return the default value of the parameter.'''
        return self.schema.get(DEFAULT)

    def __str__(self):
        '''Return a string representation of the parameter'''
        # Resolve the value first so a missing parameter still raises,
        # even when NoEcho would mask the output.
        value = self.value()
        return '******' if self.no_echo() else str(value)
class NumberParam(Parameter):
    '''A template parameter of type "Number".'''

    def __int__(self):
        '''Integer view of the parameter value.'''
        return int(super(NumberParam, self).value())

    def __float__(self):
        '''Float view of the parameter value.'''
        return float(super(NumberParam, self).value())

    def validate(self, val):
        self.schema.validate(self.name, val)

    def value(self):
        # Prefer an integer when the raw value parses as one; fall back
        # to float for fractional inputs such as "3.5".
        raw = super(NumberParam, self).value()
        try:
            return int(raw)
        except ValueError:
            return float(raw)
class StringParam(Parameter):
    '''A template parameter of type "String".'''
    def validate(self, val):
        # Delegate to the schema's String constraints (AllowedValues,
        # AllowedPattern, MaxLength, MinLength).
        self.schema.validate(self.name, val)
class CommaDelimitedListParam(Parameter, collections.Sequence):
    '''A template parameter of type "CommaDelimitedList".'''
    # NOTE(review): collections.Sequence moved to collections.abc in
    # Python 3.3+; this module targets Python 2 (iteritems elsewhere).
    def __init__(self, name, schema, value=None, validate_value=True):
        super(CommaDelimitedListParam, self).__init__(name, schema, value,
                                                      validate_value)
        # Cache the parsed list.  A falsy user value (None or '') falls
        # back to the schema default before parsing.
        self.parsed = self.parse(self.user_value or self.default())
    def parse(self, value):
        '''Split a comma-delimited string into a list; falsy values pass through.'''
        try:
            if value:
                return value.split(',')
        except (KeyError, AttributeError) as err:
            message = 'Value must be a comma-delimited list string: %s'
            raise ValueError(message % str(err))
        return value
    def value(self):
        return self.parsed
    def __len__(self):
        '''Return the length of the list.'''
        return len(self.parsed)
    def __getitem__(self, index):
        '''Return an item from the list.'''
        return self.parsed[index]
    def validate(self, val):
        # Each parsed element is validated individually (see
        # ParamSchema.validate's list_keys handling).
        parsed = self.parse(val)
        self.schema.validate(self.name, parsed)
class JsonParam(Parameter, collections.Mapping):
    """A template parameter who's value is valid map."""
    def __init__(self, name, schema, value=None, validate_value=True):
        super(JsonParam, self).__init__(name, schema, value,
                                        validate_value)
        # Cache the parsed mapping; falsy user value falls back to default.
        self.parsed = self.parse(self.user_value or self.default())
    def parse(self, value):
        # Accept either a mapping (round-tripped through JSON to normalise
        # it) or a JSON string; falsy values pass through unchanged.
        try:
            val = value
            if isinstance(val, collections.Mapping):
                val = json.dumps(val)
            if val:
                return json.loads(val)
        except (ValueError, TypeError) as err:
            message = 'Value must be valid JSON: %s' % str(err)
            raise ValueError(message)
        return value
    def value(self):
        # Callers receive the JSON *string* form; mapping values are
        # serialised and cached back onto user_value.
        val = super(JsonParam, self).value()
        if isinstance(val, collections.Mapping):
            try:
                val = json.dumps(val)
                self.user_value = val
            except (ValueError, TypeError) as err:
                message = 'Value must be valid JSON'
                raise ValueError("%s: %s" % (message, str(err)))
        return val
    def __getitem__(self, key):
        return self.parsed[key]
    def __iter__(self):
        return iter(self.parsed)
    def __len__(self):
        return len(self.parsed)
    def validate(self, val):
        val = self.parse(val)
        self.schema.validate(self.name, val)
class Parameters(collections.Mapping):
    '''
    The parameters of a stack, with type checking, defaults &c. specified by
    the stack's template.
    '''
    def __init__(self, stack_name, tmpl, user_params=None, stack_id=None,
                 validate_value=True):
        '''
        Create the parameter container for a stack from the stack name and
        template, optionally setting the user-supplied parameter values.
        '''
        # Bug fix: the default used to be the mutable literal {}, which is
        # shared across calls; None now stands in for "no user params".
        if user_params is None:
            user_params = {}

        def parameters():
            # Pseudo parameters implicitly available to every stack.
            yield Parameter(PARAM_STACK_ID,
                            ParamSchema({TYPE: STRING,
                                         DESCRIPTION: 'Stack ID',
                                         DEFAULT: str(stack_id)}))
            if stack_name is not None:
                yield Parameter(PARAM_STACK_NAME,
                                ParamSchema({TYPE: STRING,
                                             DESCRIPTION: 'Stack Name',
                                             DEFAULT: stack_name}))
                yield Parameter(PARAM_REGION,
                                ParamSchema({TYPE: STRING,
                                             DEFAULT: 'ap-southeast-1',
                                             ALLOWED_VALUES:
                                             ['us-east-1',
                                              'us-west-1',
                                              'us-west-2',
                                              'sa-east-1',
                                              'eu-west-1',
                                              'ap-southeast-1',
                                              'ap-northeast-1']}))
            # NOTE: this generator is consumed after self.tmpl is assigned
            # below, so reading self.tmpl here is safe.
            schemata = self.tmpl.param_schemata().iteritems()
            for name, schema in schemata:
                value = user_params.get(name)
                yield Parameter(name, schema, value, validate_value)

        self.tmpl = tmpl
        self._validate_tmpl_parameters()
        self._validate(user_params)
        self.params = dict((p.name, p) for p in parameters())

    def __contains__(self, key):
        '''Return whether the specified parameter exists.'''
        return key in self.params

    def __iter__(self):
        '''Return an iterator over the parameter names.'''
        return iter(self.params)

    def __len__(self):
        '''Return the number of parameters defined.'''
        return len(self.params)

    def __getitem__(self, key):
        '''Get a parameter value.'''
        return self.params[key].value()

    def map(self, func, filter_func=lambda p: True):
        '''
        Map the supplied filter function onto each Parameter (with an
        optional filter function) and return the resulting dictionary.
        '''
        return dict((n, func(p))
                    for n, p in self.params.iteritems() if filter_func(p))

    def set_stack_id(self, stack_id):
        '''
        Set the AWS::StackId pseudo parameter value
        '''
        self.params[PARAM_STACK_ID].schema[DEFAULT] = stack_id

    def _validate(self, user_params):
        '''Reject user-supplied parameters the template does not declare.'''
        schemata = self.tmpl.param_schemata()
        for param in user_params:
            if param not in schemata:
                raise exception.UnknownUserParameter(key=param)

    def _validate_tmpl_parameters(self):
        '''Check that every template parameter definition is a mapping.'''
        param = None
        for key in self.tmpl.t.keys():
            if key == 'Parameters' or key == 'parameters':
                param = key
                break
        if param is not None:
            # Fix: index with the matched key explicitly; the original read
            # the loop variable `key`, which only incidentally equalled it.
            template_params = self.tmpl.t[param]
            for name, attrs in template_params.iteritems():
                if not isinstance(attrs, dict):
                    raise exception.InvalidTemplateParameter(key=name)
|
os-cloud-storage/openstack-workload-disaster-recovery
|
dragon/engine/parameters.py
|
Python
|
apache-2.0
| 14,040
|
# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import termios
import time
import tty
from os import isatty
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleTimeoutExceeded(Exception):
    """Raised when the configured pause duration elapses (via SIGALRM)."""
    pass


def timeout_handler(signum, frame):
    """SIGALRM handler: translate the alarm into AnsibleTimeoutExceeded."""
    raise AnsibleTimeoutExceeded()
class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    # Accepted task argument names; '' covers the bare `pause:` form.
    PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
    # Run once for the whole play rather than once per host.
    BYPASS_HOST_LOOP = True

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module

        Returns a result dict with start/stop/delta timing, the collected
        user_input (when prompting), and stdout describing the pause.
        '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        result.update(dict(
            changed = False,
            rc = 0,
            stderr = '',
            stdout = '',
            start = None,
            stop = None,
            delta = None,
        ))

        # Is 'args' empty, then this is the default prompted pause
        if self._task.args is None or len(self._task.args.keys()) == 0:
            prompt = "[%s]\nPress enter to continue:" % self._task.get_name().strip()

        # Are 'minutes' or 'seconds' keys that exist in 'args'?
        elif 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    seconds = int(self._task.args['seconds'])
                    duration_unit = 'seconds'
            except ValueError as e:
                result['failed'] = True
                result['msg'] = "non-integer value given for prompt duration:\n%s" % str(e)
                return result

        # Is 'prompt' a key in 'args'?
        elif 'prompt' in self._task.args:
            prompt = "[%s]\n%s:" % (self._task.get_name().strip(), self._task.args['prompt'])

        else:
            # I have no idea what you're trying to do. But it's so wrong.
            result['failed'] = True
            result['msg'] = "invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES)
            return result

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = str(datetime.datetime.now())
        result['user_input'] = ''

        fd = None
        old_settings = None
        try:
            if seconds is not None:
                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)
                # show the prompt
                display.display("Pausing for %d seconds" % seconds)
                display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
            else:
                display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            fd = None
            try:
                fd = self._connection._new_stdin.fileno()
            except ValueError:
                # someone is using a closed file descriptor as stdin
                pass
            if fd is not None:
                if isatty(fd):
                    old_settings = termios.tcgetattr(fd)
                    tty.setraw(fd)

                    # flush the buffer to make sure no previous key presses
                    # are read in below
                    termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
            while True:
                try:
                    if fd is not None:
                        key_pressed = self._connection._new_stdin.read(1)
                        # '\x03' is the raw-mode byte for ctrl+C.
                        if key_pressed == '\x03':
                            raise KeyboardInterrupt

                    if not seconds:
                        if fd is None or not isatty(fd):
                            display.warning("Not waiting from prompt as stdin is not interactive")
                            break
                        # read key presses and act accordingly
                        if key_pressed == '\r':
                            break
                        else:
                            result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    if seconds is not None:
                        # cancel the pending alarm before prompting
                        signal.alarm(0)
                    display.display("Press 'C' to continue the play or 'A' to abort \r"),
                    if self._c_or_a():
                        break
                    else:
                        raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin fd
            if not(None in (fd, old_settings)) and isatty(fd):
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
            result['stop'] = str(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)
            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        return result

    def _c_or_a(self):
        '''Block until the user presses 'c' (continue) or 'a' (abort).'''
        while True:
            key_pressed = self._connection._new_stdin.read(1)
            if key_pressed.lower() == 'a':
                return False
            elif key_pressed.lower() == 'c':
                return True
|
benjixx/ansible
|
lib/ansible/plugins/action/pause.py
|
Python
|
gpl-3.0
| 6,991
|
# $Id: vms_connect.py,v 1.3 2007/06/10 14:11:58 mggrant Exp $
#
# Xlib.support.vms_connect -- VMS-type display connection functions
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import re
import socket
from Xlib import error
display_re = re.compile(r'^([-a-zA-Z0-9._]*):([0-9]+)(\.([0-9]+))?$')
def get_display(display):
    """Parse a display string into (name, host, display_number, screen).

    Use dummy display if none is set.  We really should check
    DECW$DISPLAY instead, but that has to wait.
    """
    if display is None:
        return ':0.0', 'localhost', 0, 0

    match = re.match(r'^([-a-zA-Z0-9._]*):([0-9]+)(\.([0-9]+))?$', display)
    if not match:
        raise error.DisplayNameError(display)

    # Always return a host, since we don't have AF_UNIX sockets.
    host = match.group(1) or 'localhost'
    dno = int(match.group(2))
    screen = int(match.group(4)) if match.group(4) else 0

    return display, host, dno, screen
def get_socket(dname, host, dno):
    """Open a TCP connection to the X server for display *dno* on *host*.

    The X server for display N listens on TCP port 6000 + N.  Raises
    error.DisplayConnectionError (wrapping the socket error) on failure.
    """
    try:
        # Always use TCP/IP sockets. Later it would be nice to
        # be able to use DECNET or LOCAL connections.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, 6000 + dno))
    # Bug fix: the legacy `except socket.error, val` form is Python-2-only
    # syntax; `as` is valid on Python 2.6+ and Python 3.
    except socket.error as val:
        raise error.DisplayConnectionError(dname, str(val))

    return s
def get_auth(sock, dname, host, dno):
    """Return an empty (auth_name, auth_data) pair.

    VMS doesn't have xauth, so no authentication data is available.
    """
    return ('', '')
|
nvazquez/Turtlebots
|
plugins/xevents/Xlib/support/vms_connect.py
|
Python
|
mit
| 2,122
|
# -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
from kotori.version import __VERSION__
from pyramid.config import Configurator
def main(global_config, **settings):
    """This function returns a Pyramid WSGI application."""
    # Expose the package version to views/templates via the settings dict.
    settings['SOFTWARE_VERSION'] = __VERSION__
    config = Configurator(settings=settings)

    # Addons
    config.include('pyramid_jinja2')
    # http://docs.pylonsproject.org/projects/pyramid-jinja2/en/latest/#adding-or-overriding-a-renderer
    config.add_jinja2_renderer('.html')
    config.include('cornice')

    # Views and routes.  App assets are never cached (cache_max_age=0).
    # NOTE(review): cache_max_age is in *seconds*, so 60 * 24 is 24 minutes,
    # not one day -- confirm whether 60 * 60 * 24 was intended.
    config.add_static_view('static/app', 'static/app', cache_max_age=0)
    config.add_static_view('static/lib', 'static/lib', cache_max_age=60 * 24)
    config.add_route('index', '/')
    config.scan()
    return config.make_wsgi_app()
|
daq-tools/kotori
|
kotori/frontend/app.py
|
Python
|
agpl-3.0
| 843
|
import surrealism as s
import unittest
import random
import sys
class SurrealismUnittests(unittest.TestCase):
    """getsentence() unittests"""
    # NOTE(review): some tests call ``s.getsentence``/``s.getfault`` while
    # others call ``s.get_sentence``/``s.get_fault`` -- confirm which names
    # the surrealism module actually exports; the mismatch looks accidental.
    # NOTE(review): ``s.__get_table_limits`` is referenced from inside this
    # class body, so Python name-mangles it to
    # ``s._SurrealismUnittests__get_table_limits``; that attribute lookup
    # will fail unless the module defines that mangled name -- verify.
    # NOTE(review): ``self.assertRaises(TypeError, (s.getsentence(x)))``
    # calls the function immediately and passes its *result* to
    # assertRaises; the intended form is
    # ``self.assertRaises(TypeError, s.getsentence, x)``.
    def setUp(self):
        # Accept both unicode and str on Python 2; plain str on Python 3.
        self.variable_types = ''
        if sys.version_info[0] < 3:
            # noinspection PyUnresolvedReferences
            self.variable_types = (unicode, str)
        else:
            self.variable_types = str
    def tearDown(self):
        self.variable_types = ''
        pass
    def test_show_sentences_returns_a_dict(self):
        sentence_dict = s.show_sentences()
        self.assertIsInstance(sentence_dict, dict)
    # def test_show_sentences_returns_a_list_of_tuples(self):
    #     sentence_list = s.show_sentences()
    #     for tup in sentence_list:
    #         self.assertIsInstance(tup, tuple)
    def test_sentence_test_does_something(self):
        sentence_list = s.sentence_test()
        self.assertIsInstance(sentence_list, list)
    def test_sentence_test_returns_a_list_of_tuples(self):
        sentence_list = s.sentence_test()
        for tup in sentence_list:
            self.assertIsInstance(tup, tuple)
    def test_get_sentence_returns_a_unicode_string(self):
        sentence = s.get_sentence()
        self.assertIsInstance(sentence, self.variable_types)
    def test_get_sentence_returns_a_unicode_string_with_integer_upper_limit(self):
        # NOTE(review): name-mangled access -- see class-level note above.
        limits = s.__get_table_limits()
        upper_limit = limits['max_sen']
        sentence = s.get_sentence(upper_limit)
        self.assertIsInstance(sentence, self.variable_types)
    def test_get_sentence_returns_a_unicode_string_with_integer_lower_limit(self):
        lower_limit = 1
        sentence = s.get_sentence(lower_limit)
        self.assertIsInstance(sentence, self.variable_types)
    def test_get_sentence_with_a_random_integer(self):
        limits = s.__get_table_limits()
        upper_sentence_limit = limits['max_sen']
        sen_id = random.randint(1, upper_sentence_limit)
        sentence = s.get_sentence(sen_id)
        self.assertIsInstance(sentence, self.variable_types)
    def test_get_sentence_returns_a_unicode_string_over_integer_upper_limit(self):
        limits = s.__get_table_limits()
        over_limit = limits['max_sen'] + 1
        # NOTE(review): assertRaises misuse -- see class-level note above.
        self.assertRaises(TypeError, (s.getsentence(over_limit)))
    def test_get_sentence_returns_an_error_when_we_submit_a_string(self):
        _string = 'hello my name is bob'
        self.assertRaises(TypeError, (s.getsentence(_string)))
    def test_get_sentence_returns_an_error_when_we_submit_a_number_as_a_string(self):
        _string = '64'
        self.assertRaises(TypeError, (s.getsentence(_string)))
    def test_get_sentence_returns_an_error_when_we_submit_a_negative_number(self):
        _number = -1
        self.assertRaises(TypeError, (s.getsentence(_number)))
    def test_get_sentence_handles_it_when_we_submit_a_float(self):
        _number = 98.9
        sentence = s.get_sentence(_number)
        print(sentence)
        self.assertIsInstance(sentence, self.variable_types)
    def test_that_no_hashed_keywords_remain_in_sentence(self):
        # Template placeholders that must all be substituted away.
        keywords = ['#VERB', '#NOUN', '#ADJECTIVE', '#NAME',
                    '#ADJECTIVE_MAYBE', '#AN', '#RANDOM', '#CAPITALISE', '#CAPALL']
        sentence = s.get_sentence()
        for keyword in keywords:
            self.assertNotIn(keyword, sentence)
    def test_that_repeating_elements_actually_are_replaced(self):
        _number = 47
        sentence = s.get_sentence(_number)
        self.assertIsInstance(sentence, self.variable_types)
        keywords = ['#REPEAT', '#DEFINE_REPEAT']
        sentence = s.get_sentence()
        for keyword in keywords:
            self.assertNotIn(keyword, sentence)
    # noinspection PyStatementEffect
    """getfault() unit tests"""
    def test_show_faults_returns_a_list(self):
        fault_list = s.show_faults()
        self.assertIsInstance(fault_list, list)
    def test_show_faults_returns_a_list_of_tuples(self):
        fault_list = s.show_faults()
        for tup in fault_list:
            self.assertIsInstance(tup, tuple)
    def test_fault_test_returns_a_list(self):
        fault_list = s.fault_test()
        self.assertIsInstance(fault_list, list)
    def test_fault_test_returns_a_list_of_tuples(self):
        fault_list = s.fault_test()
        for tup in fault_list:
            self.assertIsInstance(tup, tuple)
    def test_get_fault_returns_a_unicode_string(self):
        fault = s.get_fault()
        self.assertIsInstance(fault, self.variable_types)
    def test_get_fault_returns_a_unicode_string_with_integer_upper_limit(self):
        limits = s.__get_table_limits()
        upper_limit = limits['max_fau']
        fault = s.get_fault(upper_limit)
        self.assertIsInstance(fault, self.variable_types)
    def test_get_fault_returns_a_unicode_string_with_integer_lower_limit(self):
        lower_limit = 1
        fault = s.get_fault(lower_limit)
        self.assertIsInstance(fault, self.variable_types)
    def test_get_fault_with_a_random_integer(self):
        limits = s.__get_table_limits()
        upper_fault_limit = limits['max_fau']
        fau_id = random.randint(1, upper_fault_limit)
        fault = s.get_fault(fau_id)
        self.assertIsInstance(fault, self.variable_types)
    def test_get_fault_returns_a_unicode_string_over_integer_upper_limit(self):
        limits = s.__get_table_limits()
        over_limit = limits['max_fau'] + 1
        # NOTE(review): calls getsentence in a get_fault test -- likely
        # another copy-paste slip.
        self.assertRaises(TypeError, (s.getsentence(over_limit)))
    def test_get_fault_returns_an_error_when_we_submit_a_string(self):
        _string = 'hello my name is bob'
        self.assertRaises(TypeError, (s.getfault(_string)))
    def test_get_fault_returns_an_error_when_we_submit_a_number_as_a_string(self):
        _string = '64'
        self.assertRaises(TypeError, (s.getfault(_string)))
    def test_get_fault_returns_an_error_when_we_submit_a_negative_number(self):
        _number = -1
        self.assertRaises(TypeError, (s.getfault(_number)))
    def test_get_fault_handles_it_when_we_submit_a_float(self):
        _number = 98.9
        fault = s.get_fault(_number)
        print(fault)
        self.assertIsInstance(fault, self.variable_types)
    def test_that_no_hashed_keywords_remain_in_fault(self):
        keywords = ['#VERB', '#NOUN', '#ADJECTIVE', '#NAME',
                    '#ADJECTIVE_MAYBE', '#AN', '#RANDOM', '#CAPITALISE', '#CAPALL']
        fault = s.getfault()
        for keyword in keywords:
            self.assertNotIn(keyword, fault)
|
Morrolan/surrealism
|
test_surrealism.py
|
Python
|
gpl-3.0
| 6,547
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import json
from django.urls import reverse
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_watch_task(client):
    """POSTing to the watch endpoint as a project admin returns 200."""
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-watch", args=(task.id,))
    client.login(user)
    response = client.post(url)
    assert response.status_code == 200
def test_unwatch_task(client):
    """POSTing to the unwatch endpoint as a project admin returns 200.

    Bug fix: this test previously POSTed to ``tasks-watch`` (copy-paste
    from test_watch_task), so the unwatch endpoint was never exercised.
    """
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-unwatch", args=(task.id,))
    client.login(user)
    response = client.post(url)
    assert response.status_code == 200
def test_list_task_watchers(client):
    """The watchers list endpoint returns the watching user."""
    user = f.UserFactory.create()
    task = f.TaskFactory(owner=user)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    f.WatchedFactory.create(content_object=task, user=user)
    url = reverse("task-watchers-list", args=(task.id,))
    client.login(user)
    response = client.get(url)
    assert response.status_code == 200
    assert response.data[0]['id'] == user.id
def test_get_task_watcher(client):
    """The watcher detail endpoint returns the single watching user."""
    user = f.UserFactory.create()
    task = f.TaskFactory(owner=user)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    watch = f.WatchedFactory.create(content_object=task, user=user)
    url = reverse("task-watchers-detail", args=(task.id, watch.user.id))
    client.login(user)
    response = client.get(url)
    assert response.status_code == 200
    assert response.data['id'] == watch.user.id
def test_get_task_watchers(client):
    """Task detail exposes the watchers list and total_watchers count."""
    user = f.UserFactory.create()
    task = f.TaskFactory(owner=user)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-detail", args=(task.id,))
    f.WatchedFactory.create(content_object=task, user=user)
    client.login(user)
    response = client.get(url)
    assert response.status_code == 200
    assert response.data['watchers'] == [user.id]
    assert response.data['total_watchers'] == 1
def test_get_task_is_watcher(client):
    """Watching/unwatching toggles is_watcher and the watchers list."""
    # NOTE(review): comparisons like `== False` would be flagged by
    # flake8 E712; `is False` is the preferred idiom.
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url_detail = reverse("tasks-detail", args=(task.id,))
    url_watch = reverse("tasks-watch", args=(task.id,))
    url_unwatch = reverse("tasks-unwatch", args=(task.id,))
    client.login(user)
    # Initially nobody watches the task.
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watcher'] == False
    # After watching, the current user appears as a watcher.
    response = client.post(url_watch)
    assert response.status_code == 200
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == [user.id]
    assert response.data['is_watcher'] == True
    # After unwatching, the state returns to the initial one.
    response = client.post(url_unwatch)
    assert response.status_code == 200
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watcher'] == False
def test_remove_task_watcher(client):
    """PATCHing an empty watchers list removes the existing watcher."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create()
    task = f.TaskFactory(project=project,
                         user_story=None,
                         status__project=project,
                         milestone__project=project)
    task.add_watcher(user)
    # A plain member (not admin) with modify/view permissions can unwatch
    # via the generic detail endpoint.
    role = f.RoleFactory.create(project=project, permissions=['modify_task', 'view_tasks'])
    f.MembershipFactory.create(project=project, user=user, role=role)
    url = reverse("tasks-detail", args=(task.id,))
    client.login(user)
    # `version` is required for optimistic-concurrency checks on PATCH.
    data = {"version": task.version, "watchers": []}
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watcher'] == False
|
taigaio/taiga-back
|
tests/integration/test_watch_tasks.py
|
Python
|
agpl-3.0
| 4,832
|
# Copyright (C) 2017 Leandro Lisboa Penz <lpenz@lpenz.org>
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
'''Collection of reporter classes'''
from collections import OrderedDict
import json
class Reporter(object):
    """Base reporter: counts reported errors and supports ``with``."""

    def __init__(self):
        self.num_errors = 0

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        pass

    def report(self, error):
        """Record one *error*; subclasses extend this to emit output."""
        self.num_errors += 1


class ReporterGcc(Reporter):
    '''Default reporter object with human-readable line-oriented
    format (gcc)'''

    def __init__(self, fd):
        super(ReporterGcc, self).__init__()
        self.fd = fd

    def report(self, error):
        super(ReporterGcc, self).report(error)
        self.fd.write(error.gcc_style())
        self.fd.write('\n')


class ReporterJsonList(Reporter):
    '''Reporter that writes a json list of error dictionaries'''

    def __init__(self, fd):
        super(ReporterJsonList, self).__init__()
        self.fd = fd
        self._first = True  # no entry written yet; controls comma placement

    def __enter__(self):
        self.fd.write('[\n')
        # Bug fix: previously returned None, so `with ReporterJsonList(fd)
        # as r:` bound r to None.
        return self

    def __exit__(self, type, value, traceback):
        if not self._first:
            self.fd.write('\n')
        self.fd.write(']\n')

    def report(self, error):
        super(ReporterJsonList, self).report(error)
        # Bug fix: separate entries with commas so the emitted document is
        # valid JSON when more than one error is reported.
        if self._first:
            self._first = False
        else:
            self.fd.write(',\n')
        self.fd.write(' ' * 2)
        json.dump(OrderedDict(error), self.fd)


class ReporterList(Reporter):
    '''Reporter that stores the errors in an in-memory list'''

    def __init__(self):
        super(ReporterList, self).__init__()
        self.list = []

    def report(self, error):
        super(ReporterList, self).report(error)
        self.list.append(OrderedDict(error))
        return self.list


# Registry mapping CLI format names to reporter classes.
REPORTERS = {
    'gcc': ReporterGcc,
    'json-list': ReporterJsonList,
}
|
lpenz/omnilint
|
container/omnilint/reporters.py
|
Python
|
mit
| 1,800
|
# --- Day 16: Aunt Sue ---
#
# Your Aunt Sue has given you a wonderful gift, and you'd like to send her a thank you card. However, there's a small
# problem: she signed it "From, Aunt Sue".
#
# You have 500 Aunts named "Sue".
#
# So, to avoid sending the card to the wrong person, you need to figure out which Aunt Sue (which you conveniently
# number 1 to 500, for sanity) gave you the gift. You open the present and, as luck would have it, good ol' Aunt Sue
# got you a My First Crime Scene Analysis Machine! Just what you wanted. Or needed, as the case may be.
#
# The My First Crime Scene Analysis Machine (MFCSAM for short) can detect a few specific compounds in a given sample,
# as well as how many distinct kinds of those compounds there are. According to the instructions, these are what the
# MFCSAM can detect:
#
# children, by human DNA age analysis.
# cats. It doesn't differentiate individual breeds.
# Several seemingly random breeds of dog: samoyeds, pomeranians, akitas, and vizslas.
# goldfish. No other kinds of fish.
# trees, all in one group.
# cars, presumably by exhaust or gasoline or something.
# perfumes, which is handy, since many of your Aunts Sue wear a few kinds.
#
# In fact, many of your Aunts Sue have many of these. You put the wrapping from the gift into the MFCSAM. It beeps
# inquisitively at you a few times and then prints out a message on ticker tape:
#
# children: 3
# cats: 7
# samoyeds: 2
# pomeranians: 3
# akitas: 0
# vizslas: 0
# goldfish: 5
# trees: 3
# cars: 2
# perfumes: 1
#
# You make a list of the things you can remember about each Aunt Sue. Things missing from your list aren't zero - you
# simply don't remember the value.
#
# What is the number of the Sue that got you the gift?
#
# --Part 2--
# As you're about to send the thank you note, something in the MFCSAM's instructions catches your eye. Apparently, it
# has an outdated retroencabulator, and so the output from the machine isn't exact values - some of them indicate
# ranges.
#
# In particular, the cats and treesreadings indicates that there aregreater than that many (due to the unpredictable
# nuclear decay of cat dander and tree pollen), while the pomeranians and goldfish readings indicate that there are
# fewer than that many (due to the modial interaction of magnetoreluctance).
def part1_matches(items):
    """Return True when every remembered attribute matches the ticker tape.

    *items* maps attribute name -> remembered count. Attributes missing
    from *items* are unknown (not zero), so only the remembered ones are
    compared.
    """
    shared_items = set(items.items()) & set(ticker_tape.items())
    # Generalized: every remembered item must agree (the original required
    # a hard-coded count of exactly 3, which only worked for 3-item Sues).
    return len(shared_items) == len(items)


def part2_matches(items):
    """Part-2 rules: 'cats'/'trees' readings are lower bounds and
    'pomeranians'/'goldfish' readings are upper bounds; everything else
    must match the ticker tape exactly.
    """
    matches = 0
    for attr in items:  # bug fix: iterated the global sue_items, not the argument
        if attr == "cats" or attr == "trees":  # bug fix: was "tree", never matching
            if ticker_tape[attr] < items[attr]:
                matches += 1
        elif attr == "pomeranians" or attr == "goldfish":
            if ticker_tape[attr] > items[attr]:
                matches += 1
        else:
            if ticker_tape[attr] == items[attr]:
                matches += 1
    # Generalized from the hard-coded 3, as in part1_matches.
    return matches == len(items)


# Readings printed by the MFCSAM for the gift wrapping.
ticker_tape = {
    "children": 3,
    "cats": 7,
    "samoyeds": 2,
    "pomeranians": 3,
    "akitas": 0,
    "vizslas": 0,
    "goldfish": 5,
    "trees": 3,
    "cars": 2,
    "perfumes": 1
}
# Scan the 500 Sues; each input line looks like:
#   "Sue 12: children: 10, cars: 6, vizslas: 5"
part1_answer = 0
part2_answer = 0
for line in open("day16_input").readlines():
    # Aunt number: the text between "Sue " and the first colon.
    sue_no = int(line.split(":")[0][4:])
    # Rebuild the remembered attributes as a dict literal and eval it.
    # NOTE(review): eval on file contents is unsafe in general; tolerable
    # for a trusted local puzzle input, but a real parser would be safer.
    sue_items = eval("{\"" + "".join(line.split(" ")[2:]).replace(":", "\":").replace(",", ",\"") + "}")
    if part1_answer == 0 and part1_matches(sue_items):
        part1_answer = sue_no
    if part2_answer == 0 and part2_matches(sue_items):
        part2_answer = sue_no
    # Stop early once both parts have been answered.
    if part1_answer != 0 and part2_answer != 0:
        break
print("Part 1 = {0}".format(part1_answer))
print("Part 2 = {0}".format(part2_answer))
|
hubbardgary/AdventOfCode
|
day16.py
|
Python
|
mit
| 3,655
|
#!/usr/bin/env python3
__all__ = [
    'get_client', 'Client', 'ThriftServer', 'Struct', 'BadEnum', 'Error',
    'ApplicationError', 'TransportError', 'SSLPolicy',
]

# Each group of names below is optional: when its extension module is not
# built/installed, the ImportError is swallowed and those names are
# filtered out of the public API instead of being exported.
try:
    from thrift.py3.client import get_client, Client
except ImportError:
    __all__ = [name for name in __all__ if name not in ('get_client', 'Client')]

try:
    from thrift.py3.server import ThriftServer, SSLPolicy
except ImportError:
    __all__ = [name for name in __all__ if name not in ('ThriftServer', 'SSLPolicy')]

try:
    from thrift.py3.types import Struct, BadEnum
except ImportError:
    __all__ = [name for name in __all__ if name not in ('Struct', 'BadEnum')]

try:
    from thrift.py3.exceptions import Error, ApplicationError, TransportError
except ImportError:
    __all__ = [name for name in __all__ if name not in
               ('Error', 'ApplicationError', 'TransportError')]
|
SergeyMakarenko/fbthrift
|
thrift/lib/py3/__init__.py
|
Python
|
apache-2.0
| 800
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import os.path
import signal
import sys
import time
import tornado
import tornado.web
import tornado.locale
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tornado.options import define, options
# Bootstrap: when run directly from appstack/application, the package root
# is not yet on sys.path; append it so "import appstack" resolves.
try:
    import appstack
except ImportError:
    APPSDIR = os.path.realpath("..")  # /appstack/application
    BASEDIR = os.path.join(APPSDIR, "..")  # /appstack
    sys.path.append(BASEDIR)  # /appstack
    import appstack
import appstack.applications
import appstack.database
import appstack.libraries
import appstack.settings
import appstack.vendor
from appstack.applications import controllers
# Local wrappers around the cache (Redis) and database (SQLAlchemy) drivers.
from appstack.libraries import (
    cache as redis,
    database as sqlalchemy
)
# from appstack.database import schema, seeds
# from appstack.database import schema, seeds
# --- const vars ---
APPSDIR = os.path.realpath("..")  # /appstack/application
BASEDIR = os.path.join(APPSDIR, "..")  # /appstack

# --- default settings ---
define("cache_driver", default="default")  # Redis
define("cache_host", default="localhost")
define("cache_name")
define("cache_port", default=6379)  # for Redis
define("cookie_secret")
define("database_driver", default="postgresql")  # Posgresql
define("database_host", default="localhost")
define("database_name")
define("database_password")
define("database_port", default=5432)  # for Posgresql
define("database_username")
define("debug", default=True, type=bool, help="global debug flag for debug options")
define("port", default=8000, type=int, help="run backend server on the given port")
define("processor", default=1, type=int, help="run backend server with the processors")
define("settings")
define("static_path")
define("template_path")
define("xsrf_cookies")

# NOTE: the two URL defaults below are assembled once, at import time, from
# the option *defaults* above; overriding e.g. --cache_host later does not
# refresh them -- pass --cache/--database explicitly instead.
# cache: driver://host:port/cache
define("cache", default=options.cache_driver + "://"
       + options.cache_host + ":"
       + str(options.cache_port) + "/"
       # bug fix: cache_name defaults to None, and "str + None" raised a
       # TypeError at import time; fall back to an empty name.
       + (options.cache_name or ""),
       type=str, help="cache connections urls")
# database: dialect+driver://username:password@host:port/database
define("database", default=options.database_driver + "://"
       # bug fix: username/password/name also default to None; guard each
       # one the same way as cache_name above.
       + (options.database_username or "") + ":"
       + (options.database_password or "") + "@"
       + options.database_host + ":"
       + str(options.database_port) + "/"
       + (options.database_name or "") + "?charset=utf-8",
       type=str, help="database connections urls")

# --- global vars ---
# NOTE(review): both branches of this conditional are identical; the
# non-debug branch presumably should configure pooling like the database
# expression below -- confirm intent before changing behavior.
cache = redis.Redis(options.cache).instance() \
        if options.debug else redis.Redis(options.cache).instance()
# Debug: plain engine; production: pooled connections recycled every 2h.
database = sqlalchemy.SQLAlchemy(options.database) \
        if options.debug else \
        sqlalchemy.SQLAlchemy(options.database, pool_size=10, pool_recycle=7200)
# --- application ---
class Application(tornado.web.Application):
    """Tornado application: wires URL handlers to the shared service clients."""

    def __init__(self):
        handlers = [
            (r'/', controllers.IndexController),
        ]
        settings = dict(
            cookie_secret=options.cookie_secret,
            debug=options.debug or False,
            static_path=options.static_path or os.path.join(os.path.dirname(__file__), "assets"),
            template_path=options.template_path or os.path.join(os.path.dirname(__file__), "views"),
            xsrf_cookies=options.xsrf_cookies or True,
        )
        super(Application, self).__init__(handlers, **settings)
        # --- global application vars ---
        # Shared clients, reachable from handlers via self.application.
        self.cache = cache  # redis
        self.database = database  # postgresql
# --- main ---
def main():
    """Parse options, start the HTTP server, and run the IOLoop until a
    SIGINT/SIGTERM triggers a graceful shutdown."""
    tornado.options.parse_command_line()
    # settings
    if options.settings:
        tornado.options.parse_config_file(options.settings)
    else:
        tornado.options.parse_config_file(os.path.join(APPSDIR, 'settings', '__init__.py'))
    # i18n translations
    # tornado.locale.load_translations(settings.TRANSLATIONDIR)
    # tornado.locale.get_supported_locales()
    # tornado.locale.set_supported_locales("en_US")
    # httpserver
    httpserver = tornado.httpserver.HTTPServer(Application(), xheaders=True)
    httpserver.bind(options.port, "127.0.0.1")
    # Forks one process per requested processor.
    httpserver.start(options.processor or int(1))
    # WARNING: this timestamp must equal to supervisord.readear.conf stopwaitsecs = 10
    # WARNING: if not or less, the server will be killed by supervisord before max_wait_seconds_before_shutdown
    if options.debug:
        MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 0
    else:
        MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 10

    # signal handler
    def sig_handler(sig, frame):
        # Runs in signal context: do nothing here except schedule the real
        # shutdown on the IOLoop thread.
        logging.warning("Catching Signal: %s", sig)
        tornado.ioloop.IOLoop.instance().add_callback(shutdown)

    # signal handler's callback
    def shutdown():
        # Stop accepting new connections, then drain pending work before
        # stopping the loop.
        logging.info("Stopping HttpServer...")
        httpserver.stop()  # No longer accept new http traffic
        logging.info("IOLoop Will be Terminate in %s Seconds...",
                     MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
        instance = tornado.ioloop.IOLoop.instance()
        deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN

        # recursion for terminate IOLoop.instance()
        def terminate():
            # NOTE(review): _callbacks/_timeouts are private IOLoop fields;
            # this relies on Tornado internals -- confirm on version upgrades.
            now = time.time()
            if now < deadline and (instance._callbacks or instance._timeouts):
                instance.add_timeout(now + 1, terminate)
            else:
                instance.stop()  # After process all _callbacks and _timeouts, break IOLoop.instance()
                logging.info('Shutdown...')
        # process recursion
        terminate()

    # signal register
    signal.signal(signal.SIGINT, sig_handler)
    signal.signal(signal.SIGTERM, sig_handler)
    # start ioloop for socket, infinite before catch signal
    tornado.ioloop.IOLoop.instance().start()
    logging.info("Exit...")


if __name__ == '__main__':
    main()
|
mywaiting/appstack
|
appstack/applications/application.py
|
Python
|
mit
| 5,267
|
from typing import Optional
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.schedules.schedule import Schedule
from ray.rllib.utils.typing import TensorType
torch, _ = try_import_torch()
class ExponentialSchedule(Schedule):
    """Exponential decay schedule.

    Produces ``initial_p * decay_rate ** (t / schedule_timesteps)``.

    Note: there is no clamping in ``_value`` -- the output keeps decaying
    past ``schedule_timesteps`` (at which point it equals
    ``initial_p * decay_rate``).
    """

    def __init__(
        self,
        schedule_timesteps: int,
        framework: Optional[str] = None,
        initial_p: float = 1.0,
        decay_rate: float = 0.1,
    ):
        """Initializes a ExponentialSchedule instance.

        Args:
            schedule_timesteps: Time scale of the decay: at
                ``t == schedule_timesteps`` the output equals
                ``initial_p * decay_rate``. Must be > 0.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
            initial_p: Initial output value (at t=0).
            decay_rate: The fraction of the original value after
                100% of the time has been reached (see formula above).
                >0.0: The smaller the decay-rate, the stronger the decay.
                1.0: No decay at all.
        """
        super().__init__(framework=framework)
        assert schedule_timesteps > 0
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.decay_rate = decay_rate

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        """Returns the result of: initial_p * decay_rate ** (`t`/t_max)."""
        # Cast to float so the exponent t / schedule_timesteps is not
        # truncated by integer division on integer tensors.
        if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
            t = t.float()
        return self.initial_p * self.decay_rate ** (t / self.schedule_timesteps)
|
ray-project/ray
|
rllib/utils/schedules/exponential_schedule.py
|
Python
|
apache-2.0
| 1,807
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefine Case.modified_by as a
    # nullable FK to the configured user model (related_name
    # "case_modified", CASCADE on user deletion).

    dependencies = [("cases", "0014_auto_20150312_2338")]

    operations = [
        migrations.AlterField(
            model_name="case",
            name="modified_by",
            field=models.ForeignKey(
                related_name="case_modified",
                on_delete=models.CASCADE,
                to=settings.AUTH_USER_MODEL,
                null=True,
            ),
            preserve_default=True,
        )
    ]
|
watchdogpolska/poradnia.siecobywatelska.pl
|
poradnia/cases/migrations/0015_auto_20150312_2340.py
|
Python
|
bsd-3-clause
| 551
|
#!/usr/bin/python
import numpy as np
from numpy import pi, cos, sin, exp, conj
from warnings import warn
import epg
import time
import sys
import scipy.io
class PulseTrain:
def __init__(self, state_file, T, TE, TR, loss_fun, loss_fun_prime, angles_rad=None, phase_rad=None, verbose=False, step=.01, max_iter=100):
self.state_file = state_file
self.T = T
self.TE = TE
self.TR = TR
self.loss_fun = loss_fun
self.loss_fun_prime = loss_fun_prime
self.max_iter = max_iter
self.step = step
self.verbose = verbose
self.excitation_dict = None
self.inversion_dict = None
self.reset()
if angles_rad is not None:
self.set_angles_rad(angles_rad, phase_rad)
def set_angles_rad(self, angles_rad, pase_rad):
T = len(angles_rad)
if T < self.T:
self.angles_rad = np.hstack((angles_rad, np.zeros((self.T-T))))
self.phase_rad = np.hstack((phase_rad, np.zeros((self.T-T))))
self.phase_rad[:2] = np.pi/2
else:
self.angles_rad = angles_rad[:self.T]
self.phase_rad = phase_rad[:self.T]
def reset(self):
self.angles_rad = DEG2RAD(50 + (120 - 50) * np.random.rand(self.T))
self.phase_rad = np.zeros((self.T,))
self.loss = []
def save_state(self, filename=None):
state = {
'angles_rad': self.angles_rad,
'phase_rad': self.phase_rad,
'loss': self.loss,
'max_iter': self.max_iter,
'step': self.step,
'T': self.T,
'TE': self.TE,
'verbose': self.verbose,
}
if filename is None:
scipy.io.savemat(self.state_file, state, appendmat=False)
else:
scipy.io.savemat(filename, state, appendmat=False)
def load_state(self, filename=None):
if filename is None:
state = scipy.io.loadmat(self.state_file)
else:
state = scipy.io.loadmat(filename)
self.angles_rad = state['angles_rad'].ravel()
self.phase_rad = state['phase_rad'].ravel()
self.loss = list(state['loss'].ravel())
self.max_iter = state['max_iter'].ravel()[0]
self.step = state['step'].ravel()[0]
self.T = state['T'].ravel()[0]
self.TE = state['TE'].ravel()[0]
self.verbose = state['verbose'].ravel()[0]
def train(self, theta1, theta2):
for i in range(self.max_iter):
#angles_prime, angle_e_prime, angle_TI_prime = self.loss_fun_prime(theta1, theta2, self.angles_rad, self.phase_rad, self.TE, self.TR, self.excitation_dict, self.inversion_dict)
angles_prime = self.loss_fun_prime(theta1, theta2, self.angles_rad, self.phase_rad, self.TE, self.TR)
self.angles_rad = self.angles_rad + self.step * angles_prime
#if self.excitation_dict != None:
#self.anlge_e = self.angle_e + self.step * angle_e_prime
#self.angle_TI = self.angle_TI + self.step * angle_TI_prime
self.loss.append(self.loss_fun(theta1, theta2, self.angles_rad, self.phase_rad, self.TE, self.TR))
str = '%d\t%3.3f' % (i, self.loss[-1])
self.print_verbose(str)
def print_verbose(self, str):
if self.verbose:
print str, RAD2DEG(self.angles_rad)
def plot_vals(self, thetas):
plt.subplot(2,1,1)
plt.plot(range(self.T), RAD2DEG(self.angles_rad), 'o-')
plt.xlim((0,self.T))
plt.subplot(2,1,2)
for theta in thetas:
plt.plot(range(self.T), epg.FSE_signal(self.angles_rad, self.phase_rad, self.TE, theta['T1'], theta['T2']))
plt.xlim((0,self.T))
plt.ylim((0,1))
def forward(self, theta):
return epg.FSE_signal(self.angles_rad, self.phase_rad, TE, theta['T1'], theta['T2']).ravel()
def loss(theta1, theta2, angles_rad, phase_rad, TE, TR):
    """Separation loss for two tissues.

    Equals 0.5*||x1||^2 + 0.5*||x2||^2 minus the symmetrized inner product
    of x1 and x2, where x1/x2 are the FSE echo trains of the two tissues
    scaled by their T1 saturation-recovery factors.
    """
    T = len(angles_rad)
    sat1 = 1 - exp(-(TR - T * TE) / theta1['T1'])
    sat2 = 1 - exp(-(TR - T * TE) / theta2['T1'])
    x1 = sat1 * epg.FSE_signal(angles_rad, phase_rad, TE, theta1['T1'], theta1['T2'])
    x2 = sat2 * epg.FSE_signal(angles_rad, phase_rad, TE, theta2['T1'], theta2['T2'])
    energy = 0.5 * np.linalg.norm(x1, ord=2)**2 + 0.5 * np.linalg.norm(x2, ord=2)**2
    overlap = 0.5 * np.vdot(x1.ravel(), x2.ravel()) + 0.5 * np.vdot(x2.ravel(), x1.ravel())
    return energy - overlap
def normalized_loss(theta1, theta2, angles_rad, phase_rad, TE, TR):
    """Negative symmetrized correlation of the two unit-normalized signals.

    More negative means the two tissues' echo trains are more similar.
    """
    T = len(angles_rad)
    recovery1 = 1 - exp(-(TR - T * TE) / theta1['T1'])
    recovery2 = 1 - exp(-(TR - T * TE) / theta2['T1'])
    sig1 = epg.FSE_signal(angles_rad, phase_rad, TE, theta1['T1'], theta1['T2']) * recovery1
    sig2 = epg.FSE_signal(angles_rad, phase_rad, TE, theta2['T1'], theta2['T2']) * recovery2
    unit1 = sig1 / np.linalg.norm(sig1, ord=2)
    unit2 = sig2 / np.linalg.norm(sig2, ord=2)
    return -0.5 * (np.vdot(unit1.ravel(), unit2.ravel()) +
                   np.vdot(unit2.ravel(), unit1.ravel()))
def loss_prime(theta1, theta2, angles_rad, phase_rad, TE, TR, excitation_dict=None, inversion_dict=None):
    """Analytic gradient of `loss` with respect to each flip angle.

    Returns a length-T real array; entry i is d(loss)/d(angles_rad[i]).
    """
    T = len(angles_rad)
    sat1 = 1 - exp(-(TR - T * TE) / theta1['T1'])
    # Bug fix: the recovery factor for x2 used theta1's T1; `loss` (and the
    # numerical gradient check) use theta2's T1 for the second tissue.
    sat2 = 1 - exp(-(TR - T * TE) / theta2['T1'])
    x1 = epg.FSE_signal(angles_rad, phase_rad, TE, theta1['T1'], theta1['T2'],
            excitation_dict=excitation_dict, inversion_dict=inversion_dict).ravel() * sat1
    x2 = epg.FSE_signal(angles_rad, phase_rad, TE, theta2['T1'], theta2['T2'],
            excitation_dict=excitation_dict, inversion_dict=inversion_dict).ravel() * sat2

    alpha_prime = np.zeros((T,))
    for i in range(T):
        x1_prime = sig_prime_i(theta1, angles_rad, phase_rad, i).ravel() * sat1
        x2_prime = sig_prime_i(theta2, angles_rad, phase_rad, i).ravel() * sat2
        # Symmetrized products: derivative of the energies minus the
        # derivative of the cross terms (matches `loss`).
        M1 = 0.5 * (np.vdot(x1, x1_prime) + np.vdot(x1_prime, x1))
        M2 = 0.5 * (np.vdot(x2, x2_prime) + np.vdot(x2_prime, x2))
        M3 = 0.5 * (np.vdot(x2_prime, x1) + np.vdot(x1, x2_prime))
        M4 = 0.5 * (np.vdot(x1_prime, x2) + np.vdot(x2, x1_prime))
        alpha_prime[i] = np.real(M1 + M2 - M3 - M4)
    return alpha_prime
def sig_prime_i(theta, angles_rad, phase_rad, idx, excitation_dict=None, inversion_dict=None):
    """Derivative of the FSE echo train w.r.t. the idx-th flip angle.

    Propagates the EPG state normally up to echo idx, differentiates at
    echo idx, then propagates the derivative state through the remaining
    echoes. Returns a (T, 1) real array.

    NOTE(review): TE is read from module scope (set under __main__) --
    confirm before calling from a library context.
    """
    T1, T2 = get_params(theta)
    T = len(angles_rad)
    zi = np.hstack((np.array([[1],[1],[0]]), np.zeros((3, T))))
    z_prime = np.zeros((T, 1))
    for i in range(T):
        alpha = angles_rad[i]
        phi = phase_rad[i]
        if i < idx:
            zi = epg.FSE_TE(zi, alpha, phi, TE, T1, T2, noadd=True)
            z_prime[i] = 0
        elif i == idx:
            wi = epg.FSE_TE_prime(zi, alpha, phi, TE, T1, T2, noadd=True)
            z_prime[i] = np.real(wi[0,0])
        else:
            # Bug fix: arguments were passed as (alpha, TE, phi); every other
            # FSE_TE call in this module uses the order (alpha, phi, TE).
            wi = epg.FSE_TE(wi, alpha, phi, TE, T1, T2, noadd=True, recovery=False)
            z_prime[i] = np.real(wi[0,0])
    return z_prime
def get_params(theta):
    """Unpack the (T1, T2) relaxation times from a tissue parameter dict."""
    T1 = theta['T1']
    T2 = theta['T2']
    return T1, T2
def numerical_gradient(theta1, theta2, angles_rad, phase_rad, TE, TR):
    """Central-difference gradient of `loss` w.r.t. each flip angle.

    Used as a sanity check against the analytic `loss_prime`.
    """
    eps = 1e-5
    grad = np.zeros(angles_rad.shape)
    delta = np.zeros(angles_rad.shape)
    for p in range(len(angles_rad)):
        delta[p] = eps
        upper = loss(theta1, theta2, angles_rad + delta, phase_rad, TE, TR)
        lower = loss(theta1, theta2, angles_rad - delta, phase_rad, TE, TR)
        grad[p] = np.real(upper - lower) / (2 * eps)
        delta[p] = 0
    return grad
def DEG2RAD(angle):
    """Convert degrees to radians (works on scalars and ndarrays)."""
    return angle * np.pi / 180
def RAD2DEG(angle_rad):
    """Convert radians to degrees (works on scalars and ndarrays)."""
    return angle_rad * 180 / np.pi
def read_angles(fliptable):
    """Read one flip angle (in degrees) per line from *fliptable*.

    Returns a 1-D numpy array. The file handle is closed deterministically
    via a context manager (the original implementation leaked it).
    """
    with open(fliptable, 'r') as f:
        return np.array([float(line) for line in f])
def print_table(P1, P2, P3):
    """Print raw and normalized losses for three pulse trains side by side.

    NOTE(review): reads theta1, theta2, TE, TR from module scope; those
    names are only bound when this file runs as a script (__main__) --
    confirm before reusing elsewhere.
    """
    print
    print '\tP1\tP2\tP3\nloss\t%3.3f\t%3.3f\t%3.3f\nnloss\t%3.3f\t%3.3f\t%3.3f\n' % (
            loss(theta1, theta2, P1.angles_rad, P1.phase_rad, TE, TR),
            loss(theta1, theta2, P2.angles_rad, P2.phase_rad, TE, TR),
            loss(theta1, theta2, P3.angles_rad, P3.phase_rad, TE, TR),
            normalized_loss(theta1, theta2, P1.angles_rad, P1.phase_rad, TE, TR),
            normalized_loss(theta1, theta2, P2.angles_rad, P2.phase_rad, TE, TR),
            normalized_loss(theta1, theta2, P3.angles_rad, P3.phase_rad, TE, TR)
            )
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    np.set_printoptions(suppress=True, precision=3)

    # Sequence timing and relaxation parameters (seconds).
    T1 = 1000e-3
    T2 = 200e-3
    TE = 50e-3
    TI = 0.
    TR = 1.4

    # Number of echoes, optionally overridden from the command line.
    if len(sys.argv) > 1:
        T = int(sys.argv[1])
    else:
        T = 10

    angles = 150 * np.ones((T,))
    # Load a measured variable-flip-angle train; shrink T or truncate the
    # train so the two lengths agree.
    angles = read_angles('../data/flipangles.txt.408183520')
    TT = len(angles)
    if TT < T:
        T = TT
    else:
        angles = angles[:T]

    phases = np.zeros(angles.shape)
    angles_rad = DEG2RAD(angles)
    phase_rad = DEG2RAD(phases)

    S = epg.FSE_signal(angles_rad, phase_rad, TE, T1, T2)
    S2 = abs(S)

    # Two tissues to separate (same T1, short vs long T2).
    theta1 = {'T1': 1000e-3, 'T2': 20e-3}
    theta2 = {'T1': 1000e-3, 'T2': 100e-3}

    # Sanity check: timed comparison of numerical vs analytic gradient.
    t1 = time.time()
    NG = numerical_gradient(theta1, theta2, angles_rad, phase_rad, TE, TR)
    t2 = time.time()
    LP = loss_prime(theta1, theta2, angles_rad, phase_rad, TE, TR)
    t3 = time.time()
    NG_time = t2 - t1
    LP_time = t3 - t2

    print 'Numerical Gradient\t(%03.3f s)\t' % NG_time, NG
    print
    print 'Analytical Gradient\t(%03.3f s)\t' % LP_time, LP
    print
    print 'Error:', np.linalg.norm(NG - LP) / np.linalg.norm(NG)

    #plt.plot(TE*1000*np.arange(1, T+1), S2)
    #plt.xlabel('time (ms)')
    #plt.ylabel('signal')
    #plt.title('T1 = %.2f ms, T2 = %.2f ms' % (T1 * 1000, T2 * 1000))
    #plt.show()

    # Initial angles for P1: left as None so PulseTrain randomizes them.
    a = angles_rad
    #a = np.pi * np.ones((T,))
    a = None

    # Three competing initializations: random, constant 180 degrees, and
    # the measured variable-flip-angle train.
    P1 = PulseTrain('angles_rand.mat', T, TE, TR, loss, loss_prime, angles_rad=a, verbose=True)
    #P1.load_state()
    P2 = PulseTrain('angles_180.mat', T, TE, TR, loss, loss_prime, angles_rad=np.pi * np.ones((T,)), verbose=True)
    P3 = PulseTrain('angles_vfa.mat', T, TE, TR, loss, loss_prime, angles_rad=angles_rad, verbose=True)

    print_table(P1, P2, P3)
    # Optimize only the randomly-initialized train, then compare again.
    P1.train(theta1, theta2)
    print_table(P1, P2, P3)

    plt.figure(1)
    plt.clf()
    P1.plot_vals((theta1, theta2))
    plt.figure(2)
    plt.clf()
    P2.plot_vals((theta1, theta2))
    plt.figure(3)
    plt.clf()
    P3.plot_vals((theta1, theta2))
    plt.show()
# Flip-angle bounds (radians).
# NOTE(review): unused within this file -- presumably consumed by an
# external optimizer; confirm before removing.
MAX_ANGLE = DEG2RAD(120)
MIN_ANGLE = DEG2RAD(50)
|
jtamir/mri-sim-py
|
TODO/epg-prop_2spin.py
|
Python
|
mit
| 10,519
|
from bespin.errors import BadOption, MissingPlan
from input_algorithms.spec_base import NotSpecified
class Plan(object):
@classmethod
def find_stacks(kls, configuration, stacks, plan):
if plan in (None, NotSpecified):
raise BadOption("Please specify a plan", available=list(configuration["plans"].keys()))
if plan not in configuration["plans"]:
raise MissingPlan(wanted=plan, available=configuration["plans"].keys())
missing = []
for stack in configuration["plans"][plan]:
if stack not in stacks:
missing.append(stack)
if missing:
raise BadOption("Some stacks in the plan don't exist", missing=missing, available=list(stacks.keys()))
for stack in configuration["plans"][plan]:
yield stack
|
realestate-com-au/bespin
|
bespin/operations/plan.py
|
Python
|
mit
| 829
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'panconvert_diag_info.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Information_Dialog(object):
def setupUi(self, Information_Dialog):
Information_Dialog.setObjectName("Information_Dialog")
Information_Dialog.resize(707, 575)
self.gridLayout_2 = QtWidgets.QGridLayout(Information_Dialog)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.ButtonCancel = QtWidgets.QPushButton(Information_Dialog)
self.ButtonCancel.setObjectName("ButtonCancel")
self.gridLayout.addWidget(self.ButtonCancel, 0, 0, 1, 1)
self.ButtonInfo = QtWidgets.QPushButton(Information_Dialog)
self.ButtonInfo.setObjectName("ButtonInfo")
self.gridLayout.addWidget(self.ButtonInfo, 0, 2, 1, 1)
self.ButtonMoreInfo = QtWidgets.QPushButton(Information_Dialog)
self.ButtonMoreInfo.setObjectName("ButtonMoreInfo")
self.gridLayout.addWidget(self.ButtonMoreInfo, 0, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 1)
self.textBrowser = QtWebEngineWidgets.QWebEngineView(Information_Dialog)
self.textBrowser.setObjectName("textBrowser")
self.gridLayout_2.addWidget(self.textBrowser, 0, 0, 1, 1)
self.retranslateUi(Information_Dialog)
QtCore.QMetaObject.connectSlotsByName(Information_Dialog)
def retranslateUi(self, Information_Dialog):
_translate = QtCore.QCoreApplication.translate
Information_Dialog.setWindowTitle(_translate("Information_Dialog", "Information"))
self.ButtonCancel.setText(_translate("Information_Dialog", "Cancel"))
self.ButtonInfo.setText(_translate("Information_Dialog", "Pandoc-Options"))
self.ButtonMoreInfo.setText(_translate("Information_Dialog", "More Information"))
from PyQt5 import QtWebEngineWidgets
|
apaeffgen/PanConvert
|
source/gui/panconvert_diag_info.py
|
Python
|
gpl-3.0
| 2,106
|
#! /usr/bin/env python
"""
Module with the MCMC (``emcee``) sampling for NEGFC parameter estimation.
"""
from __future__ import print_function
__author__ = 'O. Wertz, C. Gomez @ ULg'
__all__ = ['lnprior',
'lnlike',
'mcmc_negfc_sampling',
'chain_zero_truncated',
'show_corner_plot',
'show_walk_plot',
'confidence']
import numpy as np
import os
import emcee
from math import isinf, floor, ceil
import inspect
import datetime
import corner
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.mlab import normpdf
from scipy.stats import norm
from ..fits import open_adicube, open_fits
from ..phot import cube_inject_companions
from ..conf import time_ini, timing, sep
from .simplex_fmerit import get_values_optimize
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def lnprior(param, bounds):
"""
Define the prior log-function.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
Returns
-------
out: float.
0 -- All the model parameters satisfy the prior
conditions defined here.
-np.inf -- At least one model parameters is out of bounds.
"""
try:
r, theta, flux = param
except TypeError:
print('paraVector must be a tuple, {} was given'.format(type(param)))
try:
r_bounds, theta_bounds, flux_bounds = bounds
except TypeError:
print('bounds must be a list of tuple, {} was given'.format(type(param)))
if r_bounds[0] <= r <= r_bounds[1] and \
theta_bounds[0] <= theta <= theta_bounds[1] and \
flux_bounds[0] <= flux <= flux_bounds[1]:
return 0.0
else:
return -np.inf
def lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width,
ncomp, aperture_radius, initial_state, cube_ref=None, svd_mode='lapack',
scaling='temp-mean', fmerit='sum', collapse='median', debug=False):
""" Define the likelihood log-function.
Parameters
----------
param: tuple
The model parameters, typically (r, theta, flux).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
annulus_width: float
The width of the annulus of interest in terms of the FWHM.
ncomp: int
The number of principal components.
fwhm : float
The FHWM in pixels.
aperture_radius: float
The radius of the circular aperture in terms of the FWHM.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref: array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
debug: boolean
If True, the cube is returned along with the likelihood log-function.
Returns
-------
out: float
The log of the likelihood.
"""
# Create the cube with the negative fake companion injected
cube_negfc = cube_inject_companions(cube, psf_norm, angs, flevel=-param[2],
plsc=plsc, rad_dists=[param[0]], n_branches=1,
theta=param[1], verbose=False, imlib='opencv')
# Perform PCA and extract the zone of interest
values = get_values_optimize(cube_negfc,angs,ncomp,annulus_width*fwhm,
aperture_radius*fwhm, initial_state[0],
initial_state[1], cube_ref=cube_ref,
svd_mode=svd_mode, scaling=scaling,
collapse=collapse)
# Function of merit
if fmerit=='sum':
lnlikelihood = -0.5 * np.sum(np.abs(values))
elif fmerit=='stddev':
values = values[values!=0]
lnlikelihood = -1*np.std(np.abs(values))
else:
raise RuntimeError('fmerit choice not recognized')
if debug:
return lnlikelihood, cube_negfc
else:
return lnlikelihood
def lnprob(param,bounds, cube, angs, plsc, psf_norm, fwhm,
annulus_width, ncomp, aperture_radius, initial_state, cube_ref=None,
svd_mode='lapack', scaling='temp-mean', fmerit='sum',
collapse='median',display=False):
""" Define the probability log-function as the sum between the prior and
likelihood log-funtions.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
fwhm : float
The FHWM in pixels.
annulus_width: float
The width in pixel of the annulus on wich the PCA is performed.
ncomp: int
The number of principal components.
aperture_radius: float
The radius of the circular aperture.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
display: boolean
If True, the cube is displayed with ds9.
Returns
-------
out: float
The probability log-function.
"""
if initial_state is None:
initial_state = param
lp = lnprior(param, bounds)
if isinf(lp):
return -np.inf
return lp + lnlike(param, cube, angs, plsc, psf_norm, fwhm,
annulus_width, ncomp, aperture_radius, initial_state,
cube_ref, svd_mode, scaling, fmerit, collapse, display)
def gelman_rubin(x):
"""
Determine the Gelman-Rubin \hat{R} statistical test between Markov chains.
Parameters
----------
x: numpy.array
The numpy.array on which the Gelman-Rubin test is applied. This array
should contain at least 2 set of data, i.e. x.shape >= (2,).
Returns
-------
out: float
The Gelman-Rubin \hat{R}.
Example
-------
>>> x1 = np.random.normal(0.0,1.0,(1,100))
>>> x2 = np.random.normal(0.1,1.3,(1,100))
>>> x = np.vstack((x1,x2))
>>> gelman_rubin(x)
1.0366629898991262
>>> gelman_rubin(np.vstack((x1,x1)))
0.99
"""
if np.shape(x) < (2,):
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
print("Bad shape for the chains")
return
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum([(x[i] - xbar) ** 2 for i, xbar in enumerate(np.mean(x,
1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return R
def gelman_rubin_from_chain(chain, burnin):
"""
Pack the MCMC chain and determine the Gelman-Rubin \hat{R} statistical test.
In other words, two sub-sets are extracted from the chain (burnin parts are
taken into account) and the Gelman-Rubin statistical test is performed.
Parameters
----------
chain: numpy.array
The MCMC chain with the shape walkers x steps x model_parameters
burnin: float \in [0,1]
The fraction of a walker which is discarded.
Returns
-------
out: float
The Gelman-Rubin \hat{R}.
"""
dim = chain.shape[2]
k = chain.shape[1]
threshold0 = int(floor(burnin*k))
threshold1 = int(floor((1-burnin)*k*0.25))
rhat = np.zeros(dim)
for j in range(dim):
part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
series = np.vstack((part1,part2))
rhat[j] = gelman_rubin(series)
return rhat
def mcmc_negfc_sampling(cubes, angs, psfn, ncomp, plsc, initial_state,
                        fwhm=4, annulus_width=3, aperture_radius=4, cube_ref=None,
                        svd_mode='lapack', scaling='temp-mean', fmerit='sum',
                        collapse='median', nwalkers=1000, bounds=None, a=2.0,
                        burnin=0.3, rhat_threshold=1.01, rhat_count_threshold=1,
                        niteration_min=0, niteration_limit=1e02,
                        niteration_supp=0, check_maxgap=1e04, nproc=1,
                        output_file=None, display=False, verbose=True, save=False):
    """ Runs an affine invariant mcmc sampling algorithm in order to determine
    the position and the flux of the planet using the 'Negative Fake Companion'
    technique. The result of this procedure is a chain with the samples from the
    posterior distributions of each of the 3 parameters.
    This technique can be summarized as follows:
    1)  We inject a negative fake companion (one candidate) at a given
        position and characterized by a given flux, both close to the expected
        values.
    2)  We run PCA on an full annulus which pass through the initial guess,
        regardless of the position of the candidate.
    3)  We extract the intensity values of all the pixels contained in a
        circular aperture centered on the initial guess.
    4)  We calculate the function of merit. The associated chi^2 is given by
        chi^2 = sum(|I_j|) where j \in {1,...,N} with N the total number of
        pixels contained in the circular aperture.
    The steps 1) to 4) are looped. At each iteration, the candidate model
    parameters are defined by the emcee Affine Invariant algorithm.
    Parameters
    ----------
    cubes: str or numpy.array
        The relative path to the cube of fits images OR the cube itself.
    angs: str or numpy.array
        The relative path to the parallactic angle fits image or the angs itself.
    psfn: str or numpy.array
        The relative path to the instrumental PSF fits image or the PSF itself.
        The PSF must be centered and the flux in a 1*FWHM aperture must equal 1.
    ncomp: int
        The number of principal components.
    plsc: float
        The platescale, in arcsec per pixel.
    annulus_width: float, optional
        The width in pixel of the annulus on which the PCA is performed.
    aperture_radius: float, optional
        The radius of the circular aperture.
    nwalkers: int optional
        The number of Goodman & Weare 'walkers'.
    initial_state: numpy.array
        The first guess for the position and flux of the planet, respectively.
        Each walker will start in a small ball around this preferred position.
    cube_ref : array_like, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
        'randsvd' is not recommended for the negative fake companion technique.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With
        "temp-mean" then temporal px-wise mean subtraction is done and with
        "temp-standard" temporal mean centering plus scaling to unit variance
        is done.
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    bounds: numpy.array or list, default=None, optional
        The prior knowledge on the model parameters. If None, large bounds will
        be automatically estimated from the initial state.
    a: float, default=2.0
        The proposal scale parameter. See notes.
    burnin: float, default=0.3
        The fraction of a walker which is discarded.
    rhat_threshold: float, default=0.01
        The Gelman-Rubin threshold used for the test for nonconvergence.
    rhat_count_threshold: int, optional
        The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in
        a row before claiming that the chain has converged.
    niteration_min: int, optional
        Steps per walker lower bound. The simulation will run at least this
        number of steps per walker.
    niteration_limit: int, optional
        Steps per walker upper bound. If the simulation runs up to
        'niteration_limit' steps without having reached the convergence
        criterion, the run is stopped.
    niteration_supp: int, optional
        Number of iterations to run after having "reached the convergence".
    check_maxgap: int, optional
        Maximum number of steps per walker between two Gelman-Rubin test.
    nproc: int, optional
        The number of processes to use for parallelization.
    output_file: str
        The name of the ouput file which contains the MCMC results
        (if save is True).
    display: boolean
        If True, the walk plot is displayed at each evaluation of the Gelman-
        Rubin test.
    verbose: boolean
        Display informations in the shell.
    save: boolean
        If True, the MCMC results are pickled.
    Returns
    -------
    out : numpy.array
        The MCMC chain.
    Notes
    -----
    The parameter 'a' must be > 1. For more theoretical information concerning
    this parameter, see Goodman & Weare, 2010, Comm. App. Math. Comp. Sci.,
    5, 65, Eq. [9] p70.
    The parameter 'rhat_threshold' can be a numpy.array with individual
    threshold value for each model parameter.
    """
    if verbose:
        start_time = time_ini()
        print("        MCMC sampler for the NEGFC technique       ")
        print(sep)
    # If required, one create the output folder.
    if save:
        if not os.path.exists('results'):
            os.makedirs('results')
        if output_file is None:
            # Default output name: timestamp of the run start.
            datetime_today = datetime.datetime.today()
            output_file = str(datetime_today.year)+str(datetime_today.month)+\
                          str(datetime_today.day)+'_'+str(datetime_today.hour)+\
                          str(datetime_today.minute)+str(datetime_today.second)
        if not os.path.exists('results/'+output_file):
            os.makedirs('results/'+output_file)
    # #########################################################################
    # If required, one opens the source files
    # #########################################################################
    if isinstance(cubes,str) and isinstance(angs,str):
        # NOTE(review): this branch is unreachable as written — the outer
        # isinstance(angs, str) check excludes angs is None. Confirm intent.
        if angs is None:
            cubes, angs = open_adicube(cubes, verbose=False)
        else:
            cubes = open_fits(cubes)
            angs = open_fits(angs, verbose=False)
        if isinstance(psfn,str):
            psfn = open_fits(psfn)
        if verbose:
            print('The data has been loaded. Let''s continue !')
    # #########################################################################
    # Initialization of the variables
    # #########################################################################
    dim = 3 # There are 3 model parameters, resp. the radial and angular
            # position of the planet and its flux.
    itermin = niteration_min
    limit = niteration_limit
    supp = niteration_supp
    maxgap = check_maxgap
    initial_state = np.array(initial_state)
    if itermin > limit:
        itermin = 0
        print("'niteration_min' must be < 'niteration_limit'.")
    # geom controls the geometric growth of the gap between two successive
    # Gelman-Rubin checks (see 'criterion' below).
    fraction = 0.3
    geom = 0
    lastcheck = 0
    konvergence = np.inf
    rhat_count = 0
    chain = np.empty([nwalkers,1,dim])
    isamples = np.empty(0)
    # Walkers start in a Gaussian ball (sigma = 0.1) around the initial guess.
    pos = initial_state + np.random.normal(0,1e-01,(nwalkers,3))
    nIterations = limit + supp
    rhat = np.zeros(dim)
    stop = np.inf
    if bounds is None:
        bounds = [(initial_state[0]-annulus_width/2.,initial_state[0]+annulus_width/2.), #radius
                  (initial_state[1]-10,initial_state[1]+10), #angle
                  (0,2*initial_state[2])] #flux
    # NOTE(review): 'threads' and the sequential sampler.sample(...) signature
    # below follow the emcee 2.x API; emcee 3+ renamed them — confirm the
    # pinned emcee version.
    sampler = emcee.EnsembleSampler(nwalkers,dim,lnprob,a,
                                    args =([bounds, cubes, angs, plsc, psfn,
                                            fwhm, annulus_width, ncomp,
                                            aperture_radius, initial_state,
                                            cube_ref, svd_mode, scaling, fmerit,
                                            collapse]),
                                    threads=nproc)
    start = datetime.datetime.now()
    # #########################################################################
    # Affine Invariant MCMC run
    # #########################################################################
    if verbose:
        print('')
        print('Start of the MCMC run ...')
        print('Step  |  Duration/step (sec)  |  Remaining Estimated Time (sec)')
    for k, res in enumerate(sampler.sample(pos,iterations=nIterations,
                                           storechain=True)):
        elapsed = (datetime.datetime.now()-start).total_seconds()
        if verbose:
            if k == 0:
                q = 0.5
            else:
                q = 1
            print('{}\t\t{:.5f}\t\t\t{:.5f}'.format(k,elapsed*q,elapsed*(limit-k-1)*q))
        start = datetime.datetime.now()
        # ---------------------------------------------------------------------
        # Store the state manually in order to handle with dynamical sized chain.
        # ---------------------------------------------------------------------
        ## Check if the size of the chain is long enough.
        s = chain.shape[1]
        if k+1 > s: #if not, one doubles the chain length
            empty = np.zeros([nwalkers,2*s,dim])
            chain = np.concatenate((chain,empty),axis=1)
        ## Store the state of the chain
        chain[:,k] = res[0]
        # ---------------------------------------------------------------------
        # If k meets the criterion, one tests the non-convergence.
        # ---------------------------------------------------------------------
        criterion = np.amin([ceil(itermin*(1+fraction)**geom),\
                             lastcheck+floor(maxgap)])
        if k == criterion:
            if verbose:
                print('')
                print('   Gelman-Rubin statistic test in progress ...')
            geom += 1
            lastcheck = k
            if display:
                show_walk_plot(chain)
            if save:
                import pickle
                with open('results/'+output_file+'/'+output_file+'_temp_k{}'.format(k),'wb') as fileSave:
                    myPickler = pickle.Pickler(fileSave)
                    myPickler.dump({'chain':sampler.chain,
                                    'lnprob':sampler.lnprobability,
                                    'AR':sampler.acceptance_fraction})
            ## We only test the rhat if we have reached the minimum number of steps.
            if (k+1) >= itermin and konvergence == np.inf:
                threshold0 = int(floor(burnin*k))
                threshold1 = int(floor((1-burnin)*k*0.25))
                # We calculate the rhat for each model parameter.
                for j in range(dim):
                    part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
                    part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
                    series = np.vstack((part1,part2))
                    rhat[j] = gelman_rubin(series)
                if verbose:
                    print('   r_hat = {}'.format(rhat))
                    print('   r_hat <= threshold = {}'.format(rhat <= rhat_threshold))
                    print('')
                # We test the rhat.
                if (rhat <= rhat_threshold).all(): #and rhat_count < rhat_count_threshold:
                    rhat_count += 1
                    if rhat_count < rhat_count_threshold:
                        print("Gelman-Rubin test OK {}/{}".format(rhat_count,rhat_count_threshold))
                    elif rhat_count >= rhat_count_threshold:
                        print('... ==> convergence reached')
                        konvergence = k
                        stop = konvergence + supp
                #elif (rhat <= rhat_threshold).all() and rhat_count >= rhat_count_threshold:
                #    print '... ==> convergence reached'
                #    konvergence = k
                #    stop = konvergence + supp
                else:
                    rhat_count = 0
        if (k+1) >= stop: #Then we have reached the maximum number of steps for our Markov chain.
            print('We break the loop because we have reached convergence')
            break
    if k == nIterations-1:
        print("We have reached the limit number of steps without having converged")
    # #########################################################################
    # Construction of the independent samples
    # #########################################################################
    temp = np.where(chain[0,:,0] == 0.0)[0]
    if len(temp) != 0:
        idxzero = temp[0]
    else:
        idxzero = chain.shape[1]
    # NOTE(review): np.amin/np.floor return floats here, and idx is then used
    # as a slice index below — modern numpy rejects float indices; consider
    # wrapping with int().
    idx = np.amin([np.floor(2e05/nwalkers),np.floor(0.1*idxzero)])
    if idx == 0:
        isamples = chain[:,0:idxzero,:]
    else:
        isamples = chain[:,idxzero-idx:idxzero,:]
    if save:
        import pickle
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        input_parameters = {j : values[j] for j in args[1:]}
        output = {'isamples':isamples,
                  'chain': chain_zero_truncated(chain),
                  'input_parameters': input_parameters,
                  'AR': sampler.acceptance_fraction,
                  'lnprobability': sampler.lnprobability}
        with open('results/'+output_file+'/MCMC_results','wb') as fileSave:
            myPickler = pickle.Pickler(fileSave)
            myPickler.dump(output)
        print('')
        print("The file MCMC_results has been stored in the folder {}".format('results/'+output_file+'/'))
    if verbose:
        timing(start_time)
    return chain_zero_truncated(chain)
def chain_zero_truncated(chain):
    """
    Return the Markov chain with the dimension: walkers x steps* x parameters,
    where steps* is the last step before having 0 (not yet constructed chain).

    Parameters
    ----------
    chain: numpy.array
        The MCMC chain, shaped walkers x steps x parameters.

    Returns
    -------
    out: numpy.array
        The truncated MCMC chain, that is to say, the chain which only contains
        relevant information.
    """
    try:
        # First step at which walker 0 / parameter 0 is still the 0.0
        # placeholder, i.e. the chain was never written there.
        idxzero = np.where(chain[0, :, 0] == 0.0)[0][0]
    except IndexError:
        # No zero entry: the chain is fully constructed, keep every step.
        # (Was a bare `except:`, which also swallowed unrelated errors.)
        idxzero = chain.shape[1]
    return chain[:, 0:idxzero, :]
def show_walk_plot(chain, save=False, **kwargs):
    """
    Display or save a figure showing the path of each walker during the MCMC run.

    Parameters
    ----------
    chain: numpy.array
        The Markov chain, with shape nwalkers x length x dim. Trailing
        zero-filled steps (not yet constructed part of the chain) are
        discarded before plotting.
    save: boolean, default: False
        If True, a pdf file is created instead of displaying the figure.
    kwargs:
        Additional attributs are passed to the matplotlib plot method.

    Returns
    -------
    Display the figure or create a pdf file named walk_plot.pdf in the working
    directory.
    """
    unfilled = np.where(chain[0, :, 0] == 0.0)[0]
    if len(unfilled) != 0:
        chain = chain[:, :unfilled[0], :]
    labels = kwargs.pop('labels', ["$r$", r"$\theta$", "$f$"])
    fig, axes = plt.subplots(3, 1, sharex=True,
                             figsize=kwargs.pop('figsize', (8, 6)))
    axes[2].set_xlabel(kwargs.pop('xlabel', 'step number'))
    axes[2].set_xlim(kwargs.pop('xlim', [0, chain.shape[1]]))
    color = kwargs.pop('color', 'k')
    alpha = kwargs.pop('alpha', 0.4)
    for j, axis in enumerate(axes):
        # One panel per model parameter; each line is one walker's path.
        axis.plot(chain[:, :, j].T, color=color, alpha=alpha, **kwargs)
        axis.yaxis.set_major_locator(MaxNLocator(5))
        axis.set_ylabel(labels[j])
    fig.tight_layout(h_pad=0.0)
    if save:
        plt.savefig('walk_plot.pdf')
        plt.close(fig)
    else:
        plt.show()
def show_corner_plot(chain, burnin=0.5, save=False, **kwargs):
    """
    Display or save a figure showing the corner plot (pdfs + correlation plots).

    Parameters
    ----------
    chain: numpy.array
        The Markov chain, with shape nwalkers x length x dim. Trailing
        zero-filled steps (not yet constructed part of the chain) are
        discarded.
    burnin: float, default: 0.5
        The fraction of a walker we want to discard.
    save: boolean, default: False
        If True, a pdf file is created instead of displaying the figure.
    kwargs:
        Additional attributs are passed to the corner.corner() method.

    Returns
    -------
    Display the figure or create a pdf file named corner_plot.pdf in the
    working directory.

    Raises
    ------
    ImportError
    """
    try:
        unfilled = np.where(chain[0, :, 0] == 0.0)[0]
        if len(unfilled) != 0:
            chain = chain[:, :unfilled[0], :]
        n_steps = chain.shape[1]
        # Drop the burnin part of every walker, then flatten to samples x 3.
        first_kept = int(np.floor(burnin * (n_steps - 1)))
        chain = chain[:, first_kept:n_steps, :].reshape((-1, 3))
    except IndexError:
        pass
    if chain.shape[0] == 0:
        print("It seems that the chain is empty. Have you already run the MCMC ?")
    else:
        labels = kwargs.pop('labels', ["$r$", r"$\theta$", "$f$"])
        fig = corner.corner(chain, labels=labels, **kwargs)
        if save:
            plt.savefig('corner_plot.pdf')
            plt.close(fig)
        else:
            plt.show()
def writeText(document, text):
    """
    Append a line of text to a txt file (the file is created if needed).

    Parameters
    ----------
    document: str
        The path to the file to append or create.
    text: str or tuple
        The text to write. A str is written followed by ' \\n'; a tuple is
        written as its items separated by '\\t ' (no trailing newline).

    Returns
    -------
    None
    """
    with open(document, 'a') as out:
        if isinstance(text, str):
            out.write("%s \n" % text)
        elif isinstance(text, tuple):
            # One '%s' slot per tuple item, joined by tab-space.
            fmt = "%s" + "\t %s" * (len(text) - 1)
            out.write(fmt % text)
def confidence(isamples, cfd=68.27, bins=100, gaussianFit=False, weights=None,
               verbose=True, save=False, **kwargs):
    """
    Determine the highly probable value for each model parameter, as well as
    the 1-sigma confidence interval.
    Parameters
    ----------
    isamples: numpy.array
        The independent samples for each model parameter.
    cfd: float, optional
        The confidence level given in percentage.
    bins: int, optional
        The number of bins used to sample the posterior distributions.
    gaussianFit: boolean, optional
        If True, a gaussian fit is performed in order to determine (\mu,\sigma)
    weights : (n, ) array_like or None, optional
        An array of weights for each sample.
    verbose: boolean, optional
        Display information in the shell.
    save: boolean, optional
        If "True", a txt file with the results is saved in the output repository.
    kwargs: optional
        Additional attributes are passed to the matplotlib hist() method.
    Returns
    -------
    out: tuple
        A 2 elements tuple with the highly probable solution and the confidence
        interval.
    """
    plsc = kwargs.pop('plsc',0.001)
    title = kwargs.pop('title',None)
    output_file = kwargs.pop('filename','confidence.txt')
    try:
        l = isamples.shape[1]
    except Exception:
        l = 1
    confidenceInterval = dict()
    val_max = dict()
    # The three NEGFC model parameters: radius, position angle, flux.
    pKey = ['r','theta','f']
    if cfd == 100:
        cfd = 99.9
    #########################################
    ##  Determine the confidence interval  ##
    #########################################
    if gaussianFit:
        mu = np.zeros(3)
        sigma = np.zeros_like(mu)
    if gaussianFit:
        fig,ax = plt.subplots(2,3, figsize=(12,8))
    else:
        fig,ax = plt.subplots(1,3, figsize=(12,4))
    for j in range(l):
        label_file = ['r','theta','flux']
        label = [r'$\Delta r$',r'$\Delta \theta$',r'$\Delta f$']
        if gaussianFit:
            n, bin_vertices, _ = ax[0][j].hist(isamples[:,j],bins=bins,
                                               weights=weights, histtype='step',
                                               edgecolor='gray')
        else:
            n, bin_vertices, _ = ax[j].hist(isamples[:,j],bins=bins,
                                            weights=weights, histtype='step',
                                            edgecolor='gray')
        bins_width = np.mean(np.diff(bin_vertices))
        surface_total = np.sum(np.ones_like(n)*bins_width * n)
        # Accumulate bins from the highest downward until the requested
        # fraction of the total histogram area is covered.
        n_arg_sort = np.argsort(n)[::-1]
        test = 0
        pourcentage = 0
        for k,jj in enumerate(n_arg_sort):
            test = test + bins_width*n[jj]
            pourcentage = test/surface_total*100.
            if pourcentage > cfd:
                if verbose:
                    print('percentage for {}: {}%'.format(label_file[j],pourcentage))
                break
        n_arg_min = n_arg_sort[:k].min()
        n_arg_max = n_arg_sort[:k+1].max()
        if n_arg_min == 0: n_arg_min += 1
        if n_arg_max == bins: n_arg_max -= 1
        # Mode of the histogram (center of the most populated bin).
        val_max[pKey[j]] = bin_vertices[n_arg_sort[0]]+bins_width/2.
        confidenceInterval[pKey[j]] = np.array([bin_vertices[n_arg_min-1],
                                                bin_vertices[n_arg_max+1]]-val_max[pKey[j]])
        arg = (isamples[:,j]>=bin_vertices[n_arg_min-1])*(isamples[:,j]<=bin_vertices[n_arg_max+1])
        if gaussianFit:
            _ = ax[0][j].hist(isamples[arg,j],bins=bin_vertices,
                              facecolor='gray', edgecolor='darkgray',
                              histtype='stepfilled', alpha=0.5)
            ax[0][j].vlines(val_max[pKey[j]], 0, n[n_arg_sort[0]],
                            linestyles='dashed', color='red')
            ax[0][j].set_xlabel(label[j])
            if j==0: ax[0][j].set_ylabel('Counts')
        else:
            _ = ax[j].hist(isamples[arg,j],bins=bin_vertices, facecolor='gray',
                           edgecolor='darkgray', histtype='stepfilled',
                           alpha=0.5)
            ax[j].vlines(val_max[pKey[j]], 0, n[n_arg_sort[0]],
                         linestyles='dashed', color='red')
            ax[j].set_xlabel(label[j])
            if j==0: ax[j].set_ylabel('Counts')
        if gaussianFit:
            (mu[j], sigma[j]) = norm.fit(isamples[:,j])
            # NOTE(review): hist/histogram 'normed' and 'normpdf' were removed
            # in recent matplotlib/numpy releases ('density', scipy.stats
            # norm.pdf) — confirm the pinned versions support them.
            n_fit, bins_fit = np.histogram(isamples[:,j], bins, normed=1,
                                           weights=weights)
            _= ax[1][j].hist(isamples[:,j], bins, normed=1, weights=weights,
                             facecolor='gray', edgecolor='darkgray',
                             histtype='step')
            y = normpdf( bins_fit, mu[j], sigma[j])
            ax[1][j].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)
            ax[1][j].set_xlabel(label[j])
            if j==0: ax[1][j].set_ylabel('Counts')
            if title is not None:
                msg = r"$\mu$ = {:.4f}, $\sigma$ = {:.4f}"
                ax[1][j].set_title(title+' '+msg.format(mu[j],sigma[j]),
                                   fontsize=10)
        else:
            if title is not None:
                ax[1].set_title(title, fontsize=10)
    if save:
        if gaussianFit:
            plt.savefig('confi_hist_flux_r_theta_gaussfit.pdf')
        else:
            plt.savefig('confi_hist_flux_r_theta.pdf')
    plt.tight_layout(w_pad=0.001)
    if verbose:
        print('')
        print('Confidence intervals:')
        print('r: {} [{},{}]'.format(val_max['r'],
                                     confidenceInterval['r'][0],
                                     confidenceInterval['r'][1]))
        print('theta: {} [{},{}]'.format(val_max['theta'],
                                         confidenceInterval['theta'][0],
                                         confidenceInterval['theta'][1]))
        print('flux: {} [{},{}]'.format(val_max['f'],
                                        confidenceInterval['f'][0],
                                        confidenceInterval['f'][1]))
        if gaussianFit:
            print('')
            print('Gaussian fit results:')
            print('r: {} +-{}'.format(mu[0],sigma[0]))
            print('theta: {} +-{}'.format(mu[1],sigma[1]))
            print('f: {} +-{}'.format(mu[2],sigma[2]))
    ##############################################
    ##  Write inference results in a text file  ##
    ##############################################
    if save:
        try:
            fileObject = open(output_file,'r')
        except IOError: # if the file doesn't exist, we create it (empty)
            # NOTE(review): 'answer' is hard-coded to 'y', so the elif/else
            # branches below are dead code (likely a leftover interactive
            # prompt).
            answer = 'y'
            if answer == 'y':
                fileObject = open(output_file,'w')
            elif answer == 'n':
                msg = "The file has not been created. The object cannot be "
                msg += "created neither."
                print(msg)
                raise IOError("No such file has been found")
            else:
                msg = "You must choose between 'y' for yes and 'n' for no. The "
                msg += "file has not been created. The object cannot be "
                msg += "created neither."
                print()
                raise IOError("No such file has been found")
        finally:
            fileObject.close()
        writeText(output_file,'###########################')
        writeText(output_file,'####   INFERENCE TEST   ###')
        writeText(output_file,'###########################')
        writeText(output_file,' ')
        writeText(output_file,'Results of the MCMC fit')
        writeText(output_file,'----------------------- ')
        writeText(output_file,' ')
        writeText(output_file,'>> Position and flux of the planet (highly probable):')
        writeText(output_file,'{} % confidence interval'.format(cfd))
        writeText(output_file,' ')
        for i in range(3):
            confidenceMax = confidenceInterval[pKey[i]][1]
            confidenceMin = -confidenceInterval[pKey[i]][0]
            if i == 2:
                text = '{}: \t\t\t{:.3f} \t-{:.3f} \t+{:.3f}'
            else:
                text = '{}: \t\t\t{:.3f} \t\t-{:.3f} \t\t+{:.3f}'
            writeText(output_file,text.format(pKey[i],val_max[pKey[i]],
                                              confidenceMin,confidenceMax))
        writeText(output_file,' ')
        writeText(output_file,'Platescale = {} mas'.format(plsc*1000))
        text = '{}: \t\t{:.2f} \t\t-{:.2f} \t\t+{:.2f}'
        writeText(output_file,text.format('r (mas)', val_max[pKey[0]]*plsc*1000,
                                          -confidenceInterval[pKey[0]][0]*plsc*1000,
                                          confidenceInterval[pKey[0]][1]*plsc*1000))
    if gaussianFit:
        return (mu,sigma)
    else:
        return (val_max,confidenceInterval)
|
henry-ngo/VIP
|
vip_hci/negfc/mcmc_sampling.py
|
Python
|
mit
| 39,691
|
import json
import httpretty
import os
from similarweb import SourcesClient
TD = os.path.dirname(os.path.realpath(__file__))  # directory of this test file, used to locate fixtures
def test_sources_client_has_user_key():
    # The key passed at construction must be stored verbatim.
    assert SourcesClient("test_key").user_key == "test_key"
def test_sources_client_has_base_url():
    # The URL template has two slots: domain and endpoint.
    assert SourcesClient("test_key").base_url == "https://api.similarweb.com/Site/{0}/{1}/"
def test_sources_client_has_empty_full_url():
    # No request has been issued yet, so no full URL is recorded.
    assert SourcesClient("test_key").full_url == ""
@httpretty.activate
def test_sources_client_social_referrals_completes_full_url():
    # After a call, the client must expose the exact URL it requested.
    expected_url = ("https://api.similarweb.com/Site/"
                    "example.com/v1/SocialReferringSites?"
                    "UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_good_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, expected_url, body=payload)
    api = SourcesClient("test_key")
    api.social_referrals("example.com")
    assert api.full_url == expected_url
@httpretty.activate
def test_sources_client_social_referrals_response_from_invalid_api_key():
    # An invalid key is surfaced as the API's error payload.
    expected = {"Error": "user_key_invalid"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/SocialReferringSites?"
                  "UserKey=invalid_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_invalid_api_key_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("invalid_key")
    assert api.social_referrals("example.com") == expected
@httpretty.activate
def test_sources_client_social_referrals_response_from_malformed_url():
    # A domain the API cannot resolve yields a malformed-URL error.
    expected = {"Error": "Malformed or Unknown URL"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/SocialReferringSites?"
                  "UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_url_malformed_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.social_referrals("bad_url") == expected
@httpretty.activate
def test_sources_client_social_referrals_response_from_malformed_url_incl_http():
    # Passing a scheme-prefixed domain is treated as malformed by the API.
    expected = {"Error": "Malformed or Unknown URL"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/SocialReferringSites?"
                  "UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_url_with_http_response.json".format(TD)
    with open(fixture_path) as fh:
        # This fixture is served raw (newlines stripped), not re-serialized.
        payload = fh.read().replace("\n", "")
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.social_referrals("https://example.com") == expected
@httpretty.activate
def test_sources_client_social_referrals_response_from_empty_response():
    # An empty body from the API maps to a generic unknown error.
    expected = {"Error": "Unknown Error"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/SocialReferringSites?"
                  "UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_empty_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.social_referrals("example.com") == expected
@httpretty.activate
def test_sources_client_social_referrals_response_from_good_inputs():
    # A valid key and domain return the parsed social-sources breakdown.
    expected = {"SocialSources": {
                  "Facebook": 0.5872484011274256,
                  "Reddit": 0.1955231030114612,
                  "Twitter": 0.13209235484709875,
                  "Youtube": 0.06292737412742913,
                  "Weibo.com": 0.010782551614770926},
                "StartDate": "12/2014",
                "EndDate": "02/2015"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/SocialReferringSites?"
                  "UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_social_referrals_good_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.social_referrals("example.com") == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_completes_full_url():
    # After a call, the client must expose the exact URL it requested.
    expected_url = ("https://api.similarweb.com/Site/"
                    "example.com/v1/orgsearch?start=11-2014&"
                    "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_good_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, expected_url, body=payload)
    api = SourcesClient("test_key")
    api.organic_search_keywords("example.com", 1, "11-2014", "12-2014", False)
    assert api.full_url == expected_url
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_invalid_api_key():
    # An invalid key is surfaced as the API's error payload.
    expected = {"Error": "user_key_invalid"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=invalid_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_invalid_api_key_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("invalid_key")
    assert api.organic_search_keywords("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_malformed_url():
    # A domain the API cannot resolve yields a malformed-URL error.
    expected = {"Error": "Malformed or Unknown URL"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_url_malformed_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("bad_url", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_malformed_url_incl_http():
    # Passing a scheme-prefixed domain is treated as malformed by the API.
    expected = {"Error": "Malformed or Unknown URL"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_url_with_http_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("https://example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_bad_page():
    # Page numbers start at 1; page 0 is rejected by the API.
    expected = {"Error": "The field Page is invalid."}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=0&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_page_bad_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("example.com", 0, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_bad_start_date():
    # Month 14 is out of range, so the API rejects the start date.
    expected = {"Error": "The value '14-2014' is not valid for Start."}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=14-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_start_bad_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("example.com", 1, "14-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_bad_end_date():
    # Month 0 is out of range, so the API rejects the end date.
    expected = {"Error": "The value '0-2014' is not valid for End."}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=0-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_end_bad_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("example.com", 1, "11-2014", "0-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_out_of_order_dates():
    # A start date after the end date is an invalid range.
    expected = {"Error": "Date range is not valid"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=12-2014&"
                  "end=9-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_out_of_order_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("example.com", 1, "12-2014", "9-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_bad_main_domain():
    # An invalid `md` (main domain) flag is surfaced as the API error.
    expected = {"Error": "The value 'other' is not valid for Md."}
    # Fix: the registered URL previously used start=12-2014&end=9-2014, which
    # did not match the 11-2014/12-2014 arguments passed to the client below.
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=other&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_organic_search_keywords_main_domain_bad_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.organic_search_keywords("example.com", 1, "11-2014", "12-2014", "other")
    assert result == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_empty_response():
    # An empty body from the API maps to a generic unknown error.
    expected = {"Error": "Unknown Error"}
    mocked_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_organic_search_keywords_empty_response.json".format(TD)
    with open(fixture_path) as fh:
        payload = json.dumps(json.load(fh))
    httpretty.register_uri(httpretty.GET, mocked_url, body=payload)
    api = SourcesClient("test_key")
    assert api.organic_search_keywords("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_search_keywords_response_from_good_inputs():
    """Valid inputs return the parsed keyword payload from the fixture."""
    expected = {"Data": [{"SearchTerm": "nba",
                          "Visits": 0.3112634279302496,
                          "Change": -0.1057165328807857},
                         {"SearchTerm": "nba league pass",
                          "Visits": 0.04344402567022226,
                          "Change": -0.28494011618310383},
                         {"SearchTerm": "nba standings",
                          "Visits": 0.026975361122759705,
                          "Change": 0.506606019589038},
                         {"SearchTerm": "nba.com",
                          "Visits": 0.024175302297685337,
                          "Change": -0.21470821673506887},
                         {"SearchTerm": "lakers",
                          "Visits": 0.020901069293644634,
                          "Change": -0.015496674800445101},
                         {"SearchTerm": "nba store",
                          "Visits": 0.009603534757614565,
                          "Change": 0.1954208822589453},
                         {"SearchTerm": "chicago bulls",
                          "Visits": 0.0093402142272212,
                          "Change": 0.09747235068892213},
                         {"SearchTerm": "raptors",
                          "Visits": 0.008992397410119684,
                          "Change": -0.32299852511496613},
                         {"SearchTerm": "cleveland cavaliers",
                          "Visits": 0.007440700569164507,
                          "Change": -0.13185965639266445},
                         {"SearchTerm": "nba game time",
                          "Visits": 0.007300049344512848,
                          "Change": -0.14326631607193804}],
                "ResultsCount": 10,
                "TotalCount": 11975,
                "Next": "http://api.similarweb.com/Site/example.com/v1/orgsearch?start=11-2014&end=12-2014&md=false&UserKey=test_key&page=2"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_organic_search_keywords_good_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.organic_search_keywords("example.com", 1, "11-2014", "12-2014", False)
    assert result == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_completes_full_url():
    """paid_search_keywords builds exactly the expected request URL."""
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    client.paid_search_keywords("example.com", 1, "11-2014", "12-2014", False)
    assert client.full_url == target_url
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_invalid_api_key():
    """An invalid API key surfaces the API's user_key_invalid error."""
    expected = {"Error": "user_key_invalid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=invalid_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_invalid_api_key_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("invalid_key")
    assert client.paid_search_keywords("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_malformed_url():
    """A nonsense domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_url_malformed_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("bad_url", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_malformed_url_incl_http():
    """A scheme-prefixed domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_url_with_http_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("https://example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_bad_page():
    """Page 0 surfaces the API's invalid-page error."""
    expected = {"Error": "The field Page is invalid."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=0&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_page_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("example.com", 0, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_bad_start_date():
    """An unparseable start month surfaces the API's 'not valid for Start' error."""
    expected = {"Error": "The value '14-2014' is not valid for Start."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=14-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_start_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("example.com", 1, "14-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_bad_end_date():
    """An unparseable end month surfaces the API's 'not valid for End' error."""
    expected = {"Error": "The value '0-2014' is not valid for End."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=0-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_end_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("example.com", 1, "11-2014", "0-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_out_of_order_dates():
    """A start month after the end month yields a date-range error."""
    expected = {"Error": "Date range is not valid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=12-2014&"
                  "end=9-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_out_of_order_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("example.com", 1, "12-2014", "9-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_bad_main_domain():
    """An invalid main-domain flag surfaces the API's 'not valid for Md' error.

    Fix: the mock was registered for start=12-2014&end=9-2014 while the
    client is called with 11-2014/12-2014, so the stub never matched the
    actual request. The registered URL now mirrors the call.
    """
    expected = {"Error": "The value 'other' is not valid for Md."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=other&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_paid_search_keywords_main_domain_bad_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.paid_search_keywords("example.com", 1, "11-2014", "12-2014", "other")
    assert result == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_empty_response():
    """An empty API body maps to the client's generic unknown-error dict."""
    expected = {"Error": "Unknown Error"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_search_keywords_empty_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_search_keywords("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_search_keywords_response_from_good_inputs():
    """Valid inputs return the parsed paid-keyword payload from the fixture."""
    expected = {"Data": [{"SearchTerm": "nba",
                          "Visits": 0.003344231214687972,
                          "Change": -0.062129215957638526},
                         {"SearchTerm": "nba store",
                          "Visits": 0.0029060240173658432,
                          "Change": 0.20846193245606223},
                         {"SearchTerm": "nba league pass",
                          "Visits": 0.0012716036162238515,
                          "Change": -0.7999901486315264},
                         {"SearchTerm": "nbastore",
                          "Visits": 0.0005023612587764184,
                          "Change": 0.9680003994439492},
                         {"SearchTerm": "portland trail blazers",
                          "Visits": 0.0005004670752349531,
                          "Change": 0.3499677164230108},
                         {"SearchTerm": "nba shop",
                          "Visits": 0.00045529952818624186,
                          "Change": 0.7850990143291091},
                         {"SearchTerm": "nba.com",
                          "Visits": 0.0003494276280350276,
                          "Change": -0.3860372438596248},
                         {"SearchTerm": "league pass",
                          "Visits": 0.00020733505676232166,
                          "Change": -0.8220804130834665},
                         {"SearchTerm": "nba ugly sweaters",
                          "Visits": 0.00019551746013417756,
                          "Change": 0.10131215601631173},
                         {"SearchTerm": "celtics schedule",
                          "Visits": 0.0001742655622935061,
                          "Change": 0.10131215601631166}],
                "ResultsCount": 10,
                "TotalCount": 243,
                "Next": "http://api.similarweb.com/Site/example.com/v1/paidsearch?start=11-2014&end=12-2014&md=false&UserKey=test_key&page=2"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidsearch?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_paid_search_keywords_good_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.paid_search_keywords("example.com", 1, "11-2014", "12-2014", False)
    assert result == expected
@httpretty.activate
def test_sources_client_destinations_completes_full_url():
    """destinations builds exactly the expected request URL."""
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v2/leadingdestinationsites?"
                  "UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_destinations_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    client.destinations("example.com")
    assert client.full_url == target_url
@httpretty.activate
def test_sources_client_destinations_response_from_invalid_api_key():
    """An invalid API key surfaces the API's user_key_invalid error."""
    expected = {"Error": "user_key_invalid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v2/leadingdestinationsites?"
                  "UserKey=invalid_key")
    fixture = "{0}/fixtures/sources_client_destinations_invalid_api_key_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("invalid_key")
    assert client.destinations("example.com") == expected
@httpretty.activate
def test_sources_client_destinations_response_from_malformed_url():
    """A nonsense domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v2/leadingdestinationsites?"
                  "UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_destinations_url_malformed_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.destinations("bad_url") == expected
# This response is not JSON-formatted
@httpretty.activate
def test_sources_client_destinations_response_from_malformed_url_incl_http():
    """A scheme-prefixed domain is reported as malformed; stub body is raw text."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v2/leadingdestinationsites?"
                  "UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_destinations_url_with_http_response.json".format(TD)
    with open(fixture) as handle:
        # Fixture is deliberately not valid JSON; serve it verbatim minus newlines.
        payload = handle.read().replace("\n", "")
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.destinations("https://example.com") == expected
@httpretty.activate
def test_sources_client_destinations_response_from_empty_response():
    """An empty API body maps to the client's generic unknown-error dict."""
    expected = {"Error": "Unknown Error"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v2/leadingdestinationsites?"
                  "UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_destinations_empty_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.destinations("example.com") == expected
@httpretty.activate
def test_sources_client_destinations_response_from_good_inputs():
    """Valid inputs return the parsed leading-destination-sites payload."""
    expected = {"Sites": ["ticketmaster.com",
                          "jmpdirect01.com",
                          "youradexchange.com",
                          "facebook.com",
                          "youtube.com",
                          "adcash.com",
                          "i.cdn.turner.com",
                          "oss.ticketmaster.com",
                          "mavs.com",
                          "spox.com"],
                "StartDate": "12/2014",
                "EndDate": "02/2015"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v2/leadingdestinationsites?"
                  "UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_destinations_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.destinations("example.com") == expected
@httpretty.activate
def test_sources_client_referrals_completes_full_url():
    """referrals builds exactly the expected request URL."""
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    client.referrals("example.com", 1, "11-2014", "12-2014")
    assert client.full_url == target_url
@httpretty.activate
def test_sources_client_referrals_response_from_invalid_api_key():
    """An invalid API key surfaces the API's user_key_invalid error."""
    expected = {"Error": "user_key_invalid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=invalid_key")
    fixture = "{0}/fixtures/sources_client_referrals_invalid_api_key_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("invalid_key")
    assert client.referrals("example.com", 1, "11-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_malformed_url():
    """A nonsense domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_url_malformed_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("bad_url", 1, "11-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_malformed_url_incl_http():
    """A scheme-prefixed domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_url_with_http_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("https://example.com", 1, "11-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_bad_page():
    """Page 0 surfaces the API's invalid-page error."""
    expected = {"Error": "The field Page is invalid."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=0&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_page_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("example.com", 0, "11-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_bad_start_date():
    """An unparseable start month surfaces the API's 'not valid for Start' error."""
    expected = {"Error": "The value '14-2014' is not valid for Start."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=14-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_start_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("example.com", 1, "14-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_bad_end_date():
    """An unparseable end month surfaces the API's 'not valid for End' error."""
    expected = {"Error": "The value '0-2014' is not valid for End."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=0-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_end_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("example.com", 1, "11-2014", "0-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_out_of_order_dates():
    """A start month after the end month yields a date-range error."""
    expected = {"Error": "Date range is not valid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=12-2014&"
                  "end=9-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_out_of_order_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("example.com", 1, "12-2014", "9-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_empty_response():
    """An empty API body maps to the client's generic unknown-error dict."""
    expected = {"Error": "Unknown Error"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_referrals_empty_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.referrals("example.com", 1, "11-2014", "12-2014") == expected
@httpretty.activate
def test_sources_client_referrals_response_from_good_inputs():
    """Valid inputs return the parsed referral-sites payload from the fixture."""
    expected = {"Data": [
                {"Site": "bleacherreport.com",
                 "Visits": 0.13685091444901207,
                 "Change": -0.13783225525013185},
                {"Site": "spox.com",
                 "Visits": 0.05541198081414703,
                 "Change": 0.09829387565169896},
                {"Site": "sportal.com.au",
                 "Visits": 0.047765555793472955,
                 "Change": -0.10571922332026579},
                {"Site": "espn.go.com",
                 "Visits": 0.034809331570584835,
                 "Change": 0.19681275612219412},
                {"Site": "ajansspor.com",
                 "Visits": 0.033445473460156236,
                 "Change": 0.045225921939734626},
                {"Site": "baloncesto.as.com",
                 "Visits": 0.0329538056630076,
                 "Change": 0.23717753959296534},
                {"Site": "gazzetta.it",
                 "Visits": 0.032524041065096425,
                 "Change": 0.28017484722779185},
                {"Site": "en.wikipedia.org",
                 "Visits": 0.028624177362280356,
                 "Change": 0.10503524254762013},
                {"Site": "nba.sport24.gr",
                 "Visits": 0.02734001447504297,
                 "Change": -0.057600677746467314},
                {"Site": "nba.co.jp",
                 "Visits": 0.02628877965004145,
                 "Change": -0.21513542547182316}],
                "ResultsCount": 10,
                "TotalCount": 1540,
                "Next": "http://api.similarweb.com/Site/example.com/v1/referrals?start=11-2014&end=12-2014&UserKey=test_key&page=2"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/referrals?start=11-2014&"
                  "end=12-2014&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_referrals_good_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.referrals("example.com", 1, "11-2014", "12-2014")
    assert result == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_completes_full_url():
    """organic_keyword_competitors builds exactly the expected request URL."""
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    client.organic_keyword_competitors("example.com", 1, "11-2014", "12-2014", False)
    assert client.full_url == target_url
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_invalid_api_key():
    """An invalid API key surfaces the API's user_key_invalid error."""
    expected = {"Error": "user_key_invalid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=invalid_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_invalid_api_key_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("invalid_key")
    assert client.organic_keyword_competitors("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_malformed_url():
    """A nonsense domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_url_malformed_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("bad_url", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_malformed_url_incl_http():
    """A scheme-prefixed domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_url_with_http_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("https://example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_bad_page():
    """Page 0 surfaces the API's invalid-page error."""
    expected = {"Error": "The field Page is invalid."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=0&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_page_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("example.com", 0, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_bad_start_date():
    """An unparseable start month surfaces the API's 'not valid for Start' error."""
    expected = {"Error": "The value '14-2014' is not valid for Start."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=14-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_start_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("example.com", 1, "14-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_bad_end_date():
    """An unparseable end month surfaces the API's 'not valid for End' error."""
    expected = {"Error": "The value '0-2014' is not valid for End."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=0-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_end_bad_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("example.com", 1, "11-2014", "0-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_out_of_order_dates():
    """A start month after the end month yields a date-range error."""
    expected = {"Error": "Date range is not valid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=12-2014&"
                  "end=9-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_out_of_order_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("example.com", 1, "12-2014", "9-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_bad_main_domain():
    """An invalid main-domain flag surfaces the API's 'not valid for Md' error.

    Fix: the mock was registered for start=12-2014&end=9-2014 while the
    client is called with 11-2014/12-2014, so the stub never matched the
    actual request. The registered URL now mirrors the call.
    """
    expected = {"Error": "The value 'other' is not valid for Md."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=other&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_organic_keyword_competitors_main_domain_bad_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.organic_keyword_competitors("example.com", 1, "11-2014", "12-2014", "other")
    assert result == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_empty_response():
    """An empty API body maps to the client's generic unknown-error dict."""
    expected = {"Error": "Unknown Error"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_organic_keyword_competitors_empty_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.organic_keyword_competitors("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_organic_keyword_competitors_response_from_good_inputs():
    """Valid inputs return the parsed competitor payload from the fixture.

    Fix: ``expected`` was being overwritten with ``json.loads(stringified)``
    right before the assertion, which compared the fixture against itself (a
    tautology). The overwrite is removed so the hand-written expected data is
    actually checked against the client's parsed result.
    """
    expected = {"Data": {"espn.go.com": 0.029560747253298304,
                         "bleacherreport.com": 0.018884794539523263,
                         "sports.yahoo.com": 0.01660474680676441,
                         "probasketballtalk.nbcsports.com": 0.011305560678493313,
                         "basketball-reference.com": 0.01121870592591383,
                         "cbssports.com": 0.010414195102828587,
                         "sports.sina.com.cn": 0.009849368251728115,
                         "si.com": 0.00836387054675375,
                         "thestar.com": 0.008291876367334255,
                         "stubhub.com": 0.007999157326624741},
                "ResultsCount": 10,
                "TotalCount": 1510,
                "Next": "https://api.similarweb.com/Site/example.com/v1/orgkwcompetitor?start=11-2014&end=12-2014&md=false&UserKey=test_key&page=2"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/orgkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_organic_keyword_competitors_good_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.organic_keyword_competitors("example.com", 1, "11-2014", "12-2014", False)
    assert result == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_completes_full_url():
    """paid_keyword_competitors builds exactly the expected request URL."""
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_keyword_competitors_good_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    client.paid_keyword_competitors("example.com", 1, "11-2014", "12-2014", False)
    assert client.full_url == target_url
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_invalid_api_key():
    """An invalid API key surfaces the API's user_key_invalid error."""
    expected = {"Error": "user_key_invalid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=invalid_key")
    fixture = "{0}/fixtures/sources_client_paid_keyword_competitors_invalid_api_key_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("invalid_key")
    assert client.paid_keyword_competitors("example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_malformed_url():
    """A nonsense domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "bad_url/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_keyword_competitors_url_malformed_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_keyword_competitors("bad_url", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_malformed_url_incl_http():
    """A scheme-prefixed domain is reported as a malformed/unknown URL."""
    expected = {"Error": "Malformed or Unknown URL"}
    target_url = ("https://api.similarweb.com/Site/"
                  "https://example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture = "{0}/fixtures/sources_client_paid_keyword_competitors_url_with_http_response.json".format(TD)
    with open(fixture) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    client = SourcesClient("test_key")
    assert client.paid_keyword_competitors("https://example.com", 1, "11-2014", "12-2014", False) == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_bad_page():
    """Page 0 is rejected by the API with a field-validation error."""
    expected = {"Error": "The field Page is invalid."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=0&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_paid_keyword_competitors_page_bad_response.json".format(TD)
    with open(fixture_path) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    observed = SourcesClient("test_key").paid_keyword_competitors(
        "example.com", 0, "11-2014", "12-2014", False)
    assert observed == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_bad_start_date():
    """A month > 12 in the start date is rejected by the API."""
    expected = {"Error": "The value '14-2014' is not valid for Start."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=14-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_paid_keyword_competitors_start_bad_response.json".format(TD)
    with open(fixture_path) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    observed = SourcesClient("test_key").paid_keyword_competitors(
        "example.com", 1, "14-2014", "12-2014", False)
    assert observed == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_bad_end_date():
    """Month 0 in the end date is rejected by the API."""
    expected = {"Error": "The value '0-2014' is not valid for End."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=0-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_paid_keyword_competitors_end_bad_response.json".format(TD)
    with open(fixture_path) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    observed = SourcesClient("test_key").paid_keyword_competitors(
        "example.com", 1, "11-2014", "0-2014", False)
    assert observed == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_out_of_order_dates():
    """A start date after the end date is rejected as an invalid range."""
    expected = {"Error": "Date range is not valid"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=12-2014&"
                  "end=9-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_paid_keyword_competitors_out_of_order_response.json".format(TD)
    with open(fixture_path) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    observed = SourcesClient("test_key").paid_keyword_competitors(
        "example.com", 1, "12-2014", "9-2014", False)
    assert observed == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_bad_main_domain():
    """A non-boolean `md` value yields the API's Md validation error.

    Bug fix: the stubbed URL previously used start=12-2014&end=9-2014,
    which never matched the 11-2014/12-2014 dates actually requested
    below, so httpretty could not serve the registered fixture.
    """
    expected = {"Error": "The value 'other' is not valid for Md."}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=other&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_paid_keyword_competitors_main_domain_bad_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.paid_keyword_competitors("example.com", 1, "11-2014", "12-2014", "other")
    assert result == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_empty_response():
    """An empty body from the API is surfaced as an unknown error."""
    expected = {"Error": "Unknown Error"}
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    fixture_path = "{0}/fixtures/sources_client_paid_keyword_competitors_empty_response.json".format(TD)
    with open(fixture_path) as handle:
        payload = json.dumps(json.load(handle))
    httpretty.register_uri(httpretty.GET, target_url, body=payload)
    observed = SourcesClient("test_key").paid_keyword_competitors(
        "example.com", 1, "11-2014", "12-2014", False)
    assert observed == expected
@httpretty.activate
def test_sources_client_paid_keyword_competitors_response_from_good_inputs():
    """With valid inputs the client returns the fixture payload unchanged.

    Bug fix: the original test built a large hand-written `expected` dict
    and then immediately overwrote it with `json.loads(stringified)`, so
    the literal was dead code; it has been removed.
    """
    target_url = ("https://api.similarweb.com/Site/"
                  "example.com/v1/paidkwcompetitor?start=11-2014&"
                  "end=12-2014&md=False&page=1&UserKey=test_key")
    f = "{0}/fixtures/sources_client_paid_keyword_competitors_good_response.json".format(TD)
    with open(f) as data_file:
        stringified = json.dumps(json.load(data_file))
    # The expected payload is exactly what the fixture contains.
    expected = json.loads(stringified)
    httpretty.register_uri(httpretty.GET, target_url, body=stringified)
    client = SourcesClient("test_key")
    result = client.paid_keyword_competitors("example.com", 1, "11-2014", "12-2014", False)
    assert result == expected
|
danwagnerco/similarweb
|
tests/test_sources_client.py
|
Python
|
mit
| 57,331
|
"""
Artificial data
===============
Collection of functions to create artificial data.
"""
|
tgquintela/ElectionsTools
|
ElectionsTools/artificial_data/__init__.py
|
Python
|
mit
| 93
|
#Joshua Pepperman
class Batters:
    """Ordered collection of batter objects with fantasy-scoring helpers.

    Each stored batter is expected to expose ``getStat(name)``,
    ``toCSV()``, ``toString()`` and a ``statDict`` mapping.
    """

    # Team abbreviations recognised by getStadiumRank (substring match).
    # NOTE(review): every team mapped to rank 1 in the original elif
    # chain; replace with real per-stadium ranks when available.
    _TEAM_CODES = ('WSH', 'TOR', 'TEX', 'TB', 'STL', 'SF', 'SEA', 'SD',
                   'PIT', 'PHI', 'OAK', 'NYY', 'NYM', 'MIN', 'MIL', 'MIA',
                   'LAD', 'LAA', 'KC', 'HOU', 'DET', 'COL', 'CLE', 'CIN',
                   'CHW', 'CHC', 'BOS', 'BAL', 'ATL', 'ARI')

    # Fantasy-point weight per stat, applied by calculateScores.
    _SCORE_WEIGHTS = (('r', 1), ('h', 1), ('b2', 2), ('b3', 3), ('hr', 4),
                      ('rbi', 1), ('sb', 2), ('cs', -1), ('bb', 1))

    def __init__(self):
        # Insertion-ordered list of batter objects.
        self.batters = []

    def __iter__(self):
        return iter(self.batters)

    def __getitem__(self, key):
        return self.batters[key]

    def __len__(self):
        return len(self.batters)

    def indexOf(self, player):
        """Return the position of *player* in the list, or -1 if absent."""
        for index, candidate in enumerate(self.batters):
            if candidate == player:
                return index
        return -1

    def addBatter(self, batter):
        """Append *batter* to the end of the collection."""
        self.batters.append(batter)

    def hasBatter(self, playerName):
        """Return True if a batter whose 'name' stat equals *playerName* exists.

        Bug fix: the original fell off the end and returned None when the
        player was missing; now returns False explicitly (same truthiness).
        """
        for candidate in self.batters:
            if candidate.getStat('name') == playerName:
                return True
        return False

    def calculateScores(self):
        """Compute a fantasy score for every batter into statDict['score']."""
        for player in self.batters:
            player.statDict['score'] = sum(
                player.getStat(stat) * weight
                for stat, weight in self._SCORE_WEIGHTS)

    def getStadiumRank(self, team):
        """Return the stadium rank for *team* (substring match), 0 if unknown.

        Bug fix: the original computed the rank but never returned it,
        so every call yielded None.
        """
        for code in self._TEAM_CODES:
            if code in team:
                return 1
        return 0

    def getBatter(self, playerName):
        """Return the first batter whose 'name' stat equals *playerName*."""
        for player in self.batters:
            if player.getStat('name') == playerName:
                return player

    def sortBy(self, index):
        """Sort batters in descending order of the stat named *index*.

        The stat name is remembered for toString/toStringInRange.
        """
        self.index = index
        self.batters.sort(key=lambda x: x.statDict[index], reverse=True)

    def toCSV(self):
        """Return the whole collection as CSV text with a header row."""
        lines = ["name,ab,r,h,2b,3b,hr,rbi,sb,cs,bb,so,avg,obp,slg,ops,war,score"]
        lines.extend(batter.toCSV() for batter in self.batters)
        return '\n'.join(lines) + '\n'

    def toString(self):
        """Return one line per batter: its toString() plus the sortBy stat."""
        batterString = ""
        for batter in self.batters:
            batterString = batterString + batter.toString() + "\t" + str(batter.getStat(self.index)) + '\n'
        return batterString

    def toStringInRange(self, rang):
        """Like toString, but only for the positions listed in *rang*."""
        batterString = ""
        for i in rang:
            batterString = batterString + self.batters[i].toString() + "\t" + str(self.batters[i].getStat(self.index)) + '\n'
        return batterString
|
jdpepperman/baseballStats
|
Batters.py
|
Python
|
gpl-2.0
| 2,886
|
import time
import numpy
def getDataFromFile(filename):
    """Return a dense 4-D array built from a CSV file.

    The file's first row is a header; every following row holds four
    integer indices and a value. Cells not present in the file are 0.0.

    Bug fixes vs the original:
    - the output was allocated with ``numpy.ndarray`` (uninitialized
      memory), so absent cells held garbage -- now ``numpy.zeros``;
    - shape and indices came from float data and are now cast to int,
      as required by modern numpy;
    - ``ndmin=2`` keeps a single-data-row file from collapsing to 1-D.
    """
    data_array = numpy.loadtxt(filename, delimiter=",", skiprows=1, ndmin=2)
    keys = data_array[:, :4].astype(int)
    shape = tuple(keys.max(axis=0) + 1)
    output = numpy.zeros(shape)
    # One vectorized scatter instead of the original per-row loop.
    output[keys[:, 0], keys[:, 1], keys[:, 2], keys[:, 3]] = data_array[:, 4]
    return output
def parseList(input_string):
    """Expand a compact comma-separated spec into a list.

    Each token is either ``a..b`` (inclusive integer range), ``a+b+...``
    (a nested list of the summed-together items) or a bare integer.
    """
    parsed = []
    for token in input_string.split(","):
        range_parts = token.split("..")
        if len(range_parts) > 1:
            parsed.extend(range(int(range_parts[0]), int(range_parts[1]) + 1))
            continue
        plus_parts = range_parts[0].split("+")
        if len(plus_parts) > 1:
            parsed.append([int(part) for part in plus_parts])
        else:
            parsed.append(int(plus_parts[0]))
    return parsed
def exportResults(filename, options, func_names, fitting_functions,
                  fitting_scores, popts, datasets_codes, r_squared_coeffs):
    """
    Export the curve fitting results into the file filename.

    One space-separated line is written per (dataset, function) pair:
    the dataset codes, the function name, the fitted parameters, the
    error norm and the R-squared coefficient.

    *options* and *fitting_functions* are accepted for interface
    compatibility; a richer report using them (header, docstrings) was
    present as commented-out code and has been removed.

    Improvements vs the original: the file is opened with a context
    manager so it is closed even on error, and the unused ``index``
    counter is gone.
    """
    with open(filename, 'w') as f:
        for i, scores in enumerate(fitting_scores):
            for j, name in enumerate(func_names):
                fields = [str(code) for code in datasets_codes[i]]
                fields.append(str(name))
                fields.extend(str(param) for param in popts[i][j])
                fields.append(str(scores[j]))
                fields.append(str(r_squared_coeffs[i][j]))
                f.write(" ".join(fields) + "\n")
|
grungi-ankhfire/curvefit
|
util.py
|
Python
|
mit
| 2,780
|
# -*- coding: utf-8 -*-
# EForge project management system, Copyright © 2010, Element43
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from django.db import models
from django.core.urlresolvers import reverse
from eforge.models import Project
from eforge.update.models import Update, register_update_type
from eforge.vcs import project_repository
class Revision(models.Model):
    """A single VCS revision (commit) belonging to a project.

    Only identifying data lives in the database; author, message and
    similar fields are proxied from the VCS plugin's revision object
    (see ``_proxy_property`` later in this module).
    """

    # Surrogate primary key; ``id`` itself stores the revision identifier
    # (up to 40 chars, e.g. a git SHA-1), indexed for lookups.
    id_no = models.AutoField(primary_key=True)
    id = models.CharField(max_length=40, db_index=True)
    project = models.ForeignKey(Project)
    # Parent/child commits form the revision graph.
    parents = models.ManyToManyField('self', related_name='children')
    date = models.DateTimeField()

    @property
    def vcs_revision(self):
        """ Revision object from the VCS plugin """
        # Lazily fetched from the repository and memoized per instance.
        if not getattr(self, '_vcs_revision', None):
            self._vcs_revision = project_repository(self.project).revision(self.id)
        return self._vcs_revision

    class Update:
        # Adapter consumed by register_update_type(): maps a Revision into
        # the generic "update feed" interface used by eforge.update.
        # NOTE(review): these classmethods name their first argument
        # ``self`` although it receives the class; the conventional name
        # is ``cls``.
        @classmethod
        def user(self, revision):
            return revision.author_user

        @classmethod
        def project(self, revision):
            return revision.project

        @classmethod
        def summary(self, revision):
            return 'Revision %s' % revision.short_id

        @classmethod
        def description(self, revision):
            return revision.message

        @classmethod
        def url(self, revision):
            return reverse('browse-revision',
                           args=[revision.project.slug, revision.id])

        @classmethod
        def date(self, revision):
            return revision.date

# Make revisions appear in the project update feed.
register_update_type(Revision)
def _proxy_property(name):
    """Expose ``vcs_revision.<name>`` as a read-only attribute on Revision."""
    def _proxy(self):
        return getattr(self.vcs_revision, name)
    setattr(Revision, name, property(_proxy))


# Mirror the commonly used VCS revision attributes onto the model.
for _attr in ('short_id', 'author_email', 'author_name', 'author_user',
              'message', 'short_message', 'root'):
    _proxy_property(_attr)
|
oshepherd/eforge
|
eforge/vcs/models.py
|
Python
|
isc
| 2,863
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import copy
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.compute import claims
from nova.compute import flavors
from nova.compute import monitors
from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import exception
from nova.i18n import _, _LI, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common import log as logging
from nova.pci import manager as pci_manager
from nova.pci import whitelist as pci_whitelist
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
# Configuration options controlling how much host capacity is held back
# from scheduling and which helper classes the tracker loads.
resource_tracker_opts = [
    cfg.IntOpt('reserved_host_disk_mb', default=0,
               help='Amount of disk in MB to reserve for the host'),
    cfg.IntOpt('reserved_host_memory_mb', default=512,
               help='Amount of memory in MB to reserve for the host'),
    cfg.StrOpt('compute_stats_class',
               default='nova.compute.stats.Stats',
               help='Class that will manage stats for the local compute host'),
    cfg.ListOpt('compute_resources',
                default=['vcpu'],
                help='The names of the extra resources to track.'),
]

CONF = cfg.CONF
CONF.register_opts(resource_tracker_opts)

LOG = logging.getLogger(__name__)
# Name of the lock serializing all claim/audit operations in this module
# (used by the @utils.synchronized decorators below).
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"

# 'my_ip' is declared in nova.netconf; imported so CONF.my_ip resolves here.
CONF.import_opt('my_ip', 'nova.netconf')
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
    def __init__(self, host, driver, nodename):
        """Set up tracking state for one (host, nodename) compute node.

        :param host: name of the compute service host
        :param driver: virt driver backing this host
        :param nodename: hypervisor node this tracker audits
        """
        self.host = host
        self.driver = driver
        self.pci_tracker = None
        self.pci_filter = pci_whitelist.get_pci_devices_filter()
        self.nodename = nodename
        # None until update_available_resource() succeeds; while None the
        # tracker is considered disabled (see the `disabled` property).
        self.compute_node = None
        self.stats = importutils.import_object(CONF.compute_stats_class)
        # uuid -> data for instances/migrations this tracker has claimed
        # resources for.
        self.tracked_instances = {}
        self.tracked_migrations = {}
        self.conductor_api = conductor.API()
        monitor_handler = monitors.ResourceMonitorHandler()
        self.monitors = monitor_handler.choose_monitors(self)
        self.ext_resources_handler = \
            ext_resources.ResourceHandler(CONF.compute_resources)
        # Last resources dict pushed to the scheduler; used by
        # _resource_change() to skip redundant updates.
        self.old_resources = {}
        self.scheduler_client = scheduler_client.SchedulerClient()
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks:
        if instance_ref['host']:
            LOG.warning(_LW("Host field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance_ref)

        if instance_ref['node']:
            LOG.warning(_LW("Node field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance_ref)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_ref)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance_ref['memory_mb'],
                         'overhead': overhead['memory_mb']})

        # NOTE(review): Claim presumably validates the request against the
        # node's free resources and *limits* -- confirm in nova.compute.claims.
        claim = claims.Claim(context, instance_ref, self, self.compute_node,
                             overhead=overhead, limits=limits)

        self._set_instance_host_and_node(context, instance_ref)

        # Persist the claimed NUMA layout on the instance so later audits
        # see the pinned topology.
        instance_ref['numa_topology'] = claim.claimed_numa_topology

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, self.compute_node,
                                         instance_ref)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated, self.compute_node)

        return claim
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def resize_claim(self, context, instance, instance_type,
                     image_meta=None, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.

        :param context: security context
        :param instance: instance object to reserve resources for
        :param instance_type: new instance_type being resized to
        :param image_meta: image metadata dict (defaults to empty)
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs
        :returns: A Claim ticket representing the reserved resources.  This
                  should be used to finalize the resource claim or free the
                  resources after the compute operation is finished.
        """
        image_meta = image_meta or {}

        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration = self._create_migration(context, instance,
                                               instance_type)
            return claims.NopClaim(migration=migration)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_type)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance_type['memory_mb'],
                         'overhead': overhead['memory_mb']})

        # Claims still operate on the legacy dict form of the instance.
        instance_ref = instance_obj.compat_instance(instance)
        claim = claims.ResizeClaim(context, instance_ref, instance_type,
                                   image_meta, self, self.compute_node,
                                   overhead=overhead, limits=limits)

        migration = self._create_migration(context, instance_ref,
                                           instance_type)
        claim.migration = migration

        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance_ref, image_meta,
                                          self.compute_node, migration)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)

        return claim
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.

        :returns: the persisted objects.Migration in 'pre-migrating' state.
        """
        old_instance_type = flavors.extract_flavor(instance)

        migration = objects.Migration(context=context.elevated())
        # This host/node is the *destination* of the resize; the source is
        # wherever the instance currently lives.
        migration.dest_compute = self.host
        migration.dest_node = self.nodename
        migration.dest_host = self.driver.get_host_ip_addr()
        migration.old_instance_type_id = old_instance_type['id']
        migration.new_instance_type_id = instance_type['id']
        migration.status = 'pre-migrating'
        migration.instance_uuid = instance['uuid']
        migration.source_compute = instance['host']
        migration.source_node = instance['node']
        migration.create()
        return migration
    def _set_instance_host_and_node(self, context, instance_ref):
        """Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
        will not be lost if the audit process starts.
        """
        values = {'host': self.host, 'node': self.nodename,
                  'launched_on': self.host}
        self.conductor_api.instance_update(context, instance_ref['uuid'],
                                           **values)
        # Mirror the DB update on the in-memory dict so callers see the
        # new values without re-fetching the instance.
        instance_ref['host'] = self.host
        instance_ref['launched_on'] = self.host
        instance_ref['node'] = self.nodename
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def abort_instance_claim(self, context, instance):
        """Remove usage from the given instance and push the freed
        resources to the scheduler.
        """
        # flag the instance as deleted to revert the resource usage
        # and associated stats:
        instance['vm_state'] = vm_states.DELETED
        self._update_usage_from_instance(context, self.compute_node, instance)

        self._update(context.elevated(), self.compute_node)
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def drop_resize_claim(self, context, instance, instance_type=None,
                          image_meta=None, prefix='new_'):
        """Remove usage for an incoming/outgoing migration.

        *prefix* selects which flavor ('new_' or 'old_') to look up when
        *instance_type* is not supplied.
        """
        if instance['uuid'] in self.tracked_migrations:
            migration, itype = self.tracked_migrations.pop(instance['uuid'])

            if not instance_type:
                ctxt = context.elevated()
                instance_type = self._get_instance_type(ctxt, instance, prefix)

            if image_meta is None:
                image_meta = utils.get_image_from_system_metadata(
                        instance['system_metadata'])

            # Only revert usage if the flavor being dropped matches the one
            # this tracker recorded for the migration.
            if instance_type['id'] == itype['id']:
                numa_topology = hardware.numa_get_constraints(
                    itype, image_meta)
                usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
                if self.pci_tracker:
                    self.pci_tracker.update_pci_for_migration(context,
                                                              instance,
                                                              sign=-1)
                self._update_usage(context, self.compute_node, usage, sign=-1)

                ctxt = context.elevated()
                self._update(ctxt, self.compute_node)
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def update_usage(self, context, instance):
        """Update the resource usage and stats after a change in an
        instance.
        """
        if self.disabled:
            return

        uuid = instance['uuid']

        # don't update usage for this instance unless it submitted a resource
        # claim first:
        if uuid in self.tracked_instances:
            self._update_usage_from_instance(context, self.compute_node,
                                             instance)
            self._update(context.elevated(), self.compute_node)
    @property
    def disabled(self):
        """True while there is no compute node record to update (e.g. the
        virt driver does not support resource reporting).
        """
        return self.compute_node is None
    def _get_host_metrics(self, context, nodename):
        """Get the metrics from monitors and
        notify information to message bus.

        :returns: the (possibly empty) list of collected metrics.
        """
        metrics = []
        metrics_info = {}
        for monitor in self.monitors:
            try:
                metrics += monitor.get_metrics(nodename=nodename)
            except Exception:
                # A failing monitor must not break the audit; log and skip.
                LOG.warning(_LW("Cannot get the metrics from %s."), monitor)
        if metrics:
            metrics_info['nodename'] = nodename
            metrics_info['metrics'] = metrics
            metrics_info['host'] = self.host
            metrics_info['host_ip'] = CONF.my_ip
            notifier = rpc.get_notifier(service='compute', host=nodename)
            notifier.info(context, 'compute.metrics.update', metrics_info)
        return metrics
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        LOG.audit(_("Auditing locally available compute resources"))
        resources = self.driver.get_available_resource(self.nodename)

        if not resources:
            # The virt driver does not support this function
            LOG.audit(_("Virt driver does not support "
                "'get_available_resource' Compute tracking is disabled."))
            # Disables the tracker (see the `disabled` property).
            self.compute_node = None
            return
        resources['host_ip'] = CONF.my_ip

        # We want the 'cpu_info' to be None from the POV of the
        # virt driver, but the DB requires it to be non-null so
        # just force it to empty string
        if ("cpu_info" not in resources or
            resources["cpu_info"] is None):
            resources["cpu_info"] = ''

        # TODO(berrange): remove this once all virt drivers are updated
        # to report topology
        if "numa_topology" not in resources:
            resources["numa_topology"] = None

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        return self._update_available_resource(context, resources)
    @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def _update_available_resource(self, context, resources):
        """Fold instance, migration and orphan usage into the hypervisor's
        raw *resources* view and sync the result to the DB/scheduler.
        """
        # Rebuild the PCI device pool from the hypervisor's view, keeping
        # only whitelisted, non-physical-function devices.
        if 'pci_passthrough_devices' in resources:
            if not self.pci_tracker:
                self.pci_tracker = pci_manager.PciDevTracker()

            devs = []
            for dev in jsonutils.loads(resources.pop(
                    'pci_passthrough_devices')):
                if dev['dev_type'] == 'type-PF':
                    continue

                if self.pci_filter.device_assignable(dev):
                    devs.append(dev)

            self.pci_tracker.set_hvdevs(devs)

        # Grab all instances assigned to this node:
        instances = objects.InstanceList.get_by_host_and_node(
            context, self.host, self.nodename,
            expected_attrs=['system_metadata',
                            'numa_topology'])

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(context, resources, instances)

        # Grab all in-progress migrations:
        capi = self.conductor_api
        migrations = capi.migration_get_in_progress_by_host_and_node(context,
                self.host, self.nodename)

        self._update_usage_from_migrations(context, resources, migrations)

        # Detect and account for orphaned instances that may exist on the
        # hypervisor, but are not in the DB:
        orphans = self._find_orphaned_instances()
        self._update_usage_from_orphans(context, resources, orphans)

        # NOTE(yjiang5): Because pci device tracker status is not cleared in
        # this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need remove all usages
        # from deleted instances.
        if self.pci_tracker:
            self.pci_tracker.clean_usage(instances, migrations, orphans)
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])

        self._report_final_resource_view(resources)

        metrics = self._get_host_metrics(context, self.nodename)
        resources['metrics'] = jsonutils.dumps(metrics)
        self._sync_compute_node(context, resources)
    def _sync_compute_node(self, context, resources):
        """Create or update the compute node DB record."""
        if not self.compute_node:
            # we need a copy of the ComputeNode record:
            service = self._get_service(context)
            if not service:
                # no service record, disable resource
                return

            cn = self._get_compute_node(context)
            if cn:
                self.compute_node = cn
                if self.pci_tracker:
                    self.pci_tracker.set_compute_node_id(cn['id'])

        if not self.compute_node:
            # Need to create the ComputeNode record:
            resources['service_id'] = service['id']
            resources['host'] = self.host
            self._create(context, resources)
            if self.pci_tracker:
                self.pci_tracker.set_compute_node_id(self.compute_node['id'])
            LOG.info(_LI('Compute_service record created for '
                         '%(host)s:%(node)s'),
                     {'host': self.host, 'node': self.nodename})

        else:
            # just update the record:
            # TODO(sbauza): Juno compute nodes are missing the host field and
            # the Juno ResourceTracker does not set this field, even if
            # the ComputeNode object can show it.
            # Unfortunately, as we're not yet using ComputeNode.save(), we need
            # to add this field in the resources dict until the RT is using
            # the ComputeNode.save() method for populating the table.
            # tl;dr: To be removed once RT is using ComputeNode.save()
            resources['host'] = self.host
            self._update(context, resources)
            LOG.info(_LI('Compute_service record updated for '
                         '%(host)s:%(node)s'),
                     {'host': self.host, 'node': self.nodename})
    def _get_compute_node(self, context):
        """Returns compute node for the host and nodename.

        Returns None (implicitly) when no record exists.
        """
        try:
            compute = objects.ComputeNode.get_by_host_and_nodename(
                context, self.host, self.nodename)
            # Convert the object to the plain dict form the rest of this
            # tracker still works with.
            return obj_base.obj_to_primitive(compute)
        except exception.NotFound:
            LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
                        {'host': self.host, 'node': self.nodename})
    def _write_ext_resources(self, resources):
        """Reset resources['stats'] from the local stats object and let each
        extra-resource plugin write its values into *resources*.
        """
        resources['stats'] = {}
        resources['stats'].update(self.stats)
        self.ext_resources_handler.write_resources(resources)
    def _create(self, context, values):
        """Create the compute node in the DB."""
        # initialize load stats from existing instances:
        self._write_ext_resources(values)
        # NOTE(pmurray): the stats field is stored as a json string. The
        # json conversion will be done automatically by the ComputeNode object
        # so this can be removed when using ComputeNode.
        values['stats'] = jsonutils.dumps(values['stats'])

        self.compute_node = self.conductor_api.compute_node_create(context,
                                                                   values)
        # NOTE(sbauza): We don't want to miss the first creation event
        self._update_resource_stats(context, values)
    def _get_service(self, context):
        """Return the service record for this host, or None when missing."""
        try:
            return self.conductor_api.service_get_by_compute_host(context,
                                                                  self.host)
        except exception.NotFound:
            LOG.warning(_LW("No service record for host %s"), self.host)
    def _report_hypervisor_resource_view(self, resources):
        """Log the hypervisor's view of free resources.

        This is just a snapshot of resource usage recorded by the
        virt driver.

        The following resources are logged:
            - free memory
            - free disk
            - free CPUs
            - assignable PCI devices
        """
        free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
        free_disk_gb = resources['local_gb'] - resources['local_gb_used']
        LOG.debug("Hypervisor: free ram (MB): %s" % free_ram_mb)
        LOG.debug("Hypervisor: free disk (GB): %s" % free_disk_gb)

        vcpus = resources['vcpus']
        if vcpus:
            free_vcpus = vcpus - resources['vcpus_used']
            LOG.debug("Hypervisor: free VCPUs: %s" % free_vcpus)
        else:
            # Some drivers do not report vcpu counts at all.
            LOG.debug("Hypervisor: VCPU information unavailable")

        if ('pci_passthrough_devices' in resources and
                resources['pci_passthrough_devices']):
            LOG.debug("Hypervisor: assignable PCI devices: %s" %
                      resources['pci_passthrough_devices'])
        else:
            LOG.debug("Hypervisor: no assignable PCI devices")
    def _report_final_resource_view(self, resources):
        """Report final calculate of physical memory, used virtual memory,
        disk, usable vCPUs, used virtual CPUs and PCI devices,
        including instance calculations and in-progress resource claims. These
        values will be exposed via the compute node table to the scheduler.
        """
        LOG.audit(_("Total physical ram (MB): %(pram)s, "
                    "total allocated virtual ram (MB): %(vram)s"),
                  {'pram': resources['memory_mb'],
                   'vram': resources['memory_mb_used']})
        LOG.audit(_("Total physical disk (GB): %(pdisk)s, "
                    "total allocated virtual disk (GB): %(vdisk)s"),
                  {'pdisk': resources['local_gb'],
                   'vdisk': resources['local_gb_used']})

        vcpus = resources['vcpus']
        if vcpus:
            LOG.audit(_("Total usable vcpus: %(tcpu)s, "
                        "total allocated vcpus: %(ucpu)s"),
                      {'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
        else:
            # Some drivers do not report vcpu counts at all.
            LOG.audit(_("Free VCPU information unavailable"))

        if 'pci_stats' in resources:
            LOG.audit(_("PCI stats: %s"), resources['pci_stats'])
def _resource_change(self, resources):
"""Check to see if any resouces have changed."""
if cmp(resources, self.old_resources) != 0:
self.old_resources = copy.deepcopy(resources)
return True
return False
    def _update(self, context, values):
        """Update partial stats locally and populate them to Scheduler.

        No-op when *values* equals the previously pushed snapshot; otherwise
        the local compute_node dict is updated, the new values are pushed to
        the scheduler and, when PCI tracking is enabled, PCI claims are
        persisted.
        """
        self._write_ext_resources(values)
        # NOTE(pmurray): the stats field is stored as a json string. The
        # json conversion will be done automatically by the ComputeNode object
        # so this can be removed when using ComputeNode.
        values['stats'] = jsonutils.dumps(values['stats'])
        if not self._resource_change(values):
            return
        if "service" in self.compute_node:
            del self.compute_node['service']
        # NOTE(sbauza): Now the DB update is asynchronous, we need to locally
        # update the values
        self.compute_node.update(values)
        # Persist the stats to the Scheduler
        self._update_resource_stats(context, values)
        if self.pci_tracker:
            self.pci_tracker.save(context)
def _update_resource_stats(self, context, values):
stats = values.copy()
stats['id'] = self.compute_node['id']
self.scheduler_client.update_resource_stats(
context, (self.host, self.nodename), stats)
    def _update_usage(self, context, resources, usage, sign=1):
        """Apply (sign=+1) or remove (sign=-1) one instance's usage.

        :param resources: resource dict adjusted in place
        :param usage: dict with at least 'memory_mb'; 'root_gb' and
            'ephemeral_gb' default to 0 when absent
        :param sign: +1 to add the usage, -1 to subtract it
        """
        mem_usage = usage['memory_mb']
        # charge hypervisor per-instance overhead on top of the flavor RAM
        overhead = self.driver.estimate_instance_overhead(usage)
        mem_usage += overhead['memory_mb']
        resources['memory_mb_used'] += sign * mem_usage
        resources['local_gb_used'] += sign * usage.get('root_gb', 0)
        resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
        # free ram and disk may be negative, depending on policy:
        resources['free_ram_mb'] = (resources['memory_mb'] -
                                    resources['memory_mb_used'])
        resources['free_disk_gb'] = (resources['local_gb'] -
                                     resources['local_gb_used'])
        resources['running_vms'] = self.stats.num_instances
        self.ext_resources_handler.update_from_instance(usage, sign)
        # Calculate the numa usage
        free = sign == -1
        updated_numa_topology = hardware.get_host_numa_usage_from_instance(
                resources, usage, free)
        resources['numa_topology'] = updated_numa_topology
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration. The record may
        represent an incoming or outbound migration.

        For a same-node resize, usage is recorded for whichever flavor the
        instance is currently *not* occupying, so both the new size and a
        possible revert are accounted for. Successfully processed
        migrations are remembered in self.tracked_migrations.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)
        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)
        record = self.tracked_instances.get(uuid, None)
        itype = None
        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration['old_instance_type_id'])
        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration['new_instance_type_id'])
        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration['old_instance_type_id'])
        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])
        if itype:
            # fit the flavor's NUMA constraints against the host topology
            host_topology = resources.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                        host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, numa_topology))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                    self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
def _update_usage_from_migrations(self, context, resources, migrations):
self.tracked_migrations.clear()
filtered = {}
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
instance = migration['instance']
if not instance:
# migration referencing deleted instance
continue
uuid = instance['uuid']
# skip migration if instance isn't in a resize state:
if not self._instance_in_resize_state(instance):
LOG.warning(_LW("Instance not resizing, skipping migration."),
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
m = filtered.get(uuid, None)
if not m or migration['updated_at'] >= m['updated_at']:
filtered[uuid] = migration
for migration in filtered.values():
instance = migration['instance']
try:
self._update_usage_from_migration(context, instance, None,
resources, migration)
except exception.FlavorNotFound:
LOG.warning(_LW("Flavor could not be found, skipping "
"migration."), instance_uuid=uuid)
continue
    def _update_usage_from_instance(self, context, resources, instance):
        """Update usage for a single instance.

        Newly seen instances have their usage added (sign=+1); instances in
        DELETED vm_state are dropped from tracking and their usage
        subtracted (sign=-1). Workload and PCI stats are refreshed either
        way.
        """
        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        is_deleted_instance = instance['vm_state'] == vm_states.DELETED
        if is_new_instance:
            self.tracked_instances[uuid] = instance_obj.compat_instance(
                instance)
            sign = 1
        if is_deleted_instance:
            self.tracked_instances.pop(uuid)
            sign = -1
        self.stats.update_stats_for_instance(instance)
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_instance(context, instance)
        # if it's a new or deleted instance:
        if is_new_instance or is_deleted_instance:
            # new instance, update compute node resource usage:
            self._update_usage(context, resources, instance, sign=sign)
        resources['current_workload'] = self.stats.calculate_workload()
        if self.pci_tracker:
            resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])
    def _update_usage_from_instances(self, context, resources, instances):
        """Calculate resource usage based on instance utilization. This is
        different than the hypervisor's view as it will account for all
        instances assigned to the local compute host, even if they are not
        currently powered on.
        """
        self.tracked_instances.clear()
        # purge old stats and init with anything passed in by the driver
        self.stats.clear()
        self.stats.digest_stats(resources.get('stats'))
        # set some initial values, reserve room for host/hypervisor:
        resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
        resources['memory_mb_used'] = CONF.reserved_host_memory_mb
        resources['free_ram_mb'] = (resources['memory_mb'] -
                                    resources['memory_mb_used'])
        resources['free_disk_gb'] = (resources['local_gb'] -
                                     resources['local_gb_used'])
        resources['current_workload'] = 0
        resources['running_vms'] = 0
        # Reset values for extended resources
        self.ext_resources_handler.reset_resources(resources, self.driver)
        # DELETED instances contribute nothing; everything else counts even
        # when powered off
        for instance in instances:
            if instance['vm_state'] != vm_states.DELETED:
                self._update_usage_from_instance(context, resources, instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
    def _update_usage_from_orphans(self, context, resources, orphans):
        """Include orphaned instances in usage.

        Only memory is charged for an orphan, since that is all the driver
        reports for them.
        """
        for orphan in orphans:
            memory_mb = orphan['memory_mb']
            LOG.warning(_LW("Detected running orphan instance: %(uuid)s "
                            "(consuming %(memory_mb)s MB memory)"),
                        {'uuid': orphan['uuid'], 'memory_mb': memory_mb})
            # just record memory usage for the orphan
            usage = {'memory_mb': memory_mb}
            self._update_usage(context, resources, usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _instance_in_resize_state(self, instance):
vm = instance['vm_state']
task = instance['task_state']
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]):
return True
return False
    def _get_instance_type(self, context, instance, prefix,
                           instance_type_id=None):
        """Get the instance type from sys metadata if it's stashed. If not,
        fall back to fetching it via the object API.

        See bug 1164110

        :param prefix: flavor-key prefix in system metadata ('new_'/'old_')
        :param instance_type_id: flavor id for the DB fallback; defaults to
            the instance's own instance_type_id
        :returns: stashed flavor dict, or Flavor object from the DB fallback
        """
        try:
            extracted_flavor = flavors.extract_flavor(instance, prefix)
        except KeyError:
            if not instance_type_id:
                instance_type_id = instance['instance_type_id']
            return objects.Flavor.get_by_id(context, instance_type_id)
        return extracted_flavor
    def _get_usage_dict(self, object_or_dict, **updates):
        """Make a usage dict _update methods expect.

        Accepts a dict or an Instance or Flavor object, and a set of updates.
        Converts the object to a dict and applies the updates.

        :param object_or_dict: instance or flavor as an object or just a dict
        :param updates: key-value pairs to update the passed object.
                        Currently only considers 'numa_topology', all other
                        keys are ignored.

        :returns: a dict with all the information from object_or_dict updated
                  with updates
        """
        usage = {}
        if isinstance(object_or_dict, objects.Instance):
            usage = instance_obj.compat_instance(object_or_dict)
        elif isinstance(object_or_dict, objects.Flavor):
            usage = obj_base.obj_to_primitive(object_or_dict)
        else:
            usage.update(object_or_dict)
        # only a whitelisted subset of the extra keys is honoured
        for key in ('numa_topology',):
            if key in updates:
                usage[key] = updates[key]
        return usage
|
sajeeshcs/nested_quota_final
|
nova/compute/resource_tracker.py
|
Python
|
apache-2.0
| 36,224
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# training hyper-parameters
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Conv2D needs an explicit channel axis; its position depends on the
# backend's image_data_format setting.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# scale 8-bit pixel values to floats in [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# two conv layers + max-pooling, then a dense classifier head with dropout
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
# final held-out evaluation
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
rsteed11/GAT
|
gat/scraping/ArabicTextExtractor/DeepMNISTKeras.py
|
Python
|
mit
| 2,269
|
"""Support for PlayStation 4 consoles."""
import logging
import os
import voluptuous as vol
from pyps4_2ndscreen.ddp import async_create_ddp_endpoint
from pyps4_2ndscreen.media_art import COUNTRIES
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry, config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import location
from homeassistant.util.json import load_json, save_json
from .config_flow import PlayStation4FlowHandler # noqa: pylint: disable=unused-import
from .const import ATTR_MEDIA_IMAGE_URL, COMMANDS, DOMAIN, GAMES_FILE, PS4_DATA
_LOGGER = logging.getLogger(__name__)
SERVICE_COMMAND = "send_command"
PS4_COMMAND_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_COMMAND): vol.In(list(COMMANDS)),
}
)
class PS4Data:
    """Runtime data shared by the PS4 integration."""

    def __init__(self):
        """Start with no known devices and no DDP protocol yet."""
        self.devices = []
        self.protocol = None
async def async_setup(hass, config):
    """Set up the PS4 Component.

    Creates the shared DDP (second-screen discovery) endpoint and registers
    the send_command service.
    """
    hass.data[PS4_DATA] = PS4Data()
    transport, protocol = await async_create_ddp_endpoint()
    hass.data[PS4_DATA].protocol = protocol
    _LOGGER.debug("PS4 DDP endpoint created: %s, %s", transport, protocol)
    service_handle(hass)
    return True
async def async_setup_entry(hass, config_entry):
    """Set up PS4 from a config entry."""
    # Forward setup to the media_player platform, which owns the entities.
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(config_entry, "media_player")
    )
    return True
async def async_unload_entry(hass, entry):
    """Unload a PS4 config entry."""
    # media_player is the only platform set up for this entry.
    await hass.config_entries.async_forward_entry_unload(entry, "media_player")
    return True
async def async_migrate_entry(hass, entry):
    """Migrate old entry.

    Version 1 -> 2 rewrites device region codes from the detected location;
    version 2 -> 3 rebuilds entity-registry unique ids (MAC + credential
    suffix). When migration cannot complete, a persistent notification asks
    the user to re-configure the integration.
    """
    config_entries = hass.config_entries
    data = entry.data
    version = entry.version

    _LOGGER.debug("Migrating PS4 entry from Version %s", version)

    reason = {
        1: "Region codes have changed",
        2: "Format for Unique ID for entity registry has changed",
    }

    # Migrate Version 1 -> Version 2: New region codes.
    if version == 1:
        loc = await location.async_detect_location_info(
            hass.helpers.aiohttp_client.async_get_clientsession()
        )
        if loc:
            country = loc.country_name
            if country in COUNTRIES:
                for device in data["devices"]:
                    device[CONF_REGION] = country
                version = entry.version = 2
                config_entries.async_update_entry(entry, data=data)
                _LOGGER.info(
                    "PlayStation 4 Config Updated: \
                    Region changed to: %s",
                    country,
                )

    # Migrate Version 2 -> Version 3: Update identifier format.
    if version == 2:
        # Prevent changing entity_id. Updates entity registry.
        registry = await entity_registry.async_get_registry(hass)

        for entity_id, e_entry in registry.entities.items():
            if e_entry.config_entry_id == entry.entry_id:
                unique_id = e_entry.unique_id

                # Remove old entity entry.
                registry.async_remove(entity_id)

                # Format old unique_id.
                unique_id = format_unique_id(entry.data[CONF_TOKEN], unique_id)

                # Create new entry with old entity_id.
                new_id = split_entity_id(entity_id)[1]
                registry.async_get_or_create(
                    "media_player",
                    DOMAIN,
                    unique_id,
                    suggested_object_id=new_id,
                    config_entry=entry,
                    device_id=e_entry.device_id,
                )
                entry.version = 3
                _LOGGER.info(
                    "PlayStation 4 identifier for entity: %s \
                    has changed",
                    entity_id,
                )
                config_entries.async_update_entry(entry)
                return True

    msg = """{} for the PlayStation 4 Integration.
            Please remove the PS4 Integration and re-configure
            [here](/config/integrations).""".format(
        reason[version]
    )

    hass.components.persistent_notification.async_create(
        title="PlayStation 4 Integration Configuration Requires Update",
        message=msg,
        notification_id="config_entry_migration",
    )

    return False
def format_unique_id(creds, mac_address):
    """Build a per-PSN-user unique id: the MAC address plus the last four
    characters of the pairing credential as suffix."""
    return "{}_{}".format(mac_address, creds[-4:])
def load_games(hass: HomeAssistantType) -> dict:
    """Return the stored game library, migrating legacy entries on the way."""
    games_path = hass.config.path(GAMES_FILE)
    try:
        library = load_json(games_path, dict)
    except HomeAssistantError as error:
        library = {}
        _LOGGER.error("Failed to load games file: %s", error)

    if not isinstance(library, dict):
        _LOGGER.error("Games file was not parsed correctly")
        library = {}

    # Only reformat when a games file actually exists on disk.
    if os.path.isfile(games_path):
        library = _reformat_data(hass, library)
    return library
def save_games(hass: HomeAssistantType, games: dict):
    """Persist the game library to the integration's JSON file."""
    target = hass.config.path(GAMES_FILE)
    try:
        save_json(target, games)
    except OSError as error:
        _LOGGER.error("Could not save game list, %s", error)
def _reformat_data(hass: HomeAssistantType, games: dict) -> dict:
    """Upgrade legacy string-only game entries to the dict format in place."""
    migrated = False
    for title_id, entry in games.items():
        if isinstance(entry, dict):
            continue  # already in the new format
        # The legacy format stored just the title string; fill in defaults.
        games[title_id] = {
            ATTR_LOCKED: False,
            ATTR_MEDIA_TITLE: entry,
            ATTR_MEDIA_IMAGE_URL: None,
            ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
        }
        migrated = True
        _LOGGER.debug("Reformatting media data for item: %s, %s", title_id, entry)

    if migrated:
        save_games(hass, games)
    return games
def service_handle(hass: HomeAssistantType):
    """Handle for services."""

    async def async_service_command(call):
        """Service for sending commands."""
        # Dispatch the command to every registered device whose entity_id
        # was targeted by the service call.
        entity_ids = call.data[ATTR_ENTITY_ID]
        command = call.data[ATTR_COMMAND]
        for device in hass.data[PS4_DATA].devices:
            if device.entity_id in entity_ids:
                await device.async_send_command(command)

    hass.services.async_register(
        DOMAIN, SERVICE_COMMAND, async_service_command, schema=PS4_COMMAND_SCHEMA
    )
|
joopert/home-assistant
|
homeassistant/components/ps4/__init__.py
|
Python
|
apache-2.0
| 7,078
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the heatmap colorbar ``len`` attribute."""

    def __init__(self, plotly_name="len", parent_name="heatmap.colorbar", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        super(LenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs
        )
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/heatmap/colorbar/_len.py
|
Python
|
mit
| 436
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
from _objects import SchemaObject
from _pgsql import pgQuery
import adm
from wh import xlt, shlexSplit, localTimeMillis
import logger
from _pgsql import quoteIdent, quoteValue
persistenceStr={'p': "persistent", 't': "temporary", 'u': "unlogged" }
class Table(SchemaObject):
  """Browser node for a regular PostgreSQL table (pg_class relkind 'r')."""
  typename=xlt("Table")
  shortname=xlt("Table")
  refreshOid="rel.oid"
  allGrants="arwdDxt"
  favtype='t'
  relkind='r'

  @staticmethod
  def FindQuery(schemaName, schemaOid, patterns):
    """Build the query used by the object finder to locate tables."""
    sql=pgQuery("pg_class c")
    sql.AddCol("relkind as kind")
    sql.AddCol("nspname")
    sql.AddCol("relname as name")
    sql.AddCol("n.oid as nspoid")
    sql.AddCol("c.oid")
    sql.AddJoin("pg_namespace n ON n.oid=relnamespace")
    sql.AddWhere("relkind='r'")
    SchemaObject.AddFindRestrictions(sql, schemaName, schemaOid, 'relname', patterns)
    return sql
  @staticmethod
  def InstancesQuery(parentNode):
    """Build the query listing all tables of the parent schema.

    relpersistence is normalized across server versions: <8.4 has no temp
    flag, 8.4-9.0 expose relistemp, 9.1+ have relpersistence natively."""
    sql=pgQuery("pg_class rel")
    sql.AddCol("rel.oid, relname as name, nspname, ns.oid as nspoid, spcname, pg_get_userbyid(relowner) AS owner, relacl as acl, rel.*")
    if parentNode.GetServer().version < 8.4:
      sql.AddCol("'t' AS relpersistence")
    elif parentNode.GetServer().version < 9.1:
      sql.AddCol("CASE WHEN relistemp THEN 't' ELSE 'p' END AS relpersistence")
    else:
      sql.AddCol("relpersistence")
    sql.AddCol("description")
    sql.AddJoin("pg_namespace ns ON ns.oid=rel.relnamespace")
    sql.AddLeft("pg_tablespace ta ON ta.oid=rel.reltablespace")
    sql.AddLeft("pg_description des ON (des.objoid=rel.oid AND des.objsubid=0)")
    sql.AddLeft("pg_constraint c ON c.conrelid=rel.oid AND c.contype='p'")
    sql.AddWhere("relkind", 'r')
    sql.AddWhere("relnamespace", parentNode.parentNode.GetOid())
    # sort tables of the "public" schema first, then by schema and name
    sql.AddOrder("CASE WHEN nspname='%s' THEN ' ' else nspname END" % "public")
    sql.AddOrder("relname")
    return sql
  def GetIcon(self):
    """Return the tree icon id; favourite tables get an overlay."""
    icons=[]
    icons.append("Table")
    if self.GetOid() in self.GetDatabase().favourites:
      icons.append('fav')
    return self.GetImageId(icons)

  def __init__(self, parentNode, info):
    super(Table, self).__init__(parentNode, info)
    # counted row number is expensive; only filled on demand
    self.rowcount=xlt("Not counted")
    self.Init()

  def Refresh(self):
    # invalidate the counted row number before refreshing node data
    self.rowcount=xlt("Not counted")
    self.DoRefresh()
  def Init(self):
    # lazily populated caches; see populateColumns/populateConstraints
    self.columns=[]
    self.constraints=None

  def GetProperties(self):
    """Return the rows shown in the properties pane (cached after first call)."""
    if not len(self.properties):
      self.properties = [
        (xlt("Name"), self.info['name']),
        (xlt("Namespace"), self.info['nspname']),
        ( "OID" , self.info['oid']),
        (xlt("Owner"), self.info['owner']),
        (xlt("Tablespace"), self.info['spcname']),
        (xlt("Persistence"), "%s (%s)" % (self.info['relpersistence'], xlt(persistenceStr.get(self.info['relpersistence'], "unknown")))),
        (xlt("Rows (estimated)"), int(self.info['reltuples'])),
        (xlt("Rows (counted)"), self.rowcount),
        (xlt("ACL"), self.info['acl'])
        ]
      self.AddProperty(xlt("Description"), self.info['description'])
    return self.properties
  def GetStatisticsQuery(self):
    """Return the SQL gathering per-table statistics for the statistics pane."""
    cols=[( 'seq_scan', xlt("Sequential Scans") ),
          ( 'seq_tup_read', xlt("Sequential Tuples Read") ),
          ( 'idx_scan', xlt("Index scans") ),
          ( 'idx_tup_fetch', xlt("Index Tuples Fetched") ),
          ( 'n_tup_ins', xlt("tuples inserted") ),
          ( 'n_tup_upd', xlt("tuples updated") ),
          ( 'n_tup_del', xlt("tuples deleted") ),
          ( 'heap_blks_read', xlt("Heap Blocks Read") ),
          ( 'heap_blks_hit', xlt("Heap Blocks Hit") ),
          ( 'idx_blks_read', xlt("Index Blocks Read") ),
          ( 'idx_blks_hit', xlt("Index Blocks Hit") ),
          ( 'toast_blks_read',xlt("Toast Blocks Read") ),
          ( 'toast_blks_hit', xlt("Toast Blocks Hit") ),
          ( 'tidx_blks_read', xlt("Toast Index Blocks Read") ),
          ( 'tidx_blks_hit', xlt("Toast Index Blocks Hit") ),
          ( 'pg_size_pretty(pg_relation_size(stat.relid))', xlt("Table Size") ),
          ( """CASE WHEN cl.reltoastrelid = 0 THEN '%s' ELSE pg_size_pretty(pg_relation_size(cl.reltoastrelid)+
    COALESCE((SELECT SUM(pg_relation_size(indexrelid))
    FROM pg_index WHERE indrelid=cl.reltoastrelid)::int8, 0)) END""" %xlt("None"), xlt("Toast Table Size") ),
          ( """pg_size_pretty(COALESCE((SELECT SUM(pg_relation_size(indexrelid))
    FROM pg_index WHERE indrelid=stat.relid)::int8, 0))""", xlt("Index Size"))
          ]
    return """
SELECT %(cols)s
  FROM pg_stat_all_tables stat
  JOIN pg_statio_all_tables statio ON stat.relid = statio.relid
  JOIN pg_class cl ON cl.oid=stat.relid
 WHERE stat.relid = %(relid)d
""" % {'relid': self.GetOid(),
       'cols': self.GetServer().ExpandColDefs(cols)}
  def GetSql(self):
    """Reverse-engineer a CREATE TABLE script including constraints,
    ownership, ACL and comments."""
    self.populateColumns()
    cols=[]
    for col in self.columns:
      cols.append(quoteIdent(col['attname']) + ' ' + self.colTypeName(col));
    constraints=[]
    self.populateConstraints()
    for constraint in self.constraints:
      c=[]
      for col in constraint['colnames']:
        c.append(quoteIdent(col))
      if constraint['indisprimary']:
        # the primary key goes inline into the column list
        cols.append("PRIMARY KEY("+ ", ".join(c)+")")
      else:
        if constraint['type'] == 'index':
          info=['CREATE']
          if constraint['indisunique']:
            info.append('UNIQUE')
          info.append("INDEX")
          info.append(constraint['fullname'])
          info.append('ON ' + self.NameSql())
          info.append("(%s)" % ",".join(c))
          constraints.append(" ".join(info) + ";")
        elif constraint['type'] == 'foreignkey':
          info=["ALTER TABLE " + self.NameSql() + "\n  ADD CONSTRAINT "]
          info.append(constraint['fullname'])
          info.append("REFERENCES " + quoteIdent(constraint['reftable']))
          info.append("(%s)" % ",".join(map(quoteIdent, constraint['refcolnames'])))
          constraints.append(" ".join(info) +";")
        elif constraint['type'] == 'check':
          # CHECK constraints are not reproduced yet
          pass
    sql=[]
    sql.append("CREATE TABLE " + self.NameSql())
    sql.append("(");
    sql.append("  " + ",\n  ".join(cols))
    if (self.info.get('relhasoids')):
      sql.append(") WITH OIDs;")
    else:
      sql.append(");")
    sql.append("ALTER TABLE " + self.NameSql() + " OWNER TO " + quoteIdent(self.info['owner']) + ";")
    sql.extend(constraints)
    sql.extend(self.getAclDef('relacl', "arwdDxt"))
    sql.extend(self.getCommentDef())
    return "\n".join(sql);
  def populateColumns(self):
    """Fetch and cache column metadata (type, default, comment, owning
    sequence) for all non-dropped user columns."""
    if not self.columns:
      self.columns = self.GetCursor().ExecuteDictList("""
        SELECT att.*, format_type(atttypid, atttypmod) as typename, t.typname, t.typcategory, t.typbasetype,
               pg_get_expr(adbin, attrelid) as adsrc,
               description, cs.relname AS sername, ns.nspname AS serschema
          FROM pg_attribute att
          JOIN pg_type t ON att.atttypid=t.oid
          LEFT OUTER JOIN pg_attrdef def ON adrelid=attrelid AND adnum=attnum
          LEFT OUTER JOIN pg_description des ON des.objoid=attrelid AND des.objsubid=attnum
          LEFT OUTER JOIN (pg_depend JOIN pg_class cs ON objid=cs.oid AND cs.relkind='S') ON refobjid=attrelid AND refobjsubid=attnum
          LEFT OUTER JOIN pg_namespace ns ON ns.oid=cs.relnamespace
         WHERE attrelid = %(attrelid)d
           AND attnum > 0
           AND attisdropped IS FALSE
         ORDER BY attnum""" % { 'attrelid': self.GetOid()})
  @staticmethod
  def getConstraintQuery(oid):
    """Return SQL listing indexes/primary keys (conclass 1) and foreign
    keys (conclass 2) of the table with the given oid."""
    return """
SELECT 1 AS conclass, CASE WHEN indisprimary THEN 'primarykey' ELSE 'index' END as type, indexrelid as oid,
       CASE WHEN nspname='%(defaultNamespace)s' THEN '' else nspname||'.' END || relname AS fullname,
       array_agg(attname) as colnames, description,
       indisprimary, indisunique, null::bool as condeferrable, null::bool as condeferred,
       null::text as reftable, null as refcolnames
  FROM pg_index i
  JOIN pg_class c ON indexrelid=c.oid
  JOIN pg_namespace nsp on relnamespace=nsp.oid
  LEFT JOIN pg_attribute a ON attrelid=indrelid AND attnum IN (SELECT unnest(indkey))
  LEFT JOIN pg_description ON objoid=indexrelid
 WHERE indrelid=%(relid)d
 GROUP BY indexrelid, nspname, relname, indisprimary, indisunique, description
UNION
SELECT 2, 'foreignkey', conrelid, conname,
       array_agg(a.attname), description,
       null, null, condeferrable, condeferred,
       CASE WHEN nspname='%(defaultNamespace)s' THEN '' else nspname||'.' END || relname,
       array_agg(r.attname)
  FROM pg_constraint
  LEFT JOIN pg_attribute a ON a.attrelid=conrelid AND a.attnum IN (SELECT unnest(conkey))
  LEFT JOIN pg_description ON objoid=conrelid
  JOIN pg_class c on c.oid=confrelid
  JOIN pg_namespace nsp on relnamespace=nsp.oid
  LEFT JOIN pg_attribute r ON r.attrelid=confrelid AND r.attnum IN (SELECT unnest(confkey))
 WHERE conrelid=%(relid)s
 GROUP BY conrelid, conname, condeferrable, condeferred, relname, nspname, description
 ORDER BY 1, 7 DESC, 4
""" % { 'relid': oid, 'defaultNamespace': "public" }
  def populateConstraints(self):
    # fetch and cache constraint/index info on first use
    if self.constraints == None:
      self.constraints=self.GetCursor().ExecuteDictList(self.getConstraintQuery(self.GetOid()))

  def colTypeName(self, col):
    """Render a column's type clause including NULL/NOT NULL and DEFAULT;
    integer/bigint columns whose default is their own sequence's nextval
    are folded into serial/bigserial."""
    n= [col['typename'], ['NULL', 'NOT NULL'][col['attnotnull']] ]
    default=col['adsrc']
    if default != None:
      if default == "nextval('%s_%s_seq'::regclass)" % (self.info['relname'], col['attname']):
        if n[0] == "integer":
          n[0] = "serial"
        elif n[0] == "bigint":
          n[0] = "bigserial"
        else:
          logger.debug("Unknown serial type %s for %s", n[0], default)
          n.append("DEFAULT")
          n.append(default)
      else:
        n.append("DEFAULT")
        n.append(default)
    return " ".join(n)
class ColumnPanel(adm.NotebookPanel):
  """Notebook page for editing a single column's definition."""
  name=xlt("Column")

  def __init__(self, dlg, notebook):
    adm.NotebookPanel.__init__(self, dlg, notebook)
    # maps type oid -> modifier flag filled in Go(); 0=no length field
    self.typeInfo={}
    self.Bind("DataType", self.OnTypeChange)
    self.BindAll("DataType")

  def OnTypeChange(self, evt):
    """Enable the Length/Precision fields according to the selected type."""
    precFlag=self.typeInfo.get(self.DataType)
    self.EnableControls("Length", precFlag>0)
    self.EnableControls("Precision", precFlag==2)
    self.OnCheck(evt)
  def Go(self):
    """Fill the panel controls from the dialog's column definition."""
    cd=self.dialog.colDef
    self.ColName = cd['attname']
    self.NotNull = cd['attnotnull']
    self.DefaultVal=cd['adsrc']
    self.Description=cd['description']
    self.Statistics = cd['attstattarget']
    # split "typename(len[,prec])" into its parts
    type=cd['typename']
    ci=type.find('(')
    if (ci > 0):
      prec=type[ci+1:-1].split(',')
      self.Length=int(prec[0])
      if len(prec) > 1:
        self.Precision = int(prec[1])
    self.typeInfo={}
    types=self.dialog.node.GetCursor().ExecuteDictList("SELECT oid, typname, typmodin FROM pg_type WHERE typcategory=%s ORDER BY oid" % quoteValue(cd['typcategory']))
    for t in types:
      oid=t['oid']
      self["DataType"].AppendKey(t['oid'], t['typname'])
      # types with a typmod input function accept a length modifier
      if t['typmodin'] != '-':
        precFlag=1
      else:
        precFlag=0
      self.typeInfo[oid] = precFlag
    self.DataType=cd['atttypid']
    # int8/int4 columns (oids 20/23, or domains over them) may be
    # serial-backed; offer the owning sequence when one exists
    if cd['atttypid'] in (20, 23) or cd['typbasetype'] in (20,23):
      if cd['sername']:
        if cd['serschema'] != 'public':
          sn="%(serschema)s.%(sername)s" % cd
        else:
          sn=cd['sername']
        self['Sequence'].Append(sn)
        self.Sequence=sn
    else:
      self['Sequence'].Disable()
    if self.dialog.GetServer().version >= 9.1:
      # collations only apply to string-category ('S') types
      if cd['typcategory'] == 'S':
        colls=self.dialog.node.GetCursor().ExecuteDictList(
            "SELECT oid,collname FROM pg_collation WHERE collencoding IN (-1, %d) ORDER BY oid"
            % self.dialog.node.GetDatabase().info['encoding'])
        for c in colls:
          self['Collation'].AppendKey(c['oid'], c['collname'])
        if cd['attcollation']:
          self.Collation = cd['attcollation']
      else:
        self['Collation'].Disable()
    else:
      self.ShowControls("Collation", False)
    self.Comment=cd['description']
    # Not yet supported
    self.ShowControls("Sequence Storage Statistics", False)
    self.OnTypeChange(None)
    self.SetUnchanged()
  def Check(self):
    """Validate panel input before the dialog may save."""
    # NOTE(review): Go() only ever stores 0 or 1 in typeInfo, so this
    # precision branch (flag 2) looks currently unreachable - confirm.
    if self.typeInfo.get(self.DataType) == 2:
      return self.CheckValid(True, self.Precision <= self.Length, xlt("Precision must be <= Length"))
    return True
def GetSql(self):
sql=[]
params={ "colname": quoteIdent(self.ColName), "oldcol": quoteIdent(self['ColName'].unchangedValue)}
if self.HasChanged("ColName"):
sql.append("RENAME COLUMN %(oldcol)s TO %(colname)s" % params)
if self.HasChanged("NotNull"):
if self.NotNull:
params['val'] = "SET"
else:
params['val'] = "DROP"
sql.append("ALTER COLUMN %(colname)s %(val)s NOT NULL" % params)
if self.HasChanged("DefaultVal"):
if self.DefaultVal:
params['default'] = self.DefaultVal
sql.append("ALTER COLUMN %(colname)s SET DEFAULT %(default)s" % params)
else:
sql.append("ALTER COLUMN (%colname)s DROP DEFAULT" % params)
if self.HasChanged("DataType Collation Length Precision"):
params['type']=self['DataType'].GetValue()
n="ALTER COLUMN %(colname)s TYPE %(type)s" % params
precFlag=self.typeInfo.get(self.DataType)
if precFlag and self.Length:
n += "(%d" % self.Length
if precFlag == 2 and self['Precision'].GetValue():
n += ", %d" % self.Precision
n += ")"
if self.HasChanged("Collation"):
n += " COLLATE %s" % quoteIdent(self['Collation'].GetValue())
sql.append(n)
if self.HasChanged("Statistics"):
params['val'] = self.Statistics
sql.append("ALTER COLUMN %(colname)s SET STATISTICS %(val)d" % params)
# type, len, prec, collate
# if self.HasChanged("Collation"):
# params['val'] = self["Collation"].GetValue()
# sql.append("ALTER COLUMN %(colname)s SET COLLATE \"%(val)d\";" % params)
if sql:
sql=["ALTER TABLE %s\n %s;" % (self.dialog.node.NameSql() , ",\n ".join(sql))]
if self.HasChanged('Comment'):
params['tabname'] = self.dialog.node.NameSql()
params['comment'] = quoteValue(self.Comment)
sql.append("COMMENT ON COLUMN %(tabname)s.%(colname)s IS %(comment)s" % params)
if sql:
return "\n".join(sql)
return ""
class PrivilegePanel(adm.NotebookPanel):
  """Notebook page listing per-user/group privileges of a column."""
  name=xlt("Privileges")
  # ACL letter -> privilege keyword (PostgreSQL aclitem abbreviations)
  privString={ 'a': "INSERT",
               'r': "SELECT",
               'w': "UPDATE",
               'd': "DELETE",
               'D': "TRUNCATE",
               'x': "REFERENCE",
               't': "TRIGGER",
               'U': "USAGE",
               'C': "CREATE",
               'T': "TEMP",
               'c': "CONNECT",
               'X': "EXECUTE",
               }

  @classmethod
  def CreatePanel(cls, dlg, notebook):
    # column ACLs are only available from PostgreSQL 8.4 on
    if dlg.GetServer().version < 8.4:
      return None
    return cls(dlg, notebook)

  def Go(self):
    """Fill the privilege list from the column's ACL entries."""
    pl=self['PrivList']
    pl.ClearAll()
    pl.AddColumnInfo(xlt("Usr/Group"), 20)
    pl.AddColumnInfo(xlt("Privilege"), -1)
    acls=self.dialog.colDef['attacl']
    if acls:
      # aclitem format: {grantee=privletters/grantor,...}; empty grantee
      # means PUBLIC
      for acl in shlexSplit(acls[1:-1], ','):
        up = shlexSplit(acl, '=')
        if len(up) == 1:
          priv=up[0]
          usr="public"
        else:
          usr=up[0]
          priv=up[1]
        up=shlexSplit(priv, '/')
        priv=up[0]
        if len(up) > 1: grantor=up[1]
        else: grantor=None
        # NOTE(review): debug leftover - parsed ACL entries are only
        # printed, never added to the list control; panel looks unfinished.
        print usr, priv, grantor
    pl.Show()
class SecurityPanel(adm.NotebookPanel):
    """Notebook panel for SECURITY LABEL support."""
    name = xlt("Security Labels")

    @classmethod
    def CreatePanel(cls, dlg, notebook):
        # Security labels were introduced with PostgreSQL 9.1;
        # suppress the panel for older servers.
        if dlg.GetServer().version >= 9.1:
            return cls(dlg, notebook)
        return None
class SqlPanel(adm.NotebookPanel):
    """Read-only panel previewing the SQL the dialog would execute."""
    name = xlt("SQL")

    def Display(self):
        # Show a placeholder comment when the dialog has no pending change.
        statement = self.dialog.GetSql()
        self.SqlText = statement if statement else xlt("-- No change")
        self.Show()
class Column(adm.PagedPropertyDialog):
    """Property dialog for editing a single table column."""
    name = xlt("Column")
    privFlags = 'arwx'
    # panelClasses=[ColumnPanel, PrivilegePanel, SecurityPanel, SqlPanel]
    panelClasses = [ColumnPanel, SqlPanel]

    def __init__(self, parentWin, node, colDef):
        adm.PagedPropertyDialog.__init__(self, parentWin, node, None)
        self.colDef = colDef

    def GetSql(self):
        # Concatenate the SQL contributed by every panel that implements it.
        parts = [panel.GetSql() for panel in self.panels
                 if hasattr(panel, "GetSql")]
        return "".join(parts)

    def Save(self):
        statement = self.GetSql()
        if statement:
            self.startTime = localTimeMillis()
            self.node.GetCursor().Execute(statement)
        return True
class ColumnsPage(adm.NotebookPage):
    """Notebook page listing the columns of the selected table."""
    name = xlt("Columns")
    order = 1

    def Display(self, node, _detached):
        # Rebuild the list only when a different node is displayed.
        if node != self.lastNode:
            def _typename(row):
                return node.colTypeName(row)

            self.lastNode = node
            self.control.ClearAll()
            self.control.AddColumnInfo(xlt("Name"), 20, colname='attname')
            self.control.AddColumnInfo(xlt("Type"), 30, proc=_typename)
            self.control.AddColumnInfo(xlt("Comment"), -1, colname='description')
            self.RestoreListcols()
            node.populateColumns()
            icon = node.GetImageId('column')
            rows = [(col, icon) for col in node.columns]
            self.control.Fill(rows, 'attname')

    def OnItemDoubleClick(self, evt):
        # Open the column property dialog for the double-clicked row.
        adm.DisplayDialog(Column, self.control, self.lastNode,
                          self.lastNode.columns[evt.Index])
class ConstraintPage(adm.NotebookPage):
    """Notebook page listing the constraints and indexes of a table."""
    name = xlt("Constraints")
    order = 2

    def Display(self, node, _detached):
        if node != self.lastNode:
            self.lastNode = node

            def _getDetails(row):
                # Compose the per-constraint detail text for the third column.
                detail = []
                kind = row['type']
                if kind == 'primarykey':
                    detail.append('PRIMARY')
                elif kind == 'index':
                    if row['indisunique']:
                        detail.append('UNIQUE')
                elif kind == 'foreignkey':
                    detail.append(row['reftable'])
                    detail.append("(%s)" % ",".join(row['refcolnames']))
                elif kind == 'check':
                    pass
                return "".join(detail)

            self.control.AddColumnInfo(xlt("Name"), 10, colname='fullname')
            self.control.AddColumnInfo(xlt("Columns"), 15, colname='colnames',
                                       proc=lambda x: ", ".join(x))
            self.control.AddColumnInfo(xlt("Details"), 15, proc=_getDetails)
            self.control.AddColumnInfo(xlt("Description"), -1, colname='description')
            self.RestoreListcols()
            node.populateConstraints()
            rows = [(con, node.GetImageId(con['type']))
                    for con in node.constraints]
            self.control.Fill(rows, 'fullname')
# Registration data consumed by the admin framework: Table nodes live under
# Schema nodes (sort position 10, collection "Tables") and expose these
# notebook pages.
nodeinfo = [{"class": Table, "parents": ["Schema"], "sort": 10,
             "collection": "Tables",
             "pages": [ColumnsPage, ConstraintPage, "StatisticsPage", "SqlPage"]}]
pageinfo = [ColumnsPage, ConstraintPage]
class RowCount:
    """Context-menu action that counts the rows of the selected table."""
    name = xlt("Count")
    help = xlt("Count rows in table")

    @staticmethod
    def OnExecute(_parentWin, node):
        # ONLY excludes rows of inheriting child tables; the result is
        # cached on the node for display.
        node.rowcount = node.ExecuteSingle("SELECT COUNT(1) FROM ONLY %s" % node.NameSql())
        return True
# Context-menu registration: offer the row-count action on Table nodes.
menuinfo = [
    { "class" : RowCount, "nodeclasses" : Table, 'sort': 80 },
]
|
andreas-p/admin4
|
modPg/Table.py
|
Python
|
apache-2.0
| 19,913
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 10:53:07 2017

@author: asus

Minimal 2-D convolution demo: convolves a single-channel 3x3 input with a
fixed 2x2 kernel [[1, 0], [0, 1]] using theano's conv2d.
"""
import theano
from theano import tensor as T
from theano.tensor.nnet import conv2d
import numpy as np

# Kernel, shaped (output channels, input channels, rows, cols) = (1, 1, 2, 2).
# The unused symbolic vectors previously declared here (x = T.ivector(...),
# w = T.ivector(...)) were dead code - both were immediately overwritten.
w = np.array([1, 0, 0, 1])
w = w.reshape((1, 1, 2, 2))

input = T.tensor4(name='input')
w = theano.shared(np.asarray(w, dtype=input.dtype), name='w')
conv_out = conv2d(input, w)
f = theano.function([input], conv_out)

# Input: one single-channel 3x3 "image" holding the values 1..9.
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
x = x.reshape((1, 1, 3, 3))
print(f(x))
|
RayleighChen/SummerVac
|
dou/simple_CNN.py
|
Python
|
gpl-2.0
| 493
|
from __future__ import absolute_import
#!/usr/bin/env python
import numpy as np
from nibabel import Nifti1Image
from ..affine import Affine, Rigid
from ..histogram_registration import HistogramRegistration
from .._register import _joint_histogram
from numpy.testing import (assert_array_equal,
assert_equal,
assert_almost_equal,
assert_raises)
# Identity affine shared by all synthetic test images.
dummy_affine = np.eye(4)
def _random_block(dx, dy, dz):
    # Shared noise source for all make_data_* helpers: uniform noise in
    # [0, 1) shifted by one random scalar offset (same call sequence as the
    # previous duplicated implementations, so seeded runs are unchanged).
    return np.random.rand(dx, dy, dz) - np.random.rand()


def make_data_bool(dx=100, dy=100, dz=50):
    """Random boolean volume of shape (dx, dy, dz)."""
    return _random_block(dx, dy, dz) > 0


def make_data_uint8(dx=100, dy=100, dz=50):
    """Random uint8 volume of shape (dx, dy, dz)."""
    return (256 * _random_block(dx, dy, dz)).astype('uint8')


def make_data_int16(dx=100, dy=100, dz=50):
    """Random int16 volume of shape (dx, dy, dz)."""
    return (256 * _random_block(dx, dy, dz)).astype('int16')


def make_data_float64(dx=100, dy=100, dz=50):
    """Random float64 volume of shape (dx, dy, dz)."""
    return (256 * _random_block(dx, dy, dz)).astype('float64')
def _test_clamping(I, thI=0.0, clI=256, mask=None):
    """Register I to itself and verify clamped data and histogram sizing.

    NOTE(review): thI is accepted but never used - confirm whether a
    threshold argument was meant to be forwarded to HistogramRegistration.
    """
    R = HistogramRegistration(I, I, bins=clI,
                              from_mask=mask, to_mask=mask,
                              spacing=[1, 1, 1])
    Ic = R._from_data
    # _to_data carries a one-voxel pad; strip it before comparing.
    Ic2 = R._to_data[1:-1, 1:-1, 1:-1]
    assert_equal(Ic, Ic2)
    # The joint histogram must be square with one bin per clamped intensity.
    dyn = Ic.max() + 1
    assert_equal(dyn, R._joint_hist.shape[0])
    assert_equal(dyn, R._joint_hist.shape[1])
    return Ic, Ic2
# Clamping must behave consistently for every supported input dtype,
# with and without a mask, and for non-default (threshold, bins) settings.
def test_clamping_uint8():
    I = Nifti1Image(make_data_uint8(), dummy_affine)
    _test_clamping(I)


def test_clamping_uint8_nonstd():
    I = Nifti1Image(make_data_uint8(), dummy_affine)
    _test_clamping(I, 10, 165)


def test_clamping_int16():
    I = Nifti1Image(make_data_int16(), dummy_affine)
    _test_clamping(I)


def test_masked_clamping_int16():
    I = Nifti1Image(make_data_int16(), dummy_affine)
    _test_clamping(I, mask=make_data_bool())


def test_clamping_int16_nonstd():
    I = Nifti1Image(make_data_int16(), dummy_affine)
    _test_clamping(I, 10, 165)


def test_clamping_float64():
    I = Nifti1Image(make_data_float64(), dummy_affine)
    _test_clamping(I)


def test_clamping_float64_nonstd():
    I = Nifti1Image(make_data_float64(), dummy_affine)
    _test_clamping(I, 10, 165)
def _test_similarity_measure(simi, val):
    """Check that similarity `simi` of an image with a copy of itself
    evaluates to `val` under the identity transform."""
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(I.get_data().copy(), dummy_affine)
    R = HistogramRegistration(I, J, spacing=[2, 1, 3])
    R.similarity = simi
    assert_almost_equal(R.eval(Affine()), val)


def _test_renormalization(simi, simi2ll):
    """Check that the 'ml' and 'nml' renormalizations of `simi` agree with
    simi2ll applied to the default (un-renormalized) value."""
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(make_data_int16(), dummy_affine)
    R = HistogramRegistration(I, J, similarity=simi, spacing=[2, 1, 3])
    def_s = simi2ll(R.eval(Affine()))
    R._set_similarity(simi, renormalize='ml')
    assert_almost_equal(R.eval(Affine()), def_s)
    R._set_similarity(simi, renormalize='nml')
    assert_almost_equal(R.eval(Affine()), def_s)
# A perfectly matched image pair must score the theoretical maximum of each
# measure: 1.0 for the correlation-based ones, 2.0 for normalized MI.
def test_correlation_coefficient():
    _test_similarity_measure('cc', 1.0)


def test_correlation_ratio():
    _test_similarity_measure('cr', 1.0)


def test_correlation_ratio_L1():
    _test_similarity_measure('crl1', 1.0)


def test_supervised_likelihood_ratio():
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(make_data_int16(), dummy_affine)
    # A flat joint distribution makes the log-likelihood ratio zero.
    R = HistogramRegistration(I, J, similarity='slr', dist=np.ones((256, 256)) / (256 ** 2))
    assert_almost_equal(R.eval(Affine()), 0.0)
    # 'slr' requires a dist argument of matching shape.
    assert_raises(ValueError, HistogramRegistration, I, J, similarity='slr', dist=None)
    assert_raises(ValueError, HistogramRegistration, I, J, similarity='slr', dist=np.random.rand(100, 127))


def test_normalized_mutual_information():
    _test_similarity_measure('nmi', 2.0)


# Renormalized similarities must match the corresponding log-likelihood
# transform of the plain measure (see _test_renormalization).
def test_renormalized_correlation_coefficient():
    simi2ll = lambda x: -.5 * np.log(1 - x)
    _test_renormalization('cc', simi2ll)


def test_renormalized_correlation_ratio():
    simi2ll = lambda x: -.5 * np.log(1 - x)
    _test_renormalization('cr', simi2ll)


def test_renormalized_correlation_ratio_l1():
    simi2ll = lambda x: -np.log(1 - x)
    _test_renormalization('crl1', simi2ll)


def test_renormalized_mutual_information():
    simi2ll = lambda x: x
    _test_renormalization('mi', simi2ll)
def test_joint_hist_eval():
    """Identical images must yield a correlation coefficient of 1."""
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(I.get_data().copy(), dummy_affine)
    # Obviously the data should be the same
    assert_array_equal(I.get_data(), J.get_data())
    # Instantiate default thing
    R = HistogramRegistration(I, J, spacing=[1, 1, 1])
    R.similarity = 'cc'
    null_affine = Affine()
    val = R.eval(null_affine)
    assert_almost_equal(val, 1.0)
    # Try with what should be identity
    assert_array_equal(R._from_data.shape, I.shape)


def test_joint_hist_raw():
    """Exercise the C joint-histogram routine directly: identical images at
    identity coordinates must produce a purely diagonal histogram."""
    # Set up call to joint histogram
    jh_arr = np.zeros((10, 10), dtype=np.double)
    data_shape = (2, 3, 4)
    data = np.random.randint(size=data_shape,
                             low=0, high=10).astype(np.short)
    # data2 is data with a one-voxel pad of -1 (the "outside" marker).
    data2 = np.zeros(np.array(data_shape) + 2, dtype=np.short)
    data2[:] = -1
    data2[1:-1, 1:-1, 1:-1] = data.copy()
    vox_coords = np.indices(data_shape).transpose((1, 2, 3, 0))
    vox_coords = np.ascontiguousarray(vox_coords.astype(np.double))
    _joint_histogram(jh_arr, data.flat, data2, vox_coords, 0)
    assert_almost_equal(np.diag(np.diag(jh_arr)), jh_arr)
def test_explore():
    # Smoke test: explore a 3x3 grid over the first two transform parameters.
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(make_data_int16(), dummy_affine)
    R = HistogramRegistration(I, J)
    T = Affine()
    simi, params = R.explore(T, (0, [-1, 0, 1]), (1, [-1, 0, 1]))


def test_histogram_registration():
    """ Test the histogram registration class.
    """
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(I.get_data().copy(), dummy_affine)
    # A zero spacing component is invalid and must be rejected.
    assert_raises(ValueError, HistogramRegistration,
                  I, J, spacing=[0, 1, 3])
def test_set_fov():
    """Equivalent npoints/spacing/corner/size field-of-view specifications
    must produce the expected clamped-data shapes."""
    I = Nifti1Image(make_data_int16(), dummy_affine)
    J = Nifti1Image(I.get_data().copy(), dummy_affine)
    R = HistogramRegistration(I, J)
    R.set_fov(npoints=np.prod(I.shape))
    assert_equal(R._from_data.shape, I.shape)
    # Floor division: the original "/ 2" yields floats on Python 3, which
    # are invalid as array shapes and corner/size indices.
    half_shape = tuple([I.shape[i] // 2 for i in range(3)])
    R.set_fov(spacing=(2, 2, 2))
    assert_equal(R._from_data.shape, half_shape)
    R.set_fov(corner=half_shape)
    assert_equal(R._from_data.shape, half_shape)
    R.set_fov(size=half_shape)
    assert_equal(R._from_data.shape, half_shape)
def test_histogram_masked_registration():
    """ Test the histogram registration class.
    """
    # Masked registration over the full images must equal unmasked
    # registration over the cropped mask region.
    I = Nifti1Image(make_data_int16(dx=100, dy=100, dz=50),
                    dummy_affine)
    J = Nifti1Image(make_data_int16(dx=100, dy=100, dz=50),
                    dummy_affine)
    mask = (np.zeros((100, 100, 50)) == 1)
    mask[10:20, 10:20, 10:20] = True
    R = HistogramRegistration(I, J, to_mask=mask, from_mask=mask)
    sim1 = R.eval(Affine())
    I = Nifti1Image(I.get_data()[mask].reshape(10, 10, 10),
                    dummy_affine)
    J = Nifti1Image(J.get_data()[mask].reshape(10, 10, 10),
                    dummy_affine)
    R = HistogramRegistration(I, J)
    sim2 = R.eval(Affine())
    assert_equal(sim1, sim2)


def test_similarity_derivatives():
    """ Test gradient and Hessian computation of the registration
    objective function.
    """
    # Against a constant image the similarity is flat, so gradient and
    # Hessian must vanish identically.
    I = Nifti1Image(make_data_int16(dx=100, dy=100, dz=50),
                    dummy_affine)
    J = Nifti1Image(np.ones((100, 100, 50), dtype='int16'),
                    dummy_affine)
    R = HistogramRegistration(I, J)
    T = Rigid()
    g = R.eval_gradient(T)
    assert_equal(g.dtype, float)
    assert_equal(g, np.zeros(6))
    H = R.eval_hessian(T)
    assert_equal(H.dtype, float)
    assert_equal(H, np.zeros((6, 6)))


def test_smoothing():
    """ Test smoothing the `to` image.
    """
    # Smoothing the target must strictly lower the self-similarity score.
    I = Nifti1Image(make_data_int16(dx=100, dy=100, dz=50),
                    dummy_affine)
    T = Rigid()
    R = HistogramRegistration(I, I)
    R1 = HistogramRegistration(I, I, sigma=(0, 1))
    s = R.eval(T)
    s1 = R1.eval(T)
    assert_almost_equal(s, 1)
    assert s1 < s
    # Negative smoothing widths are invalid.
    assert_raises(ValueError, HistogramRegistration, I, I, sigma=-1)
if __name__ == "__main__":
    # Allow running this test module directly via nose.
    import nose
    nose.run(argv=['', __file__])
|
alexis-roche/nireg
|
nireg/tests/test_histogram_registration.py
|
Python
|
bsd-3-clause
| 8,361
|
import json
import re
from base64 import b64decode
from datetime import datetime
from email.utils import mktime_tz, parsedate_tz
class MandrillInbound(object):
    """Convenience wrapper around one Mandrill inbound-email webhook event.

    Construct with either ``json='...'`` (the raw webhook body, a JSON list
    of events of which only the first is used) or ``source={...}`` (one
    already-decoded event dict).

    Raises:
        Exception: if neither argument is given, if the event is not an
            inbound event, or if it carries no ``msg`` payload.
    """

    def __init__(self, *args, **kwargs):
        if not kwargs.get('json') and not kwargs.get('source'):
            raise Exception('Mandrill Inbound Error: you must provide json or source')

        if kwargs.get('source'):
            self.source = kwargs.get('source')
        else:
            # The webhook posts a JSON list of events; use the first one.
            self.source = json.loads(kwargs.get('json'))[0]

        self.msg = self.source.get('msg')

        if self.source['event'] != 'inbound':
            raise Exception('Mandrill event not inbound')

        if not self.msg:
            raise Exception('Mandrill msg not found')

    def _normalize_addresses(self, addresses):
        """Convert Mandrill's [email, name] pairs into (name, email) tuples."""
        return [(name, email) for email, name in addresses]

    @property
    def subject(self):
        """The subject line of the message."""
        return self.msg.get('subject')

    @property
    def sender(self):
        """(from_name, from_email) tuple identifying the sender."""
        return (self.msg.get('from_name'), self.msg.get('from_email'))

    @property
    def cc(self):
        """All (name, email) pairs the email was indirectly sent to (cc'd)."""
        return self._normalize_addresses(self.msg.get('cc', []))

    @property
    def to(self):
        """All (name, email) pairs the email was directly sent to (to)."""
        return self._normalize_addresses(self.msg.get('to', []))

    @property
    def recipients(self):
        """All recipients of the message, both to'd and cc'd."""
        return self.to + self.cc

    @property
    def headers(self):
        """Dictionary of all headers received for the message."""
        return self.msg.get('headers')

    @property
    def message_id(self):
        """The Message-Id header (raises KeyError when absent)."""
        return self.headers['Message-Id']

    @property
    def attachments(self):
        """All attachments of this email, wrapped as Attachment objects."""
        return [Attachment(attachment)
                for name, attachment in self.msg.get('attachments', {}).items()]

    @property
    def has_attachments(self):
        """True when the message carries at least one attachment."""
        return bool(self.attachments)

    @property
    def html_body(self):
        """The body of the email in HTML form."""
        return self.msg.get('html')

    @property
    def text_body(self):
        """The body of the email in plain-text form."""
        return self.msg.get('text')

    @property
    def tags(self):
        """
        Included for completeness, but "but inbound messages generally won't have tags since they're being received instead of sent, and the sender is null since the event is for a message not being sent by Mandrill"
        According to http://help.mandrill.com/entries/22092308-What-is-the-format-of-inbound-email-webhooks-
        """
        return self.msg.get('tags')

    @property
    def dkim(self):
        """
        Returns a boolean, if True, DKIM was present and valid.
        If False, DKIM was not valid.
        DomainKeys Identified Mail(DKIM) protects against email spoofing.
        Yahoo, Gmail, AOL, and various others should use DKIM.
        DKIM lets you verify an email came on behalf of the claimed domain.
        """
        return self.msg.get('dkim').get('valid')

    @property
    def spf(self):
        """
        Returns a string of the the spf validation result. One of: pass, neutral, fail, softfail, temperror, permerror, none.
        Sender Policy Framework (SPF), is a tool to detect email spoofing by verifying sender IP addresses.
        """
        return self.msg.get('spf').get('result')

    @property
    def spam_score(self):
        """
        Returns a SpamAssassin score (float/int)
        The lower a score, the less likely the message is spam.
        """
        return self.msg.get('spam_report').get('score')

    @property
    def mailbox_hash(self):
        """The +suffix of the recipient address (user+HASH@domain), or None."""
        matches = re.search(r"\+(\S+)\@", self.msg.get('email'))
        if matches:
            return matches.group(1)
        return None

    @property
    def send_date(self):
        """datetime when the email was sent (parsed from the Date header),
        or None when the header is missing or unparseable."""
        date = None
        rfc_2822 = self.headers['Date']
        if rfc_2822:
            try:
                date = datetime.fromtimestamp(mktime_tz(parsedate_tz(rfc_2822)))
            except (TypeError, ValueError, OverflowError):
                # parsedate_tz returns None for malformed headers (TypeError
                # downstream); fromtimestamp may raise on out-of-range values.
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt.
                pass
        return date

    @property
    def ts(self):
        """
        UTC unix timestamp when that the event occurred
        """
        return datetime.fromtimestamp(self.source.get('ts'))
class Attachment(object):
    """One attachment of a Mandrill inbound message.

    Wraps the raw per-attachment dict from the webhook payload:
    ``{'name': ..., 'type': ..., 'content': <base64 string>, ...}``.
    """

    def __init__(self, attachment, **kwargs):
        self.attachment = attachment

    @property
    def name(self):
        """The file name of the attachment."""
        return self.attachment.get('name')

    @property
    def content_type(self):
        """The MIME type of the attachment."""
        return self.attachment.get('type')

    def read(self):
        """Return the raw binary content of the attachment file."""
        return b64decode(self.attachment.get('content'))

    def download(self, directory='', allowed_types=None):
        """Save the attachment into `directory`.

        The file name is appended directly, so `directory` should end with a
        path separator. Raises Exception when no directory is given, when the
        MIME type is not in `allowed_types` (if provided), or when the file
        cannot be written.
        """
        # allowed_types was a mutable default ([]); None is equivalent here
        # (both falsy, never mutated) and avoids the shared-default pitfall.
        if len(directory) == 0:
            raise Exception('Mandrill Inbound Error: you must provide the upload path')

        if allowed_types and self.content_type not in allowed_types:
            raise Exception('Mandrill Inbound Error: the file type %s is not allowed' % self.content_type)

        try:
            # 'wb': read() returns bytes; the previous text-mode 'w' breaks
            # on Python 3 and risks newline corruption of binary data.
            # `with` guarantees the handle is closed on every path.
            with open('%s%s' % (directory, self.name), 'wb') as attachment:
                attachment.write(self.read())
        except IOError:
            raise Exception('Mandrill Inbound Error: cannot save the file, check path and rights.')
|
jpadilla/mandrill-inbound-python
|
mandrill_inbound/__init__.py
|
Python
|
mit
| 6,411
|
from distutils.core import setup

# Packaging metadata for the pyevent micro event library.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# consider migrating to setuptools.
setup(
    name='pyevent',
    version='0.0.1',
    packages=['pyevent'],
    url='https://github.com/andy9775/pyevent',
    license='MIT',
    author='andy',
    author_email='andy9775@gmail.com',
    description='A micro event library for python',
)
|
andy9775/pyevent
|
setup.py
|
Python
|
mit
| 286
|
"""
Source: https://github.com/kerguler/hoppMCMC (01/12/18)
adaptive basin-hopping Markov-chain Monte Carlo for Bayesian optimisation
This is the python (v2.7) implementation of the hoppMCMC algorithm aiming to
identify and sample from the high-probability regions of a posterior
distribution. The algorithm combines three strategies: (i) parallel MCMC,
(ii) adaptive Gibbs sampling and (iii) simulated annealing.
Overall, hoppMCMC resembles the basin-hopping algorithm implemented in the
optimize module of scipy, but it is developed for a wide range of modelling
approaches including stochastic models with or without time-delay.
"""
import os
# import sys
import numpy as np
from struct import pack, unpack
from scipy.stats import ttest_1samp as ttest
MPI_MASTER = 0  # rank of the coordinating process

try:
    from mpi4py import MPI
    MPI_SIZE = MPI.COMM_WORLD.Get_size()
    MPI_RANK = MPI.COMM_WORLD.Get_rank()

    def Abort(str):
        # Under MPI a plain exception on one rank would leave the other
        # ranks hanging; tear the whole communicator down instead.
        print("ERROR: " + str)
        MPI.COMM_WORLD.Abort(1)
except Exception:
    # mpi4py is optional: fall back to a single-process run. Was a bare
    # `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    MPI_SIZE = 1
    MPI_RANK = 0

    def Abort(str):
        raise errorMCMC(str)

# Numerical guards for proposal-variance adaptation.
EPS_PULSE_VAR_MIN = 1e-12
EPS_VARMAT_MIN = 1e-7
EPS_VARMAT_MAX = 1e7
class errorMCMC(Exception):
    """Raised when hoppMCMC aborts in a non-MPI run."""

    def __init__(self, value):
        # Keep the offending message/value for inspection by callers.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class binfile():
    """Row-oriented binary file of fixed-size double rows.

    Layout: a 4-byte little-endian int header holding the row size,
    followed by consecutive rows of `rowsize` little-endian doubles.
    """

    def __init__(self, fname, mode, rowsize=1):
        self.headsize = 4  # bytes of the row-size header
        self.bitsize = 8   # bytes per double
        self.fname = fname
        self.mode = mode
        self.rowsize = rowsize
        if self.mode == 'r':
            try:
                self.f = open(self.fname, "rb")
            except IOError:
                Abort("File not found: " + self.fname)
            # When reading, the row size recorded in the file is authoritative.
            self.rowsize = unpack('<i', self.f.read(self.headsize))[0]
        elif self.mode == 'w':
            try:
                self.f = open(self.fname, "w+b")
            except IOError:
                Abort("File not found: " + self.fname)
            # NOTE(review): "w+b" truncates the file, so this read always
            # returns b'' and the else-branch below is dead code; reopening
            # an existing file was presumably intended ("r+b"/"a+b") - confirm.
            tmp = self.f.read(self.headsize)
            if not tmp:
                self.f.write(pack('<i', self.rowsize))
            else:
                self.rowsize = unpack('<i', tmp)[0]
        else:
            Abort("Wrong i/o mode: " + self.mode)
        self.fmt = '<' + 'd' * self.rowsize
        self.size = self.bitsize * self.rowsize
        print("Success: %s opened for %s size %d double rows" % (
            self.fname, "reading" if self.mode == 'r' else
            "writing", self.rowsize))

    def writeRow(self, row):
        # Append one row and flush immediately so readers see it right away.
        self.f.write(pack(self.fmt, *row))
        self.f.flush()

    def readRows(self):
        """Read every complete row into an (n, rowsize) float64 array."""
        self.f.seek(0, os.SEEK_END)
        filesize = self.f.tell()
        self.f.seek(self.headsize)
        tmp = self.f.read(filesize - self.headsize)
        ret = np.array(
            unpack('<' + 'd' * int(np.floor((
                filesize - self.headsize) / self.bitsize)), tmp),
            dtype=np.float64).reshape((int(np.floor(((
                filesize - self.headsize) / self.bitsize) / self.rowsize)),
                self.rowsize))
        return ret

    def close(self):
        self.f.close()
        print("Success: %s closed" % (self.fname if self.fname else "file"))
def readFile(filename):
    """
    Reads a binary output file and returns all rows/columns

    Parameters
    ----------
    filename:
        name of the output file

    Returns
    -------
    a np array with all the rows/columns
    """
    reader = binfile(filename, "r")
    rows = reader.readRows()
    reader.close()
    return rows
def parMin(parmat):
    """Locate the row with the smallest score (column 0 of `parmat`).

    Returns a dict with 'i' (index of the first minimal row) and 'f'
    (the minimal score). Uses a single argmin pass instead of the previous
    min + where double scan; argmin matches where()'s first-occurrence
    tie-breaking.
    """
    i = int(np.argmin(parmat[:, 0]))
    return {'i': i, 'f': parmat[i, 0]}
def diagdot(mat, vec):
    """Scale the diagonal of `mat` in place, element-wise by `vec`."""
    for k in np.arange(mat.shape[0]):
        mat[k, k] *= vec[k]


def covariance(mat):
    """Covariance matrix of `mat`, treating rows as observations."""
    return np.cov(mat, rowvar=False)


def coefVar(mat):
    """Coefficient-of-variation matrix: covariance scaled by inverse means."""
    inv_mean = 1.0 / np.mean(mat, 0)
    return inv_mean * (np.cov(mat, rowvar=False).T * inv_mean).T


def sensVar(mat):
    """Sensitivity matrix: inverse covariance scaled by column means."""
    col_mean = np.mean(mat, 0)
    return col_mean * (np.linalg.inv(np.cov(mat, rowvar=False)).T * col_mean).T


# Short aliases used throughout the module.
cov = covariance
rnorm = np.random.multivariate_normal
determinant = np.linalg.det


def join(a, s):
    """Format the numbers in `a` with %.16g and join them with `s`."""
    return s.join("%.16g" % (x) for x in a)


def logsumexp(x):
    """Numerically stable log(sum(exp(x)))."""
    shift = np.max(x)
    return shift + np.log(np.sum(np.exp(x - shift)))
def compareAUCs(parmats, groups, tol=1.0):
    """Estimate relative evidence for each group of parameter rows.

    Approximates the area under exp(-score/tol) per group by importance
    sampling, using a Gaussian KDE over the pooled parameters as proposal.
    `parmats` holds rows of [score, params...]; `groups` labels each row.
    Returns {group_label: normalized weight}.
    """
    from scipy.stats import gaussian_kde
    ids = np.unique(groups)
    # Drop constant columns: they would make the KDE covariance singular.
    parmats = parmats[:, np.var(parmats, axis=0) != 0]
    parmat_list = [parmats[groups == n, :] for n in ids]
    counts = [np.sum(n == groups) for n in ids]
    try:
        # KDE over the parameter columns only (column 0 is the score).
        kde = gaussian_kde(parmats[:, 1:].T)
    except:
        print("Warning: Problem encountered in compareAUC!")
        return {ids[g]: 0 for g in range(len(ids))}
    wt_list = np.array([
        [kde.evaluate(pr) for pr in parmat[:, 1:]] for parmat in parmat_list])
    # Rescale by the smallest weight before taking logs.
    mn = np.min(wt_list)
    wt_list = np.log(wt_list / mn)
    # Importance sampling for Monte Carlo integration:
    favg_exp = np.array([
        [-parmat_list[n][m, 0] / tol - wt_list[n][m] for m in
         range(parmat_list[n].shape[0])] for n in range(len(parmat_list))])
    favg_logsums = np.array([logsumexp(x) for x in favg_exp]) - np.log(counts)
    # Normalize across groups in log space for numerical stability.
    favg_list = np.exp(favg_logsums - logsumexp(favg_logsums))
    return {ids[g]: favg_list[g] for g in range(len(ids))}
def compareAUC(parmat0, parmat1, T):
    """Metropolis-style acceptance test between two parameter populations.

    Estimates the evidence (area under exp(-score/T)) of `parmat0` and
    `parmat1` by importance sampling against a shared KDE proposal, and
    accepts `parmat1` if its evidence is at least that of `parmat0` or
    with probability favg1/favg0 otherwise.
    Returns {'acc': bool, 'favg0': float, 'favg1': float}.
    """
    from scipy.stats import gaussian_kde
    parmats = np.vstack((parmat0, parmat1))
    # Remove columns constant across BOTH populations (singular KDE otherwise).
    parmat0cp = parmat0[:, np.var(parmats, axis=0) != 0].copy()
    parmat1cp = parmat1[:, np.var(parmats, axis=0) != 0].copy()
    parmats = parmats[:, np.var(parmats, axis=0) != 0]
    try:
        kde = gaussian_kde(parmats[:, 1:].T)
    except:
        print("Warning: Problem encountered in compareAUC!")
        return {'acc': 0, 'favg0': 0, 'favg1': 0}
    wt0 = np.array([kde.evaluate(pr) for pr in parmat0cp[:, 1:]])
    wt1 = np.array([kde.evaluate(pr) for pr in parmat1cp[:, 1:]])
    # Rescale both weight sets by the joint minimum.
    mn = np.min([wt0, wt1])
    wt0 /= mn
    wt1 /= mn
    # Importance sampling for Monte Carlo integration:
    favg0 = np.mean([
        np.exp(-parmat0cp[m, 0] / T) / wt0[m] for m in
        range(parmat0cp.shape[0])])
    favg1 = np.mean([
        np.exp(-parmat1cp[m, 0] / T) / wt1[m] for m in
        range(parmat1cp.shape[0])])
    acc = (not np.isnan(favg0) and not np.isnan(favg1) and
           favg0 > 0 and favg1 >= 0 and
           (favg1 >= favg0 or
            (np.random.uniform() < (favg1 / favg0))))
    return {'acc': acc, 'favg0': favg0, 'favg1': favg1}
def anneal_exp(y0, y1, steps):
    """Geometric (exponential) schedule of `steps` values from y0 to y1."""
    frac = np.arange(steps, dtype=np.float64) / (steps - 1.)
    return y0 * np.exp(-frac * np.log(np.float64(y0) / y1))


def anneal_linear(y0, y1, steps):
    """Linear schedule of `steps` values from y0 to y1 (endpoint included)."""
    ramp = np.arange(y0, y1, (y1 - y0) / (steps - 1), dtype=np.float64)
    return np.append(ramp, y1)


def anneal_sigma(y0, y1, steps):
    """Sharp sigmoid schedule switching between the extremes at mid-run."""
    t = np.arange(steps) - (0.5 * steps)
    return y0 + (y1 - y0) * (1. - 1. / (1. + np.exp(-t)))


def anneal_sigmasoft(y0, y1, steps):
    """Soft sigmoid schedule between the extremes (used by hoppMCMC)."""
    t = 12.5 * (np.arange(steps) - (0.5 * steps)) / steps
    return y0 + (y1 - y0) * (1. - 1. / (1. + np.exp(-t)))
def ldet(mat):
    """Log-determinant of `mat`; -inf when the matrix is singular or the
    determinant cannot be computed.

    Fixes: np.Inf (removed in NumPy 2.0) replaced with np.inf; the bare
    `except:` narrowed to np.linalg.LinAlgError (non-square or otherwise
    invalid input); np.linalg.det called directly instead of through the
    module-level `determinant` alias.
    """
    try:
        det = np.linalg.det(mat)
    except np.linalg.LinAlgError:
        return -np.inf
    if det == 0:
        return -np.inf
    # Note: a negative determinant yields nan (log of a negative number),
    # matching the original behavior.
    return np.log(det)
def finalTest(fitFun, param, testnum=10):
    """Evaluate `fitFun(param)` up to `testnum` times, returning the first
    finite score; abort with the last (non-finite) score otherwise."""
    for _ in range(testnum):
        f = fitFun(param)
        if not (np.isnan(f) or np.isinf(f)):
            return f
    Abort("Incompatible parameter set (%g): %s" % (f, join(param, ",")))
class hoppMCMC:
    def __init__(
            self, fitFun, param, varmat, inferpar=None, gibbs=True, num_hopp=3,
            num_adapt=25, num_chain=12, chain_length=50, rangeT=None,
            model_comp=1000.0, outfilename=''):
        """
        Adaptive Basin-Hopping MCMC Algorithm

        Parameters
        ----------
        fitFun:
            fitFun(x) - objective function which takes a np array as the
            only argument
        param:
            initial parameter vector
        varmat:
            2-dimensional array of initial covariance matrix
        inferpar:
            an array of indexes of parameter dimensions to be inferred
            (all parameters are inferred by default)
        gibbs:
            indicates the type of chain iteration
            True - Gibbs iteration where each parameter dimension has its
            own univariate Gaussian proposal distribution (default)
            False - Metropolis-Hastings iteration where there is a single
            multivariate Gaussian proposal distribution
        num_hopp:
            number of hopp-steps (default=3)
        num_adapt:
            number of adaptation steps (default=25)
        num_chain:
            number of MCMC chains (default=12)
        chain_length:
            size of each chain (default=50)
        rangeT:
            [min,max] - range of annealing temperatures for each hopp-step
            (default=[1,1000])
            min should be as low as possible but not lower
            max should be sufficiently permissive to be able to jump between
            posterior modes
        model_comp:
            tolerance for accepting subsequent hopp-steps (default=1000)
            this should ideally be equal to or higher than max(rangeT)
        outfilename:
            name of the output file (default='')
            use this option for a detailed account of the results
            outfilename.final lists information on hopp-steps:
                hopp-step
                acceptance (0/1)
                weighted average of exp{-f0/model_comp}
                weighted average of exp{-f1/model_comp}
            outfilename.parmat lists chain status at the end of each
            adaptation step:
                adaptation step
                chain id
                annealing temperature
                score (f)
                parameter values
            both files can be read using the readFile function

        Returns
        -------
        A hoppMCMC object with
            parmat: an array of num_chain x (1+len(param))
                current score (f) and parameter values for each chain
            varmat: an array of len(inferpar) x len(inferpar)
                latest proposal distribution
            parmats: a list of parameter values (i.e. parmat) accepted at the
                end of hopp-steps

        See also
        --------
        the documentation (doc/hoppMCMC_manual.pdf) for more information and
        examples
        """
        # Multivariate / univariate statistics toolkits; self.stat selects
        # which set the covariance adaptation uses (multivariate here).
        self.multi = {'cov': covariance,
                      'rnorm': np.random.multivariate_normal,
                      'det': np.linalg.det}
        self.single = {'cov': covariance,
                       'rnorm': np.random.normal,
                       'det': abs}
        self.stat = self.multi
        self.gibbs = gibbs
        self.num_hopp = num_hopp
        self.num_adapt = num_adapt
        self.num_chain = num_chain
        self.chain_length = chain_length
        self.rangeT = np.sort([1.0, 1000.0] if rangeT is None else rangeT)
        self.model_comp = model_comp
        # ---
        self.fitFun = fitFun
        self.param = np.array(param, dtype=np.float64, ndmin=1)
        # Validate the initial point before replicating it across chains.
        f0 = finalTest(self.fitFun, self.param)
        self.parmat = np.array([
            [f0] + self.param.tolist() for n in range(self.num_chain)],
            dtype=np.float64)
        self.varmat = np.array(varmat, dtype=np.float64, ndmin=2)
        self.parmats = []
        # ---
        if inferpar is None:
            self.inferpar = np.arange(len(self.param), dtype=np.int32)
        else:
            self.inferpar = np.array(inferpar, dtype=np.int32)
        print("Parameters to infer: %s" % (join(self.inferpar, ",")))
        # --- Round-robin assignment of chains to MPI ranks.
        self.rank_indices = [
            np.arange(i, self.num_chain, MPI_SIZE) for i in range(MPI_SIZE)]
        self.worker_indices = np.delete(range(MPI_SIZE), MPI_MASTER)
        # --- Only the master rank writes output files.
        self.outfilename = outfilename
        self.outparmat = None
        self.outfinal = None
        if MPI_RANK == MPI_MASTER:
            if self.outfilename:
                self.outparmat = binfile(
                    self.outfilename + '.parmat', 'w',
                    self.parmat.shape[1] + 3)
                self.outfinal = binfile(self.outfilename + '.final', 'w', 4)
        # ---
        for hopp_step in range(self.num_hopp):
            # Fresh hot-to-cold annealing schedule for each hopp-step.
            self.anneal = anneal_sigmasoft(
                self.rangeT[0], self.rangeT[1], self.num_adapt)
            for adapt_step in range(self.num_adapt):
                self.runAdaptStep(hopp_step * self.num_adapt + adapt_step)
            if MPI_RANK == MPI_MASTER:
                # Accept the first hopp-step unconditionally; afterwards use
                # the evidence-ratio test (compareAUC) for acceptance.
                test = {'acc': True, 'favg0': np.nan, 'favg1': np.nan} if\
                    len(self.parmats) == 0 else compareAUC(
                        self.parmats[-1][
                            :, [0] + (1 + self.inferpar).tolist()],
                        self.parmat[
                            :, [0] + (1 + self.inferpar).tolist()],
                        self.model_comp)
                if test['acc']:
                    self.parmats.append(self.parmat)
                else:
                    # Rejected: fall back to the last accepted population.
                    self.parmat = self.parmats[-1].copy()
                    self.param = self.parmat[
                        parMin(self.parmat)['i'], 1:].copy()
                # ---
                if self.outfinal:
                    self.outfinal.writeRow([
                        hopp_step, test['acc'], test['favg0'], test['favg1']])
                else:
                    print("parMatAcc.final: %d,%s" % (
                        hopp_step,
                        join([test['acc'], test['favg0'], test['favg1']],
                             ",")))
            # ---
            if MPI_SIZE > 1:
                self.parmat = MPI.COMM_WORLD.bcast(
                    self.parmat, root=MPI_MASTER)
                self.param = MPI.COMM_WORLD.bcast(self.param, root=MPI_MASTER)
        # ---
        if MPI_SIZE > 1:
            self.parmats = MPI.COMM_WORLD.bcast(self.parmats, root=MPI_MASTER)
        if self.outparmat:
            self.outparmat.close()
        if self.outfinal:
            self.outfinal.close()

    def runAdaptStep(self, adapt_step):
        # Restart every chain from the best row found so far.
        if MPI_RANK == MPI_MASTER:
            pm = parMin(self.parmat)
            self.param = self.parmat[pm['i'], 1:].copy()
            self.parmat = np.array([
                self.parmat[pm['i'], :].tolist() for n in
                range(self.num_chain)], dtype=np.float64)
        if MPI_SIZE > 1:
            self.param = MPI.COMM_WORLD.bcast(self.param, root=MPI_MASTER)
            self.parmat = MPI.COMM_WORLD.bcast(self.parmat, root=MPI_MASTER)
        # --- Run this rank's share of the chains.
        for chain_id in self.rank_indices[MPI_RANK]:
            # ---
            mcmc = chainMCMC(self.fitFun,
                             self.param,
                             self.varmat,
                             gibbs=self.gibbs,
                             chain_id=chain_id,
                             pulsevar=1.0,
                             anneal=self.anneal[0],
                             accthr=0.5,
                             inferpar=self.inferpar,
                             varmat_change=0,
                             pulse_change=10,
                             pulse_change_ratio=2,
                             print_iter=0)
            for m in range(self.chain_length):
                mcmc.iterate()
            self.parmat[chain_id, :] = mcmc.getParam()
        # ---
        if MPI_RANK == MPI_MASTER:
            # Collect chain results from the worker ranks.
            for worker in self.worker_indices:
                parmat = MPI.COMM_WORLD.recv(source=worker, tag=1)
                for chain_id in self.rank_indices[worker]:
                    self.parmat[chain_id, :] = parmat[chain_id, :]
            # --- Adapt the proposal covariance from the pooled chains,
            # guarding against (near-)zero entries.
            self.varmat = np.array(
                self.stat['cov'](self.parmat[:, 1 + self.inferpar]), ndmin=2)
            self.varmat[np.abs(self.varmat) < EPS_VARMAT_MIN] = 1.0
            # ---
            for chain_id in range(self.num_chain):
                if self.outparmat:
                    tmp = [adapt_step, chain_id, self.anneal[0]] +\
                        self.parmat[chain_id, :].tolist()
                    self.outparmat.writeRow(tmp)
                else:
                    print("param.mat.step: %d,%d,%g,%s" % (
                        adapt_step, chain_id, self.anneal[0],
                        join(self.parmat[chain_id, :], ",")))
            # --- Advance the annealing schedule.
            if len(self.anneal) > 1:
                self.anneal = self.anneal[1:]
        else:
            # Worker ranks ship their chains to the master.
            MPI.COMM_WORLD.send(self.parmat, dest=MPI_MASTER, tag=1)
        if MPI_SIZE > 1:
            self.parmat = MPI.COMM_WORLD.bcast(self.parmat, root=MPI_MASTER)
            self.varmat = MPI.COMM_WORLD.bcast(self.varmat, root=MPI_MASTER)
            self.anneal = MPI.COMM_WORLD.bcast(self.anneal, root=MPI_MASTER)
class chainMCMC:
def __init__(self,
fitFun,
param,
varmat,
inferpar=None,
gibbs=True,
chain_id=0,
pulsevar=1.0,
anneal=1,
accthr=0.5,
varmat_change=0,
pulse_change=10,
pulse_change_ratio=2,
pulse_allow_decrease=True,
pulse_allow_increase=True,
pulse_min=1e-7,
pulse_max=1e7,
print_iter=0):
"""
MCMC Chain with Adaptive Proposal Distribution
Usage
-----
Once created, a chainMCMC is iterated using the iterate method.
Depending on the value of gibbs, this method calls either iterateMulti or iterateSingle.
Parameters
----------
fitFun:
fitFun(x) - objective function which takes a np array as the only argument
param:
initial parameter vector
varmat:
2-dimensional array of initial covariance matrix
inferpar:
an array of indexes of parameter dimensions to be inferred
(all parameters are inferred by default)
gibbs:
indicates the type of chain iteration
True - Gibbs iteration where each parameter dimension has its own univariate
Gaussian proposal distribution (default)
False - Metropolis-Hastings iteration where there is a single multivariate
Gaussian proposal distribution
chain_id:
a chain identifier (default=0)
pulsevar:
scaling factor for the variance of the proposal distribution (default=1)
anneal:
annealing temperature (default=1)
accthr:
desired acceptance rate (default=0.5)
varmat_change:
how often variance should be updated? (default=0)
varmat_change=0 - fixed variance
varmat_change=n - variance is updated at each nth step
pulse_change:
how often pulsevar should be updated? (default=10)
pulse_change=0 - fixed pulsevar
pulse_change=n - pulsevar is updated at each nth step
pulse_change_ratio:
how should pulsevar be updated? (default=2)
(pulsevar *= pulse_change_ratio)
pulse_allow_increase:
allow pulse to increase (default=True)
pulse_allow_decrease:
allow pulse to decrease (default=True)
pulse_min:
minimum value of pulse (default=1e-7)
pulse_max:
maximum value of pulse (default=1e7)
print_iter:
how often chain status should be printed? (default=0)
print_iter=0 - do not print status
print_iter=n - print status at each nth step
(default=0)
Returns
-------
A chainMCMC object with
getParam: a method for obtaining the latest iteration (f + parameter values)
getVarmat: a method for obtaining the latest proposal distribution (varmat * pulsevar)
See also
--------
the documentation (doc/hoppMCMC_manual.pdf) for more information and examples
"""
self.multi = {'cov': covariance,
'rnorm': np.random.multivariate_normal,
'det': np.linalg.det}
self.single = {'cov': covariance,
'rnorm': np.random.normal,
'det': abs}
# ---
self.chain_id = chain_id;
self.fitFun = fitFun
self.parmat = np.array(param,dtype=np.float64,ndmin=1)
self.varmat = np.array(varmat,dtype=np.float64,ndmin=2)
if inferpar is None:
self.inferpar = np.arange(len(param),dtype=np.int32)
else:
self.inferpar = np.array(inferpar,dtype=np.int32)
self.anneal = np.array(anneal,dtype=np.float64,ndmin=1)
self.accthr = np.array(accthr,dtype=np.float64,ndmin=1)
# ---
self.pulse_change_ratio = pulse_change_ratio
self.pulse_nochange = np.float64(1)
self.pulse_increase = np.float64(self.pulse_change_ratio)
self.pulse_decrease = np.float64(1.0/self.pulse_change_ratio)
self.pulse_change = pulse_change
self.pulse_collect = max(1,self.pulse_change)
self.allow_pincr = pulse_allow_increase
self.allow_pdecr = pulse_allow_decrease
self.pulse_min = pulse_min
self.pulse_max = pulse_max
self.varmat_change = varmat_change
self.varmat_collect = max(1,self.varmat_change)
# ---
if self.parmat.ndim==1:
f0 = finalTest(self.fitFun,self.parmat)
self.parmat = np.array([[f0]+self.parmat.tolist() for i in range(self.varmat_collect)])
elif self.parmat.shape[0]!=self.varmat_collect:
Abort("Dimension mismatch in chainMCMC! parmat.shape[0]=%d collect=%d" %(self.parmat.shape[0],self.varmat_collect))
if self.varmat.shape and self.inferpar.shape[0] != self.varmat.shape[0]:
Abort("Dimension mismatch in chainMCMC! inferpar.shape[0]=%d varmat.shape[0]=%d" %(self.inferpar.shape[0],self.varmat.shape[0]))
# ---
self.gibbs = gibbs
if self.gibbs:
self.pulsevar = np.array(np.repeat(pulsevar,len(self.inferpar)),dtype=np.float64)
self.acc_vecs = [np.repeat(False,self.pulse_collect) for n in range(len(self.inferpar))]
self.iterate = self.iterateSingle
else:
self.pulsevar = pulsevar
self.acc_vec = np.repeat(False,self.pulse_collect)
self.iterate = self.iterateMulti
self.pulsevar0 = self.pulsevar
if not self.gibbs and (self.parmat.shape[1]==2 or self.inferpar.shape[0]==1):
Abort("Please set gibbs=True!")
self.varmat = self.varmat*self.pulsevar
# ---
self.halfa = 0.025
if self.pulse_change<25:
self.halfa = 0.05
self.print_iter = print_iter
self.step = 0
self.index = 0
self.index_acc = 0
def getParam(self):
return self.parmat[self.index,:].copy()
def getVarmat(self):
return (self.varmat*self.pulsevar).copy()
def getVarPar(self):
return ldet(self.multi['cov'](self.parmat[:,1+self.inferpar]))
def getVarVar(self):
return ldet(self.varmat)
def getAcc(self):
if self.gibbs:
return np.array([np.mean(acc_vec) for acc_vec in self.acc_vecs])
else:
return np.mean(self.acc_vec)
def setParam(self,parmat):
self.parmat[self.index,:] = np.array(parmat,dtype=np.float64,ndmin=1).copy()
def newParamSingle(self,param,param_id):
try:
param1 = self.single['rnorm'](param,
self.varmat[param_id,param_id]*self.pulsevar[param_id])
except:
print("Warning: Failed to generate a new parameter set")
param1 = np.copy(param)
return param1
def newParamMulti(self):
try:
param1 = self.multi['rnorm'](self.parmat[self.index,1:][self.inferpar],self.varmat*self.pulsevar)
except np.linalg.linalg.LinAlgError:
print("Warning: Failed to generate a new parameter set")
param1 = np.copy(self.parmat[self.index,1:][self.inferpar])
return param1
def checkMove(self,f0,f1):
acc = (not np.isnan(f1) and not np.isinf(f1) and
f1 >= 0 and
(f1 <= f0 or
(np.log(np.random.uniform()) < (f0-f1)/self.anneal[0])))
# --- f0 = 0.5*SS_0
# --- f = 0.5*SS
# --- sqrt(anneal) == st.dev.
# --- 0.5*x^2/(T*s^2)
# --- return exp(-0.5*SS/anneal)/exp(-0.5*SS_0/anneal)
return(acc)
    def pulsevarUpdate(self, acc_vec):
        """Return a multiplicative adjustment factor for the pulse variance.

        Compares the recent acceptance record *acc_vec* with the target
        acceptance rate and returns pulse_increase, pulse_decrease or
        pulse_nochange accordingly.
        """
        # --- Test if mean(acc_vec) is equal to accthr
        try:
            r = ttest(acc_vec,self.accthr)
        except ZeroDivisionError:
            # ttest presumably divides by the sample variance, so this branch
            # fires when every entry of acc_vec is identical (all accepted or
            # all rejected) -- TODO confirm ttest's contract.
            # NOTE(review): all(acc_vec)<=0 means "not all True", and
            # all(acc_vec)>=0 is always True; this only behaves as
            # "all rejected"/"all accepted" because a zero-variance acc_vec
            # is necessarily constant. Confirm this is the intended reading.
            if all(acc_vec)<=0 and self.allow_pdecr:
                return self.pulse_decrease
            elif all(acc_vec)>=0 and self.allow_pincr:
                return self.pulse_increase
            else:
                return self.pulse_nochange
        # --- r is presumably (statistic, p-value); no change if not significant
        if r[1]>=self.halfa: return self.pulse_nochange
        if r[0]>0 and self.allow_pincr: return self.pulse_increase
        if r[0]<0 and self.allow_pdecr: return self.pulse_decrease
        # --- Return default
        return self.pulse_nochange
    def iterateMulti(self):
        """Perform one joint Metropolis step over all inferred parameters."""
        self.step += 1
        # --- Propose a full parameter vector and score it
        acc = False
        f0 = self.parmat[self.index,0]
        param1 = np.copy(self.parmat[self.index,1:])
        param1[self.inferpar] = self.newParamMulti()
        f1 = self.fitFun(param1)
        acc = self.checkMove(f0,f1)
        # --- Advance the circular acceptance buffer; on acceptance also
        # advance the parameter-history ring used for covariance estimation
        self.index_acc = (self.index_acc+1)%self.pulse_collect
        if acc:
            self.index = (self.index+1)%self.varmat_collect
        # --- Record the outcome and, if accepted, store the new state
        self.acc_vec[self.index_acc] = acc
        if acc:
            self.parmat[self.index,0] = f1
            self.parmat[self.index,1:] = param1
        # --- Periodic state dump
        if self.print_iter and (self.step%self.print_iter)==0:
            print("param.mat.chain: %d,%d,%s" %(self.step,self.chain_id,join(self.parmat[self.index,:],",")))
        # --- Periodic adaptation (skipped on the very first step)
        if self.step>1:
            # --- Rescale the pulse factor from the recent acceptance rate
            if self.pulse_change and (self.step%self.pulse_change)==0:
                self.pulsevar = max(1e-7,self.pulsevar*self.pulsevarUpdate(self.acc_vec))
            # --- Re-estimate the proposal covariance from the stored history,
            # clamping tiny diagonal entries up to EPS_VARMAT_MIN
            if self.varmat_change and (self.step%self.varmat_change)==0:
                self.varmat = np.array(self.multi['cov'](self.parmat[:,1+self.inferpar]),ndmin=2)
                a = np.diag(self.varmat)<EPS_VARMAT_MIN
                self.varmat[a,a] = EPS_VARMAT_MIN
            # --- Periodic adaptation diagnostics
            if self.print_iter and (self.step%self.print_iter)==0:
                print("parMatAcc.chain: %s" %(join([self.step,self.chain_id,ldet(self.multi['cov'](self.parmat[:,1+self.inferpar])),ldet(self.varmat),np.mean(self.acc_vec),self.pulsevar],",")))
    def iterateSingle(self):
        """Perform one Gibbs-style sweep: a Metropolis step per parameter in turn."""
        self.step += 1
        self.index_acc = (self.index_acc+1)%self.pulse_collect
        # --- Sweep over the inferred parameters one at a time, carrying the
        # best accepted score f0 and state param0 forward within the sweep
        acc_steps = False
        f0 = self.parmat[self.index,0]
        param0 = self.parmat[self.index,1:].copy()
        for param_id in np.arange(len(self.inferpar)):
            param1 = param0.copy()
            param1[self.inferpar[param_id]] = self.newParamSingle(param1[self.inferpar[param_id]],param_id)
            f1 = self.fitFun(param1)
            acc = self.checkMove(f0,f1)
            if acc:
                acc_steps = True
                f0 = f1
                param0[self.inferpar[param_id]] = np.copy(param1[self.inferpar[param_id]])
            # Per-parameter acceptance record for per-parameter pulse tuning
            self.acc_vecs[param_id][self.index_acc] = acc
        # --- If anything was accepted, advance the history ring and store
        if acc_steps:
            self.index = (self.index+1)%self.varmat_collect
            self.parmat[self.index,0] = f0
            self.parmat[self.index,1:] = param0
        if np.isnan(f0) or np.isinf(f0):
            Abort("Iterate single failed with %g: %s" %(f0,join(param0,",")))
        # --- Periodic state dump
        if self.print_iter and (self.step%self.print_iter)==0:
            print("param.mat.chain: %d,%d,%s" %(self.step,self.chain_id,join(self.parmat[self.index,:],",")))
        # --- Periodic adaptation (skipped on the very first step)
        if self.step>1:
            # --- Per-parameter pulse rescaling, clipped to [pulse_min, pulse_max]
            # and rejected if the effective variance would underflow
            if self.pulse_change and (self.step%self.pulse_change)==0:
                for param_id in np.arange(len(self.inferpar)):
                    tmp = min(self.pulse_max,max(self.pulse_min,self.pulsevar[param_id]*self.pulsevarUpdate(self.acc_vecs[param_id])))
                    if np.abs(self.varmat[param_id,param_id]*tmp) >= EPS_PULSE_VAR_MIN:
                        self.pulsevar[param_id] = tmp
            # --- Per-parameter variance re-estimation from the stored history
            if self.varmat_change and (self.step%self.varmat_change)==0:
                for param_id in np.arange(len(self.inferpar)):
                    tmp = max(EPS_VARMAT_MIN,self.single['cov'](self.parmat[:,1+self.inferpar[param_id]]))
                    if np.abs(tmp*self.pulsevar[param_id]) >= EPS_PULSE_VAR_MIN:
                        self.varmat[param_id,param_id] = tmp
            # --- Periodic adaptation diagnostics
            if self.print_iter and (self.step%self.print_iter)==0:
                print("parMatAcc.chain: %s" %(join([self.step,self.chain_id,ldet(self.multi['cov'](self.parmat[:,1+self.inferpar])),ldet(self.varmat)],",")))
                print("parMatAcc.chain.accs: %d,%d,%s" %(self.step,self.chain_id,join([np.mean(acc_vec) for acc_vec in self.acc_vecs],",")))
                print("parMatAcc.chain.pulses: %d,%d,%s" %(self.step,self.chain_id,join(self.pulsevar,",")))
|
asteca/ASteCA
|
packages/best_fit/DEPRECATED/hopp_DEPRECATED/hoppMCMC.py
|
Python
|
gpl-3.0
| 30,000
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from simple import simple
from rml import rml, rml2html, rml2txt, odt2odt , html2html, makohtml2html
from render import render
try:
    # PIL is optional: without it only JPEG pictures can be embedded in reports.
    from PIL import Image
except ImportError:
    import logging
    _logger = logging.getLogger(__name__)
    _logger.warning('Python Imaging not installed, you can use only .JPG pictures !')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/openerp/report/render/__init__.py
|
Python
|
apache-2.0
| 1,374
|
import logging
import re
import uuid
from requests.cookies import cookiejar_from_dict
from streamlink import PluginError
from streamlink.cache import Cache
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
from streamlink.utils.args import comma_list_filter
log = logging.getLogger(__name__)
class Zattoo(Plugin):
    """Streamlink plugin for Zattoo and its white-label partner TV platforms.

    Handles session setup (hello/login), cached session attributes, and
    resolving live channels, recordings and VOD items to DASH/HLS streams.
    """
    API_CHANNELS = '{0}/zapi/v2/cached/channels/{1}?details=False'
    API_HELLO = '{0}/zapi/session/hello'
    API_HELLO_V2 = '{0}/zapi/v2/session/hello'
    API_HELLO_V3 = '{0}/zapi/v3/session/hello'
    API_LOGIN = '{0}/zapi/v2/account/login'
    API_LOGIN_V3 = '{0}/zapi/v3/account/login'
    API_SESSION = '{0}/zapi/v2/session'
    API_WATCH = '{0}/zapi/watch'
    API_WATCH_REC = '{0}/zapi/watch/recording/{1}'
    API_WATCH_VOD = '{0}/zapi/avod/videos/{1}/watch'

    STREAMS_ZATTOO = ['dash', 'hls', 'hls5']

    # Re-validate the session every 2 hours; cached credentials live 30 days.
    TIME_CONTROL = 60 * 60 * 2
    TIME_SESSION = 60 * 60 * 24 * 30

    _url_re = re.compile(r'''(?x)
        https?://
        (?P<base_url>
            (?:(?:
                iptv\.glattvision|www\.(?:myvisiontv|saktv|vtxtv)
            )\.ch
            )|(?:(?:
                mobiltv\.quickline|www\.quantum-tv|zattoo
            )\.com
            )|(?:(?:
                tvonline\.ewe|nettv\.netcologne|tvplus\.m-net
            )\.de
            )|(?:(?:
                player\.waly|www\.(?:1und1|netplus)
            )\.tv)
            |www\.bbv-tv\.net
            |www\.meinewelt\.cc
        )/
        (?:
            (?:
                recording(?:s\?recording=|/)
                |
                (?:ondemand/)?(?:watch/(?:[^/\s]+)(?:/[^/]+/))
            )(?P<recording_id>\d+)
            |
            (?:
                (?:live/|watch/)|(?:channels(?:/\w+)?|guide)\?channel=
            )(?P<channel>[^/\s]+)
            |
            ondemand(?:\?video=|/watch/)(?P<vod_id>[^-]+)
        )
        ''')

    _app_token_re = re.compile(r"""window\.appToken\s+=\s+'([^']+)'""")

    _channels_schema = validate.Schema({
        'success': bool,
        'channel_groups': [{
            'channels': [
                {
                    'display_alias': validate.text,
                    'cid': validate.text
                },
            ]
        }]},
        validate.get('channel_groups'),
    )

    _session_schema = validate.Schema({
        'success': bool,
        'session': {
            'loggedin': bool
        }
    }, validate.get('session'))

    arguments = PluginArguments(
        PluginArgument(
            "email",
            requires=["password"],
            metavar="EMAIL",
            help="""
            The email associated with your zattoo account,
            required to access any zattoo stream.
            """),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="""
            A zattoo account password to use with --zattoo-email.
            """),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached zattoo credentials to initiate a new session
            and reauthenticate.
            """),
        PluginArgument(
            'stream-types',
            metavar='TYPES',
            type=comma_list_filter(STREAMS_ZATTOO),
            default=['hls'],
            help='''
            A comma-delimited list of stream types which should be used,
            the following types are allowed:
            - {0}
            Default is "hls".
            '''.format('\n            - '.join(STREAMS_ZATTOO))
        )
    )

    def __init__(self, url):
        """Cache session attributes per domain and detect an existing login."""
        super().__init__(url)
        self.domain = self._url_re.match(url).group('base_url')
        self._session_attributes = Cache(
            filename='plugin-cache.json',
            key_prefix='zattoo:attributes:{0}'.format(self.domain))
        self._uuid = self._session_attributes.get('uuid')
        # A session counts as authenticated only when all cached pieces exist.
        self._authed = (self._session_attributes.get('power_guide_hash')
                        and self._uuid
                        and self.session.http.cookies.get('pzuid', domain=self.domain)
                        and self.session.http.cookies.get('beaker.session.id', domain=self.domain)
                        )
        self._session_control = self._session_attributes.get('session_control',
                                                             False)
        self.base_url = 'https://{0}'.format(self.domain)
        self.headers = {
            'User-Agent': useragents.CHROME,
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base_url
        }

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    def _hello(self):
        """Start a fresh API session: fetch an app token and call 'hello'."""
        log.debug('_hello ...')

        # a new session is required for the app_token
        self.session.http.cookies = cookiejar_from_dict({})
        if self.base_url == 'https://zattoo.com':
            app_token_url = 'https://zattoo.com/client/token-2fb69f883fea03d06c68c6e5f21ddaea.json'
        elif self.base_url == 'https://www.quantum-tv.com':
            app_token_url = 'https://www.quantum-tv.com/token-4d0d61d4ce0bf8d9982171f349d19f34.json'
        else:
            app_token_url = self.base_url
        res = self.session.http.get(app_token_url)
        if self.base_url == 'https://www.quantum-tv.com':
            app_token = self.session.http.json(res)["session_token"]
            hello_url = self.API_HELLO_V3.format(self.base_url)
        elif self.base_url == 'https://zattoo.com':
            app_token = self.session.http.json(res)['app_tid']
            hello_url = self.API_HELLO_V2.format(self.base_url)
        else:
            match = self._app_token_re.search(res.text)
            app_token = match.group(1)
            hello_url = self.API_HELLO.format(self.base_url)

        # Reuse the cached device uuid if present, otherwise mint one.
        if self._uuid:
            __uuid = self._uuid
        else:
            __uuid = str(uuid.uuid4())
            self._session_attributes.set(
                'uuid', __uuid, expires=self.TIME_SESSION)

        if self.base_url == 'https://zattoo.com':
            params = {
                'uuid': __uuid,
                'app_tid': app_token,
                'app_version': '1.0.0'
            }
        else:
            params = {
                'client_app_token': app_token,
                'uuid': __uuid,
            }
            if self.base_url == 'https://www.quantum-tv.com':
                params['app_version'] = '3.2028.3'
            else:
                params['lang'] = 'en'
                params['format'] = 'json'

        res = self.session.http.post(hello_url, headers=self.headers, data=params)

    def _login(self, email, password):
        """Log in and cache the resulting session data (cookies + hash)."""
        log.debug('_login ... Attempting login as {0}'.format(email))
        params = {
            'login': email,
            'password': password,
            'remember': 'true'
        }
        if self.base_url == 'https://www.quantum-tv.com':
            # FIX: this previously compared against 'https://quantum-tv.com'
            # (missing "www."), which can never equal base_url -- the v3
            # login endpoint was therefore never used for Quantum TV.
            login_url = self.API_LOGIN_V3.format(self.base_url)
        else:
            login_url = self.API_LOGIN.format(self.base_url)

        try:
            res = self.session.http.post(login_url, headers=self.headers, data=params)
        except Exception as e:
            if '400 Client Error' in str(e):
                raise PluginError(
                    'Failed to login, check your username/password')
            raise e

        data = self.session.http.json(res)
        self._authed = data['success']
        log.debug('New Session Data')
        self.save_cookies(default_expires=self.TIME_SESSION)
        self._session_attributes.set('power_guide_hash',
                                     data['session']['power_guide_hash'],
                                     expires=self.TIME_SESSION)
        self._session_attributes.set(
            'session_control', True, expires=self.TIME_CONTROL)

    def _watch(self):
        """Resolve the URL to a watch endpoint and yield the found streams."""
        log.debug('_watch ...')
        match = self._url_re.match(self.url)
        if not match:
            log.debug('_watch ... no match')
            return
        channel = match.group('channel')
        vod_id = match.group('vod_id')
        recording_id = match.group('recording_id')

        params = {'https_watch_urls': True}
        if channel:
            watch_url = self.API_WATCH.format(self.base_url)
            params_cid = self._get_params_cid(channel)
            if not params_cid:
                return
            params.update(params_cid)
        elif vod_id:
            log.debug('Found vod_id: {0}'.format(vod_id))
            watch_url = self.API_WATCH_VOD.format(self.base_url, vod_id)
        elif recording_id:
            log.debug('Found recording_id: {0}'.format(recording_id))
            watch_url = self.API_WATCH_REC.format(self.base_url, recording_id)
        else:
            log.debug('Missing watch_url')
            return

        zattoo_stream_types = self.get_option('stream-types') or ['hls']
        for stream_type in zattoo_stream_types:
            params_stream_type = {'stream_type': stream_type}
            params.update(params_stream_type)

            try:
                res = self.session.http.post(watch_url, headers=self.headers, data=params)
            except Exception as e:
                if '404 Client Error' in str(e):
                    log.error('Unfortunately streaming is not permitted in '
                              'this country or this channel does not exist.')
                elif '402 Client Error: Payment Required' in str(e):
                    log.error('Paid subscription required for this channel.')
                    log.info('If paid subscription exist, use --zattoo-purge'
                             '-credentials to start a new session.')
                elif '403 Client Error' in str(e):
                    log.debug('Force session reset for watch_url')
                    self.reset_session()
                else:
                    log.error(str(e))
                return

            data = self.session.http.json(res)
            log.debug('Found data for {0}'.format(stream_type))
            if data['success'] and stream_type in ['hls', 'hls5']:
                for url in data['stream']['watch_urls']:
                    for s in HLSStream.parse_variant_playlist(
                            self.session, url['url']).items():
                        yield s
            elif data['success'] and stream_type == 'dash':
                for url in data['stream']['watch_urls']:
                    for s in DASHStream.parse_manifest(
                            self.session, url['url']).items():
                        yield s

    def _get_params_cid(self, channel):
        """Map a channel display alias to its internal channel id (cid)."""
        log.debug('get channel ID for {0}'.format(channel))
        channels_url = self.API_CHANNELS.format(
            self.base_url,
            self._session_attributes.get('power_guide_hash'))
        try:
            res = self.session.http.get(channels_url, headers=self.headers)
        except Exception:
            log.debug('Force session reset for _get_params_cid')
            self.reset_session()
            return False

        data = self.session.http.json(res, schema=self._channels_schema)

        c_list = []
        for d in data:
            for c in d['channels']:
                c_list.append(c)

        cid = []
        zattoo_list = []
        for c in c_list:
            zattoo_list.append(c['display_alias'])
            if c['display_alias'] == channel:
                cid = c['cid']

        log.debug('Available zattoo channels in this country: {0}'.format(
            ', '.join(sorted(zattoo_list))))

        # Fall back to the raw value; some URLs carry the cid directly.
        if not cid:
            cid = channel

        log.debug('CHANNEL ID: {0}'.format(cid))
        return {'cid': cid}

    def reset_session(self):
        """Drop all cached session state and force reauthentication."""
        self._session_attributes.set('power_guide_hash', None, expires=0)
        self._session_attributes.set('uuid', None, expires=0)
        self.clear_cookies()
        self._authed = False

    def _get_streams(self):
        """Entry point: ensure a valid session, then delegate to _watch()."""
        email = self.get_option('email')
        password = self.get_option('password')

        if self.options.get('purge_credentials'):
            self.reset_session()
            log.info('All credentials were successfully removed.')
        elif (self._authed and not self._session_control):
            # check every two hours, if the session is actually valid
            log.debug('Session control for {0}'.format(self.domain))
            res = self.session.http.get(self.API_SESSION.format(self.base_url))
            res = self.session.http.json(res, schema=self._session_schema)
            if res['loggedin']:
                self._session_attributes.set(
                    'session_control', True, expires=self.TIME_CONTROL)
            else:
                log.debug('User is not logged in')
                self._authed = False

        if not self._authed and (not email and not password):
            log.error(
                'A login for Zattoo is required, use --zattoo-email EMAIL'
                ' --zattoo-password PASSWORD to set them')
            return

        if not self._authed:
            self._hello()
            self._login(email, password)

        return self._watch()


__plugin__ = Zattoo
|
beardypig/streamlink
|
src/streamlink/plugins/zattoo.py
|
Python
|
bsd-2-clause
| 13,444
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
class InvoiceInterval(osv.osv):
    """Invoicing interval selectable when invoicing rent orders.

    Each record names an interval and the python method implementing it.
    To add support for a specific interval, create one of these records,
    inherit rent.order and add a custom method with this signature:

        method(self, cr, uid, order, context=None)

    where *order* is the result of a browse() on the current order. The
    method must return a list of the created invoice ids, or raise an
    exception.
    """
    _name = 'rent.interval'

    _columns = {
        # Human-readable interval name (translatable).
        'name':fields.char('Name', size=150, required=True, translate=True),
        # Name of the python method called on rent.order for this interval.
        'method':fields.char('Method', size=255, required=True),
        # Duration unities (product.uom) that must not be combined with this interval.
        'not_allowed_duration_unities':fields.many2many('product.uom', 'rent_interval_not_allowed_durations',
            'interval_id', 'duration_id', string='Duration not allowed with this interval !'),
    }

# Instantiating the class registers the model in the OpenERP ORM (pre-v7 idiom).
InvoiceInterval()
|
kailIII/emaresa
|
trunk.pe/rent/intervals.py
|
Python
|
agpl-3.0
| 2,316
|
#
# The goal for this algorithm is going to be to run a few tests on different market parameters to
# determine what kind of market we are looking at (eg. given a new market)
#
# This will help with short term and long term market analysis, and teach the program what market it is looking
# at. For the user it makes it simple to make investment decisions and allows them to make important decisions
# on if new markets look like they are good ones to enter or not.
#
# We will look at a tech market, a failed market, and a natural resource market for these tests.
#
# Based on the result we will teach the program what decisions are best to make as long as it chose the
# correct market fromt the data.
#
# Another feature to add will be a function that says the market is indetermiable at the moment, saying it needs
# more data. This will allow the risk to be calculated so that the program doesn't make rash decisions costing the
# user a lot of money.
#
# To run an algorithm in Quantopian, you need two functions:
# initialize and handle_data.
def initialize(context):
    """One-time algorithm setup.

    Runs once at the start of the backtest: selects the security under
    test (Apple) and resets the bar counter used to pace buy decisions
    and to detect sustained weakness.
    """
    # Security to trade; `symbol` is provided by the Quantopian runtime.
    context.security = symbol('AAPL')
    # Bars elapsed since the last buy (or since the start).
    context.daycounter = 0
#comment
# The handle_data function is where the real work is done.
# This function is run either every minute
# (in live trading and minute backtesting mode)
# or every day (in daily backtesting mode).
def handle_data(context, data):
    """Per-bar trading logic: buy on momentum, liquidate on sustained weakness.

    Buys all-in when price is 3% above the 5-day moving average (and at
    least 3 bars have passed since the last buy); sells everything when
    price has stayed below the average for more than 30 bars.
    """
    # One more bar has elapsed.
    context.daycounter += 1

    # 5-day moving average vs the latest price, plus available cash.
    average_price = data[context.security].mavg(5)
    current_price = data[context.security].price
    cash = context.portfolio.cash

    # NOTE: threshold is 3% above the moving average (1.03), not 1% as an
    # earlier comment claimed.
    should_buy = (current_price > 1.03 * average_price
                  and cash > current_price
                  and context.daycounter > 3)

    if should_buy:
        # Buy as many whole shares as cash allows (positive = buy).
        number_of_shares = int(cash / current_price)
        order(context.security, +number_of_shares)
        log.info("Buying %s" % (context.security.symbol))
        # Restart the counter to measure success of this entry.
        context.daycounter = 0
    elif current_price < average_price and context.daycounter > 30:
        # Price has underperformed the average for over 30 bars: exit fully.
        order_target(context.security, 0)
        log.info("Selling %s" % (context.security.symbol))

    # Track the stock price on the record graph.
    record(stock_price=data[context.security].price)
|
mmilutinovic1313/zipline-with-algorithms
|
ALGORITHMS/AnalyzeMarket.py
|
Python
|
apache-2.0
| 4,190
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault('LANG', 'en_US.UTF-8')
from litchi import settings
def main():
    """Launch navigator.py with the configured Python interpreter.

    Prefers settings.PYTHON_INTERPRETER when it points at an existing
    executable; otherwise falls back to the interpreter running this
    script. The navigator is always launched, whichever branch is taken.
    """
    try:
        interpreter = settings.PYTHON_INTERPRETER
        if not os.path.exists(interpreter):
            raise IOError
    except (AttributeError, IOError):
        # Setting missing or pointing at a nonexistent binary.
        interpreter = sys.executable
    finally:
        here = os.path.dirname(os.path.abspath(__file__))
        os.system(' '.join((interpreter, os.path.join(here, 'navigator.py'))))


if __name__ == '__main__':
    main()
|
245967906/litchi
|
utils/startup.py
|
Python
|
gpl-3.0
| 762
|
# Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from pyfaf.actions import Action
from pyfaf.common import FafError
from pyfaf.problemtypes import problemtypes
from pyfaf.queries import get_reports_by_type, get_report
from pyfaf.storage import ReportHash
from pyfaf.utils.hash import hash_list
class AddCompatHashes(Action):
    """faf action that computes and attaches compatibility hashes to reports.

    For every report of the selected problem types, hashes the crash
    thread of each backtrace (component name + frame identities) and
    stores the result as a ReportHash, skipping hashes that would
    conflict with a different report.
    """
    name = "addcompathashes"

    def _unmap_offset(self, offset) -> int:
        """Map a negative (wrapped signed 64-bit) offset back to its unsigned value."""
        if offset < 0:
            offset += 1 << 63

        return offset

    def _hash_backtrace(self, db_backtrace, hashbase=None, offset=False) -> str:
        """Hash the crash thread of *db_backtrace*.

        Uses up to 16 non-inlined frames; prefers function names (with the
        source offset when *offset* is set), falling back to symbol hashes.

        :raises FafError: no/multiple crash threads, or neither names nor
            hashes are available for all frames.
        """
        if hashbase is None:
            hashbase = []

        crashthreads = [t for t in db_backtrace.threads if t.crashthread]
        if not crashthreads:
            raise FafError("No crash thread found")

        if len(crashthreads) > 1:
            raise FafError("Multiple crash threads found")

        frames = [f for f in crashthreads[0].frames if not f.inlined][:16]

        has_names = all(f.symbolsource.symbol is not None and
                        f.symbolsource.symbol.name is not None and
                        f.symbolsource.symbol.name != "??" for f in frames)
        has_hashes = all(f.symbolsource.hash is not None for f in frames)
        # use function names if available
        if has_names:
            # also hash offset for reports that use it as line numbers
            # these reports always have function names
            if offset:
                hashbase.extend(["{0} @ {1} + {2}"
                                 .format(f.symbolsource.symbol.name,
                                         f.symbolsource.path,
                                         f.symbolsource.offset) for f in frames])
            else:
                hashbase.extend(["{0} @ {1}"
                                 .format(f.symbolsource.symbol.name,
                                         f.symbolsource.path) for f in frames])
        # fallback to hashes
        elif has_hashes:
            hashbase.extend(["{0} @ {1}"
                             .format(f.symbolsource.hash,
                                     f.symbolsource.path) for f in frames])
        else:
            raise FafError("either function names or hashes are required")

        return hash_list(hashbase)

    def run(self, cmdline, db) -> int:
        """Process each selected problem type and attach missing hashes.

        :return: 0 on success, 1 when no valid problem type was selected.
        """
        # `not cmdline.problemtype` already covers None; the old
        # `is None or not ...` double test was redundant.
        if not cmdline.problemtype:
            ptypes = list(problemtypes.keys())
        else:
            ptypes = []
            for ptype in cmdline.problemtype:
                if ptype not in problemtypes:
                    self.log_warn("Problem type '{0}' is not supported"
                                  .format(ptype))
                    continue

                ptypes.append(ptype)

        if not ptypes:
            self.log_info("Nothing to do")
            return 1

        for i, ptype in enumerate(ptypes, start=1):
            problemtype = problemtypes[ptype]
            self.log_info("[{0} / {1}] Processing problem type '{2}'"
                          .format(i, len(ptypes), problemtype.nice_name))

            db_reports = get_reports_by_type(db, ptype)
            for j, db_report in enumerate(db_reports, start=1):
                self.log_info("  [{0} / {1}] Processing report #{2}"
                              .format(j, len(db_reports), db_report.id))

                # Hashes already queued for this report in this run, so a
                # duplicate is only added once.
                hashes = set()
                for k, db_backtrace in enumerate(db_report.backtraces, start=1):
                    self.log_debug("\t[%d / %d] Processing backtrace #%d",
                                   k, len(db_report.backtraces), db_backtrace.id)
                    try:
                        component = db_report.component.name
                        # Python reports encode line numbers in the offset.
                        include_offset = ptype.lower() == "python"
                        bthash = self._hash_backtrace(db_backtrace,
                                                      hashbase=[component],
                                                      offset=include_offset)
                        self.log_debug("\t%s", bthash)
                        db_dup = get_report(db, bthash)
                        if db_dup is None:
                            self.log_info("    Adding hash '{0}'"
                                          .format(bthash))
                            # idiom: `not in` instead of `not x in y`
                            if bthash not in hashes:
                                db_reporthash = ReportHash()
                                db_reporthash.report = db_report
                                db_reporthash.hash = bthash
                                db.session.add(db_reporthash)
                                hashes.add(bthash)
                        elif db_dup == db_report:
                            self.log_debug("\tHash '%s' already assigned", bthash)
                        else:
                            self.log_warn(("    Conflict!  Skipping hash '{0}'"
                                           " (report #{1})").format(bthash,
                                                                    db_dup.id))
                    except FafError as ex:
                        self.log_warn("    {0}".format(str(ex)))
                        continue

                db.session.flush()

        return 0

    def tweak_cmdline_parser(self, parser) -> None:
        parser.add_problemtype(multiple=True)
|
abrt/faf
|
src/pyfaf/actions/addcompathashes.py
|
Python
|
gpl-3.0
| 5,960
|
"""
Interfaces and helpers for the virtio_serial ports.
:copyright: 2012 Red Hat Inc.
"""
from __future__ import division
from threading import Thread
from collections import deque
import logging
import os
import random
import select
import socket
import time
import struct
import aexpect
from avocado.core import exceptions
from avocado.utils import process
from six.moves import xrange
from virttest import data_dir
from virttest.compat_52lts import decode_to_text
SOCKET_SIZE = 2048
class VirtioPortException(Exception):
""" General virtio_port exception """
pass
class VirtioPortFatalException(VirtioPortException):
""" Fatal virtio_port exception """
pass
class _VirtioPort(object):
"""
Define structure to keep information about used port.
"""
def __init__(self, qemu_id, name, hostfile, port_type="unix_socket"):
"""
:param name: Name of port for guest side.
:param hostfile: Path to port on host side.
"""
self.qemu_id = qemu_id
self.name = name
self.hostfile = hostfile
self.is_console = None # "yes", "no"
self.sock = None
self.port_was_opened = None
self.port_type = port_type
def __str__(self):
"""
Convert to text.
"""
return ("%s,%s,%s,%s,%d" % ("Socket", self.name, self.is_console,
self.hostfile, self.is_open()))
def __getstate__(self):
"""
socket is unpickable so we need to remove it and say it's closed.
Used by autotest env.
"""
# TODO: add port cleanup into qemu_vm.py
if self.is_open():
logging.warn("Force closing virtio_port socket, FIX the code to "
" close the socket prior this to avoid possible err.")
self.close()
return self.__dict__.copy()
def is_open(self):
""" :return: host port status (open/closed) """
if self.sock:
return True
else:
return False
def for_guest(self):
"""
Format data for communication with guest side.
"""
return [self.name, self.is_console]
def open(self): # @ReservedAssignment
"""
Open port on host side.
"""
if self.is_open():
return
attempt = 11
while attempt > 0:
try:
if self.port_type == 'unix_socket':
sock_flag = socket.AF_UNIX
elif self.port_type in ('tcp_socket', 'udp'):
sock_flag = socket.AF_INET
if self.port_type == 'udp':
sock_type = socket.SOCK_DGRAM
elif self.port_type in ('tcp_socket', 'unix_socket'):
sock_type = socket.SOCK_STREAM
self.sock = socket.socket(sock_flag, sock_type)
self.sock.settimeout(1)
self.sock.connect(self.hostfile)
self.sock.setsockopt(1, socket.SO_SNDBUF, SOCKET_SIZE)
self.sock.settimeout(None)
self.port_was_opened = True
return
except Exception:
attempt -= 1
time.sleep(1)
raise exceptions.TestFail("Can't open the %s sock (%s)" % (self.name,
self.hostfile))
def clean_port(self):
"""
Clean all data from opened port on host side.
"""
if self.is_open():
self.close()
elif not self.port_was_opened:
# BUG: Don't even try opening port which was never used. It
# hangs for ever... (virtio_console bug)
logging.debug("No need to clean port %s", self)
return
logging.debug("Cleaning port %s", self)
self.open()
ret = select.select([self.sock], [], [], 1.0)
if ret[0]:
buf = self.sock.recv(1024)
logging.debug("Rest in socket: " + repr(buf))
def close(self):
"""
Close port.
"""
if self.is_open():
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.sock = None
def mark_as_clean(self):
"""
Mark port as cleaned
"""
self.port_was_opened = False
class VirtioSerial(_VirtioPort):

    """ Class for handling virtio-serialport """

    def __init__(self, qemu_id, name, hostfile, port_type="unix_socket"):
        """
        :param qemu_id: Id of the VM this port belongs to.
        :param name: Name of port for guest side.
        :param hostfile: Path to port on host side.
        :param port_type: Transport type: unix_socket, tcp_socket or udp.
        """
        super(VirtioSerial, self).__init__(qemu_id, name, hostfile, port_type)
        # Serial ports are never consoles.
        self.is_console = "no"
class VirtioConsole(_VirtioPort):

    """ Class for handling virtio-console """

    def __init__(self, qemu_id, name, hostfile, port_type="unix_socket"):
        """
        :param qemu_id: Id of the VM this port belongs to.
        :param name: Name of port for guest side.
        :param hostfile: Path to port on host side.
        :param port_type: Transport type: unix_socket, tcp_socket or udp.
        """
        super(VirtioConsole, self).__init__(qemu_id, name, hostfile, port_type)
        # Console ports are always consoles.
        self.is_console = "yes"
class GuestWorker(object):

    """
    Class for executing "virtio_console_guest" script on guest.

    Copies the worker script into the guest, compiles and starts it, and then
    drives it through the guest shell session using a PASS:/FAIL: line
    protocol (see cmd()/_cmd()).
    """

    def __init__(self, vm):
        """ Initialize worker for use (including port init on guest) """
        self.vm = vm
        self.session = self.vm.wait_for_login()
        self.__cmd_execute_worker = None

        # Detect the OS version ("echo on" works only in a POSIX shell)
        guest_script_py = "virtio_console_guest.py"
        out = self.session.cmd_output("echo on")
        if "on" in out:
            self.os_linux = True
            guest_script_path = os.path.join("/tmp", guest_script_py)
            cmd_guest_size = ("du -b %s | cut -f1"
                              % guest_script_path)
            cmd_already_compiled_chck = "ls %so" % guest_script_path
            cmd_compile = ("`command -v python python3 | head -1` -OO %s -c "
                           "&& echo -n 'PASS: Compile virtio_guest finished' "
                           "|| echo -n 'FAIL: Compile virtio_guest failed'"
                           % guest_script_path)
            self.__cmd_execute_worker = ("`command -v python python3 | head -1` %so"
                                         "&& echo -n 'PASS: virtio_guest finished' "
                                         "|| echo -n 'FAIL: virtio_guest failed'"
                                         % guest_script_path)
        else:
            self.os_linux = False
            guest_script_path = "C:\\%s" % guest_script_py
            cmd_guest_size = ("for %%I in (%s) do @echo %%~zI"
                              % guest_script_path)
            cmd_already_compiled_chck = "dir %so" % guest_script_path
            cmd_compile = ("%s -c "
                           "&& echo PASS: Compile virtio_guest finished "
                           "|| echo FAIL: Compile virtio_guest failed"
                           % guest_script_path)
            self.__cmd_execute_worker = ("%so "
                                         "&& echo PASS: virtio_guest finished "
                                         "|| echo FAIL: virtio_guest failed"
                                         % guest_script_path)

        # Copy, compile and run the worker (skipped when an identically-sized
        # script is already present and compiled on the guest).
        timeout = 10
        guest_script_src = os.path.join(data_dir.get_shared_dir(), 'scripts',
                                        'virtio_console_guest.py')
        script_size = decode_to_text(process.system_output("du -b %s | cut -f1" %
                                                           guest_script_src,
                                                           shell=True)).strip()
        script_size_guest = self.session.cmd_output(cmd_guest_size).strip()
        if (script_size != script_size_guest or
                self.session.cmd_status(cmd_already_compiled_chck)):
            if self.os_linux:
                # Disable serial-getty@hvc0.service on systemd-like hosts
                self.session.cmd_status('systemctl mask '
                                        'serial-getty@hvc0.service')
                self.session.cmd_status('systemctl stop '
                                        'serial-getty@hvc0.service')
            # Copy virtio_console_guest.py into guests
            self.vm.copy_files_to(guest_script_src, guest_script_path)
            # set echo off (self.cmd() musn't contain C:)
            self.session.sendline("echo off")
            # Compile worker
            logging.debug("Compile %s on guest %s", guest_script_py,
                          self.vm.name)
            try:
                self.cmd(cmd_compile, timeout)
            except VirtioPortException:
                if not self.os_linux:
                    logging.error("Script execution failed, do you have python"
                                  " and pywin32 installed? Currently this "
                                  "needs to be done manually!")
                raise
            self.session.sendline()

        # set echo off (self.cmd() musn't contain C:)
        self.session.sendline("echo off")
        logging.debug("Starting %so on guest %s", guest_script_py,
                      self.vm.name)
        self._execute_worker(timeout)
        self._init_guest(timeout)

    def _execute_worker(self, timeout=10):
        """ Execute worker on guest """
        try:
            self.cmd(self.__cmd_execute_worker, timeout)
        except VirtioPortException:
            if not self.os_linux:
                logging.error("Script execution failed, do you have python"
                              " and pywin32 installed? Currently this "
                              "needs to be done manually!")
            raise
        # Let the system rest
        # FIXME: Is this always necessarily?
        time.sleep(2)

    def _init_guest(self, timeout=10):
        """ Initialize worker on guest: register all virtio ports. """
        ports = []
        for port in self.vm.virtio_ports:
            ports.append(port.for_guest())
        self.cmd("virt.init(%s)" % (ports), timeout)

    def reconnect(self, vm, timeout=10):
        """
        Reconnect to guest_worker (eg. after migration)

        :param vm: New VM object
        """
        self.vm = vm
        self.session = self.vm.wait_for_login()
        self._execute_worker(timeout)

    def cmd(self, cmd, timeout=10, patterns=None):
        """
        Wrapper around the self.cmd command which executes the command on
        guest. Unlike self._cmd command when the command fails it raises the
        test exceptions.

        :param command: Command that will be executed.
        :param timeout: Timeout used to verify expected output.
        :return: Tuple (match index, data)
        :raise VirtioPortException: when the command FAILed or timed out.
        """
        match, data = self._cmd(cmd, timeout, patterns)
        if match == 1 or match is None:
            raise VirtioPortException("Failed to execute '%s' on"
                                      " virtio_console_guest.py, "
                                      "vm: %s, output:\n%s" %
                                      (cmd, self.vm.name, data))
        return (match, data)

    def _cmd(self, cmd, timeout=10, patterns=None):
        """
        Execute given command inside the script's main loop.

        :param command: Command that will be executed.
        :param timeout: Timeout used to verify expected output.
        :param patterns: Expected patterns; have to startwith ^PASS: or ^FAIL:
        :return: Tuple (match index, data); match is 0 for PASS, 1 for FAIL,
                 None on timeout or malformed pattern.
        """
        if not patterns:
            patterns = ("^PASS:", "^FAIL:")
        logging.debug("Executing '%s' on virtio_console_guest.py,"
                      " vm: %s, timeout: %s", cmd, self.vm.name, timeout)
        self.session.sendline(cmd)
        try:
            (match, data) = self.session.read_until_any_line_matches(patterns,
                                                                     timeout=timeout)
            # Normalize the raw pattern index to 0 (PASS) / 1 (FAIL),
            # regardless of the order the patterns were supplied in.
            if patterns[match].startswith('^PASS:'):
                match = 0
            elif patterns[match].startswith('^FAIL:'):
                match = 1
            else:
                data = ("Incorrect pattern %s. Data in console:\n%s"
                        % (patterns[match], data))
                match = None
        except aexpect.ExpectError as inst:
            match = None
            data = "Cmd process timeout. Data in console:\n" + inst.output

        self.vm.verify_kernel_crash()
        return (match, data)

    def read_nonblocking(self, internal_timeout=None, timeout=None):
        """
        Reads-out all remaining output from GuestWorker.

        :param internal_timeout: Time (seconds) to wait before we give up
                                 reading from the child process, or None to
                                 use the default value.
        :param timeout: Timeout for reading child process output.
        """
        return self.session.read_nonblocking(internal_timeout, timeout)

    def _cleanup_ports(self):
        """
        Read all data from all ports, in both sides of each port.
        """
        for port in self.vm.virtio_ports:
            openned = port.is_open()
            port.clean_port()
            self.cmd("virt.clean_port('%s'),1024" % port.name, 10)
            # Restore the original open/closed state of the port.
            if not openned:
                port.close()
                self.cmd("virt.close('%s'),1024" % port.name, 10)

    def safe_exit_loopback_threads(self, send_pts, recv_pts):
        """
        Safely executes on_guest("virt.exit_threads()") using workaround of
        the stuck thread in loopback in mode=virt.LOOP_NONE .

        :param send_pts: list of possible send sockets we need to work around.
        :param recv_pts: list of possible recv sockets we need to read-out.
        """
        # No need to clean ports when VM is dead
        if not self.vm or self.vm.is_dead():
            return
        # in LOOP_NONE mode it might stuck in read/write
        # This command can't fail, can only freze so wait for the correct msg
        match, tmp = self._cmd("virt.exit_threads()", 3, ("^PASS: All threads"
                                                          " finished",))
        if match is None:
            logging.warn("Workaround the stuck thread on guest")
            # Thread is stuck in read/write
            for send_pt in send_pts:
                timeout = None
                try:
                    timeout = send_pt.sock.gettimeout()
                    send_pt.sock.settimeout(1)
                    send_pt.sock.send(b".")
                except socket.timeout:
                    pass    # If still stuck VM gets destroyed below
                send_pt.sock.settimeout(timeout)
        elif match != 0:
            # Something else
            raise VirtioPortException("Unexpected fail\nMatch: %s\nData:\n%s"
                                      % (match, tmp))

        # Read-out all remaining data
        for recv_pt in recv_pts:
            while select.select([recv_pt.sock], [], [], 0.1)[0]:
                recv_pt.sock.recv(1024)

        # This will cause fail in case anything went wrong.
        match, tmp = self._cmd("print('PASS: nothing')", 10, ('^PASS: nothing',
                                                              '^FAIL:'))
        # FIX: was "match is not 0" -- identity comparison with an int
        # literal is unreliable (and a SyntaxWarning on modern CPython).
        if match != 0:
            logging.error("Python is stuck/FAILed after read-out:\n%s", tmp)
            try:
                self.session.close()
                self.session = self.vm.wait_for_login()
                if self.os_linux:   # On windows it dies with the connection
                    self.cmd("killall -9 `command -v python python3 | head -1` "
                             "&& echo -n PASS: python killed"
                             "|| echo -n PASS: python was already dead", 10)
                self._execute_worker()
                self._init_guest()
            except Exception as inst:
                logging.error(inst)
                raise VirtioPortFatalException("virtio-console driver is "
                                               "irreparably blocked, further tests might FAIL.")

    def cleanup_ports(self):
        """
        Clean state of all ports and set port to default state.

        Default state: No data on port or in port buffer. Read mode = blocking.
        """
        # Check if python is still alive
        match, tmp = self._cmd("is_alive()", 10)
        # FIX: was "match is not 0" (identity comparison with int literal).
        if match != 0:
            logging.error("Python died/is stuck/have remaining threads")
            logging.debug(tmp)
            try:
                self.vm.verify_kernel_crash()

                match, tmp = self._cmd("guest_exit()", 10, ('^FAIL:',
                                                            '^PASS: virtio_guest finished'))
                self.session.close()
                self.session = self.vm.wait_for_login()
                # On windows it dies with the connection
                if match != 0 and self.os_linux:
                    logging.debug(tmp)
                    self.cmd("killall -9 `command -v python python3 | head -1` "
                             "&& echo -n PASS: python killed"
                             "|| echo -n PASS: python was already dead", 10)
                self._execute_worker()
                self._init_guest()
                self._cleanup_ports()
            except Exception as inst:
                logging.error(inst)
                raise VirtioPortFatalException("virtio-console driver is "
                                               "irreparably blocked, further tests might FAIL.")

    def cleanup(self):
        """ Cleanup ports and quit the worker """
        # Verify that guest works
        if self.session and self.vm and self.vm.is_alive():
            self.cleanup_ports()
        if self.vm:
            self.vm.verify_kernel_crash()
        # Quit worker
        if self.session and self.vm and self.vm.is_alive():
            match, tmp = self._cmd("guest_exit()", 10)
            self.session.close()
            # On windows it dies with the connection
            # FIX: was "match is not 0" (identity comparison with int literal).
            if match != 0 and self.os_linux:
                logging.warn('guest_worker stuck during cleanup:\n%s\n,'
                             ' killing python...', tmp)
                self.session = self.vm.wait_for_login()
                self.cmd("killall -9 `command -v python python3 | head -1` "
                         "&& echo -n PASS: python killed"
                         "|| echo -n PASS: python was already dead", 10)
                self.session.close()
        self.session = None
        self.vm = None
class ThSend(Thread):

    """
    Thread that repeatedly pushes a fixed chunk of data into a port until
    asked to stop.
    """

    def __init__(self, port, data, exit_event, quiet=False):
        """
        :param port: Destination port.
        :param data: The data intend to be send in a loop.
        :param exit_event: Exit event.
        :param quiet: If true don't raise event when crash.
        """
        Thread.__init__(self)
        self.port = port
        # FIXME: socket.send(data>>127998) without read blocks thread
        if len(data) > 102400:
            data = data[:102400]
            logging.error("Data is too long, using only first %d bytes",
                          len(data))
        self.data = data
        self.exitevent = exit_event
        self.idx = 0                # total bytes sent so far
        self.quiet = quiet
        self.ret_code = 1           # sets to 0 when finish properly

    def run(self):
        logging.debug("ThSend %s: run", self.getName())
        try:
            while not self.exitevent.isSet():
                sent = self.port.send(self.data)
                self.idx += sent
            logging.debug("ThSend %s: exit(%d)", self.getName(), self.idx)
        except Exception as exc:
            if not self.quiet:
                raise exc
            logging.debug(exc)
        self.ret_code = 0
class ThSendCheck(Thread):

    """
    Random data sender thread.

    Generates random bytes, records every byte into the verification
    queues (consumed by ThRecvCheck) and sends them through the port,
    surviving port reconnections (e.g. across migration).
    """

    def __init__(self, port, exit_event, queues, blocklen=1024,
                 migrate_event=None, reduced_set=False):
        """
        :param port: Destination port
        :param exit_event: Exit event
        :param queues: Queues for the control data (FIFOs)
        :param blocklen: Block length
        :param migrate_event: Event indicating port was changed and is ready.
        :param reduced_set: If True send only uppercase ASCII (A-Z).
        """
        Thread.__init__(self)
        self.port = port
        self.port.sock.settimeout(1)
        self.queues = queues
        # FIXME: socket.send(data>>127998) without read blocks thread
        if blocklen > 102400:
            blocklen = 102400
            logging.error("Data is too long, using blocklen = %d",
                          blocklen)
        self.blocklen = blocklen
        self.exitevent = exit_event
        self.migrate_event = migrate_event
        self.idx = 0                # total bytes confirmed sent
        self.ret_code = 1           # sets to 0 when finish properly
        self.reduced_set = reduced_set

    def run(self):
        logging.debug("ThSendCheck %s: run", self.getName())
        _err_msg_exception = ('ThSendCheck ' + str(self.getName()) + ': Got '
                              'exception %s, continuing')
        _err_msg_disconnect = ('ThSendCheck ' + str(self.getName()) + ': Port '
                               'disconnected, waiting for new port.')
        _err_msg_reconnect = ('ThSendCheck ' + str(self.getName()) + ': Port '
                              'reconnected, continuing.')
        too_much_data = False
        # Byte-value range for random data: A-Z when reduced, else 0-254.
        if self.reduced_set:
            rand_a = 65
            rand_b = 91
        else:
            rand_a = 0
            rand_b = 255
        while not self.exitevent.isSet():
            # FIXME: workaround the problem with qemu-kvm stall when too
            # much data is sent without receiving
            for queue in self.queues:
                while not self.exitevent.isSet() and len(queue) > 1048576:
                    too_much_data = True
                    time.sleep(0.1)
            try:
                ret = select.select([], [self.port.sock], [], 1.0)
            except Exception as inst:
                # self.port is not yet set while reconnecting
                if self.migrate_event is None:
                    raise exceptions.TestFail("ThSendCheck %s: Broken pipe. If this"
                                              " is expected behavior set migrate_event "
                                              "to support reconnection." % self.getName())
                if self.port.sock is None:
                    logging.debug(_err_msg_disconnect)
                    while self.port.sock is None:
                        if self.exitevent.isSet():
                            break
                        time.sleep(0.1)
                    logging.debug(_err_msg_reconnect)
                else:
                    logging.debug(_err_msg_exception, inst)
                continue
            if ret[1]:
                # Generate blocklen of random data add them to the FIFO
                # and send them over virtio_console
                buf = b""
                for _ in range(self.blocklen):
                    char = b"%c" % random.randrange(rand_a, rand_b)
                    buf += char
                    for queue in self.queues:
                        queue.append(char)
                target = self.idx + self.blocklen
                # Keep re-sending the unsent remainder until the whole
                # block went through (send() may be partial).
                while not self.exitevent.isSet() and self.idx < target:
                    try:
                        idx = self.port.sock.send(buf)
                    except socket.timeout:
                        continue
                    except Exception as inst:
                        # Broken pipe
                        # errno 32 == EPIPE; anything else is retried as-is.
                        if not hasattr(inst, 'errno') or inst.errno != 32:
                            continue
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail("ThSendCheck %s: Broken "
                                                      "pipe. If this is expected behavior "
                                                      "set migrate_event to support "
                                                      "reconnection." % self.getName())
                        logging.debug("ThSendCheck %s: Broken pipe "
                                      ", reconnecting. ", self.getName())
                        attempt = 10
                        while (attempt > 1 and
                               not self.exitevent.isSet()):
                            # Wait until main thread sets the new self.port
                            while not (self.exitevent.isSet() or
                                       self.migrate_event.wait(1)):
                                pass
                            if self.exitevent.isSet():
                                break
                            logging.debug("ThSendCheck %s: Broken pipe resumed"
                                          ", reconnecting...", self.getName())
                            self.port.sock = False
                            self.port.open()
                            try:
                                idx = self.port.sock.send(buf)
                            except Exception:
                                attempt -= 1
                                time.sleep(10)
                            else:
                                attempt = 0
                    buf = buf[idx:]
                    self.idx += idx
        logging.debug("ThSendCheck %s: exit(%d)", self.getName(),
                      self.idx)
        if too_much_data:
            logging.error("ThSendCheck: working around the 'too_much_data'"
                          "bug")
        self.ret_code = 0
class ThRecv(Thread):

    """
    Drains a port: reads incoming data and discards it, counting bytes.
    """

    def __init__(self, port, event, blocklen=1024, quiet=False):
        """
        :param port: Data source port.
        :param event: Exit event.
        :param blocklen: Block length.
        :param quiet: If true don't raise event when crash.
        """
        Thread.__init__(self)
        self.port = port
        # Remember the caller's timeout so it can be restored on exit.
        self._port_timeout = self.port.gettimeout()
        self.port.settimeout(0.1)
        self.exitevent = event
        self.blocklen = blocklen
        self.idx = 0                # total bytes received
        self.quiet = quiet
        self.ret_code = 1           # sets to 0 when finish properly

    def run(self):
        logging.debug("ThRecv %s: run", self.getName())
        try:
            # TODO: Workaround, it didn't work with select :-/
            while not self.exitevent.isSet():
                try:
                    chunk = self.port.recv(self.blocklen)
                    self.idx += len(chunk)
                except socket.timeout:
                    pass
            self.port.settimeout(self._port_timeout)
            logging.debug("ThRecv %s: exit(%d)", self.getName(), self.idx)
        except Exception as exc:
            if not self.quiet:
                raise exc
            logging.debug(exc)
        self.ret_code = 0
class ThRecvCheck(Thread):

    """
    Random data receiver/checker thread.

    Receives data from a port and verifies each byte against the control
    FIFO filled by ThSendCheck, tolerating a bounded amount of loss or
    duplication around port reconnections (migration).
    """

    def __init__(self, port, buff, exit_event, blocklen=1024, sendlen=0,
                 migrate_event=None, debug=None):
        """
        :param port: Source port.
        :param buff: Control data buffer (FIFO).
        :param exit_event: Exit event.
        :param blocklen: Block length.
        :param sendlen: Block length of the send function (on guest)
        :param migrate_event: Event indicating port was changed and is ready.
        :param debug: Set the execution mode, when nothing run normal.
        """
        Thread.__init__(self)
        self.port = port
        self.buff = buff
        self.exitevent = exit_event
        self.migrate_event = migrate_event
        self.blocklen = blocklen
        self.idx = 0                # total bytes verified
        self.sendlen = sendlen + 1  # >=
        self.ret_code = 1           # sets to 0 when finish properly
        self.debug = debug          # see the self.run_* docstrings for details
        # self.sendidx is the maxiaml number of skipped/duplicated values
        # 1) autoreload when the host socket is reconnected. In this case
        #    it waits <30s for migrate_event and reloads sendidx to sendlen
        # 2) manual write to this value (eg. before you reconnect guest port).
        # RecvThread decreases this value whenever data loss/dup occurs.
        self.sendidx = -1
        self.minsendidx = self.sendlen

    def reload_loss_idx(self):
        """
        This function reloads the acceptable loss to the original value
        (Reload the self.sendidx to self.sendlen)

        :note: This function is automatically called during port reconnection.
        """
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
            logging.debug("ThRecvCheck %s: Previous data loss was %d.",
                          self.getName(), (self.sendlen - self.sendidx))
        self.sendidx = self.sendlen

    def run(self):
        """ Pick the right mode and execute it """
        if self.debug == 'debug':
            self.run_debug()
        elif self.debug == 'normal' or not self.debug:
            self.run_normal()
        else:
            logging.error('ThRecvCheck %s: Unsupported debug mode, using '
                          'normal mode.', self.getName())
            self.run_normal()

    def run_normal(self):
        """
        Receives data and verifies, whether they match the self.buff (queue).
        It allow data loss up to self.sendidx which can be manually loaded
        after host socket reconnection or you can overwrite this value from
        other thread.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        _err_msg_missing_migrate_ev = ("ThRecvCheck %s: Broken pipe. If "
                                       "this is expected behavior set migrate_event to "
                                       "support reconnection." % self.getName())
        _err_msg_exception = ('ThRecvCheck ' + str(self.getName()) + ': Got '
                              'exception %s, continuing')
        _err_msg_disconnect = ('ThRecvCheck ' + str(self.getName()) + ': Port '
                               'disconnected, waiting for new port.')
        _err_msg_reconnect = ('ThRecvCheck ' + str(self.getName()) + ': Port '
                              'reconnected, continuing.')
        attempt = 10
        while not self.exitevent.isSet():
            try:
                ret = select.select([self.port.sock], [], [], 1.0)
            except Exception as inst:
                # self.port is not yet set while reconnecting
                if self.port.sock is None:
                    logging.debug(_err_msg_disconnect)
                    while self.port.sock is None:
                        if self.exitevent.isSet():
                            break
                        time.sleep(0.1)
                    logging.debug(_err_msg_reconnect)
                else:
                    logging.debug(_err_msg_exception, inst)
                continue
            if ret[0] and (not self.exitevent.isSet()):
                try:
                    buf = self.port.sock.recv(self.blocklen)
                except Exception as inst:
                    # self.port is not yet set while reconnecting
                    if self.port.sock is None:
                        logging.debug(_err_msg_disconnect)
                        while self.port.sock is None:
                            if self.exitevent.isSet():
                                break
                            time.sleep(0.1)
                        logging.debug(_err_msg_reconnect)
                    else:
                        logging.debug(_err_msg_exception, inst)
                    continue
                if buf:
                    # Compare the received data with the control data
                    for char in bytearray(buf):
                        char = struct.pack('B', char)
                        _char = self.buff.popleft()
                        if char == _char:
                            self.idx += 1
                        else:
                            # TODO BUG: data from the socket on host can
                            # be lost during migration
                            # Skip up to self.sendidx control bytes before
                            # declaring the stream corrupted.
                            while char != _char:
                                if self.sendidx > 0:
                                    self.sendidx -= 1
                                    _char = self.buff.popleft()
                                else:
                                    self.exitevent.set()
                                    logging.error("ThRecvCheck %s: "
                                                  "Failed to recv %dth "
                                                  "character",
                                                  self.getName(), self.idx)
                                    logging.error("ThRecvCheck %s: "
                                                  "%s != %s",
                                                  self.getName(),
                                                  repr(char), repr(_char))
                                    logging.error("ThRecvCheck %s: "
                                                  "Recv = %s",
                                                  self.getName(), repr(buf))
                                    # sender might change the buff :-(
                                    time.sleep(1)
                                    _char = b""
                                    for buf in self.buff:
                                        _char += buf
                                        _char += b' '
                                    logging.error("ThRecvCheck %s: "
                                                  "Queue = %s",
                                                  self.getName(), repr(_char))
                                    logging.info("ThRecvCheck %s: "
                                                 "MaxSendIDX = %d",
                                                 self.getName(),
                                                 (self.sendlen - self.sendidx))
                                    raise exceptions.TestFail("ThRecvCheck %s: "
                                                              "incorrect data" %
                                                              self.getName())
                    attempt = 10
                else:   # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail(
                                _err_msg_missing_migrate_ev)
                        logging.debug("ThRecvCheck %s: Broken pipe "
                                      ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet() or
                                   self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug("ThRecvCheck %s: Broken pipe resumed, "
                                      "reconnecting...", self.getName())
                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.error("ThRecvCheck %s: Data loss occurred during socket"
                          "reconnection. Maximal loss was %d per one "
                          "migration.", self.getName(),
                          (self.sendlen - self.minsendidx))
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                      self.idx)
        self.ret_code = 0

    def run_debug(self):
        """
        viz run_normal.
        Additionally it stores last n verified characters and in
        case of failures it quickly receive enough data to verify failure or
        allowed loss and then analyze this data. It provides more info about
        the situation.
        Unlike normal run this one supports booth - loss and duplications.
        It's not friendly to data corruption.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        attempt = 10
        max_loss = 0
        sum_loss = 0
        verif_buf = deque(maxlen=max(self.blocklen, self.sendlen))
        while not self.exitevent.isSet():
            ret = select.select([self.port.sock], [], [], 1.0)
            if ret[0] and (not self.exitevent.isSet()):
                buf = self.port.sock.recv(self.blocklen)
                if buf:
                    # Compare the received data with the control data
                    for idx_char in xrange(len(buf)):
                        _char = self.buff.popleft()
                        char = struct.pack('B', (bytearray(buf)[idx_char]))
                        if char == _char:
                            self.idx += 1
                            verif_buf.append(_char)
                        else:
                            # Detect the duplicated/lost characters.
                            logging.debug("ThRecvCheck %s: fail to receive "
                                          "%dth character.", self.getName(),
                                          self.idx)
                            buf = buf[idx_char:]
                            # Give the sender up to 1s to fill the queue.
                            for i in xrange(100):
                                if len(self.buff) < self.sendidx:
                                    time.sleep(0.01)
                                else:
                                    break
                            sendidx = min(self.sendidx, len(self.buff))
                            if sendidx < self.sendidx:
                                logging.debug("ThRecvCheck %s: sendidx was "
                                              "lowered as there is not enough "
                                              "data after 1s. Using sendidx="
                                              "%s.", self.getName(), sendidx)
                            for _ in xrange(sendidx // self.blocklen):
                                if self.exitevent.isSet():
                                    break
                                buf += self.port.sock.recv(self.blocklen)
                            queue = _char
                            for _ in xrange(sendidx):
                                queue += self.buff[_]
                            # Align received data against the expected queue
                            # in both directions to classify DUP vs LOSS.
                            offset_a = None
                            offset_b = None
                            for i in xrange(sendidx):
                                length = min(len(buf[i:]), len(queue))
                                if buf[i:] == queue[:length]:
                                    offset_a = i
                                    break
                            for i in xrange(sendidx):
                                length = min(len(queue[i:]), len(buf))
                                if queue[i:][:length] == buf[:length]:
                                    offset_b = i
                                    break
                            if (offset_b and offset_b < offset_a) or offset_a:
                                # Data duplication
                                self.sendidx -= offset_a
                                max_loss = max(max_loss, offset_a)
                                sum_loss += offset_a
                                logging.debug("ThRecvCheck %s: DUP %s (out of "
                                              "%s)", self.getName(), offset_a,
                                              sendidx)
                                buf = buf[offset_a + 1:]
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                verif_buf.extend(buf)
                                self.idx += len(buf)
                            elif offset_b:  # Data loss
                                max_loss = max(max_loss, offset_b)
                                sum_loss += offset_b
                                logging.debug("ThRecvCheck %s: LOST %s (out of"
                                              " %s)", self.getName(), offset_b,
                                              sendidx)
                                # Pop-out the lost characters from verif_queue
                                # (first one is already out)
                                self.sendidx -= offset_b
                                for i in xrange(offset_b - 1):
                                    self.buff.popleft()
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                self.idx += len(buf)
                                verif_buf.extend(buf)
                            else:   # Too big data loss or duplication
                                verif = ""
                                for _ in xrange(-min(sendidx, len(verif_buf)),
                                                0):
                                    verif += verif_buf[_]
                                logging.error("ThRecvCheck %s: mismatched data"
                                              ":\nverified: ..%s\nreceived: "
                                              "%s\nsent: %s",
                                              self.getName(), repr(verif),
                                              repr(buf), repr(queue))
                                raise exceptions.TestFail("Recv and sendqueue "
                                                          "don't match with any offset.")
                            # buf was changed, break from this loop
                            attempt = 10
                            break
                    attempt = 10
                else:   # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail("ThRecvCheck %s: Broken pipe."
                                                      " If this is expected behavior set migrate"
                                                      "_event to support reconnection." %
                                                      self.getName())
                        logging.debug("ThRecvCheck %s: Broken pipe "
                                      ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet() or
                                   self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug("ThRecvCheck %s: Broken pipe resumed, "
                                      "reconnecting...", self.getName())
                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.debug("ThRecvCheck %s: Data loss occurred during socket"
                          "reconnection. Maximal loss was %d per one "
                          "migration.", self.getName(),
                          (self.sendlen - self.minsendidx))
        if sum_loss > 0:
            logging.debug("ThRecvCheck %s: Data offset detected, cumulative "
                          "err: %d, max err: %d(%d)", self.getName(), sum_loss,
                          max_loss, float(max_loss) / self.blocklen)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                      self.idx)
        self.ret_code = 0
|
ldoktor/avocado-vt
|
virttest/qemu_virtio_port.py
|
Python
|
gpl-2.0
| 44,379
|
#!/usr/bin/env python
'''
find max value in a file

Scans every trace of the RSF input and writes the single sample with the
largest absolute value (keeping its sign) to the 1-sample RSF output.
'''
import rsf.api as rsf
import numpy as np
import sys

par = rsf.Par()
verb = par.bool('verb',False) # verbosity flag

Fin = rsf.Input()             # input
n1 = Fin.int("n1")            # samples per trace
nn = Fin.size(1)              # number of traces
din = np.zeros(n1,'f')        # reusable trace buffer

Fou = rsf.Output()            # output: a single scalar sample
Fou.put("n1",1)
Fou.put("o1",0)
Fou.put('d1',1)
Fou.put("n2",1)
Fou.put("n3",1)
Fou.put("n4",1)
dou = np.zeros(1,'f')

mymax=0
for i in range(nn):
    Fin.read(din)
    # Vectorized per-trace peak: pick the sample with the largest |value|
    # (replaces the original per-sample Python loop; numpy is already imported).
    if din.size:
        peak = din[np.abs(din).argmax()]
        if abs(peak) > abs(mymax):
            mymax = peak

dou[0]=mymax
Fou.write(dou)
print >> sys.stderr,'max=',mymax

# ------------------------------------------------------------
Fin.close()
Fou.close()
|
zxtstarry/src
|
user/psava/Mfindmax.py
|
Python
|
gpl-2.0
| 741
|
import numpy as np
from numpy import linalg as LA
from discreteMarkovChain import markovChain
class randomWalk(markovChain):
    """
    A random walk where we move up and down with rate 1.0 in each
    state between bounds m and M.

    For the transition function to work well, we define some
    class variables in the __init__ function.
    """
    def __init__(self, m, M):
        super(randomWalk, self).__init__()
        self.initialState = m
        self.m = m
        self.M = M
        self.uprate = 1.0
        self.downrate = 1.0

    def transition(self, state):
        # Return the reachable neighbour states mapped to their rates.
        # Interior states can move both ways; the bounds reflect inward.
        if self.m < state < self.M:
            return {state + 1: self.uprate, state - 1: self.downrate}
        if state == self.m:
            return {state + 1: self.uprate}
        if state == self.M:
            return {state - 1: self.downrate}
        return {}
# Compute expected hitting times of the hitting set via value iteration:
# k(s) = 1 + sum_t P[s,t] * k(t), with k fixed to 0 on the hitting set.
mc = randomWalk(0,5)
P = mc.getTransitionMatrix()
hittingset=[0]
one = np.ones(mc.size)
one[hittingset] = 0          # no +1 cost once inside the hitting set
k = np.zeros(mc.size)
# First variant: a fixed number of iterations (100).
for i in range(100):
    k = P.dot(k)+one
    k[hittingset] = 0
print(k)
# 0/1 indicator mask of the hitting set, used by np.putmask below.
mask = np.zeros(mc.size)
for i in range(mc.size):
    if i in hittingset:
        mask[i]=1
# Second variant: iterate until the fixed point converges (tolerance 1e-6),
# also counting how many iterations were needed.
k1 = np.zeros(mc.size)
k2 = one + P.dot(k1)
i = 0
while(LA.norm(k1-k2)>1e-6):
    k1=k2
    k2 = one + P.dot(k1)
    np.putmask(k2, mask, 0)
    i += 1
print(k2)
print(i)
|
gvanderheide/discreteMarkovChain
|
discreteMarkovChain/hitting_time.py
|
Python
|
mit
| 1,491
|
__author__ = 'wenjusun'
import os
import shutil
from multiprocessing.pool import ThreadPool
from PIL import Image
import time
def get_current_time():
    """Return the current wall-clock time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
class PhotoSelector():

    """ copy specified photo to destination"""

    def __init__(self,src_folder,dest_folder):
        # Source and destination directories; destination is created on demand.
        self.src_folder = src_folder
        self.dest_folder = dest_folder

        if not os.path.exists(dest_folder):
            os.makedirs(dest_folder)
            print "Destination does not exist,created."
        else:
            print "Photos will be copied to an existing folder."

    def copy_photo(self,src):
        # copy2 preserves file metadata (timestamps) along with content.
        shutil.copy2(src,self.dest_folder)

    def copy_selected_photos(self,selected_list_file):
        ''' selected list is a file contains string separated by comma '''
        #get file content
        id_str = ''
        with open(selected_list_file) as f:
            id_str += f.readline()

        photo_name_list = id_str.split(',')

        src_photos = []
        for photo in photo_name_list:
            # shutil.copy(file_pattern %(src_folder,photo),dest_folder)
            src_photos.append(os.path.join(self.src_folder,photo))

        # Copy files concurrently; copying is I/O bound so threads help.
        pool = ThreadPool(4)
        print pool.map(self.copy_photo,src_photos)
        pool.close()
        pool.join()

        print "%d photos are copied to %s" %(len(photo_name_list),self.dest_folder)
class PhotoShop():

    """Batch-resize photos from src_folder into dest_folder."""

    def __init__(self,src_folder,dest_folder,final_size):
        self.src_folder = src_folder
        self.dest_folder = dest_folder
        # Target width in pixels; height is scaled to keep the aspect ratio.
        self.final_size = final_size

        if not os.path.exists(dest_folder):
            os.makedirs(dest_folder)
            print "Destination does not exist,created."
        else:
            print "Photos will be copied to an existing folder."

    def compress_photo(self,photo_file):
        """Resize one image to final_size width, preserving aspect ratio."""
        image = Image.open(os.path.join(self.src_folder,photo_file))
        om = image.copy()
        ox,oy = om.size
        # (width, height) bounding box; thumbnail() keeps the aspect ratio.
        n_size=self.final_size,int(float(self.final_size)/ox*oy)
        om.thumbnail(n_size,Image.ANTIALIAS)
        om.save(os.path.join(self.dest_folder,photo_file))

    def batch_compress_photos(self):
        # Threaded variant: image I/O releases the GIL enough to benefit.
        pool = ThreadPool(4)
        photos = os.listdir(self.src_folder)
        print "%d photos founded in %s" %(len(photos),self.src_folder)
        pool.map(self.compress_photo,photos)
        pool.close()
        pool.join()
        print "%d photos compressed to width:%d" %(len(photos),self.final_size)

    def batch_compress_photos2(self):
        "This takes double time of the Threadpool one."
        photos = os.listdir(self.src_folder)
        print "%d photos founded in %s" %(len(photos),self.src_folder)
        for photo in photos:
            self.compress_photo(photo)
        print "%d photos compressed to width:%d" %(len(photos),self.final_size)
def copy_photo(src):
    # NOTE(review): relies on the module-global 'dest_folder' that is only
    # assigned inside the __main__ guards below -- calling this from an
    # importing module raises NameError. Confirm whether this free function
    # is still needed alongside PhotoSelector.copy_photo.
    shutil.copy2(src,dest_folder)
# Disabled entry point: the bogus name '__main22__' never matches, so this
# PhotoSelector demo is intentionally dead code kept for reference.
if __name__ == '__main22__':
    start_time =get_current_time()

    dest_folder = r"c:\ZZZZZ\abc"
    src_folder = r"C:\Users\wenjusun\Pictures\2016_02_10"

    ps = PhotoSelector(src_folder,dest_folder)
    ps.copy_selected_photos(r"c:\ZZZZZ\0-sunwj\simglist.txt")

    end_time =get_current_time()
    print "%d seconds used " % ((end_time-start_time)/1000)

if __name__ == '__main__':
    start_time =get_current_time()

    dest_folder = r"c:\ZZZZZ\abc3"
    src_folder = r"C:\Users\wenjusun\Pictures\timages"
    # NOTE(review): the assignment below immediately overrides the one above;
    # presumably a leftover from switching test inputs.
    src_folder = r"C:\ZZZZZ\1-photo\2"

    ps = PhotoShop(src_folder,dest_folder,600)
    ps.batch_compress_photos2()

    end_time =get_current_time()
    print "%d seconds used " % ((end_time-start_time)/1000)
|
swenker/studio
|
shijing_img/tools/toolbox/photo_handler.py
|
Python
|
apache-2.0
| 3,619
|
#!/usr/bin/env python3
"""Test GAUGE main."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
# pylint: disable=import-error
from faucet.__main__ import parse_args, build_ryu_args
class MainTestCase(unittest.TestCase):  # pytype: disable=module-attr

    """Test __main__ methods."""

    def test_parse_args(self):
        """Sanity check argument parsing."""
        # --verbose is off by default and set when passed.
        self.assertFalse(parse_args([]).verbose)
        self.assertTrue(parse_args(['--verbose']).verbose)

    def test_build_ryu_args(self):
        """Test build_ryu_args()."""
        # Normal invocation yields args; --version short-circuits to falsy.
        self.assertTrue(build_ryu_args(['gauge', '--use-stderr', '--use-syslog', '--verbose']))
        self.assertFalse(build_ryu_args(['gauge', '--version']))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()  # pytype: disable=module-attr
|
anarkiwi/faucet
|
tests/unit/gauge/test_main.py
|
Python
|
apache-2.0
| 1,494
|
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import gen
class Error(Exception):
    """Generic supybot error that can wrap an underlying exception.

    ``msg`` is the human-readable message; ``e`` is an optional wrapped
    exception whose rendering (via ``gen.exnToString``) is appended on a
    new line when present.
    """
    def __init__(self, msg, e=None):
        self.msg = msg
        self.e = e

    def __str__(self):
        if self.e is None:
            return self.msg
        return os.linesep.join([self.msg, gen.exnToString(self.e)])
# vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78:
|
buildbot/supybot
|
src/utils/error.py
|
Python
|
bsd-3-clause
| 1,941
|
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext
from .base import BaseBackend
class EmailBackend(BaseBackend):
    """Notification backend that delivers notices via Django email."""

    spam_sensitivity = 2

    def can_send(self, user, notice_type, scoping):
        """A user is reachable only when the base checks pass and the user
        has an email address on record."""
        allowed = super().can_send(user, notice_type, scoping)
        return bool(allowed and user.email)

    def deliver(self, recipient, sender, notice_type, extra_context):
        """Render the subject/body templates for *notice_type* and send a
        single email to *recipient*."""
        # TODO: require this to be passed in extra_context
        context = self.default_context()
        context["recipient"] = recipient
        context["sender"] = sender
        context["notice"] = ugettext(notice_type.display)
        context.update(extra_context)

        messages = self.get_formatted_messages(
            ("short.txt", "full.txt"), notice_type.label, context
        )

        # The short message, collapsed to one line, becomes the subject.
        context["message"] = messages["short.txt"]
        subject = "".join(
            render_to_string("pinax/notifications/email_subject.txt", context).splitlines()
        )

        # The full message becomes the body.
        context["message"] = messages["full.txt"]
        body = render_to_string("pinax/notifications/email_body.txt", context)

        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [recipient.email])
|
pinax/pinax-notifications
|
pinax/notifications/backends/email.py
|
Python
|
mit
| 1,393
|
import re
from flask import Blueprint, request, render_template
import jinja2
from .models import SystemEvent, SYSLOG_FACILITY, SYSLOG_LEVEL
from mountain.app import app
from mountain.query.parser import QueryParser, ParserError
# Define the blueprint
mod_rsyslog = Blueprint('rsyslog', __name__)
# Syslog specifics
# Maps a facility name (e.g. 'kern') to its numeric syslog facility code.
SYSLOG_FACILITY_MAP = {facility: i for i, facility in enumerate(SYSLOG_FACILITY)}
# Facility codes shown when the request supplies no explicit ?facility= filter.
SYSLOG_FACILITY_DEFAULT = (
    SYSLOG_FACILITY.index('kern'),
    SYSLOG_FACILITY.index('user'),
    SYSLOG_FACILITY.index('daemon'),
    SYSLOG_FACILITY.index('auth'),
    SYSLOG_FACILITY.index('syslog'),
    SYSLOG_FACILITY.index('authpriv'),
)
# Link filters
# (search-term prefix, compiled pattern) pairs; every substring of a log
# message matched by a pattern is turned into a drill-down search link by
# rsyslog_link() below.
LINK_FILTERS = (
    ( # IPv4 address
        'message:',
        re.compile(r'\b((?:\d{1,3}\.){3}\d{1,3})\b'),
    ),
    ( # Mac address
        'message:',
        re.compile(r'\b((?:[0-9a-fA-F]{2}[:-]){5}(?:[0-9a-fA-F]{2}))\b'),
    ),
    ( # Postfix queue id
        'message:',
        re.compile(r'\b([0-9A-F]{10})\b'),
    ),
    ( # user
        'message:',
        re.compile(r'user[ =](\S+)', re.I),
    ),
)
def rsyslog_link(message, query=''):
    """Wrap recognised tokens in *message* with drill-down search links.

    Every substring matched by one of the LINK_FILTERS patterns becomes an
    anchor that appends a quoted term for that token to the current query
    string.  Returns markup that Jinja will not re-escape.
    """
    hits = {}
    for term, pattern in LINK_FILTERS:
        for token in pattern.findall(message):
            hits[token] = term
    prefix = '+' + query if query else query
    for token, term in hits.items():
        encoded = ''.join(['%{:02x}'.format(ord(c)) for c in token])
        message = message.replace(
            token,
            '<a href="?_q={query}{term}\'{link}\'">{hit}</a>'.format(
                query=prefix,
                hit=token,
                link=encoded,
                term=term,
            )
        )
    return jinja2.Markup(message)


# Expose the helper as a Jinja template filter.
app.jinja_env.filters['rsyslog_link'] = rsyslog_link
@mod_rsyslog.route('/')
def index():
    """Render the paginated system-event listing.

    Honours ``?page=``, repeated ``?facility=`` filters (falling back to
    SYSLOG_FACILITY_DEFAULT on absence or bad values) and a free-form
    ``?_q=`` query parsed by QueryParser; a parse failure is surfaced to
    the template via ``query_error``.
    """
    try:
        page = int(request.args.get('page', 1))
    except (KeyError, ValueError):
        page = 1
    facilities = request.args.getlist('facility')
    facilities = facilities or SYSLOG_FACILITY_DEFAULT
    try:
        facilities = map(int, facilities)
    except ValueError:
        facilities = SYSLOG_FACILITY_DEFAULT
    filters = None
    query = request.args.get('_q', '')
    query_error = None
    if query:
        try:
            filters = QueryParser(
                SystemEvent, 'message',
            ).parse(query)
        except (ParserError, SyntaxError) as exc:
            # Bind explicitly: relying on the `as` name after the handler
            # is not portable to Python 3, where it is unbound on exit.
            query_error = exc
    if filters is not None:
        events = SystemEvent.query.filter(filters)
    else:
        events = SystemEvent.query.filter(
            SystemEvent.facility.in_(facilities),
        )
    events = events.order_by('receivedat DESC')
    pager = events.paginate(page, 50, False)
    return render_template(
        'rsyslog/index.html',
        pager=pager,
        query=query,
        query_error=query_error,
        facility=SYSLOG_FACILITY,
        current_facilities=facilities,
    )
|
tehmaze-labs/mountain
|
mountain/rsyslog/views.py
|
Python
|
mit
| 2,967
|
"""SCons.Tool.filesystem
Tool-specific initialization for the filesystem tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/filesystem.py 5023 2010/06/14 22:05:46 scons"
import SCons
from SCons.Tool.install import copyFunc
copyToBuilder, copyAsBuilder = None, None
def copyto_emitter(target, source, env):
    """Rewrite the target list so each source lives under every target.

    Targets are assumed to be directory nodes; the emitted target list
    holds one file node per (target directory, source) pair, directory by
    directory.
    """
    emitted = []
    for tdir in target:
        emitted.extend(tdir.File(str(src)) for src in source)
    return (emitted, source)
def copy_action_func(target, source, env):
    """Copy each source node onto its paired target node.

    Returns 0 when every copy succeeds, 1 on the first failure reported
    by copyFunc.
    """
    assert( len(target) == len(source) ), "\ntarget: %s\nsource: %s" %(list(map(str, target)),list(map(str, source)))
    for dst, src in zip(target, source):
        if copyFunc(dst.get_path(), src.get_path(), env):
            return 1
    return 0
def copy_action_str(target, source, env):
    """Return the display string for the copy action (COPYSTR expanded)."""
    template = env['COPYSTR']
    return env.subst_target_source(template, 0, target, source)
copy_action = SCons.Action.Action( copy_action_func, copy_action_str )
def generate(env):
    """Install the CopyTo and CopyAs builders into *env*.

    Idempotent: if both builders are already registered nothing happens.
    The builder objects themselves are created lazily once and cached in
    module globals so repeated environments share them.
    """
    try:
        # Probe for both builders; a KeyError means at least one is missing.
        env['BUILDERS']['CopyTo']
        env['BUILDERS']['CopyAs']
    except KeyError:  # fixed: old-style `except KeyError, e` bound an unused name
        global copyToBuilder
        if copyToBuilder is None:
            copyToBuilder = SCons.Builder.Builder(
                       action         = copy_action,
                       target_factory = env.fs.Dir,
                       source_factory = env.fs.Entry,
                       multi          = 1,
                       emitter        = [ copyto_emitter, ] )
        global copyAsBuilder
        if copyAsBuilder is None:
            copyAsBuilder = SCons.Builder.Builder(
                       action         = copy_action,
                       target_factory = env.fs.Entry,
                       source_factory = env.fs.Entry )
        env['BUILDERS']['CopyTo'] = copyToBuilder
        env['BUILDERS']['CopyAs'] = copyAsBuilder
        env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"'
def exists(env):
    """The filesystem tool has no external prerequisites: always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
kerwinxu/barcodeManager
|
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/filesystem.py
|
Python
|
bsd-2-clause
| 3,577
|
"""
.. module:: Image
:synopsis: A image database model.
.. moduleauthor:: Dan Schlosser <dan@schlosser.io>
"""
import re
import os
from datetime import datetime
from flask import url_for, current_app
from mongoengine import (Document, DateTimeField, StringField, ReferenceField,
signals)
from eventum.models import BaseEventumDocument
from eventum.lib.regex import Regex
now = datetime.now
class Image(Document, BaseEventumDocument):
    """
    :ivar date_created: :class:`mongoengine.fields.DateTimeField` - The date
        when the document was created, localized to the server.
    :ivar date_modified: :class:`mongoengine.fields.DateTimeField` - The date
        when the document was last modified, localized to the server.
    :ivar filename: :class:`mongoengine.fields.StringField` - The filename with
        extension of the image.
    :ivar creator: :class:`mongoengine.fields.ReferenceField` - Reference to
        the User that uploaded the photo.
    :ivar caption: :class:`mongoengine.fields.StringField` - A caption for the
        photo.
    :ivar source: :class:`mongoengine.fields.StringField` - A source credit for
        the picture, if one is needed.
    :ivar default_path: :class:`mongoengine.fields.StringField` - The path to
        the image that should be used.
    """
    # MongoEngine ORM metadata
    meta = {
        'allow_inheritance': True,
        'indexes': ['creator'],
        'ordering': ['-date_created']
    }
    date_created = DateTimeField(default=now, required=True)
    date_modified = DateTimeField(default=now, required=True)
    filename = StringField(unique=True,
                           max_length=255,
                           required=True,
                           regex=Regex.FULL_FILENAME_REGEX)
    creator = ReferenceField('User', required=True)
    caption = StringField()
    source = StringField()
    default_path = StringField(required=True)

    def url(self):
        """Returns the URL path that points to the image.

        :returns: The URL path like ``"/static/img/cat.jpg"``.
        :rtype: str
        """
        return url_for('media.file', filename=self.filename)

    def clean(self):
        """Called by Mongoengine on every ``.save()`` to the object.

        Update date_modified, and make a relative default_path absolute
        under the application base directory.

        :raises: :class:`wtforms.validators.ValidationError`
        """
        self.date_modified = now()
        if not re.compile(Regex.VALID_PATHS).match(self.default_path):
            self.default_path = os.path.join(
                current_app.config['EVENTUM_BASEDIR'],
                self.default_path)

    @classmethod
    def post_delete(cls, sender, document, **kwargs):
        """Called by Mongoengine after the object has been deleted.

        Moves the deleted image's associated file into the configured
        delete folder, creating that folder on first use.
        """
        old_path = document.default_path
        _, filename = os.path.split(document.default_path)
        delete_folder = current_app.config['EVENTUM_DELETE_FOLDER']
        if not os.path.isdir(delete_folder):
            os.mkdir(delete_folder)
        new_path = os.path.join(delete_folder, filename)
        try:
            os.rename(old_path, new_path)
        except OSError:
            # Fixed: os.rename raises OSError; the previous `except IOError`
            # would not catch it on Python 2.
            pass  # TODO: Do something real if this fails. Logging?

    def __unicode__(self):
        """This image, as a unicode string.

        :returns: The filename of the image.
        :rtype: str
        """
        return self.filename

    def __repr__(self):
        """The representation of this image.

        :returns: The image's details.
        :rtype: str
        """
        rep = 'Photo(filename={}, default_path={}, caption={})'.format(
            self.filename,
            self.default_path,
            self.caption
        )
        return rep


# Connects the ``post_delete`` method using the signals library.
signals.post_delete.connect(Image.post_delete, sender=Image)
|
danrschlosser/eventum
|
eventum/models/Image.py
|
Python
|
mit
| 3,940
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Doctorcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Doctorcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
juanbond/doctorcoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,842
|
from django.conf.urls import url
from .views import HomePageView
# Route the site root to the class-based homepage view.
urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='homepage'),
]
|
mittya/duoclub
|
duoclub/homepage/urls.py
|
Python
|
mit
| 143
|
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio WebSearch Administrator Interface."""
__revision__ = "$Id$"
import cgi
import random
import time
import sys
from six import iteritems
from invenio.utils.date import strftime
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import \
CFG_CACHEDIR, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_URL,\
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBSEARCH_SHOW_COMMENT_COUNT, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_SHOW_REVIEW_COUNT, \
CFG_WEBLINKBACK_TRACKBACK_ENABLED, \
CFG_BIBRANK_SHOW_CITATION_LINKS, \
CFG_INSPIRE_SITE, \
CFG_CERN_SITE
from invenio.legacy.bibrank.adminlib import \
write_outcome, \
modify_translations, \
get_def_name, \
get_name, \
get_languages, \
addadminbox, \
tupletotable, \
createhiddenform
from invenio.legacy.dbquery import \
run_sql, \
get_table_update_time
from invenio.legacy.websearch_external_collections import \
external_collections_dictionary, \
external_collection_sort_engine_by_name, \
external_collection_get_state, \
external_collection_get_update_state_list, \
external_collection_apply_changes
from invenio.legacy.websearch_external_collections.utils import \
get_collection_descendants
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_STATES_NAME
#from invenio.modules.formatter.format_elements import bfe_references
#from invenio.modules.formatter.engine import BibFormatObject
from invenio.legacy.bibdocfile.api import BibRecDocs
from invenio.base.i18n import gettext_set_language
#from invenio.legacy.bibrank.citation_searcher import get_cited_by
from invenio.modules.access.control import acc_get_action_id
from invenio.modules.access.local_config import VIEWRESTRCOLL
from invenio.ext.logging import register_exception
from intbitset import intbitset
from invenio.legacy.bibrank.citation_searcher import get_cited_by, get_cited_by_count
from invenio.legacy.bibrecord import record_get_field_instances
def getnavtrail(previous=''):
    """Return the admin-area navigation trail with *previous* appended."""
    base = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,)
    return base + previous
def fix_collection_scores():
    """
    Re-calculate and re-normalize the scores of the collection relationship.

    For every parent collection, rewrites its children's scores as their
    0-based position in ascending score order, closing any gaps.
    """
    dads = intbitset(run_sql("SELECT id_dad FROM collection_collection"))
    for dad in dads:
        sons = run_sql("SELECT id_son FROM collection_collection WHERE id_dad=%s ORDER BY score ASC", (dad, ))
        for new_score, son_row in enumerate(sons):
            run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s AND id_son=%s", (new_score, dad, son_row[0]))
def perform_modifytranslations(colID, ln, sel_type='', trans=[], confirm=-1, callback='yes'):
    """Modify the translations of a collection
    sel_type - the nametype to modify
    trans - the translations in the same order as the languages from get_languages()"""
    # NOTE(review): mutable default `trans=[]` is shared across calls — it is
    # reassigned below before mutation, so currently harmless; confirm before
    # relying on it.
    output = ''
    subtitle = ''
    sitelangs = get_languages()
    # Box nametypes ('r'/'v'/'l') live in the collectionbox table; plain
    # collection names in the collection table.
    if sel_type in ('r', 'v', 'l'):
        table = 'collectionbox'
        identifier_column = "id_collection"
    else:
        table = 'collection'
        identifier_column = None
    if type(trans) is str:
        trans = [trans]
    # confirm == 2 means the form was submitted: persist the translations.
    if confirm in ["2", 2] and colID:
        finresult = modify_translations(colID, sitelangs, sel_type, trans, table, identifier_column)
    col_dict = dict(get_def_name('', "collection"))
    if colID and int(colID) in col_dict:
        colID = int(colID)
        subtitle = """<a name="3">3. Modify translations for collection '%s'</a>&nbsp;&nbsp;&nbsp;<small>[<a href="%s/help/admin/websearch-admin-guide#3.3">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
        if sel_type == '':
            sel_type = get_col_nametypes()[0][0]
        header = ['Language', 'Translation']
        actions = []
        types = get_col_nametypes()
        types.extend([('v', '"Focus on" box'), ('r', '"Narrow by" box'), ('l', '"Latest additions" box')])
        if len(types) > 1:
            # Nametype selector, showing the current translation inline.
            text  = """
            <span class="adminlabel">Name type</span>
            <select name="sel_type" class="admin_w200">
            """
            for (key, value) in types:
                text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value)
                trans_names = get_name(colID, ln, key, "collection")
                if trans_names and trans_names[0][0]:
                    text += ": %s" % trans_names[0][0]
                text += "</option>"
            text += """</select>"""
            output += createhiddenform(action="modifytranslations#3",
                                       text=text,
                                       button="Select",
                                       colID=colID,
                                       ln=ln,
                                       confirm=0)
        if confirm in [-1, "-1", 0, "0"]:
            # Initial display: load the current translation per site language.
            trans = []
            for (key, value) in sitelangs:
                try:
                    # NOTE(review): argument order differs from the
                    # get_name(colID, ln, key, "collection") call above —
                    # verify against the adminlib signature.
                    trans_names = get_name(colID, key, sel_type, table, identifier_column)
                    trans.append(trans_names[0][0])
                except StandardError as e:
                    trans.append('')
        for nr in range(0, len(sitelangs)):
            actions.append(["%s" % (sitelangs[nr][1],)])
            actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
        text = tupletotable(header=header, tuple=actions)
        output += createhiddenform(action="modifytranslations#3",
                                   text=text,
                                   button="Modify",
                                   colID=colID,
                                   sel_type=sel_type,
                                   ln=ln,
                                   confirm=2)
        if sel_type and len(trans) and confirm in ["2", 2]:
            output += write_outcome(finresult)
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_modifytranslations", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_modifyrankmethods(colID, ln, func='', rnkID='', confirm=0, callback='yes'):
    """Modify which rank methods is visible to the collection
    func - remove or add rank method
    rnkID - the id of the rank method."""
    output = ""
    subtitle = ""
    col_dict = dict(get_def_name('', "collection"))
    rnk_dict = dict(get_def_name('', "rnkMETHOD"))
    if colID and int(colID) in col_dict:
        colID = int(colID)
        # func 0 = attach, func 1 = detach; only acted on when confirmed.
        # NOTE(review): finresult is only assigned in these two branches but
        # read further down — the later reads are guarded by the same
        # func/confirm conditions; confirm before refactoring.
        if func in ["0", 0] and confirm in ["1", 1]:
            finresult = attach_rnk_col(colID, rnkID)
        elif func in ["1", 1] and confirm in ["1", 1]:
            finresult = detach_rnk_col(colID, rnkID)
        subtitle = """<a name="9">9. Modify rank options for collection '%s'</a>&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.9">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
        output  = """
        <dl>
        <dt>The rank methods enabled for the collection '%s' is:</dt>
        """ % col_dict[colID]
        rnkmethods = get_col_rnk(colID, ln)
        output += """<dd>"""
        if not rnkmethods:
            output += """No rank methods"""
        else:
            for id, name in rnkmethods:
                output += """%s, """ % name
        output += """</dd>
        </dl>
        """
        # Offer only the rank methods not already enabled for this collection.
        rnk_list = get_def_name('', "rnkMETHOD")
        rnk_dict_in_col = dict(get_col_rnk(colID, ln))
        rnk_list = filter(lambda x: x[0] not in rnk_dict_in_col, rnk_list)
        if rnk_list:
            text = """
            <span class="adminlabel">Enable:</span>
            <select name="rnkID" class="admin_w200">
            <option value="-1">- select rank method -</option>
            """
            for (id, name) in rnk_list:
                text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
            text += """</select>"""
            output += createhiddenform(action="modifyrankmethods#9",
                                       text=text,
                                       button="Enable",
                                       colID=colID,
                                       ln=ln,
                                       func=0,
                                       confirm=1)
        if confirm in ["1", 1] and func in ["0", 0] and int(rnkID) != -1:
            output += write_outcome(finresult)
        elif confirm not in ["0", 0] and func in ["0", 0]:
            output += """<b><span class="info">Please select a rank method.</span></b>"""
        # Disable form: lists the rank methods currently attached.
        coll_list = get_col_rnk(colID, ln)
        if coll_list:
            text = """
            <span class="adminlabel">Disable:</span>
            <select name="rnkID" class="admin_w200">
            <option value="-1">- select rank method-</option>
            """
            for (id, name) in coll_list:
                text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
            text += """</select>"""
            output += createhiddenform(action="modifyrankmethods#9",
                                       text=text,
                                       button="Disable",
                                       colID=colID,
                                       ln=ln,
                                       func=1,
                                       confirm=1)
        if confirm in ["1", 1] and func in ["1", 1] and int(rnkID) != -1:
            output += write_outcome(finresult)
        elif confirm not in ["0", 0] and func in ["1", 1]:
            output += """<b><span class="info">Please select a rank method.</span></b>"""
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_modifyrankmethods", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_addcollectiontotree(colID, ln, add_dad='', add_son='', rtype='', mtype='', callback='yes', confirm=-1):
    """Form to add a collection to the tree.
    add_dad - the dad to add the collection to
    add_son - the collection to add
    rtype - add it as a regular or virtual
    mtype - add it to the regular or virtual tree."""
    output = ""
    output2 = ""
    subtitle = """Attach collection to tree&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.2">?</a>]</small>""" % (CFG_SITE_URL)
    col_dict = dict(get_def_name('', "collection"))
    if confirm not in [-1, "-1"] and not (add_son and add_dad and rtype):
        output2 += """<b><span class="info">All fields must be filled.</span></b><br /><br />
        """
    elif add_son and add_dad and rtype:
        add_son = int(add_son)
        add_dad = int(add_dad)
        if confirm not in [-1, "-1"]:
            if add_son == add_dad:
                output2 += """<b><span class="info">Cannot add a collection as a pointer to itself.</span></b><br /><br />
                """
            elif check_col(add_dad, add_son):
                # check_col guards against loops and duplicate links.
                res = add_col_dad_son(add_dad, add_son, rtype)
                output2 += write_outcome(res)
                if res[0] == 1:
                    output2 += """<b><span class="info"><br /> The collection will appear on your website after the next webcoll run. You can either run it manually or wait until bibsched does it for you.</span></b><br /><br />
                    """
            else:
                output2 += """<b><span class="info">Cannot add the collection '%s' as a %s subcollection of '%s' since it will either create a loop, or the association already exists.</span></b><br /><br />
                """ % (col_dict[add_son], (rtype=="r" and 'regular' or 'virtual'), col_dict[add_dad])
        # Reset the form fields after processing a submission.
        add_son = ''
        add_dad = ''
        rtype = ''
    # NOTE(review): `tree` is computed but never used below — confirm before
    # removing.
    tree = get_col_tree(colID)
    col_list = col_dict.items()
    col_list.sort(compare_on_val)
    output = show_coll_not_in_tree(colID, ln, col_dict)
    text = """
    <span class="adminlabel">Attach collection:</span>
    <select name="add_son" class="admin_w200">
    <option value="">- select collection -</option>
    """
    for (id, name) in col_list:
        if id != colID:
            text += """<option value="%s" %s>%s</option>""" % (id, str(id)==str(add_son) and 'selected="selected"' or '', name)
    text += """
    </select><br />
    <span class="adminlabel">to parent collection:</span>
    <select name="add_dad" class="admin_w200">
    <option value="">- select parent collection -</option>
    """
    for (id, name) in col_list:
        text += """<option value="%s" %s>%s</option>
        """ % (id, str(id)==add_dad and 'selected="selected"' or '', name)
    text += """</select><br />
    """
    text += """
    <span class="adminlabel">with relationship:</span>
    <select name="rtype" class="admin_w200">
    <option value="">- select relationship -</option>
    <option value="r" %s>Regular (Narrow by...)</option>
    <option value="v" %s>Virtual (Focus on...)</option>
    </select>
    """ % ((rtype=="r" and 'selected="selected"' or ''), (rtype=="v" and 'selected="selected"' or ''))
    output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollectiontotree" % CFG_SITE_URL,
                               text=text,
                               button="Add",
                               colID=colID,
                               ln=ln,
                               confirm=1)
    output += output2
    #output += perform_showtree(colID, ln)
    body = [output]
    if callback:
        return perform_index(colID, ln, mtype="perform_addcollectiontotree", content=addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_addcollection(colID, ln, colNAME='', dbquery='', callback="yes", confirm=-1):
    """form to add a new collection.
    colNAME - the name of the new collection
    dbquery - the dbquery of the new collection"""
    # NOTE(review): `dbquery` is accepted but never used — the collection is
    # always created with an empty query; confirm before changing the
    # signature.
    output = ""
    subtitle = """Create new collection&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.1">?</a>]</small>""" % (CFG_SITE_URL)
    text = """
    <span class="adminlabel">Default name</span>
    <input class="admin_w200" type="text" name="colNAME" value="%s" /><br />
    """ % colNAME
    output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollection" % CFG_SITE_URL,
                              text=text,
                              colID=colID,
                              ln=ln,
                              button="Add collection",
                              confirm=1)
    if colNAME and confirm in ["1", 1]:
        res = add_col(colNAME, '')
        output += write_outcome(res)
        if res[0] == 1:
            # Immediately offer to attach the newly created collection
            # (res[1] is its id) to the tree.
            output += perform_addcollectiontotree(colID=colID, ln=ln, add_son=res[1], callback='')
    elif confirm not in ["-1", -1]:
        output += """<b><span class="info">Please give the collection a name.</span></b>"""
    body = [output]
    if callback:
        return perform_index(colID, ln=ln, mtype="perform_addcollection", content=addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_modifydbquery(colID, ln, dbquery='', callback='yes', confirm=-1):
    """Form to modify the collection query (dbquery) of a collection.
    colID - the collection whose query is edited
    dbquery - the new dbquery of the collection
    confirm - -1 shows the current value; 1 applies the change"""
    subtitle = ''
    output = ""
    col_dict = dict(get_def_name('', "collection"))
    if colID and int(colID) in col_dict:
        colID = int(colID)
        subtitle = """<a name="1">1. Modify collection query for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.1">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
        if confirm == -1:
            # First visit: load the stored query.  Use a parameterized query
            # instead of %-interpolation, consistent with the other run_sql
            # calls in this module.
            res = run_sql("SELECT dbquery FROM collection WHERE id=%s", (colID, ))
            dbquery = res[0][0]
        if not dbquery:
            dbquery = ''
        # A collection should have either subcollections or a query, not both.
        reg_sons = len(get_col_tree(colID, 'r'))
        vir_sons = len(get_col_tree(colID, 'v'))
        if reg_sons > 1:
            if dbquery:
                output += "Warning: This collection got subcollections, and should because of this not have a collection query, for further explanation, check the WebSearch Guide<br />"
        elif reg_sons <= 1:
            if not dbquery:
                output += "Warning: This collection does not have any subcollections, and should because of this have a collection query, for further explanation, check the WebSearch Guide<br />"
        text = """
        <span class="adminlabel">Query</span>
        <input class="admin_w200" type="text" name="dbquery" value="%s" /><br />
        """ % cgi.escape(dbquery, 1)
        output += createhiddenform(action="modifydbquery",
                                   text=text,
                                   button="Modify",
                                   colID=colID,
                                   ln=ln,
                                   confirm=1)
        if confirm in ["1", 1]:
            res = modify_dbquery(colID, dbquery)
            if res:
                if dbquery == "":
                    text = """<b><span class="info">Query removed for this collection.</span></b>"""
                else:
                    text = """<b><span class="info">Query set for this collection.</span></b>"""
            else:
                text = """<b><span class="info">Sorry, could not change query.</span></b>"""
            output += text
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_modifydbquery", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_modifycollectiontree(colID, ln, move_up='', move_down='', move_from='', move_to='', delete='', rtype='', callback='yes', confirm=0):
    """Modify the collection tree: move a collection up or down, delete a
    collection from the tree, or change the father of a collection.
    colID - the main collection of the tree, the root
    move_up - move this collection up (the place in the tree, not the collection id)
    move_down - move this collection down (the place in the tree, not the collection id)
    move_from - move the collection at this place in the tree
    move_to - move the move_from collection and set this as its father (place in the tree)
    delete - delete this collection from the tree (the place in the tree)
    rtype - the type of the collection in the tree, regular ('r') or virtual ('v')"""
    colID = int(colID)
    tree = get_col_tree(colID, rtype)
    col_dict = dict(get_def_name('', "collection"))
    subtitle = """Modify collection tree: %s <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.3">?</a>] <a href="%s/admin/websearch/websearchadmin.py/showtree?colID=%s&ln=%s">Printer friendly version</a></small>""" % (col_dict[colID], CFG_SITE_URL, CFG_SITE_URL, colID, ln)
    fin_output = ""
    output = ""
    try:
        if move_up:
            # Swap the tree scores of the selected node and its predecessor.
            move_up = int(move_up)
            switch = find_last(tree, move_up)
            if switch and switch_col_treescore(tree[move_up], tree[switch]):
                output += """<b><span class="info">Moved the %s collection '%s' up and '%s' down.</span></b><br /><br />
                """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
            else:
                output += """<b><span class="info">Could not move the %s collection '%s' up and '%s' down.</span></b><br /><br />
                """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
        elif move_down:
            # Swap the tree scores of the selected node and its successor.
            move_down = int(move_down)
            switch = find_next(tree, move_down)
            if switch and switch_col_treescore(tree[move_down], tree[switch]):
                output += """<b><span class="info">Moved the %s collection '%s' down and '%s' up.</span></b><br /><br />
                """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_down][0]], col_dict[tree[switch][0]])
            else:
                # Bug fix: this failure branch previously indexed
                # tree[move_up], but move_up is always '' in the move_down
                # branch, so rendering the message raised and fell through
                # to the generic error handler below.
                output += """<b><span class="info">Could not move the %s collection '%s' down and '%s' up.</span></b><br /><br />
                """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_down][0]], col_dict[tree[switch][0]])
        elif delete:
            delete = int(delete)
            if confirm in [0, "0"]:
                # Ask for confirmation before detaching the subtree.
                if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
                    text = """<b>Do you want to remove the %s collection '%s' and its subcollections in the %s collection '%s'.</b>
                    """ % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], (rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
                else:
                    text = """<b>Do you want to remove all subcollections of the %s collection '%s'.</b>
                    """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
                output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
                                           text=text,
                                           button="Confirm",
                                           colID=colID,
                                           delete=delete,
                                           rtype=rtype,
                                           ln=ln,
                                           confirm=1)
                output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
                                           text="<b>To cancel</b>",
                                           button="Cancel",
                                           colID=colID,
                                           ln=ln)
            else:
                if remove_col_subcol(tree[delete][0], tree[delete][3], rtype):
                    if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
                        output += """<b><span class="info">Removed the %s collection '%s' and its subcollections in subdirectory '%s'.</span></b><br /><br />
                        """ % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], col_dict[tree[delete][3]])
                    else:
                        output += """<b><span class="info">Removed the subcollections of the %s collection '%s'.</span></b><br /><br />
                        """ % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
                else:
                    output += """<b><span class="info">Could not remove the collection from the tree.</span></b><br /><br />
                    """
                delete = ''
        elif move_from and not move_to:
            # First half of a re-parent operation: a source was picked,
            # ask the user to pick the destination.  move_from/move_to are
            # encoded as rtype letter + tree position, e.g. 'r3'.
            move_from_rtype = move_from[0]
            move_from_id = int(move_from[1:len(move_from)])
            text = """<b>Select collection to place the %s collection '%s' under.</b><br /><br />
            """ % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_from_id][0]])
            output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
                                       text=text,
                                       button="Cancel",
                                       colID=colID,
                                       ln=ln)
        elif move_from and move_to:
            move_from_rtype = move_from[0]
            move_from_id = int(move_from[1:len(move_from)])
            move_to_rtype = move_to[0]
            move_to_id = int(move_to[1:len(move_to)])
            tree_from = get_col_tree(colID, move_from_rtype)
            tree_to = get_col_tree(colID, move_to_rtype)
            if confirm in [0, '0']:
                if move_from_id == move_to_id and move_from_rtype == move_to_rtype:
                    output += """<b><span class="info">Cannot move to itself.</span></b><br /><br />
                    """
                elif tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype==move_to_rtype:
                    output += """<b><span class="info">The collection is already there.</span></b><br /><br />
                    """
                elif check_col(tree_to[move_to_id][0], tree_from[move_from_id][0]) or (tree_to[move_to_id][0] == 1 and tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype != move_to_rtype):
                    text = """<b>Move %s collection '%s' to the %s collection '%s'.</b>
                    """ % ((tree_from[move_from_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (tree_to[move_to_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
                    output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
                                               text=text,
                                               button="Confirm",
                                               colID=colID,
                                               move_from=move_from,
                                               move_to=move_to,
                                               ln=ln,
                                               rtype=rtype,
                                               confirm=1)
                    output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
                                               text="""<b>To cancel</b>""",
                                               button="Cancel",
                                               colID=colID,
                                               ln=ln)
                else:
                    output += """<b><span class="info">Cannot move the collection '%s' and set it as a subcollection of '%s' since it will create a loop.</span></b><br /><br />
                    """ % (col_dict[tree_from[move_from_id][0]], col_dict[tree_to[move_to_id][0]])
            else:
                if (move_to_id != 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id])) or (move_to_id == 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id], move_to_rtype)):
                    output += """<b><span class="info">Moved %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
                    """ % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
                else:
                    output += """<b><span class="info">Could not move %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
                    """ % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
                move_from = ''
                move_to = ''
        else:
            output += """
            """
    except StandardError:
        # User input (tree positions) may be stale or malformed; log the
        # exception and show a generic error instead of crashing the page.
        register_exception()
        return """<b><span class="info">An error occured.</span></b>
        """
    # Always render both trees (regular and virtual) side by side.
    output += """<table border ="0" width="100%">
    <tr><td width="50%">
    <b>Narrow by collection:</b>
    </td><td width="50%">
    <b>Focus on...:</b>
    </td></tr><tr><td valign="top">
    """
    tree = get_col_tree(colID, 'r')
    output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'r', "yes")
    output += """</td><td valign="top">
    """
    tree = get_col_tree(colID, 'v')
    output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'v', "yes")
    output += """</td>
    </tr>
    </table>
    """
    body = [output]
    if callback:
        return perform_index(colID, ln, mtype="perform_modifycollectiontree", content=addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_showtree(colID, ln):
    """Display the regular and virtual collection hierarchies side by side."""
    col_dict = dict(get_def_name('', "collection"))
    subtitle = "Collection tree: %s" % col_dict[int(colID)]
    # Assemble the two-column layout as a list of fragments and join once.
    parts = ["""<table border ="0" width="100%">
    <tr><td width="50%">
    <b>Narrow by collection:</b>
    </td><td width="50%">
    <b>Focus on...:</b>
    </td></tr><tr><td valign="top">
    """]
    parts.append(create_colltree(get_col_tree(colID, 'r'), col_dict, colID, ln, '', '', 'r', ''))
    parts.append("""</td><td valign="top">
    """)
    parts.append(create_colltree(get_col_tree(colID, 'v'), col_dict, colID, ln, '', '', 'v', ''))
    parts.append("""</td>
    </tr>
    </table>
    """)
    return addadminbox(subtitle, ["".join(parts)])
def perform_addportalbox(colID, ln, title='', body='', callback='yes', confirm=-1):
    """Form to create a new portalbox.
    title - the title of the new portalbox
    body - the body (HTML content) of the new portalbox"""
    col_dict = dict(get_def_name('', "collection"))
    colID = int(colID)
    subtitle = """<a name="5.1"></a>Create new portalbox"""
    text = """
    <span class="adminlabel">Title</span>
    <textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
    <span class="adminlabel">Body</span>
    <textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
    """ % (cgi.escape(title), cgi.escape(body))
    output = createhiddenform(action="addportalbox#5.1",
                              text=text,
                              button="Add",
                              colID=colID,
                              ln=ln,
                              confirm=1)
    if body and confirm in [1, "1"]:
        res = add_pbx(title, body)
        output += write_outcome(res)
        # Bug fix: success is signalled by res[0] == 1 (res[1] carries the
        # new portalbox id), matching the other add_* handlers in this
        # module; the old check res[1] == 1 only fired for portalbox id 1.
        if res[0] == 1:
            output += """<b><span class="info"><a href="addexistingportalbox?colID=%s&ln=%s&pbxID=%s#5">Add portalbox to collection</a></span></b>""" % (colID, ln, res[1])
    elif confirm not in [-1, "-1"]:
        output += """<b><span class="info">Body field must be filled.</span></b>
        """
    body = [output]
    return perform_showportalboxes(colID, ln, content=addadminbox(subtitle, body))
def perform_addexistingportalbox(colID, ln, pbxID=-1, score=0, position='', sel_ln='', callback='yes', confirm=-1):
    """Form to attach an already-existing portalbox to a collection.
    colID - the collection to add the portalbox to
    pbxID - the portalbox to add
    score - the importance of the portalbox.
    position - the position of the portalbox on the page
    sel_ln - the language of the portalbox"""
    subtitle = """<a name="5.2"></a>Add existing portalbox to collection"""
    output = ""
    colID = int(colID)
    res = get_pbx()
    pos = get_pbx_pos()
    lang = dict(get_languages())
    col_dict = dict(get_def_name('', "collection"))
    # id -> title map of all portalboxes.
    pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
    col_pbx = get_col_pbx(colID)
    col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
    if len(res) > 0:
        text = """
        <span class="adminlabel">Portalbox</span>
        <select name="pbxID" class="admin_w200">
        <option value="-1">- Select portalbox -</option>
        """
        # Option label: title plus a snippet of the body, capped at ~40 chars.
        for (id, t_title, t_body) in res:
            text += """<option value="%s" %s>%s - %s...</option>\n""" % \
                   (id, id == int(pbxID) and 'selected="selected"' or '',
                    t_title[:40], cgi.escape(t_body[0:40 - min(40, len(t_title))]))
        text += """</select><br />
        <span class="adminlabel">Language</span>
        <select name="sel_ln" class="admin_w200">
        <option value="">- Select language -</option>
        """
        listlang = lang.items()
        listlang.sort()
        for (key, name) in listlang:
            text += """<option value="%s" %s>%s</option>
            """ % (key, key == sel_ln and 'selected="selected"' or '', name)
        text += """</select><br />
        <span class="adminlabel">Position</span>
        <select name="position" class="admin_w200">
        <option value="">- Select position -</option>
        """
        listpos = pos.items()
        listpos.sort()
        for (key, name) in listpos:
            text += """<option value="%s" %s>%s</option>""" % (key, key==position and 'selected="selected"' or '', name)
        text += "</select>"
        output += createhiddenform(action="addexistingportalbox#5.2",
                                   text=text,
                                   button="Add",
                                   colID=colID,
                                   ln=ln,
                                   confirm=1)
    else:
        output = """No existing portalboxes to add, please create a new one.
        """
    # Attach the portalbox only once all three selections are made.
    if pbxID > -1 and position and sel_ln and confirm in [1, "1"]:
        pbxID = int(pbxID)
        res = add_col_pbx(colID, pbxID, sel_ln, position, '')
        output += write_outcome(res)
    elif pbxID > -1 and confirm not in [-1, "-1"]:
        output += """<b><span class="info">All fields must be filled.</span></b>
        """
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_showportalboxes(colID, ln, content=output)
def perform_deleteportalbox(colID, ln, pbxID=-1, callback='yes', confirm=-1):
    """Form to delete a portalbox which is not attached to any collection.
    colID - the current collection.
    pbxID - the id of the portalbox"""
    subtitle = """<a name="5.3"></a>Delete an unused portalbox"""
    output = ""
    colID = int(colID)
    if pbxID not in [-1, "-1"] and confirm in [1, "1"]:
        # Deletion confirmed: resolve the id first so a bogus one can be
        # reported before attempting the delete.
        ares = get_pbx()
        pbx_dict = dict(map(lambda x: (x[0], x[1]), ares))
        if int(pbxID) in pbx_dict:
            pname = pbx_dict[int(pbxID)]  # NOTE(review): assigned but never used
            ares = delete_pbx(int(pbxID))
        else:
            return """<b><span class="info">This portalbox does not exist</span></b>"""
    res = get_pbx()
    col_dict = dict(get_def_name('', "collection"))
    pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
    col_pbx = get_col_pbx()
    col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
    if len(res) > 0:
        text = """
        <span class="adminlabel">Portalbox</span>
        <select name="pbxID" class="admin_w200">
        """
        text += """<option value="-1">- Select portalbox -"""
        # Only portalboxes not attached to any collection are offered.
        for (id, t_title, t_body) in res:
            if id not in col_pbx:
                text += """<option value="%s" %s>%s - %s...""" % (id, id == int(pbxID) and 'selected="selected"' or '', t_title, cgi.escape(t_body[0:10]))
                text += "</option>"
        text += """</select><br />"""
        output += createhiddenform(action="deleteportalbox#5.3",
                                   text=text,
                                   button="Delete",
                                   colID=colID,
                                   ln=ln,
                                   confirm=1)
    if pbxID not in [-1, "-1"]:
        pbxID = int(pbxID)
        if confirm in [1, "1"]:
            # Report the outcome of the delete performed above.
            output += write_outcome(ares)
    elif confirm not in [-1, "-1"]:
        output += """<b><span class="info">Choose a portalbox to delete.</span></b>
        """
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_showportalboxes(colID, ln, content=output)
def perform_modifyportalbox(colID, ln, pbxID=-1, score='', position='', sel_ln='', title='', body='', callback='yes', confirm=-1):
    """Form to modify a portalbox in a collection, or the portalbox itself.
    colID - the id of the collection.
    pbxID - the portalbox to change
    score - the score of the portalbox connected to colID which should be changed.
    position - the position of the portalbox in collection colID to change."""
    subtitle = ""
    output = ""
    colID = int(colID)
    res = get_pbx()
    pos = get_pbx_pos()
    lang = dict(get_languages())
    col_dict = dict(get_def_name('', "collection"))
    pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
    col_pbx = get_col_pbx(colID)
    col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
    if pbxID not in [-1, "-1"]:
        pbxID = int(pbxID)
        subtitle = """<a name="5.4"></a>Modify portalbox '%s' for this collection""" % pbx_dict[pbxID]
        col_pbx = get_col_pbx(colID)
        if not (score and position) and not (body and title):
            # NOTE(review): this loop deliberately rebinds the parameters
            # score, position, title and body with the stored values of the
            # selected portalbox; do not rename them.
            for (id_pbx, id_collection, tln, score, position, title, body) in col_pbx:
                if id_pbx == pbxID:
                    break
        output += """Collection (presentation) specific values (Changes implies only to this collection.)<br />"""
        text = """
        <span class="adminlabel">Position</span>
        <select name="position" class="admin_w200">
        """
        listpos = pos.items()
        listpos.sort()
        for (key, name) in listpos:
            text += """<option value="%s" %s>%s""" % (key, key==position and 'selected="selected"' or '', name)
            text += "</option>"
        text += """</select><br />"""
        # confirm=3 submits the collection-specific (presentation) change.
        output += createhiddenform(action="modifyportalbox#5.4",
                                   text=text,
                                   button="Modify",
                                   colID=colID,
                                   pbxID=pbxID,
                                   score=score,
                                   title=title,
                                   body=cgi.escape(body, 1),
                                   sel_ln=sel_ln,
                                   ln=ln,
                                   confirm=3)
        if pbxID > -1 and score and position and confirm in [3, "3"]:
            pbxID = int(pbxID)
            res = modify_pbx(colID, pbxID, sel_ln, score, position, '', '')
            res2 = get_pbx()
            pbx_dict = dict(map(lambda x: (x[0], x[1]), res2))
            output += write_outcome(res)
        output += """<br />Portalbox (content) specific values (any changes appears everywhere the portalbox is used.)"""
        text = """
        <span class="adminlabel">Title</span>
        <textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
        """ % cgi.escape(title)
        text += """
        <span class="adminlabel">Body</span>
        <textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
        """ % cgi.escape(body)
        # confirm=4 submits the global (content) change of the portalbox.
        output += createhiddenform(action="modifyportalbox#5.4",
                                   text=text,
                                   button="Modify",
                                   colID=colID,
                                   pbxID=pbxID,
                                   sel_ln=sel_ln,
                                   score=score,
                                   position=position,
                                   ln=ln,
                                   confirm=4)
        if pbxID > -1 and confirm in [4, "4"]:
            pbxID = int(pbxID)
            res = modify_pbx(colID, pbxID, sel_ln, '', '', title, body)
            output += write_outcome(res)
    else:
        output = """No portalbox to modify."""
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_showportalboxes(colID, ln, content=output)
def perform_switchpbxscore(colID, id_1, id_2, sel_ln, ln):
    """Switch the score of id_1 and id_2 in collection_portalbox.
    colID - the current collection
    id_1/id_2 - the id's to change the score for.
    sel_ln - the language of the portalbox"""
    # Removed a dead get_pbx() lookup whose result (pbx_dict) was never used.
    res = switch_pbx_score(colID, id_1, id_2, sel_ln)
    output = write_outcome(res)
    return perform_showportalboxes(colID, ln, content=output)
def perform_showportalboxes(colID, ln, callback='yes', content='', confirm=-1):
    """Show the portalboxes attached to a collection, with reorder links.
    colID - the collection to show the portalboxes for."""
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    subtitle = """<a name="5">5. Modify portalboxes for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.5">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
    output = ""
    pos = get_pbx_pos()
    output = """<dl>
    <dt>Portalbox actions (not related to this collection)</dt>
    <dd><a href="addportalbox?colID=%s&ln=%s#5.1">Create new portalbox</a></dd>
    <dd><a href="deleteportalbox?colID=%s&ln=%s#5.3">Delete an unused portalbox</a></dd>
    <dt>Collection specific actions</dt>
    <dd><a href="addexistingportalbox?colID=%s&ln=%s#5.2">Add existing portalbox to collection</a></dd>
    </dl>
    """ % (colID, ln, colID, ln, colID, ln)
    header = ['Position', 'Language', '', 'Title', 'Actions']
    actions = []
    sitelangs = get_languages()
    lang = dict(sitelangs)
    pos_list = pos.items()
    pos_list.sort()
    if len(get_col_pbx(colID)) > 0:
        # One table row per portalbox, grouped by language then position.
        for (key, value) in sitelangs:
            for (pos_key, pos_value) in pos_list:
                res = get_col_pbx(colID, key, pos_key)
                i = 0
                for (pbxID, colID_pbx, tln, score, position, title, body) in res:
                    # Up/down arrows; suppressed at the ends of the group.
                    # The rand parameter is a cache-buster for the link.
                    move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
                    if i != 0:
                        move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smallup.gif" title="Move portalbox up" alt="up" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i - 1][0], tln, random.randint(0, 1000), CFG_SITE_URL)
                    else:
                        move += " "
                    move += "</td><td>"
                    i += 1
                    if i != len(res):
                        move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smalldown.gif" title="Move portalbox down" alt="down" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i][0], tln, random.randint(0, 1000), CFG_SITE_URL)
                    move += """</td></tr></table>"""
                    actions.append(["%s" % (i==1 and pos[position] or ''), "%s" % (i==1 and lang[tln] or ''), move, "%s" % title])
                    # NOTE(review): the loop variable 'str' shadows the builtin.
                    for col in [(('Modify', 'modifyportalbox'), ('Remove', 'removeportalbox'),)]:
                        actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, pbxID, tln, col[0][0]))
                        for (str, function) in col[1:]:
                            actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, pbxID, tln, str)
        output += tupletotable(header=header, tuple=actions)
    else:
        output += """No portalboxes exists for this collection"""
    output += content
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_showportalboxes", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_removeportalbox(colID, ln, pbxID='', sel_ln='', callback='yes', confirm=0):
    """Confirmation form for detaching a portalbox from a collection.
    colID - the current collection, remove the portalbox from this collection.
    sel_ln - remove the portalbox with this language
    pbxID - remove the portalbox with this id"""
    subtitle = """<a name="5.5"></a>Remove portalbox"""
    out = ""
    col_dict = dict(get_def_name('', "collection"))
    pbx_dict = dict(map(lambda x: (x[0], x[1]), get_pbx()))
    if colID and pbxID and sel_ln:
        colID = int(colID)
        pbxID = int(pbxID)
        if confirm in ["0", 0]:
            # Step 1: ask the user to confirm the removal.
            question = """Do you want to remove the portalbox '%s' from the collection '%s'.""" % (pbx_dict[pbxID], col_dict[colID])
            out += createhiddenform(action="removeportalbox#5.5",
                                    text=question,
                                    button="Confirm",
                                    colID=colID,
                                    pbxID=pbxID,
                                    sel_ln=sel_ln,
                                    confirm=1)
        elif confirm in ["1", 1]:
            # Step 2: confirmed — perform the removal and report the outcome.
            out += write_outcome(remove_pbx(colID, pbxID, sel_ln))
    return perform_showportalboxes(colID, ln, content="<br />" + addadminbox(subtitle, [out]))
def perform_switchfmtscore(colID, type, id_1, id_2, ln):
    """Switch the score of id_1 and id_2 in the table type.
    colID - the current collection
    id_1/id_2 - the id's to change the score for.
    type - like "format" """
    # Removed a dead get_def_name() lookup whose result (fmt_dict) was never
    # used.  (The 'type' parameter name shadows the builtin but is part of
    # the public interface and is kept.)
    res = switch_score(colID, id_1, id_2, type)
    output = write_outcome(res)
    return perform_showoutputformats(colID, ln, content=output)
def perform_switchfldscore(colID, id_1, id_2, fmeth, ln):
    """Switch the score of id_1 and id_2 in collection_field_fieldvalue.
    colID - the current collection
    id_1/id_2 - the id's to change the score for.
    fmeth - which listing to return to: 'soo' sort options,
            'sew' search fields, 'seo' search options"""
    # Removed a dead get_def_name() lookup whose result (fld_dict) was never used.
    res = switch_fld_score(colID, id_1, id_2)
    output = write_outcome(res)
    if fmeth == "soo":
        return perform_showsortoptions(colID, ln, content=output)
    elif fmeth == "sew":
        return perform_showsearchfields(colID, ln, content=output)
    elif fmeth == "seo":
        return perform_showsearchoptions(colID, ln, content=output)
def perform_switchfldvaluescore(colID, id_1, id_fldvalue_1, id_fldvalue_2, ln):
    """Switch the score of id_fldvalue_1 and id_fldvalue_2 in
    collection_field_fieldvalue.
    colID - the current collection
    id_1 - the field the two values belong to"""
    # Removed two dead SELECTs that fetched the fieldvalue names (name_1,
    # name_2) without ever using them; besides the wasted queries, they
    # raised IndexError for an unknown fieldvalue id before the switch ran.
    res = switch_fld_value_score(colID, id_1, id_fldvalue_1, id_fldvalue_2)
    output = write_outcome(res)
    return perform_modifyfield(colID, fldID=id_1, ln=ln, content=output)
def perform_addnewfieldvalue(colID, fldID, ln, name='', value='', callback="yes", confirm=-1):
    """Form to create a new fieldvalue and attach it to a search option.
    name - the name of the new fieldvalue
    value - the value of the new fieldvalue
    """
    output = ""
    subtitle = """<a name="7.4"></a>Add new value"""
    text = """
    <span class="adminlabel">Display name</span>
    <input class="admin_w200" type="text" name="name" value="%s" /><br />
    <span class="adminlabel">Search value</span>
    <input class="admin_w200" type="text" name="value" value="%s" /><br />
    """ % (name, value)
    output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addnewfieldvalue" % CFG_SITE_URL,
                              text=text,
                              colID=colID,
                              fldID=fldID,
                              ln=ln,
                              button="Add",
                              confirm=1)
    if name and value and confirm in ["1", 1]:
        res = add_fldv(name, value)
        output += write_outcome(res)
        if res[0] == 1:
            # Value created (res[1] is the new id): attach it to this
            # collection's search option; only report on failure.
            res = add_col_fld(colID, fldID, 'seo', res[1])
            if res[0] == 0:
                output += "<br />" + write_outcome(res)
    elif confirm not in ["-1", -1]:
        output += """<b><span class="info">Please fill in name and value.</span></b>
        """
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_modifyfieldvalue(colID, fldID, fldvID, ln, name='', value='', callback="yes", confirm=-1):
    """Form to modify (or delete) an existing fieldvalue.
    name - the name of the fieldvalue
    value - the value of the fieldvalue
    """
    if confirm in [-1, "-1"]:
        # First visit: load the current name/value of the fieldvalue.
        res = get_fld_value(fldvID)
        (id, name, value) = res[0]
    output = ""
    subtitle = """<a name="7.4"></a>Modify existing value"""
    output = """<dl>
    <dt><b><span class="info">Warning: Modifications done below will also inflict on all places the modified data is used.</span></b></dt>
    </dl>"""
    text = """
    <span class="adminlabel">Display name</span>
    <input class="admin_w200" type="text" name="name" value="%s" /><br />
    <span class="adminlabel">Search value</span>
    <input class="admin_w200" type="text" name="value" value="%s" /><br />
    """ % (name, value)
    # confirm=1 submits an update of the value; confirm=2 deletes it.
    output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
                               text=text,
                               colID=colID,
                               fldID=fldID,
                               fldvID=fldvID,
                               ln=ln,
                               button="Update",
                               confirm=1)
    output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
                               text="Delete value and all associations",
                               colID=colID,
                               fldID=fldID,
                               fldvID=fldvID,
                               ln=ln,
                               button="Delete",
                               confirm=2)
    if name and value and confirm in ["1", 1]:
        res = update_fldv(fldvID, name, value)
        output += write_outcome(res)
    elif confirm in ["2", 2]:
        res = delete_fldv(fldvID)
        output += write_outcome(res)
    elif confirm not in ["-1", -1]:
        output += """<b><span class="info">Please fill in name and value.</span></b>"""
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_removefield(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
    """Form to remove a field (sort option / search field / search option)
    from a collection.
    colID - the current collection, remove the field from this collection.
    fldID - remove the field with this id
    fldvID - optionally, restrict the removal to this field value
    fmeth - which kind of field: 'soo', 'sew', 'seo'"""
    # Map the field method to a human-readable label for the messages below.
    if fmeth == "soo":
        field = "sort option"
    elif fmeth == "sew":
        field = "search field"
    elif fmeth == "seo":
        field = "search option"
    else:
        field = "field"
    subtitle = """<a name="6.4"><a name="7.4"><a name="8.4"></a>Remove %s""" % field
    output = ""
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    res = get_fld_value()
    fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
    if colID and fldID:
        colID = int(colID)
        fldID = int(fldID)
        if fldvID and fldvID != "None":
            fldvID = int(fldvID)
        if confirm in ["0", 0]:
            # Step 1: ask the user to confirm the removal.
            text = """Do you want to remove the %s '%s' %s from the collection '%s'.""" % (field, fld_dict[fldID], (fldvID not in["", "None"] and "with value '%s'" % fldv_dict[fldvID] or ''), col_dict[colID])
            output += createhiddenform(action="removefield#6.5",
                                       text=text,
                                       button="Confirm",
                                       colID=colID,
                                       fldID=fldID,
                                       fldvID=fldvID,
                                       fmeth=fmeth,
                                       confirm=1)
        elif confirm in ["1", 1]:
            # Step 2: confirmed — perform the removal.
            res = remove_fld(colID, fldID, fldvID)
            output += write_outcome(res)
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    # Return to the listing matching the field kind that was edited.
    if fmeth == "soo":
        return perform_showsortoptions(colID, ln, content=output)
    elif fmeth == "sew":
        return perform_showsearchfields(colID, ln, content=output)
    elif fmeth == "seo":
        return perform_showsearchoptions(colID, ln, content=output)
def perform_removefieldvalue(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
    """Form to remove a value from a search option of a collection.
    colID - the current collection
    fldID - the field the value belongs to
    fldvID - remove the value with this id"""
    subtitle = """<a name="7.4"></a>Remove value"""
    output = ""
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    res = get_fld_value()
    fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
    if colID and fldID:
        colID = int(colID)
        fldID = int(fldID)
        if fldvID and fldvID != "None":
            fldvID = int(fldvID)
        if confirm in ["0", 0]:
            # Step 1: ask the user to confirm the removal.
            text = """Do you want to remove the value '%s' from the search option '%s'.""" % (fldv_dict[fldvID], fld_dict[fldID])
            output += createhiddenform(action="removefieldvalue#7.4",
                                       text=text,
                                       button="Confirm",
                                       colID=colID,
                                       fldID=fldID,
                                       fldvID=fldvID,
                                       fmeth=fmeth,
                                       confirm=1)
        elif confirm in ["1", 1]:
            # Step 2: confirmed — perform the removal.
            res = remove_fld(colID, fldID, fldvID)
            output += write_outcome(res)
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_rearrangefieldvalue(colID, fldID, ln, callback='yes', confirm=-1):
    """Rearrange the fieldvalues of a search option alphabetically.
    colID - the collection
    fldID - the field to rearrange the fieldvalue for
    """
    subtitle = "Order values alphabetically"
    output = ""
    # Map fieldvalue id -> collection_field_fieldvalue row id.
    col_fldv = get_col_fld(colID, 'seo', fldID)
    col_fldv = dict(map(lambda x: (x[1], x[0]), col_fldv))
    # fldv_names is ordered by name (per get_fld_value), which drives the
    # descending score assignment below.
    fldv_names = get_fld_value()
    fldv_names = map(lambda x: (x[0], x[1]), fldv_names)
    # NOTE(review): a None key appears to mark a field attached without any
    # values; in that case there is nothing to order — confirm against
    # get_col_fld.
    if None not in col_fldv:
        vscore = len(col_fldv)
        for (fldvID, name) in fldv_names:
            if fldvID in col_fldv:
                run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (vscore, colID, fldID, fldvID))
                vscore -= 1
        output += write_outcome((1, ""))
    else:
        output += write_outcome((0, (0, "No values to order")))
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_modifyfield(colID, fldID, ln, content=output)
def perform_rearrangefield(colID, ln, fmeth, callback='yes', confirm=-1):
    """Rearrange the fields of a collection alphabetically.
    colID - the collection
    fmeth - which kind of field: 'soo', 'sew', 'seo'
    """
    subtitle = "Order fields alphabetically"
    output = ""
    col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
    # fld_names is ordered by name (per get_def_name), which drives the
    # descending score assignment below.
    fld_names = get_def_name('', "field")
    if len(col_fld) > 0:
        score = len(col_fld)
        for (fldID, name) in fld_names:
            if fldID in col_fld:
                run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (score, colID, fldID))
                score -= 1
        output += write_outcome((1, ""))
    else:
        output += write_outcome((0, (0, "No fields to order")))
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    # Return to the listing matching the field kind that was reordered.
    if fmeth == "soo":
        return perform_showsortoptions(colID, ln, content=output)
    elif fmeth == "sew":
        return perform_showsearchfields(colID, ln, content=output)
    elif fmeth == "seo":
        return perform_showsearchoptions(colID, ln, content=output)
def perform_addexistingfieldvalue(colID, fldID, fldvID=-1, ln=CFG_SITE_LANG, callback='yes', confirm=-1):
    """Form to add an existing fieldvalue to a field.

    colID - the collection
    fldID - the field to add the fieldvalue to
    fldvID - the fieldvalue to add"""
    subtitle = """</a><a name="7.4"></a>Add existing value to search option"""
    output = ""
    # A concrete value was chosen and confirmed: attach it before rebuilding
    # the form, so the page reflects the new state.
    if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
        fldvID = int(fldvID)
        ares = add_col_fld(colID, fldID, 'seo', fldvID)
    colID = int(colID)
    fldID = int(fldID)
    lang = dict(get_languages())
    res = get_def_name('', "field")
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(res)
    col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, 'seo')))
    fld_value = get_fld_value()
    fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
    # Build the value-selection dropdown from all known fieldvalues.
    text = """
    <span class="adminlabel">Value</span>
    <select name="fldvID" class="admin_w200">
    <option value="-1">- Select value -</option>
    """
    res = run_sql("SELECT id,name,value FROM fieldvalue ORDER BY name")
    for (id, name, value) in res:
        text += """<option value="%s" %s>%s - %s</option>
        """ % (id, id == int(fldvID) and 'selected="selected"' or '', name, value)
    text += """</select><br />"""
    output += createhiddenform(action="addexistingfieldvalue#7.4",
                               text=text,
                               button="Add",
                               colID=colID,
                               fldID=fldID,
                               ln=ln,
                               confirm=1)
    # Report the outcome of the add, or prompt if confirmed without a choice.
    if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
        output += write_outcome(ares)
    elif confirm in [1, "1"]:
        output += """<b><span class="info">Select a value to add and try again.</span></b>"""
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_modifyfield(colID, fldID, ln, content=output)
def perform_addexistingfield(colID, ln, fldID=-1, fldvID=-1, fmeth='', callback='yes', confirm=-1):
    """Form to add an existing field to a collection.

    colID - the collection to add the field to
    fldID - the field to add
    fmeth - field type: 'soo' (sort option), 'sew' (search field) or
            'seo' (search option); also selects which overview page to return.
    fldvID - optional fieldvalue to attach along with the field"""
    subtitle = """<a name="6.2"></a><a name="7.2"></a><a name="8.2"></a>Add existing field to collection"""
    output = ""
    # A field was chosen and confirmed: attach it before rebuilding the form.
    if fldID not in [-1, "-1"] and confirm in [1, "1"]:
        fldID = int(fldID)
        ares = add_col_fld(colID, fldID, fmeth, fldvID)
    colID = int(colID)
    lang = dict(get_languages())
    res = get_def_name('', "field")
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(res)
    col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
    fld_value = get_fld_value()
    fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
    if fldvID:
        fldvID = int(fldvID)
    # Build the field-selection dropdown.  Search options ('seo') may be added
    # multiple times (once per value); other types only list unattached fields.
    text = """
    <span class="adminlabel">Field</span>
    <select name="fldID" class="admin_w200">
    <option value="-1">- Select field -</option>
    """
    for (id, var) in res:
        if fmeth == 'seo' or (fmeth != 'seo' and id not in col_fld):
            text += """<option value="%s" %s>%s</option>
            """ % (id, '', fld_dict[id])
    text += """</select><br />"""
    output += createhiddenform(action="addexistingfield#6.2",
                               text=text,
                               button="Add",
                               colID=colID,
                               fmeth=fmeth,
                               ln=ln,
                               confirm=1)
    # Report the outcome of the add, or prompt if confirmed without a choice.
    if fldID not in [-1, "-1"] and confirm in [1, "1"]:
        output += write_outcome(ares)
    elif fldID in [-1, "-1"] and confirm not in [-1, "-1"]:
        output += """<b><span class="info">Select a field.</span></b>
        """
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    # Return to the overview page matching the field type.
    if fmeth == "soo":
        return perform_showsortoptions(colID, ln, content=output)
    elif fmeth == "sew":
        return perform_showsearchfields(colID, ln, content=output)
    elif fmeth == "seo":
        return perform_showsearchoptions(colID, ln, content=output)
def perform_showsortoptions(colID, ln, callback='yes', content='', confirm=-1):
    """Show the sort fields of this collection.

    Renders the 'soo' (sort option) entries as a table with move-up/move-down
    arrows and per-field action links; `content` is extra HTML appended below
    the table (typically the outcome of a previous action).
    """
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    fld_type = get_sort_nametypes()
    subtitle = """<a name="8">8. Modify sort options for collection '%s'</a>   <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.8">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
    output = """<dl>
    <dt>Field actions (not related to this collection)</dt>
    <dd>Go to the BibIndex interface to modify the available sort options</dd>
    <dt>Collection specific actions
    <dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=soo#8.2">Add sort option to collection</a></dd>
    <dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=soo#8.2">Order sort options alphabetically</a></dd>
    </dl>
    """ % (colID, ln, colID, ln)
    header = ['', 'Sort option', 'Actions']
    actions = []
    sitelangs = get_languages()
    lang = dict(sitelangs)
    fld_type_list = fld_type.items()
    if len(get_col_fld(colID, 'soo')) > 0:
        res = get_col_fld(colID, 'soo')
        i = 0
        for (fldID, fldvID, stype, score, score_fieldvalue) in res:
            # Move-up arrow for every row but the first; the `rand` query
            # parameter defeats browser caching of the redirect.
            move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
            if i != 0:
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
            else:
                move += "   "
            move += "</td><td>"
            i += 1
            # Move-down arrow for every row but the last.
            if i != len(res):
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
            move += """</td></tr></table>"""
            actions.append([move, fld_dict[int(fldID)]])
            # First action link is appended; any further (label, handler) pairs
            # in the tuple would be joined with ' / ' (none are defined here).
            for col in [(('Remove sort option', 'removefield'),)]:
                actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
                for (str, function) in col[1:]:
                    actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
        output += tupletotable(header=header, tuple=actions)
    else:
        output += """No sort options exists for this collection"""
    output += content
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_showsortoptions", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_showsearchfields(colID, ln, callback='yes', content='', confirm=-1):
    """Show the search fields of this collection.

    Renders the 'sew' (search field) entries as a table with move-up/move-down
    arrows and per-field action links; `content` is extra HTML appended below
    the table (typically the outcome of a previous action).
    """
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    fld_type = get_sort_nametypes()
    subtitle = """<a name="6">6. Modify search fields for collection '%s'</a>   <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.6">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
    output = """<dl>
    <dt>Field actions (not related to this collection)</dt>
    <dd>Go to the BibIndex interface to modify the available search fields</dd>
    <dt>Collection specific actions
    <dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=sew#6.2">Add search field to collection</a></dd>
    <dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=sew#6.2">Order search fields alphabetically</a></dd>
    </dl>
    """ % (colID, ln, colID, ln)
    header = ['', 'Search field', 'Actions']
    actions = []
    sitelangs = get_languages()
    lang = dict(sitelangs)
    fld_type_list = fld_type.items()
    if len(get_col_fld(colID, 'sew')) > 0:
        res = get_col_fld(colID, 'sew')
        i = 0
        for (fldID, fldvID, stype, score, score_fieldvalue) in res:
            # Move-up arrow for every row but the first; `rand` defeats caching.
            move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
            if i != 0:
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
            else:
                move += "   "
            move += "</td><td>"
            i += 1
            # Move-down arrow for every row but the last.
            if i != len(res):
                move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
            move += """</td></tr></table>"""
            actions.append([move, fld_dict[int(fldID)]])
            # First action link is appended; any further (label, handler) pairs
            # would be joined with ' / ' (none are defined here).
            for col in [(('Remove search field', 'removefield'),)]:
                actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=sew#6.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
                for (str, function) in col[1:]:
                    actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#6.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
        output += tupletotable(header=header, tuple=actions)
    else:
        output += """No search fields exists for this collection"""
    output += content
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_showsearchfields", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_showsearchoptions(colID, ln, callback='yes', content='', confirm=-1):
    """Show the search options of this collection.

    Renders one table row per distinct 'seo' field (a field may carry several
    fieldvalues, hence the DISTINCT query) with move arrows and action links;
    `content` is extra HTML appended below the table.
    """
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    fld_type = get_sort_nametypes()
    subtitle = """<a name="7">7. Modify search options for collection '%s'</a>   <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.7">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
    output = """<dl>
    <dt>Field actions (not related to this collection)</dt>
    <dd>Go to the BibIndex interface to modify the available search options</dd>
    <dt>Collection specific actions
    <dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=seo#7.2">Add search option to collection</a></dd>
    <dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=seo#7.2">Order search options alphabetically</a></dd>
    </dl>
    """ % (colID, ln, colID, ln)
    header = ['', 'Search option', 'Actions']
    actions = []
    sitelangs = get_languages()
    lang = dict(sitelangs)
    fld_type_list = fld_type.items()
    # One row per field, ordered by score (highest shown first).
    fld_distinct = run_sql("SELECT distinct(id_field) FROM collection_field_fieldvalue WHERE type='seo' AND id_collection=%s ORDER by score desc", (colID, ))
    if len(fld_distinct) > 0:
        i = 0
        for (id) in fld_distinct:
            fldID = id[0]
            col_fld = get_col_fld(colID, 'seo', fldID)
            move = ""
            # Move-up arrow for every row but the first; `rand` defeats caching.
            if i != 0:
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
            else:
                move += "   "
            i += 1
            # Move-down arrow for every row but the last.
            if i != len(fld_distinct):
                move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i][0], random.randint(0, 1000), CFG_SITE_URL)
            actions.append([move, "%s" % fld_dict[fldID]])
            # 'Modify values' is the primary link; the remaining (label,
            # handler) pairs are joined onto the same cell with ' / '.
            for col in [(('Modify values', 'modifyfield'), ('Remove search option', 'removefield'),)]:
                actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#7.3">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
                for (str, function) in col[1:]:
                    actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=seo#7.3">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
        output += tupletotable(header=header, tuple=actions)
    else:
        output += """No search options exists for this collection"""
    output += content
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_showsearchoptions", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_modifyfield(colID, fldID, fldvID='', ln=CFG_SITE_LANG, content='', callback='yes', confirm=0):
    """Modify the fieldvalues for a field.

    Lists the values attached to search option `fldID` with reorder arrows and
    per-value action links; `content` is extra HTML appended below the table.
    """
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    fld_dict = dict(get_def_name('', "field"))
    fld_type = get_sort_nametypes()
    fldID = int(fldID)
    subtitle = """<a name="7.3">Modify values for field '%s'</a>""" % (fld_dict[fldID])
    output = """<dl>
    <dt>Value specific actions
    <dd><a href="addexistingfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add existing value to search option</a></dd>
    <dd><a href="addnewfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add new value to search option</a></dd>
    <dd><a href="rearrangefieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Order values alphabetically</a></dd>
    </dl>
    """ % (colID, ln, fldID, colID, ln, fldID, colID, ln, fldID)
    header = ['', 'Value name', 'Actions']
    actions = []
    sitelangs = get_languages()
    lang = dict(sitelangs)
    fld_type_list = fld_type.items()
    col_fld = list(get_col_fld(colID, 'seo', fldID))
    # A single row with a None fieldvalue id means the field is attached but
    # has no values yet.
    if len(col_fld) == 1 and col_fld[0][1] is None:
        output += """<b><span class="info">No values added for this search option yet</span></b>"""
    else:
        j = 0
        for (fldID, fldvID, stype, score, score_fieldvalue) in col_fld:
            fieldvalue = get_fld_value(fldvID)
            move = ""
            # Move-up arrow for every row but the first; `rand` defeats caching.
            if j != 0:
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j - 1][1], random.randint(0, 1000), CFG_SITE_URL)
            else:
                move += "   "
            j += 1
            # Move-down arrow for every row but the last.
            if j != len(col_fld):
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j][1], random.randint(0, 1000), CFG_SITE_URL)
            # Show "name - value" only when they differ, plain name otherwise.
            if fieldvalue[0][1] != fieldvalue[0][2] and fldvID is not None:
                actions.append([move, "%s - %s" % (fieldvalue[0][1], fieldvalue[0][2])])
            elif fldvID is not None:
                actions.append([move, "%s" % fieldvalue[0][1]])
            move = ''
            # 'Modify value' is the primary link; further (label, handler)
            # pairs are joined onto the same cell with ' / '.
            for col in [(('Modify value', 'modifyfieldvalue'), ('Remove value', 'removefieldvalue'),)]:
                actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s&fmeth=seo#7.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, fldvID, col[0][0]))
                for (str, function) in col[1:]:
                    actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s#7.4">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, fldvID, str)
        output += tupletotable(header=header, tuple=actions)
    output += content
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    if len(col_fld) == 0:
        output = content
    return perform_showsearchoptions(colID, ln, content=output)
def perform_showoutputformats(colID, ln, callback='yes', content='', confirm=-1):
    """Show the output formats of the current collection.

    colID - the collection id.
    content - extra HTML appended below the table (typically the outcome of a
              previous action).
    """
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    subtitle = """<a name="10">10. Modify output formats for collection '%s'</a>   <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.10">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
    output = """
    <dl>
    <dt>Output format actions (not specific to the chosen collection)
    <dd>Go to the BibFormat interface to modify</dd>
    <dt>Collection specific actions
    <dd><a href="addexistingoutputformat?colID=%s&ln=%s#10.2">Add existing output format to collection</a></dd>
    </dl>
    """ % (colID, ln)
    header = ['', 'Code', 'Output format', 'Actions']
    actions = []
    col_fmt = get_col_fmt(colID)
    fmt_dict = dict(get_def_name('', "format"))
    i = 0
    if len(col_fmt) > 0:
        for (id_format, colID_fld, code, score) in col_fmt:
            # Move-up arrow for every row but the first; `rand` defeats caching.
            move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
            if i != 0:
                move += """<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smallup.gif" title="Move format up"></a>""" % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
            else:
                move += "   "
            move += "</td><td>"
            i += 1
            # Move-down arrow for every row but the last.
            if i != len(col_fmt):
                move += '<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smalldown.gif" title="Move format down"></a>' % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i][0], random.randint(0, 1000), CFG_SITE_URL)
            move += """</td></tr></table>"""
            actions.append([move, code, fmt_dict[int(id_format)]])
            # 'Remove' is the primary link; further (label, handler) pairs
            # would be joined with ' / ' (none are defined here).
            for col in [(('Remove', 'removeoutputformat'),)]:
                actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, id_format, col[0][0]))
                for (str, function) in col[1:]:
                    actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, function, colID, ln, id_format, str)
        output += tupletotable(header=header, tuple=actions)
    else:
        output += """No output formats exists for this collection"""
    output += content
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_showoutputformats", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def external_collections_build_select(colID, external_collection):
    """Build the HTML <select> of display states for one external collection.

    Engines with a parser support all four states; engines without one only
    support the first two.  The collection's current state is pre-selected.
    """
    state_count = 4 if external_collection.parser else 2
    current_state = external_collection_get_state(external_collection, colID)
    parts = ['<select name="state" class="admin_w200">']
    for num in range(state_count):
        parts.append('<option value="%(num)d"%(selected)s>%(state_name)s</option>'
                     % {'num': num,
                        'selected': ' selected' if num == current_state else '',
                        'state_name': CFG_EXTERNAL_COLLECTION_STATES_NAME[num]})
    parts.append('</select>\n')
    return ''.join(parts)
def perform_manage_external_collections(colID, ln, callback='yes', content='', confirm=-1):
    """Show the interface to configure external collections to the user.

    Builds one form row per known external search engine with a state selector
    and a 'recurse to daughters' checkbox; submission is handled by
    perform_update_external_collections.
    """
    colID = int(colID)
    subtitle = """<a name="11">11. Configuration of related external collections</a>
    <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.11">?</a>]</small>""" % CFG_SITE_URL
    output = '<form action="update_external_collections" method="POST"><input type="hidden" name="colID" value="%(colID)d">' % {'colID': colID}
    table_header = ['External collection', 'Mode', 'Apply also to daughter collections?']
    table_content = []
    # Stable, name-sorted ordering — the update handler relies on zipping the
    # submitted state list against this same ordering.
    external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
    for external_collection in external_collections:
        collection_name = external_collection.name
        select = external_collections_build_select(colID, external_collection)
        recurse = '<input type=checkbox name="recurse" value="%(collection_name)s">' % {'collection_name': collection_name}
        table_content.append([collection_name, select, recurse])
    output += tupletotable(header=table_header, tuple=table_content)
    output += '<input class="adminbutton" type="submit" value="Modify"/>'
    output += '</form>'
    return addadminbox(subtitle, [output])
def perform_update_external_collections(colID, ln, state_list, recurse_list):
    """Apply state changes submitted from the external-collections form.

    state_list - one state value per engine, in the same (name-sorted) order
                 the form was rendered in.
    recurse_list - names of engines whose change should also be applied to
                   daughter collections.
    Returns the refreshed management page, preceded by a warning if the
    submitted data is missing or inconsistent.
    """
    colID = int(colID)
    changes = []
    output = ""
    if not state_list:
        return 'Warning : No state found.<br />' + perform_manage_external_collections(colID, ln)
    external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
    # The form renders selects in this exact order, so a length mismatch means
    # the submission cannot be trusted.
    if len(external_collections) != len(state_list):
        return 'Warning : Size of state_list different from external_collections!<br />' + perform_manage_external_collections(colID, ln)
    for (external_collection, state) in zip(external_collections, state_list):
        state = int(state)
        collection_name = external_collection.name
        recurse = recurse_list and collection_name in recurse_list
        oldstate = external_collection_get_state(external_collection, colID)
        # Apply when the state changed, or when recursion to daughters was
        # requested (daughters may differ even if this collection does not).
        if oldstate != state or recurse:
            changes += external_collection_get_update_state_list(external_collection, colID, state, recurse)
    external_collection_apply_changes(changes)
    return output + '<br /><br />' + perform_manage_external_collections(colID, ln)
def perform_showdetailedrecordoptions(colID, ln, callback='yes', content='', confirm=-1):
    """Show the interface to configure detailed record page to the user.

    Renders one checkbox per detailed-record tab (pre-checked when currently
    visible) plus a 'recurse to subcollections' option; submission is handled
    by perform_update_detailed_record_options.
    """
    colID = int(colID)
    subtitle = """<a name="12">12. Configuration of detailed record page</a>
    <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.12">?</a>]</small>""" % CFG_SITE_URL
    output = '''<form action="update_detailed_record_options" method="post">
    <table><tr><td>
    <input type="hidden" name="colID" value="%(colID)d">
    <dl>
    <dt><b>Show tabs:</b></dt>
    <dd>
    ''' % {'colID': colID}
    for (tab_id, tab_info) in iteritems(get_detailed_page_tabs(colID)):
        # Hide the comments tab entirely when both comments and reviews are
        # disabled site-wide.
        if tab_id == 'comments' and \
           not CFG_WEBCOMMENT_ALLOW_REVIEWS and \
           not CFG_WEBCOMMENT_ALLOW_COMMENTS:
            continue
        check = ''  # unused; the checked state is computed inline below
        output += '''<input type="checkbox" id="id%(tabid)s" name="tabs" value="%(tabid)s" %(check)s />
        <label for="id%(tabid)s"> %(label)s</label><br />
        ''' % {'tabid':tab_id,
               'check':((tab_info['visible'] and 'checked="checked"') or ''),
               'label':tab_info['label']}
    output += '</dd></dl></td><td>'
    output += '</td></tr></table><input class="adminbutton" type="submit" value="Modify"/>'
    output += '''<input type="checkbox" id="recurse" name="recurse" value="1" />
    <label for="recurse"> Also apply to subcollections</label>'''
    output += '</form>'
    return addadminbox(subtitle, [output])
def perform_update_detailed_record_options(colID, ln, tabs, recurse):
    """Update the preferences for the tab to show/hide in the detailed record page.

    tabs - list of tab ids to show; mutated in place (an empty marker entry is
           replaced by the always-on 'metadata' tab).
    recurse - truthy to apply the same setting to all descendant collections.
    """
    colID = int(colID)
    changes = []
    output = '<b><span class="info">Operation successfully completed.</span></b>'
    # The form submits an empty entry when no tab checkbox is ticked; replace
    # it with 'metadata' so the record page always keeps at least that tab.
    if '' in tabs:
        tabs.remove('')
        tabs.append('metadata')
    def update_settings(colID, tabs, recurse):
        # Replace this collection's tab list, then recurse into descendants
        # when requested.
        run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection=%s", (colID, ))
        run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
                " SET id_collection=%s, tabs=%s", (colID, ';'.join(tabs)))
        ## for enabled_tab in tabs:
        ##     run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
        ##            " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
        if recurse:
            for descendant_id in get_collection_descendants(colID):
                update_settings(descendant_id, tabs, recurse)
    update_settings(colID, tabs, recurse)
    ## for colID in colIDs:
    ##     run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection='%s'" % colID)
    ##     for enabled_tab in tabs:
    ##         run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
    ##                " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
    #if callback:
    return perform_editcollection(colID, ln, "perform_modifytranslations",
                                  '<br /><br />' + output + '<br /><br />' + \
                                  perform_showdetailedrecordoptions(colID, ln))
    #else:
    #    return addadminbox(subtitle, body)
    #return output + '<br /><br />' + perform_showdetailedrecordoptions(colID, ln)
def perform_addexistingoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
    """Form to add an existing output format to a collection.

    colID - the collection the format should be added to
    fmtID - the format to add."""
    subtitle = """<a name="10.2"></a>Add existing output format to collection"""
    output = ""
    # A format was chosen and confirmed: attach it before rebuilding the form.
    if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
        ares = add_col_fmt(colID, fmtID)
    colID = int(colID)
    res = get_def_name('', "format")
    fmt_dict = dict(res)
    col_dict = dict(get_def_name('', "collection"))
    col_fmt = get_col_fmt(colID)
    col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
    if len(res) > 0:
        # Dropdown listing only formats not yet attached to this collection.
        text = """
        <span class="adminlabel">Output format</span>
        <select name="fmtID" class="admin_w200">
        <option value="-1">- Select output format -</option>
        """
        for (id, name) in res:
            if id not in col_fmt:
                text += """<option value="%s" %s>%s</option>
                """ % (id, id == int(fmtID) and 'selected="selected"' or '', name)
        text += """</select><br />
        """
        output += createhiddenform(action="addexistingoutputformat#10.2",
                                   text=text,
                                   button="Add",
                                   colID=colID,
                                   ln=ln,
                                   confirm=1)
    else:
        output = """No existing output formats to add, please create a new one."""
    # Report the outcome of the add, or prompt if confirmed without a choice.
    if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
        output += write_outcome(ares)
    elif fmtID in [-1, "-1"] and confirm not in [-1, "-1"]:
        output += """<b><span class="info">Please select output format.</span></b>"""
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_showoutputformats(colID, ln, content=output)
def perform_deleteoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
    """Form to delete an output format not in use.

    colID - the collection id of the current collection.
    fmtID - the format id to delete."""
    subtitle = """<a name="10.3"></a>Delete an unused output format"""
    output = """
    <dl>
    <dd>Deleting an output format will also delete the translations associated.</dd>
    </dl>
    """
    colID = int(colID)
    # Deletion was confirmed: remember the old name and delete the format
    # before rebuilding the (now shorter) selection list.
    if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
        fmt_dict = dict(get_def_name('', "format"))
        old_colNAME = fmt_dict[int(fmtID)]
        ares = delete_fmt(int(fmtID))
    res = get_def_name('', "format")
    fmt_dict = dict(res)
    col_dict = dict(get_def_name('', "collection"))
    col_fmt = get_col_fmt()
    col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
    if len(res) > 0:
        # Dropdown listing formats not attached to any collection (only those
        # are safe to delete).
        text = """
        <span class="adminlabel">Output format</span>
        <select name="fmtID" class="admin_w200">
        """
        text += """<option value="-1">- Select output format -"""
        for (id, name) in res:
            if id not in col_fmt:
                text += """<option value="%s" %s>%s""" % (id, id == int(fmtID) and 'selected="selected"' or '', name)
                text += "</option>"
        text += """</select><br />"""
        output += createhiddenform(action="deleteoutputformat#10.3",
                                   text=text,
                                   button="Delete",
                                   colID=colID,
                                   ln=ln,
                                   confirm=0)
        if fmtID not in [-1, "-1"]:
            fmtID = int(fmtID)
            # Two-step flow: first ask for confirmation, then report outcome.
            if confirm in [0, "0"]:
                text = """<b>Do you want to delete the output format '%s'.</b>
                """ % fmt_dict[fmtID]
                output += createhiddenform(action="deleteoutputformat#10.3",
                                           text=text,
                                           button="Confirm",
                                           colID=colID,
                                           fmtID=fmtID,
                                           ln=ln,
                                           confirm=1)
            elif confirm in [1, "1"]:
                output += write_outcome(ares)
        elif confirm not in [-1, "-1"]:
            output += """<b><span class="info">Choose a output format to delete.</span></b>
            """
    body = [output]
    output = "<br />" + addadminbox(subtitle, body)
    return perform_showoutputformats(colID, ln, content=output)
def perform_removeoutputformat(colID, ln, fmtID='', callback='yes', confirm=0):
    """Detach an output format from a collection, with a confirmation step.

    colID - the collection id of the current collection.
    fmtID - the format id.
    """
    subtitle = """<a name="10.5"></a>Remove output format"""
    collection_names = dict(get_def_name('', "collection"))
    format_names = dict(get_def_name('', "format"))
    output = ""
    if colID and fmtID:
        colID, fmtID = int(colID), int(fmtID)
        if confirm in ["0", 0]:
            # First pass: render the confirmation dialog.
            question = ("""Do you want to remove the output format '%s' from the collection '%s'."""
                        % (format_names[fmtID], collection_names[colID]))
            output += createhiddenform(action="removeoutputformat#10.5",
                                       text=question,
                                       button="Confirm",
                                       colID=colID,
                                       fmtID=fmtID,
                                       confirm=1)
        elif confirm in ["1", 1]:
            # Second pass: actually detach the format and report the outcome.
            output += write_outcome(remove_fmt(colID, fmtID))
    boxed = "<br />" + addadminbox(subtitle, [output])
    return perform_showoutputformats(colID, ln, content=boxed)
def perform_index(colID=1, ln=CFG_SITE_LANG, mtype='', content='', confirm=0):
    """The index method, calling methods to show the collection tree, create new collections and add collections to tree.

    colID - collection the menu links should point at (defaults to root).
    mtype - name of the sub-page to embed ('perform_showall' embeds them all);
            when `content` is given it is shown instead of regenerating mtype.
    """
    subtitle = "Overview"
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    output = ""
    fin_output = ""
    # Ensure the root collection (id=1) exists; create it from the site name
    # if this is a fresh installation.
    if 1 not in col_dict:
        res = add_col(CFG_SITE_NAME, '')
        if res:
            fin_output += """<b><span class="info">Created root collection.</span></b><br />"""
        else:
            return "Cannot create root collection, please check database."
    # Keep the root collection's name in sync with the configured site name.
    if CFG_SITE_NAME != run_sql("SELECT name from collection WHERE id=1")[0][0]:
        res = run_sql("update collection set name=%s where id=1", (CFG_SITE_NAME, ))
        if res:
            fin_output += """<b><span class="info">The name of the root collection has been modified to be the same as the %(sitename)s installation name given prior to installing %(sitename)s.</span><b><br />""" % {'sitename' : CFG_SITE_NAME}
        else:
            return "Error renaming root collection."
    # BUGFIX: the Guide link previously passed the collection id as the `ln`
    # (language) parameter; it now passes the actual language code.
    fin_output += """
    <table>
    <tr>
    <td>0. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_showall">Show all</a></small></td>
    <td>1. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_addcollection">Create new collection</a></small></td>
    <td>2. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_addcollectiontotree">Attach collection to tree</a></small></td>
    <td>3. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_modifycollectiontree">Modify collection tree</a></small></td>
    <td>4. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_checkwebcollstatus">Webcoll Status</a></small></td>
    </tr><tr>
    <td>5. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_checkcollectionstatus">Collection Status</a></small></td>
    <td>6. <small><a href="{cfg_site_url}/admin/websearch/websearchadmin.py?colID={col_id}&ln={ln}&mtype=perform_checkexternalcollections">Check external collections</a></small></td>
    <td>7. <small><a href="{cfg_site_url}/help/admin/websearch-admin-guide?ln={ln}">Guide</a></small></td>
    </tr>
    </table>
    """.format(cfg_site_url=CFG_SITE_URL, col_id=colID, ln=ln)
    if mtype == "":
        fin_output += """<br /><br /><b><span class="info">To manage the collections, select an item from the menu.</span><b><br />"""
    # For each sub-page: show the caller-supplied content if any, otherwise
    # regenerate it when its mtype (or 'perform_showall') is selected.
    if mtype == "perform_addcollection" and content:
        fin_output += content
    elif mtype == "perform_addcollection" or mtype == "perform_showall":
        fin_output += perform_addcollection(colID=colID, ln=ln, callback='')
        fin_output += "<br />"
    if mtype == "perform_addcollectiontotree" and content:
        fin_output += content
    elif mtype == "perform_addcollectiontotree" or mtype == "perform_showall":
        fin_output += perform_addcollectiontotree(colID=colID, ln=ln, callback='')
        fin_output += "<br />"
    if mtype == "perform_modifycollectiontree" and content:
        fin_output += content
    elif mtype == "perform_modifycollectiontree" or mtype == "perform_showall":
        fin_output += perform_modifycollectiontree(colID=colID, ln=ln, callback='')
        fin_output += "<br />"
    if mtype == "perform_checkwebcollstatus" and content:
        fin_output += content
    elif mtype == "perform_checkwebcollstatus" or mtype == "perform_showall":
        fin_output += perform_checkwebcollstatus(colID, ln, callback='')
    if mtype == "perform_checkcollectionstatus" and content:
        fin_output += content
    elif mtype == "perform_checkcollectionstatus" or mtype == "perform_showall":
        fin_output += perform_checkcollectionstatus(colID, ln, callback='')
    if mtype == "perform_checkexternalcollections" and content:
        fin_output += content
    elif mtype == "perform_checkexternalcollections" or mtype == "perform_showall":
        fin_output += perform_checkexternalcollections(colID, ln, callback='')
    body = [fin_output]
    return addadminbox('<b>Menu</b>', body)
def show_coll_not_in_tree(colID, ln, col_dict):
    """Return an HTML snippet listing collections missing from the tree.

    Returns '' when every collection in the database is already reachable
    through the tree rooted at colID.
    """
    # Collect every collection id that appears in the tree, either as a node
    # or as somebody's parent.
    linked = {}
    for (node_id, up, down, dad, reltype) in get_col_tree(colID):
        linked[node_id] = 1
        linked[dad] = 1
    all_ids = run_sql("SELECT id from collection")
    if len(all_ids) == len(linked):
        return ""
    output = "These collections are not in the tree, and should be added:<br />"
    for row in all_ids:
        if row[0] not in linked:
            output += """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a> ,
            """ % (CFG_SITE_URL, row[0], ln, col_dict[row[0]])
    output += "<br /><br />"
    return output
def create_colltree(tree, col_dict, colID, ln, move_from='', move_to='', rtype='', edit=''):
    """Creates the presentation of the collection tree, with the buttons for modifying it.
    tree - the tree to present, from get_tree()
    col_dict - the name of the collections in a dictionary
    colID - the collection id to start with
    move_from - if a collection to be moved has been chosen
    move_to - the collection which should be set as father of move_from
    rtype - the type of the tree, regular or virtual
    edit - if the method should output the edit buttons."""
    # move_from/move_to are encoded as "<rtype><row-index>", e.g. "r3":
    # first character is the tree type, the rest is the row index in that tree.
    if move_from:
        move_from_rtype = move_from[0]
        move_from_id = int(move_from[1:len(move_from)])
        tree_from = get_col_tree(colID, move_from_rtype)
        tree_to = get_col_tree(colID, rtype)
    # tables: number of nested HTML sub-tables currently open.
    # tstack: one (id_son, dad, table_depth) entry per rendered row, used to
    # close sub-tables when rendering steps back up to an earlier dad.
    tables = 0
    tstack = []
    i = 0
    text = """
    <table border ="0" cellspacing="0" cellpadding="0">"""
    for i in range(0, len(tree)):
        id_son = tree[i][0]
        up = tree[i][1]
        down = tree[i][2]
        dad = tree[i][3]
        reltype = tree[i][4]
        tmove_from = ""
        j = i
        # Walk back to the most recent row sharing this row's dad and close
        # any sub-tables that were opened since then.
        while j > 0:
            j = j - 1
            try:
                if tstack[j][1] == dad:
                    table = tstack[j][2]
                    for k in range(0, tables - table):
                        tables = tables - 1
                        text += """</table></td></tr>
                        """
                    break
            except StandardError as e:
                # NOTE(review): deliberately swallows any tstack lookup error.
                pass
        text += """<tr><td>
        """
        # tree[i][1] == 0 marks the first rendered child of a node; it opens a
        # new nested sub-table -- TODO confirm against get_col_tree's flags.
        if i > 0 and tree[i][1] == 0:
            tables = tables + 1
            text += """</td><td></td><td></td><td></td><td><table border="0" cellspacing="0" cellpadding="0"><tr><td>
            """
        if i == 0:
            tstack.append((id_son, dad, 1))
        else:
            tstack.append((id_son, dad, tables))
        # "Move up" arrow for reordering siblings (edit mode only).
        if up == 1 and edit:
            text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_up=%s&rtype=%s#%s"><img border="0" src="%s/img/smallup.gif" title="Move collection up"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
        else:
            text += """ """
        text += "</td><td>"
        # "Move down" arrow for reordering siblings (edit mode only).
        if down == 1 and edit:
            text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_down=%s&rtype=%s#%s"><img border="0" src="%s/img/smalldown.gif" title="Move collection down"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
        else:
            text += """ """
        text += "</td><td>"
        if edit:
            # When both endpoints of a move are known, hide move_from for the
            # rest of this row and restore it afterwards (see tmove_from).
            if move_from and move_to:
                tmove_from = move_from
                move_from = ''
            if not (move_from == "" and i == 0) and not (move_from != "" and int(move_from[1:len(move_from)]) == i and rtype == move_from[0]):
                check = "true"
                if move_from:
                    #if tree_from[move_from_id][0] == tree_to[i][0] or not check_col(tree_to[i][0], tree_from[move_from_id][0]):
                    #    check = ''
                    #elif not check_col(tree_to[i][0], tree_from[move_from_id][0]):
                    #    check = ''
                    #if not check and (tree_to[i][0] == 1 and tree_from[move_from_id][3] == tree_to[i][0] and move_from_rtype != rtype):
                    #    check = "true"
                    if check:
                        text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s&move_to=%s%s&rtype=%s#tree"><img border="0" src="%s/img/move_to.gif" title="Move '%s' to '%s'"></a>
                        """ % (CFG_SITE_URL, colID, ln, move_from, rtype, i, rtype, CFG_SITE_URL, col_dict[tree_from[int(move_from[1:len(move_from)])][0]], col_dict[tree_to[i][0]])
                else:
                    try:
                        text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s%s&rtype=%s#%s"><img border="0" src="%s/img/move_from.gif" title="Move '%s' from this location."></a>""" % (CFG_SITE_URL, colID, ln, rtype, i, rtype, tree[i][0], CFG_SITE_URL, col_dict[tree[i][0]])
                    except KeyError:
                        pass
            else:
                text += """<img border="0" src="%s/img/white_field.gif">
                """ % CFG_SITE_URL
        else:
            text += """<img border="0" src="%s/img/white_field.gif">
            """ % CFG_SITE_URL
        text += """
        </td>
        <td>"""
        # Delete ("remove from tree") button.
        if edit:
            try:
                text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&delete=%s&rtype=%s#%s"><img border="0" src="%s/img/iconcross.gif" title="Remove colletion from tree"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
            except KeyError:
                pass
        elif i != 0:
            text += """<img border="0" src="%s/img/white_field.gif">
            """ % CFG_SITE_URL
        text += """</td><td>
        """
        if tmove_from:
            move_from = tmove_from
        # Collection name link; virtual relations (reltype "v") are italicised
        # and the active move endpoints get marker icons.
        try:
            text += """<a name="%s"></a>%s<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a>%s%s%s""" % (tree[i][0], (reltype=="v" and '<i>' or ''), CFG_SITE_URL, tree[i][0], ln, col_dict[id_son], (move_to=="%s%s" %(rtype, i) and ' <img border="0" src="%s/img/move_to.gif">' % CFG_SITE_URL or ''), (move_from=="%s%s" % (rtype, i) and ' <img border="0" src="%s/img/move_from.gif">' % CFG_SITE_URL or ''), (reltype=="v" and '</i>' or ''))
        except KeyError:
            pass
        text += """</td></tr>
        """
    # Close any sub-tables still open at the end of the walk.
    while tables > 0:
        text += """</table></td></tr>
        """
        tables = tables - 1
    text += """</table>"""
    return text
def perform_deletecollection(colID, ln, confirm=-1, callback='yes'):
    """Render the two-step confirmation dialog for deleting a collection and
    perform the deletion once confirmed.

    colID - id of the collection to delete
    ln - interface language
    confirm - dialog state: -1 first question, 0 re-confirmation, 1 delete
    callback - if truthy, embed the output in the edit-collection page
    Raises Exception if delete_col() reports failure.
    """
    subtitle = ''
    output = """
    <span class="warning">
    <strong>
    <dl>
     <dt>WARNING:</dt>
     <dd>When deleting a collection, you also deletes all data related to the collection like translations, relations to other collections and information about which rank methods to use.
     <br />For more information, please go to the <a title="See guide" href="%s/help/admin/websearch-admin-guide">WebSearch guide</a> and read the section regarding deleting a collection.</dd>
    </dl>
    </strong>
    </span>
    """ % CFG_SITE_URL
    col_dict = dict(get_def_name('', "collection"))
    # Collection 1 is the site root and must never be deleted.
    if colID != 1 and colID and int(colID) in col_dict:
        colID = int(colID)
        subtitle = """<a name="4">4. Delete collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.4">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
        # A collection still wired into the tree (as dad or son) is protected.
        res = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_dad=%s", (colID, ))
        res2 = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_son=%s", (colID, ))
        if not res and not res2:
            if confirm in ["-1", -1]:
                text = """Do you want to delete this collection."""
                output += createhiddenform(action="deletecollection#4",
                                           text=text,
                                           colID=colID,
                                           button="Delete",
                                           confirm=0)
            elif confirm in ["0", 0]:
                text = """Are you sure you want to delete this collection."""
                output += createhiddenform(action="deletecollection#4",
                                           text=text,
                                           colID=colID,
                                           button="Confirm",
                                           confirm=1)
            elif confirm in ["1", 1]:
                result = delete_col(colID)
                if not result:
                    # Descriptive message instead of the former bare Exception.
                    raise Exception("failed to delete collection %s" % colID)
        else:
            output = """<b><span class="info">Can not delete a collection that is a part of the collection tree, remove collection from the tree and try again.</span></b>"""
    else:
        subtitle = """4. Delete collection"""
        output = """<b><span class="info">Not possible to delete the root collection</span></b>"""
    body = [output]
    if callback:
        return perform_editcollection(colID, ln, "perform_deletecollection", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_editcollection(colID=1, ln=CFG_SITE_LANG, mtype='', content=''):
    """Interface to modify a collection.

    Each sub-interface (query, restrictions, translations, ...) is rendered by
    its own perform_* function; those functions call back into this one with
    their output in *content*.
    colID - id of the collection
    mtype - name of the perform_* method that produced *content*
    content - pre-rendered output for that method, if any."""
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    if colID not in col_dict:
        return """<b><span class="info">Collection deleted.</span></b>
"""
    fin_output = """
    <table>
    <tr>
    <td><b>Menu</b></td>
    </tr>
    <tr>
    <td>0. <small><a href="editcollection?colID=%s&ln=%s">Show all</a></small></td>
    <td>1. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifydbquery">Modify collection query</a></small></td>
    <td>2. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrestricted">Modify access restrictions</a></small></td>
    <td>3. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifytranslations">Modify translations</a></small></td>
    <td>4. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_deletecollection">Delete collection</a></small></td>
    </tr><tr>
    <td>5. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showportalboxes">Modify portalboxes</a></small></td>
    <td>6. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchfields#6">Modify search fields</a></small></td>
    <td>7. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchoptions#7">Modify search options</a></small></td>
    <td>8. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsortoptions#8">Modify sort options</a></small></td>
    <td>9. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrankmethods#9">Modify rank options</a></small></td>
    </tr><tr>
    <td>10. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showoutputformats#10">Modify output formats</a></small></td>
    <td>11. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_manage_external_collections#11">Configuration of related external collections</a></small></td>
    <td>12. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showdetailedrecordoptions#12">Detailed record page options</a></small></td>
    </tr>
    </table>
    """ % (colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln)
    # (mtype name, renderer) pairs in display order.  For each section: reuse
    # the pre-rendered *content* when it belongs to that section, otherwise
    # render it fresh when either that section was requested or no specific
    # section was requested at all (show-all mode).
    sections = (
        ("perform_modifydbquery", perform_modifydbquery),
        ("perform_modifyrestricted", perform_modifyrestricted),
        ("perform_modifytranslations", perform_modifytranslations),
        ("perform_deletecollection", perform_deletecollection),
        ("perform_showportalboxes", perform_showportalboxes),
        ("perform_showsearchfields", perform_showsearchfields),
        ("perform_showsearchoptions", perform_showsearchoptions),
        ("perform_showsortoptions", perform_showsortoptions),
        ("perform_modifyrankmethods", perform_modifyrankmethods),
        ("perform_showoutputformats", perform_showoutputformats),
        ("perform_manage_external_collections", perform_manage_external_collections),
        ("perform_showdetailedrecordoptions", perform_showdetailedrecordoptions),
    )
    for name, renderer in sections:
        if mtype == name and content:
            fin_output += content
        elif mtype == name or not mtype:
            fin_output += renderer(colID, ln, callback='')
    return addadminbox("Overview of edit options for collection '%s'" % col_dict[colID], [fin_output])
def perform_checkwebcollstatus(colID, ln, confirm=0, callback='yes'):
    """Check status of the collection tables with respect to the webcoll cache.

    Compares the last-modified time of the collection table with the timestamp
    webcoll wrote to its cache file, and lists the last/next scheduled webcoll
    and bibindex BibSched tasks.
    """
    subtitle = """<a name="11"></a>Webcoll Status [<a href="%s/help/admin/websearch-admin-guide#5">?</a>]""" % CFG_SITE_URL
    output = ""
    colID = int(colID)
    output += """<br /><b>Last updates:</b><br />"""
    collection_web_update_time = ""
    collection_table_update_time = get_table_update_time('collection')
    output += "Collection table last updated: %s<br />" % collection_table_update_time
    try:
        # Timestamp written by webcoll when it last refreshed the cache.
        # try/finally ensures the handle is closed even when readline() fails
        # (the original leaked the handle and caught every exception).
        cache_file = open("%s/collections/last_updated" % CFG_CACHEDIR)
        try:
            collection_web_update_time = cache_file.readline().strip()
            output += "Collection cache last updated: %s<br />" % collection_web_update_time
        finally:
            cache_file.close()
    except (IOError, OSError):
        # Missing/unreadable cache file: simply omit the cache timestamp.
        pass
    # reformat collection_web_update_time to the format suitable for comparisons
    try:
        collection_web_update_time = strftime("%Y-%m-%d %H:%M:%S",
                                              time.strptime(collection_web_update_time, "%d %b %Y %H:%M:%S"))
    except ValueError:
        pass
    if collection_table_update_time > collection_web_update_time:
        output += """<br /><b><span class="info">Warning: The collections have been modified since last time Webcoll was executed, to process the changes, Webcoll must be executed.</span></b><br />"""
    header = ['ID', 'Name', 'Time', 'Status', 'Progress']
    # Most recent past run of each relevant BibSched task.
    actions = []
    output += """<br /><b>Last BibSched tasks:</b><br />"""
    for task_name in ('webcoll', 'bibindex'):
        res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc=%s and runtime< now() ORDER by runtime", (task_name, ))
        if len(res) > 0:
            (id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[len(res) - 1]
            actions.append([id, proc, runtime, (status != "" and status or ''), (progress != "" and progress or '')])
        else:
            actions.append(['', task_name, '', '', 'Not executed yet'])
    output += tupletotable(header=header, tuple=actions)
    # First scheduled future run of each task; warn when none is queued.
    output += """<br /><b>Next scheduled BibSched run:</b><br />"""
    actions = []
    scheduled = {}
    for task_name in ('webcoll', 'bibindex'):
        res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc=%s and runtime > now() ORDER by runtime", (task_name, ))
        if len(res) > 0:
            (id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[0]
            actions.append([id, proc, runtime, (status != "" and status or ''), (progress != "" and progress or '')])
            scheduled[task_name] = "yes"
        else:
            actions.append(['', task_name, '', '', 'Not scheduled'])
            scheduled[task_name] = ""
    output += tupletotable(header=header, tuple=actions)
    if scheduled['webcoll'] == "":
        output += """<br /><b><span class="info">Warning: Webcoll is not scheduled for a future run by bibsched, any updates to the collection will not be processed.</span></b><br />"""
    if scheduled['bibindex'] == "":
        output += """<br /><b><span class="info">Warning: Bibindex is not scheduled for a future run by bibsched, any updates to the records will not be processed.</span></b><br />"""
    body = [output]
    if callback:
        return perform_index(colID, ln, "perform_checkwebcollstatus", addadminbox(subtitle, body))
    else:
        return addadminbox(subtitle, body)
def perform_modifyrestricted(colID, ln, rest='', callback='yes', confirm=-1):
    """Point the administrator at the WebAccess interface that manages
    collection access restrictions.
    rest - the groupname (kept for interface compatibility)"""
    subtitle = ''
    output = ""
    col_dict = dict(get_def_name('', "collection"))
    action_id = acc_get_action_id(VIEWRESTRCOLL)
    if colID and int(colID) in col_dict:
        colID = int(colID)
        subtitle = """<a name="2">2. Modify access restrictions for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.2">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
        output = """<p>Please note that Invenio versions greater than <em>0.92.1</em> manage collection restriction via the standard
        <strong><a href="/admin/webaccess/webaccessadmin.py/showactiondetails?id_action=%i">WebAccess Admin Interface</a></strong> (action '%s').</p>
        """ % (action_id, VIEWRESTRCOLL)
    body = [output]
    if not callback:
        return addadminbox(subtitle, body)
    return perform_editcollection(colID, ln, "perform_modifyrestricted", addadminbox(subtitle, body))
def perform_checkcollectionstatus(colID, ln, confirm=0, callback='yes'):
    """Check the configuration of the collections.

    Builds a status table flagging, per collection: a dbquery combined with
    regular sons (1:Conflict), neither dbquery nor sons (2:Empty), and access
    restriction (3:Restricted).
    """
    from invenio.legacy.search_engine import collection_restricted_p, restricted_collection_cache
    subtitle = """<a name="11"></a>Collection Status [<a href="%s/help/admin/websearch-admin-guide#6">?</a>]""" % CFG_SITE_URL
    output = ""
    colID = int(colID)
    col_dict = dict(get_def_name('', "collection"))
    collections = run_sql("SELECT id, name, dbquery, nbrecs FROM collection "
                          "ORDER BY id")
    header = ['ID', 'Name','Query', 'Subcollections', 'Restricted', 'Hosted',
              'I18N', 'Status', 'Number of records']
    actions = []
    # Refresh the restriction cache once, then query it without re-checking.
    restricted_collection_cache.recreate_cache_if_needed()
    for (id, name, dbquery, nbrecs) in collections:
        reg_sons = col_has_son(id, 'r')
        vir_sons = col_has_son(id, 'v')
        status = ""
        # Hosted collections are recognised by their dbquery prefix.
        if str(dbquery).startswith("hostedcollection:"):
            hosted = """<b><span class="info">Yes</span></b>"""
        else:
            hosted = """<b><span class="info">No</span></b>"""
        langs = run_sql("SELECT ln from collectionname where id_collection=%s", (id, ))
        if len(langs) > 0:
            # NOTE(review): each row is a 1-tuple, so "%s" renders its single
            # element; the trailing ", " after the last language is kept as in
            # the original output.
            i8n = ""
            for lang in langs:
                i8n += "%s, " % lang
        else:
            i8n = """<b><span class="info">None</span></b>"""
        if reg_sons and dbquery:
            status = """<b><span class="warning">1:Conflict</span></b>"""
        elif not dbquery and not reg_sons:
            status = """<b><span class="warning">2:Empty</span></b>"""
        if (reg_sons or vir_sons):
            subs = """<b><span class="info">Yes</span></b>"""
        else:
            subs = """<b><span class="info">No</span></b>"""
        if dbquery is None:
            dbquery = """<b><span class="info">No</span></b>"""
        restricted = collection_restricted_p(name, recreate_cache_if_needed=False)
        if restricted:
            restricted = """<b><span class="warning">Yes</span></b>"""
            if status:
                status += """<b><span class="warning">,3:Restricted</span></b>"""
            else:
                status += """<b><span class="warning">3:Restricted</span></b>"""
        else:
            restricted = """<b><span class="info">No</span></b>"""
        if status == "":
            status = """<b><span class="info">OK</span></b>"""
        actions.append([id, """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, id, ln, name), dbquery, subs, restricted, hosted, i8n, status, nbrecs])
    output += tupletotable(header=header, tuple=actions)
    body = [output]
    # NOTE(review): the original returned unconditionally here and carried an
    # unreachable callback branch below; the dead code has been removed and
    # the early return (callback bypass) is preserved.
    return addadminbox(subtitle, body)
def perform_checkexternalcollections(colID, ln, icl=None, update="", confirm=0, callback='yes'):
    """Check the external collections for inconsistencies.

    Compares the collections declared in the configuration
    (external_collections_dictionary) against the "externalcollection"
    database table and offers links to reconcile the two.

    icl - stringified list of inconsistent collection names (from the URL)
    update - "add" to insert the missing rows, "del" to remove the extra ones
    """
    subtitle = """<a name="7"></a>Check external collections [<a href="%s/help/admin/websearch-admin-guide#7">?</a>]""" % CFG_SITE_URL
    output = ""
    colID = int(colID)
    if icl:
        # SECURITY NOTE(review): icl arrives as a request parameter and is
        # turned back into a list with eval().  eval() on user-controlled
        # input allows arbitrary code execution and should be replaced with
        # ast.literal_eval() or explicit parsing.
        if update == "add":
            icl = eval(icl)
            for collection in icl:
                # Parameterized queries: the names are user-influenced, so they
                # must not be interpolated into the SQL string.
                results_select = run_sql("SELECT name FROM externalcollection WHERE name like %s", (collection, ))
                if not results_select:
                    run_sql("INSERT INTO externalcollection (name) VALUES (%s)", (collection, ))
                    output += """<br /><span class=info>New collection \"%s\" has been added to the database table \"externalcollection\".</span><br />""" % (collection)
                else:
                    output += """<br /><span class=info>Collection \"%s\" has already been added to the database table \"externalcollection\" or was already there.</span><br />""" % (collection)
        elif update == "del":
            icl = eval(icl)
            for collection in icl:
                results_select = run_sql("SELECT id FROM externalcollection WHERE name like %s", (collection, ))
                if results_select:
                    # Remove both the collection row and its per-collection states.
                    run_sql("DELETE FROM externalcollection WHERE id like %s", (results_select[0][0], ))
                    run_sql("DELETE FROM collection_externalcollection WHERE id_externalcollection like %s", (results_select[0][0], ))
                    output += """<br /><span class=info>Collection \"%s\" has been deleted from the database table \"externalcollection\".</span><br />""" % (collection)
                else:
                    output += """<br /><span class=info>Collection \"%s\" has already been delete from the database table \"externalcollection\" or was never there.</span><br />""" % (collection)
    external_collections_file = []
    external_collections_db = []
    for coll in external_collections_dictionary.values():
        external_collections_file.append(coll.name)
    external_collections_file.sort()
    query = """SELECT name from externalcollection"""
    results = run_sql(query)
    for result in results:
        external_collections_db.append(result[0])
    external_collections_db.sort()
    number_file = len(external_collections_file)
    number_db = len(external_collections_db)
    if external_collections_file == external_collections_db:
        output += """<br /><span class="info">External collections are consistent.</span><br /><br />
                    - database table \"externalcollection\" has %(number_db)s collections<br />
                    - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections""" % {
                        "number_db" : number_db,
                        "number_file" : number_file}
    elif len(external_collections_file) > len(external_collections_db):
        external_collections_diff = list(set(external_collections_file) - set(external_collections_db))
        external_collections_db.extend(external_collections_diff)
        external_collections_db.sort()
        if external_collections_file == external_collections_db:
            output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
                        - database table \"externalcollection\" has %(number_db)s collections
                        (<span class="warning">missing: %(diff)s</span>)<br />
                        - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
                        <br /><br /><a href="%(site_url)s/admin/websearch/websearchadmin.py/checkexternalcollections?colID=%(colID)s&icl=%(diff)s&update=add&ln=%(ln)s">
                        Click here</a> to update your database adding the missing collections. If the problem persists please check your configuration manually.""" % {
                            "number_db" : number_db,
                            "number_file" : number_file,
                            "diff" : external_collections_diff,
                            "site_url" : CFG_SITE_URL,
                            "colID" : colID,
                            "ln" : ln}
        else:
            output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
                        - database table \"externalcollection\" has %(number_db)s collections<br />
                        - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
                        <br /><br /><span class="warning">The external collections do not match.</span>
                        <br />To fix the problem please check your configuration manually.""" % {
                            "number_db" : number_db,
                            "number_file" : number_file}
    elif len(external_collections_file) < len(external_collections_db):
        external_collections_diff = list(set(external_collections_db) - set(external_collections_file))
        external_collections_file.extend(external_collections_diff)
        external_collections_file.sort()
        if external_collections_file == external_collections_db:
            output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
                        - database table \"externalcollection\" has %(number_db)s collections
                        (<span class="warning">extra: %(diff)s</span>)<br />
                        - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
                        <br /><br /><a href="%(site_url)s/admin/websearch/websearchadmin.py/checkexternalcollections?colID=%(colID)s&icl=%(diff)s&update=del&ln=%(ln)s">
                        Click here</a> to force remove the extra collections from your database (warning: use with caution!). If the problem persists please check your configuration manually.""" % {
                            "number_db" : number_db,
                            "number_file" : number_file,
                            "diff" : external_collections_diff,
                            "site_url" : CFG_SITE_URL,
                            "colID" : colID,
                            "ln" : ln}
        else:
            output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
                        - database table \"externalcollection\" has %(number_db)s collections<br />
                        - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
                        <br /><br /><span class="warning">The external collections do not match.</span>
                        <br />To fix the problem please check your configuration manually.""" % {
                            "number_db" : number_db,
                            "number_file" : number_file}
    else:
        output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
                    - database table \"externalcollection\" has %(number_db)s collections<br />
                    - configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
                    <br /><br /><span class="warning">The number of external collections is the same but the collections do not match.</span>
                    <br />To fix the problem please check your configuration manually.""" % {
                        "number_db" : number_db,
                        "number_file" : number_file}
    body = [output]
    # NOTE(review): the original returned unconditionally here and carried an
    # unreachable callback branch below; the dead code has been removed.
    return addadminbox(subtitle, body)
def col_has_son(colID, rtype='r'):
    """Return True if the collection has at least one son."""
    rows = run_sql("SELECT id_son FROM collection_collection WHERE id_dad=%s and type=%s LIMIT 1", (colID, rtype))
    return rows != ()
def get_col_tree(colID, rtype=''):
    """Returns a presentation of the tree as a list. TODO: Add loop detection
    colID - startpoint for the tree
    rtype - get regular or virtual part of the tree

    Each entry is a tuple (id_son, up, down, id_dad, type) where up/down are
    1 when the node can be moved in that direction among its siblings."""
    try:
        colID = int(colID)
        stack = [colID]
        ssize = 0
        # The root is always present, flagged as a regular relation.
        tree = [(colID, 0, 0, colID, 'r')]
        while len(stack) > 0:
            ccolID = stack.pop()
            # Only the root level is filtered by rtype; deeper levels fetch
            # sons of every type.
            if ccolID == colID and rtype:
                res = run_sql("SELECT id_son, score, type FROM collection_collection WHERE id_dad=%s AND type=%s ORDER BY score DESC,id_son", (ccolID, rtype))
            else:
                res = run_sql("SELECT id_son, score, type FROM collection_collection WHERE id_dad=%s ORDER BY score DESC,id_son", (ccolID, ))
            ssize += 1
            ntree = []
            for i in range(0, len(res)):
                id_son = res[i][0]
                score = res[i][1]
                # NOTE(review): this rebinds the *rtype* parameter on every
                # row, so later root-level checks see the last row's type.
                rtype = res[i][2]
                stack.append(id_son)
                # Last row of the DESC result (lowest score) cannot move up.
                if i == (len(res) - 1):
                    up = 0
                else:
                    up = 1
                # First row (highest score) cannot move down.
                if i == 0:
                    down = 0
                else:
                    down = 1
                ntree.insert(0, (id_son, up, down, ccolID, rtype))
            # Splice the sons right after their dad's position in the list.
            tree = tree[0:ssize] + ntree + tree[ssize:len(tree)]
        return tree
    except StandardError as e:
        register_exception()
        return ()
def add_col_dad_son(add_dad, add_son, rtype):
    """Attach a collection as the last son of another collection.
    add_dad - add to this collection id
    add_son - add this collection id
    rtype - either regular or virtual
    Returns (1, new_score) on success, (0, exception) on failure."""
    try:
        rows = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s ORDER BY score ASC", (add_dad, ))
        # New son is placed after the current highest score.
        highscore = max([int(row[0]) for row in rows] + [0]) + 1
        run_sql("INSERT INTO collection_collection(id_dad,id_son,score,type) values(%s,%s,%s,%s)", (add_dad, add_son, highscore, rtype))
        return (1, highscore)
    except StandardError as e:
        register_exception()
        return (0, e)
def compare_on_val(first, second):
    """Compare two (key, value) pairs on their second element.

    Returns -1, 0 or 1 like the Python 2 builtin cmp(); written without
    cmp() so the comparator also works on Python 3.
    """
    return (first[1] > second[1]) - (first[1] < second[1])
def get_col_fld(colID=-1, type = '', id_field=''):
    """Return field/fieldvalue rows attached to collections.
    colID - restrict to one collection (-1 for all)
    id_field - restrict to one field
    type - restrict to one relation type"""
    query = "SELECT id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue, field WHERE id_field=field.id"
    args = []
    # Append optional filters in a fixed order matching the placeholders.
    if colID > -1:
        query += " AND id_collection=%s"
        args.append(colID)
    if id_field:
        query += " AND id_field=%s"
        args.append(id_field)
    if type:
        query += " AND type=%s"
        args.append(type)
    query += " ORDER BY type, score desc, score_fieldvalue desc"
    return run_sql(query, tuple(args))
def get_col_pbx(colID=-1, ln='', position = ''):
    """Return portalboxes attached to collections, optionally filtered.
    colID - restrict to one collection (-1 for all)
    ln - restrict to one language
    position - restrict to one portalbox position"""
    query = "SELECT id_portalbox, id_collection, ln, score, position, title, body FROM collection_portalbox, portalbox WHERE id_portalbox = portalbox.id"
    args = []
    # Append optional filters in a fixed order matching the placeholders.
    if colID > -1:
        query += " AND id_collection=%s"
        args.append(colID)
    if ln:
        query += " AND ln=%s"
        args.append(ln)
    if position:
        query += " AND position=%s"
        args.append(position)
    query += " ORDER BY position, ln, score desc"
    return run_sql(query, tuple(args))
def get_col_fmt(colID=-1):
    """Return the output formats attached to collections.
    colID - a specific collection id, or -1/"-1" for all collections."""
    if colID in [-1, "-1"]:
        return run_sql("SELECT id_format, id_collection, code, score FROM collection_format, format WHERE id_format = format.id ORDER BY score desc")
    return run_sql("SELECT id_format, id_collection, code, score FROM collection_format, format WHERE id_format = format.id AND id_collection=%s ORDER BY score desc", (colID, ))
def get_col_rnk(colID, ln):
    """Return the rank methods the given collection is attached to.
    colID - id from collection table
    Returns () when the lookup fails."""
    try:
        attached = dict(run_sql("SELECT id_rnkMETHOD, '' FROM collection_rnkMETHOD WHERE id_collection=%s", (colID, )))
        # Keep only the globally defined rank methods this collection uses.
        return [method for method in get_def_name('', "rnkMETHOD") if method[0] in attached]
    except StandardError:
        return ()
def get_pbx():
    """Return all portalboxes as (id, title, body) rows."""
    return run_sql("SELECT id, title, body FROM portalbox ORDER by title,body")
def get_fld_value(fldvID = ''):
    """Return fieldvalue rows, optionally restricted to one id.
    fldvID - a specific fieldvalue id, or '' for all."""
    if fldvID:
        return run_sql("SELECT id, name, value FROM fieldvalue WHERE id=%s ORDER BY name", (fldvID, ))
    return run_sql("SELECT id, name, value FROM fieldvalue ORDER BY name")
def get_pbx_pos():
    """Return a mapping of portalbox position codes to display names."""
    return {"rt": "Right Top",
            "lt": "Left Top",
            "te": "Title Epilog",
            "tp": "Title Prolog",
            "ne": "Narrow by coll epilog",
            "np": "Narrow by coll prolog"}
def get_sort_nametypes():
    """Return the translation-name codes used for field options."""
    return {'soo': 'Sort options',
            'seo': 'Search options',
            'sew': 'Search within'}
def get_fmt_nametypes():
    """Return the translation-name codes used for output formats."""
    return [('ln', 'Long name')]
def get_fld_nametypes():
    """Return the translation-name codes used for fields."""
    return [('ln', 'Long name')]
def get_col_nametypes():
    """Return the translation-name codes used for collections."""
    return [('ln', 'Collection name')]
def find_last(tree, start_son):
    """Return the index of the previous tree entry sharing start_son's
    father, or None when there is no earlier sibling."""
    id_dad = tree[start_son][3]
    # Scan backwards from the entry just before start_son down to index 0.
    for idx in range(start_son - 1, -1, -1):
        if tree[idx][3] == id_dad:
            return idx
def find_next(tree, start_son):
    """Return the index of the next tree entry sharing start_son's father.

    Returns None when there is no later sibling.  The original implementation
    incremented past the end of the list and raised IndexError in that case
    (`while start_son < len(tree)` followed by `tree[start_son + 1]` access).
    """
    id_dad = tree[start_son][3]
    for idx in range(start_son + 1, len(tree)):
        if tree[idx][3] == id_dad:
            return idx
    return None
def remove_col_subcol(id_son, id_dad, type):
    """Remove a collection as a son of another collection in the tree, if collection isn't used elsewhere in the tree, remove all registered sons of the id_son.
    id_son - collection id of son to remove
    id_dad - the id of the dad
    type - link type ('r'egular/'v'irtual); only used when id_son == id_dad.
    Returns (1, "") on success, (0, error) on failure."""
    try:
        if id_son != id_dad:
            # normal case: snapshot the subtree first, then detach id_son
            # from id_dad regardless of link type
            tree = get_col_tree(id_son)
            run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (id_son, id_dad))
        else:
            # self-link: remove only the link of the given type
            tree = get_col_tree(id_son, type)
            run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s and type=%s", (id_son, id_dad, type))
        # if id_son is no longer attached anywhere with this type, drop all
        # of its own child links as well (using the snapshot taken above)
        if not run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_son=%s and type=%s", (id_son, type)):
            for (id, up, down, dad, rtype) in tree:
                run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (id, dad))
        return (1, "")
    except StandardError as e:
        return (0, e)
def check_col(add_dad, add_son):
    """Check if the collection can be placed as a son of the dad without causing loops.
    add_dad - collection id
    add_son - collection id

    Walks upwards from add_dad through collection_collection ancestors; if
    add_son is found among them, attaching it under add_dad would create a
    cycle.  An already-existing dad->son edge is also rejected.
    Returns (1, "") when the attachment is safe, (0, error) on rejection.
    NOTE(review): the cycle branch returns a bare 0 instead of a (0, e)
    tuple -- callers must handle both shapes; confirm before normalising."""
    try:
        stack = [add_dad]
        # refuse a duplicate of an already-existing dad->son edge
        res = run_sql("SELECT id_dad FROM collection_collection WHERE id_dad=%s AND id_son=%s", (add_dad, add_son))
        if res:
            raise StandardError
        while len(stack) > 0:
            colID = stack.pop()
            # fetch the fathers of colID, i.e. walk one level up the tree
            res = run_sql("SELECT id_dad FROM collection_collection WHERE id_son=%s", (colID, ))
            for id in res:
                if int(id[0]) == int(add_son):
                    # raise StandardError # this was the original but it didnt work
                    return(0)
                else:
                    stack.append(id[0])
        return (1, "")
    except StandardError as e:
        return (0, e)
def attach_rnk_col(colID, rnkID):
"""attach rank method to collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql("INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)", (colID, rnkID))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def detach_rnk_col(colID, rnkID):
"""detach rank method from collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s", (colID, rnkID))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def switch_col_treescore(col_1, col_2):
try:
res1 = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s and id_son=%s", (col_1[3], col_1[0]))
res2 = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s and id_son=%s", (col_2[3], col_2[0]))
res = run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s and id_son=%s", (res2[0][0], col_1[3], col_1[0]))
res = run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s and id_son=%s", (res1[0][0], col_2[3], col_2[0]))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def move_col_tree(col_from, col_to, move_to_rtype=''):
"""Move a collection from one point in the tree to another. becomes a son of the endpoint.
col_from - move this collection from current point
col_to - and set it as a son of this collection.
move_to_rtype - either virtual or regular collection"""
try:
res = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s ORDER BY score asc", (col_to[0], ))
highscore = 0
for score in res:
if int(score[0]) > highscore:
highscore = int(score[0])
highscore += 1
if not move_to_rtype:
move_to_rtype = col_from[4]
res = run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (col_from[0], col_from[3]))
res = run_sql("INSERT INTO collection_collection(id_dad,id_son,score,type) values(%s,%s,%s,%s)", (col_to[0], col_from[0], highscore, move_to_rtype))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def remove_pbx(colID, pbxID, ln):
"""Removes a portalbox from the collection given.
colID - the collection the format is connected to
pbxID - the portalbox which should be removed from the collection.
ln - the language of the portalbox to be removed"""
try:
res = run_sql("DELETE FROM collection_portalbox WHERE id_collection=%s AND id_portalbox=%s AND ln=%s", (colID, pbxID, ln))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def remove_fmt(colID, fmtID):
"""Removes a format from the collection given.
colID - the collection the format is connected to
fmtID - the format which should be removed from the collection."""
try:
res = run_sql("DELETE FROM collection_format WHERE id_collection=%s AND id_format=%s", (colID, fmtID))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def remove_fld(colID, fldID, fldvID=''):
"""Removes a field from the collection given.
colID - the collection the format is connected to
fldID - the field which should be removed from the collection."""
try:
sql = "DELETE FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s"
params = [colID, fldID]
if fldvID:
if fldvID != "None":
sql += " AND id_fieldvalue=%s"
params.append(fldvID)
else:
sql += " AND id_fieldvalue is NULL"
res = run_sql(sql, tuple(params))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def delete_fldv(fldvID):
"""Deletes all data for the given fieldvalue
fldvID - delete all data in the tables associated with fieldvalue and this id"""
try:
res = run_sql("DELETE FROM collection_field_fieldvalue WHERE id_fieldvalue=%s", (fldvID, ))
res = run_sql("DELETE FROM fieldvalue WHERE id=%s", (fldvID, ))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def delete_pbx(pbxID):
"""Deletes all data for the given portalbox
pbxID - delete all data in the tables associated with portalbox and this id """
try:
res = run_sql("DELETE FROM collection_portalbox WHERE id_portalbox=%s", (pbxID, ))
res = run_sql("DELETE FROM portalbox WHERE id=%s", (pbxID, ))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def delete_fmt(fmtID):
"""Deletes all data for the given format
fmtID - delete all data in the tables associated with format and this id """
try:
res = run_sql("DELETE FROM format WHERE id=%s", (fmtID, ))
res = run_sql("DELETE FROM collection_format WHERE id_format=%s", (fmtID, ))
res = run_sql("DELETE FROM formatname WHERE id_format=%s", (fmtID, ))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def delete_col(colID):
    """Deletes all data for the given collection
    colID - delete all data in the tables associated with collection and this id

    Cascades manually over every table referencing the collection: names,
    rank methods, tree links (as dad and as son), portalboxes, formats and
    field/fieldvalue associations.
    Returns (1, "") on success, (0, error) on failure."""
    try:
        res = run_sql("DELETE FROM collection WHERE id=%s", (colID, ))
        res = run_sql("DELETE FROM collectionname WHERE id_collection=%s", (colID, ))
        res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_collection=%s", (colID, ))
        res = run_sql("DELETE FROM collection_collection WHERE id_dad=%s", (colID, ))
        res = run_sql("DELETE FROM collection_collection WHERE id_son=%s", (colID, ))
        res = run_sql("DELETE FROM collection_portalbox WHERE id_collection=%s", (colID, ))
        res = run_sql("DELETE FROM collection_format WHERE id_collection=%s", (colID, ))
        res = run_sql("DELETE FROM collection_field_fieldvalue WHERE id_collection=%s", (colID, ))
        return (1, "")
    except StandardError as e:
        register_exception()
        return (0, e)
def add_fmt(code, name, rtype):
"""Add a new output format. Returns the id of the format.
code - the code for the format, max 6 chars.
name - the default name for the default language of the format.
rtype - the default nametype"""
try:
res = run_sql("INSERT INTO format (code, name) values (%s,%s)", (code, name))
fmtID = run_sql("SELECT id FROM format WHERE code=%s", (code,))
res = run_sql("INSERT INTO formatname(id_format, type, ln, value) VALUES (%s,%s,%s,%s)",
(fmtID[0][0], rtype, CFG_SITE_LANG, name))
return (1, fmtID)
except StandardError as e:
register_exception()
return (0, e)
def update_fldv(fldvID, name, value):
"""Modify existing fieldvalue
fldvID - id of fieldvalue to modify
value - the value of the fieldvalue
name - the name of the fieldvalue."""
try:
res = run_sql("UPDATE fieldvalue set name=%s where id=%s", (name, fldvID))
res = run_sql("UPDATE fieldvalue set value=%s where id=%s", (value, fldvID))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def add_fldv(name, value):
"""Add a new fieldvalue, returns id of fieldvalue
value - the value of the fieldvalue
name - the name of the fieldvalue."""
try:
res = run_sql("SELECT id FROM fieldvalue WHERE name=%s and value=%s", (name, value))
if not res:
res = run_sql("INSERT INTO fieldvalue (name, value) values (%s,%s)", (name, value))
res = run_sql("SELECT id FROM fieldvalue WHERE name=%s and value=%s", (name, value))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError as e:
register_exception()
return (0, e)
def add_pbx(title, body):
try:
res = run_sql("INSERT INTO portalbox (title, body) values (%s,%s)", (title, body))
res = run_sql("SELECT id FROM portalbox WHERE title=%s AND body=%s", (title, body))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError as e:
register_exception()
return (0, e)
def add_col(colNAME, dbquery=None):
    """Adds a new collection to collection table
    colNAME - the default name for the collection, saved to collection and collectionname
    dbquery - query related to the collection
    Returns (1, new collection id) on success, (0, error) on failure."""
    # BTW, sometimes '' are passed instead of None, so change them to None
    if not dbquery:
        dbquery = None
    try:
        # first entry of get_col_nametypes() is the default name type ('ln')
        rtype = get_col_nametypes()[0][0]
        colID = run_sql("SELECT id FROM collection WHERE id=1")
        if colID:
            res = run_sql("INSERT INTO collection (name,dbquery) VALUES (%s,%s)",
                          (colNAME,dbquery))
        else:
            # bootstrap case: the table is missing the root collection, so
            # force the very first collection to get id=1
            res = run_sql("INSERT INTO collection (id,name,dbquery) VALUES (1,%s,%s)",
                          (colNAME,dbquery))
        # look the new id up by name (assumes collection names are unique)
        colID = run_sql("SELECT id FROM collection WHERE name=%s", (colNAME,))
        res = run_sql("INSERT INTO collectionname(id_collection, type, ln, value) VALUES (%s,%s,%s,%s)",
                      (colID[0][0], rtype, CFG_SITE_LANG, colNAME))
        if colID:
            return (1, colID[0][0])
        else:
            raise StandardError
    except StandardError as e:
        register_exception()
        return (0, e)
def add_col_pbx(colID, pbxID, ln, position, score=''):
    """add a portalbox to the collection.
    colID - the id of the collection involved
    pbxID - the portalbox to add
    ln - which language the portalbox is for
    score - decides which portalbox is the most important
    position - position on page the portalbox should appear.
    Returns (1, "") on success, (0, error) on failure."""
    try:
        if score:
            # Bug fix: the ln placeholder was written as '%s' (quoted).  The
            # DB driver already quotes bound parameters, so the extra quotes
            # corrupted the stored value; now matches the INSERT in the
            # else-branch below.
            res = run_sql("INSERT INTO collection_portalbox(id_portalbox, id_collection, ln, score, position) values (%s,%s,%s,%s,%s)", (pbxID, colID, ln, score, position))
        else:
            # no score given: append after the current highest score for this
            # (collection, language, position) slot
            res = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and ln=%s and position=%s ORDER BY score desc, ln, position", (colID, ln, position))
            if res:
                score = int(res[0][0])
            else:
                score = 0
            res = run_sql("INSERT INTO collection_portalbox(id_portalbox, id_collection, ln, score, position) values (%s,%s,%s,%s,%s)", (pbxID, colID, ln, (score + 1), position))
        return (1, "")
    except StandardError as e:
        register_exception()
        return (0, e)
def add_col_fmt(colID, fmtID, score=''):
"""Add a output format to the collection.
colID - the id of the collection involved
fmtID - the id of the format.
score - the score of the format, decides sorting, if not given, place the format on top"""
try:
if score:
res = run_sql("INSERT INTO collection_format(id_format, id_collection, score) values (%s,%s,%s)", (fmtID, colID, score))
else:
res = run_sql("SELECT score FROM collection_format WHERE id_collection=%s ORDER BY score desc", (colID, ))
if res:
score = int(res[0][0])
else:
score = 0
res = run_sql("INSERT INTO collection_format(id_format, id_collection, score) values (%s,%s,%s)", (fmtID, colID, (score + 1)))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def add_col_fld(colID, fldID, type, fldvID=''):
"""Add a sort/search/field to the collection.
colID - the id of the collection involved
fldID - the id of the field.
fldvID - the id of the fieldvalue.
type - which type, seo, sew...
score - the score of the format, decides sorting, if not given, place the format on top"""
try:
if fldvID and fldvID not in [-1, "-1"]:
run_sql("DELETE FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s and id_fieldvalue is NULL", (colID, fldID, type))
res = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s ORDER BY score desc", (colID, fldID, type))
if res:
score = int(res[0][0])
res = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s ORDER BY score_fieldvalue desc", (colID, fldID, type))
else:
res = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and type=%s ORDER BY score desc", (colID, type))
if res:
score = int(res[0][0]) + 1
else:
score = 1
res = run_sql("SELECT id_collection,id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue where id_field=%s and id_collection=%s and type=%s and id_fieldvalue=%s", (fldID, colID, type, fldvID))
if not res:
run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=score_fieldvalue+1 WHERE id_field=%s AND id_collection=%s and type=%s", (fldID, colID, type))
res = run_sql("INSERT INTO collection_field_fieldvalue(id_field, id_fieldvalue, id_collection, type, score, score_fieldvalue) values (%s,%s,%s,%s,%s,%s)", (fldID, fldvID, colID, type, score, 1))
else:
return (0, (1, "Already exists"))
else:
res = run_sql("SELECT id_collection,id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s AND type=%s and id_field=%s and id_fieldvalue is NULL", (colID, type, fldID))
if res:
return (0, (1, "Already exists"))
else:
run_sql("UPDATE collection_field_fieldvalue SET score=score+1")
res = run_sql("INSERT INTO collection_field_fieldvalue(id_field, id_collection, type, score,score_fieldvalue) values (%s,%s,%s,%s, 0)", (fldID, colID, type, 1))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def modify_dbquery(colID, dbquery=None):
    """Modify the dbquery of an collection.
    colID - the id of the collection involved
    dbquery - the new dbquery
    Returns (1, "") on success, (0, error) on failure."""
    # BTW, sometimes '' is passed instead of None, so change it to None
    if not dbquery:
        dbquery = None
    try:
        res = run_sql("UPDATE collection SET dbquery=%s WHERE id=%s", (dbquery, colID))
        return (1, "")
    except StandardError as e:
        register_exception()
        return (0, e)
def modify_pbx(colID, pbxID, sel_ln, score='', position='', title='', body=''):
"""Modify a portalbox
colID - the id of the collection involved
pbxID - the id of the portalbox that should be modified
sel_ln - the language of the portalbox that should be modified
title - the title
body - the content
score - if several portalboxes in one position, who should appear on top.
position - position on page"""
try:
if title:
res = run_sql("UPDATE portalbox SET title=%s WHERE id=%s", (title, pbxID))
if body:
res = run_sql("UPDATE portalbox SET body=%s WHERE id=%s", (body, pbxID))
if score:
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (score, colID, pbxID, sel_ln))
if position:
res = run_sql("UPDATE collection_portalbox SET position=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (position, colID, pbxID, sel_ln))
return (1, "")
except Exception as e:
register_exception()
return (0, e)
def switch_fld_score(colID, id_1, id_2):
"""Switch the scores of id_1 and id_2 in collection_field_fieldvalue
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s", (colID, id_1))
res2 = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s", (colID, id_2))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
else:
res = run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (res2[0][0], colID, id_1))
res = run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (res1[0][0], colID, id_2))
return (1, "")
except StandardError as e:
register_exception()
return (0, e)
def switch_fld_value_score(colID, id_1, fldvID_1, fldvID_2):
"""Switch the scores of two field_value
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (colID, id_1, fldvID_1))
res2 = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (colID, id_1, fldvID_2))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
else:
res = run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (res2[0][0], colID, id_1, fldvID_1))
res = run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (res1[0][0], colID, id_1, fldvID_2))
return (1, "")
except Exception as e:
register_exception()
return (0, e)
def switch_pbx_score(colID, id_1, id_2, sel_ln):
"""Switch the scores of id_1 and id_2 in the table given by the argument.
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and id_portalbox=%s and ln=%s", (colID, id_1, sel_ln))
res2 = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and id_portalbox=%s and ln=%s", (colID, id_2, sel_ln))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (res2[0][0], colID, id_1, sel_ln))
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (res1[0][0], colID, id_2, sel_ln))
return (1, "")
except Exception as e:
register_exception()
return (0, e)
def switch_score(colID, id_1, id_2, table):
    """Switch the scores of id_1 and id_2 in the table given by the argument.
    colID - collection the id_1 or id_2 is connected to
    id_1/id_2 - id field from tables like format..portalbox...
    table - name of the table (suffix of collection_<table>)

    NOTE(review): *table* is interpolated into the SQL string with %%-format,
    not bound as a parameter -- safe only because callers pass fixed internal
    table suffixes, never user input; verify at call sites."""
    try:
        res1 = run_sql("SELECT score FROM collection_%s WHERE id_collection=%%s and id_%s=%%s" % (table, table), (colID, id_1))
        res2 = run_sql("SELECT score FROM collection_%s WHERE id_collection=%%s and id_%s=%%s" % (table, table), (colID, id_2))
        if res1[0][0] == res2[0][0]:
            # equal scores cannot be swapped meaningfully
            return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
        res = run_sql("UPDATE collection_%s SET score=%%s WHERE id_collection=%%s and id_%s=%%s" % (table, table), (res2[0][0], colID, id_1))
        res = run_sql("UPDATE collection_%s SET score=%%s WHERE id_collection=%%s and id_%s=%%s" % (table, table), (res1[0][0], colID, id_2))
        return (1, "")
    except Exception as e:
        register_exception()
        return (0, e)
def get_detailed_page_tabs(colID=None, recID=None, ln=CFG_SITE_LANG):
"""
Returns the complete list of tabs to be displayed in the
detailed record pages.
Returned structured is a dict with
- key : last component of the url that leads to detailed record tab: http:www.../CFG_SITE_RECORD/74/key
- values: a dictionary with the following keys:
- label: *string* label to be printed as tab (Not localized here)
- visible: *boolean* if False, tab should not be shown
- enabled: *boolean* if True, tab should be disabled
- order: *int* position of the tab in the list of tabs
- ln: language of the tab labels
returns dict
"""
_ = gettext_set_language(ln)
tabs = {'metadata' : {'label': _('Information'), 'visible': False, 'enabled': True, 'order': 1},
'references': {'label': _('References'), 'visible': False, 'enabled': True, 'order': 2},
'citations' : {'label': _('Citations'), 'visible': False, 'enabled': True, 'order': 3},
'keywords' : {'label': _('Keywords'), 'visible': False, 'enabled': True, 'order': 4},
'comments' : {'label': _('Comments'), 'visible': False, 'enabled': True, 'order': 5},
'reviews' : {'label': _('Reviews'), 'visible': False, 'enabled': True, 'order': 6},
'usage' : {'label': _('Usage statistics'), 'visible': False, 'enabled': True, 'order': 7},
'files' : {'label': _('Files'), 'visible': False, 'enabled': True, 'order': 8},
'plots' : {'label': _('Plots'), 'visible': False, 'enabled': True, 'order': 9},
'holdings' : {'label': _('Holdings'), 'visible': False, 'enabled': True, 'order': 10},
'linkbacks' : {'label': _('Linkbacks'), 'visible': False, 'enabled': True, 'order': 11},
'hepdata' : {'label': _('HepData'), 'visible': False, 'enabled': True, 'order': 12},
}
res = run_sql("SELECT tabs FROM collectiondetailedrecordpagetabs " + \
"WHERE id_collection=%s", (colID, ))
if len(res) > 0:
tabs_state = res[0][0].split(';')
for tab_state in tabs_state:
if tab_state in tabs:
tabs[tab_state]['visible'] = True;
else:
# no preference set for this collection.
# assume all tabs are displayed
for key in tabs.keys():
tabs[key]['visible'] = True
if not CFG_WEBLINKBACK_TRACKBACK_ENABLED:
tabs['linkbacks']['visible'] = False
if not CFG_WEBCOMMENT_ALLOW_COMMENTS:
tabs['comments']['visible'] = False
tabs['comments']['enabled'] = False
if not CFG_WEBCOMMENT_ALLOW_REVIEWS:
tabs['reviews']['visible'] = False
tabs['reviews']['enabled'] = False
if recID is not None:
# Disable references if no references found
#bfo = BibFormatObject(recID)
#if bfe_references.format_element(bfo, '', '') == '':
# tabs['references']['enabled'] = False
## FIXME: the above was commented out because bfe_references
## may be too slow. And we do not really need this anyway
## because we can disable tabs in WebSearch Admin on a
## collection-by-collection basis. If we need this, then we
## should probably call bfo.fields('999') here that should be
## much faster than calling bfe_references.
# Disable citations if not citations found
#if len(get_cited_by(recID)) == 0:
# tabs['citations']['enabled'] = False
## FIXME: the above was commented out because get_cited_by()
## may be too slow. And we do not really need this anyway
## because we can disable tags in WebSearch Admin on a
## collection-by-collection basis.
# Disable Files tab if no file found except for Plots:
disable_files_tab_p = True
for abibdoc in BibRecDocs(recID).list_bibdocs():
abibdoc_type = abibdoc.get_type()
if abibdoc_type == 'Plot':
continue # ignore attached plots
else:
if CFG_INSPIRE_SITE and not \
abibdoc_type in ('', 'INSPIRE-PUBLIC', 'Supplementary Material'):
# ignore non-empty, non-INSPIRE-PUBLIC, non-suppl doctypes for INSPIRE
continue
# okay, we found at least one non-Plot file:
disable_files_tab_p = False
break
if disable_files_tab_p:
tabs['files']['enabled'] = False
#Disable holdings tab if collection != Books
collection = run_sql("""select name from collection where id=%s""", (colID, ))
if collection[0][0] != 'Books':
tabs['holdings']['enabled'] = False
# Disable Plots tab if no docfile of doctype Plot found
brd = BibRecDocs(recID)
if len(brd.list_bibdocs('Plot')) == 0:
tabs['plots']['enabled'] = False
if CFG_CERN_SITE:
from invenio.legacy.search_engine import get_collection_reclist
if recID in get_collection_reclist("Books & Proceedings"):
tabs['holdings']['visible'] = True
tabs['holdings']['enabled'] = True
# now treating the HEP data -> we have to check if there is HepData
# associated with the record and if so, make the tab visible and enabled
has_hepdata = record_has_hepdata_attached(recID)
tabs['hepdata']['visible'] = has_hepdata
tabs['hepdata']['enabled'] = has_hepdata
tabs[''] = tabs['metadata']
del tabs['metadata']
return tabs
def record_has_hepdata_attached(recID):
    """returns True or False depending if there is HepData attached or not"""
    # local import to avoid a circular dependency with the search engine
    from invenio.legacy.search_engine import search_pattern
    # records referencing this recID via the 786__w field carry HepData
    return len(search_pattern(p="786__w:%s" % (str(recID)))) > 0
def get_detailed_page_tabs_counts(recID):
"""
Returns the number of citations, references and comments/reviews
that have to be shown on the corresponding tabs in the
detailed record pages
@param recID: record id
@return: dictionary with following keys
'Citations': number of citations to be shown in the "Citations" tab
'References': number of references to be shown in the "References" tab
'Comments': number of comments to be shown in the "Comments" tab
'Reviews': number of reviews to be shown in the "Reviews" tab
"""
num_comments = 0 #num of comments
num_reviews = 0 #num of reviews
tabs_counts = {'Citations' : 0,
'References' : -1,
'Discussions' : 0,
'Comments' : 0,
'Reviews' : 0
}
from invenio.legacy.search_engine import get_field_tags, get_record
if CFG_BIBRANK_SHOW_CITATION_LINKS:
if CFG_INSPIRE_SITE:
from invenio.legacy.search_engine import search_unit
citers_recids = intbitset(get_cited_by(recID))
citeable_recids = search_unit(p='citeable', f='collection')
tabs_counts['Citations'] = len(citers_recids & citeable_recids)
else:
tabs_counts['Citations'] = get_cited_by_count(recID)
if not CFG_CERN_SITE:#FIXME:should be replaced by something like CFG_SHOW_REFERENCES
reftag = ""
reftags = get_field_tags("reference")
if reftags:
reftag = reftags[0]
tmprec = get_record(recID)
if reftag and len(reftag) > 4:
tabs_counts['References'] = len(record_get_field_instances(tmprec, reftag[0:3], reftag[3], reftag[4]))
# obtain number of comments/reviews
from invenio.legacy.webcomment.adminlib import get_nb_reviews, get_nb_comments
if CFG_WEBCOMMENT_ALLOW_COMMENTS and CFG_WEBSEARCH_SHOW_COMMENT_COUNT:
num_comments = get_nb_comments(recID, count_deleted=False)
if CFG_WEBCOMMENT_ALLOW_REVIEWS and CFG_WEBSEARCH_SHOW_REVIEW_COUNT:
num_reviews = get_nb_reviews(recID, count_deleted=False)
if num_comments:
tabs_counts['Comments'] = num_comments
tabs_counts['Discussions'] += num_comments
if num_reviews:
tabs_counts['Reviews'] = num_reviews
tabs_counts['Discussions'] += num_reviews
return tabs_counts
|
egabancho/invenio
|
invenio/legacy/websearch/adminlib.py
|
Python
|
gpl-2.0
| 165,793
|
# -*- coding: utf-8 -*-
import os.path
import django
from django.core.management import call_command
def main(manifest_path):
    """Load every Django fixture listed (one per line) in the manifest file.

    Fixture paths are resolved relative to the manifest's own directory.

    Bug fix: blank lines (e.g. a trailing newline at end of file) previously
    produced a loaddata call on the bare base directory; they are now
    skipped.
    """
    basedir = os.path.dirname(manifest_path)
    with open(manifest_path, 'r') as manifest:
        for line in manifest:
            fixture_name = line.strip()
            if not fixture_name:
                continue  # ignore empty/whitespace-only manifest lines
            fixture_path = os.path.join(basedir, fixture_name)
            call_command('loaddata', fixture_path)
# CLI entry point: take an optional manifest path and load its fixtures.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'manifest',
        nargs='?',
        # default manifest location produced by the drdump dump step
        default='./dumps/drdump.manifest',
    )
    if hasattr(django, 'setup'):
        # django 1.7 +
        django.setup()
    ns = parser.parse_args()
    main(ns.manifest)
|
emencia/dr-dump
|
drdump/load.py
|
Python
|
mit
| 715
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
List Object based on SCALA List
using functional programming principles
"""
from functools import reduce
import numpy
class List():
    """Functional-style list wrapper modelled on Scala's List.

    Combinators (map/filter/reverse/sort/...) return a new List and leave
    the receiver untouched; terminal operations (reduce/sum/size/...) return
    plain values.

    Bug fixes vs. the original:
    - ``__bool__`` returned True for an EMPTY list, inverting Python's
      container-truthiness convention; it now returns True iff non-empty.
    - ``reverse`` reversed ``self.lst`` in place through an alias, mutating
      the wrapped list as a side effect while claiming to return a copy.
    """

    def __init__(self, lst):
        # the wrapped plain Python list
        self.lst = lst

    def copy(self):
        """Return a shallow copy wrapped in a new List."""
        return List(self.lst.copy())

    def all(self):
        """True if every element is truthy (True for an empty list)."""
        return all(self.lst)

    def any(self):
        """True if at least one element is truthy."""
        return any(self.lst)

    def reverse(self):
        """Return a reversed copy; the original list is left untouched."""
        return List(self.lst[::-1])

    def map(self, function):
        """Return a new List with *function* applied to every element."""
        return List(list(map(function, self.lst)))

    def filter(self, function):
        """Return a new List of the elements for which *function* is truthy."""
        return List(list(filter(function, self.lst)))

    def reduce(self, function):
        """Left-fold the elements with *function* and return the result."""
        return reduce(function, self.lst)

    def size(self):
        """Return the number of elements."""
        return len(self.lst)

    def __str__(self):
        return str(self.lst)

    def __repr__(self):
        return str(self.lst)

    def sum(self):
        """Return the sum of the elements."""
        return sum(self.lst)

    def __contains__(self, item):
        return item in self.lst

    def __int__(self):
        # NOTE(review): returning a List violates the __int__ protocol
        # (int(x) will raise TypeError); kept unchanged for callers that
        # invoke __int__ directly -- confirm before removing.
        return List(list(map(int, self.lst)))

    def __float__(self):
        # NOTE(review): same protocol violation as __int__ above.
        return List(list(map(float, self.lst)))

    def float(self):
        """Return a new List with every element converted to float."""
        return List(list(map(float, self.lst)))

    def int(self):
        """Return a new List with every element converted to int."""
        return List(list(map(int, self.lst)))

    def __bool__(self):
        # Bug fix: was `len(self.lst) == 0`, making an empty List truthy.
        return len(self.lst) != 0

    def transpose(self):
        """Return a new List of tuples with rows and columns swapped."""
        return List(list(zip(*self.lst)))

    def enumerate(self):
        """Return a new List of (index, element) tuples."""
        return List(list(enumerate(self.lst)))

    def sort(self):
        """Return a sorted copy; the original list is left untouched."""
        new = self.lst.copy()
        new.sort()
        return List(new)

    def joinstr(self, param=""):
        """Join the str() of each element with separator *param*."""
        return param.join(map(str, self.lst))

    def is_allequal(self):
        """True if all elements are equal (vacuously True for an empty list)."""
        return all([x == self.lst[0] for x in self.lst])

    def array(self):
        """Return the elements as a numpy array."""
        return numpy.array(self.lst)
|
caiorss/m2py
|
pyhof/List.py
|
Python
|
bsd-3-clause
| 1,820
|
#! /usr/bin/env python
import numpy as np
from numpy.testing import assert_array_equal
import landlab.utils.structured_grid as sgrid
from landlab.grid.nodestatus import NodeStatus
def test_node_x_2d():
(x, _) = sgrid.node_coords((3, 2))
assert_array_equal(x, np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0]))
def test_node_x_2d_with_spacing():
(x, _) = sgrid.node_coords((3, 2), (2.0, 10.0))
assert_array_equal(x, np.array([0.0, 10.0, 0.0, 10.0, 0.0, 10.0]))
def test_node_x_2d_with_origin():
(x, _) = sgrid.node_coords((3, 2), (2.0, 10.0), (-1.0, 1.0))
assert_array_equal(x, np.array([1.0, 11.0, 1.0, 11.0, 1.0, 11.0]))
def test_node_y_2d():
(_, y) = sgrid.node_coords((3, 2))
assert_array_equal(y, np.array([0.0, 0.0, 1.0, 1.0, 2.0, 2.0]))
def test_node_y_2d_with_spacing():
(_, y) = sgrid.node_coords((3, 2), (2.0, 10.0))
assert_array_equal(y, np.array([0.0, 0.0, 2.0, 2.0, 4.0, 4.0]))
def test_node_y_2d_with_origin():
(_, y) = sgrid.node_coords((3, 2), (2.0, 10.0), (-1.0, 1.0))
assert_array_equal(y, np.array([-1.0, -1.0, 1.0, 1.0, 3.0, 3.0]))
def test_round_off_error():
    # Non-integer spacing exercises floating-point round-off in coordinate
    # generation; only the flattened array shapes are checked here.
    (x, y) = sgrid.node_coords(
        (135, 127), (5.4563957090392, 5.4563957090392), (0.0, 0.0)
    )
    assert x.shape == (135 * 127,)
    assert y.shape == (135 * 127,)
def test_2d_shape_2_by_3():
cell_nodes = sgrid.node_at_cell((2, 3))
assert_array_equal(cell_nodes, np.array([]))
def test_2d_shape_3_by_3():
cell_nodes = sgrid.node_at_cell((3, 3))
assert_array_equal(cell_nodes, np.array([4]))
def test_shape_4_by_5():
cell_nodes = sgrid.node_at_cell((4, 5))
assert_array_equal(cell_nodes, np.array([6, 7, 8, 11, 12, 13]))
def test_2d_3_by_2_from_links():
(from_indices, _) = sgrid.node_index_at_link_ends((3, 2))
assert_array_equal(from_indices, np.array([0, 1, 2, 3, 0, 2, 4]))
def test_2d_3_by_2_to_links():
(_, to_indices) = sgrid.node_index_at_link_ends((3, 2))
assert_array_equal(to_indices, np.array([2, 3, 4, 5, 1, 3, 5]))
def test_west_links():
    # west_links returns, per node, the id of the link entering from the
    # west; -1 marks nodes on the west (left) edge that have no such link.
    links = sgrid.west_links((3, 4))
    assert_array_equal(
        links, np.array([[-1, 0, 1, 2], [-1, 7, 8, 9], [-1, 14, 15, 16]])
    )
    # single-row grid: only the leftmost node lacks a west link
    links = sgrid.west_links((1, 4))
    assert_array_equal(links, np.array([[-1, 0, 1, 2]]))
    # single-column grid: no horizontal links exist at all
    links = sgrid.west_links((4, 1))
    assert_array_equal(links, np.array([[-1], [-1], [-1], [-1]]))
def test_east_links():
links = sgrid.east_links((3, 4))
assert_array_equal(
links, np.array([[0, 1, 2, -1], [7, 8, 9, -1], [14, 15, 16, -1]])
)
links = sgrid.east_links((1, 4))
assert_array_equal(links, np.array([[0, 1, 2, -1]]))
links = sgrid.east_links((4, 1))
assert_array_equal(links, np.array([[-1, -1, -1, -1]]).T)
def test_north_links():
links = sgrid.north_links((3, 4))
assert_array_equal(
links, np.array([[3, 4, 5, 6], [10, 11, 12, 13], [-1, -1, -1, -1]])
)
links = sgrid.north_links((1, 4))
assert_array_equal(links, np.array([[-1, -1, -1, -1]]))
links = sgrid.north_links((4, 1))
assert_array_equal(links, np.array([[0, 1, 2, -1]]).T)
def test_south_links():
links = sgrid.south_links((3, 4))
assert_array_equal(
links, np.array([[-1, -1, -1, -1], [3, 4, 5, 6], [10, 11, 12, 13]])
)
links = sgrid.south_links((1, 4))
assert_array_equal(links, np.array([[-1, -1, -1, -1]]))
links = sgrid.south_links((4, 1))
assert_array_equal(links, np.array([[-1, 0, 1, 2]]).T)
def test_inlinks():
links = sgrid.inlinks((3, 4))
assert_array_equal(
np.array(
[
[-1, -1, -1, -1, 3, 4, 5, 6, 10, 11, 12, 13],
[-1, 0, 1, 2, -1, 7, 8, 9, -1, 14, 15, 16],
]
),
links,
)
def test_outlinks():
links = sgrid.outlinks((3, 4))
assert_array_equal(
np.array(
[
[3, 4, 5, 6, 10, 11, 12, 13, -1, -1, -1, -1],
[0, 1, 2, -1, 7, 8, 9, -1, 14, 15, 16, -1],
]
),
links,
)
def test_cell_count_one_cell():
    """A 3x3 grid has exactly one cell (around its single core node)."""
    assert sgrid.cell_count((3, 3)) == 1


def test_no_cells():
    """A grid with no interior rows has no cells."""
    assert sgrid.cell_count((2, 3)) == 0


def test_interior_cell_count_one_cell():
    """A 3x3 grid has exactly one interior cell."""
    assert sgrid.interior_cell_count((3, 3)) == 1


def test_interior_cell_count_no_cells():
    """A grid with no interior rows has no interior cells."""
    assert sgrid.interior_cell_count((2, 3)) == 0


def test_active_cell_count_one_cell():
    """A 3x3 grid has exactly one active cell."""
    assert sgrid.active_cell_count((3, 3)) == 1


def test_active_cell_count_no_cells():
    """A grid with no interior rows has no active cells."""
    assert sgrid.active_cell_count((2, 3)) == 0


def test_interior_nodes_4_by_5():
    """Interior nodes of a 4x5 grid are the six non-perimeter nodes."""
    interiors = sgrid.interior_nodes((4, 5))
    assert_array_equal(interiors, np.array([6, 7, 8, 11, 12, 13]))


def test_no_interiors():
    """A 2x3 grid is all perimeter: no interior nodes."""
    interiors = sgrid.interior_nodes((2, 3))
    assert_array_equal(interiors, np.array([]))


def test_node_status_4_by_5():
    """Node status is int8: 1 on the perimeter, 0 in the interior."""
    status = sgrid.status_at_node((4, 5))
    assert status.dtype == np.int8
    assert_array_equal(
        status,
        np.array(
            [[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]
        ).flatten(),
    )


def test_node_status_no_interiors():
    """With no interior, every node has boundary status 1."""
    status = sgrid.status_at_node((2, 3))
    assert status.dtype == np.int8
    assert_array_equal(status, np.array([1, 1, 1, 1, 1, 1]))
"""
*--27-->*--28-->*--29-->*--30-->*
^ ^ ^ ^ ^
10 11 12 13 14
| | | | |
*--23-->*--24-->*--25-->*--26-->*
^ ^ ^ ^ ^
5 6 7 8 9
| | | | |
*--19-->*--20-->*--21-->*--22-->*
^ ^ ^ ^ ^
0 1 2 3 4
| | | | |
*--15-->*--16-->*--17-->*--18-->*
"""
def test_4_by_5():
    """Active links of a 4x5 grid, and agreement with active_link_count."""
    active_links = sgrid.active_links((4, 5))
    assert_array_equal(
        active_links,
        np.array([1, 2, 3, 6, 7, 8, 11, 12, 13, 19, 20, 21, 22, 23, 24, 25, 26]),
    )
    assert len(active_links) == sgrid.active_link_count((4, 5))


def test_with_status_at_node():
    """Closing node 6 removes the links touching it from the active set."""
    status = sgrid.status_at_node((4, 5))
    status[6] = NodeStatus.CLOSED

    active_links = sgrid.active_links((4, 5), node_status_array=status)
    assert_array_equal(
        active_links, np.array([2, 3, 7, 8, 11, 12, 13, 21, 22, 23, 24, 25, 26])
    )


def test_with_link_nodes():
    """Passing precomputed link endpoints gives the same active links."""
    link_nodes = sgrid.node_index_at_link_ends((4, 5))
    active_links = sgrid.active_links((4, 5), link_nodes=link_nodes)
    assert_array_equal(
        active_links,
        np.array([1, 2, 3, 6, 7, 8, 11, 12, 13, 19, 20, 21, 22, 23, 24, 25, 26]),
    )
    assert len(active_links) == sgrid.active_link_count((4, 5))
def test_vertical_active_link_count():
    """Vertical active-link counts shrink as interior nodes are closed."""
    assert sgrid.vertical_active_link_count((3, 4)) == 4
    assert sgrid.vertical_active_link_count((3, 2)) == 0

    node_status = np.ones((4, 5), dtype=int)
    assert sgrid.vertical_active_link_count((4, 5), node_status=node_status) == 9

    node_status[0, 1] = 0
    assert sgrid.vertical_active_link_count((4, 5), node_status=node_status) == 8

    node_status[2, 1] = 0
    assert sgrid.vertical_active_link_count((4, 5), node_status=node_status) == 6

    node_status[2, 2] = 0
    assert sgrid.vertical_active_link_count((4, 5), node_status=node_status) == 4

    node_status[1, 1] = 0
    assert sgrid.vertical_active_link_count((4, 5), node_status=node_status) == 4


def test_horizontal_active_link_count():
    """Horizontal active-link counts shrink as interior nodes are closed."""
    assert sgrid.horizontal_active_link_count((3, 4)) == 3
    assert sgrid.horizontal_active_link_count((2, 3)) == 0

    node_status = np.ones((4, 5), dtype=int)
    assert sgrid.horizontal_active_link_count((4, 5), node_status=node_status) == 8

    node_status[0, 1] = 0
    assert sgrid.horizontal_active_link_count((4, 5), node_status=node_status) == 8

    node_status[2, 1] = 0
    assert sgrid.horizontal_active_link_count((4, 5), node_status=node_status) == 6

    node_status[2, 2] = 0
    assert sgrid.horizontal_active_link_count((4, 5), node_status=node_status) == 5

    node_status[1, 1] = 0
    assert sgrid.horizontal_active_link_count((4, 5), node_status=node_status) == 3
def test_horizontal_active_link_ids():
    """Horizontal active-link IDs, including degenerate and masked grids."""
    links = sgrid.horizontal_active_link_ids((3, 4))
    assert_array_equal(links, np.array([[4, 5, 6]]))

    links = sgrid.horizontal_active_link_ids((1, 4))
    expected = np.array([], ndmin=2, dtype=np.int64)
    expected.shape = (0, 3)
    assert_array_equal(expected, links)

    links = sgrid.horizontal_active_link_ids((4, 1))
    expected.shape = (2, 0)
    assert_array_equal(expected, links)

    node_status = np.ones((4, 5), dtype=int)
    links = sgrid.horizontal_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[9, 10, 11, 12], [13, 14, 15, 16]]))

    node_status = np.ones((4, 5), dtype=int)
    node_status[1, 1] = 0
    links = sgrid.horizontal_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, -1, 7, 8], [9, 10, 11, 12]]))

    node_status[2, 1] = 0
    links = sgrid.horizontal_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, -1, 6, 7], [-1, -1, 8, 9]]))

    # Closing a corner (perimeter) node does not change the active set.
    node_status[0, 0] = 0
    links = sgrid.horizontal_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, -1, 6, 7], [-1, -1, 8, 9]]))


def test_vertical_active_link_ids():
    """Vertical active-link IDs, including degenerate and masked grids."""
    links = sgrid.vertical_active_link_ids((3, 4))
    assert_array_equal(links, np.array([[0, 1], [2, 3]]))

    links = sgrid.vertical_active_link_ids((1, 4))
    expected = np.array([], ndmin=2, dtype=np.int64)
    expected.shape = (0, 2)
    assert_array_equal(expected, links)

    links = sgrid.vertical_active_link_ids((4, 1))
    expected.shape = (3, 0)
    assert_array_equal(expected, links)

    node_status = np.ones((4, 5), dtype=int)
    links = sgrid.vertical_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))

    node_status = np.ones((4, 5), dtype=int)
    node_status[1, 1] = 0
    links = sgrid.vertical_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, 0, 1], [-1, 2, 3], [4, 5, 6]]))

    node_status[2, 1] = 0
    links = sgrid.vertical_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, 0, 1], [-1, 2, 3], [-1, 4, 5]]))

    # Closing a corner (perimeter) node does not change the active set.
    node_status[0, 0] = 0
    links = sgrid.vertical_active_link_ids((4, 5), node_status=node_status)
    assert_array_equal(links, np.array([[-1, 0, 1], [-1, 2, 3], [-1, 4, 5]]))
def test_active_west_links():
    """Active link entering each node from the west."""
    links = sgrid.active_west_links((3, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1], [-1, 4, 5, 6], [-1, -1, -1, -1]]))

    links = sgrid.active_west_links((1, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]))

    links = sgrid.active_west_links((4, 1))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]).T)


def test_active_east_links():
    """Active link leaving each node to the east."""
    links = sgrid.active_east_links((3, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1], [4, 5, 6, -1], [-1, -1, -1, -1]]))

    links = sgrid.active_east_links((1, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]))

    links = sgrid.active_east_links((4, 1))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]).T)

    links = sgrid.horizontal_active_link_ids((4, 5))
    assert_array_equal(np.array([[9, 10, 11, 12], [13, 14, 15, 16]]), links)

    links = sgrid.active_east_links((4, 5))
    assert_array_equal(
        np.array(
            [
                [-1, -1, -1, -1, -1],
                [9, 10, 11, 12, -1],
                [13, 14, 15, 16, -1],
                [-1, -1, -1, -1, -1],
            ]
        ),
        links,
    )


def test_active_north_links():
    """Active link leaving each node to the north."""
    links = sgrid.active_north_links((3, 4))
    assert_array_equal(links, np.array([[-1, 0, 1, -1], [-1, 2, 3, -1], [-1, -1, -1, -1]]))

    links = sgrid.active_north_links((1, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]))

    links = sgrid.active_north_links((4, 1))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]).T)


def test_active_south_links():
    """Active link entering each node from the south."""
    links = sgrid.active_south_links((3, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1], [-1, 0, 1, -1], [-1, 2, 3, -1]]))

    links = sgrid.active_south_links((1, 4))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]))

    links = sgrid.active_south_links((4, 1))
    assert_array_equal(links, np.array([[-1, -1, -1, -1]]).T)


def test_active_inlinks():
    """Stacked (vertical, horizontal) active links entering each node."""
    links = sgrid.active_inlinks((3, 4))
    assert_array_equal(
        np.array(
            [
                [-1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, -1],
                [-1, -1, -1, -1, -1, 4, 5, 6, -1, -1, -1, -1],
            ]
        ),
        links,
    )


def test_active_outlinks():
    """Stacked (vertical, horizontal) active links leaving each node."""
    links = sgrid.active_outlinks((3, 4))
    assert_array_equal(
        np.array(
            [
                [-1, 0, 1, -1, -1, 2, 3, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, 4, 5, 6, -1, -1, -1, -1, -1],
            ]
        ),
        links,
    )
def test_active_outlinks_4x5():
    """Active outlinks of a 4x5 grid (row 0 vertical, row 1 horizontal)."""
    links = sgrid.active_outlinks((4, 5))
    assert_array_equal(
        np.array(
            [
                [-1, 0, 1, 2, -1, -1, 3, 4, 5, -1, -1, 6, 7, 8, -1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, 9, 10, 11, 12, -1, 13, 14, 15, 16, -1, -1, -1, -1, -1, -1],
            ]
        ),
        links,
    )


def test_active_inlinks_4x5():
    """Active inlinks of a 4x5 grid (row 0 vertical, row 1 horizontal)."""
    links = sgrid.active_inlinks((4, 5))
    assert_array_equal(
        np.array(
            [
                [-1, -1, -1, -1, -1, -1, 0, 1, 2, -1, -1, 3, 4, 5, -1, -1, 6, 7, 8, -1],
                [-1, -1, -1, -1, -1, -1, 9, 10, 11, 12, -1, 13, 14, 15, 16, -1, -1, -1, -1, -1],
            ]
        ),
        links,
    )
def test_face_count():
    """Face counts for several grid shapes (degenerate grids have none)."""
    assert sgrid.face_count((4, 5)) == 17
    assert sgrid.face_count((3, 3)) == 4
    assert sgrid.face_count((2, 100)) == 0
    assert sgrid.face_count((100, 2)) == 0
    assert sgrid.face_count((100, 1)) == 0


def test_active_face_count():
    """Active face counts mirror total face counts on an open grid."""
    assert sgrid.active_face_count((4, 5)) == 17
    assert sgrid.active_face_count((3, 3)) == 4
    assert sgrid.active_face_count((2, 100)) == 0
    assert sgrid.active_face_count((100, 2)) == 0
    assert sgrid.active_face_count((100, 1)) == 0


def test_active_faces():
    """All 17 faces of a 4x5 grid are active."""
    active_faces = sgrid.active_face_index((4, 5))
    assert_array_equal(np.arange(17), active_faces)
def test_link_faces_4_by_5():
    """Face crossed by each link of a 4x5 grid; boundary links have no face."""
    link_faces = sgrid.face_at_link((4, 5))
    BAD = sgrid.BAD_INDEX_VALUE
    assert_array_equal(
        link_faces,
        np.array(
            [
                BAD, 0, 1, 2, BAD,
                BAD, 3, 4, 5, BAD,
                BAD, 6, 7, 8, BAD,
                BAD, BAD, BAD, BAD,
                9, 10, 11, 12,
                13, 14, 15, 16,
                BAD, BAD, BAD, BAD,
            ]
        ),
    )


def test_with_active_links():
    """Dropping the last active link removes face 16 from the mapping."""
    active_links = sgrid.active_links((4, 5))
    active_links = active_links[:-1]
    link_faces = sgrid.face_at_link((4, 5), actives=active_links)

    BAD = sgrid.BAD_INDEX_VALUE
    assert_array_equal(
        link_faces,
        np.array(
            [
                BAD, 0, 1, 2, BAD,
                BAD, 3, 4, 5, BAD,
                BAD, 6, 7, 8, BAD,
                BAD, BAD, BAD, BAD,
                9, 10, 11, 12,
                13, 14, 15, BAD,
                BAD, BAD, BAD, BAD,
            ]
        ),
    )
def test_reshape_array_default():
    """By default reshape_array returns a C-contiguous view of its input."""
    x = np.arange(12.0)
    y = sgrid.reshape_array((3, 4), x)

    assert y.shape == (3, 4)
    assert_array_equal(x, y.flat)
    assert y.flags["C_CONTIGUOUS"]
    assert y.base is x


def test_copy():
    """With copy=True the result is independent of the input array."""
    x = np.arange(12.0)
    y = sgrid.reshape_array((3, 4), x, copy=True)

    assert y.shape == (3, 4)
    assert_array_equal(x, y.flat)
    assert y.flags["C_CONTIGUOUS"]
    assert y.base is None

    # Mutating the copy must leave the original untouched.
    y[0][0] = 0.0
    assert_array_equal(
        x, np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0])
    )


def test_flip():
    """flip_vertically=True returns a row-reversed view sharing memory."""
    x = np.arange(12.0)
    y = sgrid.reshape_array((3, 4), x, flip_vertically=True)

    assert y.shape == (3, 4)
    assert_array_equal(
        y,
        np.array([[8.0, 9.0, 10.0, 11.0], [4.0, 5.0, 6.0, 7.0], [0.0, 1.0, 2.0, 3.0]]),
    )
    assert not y.flags["C_CONTIGUOUS"]
    assert y.base is not None

    # It is a view, so writes propagate back to the source array.
    y[0][0] = 0.0
    assert_array_equal(
        x, np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0, 9.0, 10.0, 11.0])
    )


def test_flip_copy():
    """Flipping with copy=True yields a contiguous, independent array."""
    x = np.arange(12.0)
    y = sgrid.reshape_array((3, 4), x, flip_vertically=True, copy=True)

    assert y.shape == (3, 4)
    assert_array_equal(
        y,
        np.array([[8.0, 9.0, 10.0, 11.0], [4.0, 5.0, 6.0, 7.0], [0.0, 1.0, 2.0, 3.0]]),
    )
    assert y.flags["C_CONTIGUOUS"]
    assert y.base is not x
def test_diagonal_array_default():
    """Diagonal neighbors of each node of a 2x3 grid, -1 out of bounds."""
    diags = sgrid.diagonal_node_array((2, 3), out_of_bounds=-1)
    assert_array_equal(
        diags,
        np.array(
            [
                [4, -1, -1, -1],
                [5, 3, -1, -1],
                [-1, 4, -1, -1],
                [-1, -1, -1, 1],
                [-1, -1, 0, 2],
                [-1, -1, 1, -1],
            ]
        ),
    )
    assert diags.base is None
    assert diags.flags["C_CONTIGUOUS"]


def test_non_contiguous():
    """contiguous=False may return a non-contiguous view instead of a copy."""
    diags = sgrid.diagonal_node_array((2, 3), out_of_bounds=-1, contiguous=False)
    assert_array_equal(
        diags,
        np.array(
            [
                [4, -1, -1, -1],
                [5, 3, -1, -1],
                [-1, 4, -1, -1],
                [-1, -1, -1, 1],
                [-1, -1, 0, 2],
                [-1, -1, 1, -1],
            ]
        ),
    )
    assert isinstance(diags.base, np.ndarray)
    assert not diags.flags["C_CONTIGUOUS"]


def test_boundary_node_mask_no_actives():
    """With no interior nodes every entry takes the boundary mask value."""
    diags = sgrid.diagonal_node_array((2, 3), out_of_bounds=-1, boundary_node_mask=-2)
    assert_array_equal(diags, -2 * np.ones((6, 4)))


def test_boundary_node_mask():
    """Only the single core node of a 3x3 grid keeps real diagonal IDs."""
    diags = sgrid.diagonal_node_array((3, 3), out_of_bounds=-1, boundary_node_mask=-2)
    assert_array_equal(
        diags,
        np.array(
            [
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
                [8, 6, 0, 2],
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
                [-2, -2, -2, -2],
            ]
        ),
    )
def test_neighbor_array_default():
    """Orthogonal neighbors of each node, BAD_INDEX_VALUE off the grid."""
    neighbors = sgrid.neighbor_node_array((2, 3))

    BAD = sgrid.BAD_INDEX_VALUE
    assert_array_equal(
        neighbors,
        np.array(
            [
                [1, 3, BAD, BAD],
                [2, 4, 0, BAD],
                [BAD, 5, 1, BAD],
                [4, BAD, BAD, 0],
                [5, BAD, 3, 1],
                [BAD, BAD, 4, 2],
            ]
        ).T,
    )
    assert neighbors.flags["C_CONTIGUOUS"]
    assert neighbors.base is None


def test_set_out_of_bounds():
    """The 'inactive' keyword substitutes for out-of-grid neighbor IDs."""
    neighbors = sgrid.neighbor_node_array((2, 3), inactive=-1)
    assert_array_equal(
        neighbors,
        np.array(
            [
                [1, 3, -1, -1],
                [2, 4, 0, -1],
                [-1, 5, 1, -1],
                [4, -1, -1, 0],
                [5, -1, 3, 1],
                [-1, -1, 4, 2],
            ]
        ).T,
    )
def test_no_inactive():
    """Active inlink matrix of a fully open 4x5 grid."""
    inlinks = sgrid.setup_active_inlink_matrix((4, 5), return_count=False)
    assert_array_equal(
        inlinks,
        np.array(
            [
                [-1, -1, -1, -1, -1, -1, 0, 1, 2, -1, -1, 3, 4, 5, -1, -1, 6, 7, 8, -1],
                [-1, -1, -1, -1, -1, -1, 9, 10, 11, 12, -1, 13, 14, 15, 16, -1, -1, -1, -1, -1],
            ]
        ),
    )


def test_inactive():
    """Closing node (1, 1) renumbers and removes its inlinks."""
    status = np.ones((4, 5))
    status[1, 1] = 0
    inlinks = sgrid.setup_active_inlink_matrix(
        (4, 5), return_count=False, node_status=status
    )
    assert_array_equal(
        inlinks,
        np.array(
            [
                [-1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1, 7, 8, -1, 9, 10, 11, 12, -1, -1, -1, -1, -1],
            ]
        ),
    )
def test_out_link_ids_at_nodes():
    """Outgoing (vertical, horizontal) link IDs at every node of a 4x5 grid."""
    links_ids = sgrid.outlink_index_at_node((4, 5))
    assert_array_equal(
        np.array(
            [
                [4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 22, 23, 24, 25, 26, -1, -1, -1, -1, -1],
                [0, 1, 2, 3, -1, 9, 10, 11, 12, -1, 18, 19, 20, 21, -1, 27, 28, 29, 30, -1],
            ]
        ),
        links_ids,
    )


def test_in_link_ids_at_nodes():
    """Incoming (vertical, horizontal) link IDs at every node of a 4x5 grid."""
    links_ids = sgrid.inlink_index_at_node((4, 5))
    assert_array_equal(
        np.array(
            [
                [-1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 22, 23, 24, 25, 26],
                [-1, 0, 1, 2, 3, -1, 9, 10, 11, 12, -1, 18, 19, 20, 21, -1, 27, 28, 29, 30],
            ]
        ),
        links_ids,
    )
|
landlab/landlab
|
tests/utils/test_structured_grid.py
|
Python
|
mit
| 26,443
|
def add(a, b):
    """Return ``a + b`` using only bitwise operations (no ``+`` operator).

    Operands are wrapped to 64 bits so the carry loop terminates even for
    negative inputs (Python ints are unbounded, so the naive XOR/carry
    recursion never finishes when an operand is negative), and the result
    is re-interpreted as a signed 64-bit value.

    :param a: first addend (int)
    :param b: second addend (int)
    :return: the sum a + b
    """
    mask = 0xFFFFFFFFFFFFFFFF  # 64-bit two's-complement wrap-around
    a &= mask
    b &= mask
    while b:
        # XOR adds without carry; AND<<1 is the carry to propagate.
        a, b = (a ^ b) & mask, ((a & b) << 1) & mask
    # Re-interpret the unsigned 64-bit pattern as a signed result.
    return a if a < 0x8000000000000000 else a - (1 << 64)


print(add(100, 23))
|
linyaoli/acm
|
others/intermediate/add_two_numbers_without_operator.py
|
Python
|
gpl-2.0
| 126
|
import datetime
import struct
import logging
import numpy as np
import os
import time
import re
import pytz
import scipy.spatial.distance as distance
from tzlocal import get_localzone
import warnings
def get_filename_meta_data(fn):
    """Parse a Time Travel Task filename into its meta-data fields.

    Expected format: ``<subID>_<trial>_<phase>_<inverse>_<Y-m-d>_<H-M-S>.<ext>``.

    :param fn: a filename to parse for meta-data
    :return: a dictionary with string keys 'subID', 'trial', 'phase' and
             'inverse', plus 'datetime' holding a ``datetime.datetime``
    """
    parts = fn.split('_')
    # The date and time occupy the last two underscore-separated fields;
    # strip the file extension off the time part before parsing.
    dt = datetime.datetime.strptime(parts[4] + '_' + parts[5].split('.')[0], '%Y-%m-%d_%H-%M-%S')
    return {"subID": parts[0], "trial": parts[1], "phase": parts[2], "inverse": parts[3], "datetime": dt}
def phase_num_to_str(phase):
    """Convert a phase number (int, or a numeric string) into its display name.

    :param phase: an integer 0-8 (or its string form) identifying the phase
    :return: one of 'VR Practice', 'VR Study', 'VR Test', 'VE Practice',
             'VE Study', 'VE Test', '2D Practice', '2D Study', '2D Test',
             in order from 0 to 8
    """
    names = ['VR Practice', 'VR Study', 'VR Test', 'VE Practice', 'VE Study', 'VE Test',
             '2D Practice', '2D Study', '2D Test']
    lookup = int(phase) if isinstance(phase, str) else phase
    return names[lookup]
def decode_7bit_int_length(fp):
    """Read a .NET 7-bit-encoded integer (string length prefix) from a file.

    The encoding stores 7 payload bits per byte, least-significant group
    first; a set high bit means another byte follows (the format used by
    C#'s ``BinaryReader.ReadString`` length prefix).

    :param fp: a binary file object positioned at the encoded integer
    :return: the decoded non-negative integer
    """
    string_length = 0
    shift = 0
    while True:
        part = ord(fp.read(1))
        string_length += (part & 0x7F) << shift
        if part < 0x80:  # high bit clear: this was the final byte
            break
        shift += 7
    return string_length
def datetime_from_dot_net_binary(data):
    """Convert a .NET ``DateTime.ToBinary`` value to a Python datetime.

    From http://stackoverflow.com/questions/15919598/serialize-datetime-as-binary

    :param data: the 64-bit integer produced by .NET's DateTime serialization
    :return: a timezone-aware ``datetime.datetime`` for the same instant
    """
    kind = (data % 2 ** 64) >> 62  # top two bits carry the DateTimeKind flag
    ticks = data & 0x3FFFFFFFFFFFFFFF  # remaining 62 bits: 100 ns ticks since 0001-01-01
    seconds = float(ticks) / 10000000.0
    tz = pytz.utc
    if kind == 0:
        # Kind 0 is "Unspecified"; treat it as the machine's local zone.
        tz = get_localzone()
    return datetime.datetime(1, 1, 1, tzinfo=tz) + datetime.timedelta(seconds=seconds)
def read_binary_file(path):
    """Read a Time Travel Task binary log file into a list of iterations.

    Two on-disk layouts are supported: the beta layout, whose header does not
    start with 'version', and version 2, whose header starts with
    'version,2'. Each returned iteration is a plain dict; see the
    ``iterations.append`` calls below for the exact keys of each layout
    ('version', 'datetime', 'time_val', 'timescale', position 'x'/'y'/'z',
    rotation quaternion 'rx'/'ry'/'rz'/'rw', key/button states, per-item
    coordinates and flags, boundary state/color, and — for version 2 —
    key/button labels, item event type/time and inventory state).

    :param path: a string absolute path to a Time Travel Task binary file
    :return: a list of per-iteration dictionaries
    """
    iterations = []
    with open(path, 'rb') as f:
        header_length = decode_7bit_int_length(f)
        # Decode to text so the substring searches below work on Python 3 too.
        header = f.read(header_length).decode('utf-8')
        split_header = header.split(',')
        if split_header[0] != 'version':  # Beta version with no version prefix
            # Field counts are implied by how often each marker appears.
            num_keys = header.count('key')
            num_buttons = header.count('button')
            num_items = header.count('itemXYZAC')
            while f.read(1):  # Look ahead for end of file
                f.seek(-1, 1)  # Go back one to undo the look-ahead
                # Time information
                date_time = datetime_from_dot_net_binary(struct.unpack_from('q', f.read(8))[0])
                time_val = struct.unpack_from('f', f.read(4))[0]
                time_scale = struct.unpack_from('f', f.read(4))[0]
                # Participant position
                x = struct.unpack_from('f', f.read(4))[0]
                y = struct.unpack_from('f', f.read(4))[0]
                z = struct.unpack_from('f', f.read(4))[0]
                # Participant rotation quaternion
                rx = struct.unpack_from('f', f.read(4))[0]
                ry = struct.unpack_from('f', f.read(4))[0]
                rz = struct.unpack_from('f', f.read(4))[0]
                rw = struct.unpack_from('f', f.read(4))[0]
                # Key, button and item state, sized by the header counts
                keys = [struct.unpack_from('?', f.read(1))[0] for _ in range(num_keys)]
                buttons = [struct.unpack_from('?', f.read(1))[0] for _ in range(num_buttons)]
                ix, iy, iz, i_active, i_clicked = [], [], [], [], []
                for _ in range(num_items):
                    ix.append(struct.unpack_from('f', f.read(4))[0])
                    iy.append(struct.unpack_from('f', f.read(4))[0])
                    iz.append(struct.unpack_from('f', f.read(4))[0])
                    i_active.append(struct.unpack_from('?', f.read(1))[0])
                    i_clicked.append(struct.unpack_from('?', f.read(1))[0])
                # Boundary state and RGB color intensities
                boundary_state = struct.unpack_from('i', f.read(4))[0]
                br = struct.unpack_from('f', f.read(4))[0]
                bg = struct.unpack_from('f', f.read(4))[0]
                bb = struct.unpack_from('f', f.read(4))[0]
                iterations.append({"version": 0,
                                   "datetime": date_time, "time_val": time_val, "timescale": time_scale,
                                   "x": x, "y": y, "z": z,
                                   "rx": rx, "ry": ry, "rz": rz, "rw": rw,
                                   "keys": keys, "buttons": buttons,
                                   "itemsx": ix, "itemsy": iy, "itemsz": iz, "itemsactive": i_active,
                                   "itemsclicked": i_clicked,
                                   "boundarystate": boundary_state, "br": br, "bg": bg, "bb": bb})
        elif split_header[1] == '2':  # Version 2
            num_keys = header.count('key')
            num_buttons = header.count('button')
            num_items = header.count('itemXYZActiveClickedEventTime')
            # Labels are embedded in the header immediately after each
            # 'key'/'button' marker, terminated by '_'.
            key_labels = []
            key_split = header.split('key')
            for i in range(1, len(key_split)):
                key_labels.append(key_split[i].split('_')[0])
            button_labels = []
            button_split = header.split('button')
            for i in range(1, len(button_split)):
                button_labels.append(button_split[i].split('_')[0])
            while f.read(1):  # Look ahead for end of file
                f.seek(-1, 1)  # Go back one to undo the look-ahead
                # Time information
                date_time = datetime_from_dot_net_binary(struct.unpack_from('q', f.read(8))[0])
                time_val = struct.unpack_from('f', f.read(4))[0]
                time_scale = struct.unpack_from('f', f.read(4))[0]
                # Participant position
                x = struct.unpack_from('f', f.read(4))[0]
                y = struct.unpack_from('f', f.read(4))[0]
                z = struct.unpack_from('f', f.read(4))[0]
                # Participant rotation quaternion
                rx = struct.unpack_from('f', f.read(4))[0]
                ry = struct.unpack_from('f', f.read(4))[0]
                rz = struct.unpack_from('f', f.read(4))[0]
                rw = struct.unpack_from('f', f.read(4))[0]
                # Key, button and item state; v2 items carry event type/time too
                keys = [struct.unpack_from('?', f.read(1))[0] for _ in range(num_keys)]
                buttons = [struct.unpack_from('?', f.read(1))[0] for _ in range(num_buttons)]
                ix, iy, iz = [], [], []
                i_active, i_clicked, i_event_type, i_event_time = [], [], [], []
                for _ in range(num_items):
                    ix.append(struct.unpack_from('f', f.read(4))[0])
                    iy.append(struct.unpack_from('f', f.read(4))[0])
                    iz.append(struct.unpack_from('f', f.read(4))[0])
                    i_active.append(struct.unpack_from('?', f.read(1))[0])
                    i_clicked.append(struct.unpack_from('?', f.read(1))[0])
                    i_event_type.append(struct.unpack_from('i', f.read(4))[0])
                    i_event_time.append(struct.unpack_from('f', f.read(4))[0])
                # Boundary state and RGB color intensities
                boundary_state = struct.unpack_from('i', f.read(4))[0]
                br = struct.unpack_from('f', f.read(4))[0]
                bg = struct.unpack_from('f', f.read(4))[0]
                bb = struct.unpack_from('f', f.read(4))[0]
                # Inventory state
                inventory_item_numbers = [struct.unpack_from('i', f.read(4))[0]
                                          for _ in range(num_items)]
                active_inventory_item_number = struct.unpack_from('i', f.read(4))[0]
                active_inventory_event_index = struct.unpack_from('i', f.read(4))[0]
                iterations.append({"version": 2,
                                   "datetime": date_time, "time_val": time_val, "timescale": time_scale,
                                   "x": x, "y": y, "z": z,
                                   "rx": rx, "ry": ry, "rz": rz, "rw": rw,
                                   "keys": keys, "buttons": buttons,
                                   'keylabels': key_labels, 'buttonlabels': button_labels,
                                   "itemsx": ix, "itemsy": iy, "itemsz": iz, "itemsactive": i_active,
                                   "itemsclicked": i_clicked, 'itemsevent': i_event_type, 'itemstime': i_event_time,
                                   "boundarystate": boundary_state, "br": br, "bg": bg, "bb": bb,
                                   'inventoryitemnumbers': inventory_item_numbers,
                                   'activeinventoryitemnumber': active_inventory_item_number,
                                   'activeinventoryeventindex': active_inventory_event_index})
    return iterations
def find_last(lst, sought_elt):
    """Return the index of the LAST occurrence of ``sought_elt`` in ``lst``.

    :param lst: the list to search
    :param sought_elt: the element for which we search
    :return: the index of the last matching element, or None if absent
    """
    for r_idx, elt in enumerate(reversed(lst)):
        if elt == sought_elt:
            # r_idx counts from the end; convert back to a forward index.
            return len(lst) - 1 - r_idx
    return None
def parse_test_items(iterations, cols, item_number_label, event_state_labels):
"""
This function takes in a set of iterations parsed from read_binary_file, a set of color values, a set of item
labels, and a set of event labels and produces the items, reconstruction items and the order description for
each item in the reconstruction.
:param iterations: the iterations output from read_binary_file
:param cols: the colors of each item in canonical ordering
:param item_number_label: the labels for each item in canonical ordering
:param event_state_labels: the event labels for each item in canonical ordering
:return: a dictionary containing:
"direction" - a numeric value representing the up, down or stationary state
"pos" - the x, z, and t values representing the 2D position and time coordinates
"color" - the color value representing the indexed color value from cols associated with the item
"""
# NOTE(review): indentation was lost in this copy of the file; the nesting of
# the logic below should be re-checked against the original source before any
# behavioral change is attempted.
# The descrambler maps on-screen item slots back to canonical item numbers;
# the initial ordering is hard-coded for the 10-item task (presumably the
# game's scramble order -- TODO confirm against the task configuration).
descrambler = [1, 2, 4, 7, 0, 3, 5, 6, 8, 9]
descrambler_type = [2, 2, 2, 2, 1, 1, 1, 1, 0, 0]
reconstruction_items = [None] * len(item_number_label)
# Version-0 logs record no per-item event/time info, so item identities must
# be reconstructed ("descrambled") from coordinate changes between frames.
if iterations[0]['version'] == 0:
# pos = np.empty((len(items), 3))
# size = np.empty((len(items)))
# color = np.empty((len(items), 4))
# end_time = 60 # End time for convenience
################################################################################################################
# BEGIN DESCRAMBLER
################################################################################################################
# start_state = iterations[0] # Store first iteration
# end_state = iterations[len(iterations) - 1] # Store last iteration
# prev_active = start_state['itemsactive'] # Create activity array
# Event state tracker variables (this works great)
number_placed = 0
event_state = 0
prev_event_btn_state = False
prev_drop_button_state = False
prev_inventory_button_state = False
inventory_index = 0
inventory = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
numbers_placed = [-1] * 10
flag_for_same_place_override = False
flag_for_same_place_location = None
# Walk consecutive frame pairs; the final frame has no successor and is
# skipped via the break below.
for iterations_idx, i in enumerate(iterations):
if iterations_idx == len(iterations) - 1:
break
# Get the event state button (keys, 4, buttons, 4)
event_button_state = i['buttons'][4]
if event_button_state and not prev_event_btn_state: # On rising edge
event_state = (event_state + 1) % 3 # Set the event state appropriately
prev_event_btn_state = event_button_state # Update prev state for edge detection
# Get the item drop button state (keys 1, buttons 1)
drop_button_state = i['buttons'][1]
inventory_button_state = i['buttons'][3]
# Find the value of the index of this item in the descrambler, this is the correct item value (I think)
# Coordinate comparison
current_item_coords = []
for xx, zz in zip(i['itemsx'], i['itemsz']):
current_item_coords.append((xx, zz))
next_iteration = iterations[iterations_idx + 1]
next_item_coords = []
for xx, zz in zip(next_iteration['itemsx'], next_iteration['itemsz']):
next_item_coords.append((xx, zz))
# Check for changed state (at this point, active and button press simultaneously)
if (current_item_coords != next_item_coords) and (not iterations_idx == 0):
logging.debug(','.join(map(str, current_item_coords)))
logging.debug(','.join(map(str, next_item_coords)))
present_checklist = [False] * len(current_item_coords)
missing_item_index = -1
for idxx, (first, second) in enumerate(zip(current_item_coords, next_item_coords)):
if first in next_item_coords:
present_checklist[idxx] = True
if next_item_coords.count(first) > 1 or current_item_coords.count(second) > 1:
logging.warning('Items were found to be placed on top of each other. ' +
'This will likely make the item identities during reconstruction inaccurate.')
logging.debug('CASE: Multiple items in same location, ' +
'consulting Active list for differentiation.')
active_list = i['itemsactive']
next_active_list = iterations[iterations_idx + 1]['itemsactive']
logging.debug(','.join(map(str, active_list)))
logging.debug(','.join(map(str, next_active_list)))
logging.debug(event_state)
logging.debug(','.join(map(str, descrambler_type)))
for idxxx, (a, b) in enumerate(zip(current_item_coords, next_item_coords)):
if not a == b:
present_checklist[idxxx] = False
logging.debug('{0} found as move index'.format(idxxx))
break
for idxx, check in enumerate(present_checklist):
if not check:
if current_item_coords.count(current_item_coords[idxx]) > 1:
flag_for_same_place_override = True
flag_for_same_place_location = current_item_coords[idxx]
missing_item_index = idxx
logging.debug('{0} is missing item index'.format(missing_item_index))
if drop_button_state and not prev_drop_button_state: # Rising edge, item dropped
logging.info('item dropped/picked up.')
if current_item_coords == next_item_coords: # item picked up
# noinspection PyTypeChecker
inventory.insert(inventory_index, '?')
logging.info(
'item picked up to inventory index {0}: inventory state: {1}'.format(inventory_index,
inventory))
else:
if inventory:
inventory.pop(inventory_index)
numbers_placed[descrambler[missing_item_index]] = number_placed
number_placed += 1
logging.info(
'item dropped from inventory index {0}: inventory state: {1}'.format(inventory_index,
inventory))
if inventory_button_state and not prev_inventory_button_state:
inventory_index = (inventory_index + 1) % len(inventory)
# Now we know which item (according to the previous descrambler state) had its location change
# and which index it WAS in. We also know the event type it was placed as so we can compute its NEW
# position in the list.
# missing_item_index is the index of the placed item (according to current descrambler)
# event_state is the type of event which was placed
# Edge cases include if an item is placed precisely back where it was and if multiple items are
# placed in the same place... unfortunately this happens in 44.6% of test files...
# MORE NOTES:
# I think it is possible to completely descramble because if an item is inserted into a
# list of consecutive
# identical coordinates, the items form a stack (where the latest placed item is picked up first)...
# when picked up, we now know it's relative index in the inventory (and we can be relatively sure it's
# picked up because it should become inactive). Since it must be picked up to be placed correctly,
# if we track its position in the inventory until it is again placed, we can disentangle it from its
# identical partners. To track it in inventory, it is necessary to track inventory clicks as well as
# the number of items in the inventory so a proper modulus can be established.
# Ugh.
# DESCRAMBLER LOGIC
# Same-place override: when several items sit at identical coordinates,
# the most recently placed one (largest numbers_placed entry) is taken
# as the moved item instead of the first unmatched index.
if flag_for_same_place_override:
tmp_max = -1
tmp_max_index = -1
for x, y in enumerate(numbers_placed):
if current_item_coords[descrambler[x]] == flag_for_same_place_location:
if y > tmp_max:
tmp_max = y
tmp_max_index = x
override_index = tmp_max_index
# override_index = [x for x, y in enumerate(numbers_placed) if y == max(numbers_placed)][0]
override_index_descrambled = [x for x, y in enumerate(descrambler) if y == override_index][0]
logging.info('same place override, most recent placed descramble index is {0} compared to '
'original missing index of {1}'.format(override_index_descrambled,
missing_item_index))
missing_item_index = override_index_descrambled
flag_for_same_place_override = False
# If the current event state is 0 (stationary), move the current item to the end of the list
insertion_index = -1
val = descrambler[missing_item_index]
del descrambler[missing_item_index]
del descrambler_type[missing_item_index]
if event_state == 0:
descrambler.append(val)
descrambler_type.append(event_state)
insertion_index = len(descrambler) - 1
# If the current event state is 1 (up/fly), move the current item to the last fly position
elif event_state == 1 or event_state == 2:
last = find_last(descrambler_type, event_state)
if last is None and event_state == 1:
last = find_last(descrambler_type, 2)
elif last is None and event_state == 2:
last = 0
logging.debug('inserting into {0}'.format((last + 1)))
descrambler.insert(last + 1, val)
descrambler_type.insert(last + 1, event_state)
insertion_index = last + 1
# Generate projected values (time is the important one, the space ones are replaced at the end
# according to the descrambler order)
placed_x = next_item_coords[insertion_index][0]
placed_z = next_item_coords[insertion_index][1]
placed_t = i['time_val']
# If the event is stationary, the time of placement doesn't matter, ignore it and set to 0
if event_state == 0:
placed_t = 0
# Add the item to the list using the correct IDX to look up the color
reconstruction_items[val] = {'direction': event_state,
'pos': (placed_x, placed_z, placed_t),
'color': cols[val]}
# Log debug information
logging.debug(','.join(map(str, descrambler)))
logging.debug("{0}, {1}, ({2}, {3}, {4})".format(
item_number_label[val].ljust(11, ' '),
event_state_labels[event_state], placed_x, placed_z, placed_t))
prev_drop_button_state = drop_button_state
prev_inventory_button_state = inventory_button_state
# Replace all of the position values with the descrambled position values at the final time point.
# Keep the time point the same as it should've been corrected earlier
# for idx in range(0, len(reconstruction_items)):
# reconstruction_items[idx]['pos'] = (end_state['itemsx'][descrambler[idx]],
# end_state['itemsz'][descrambler[idx]],
# reconstruction_items[idx]['pos'][2])
# reconstruction_items[idx]['color'] = cols[idx]
############################################################################################################
# END DESCRAMBLER
############################################################################################################
order = [[] for _ in range(0, len(reconstruction_items))]
# Version-2 logs store each item's event type and time directly, so the
# reconstruction can be read straight from the final iteration.
if iterations[0]['version'] == 2:
end_state = iterations[len(iterations) - 1]
for idx, (x, y, z, active, clicked, event, time_val) in \
enumerate(zip(end_state['itemsx'], end_state['itemsy'], end_state['itemsz'], end_state['itemsactive'],
end_state['itemsclicked'], end_state['itemsevent'], end_state['itemstime'])):
reconstruction_items[idx] = {'direction': event, 'pos': (x, z, time_val), 'color': cols[idx]}
# Build per-item placement order from rising edges of the 'active' flags.
order_num = 0
for iter_idx, i in enumerate(iterations):
for idx, (x, y, z, active, clicked, event, time_val) in enumerate(zip(i['itemsx'], i['itemsy'], i['itemsz'],
i['itemsactive'], i['itemsclicked'],
i['itemsevent'], i['itemstime'])):
if active and not iterations[iter_idx - 1]['itemsactive'][idx]:
order[idx].append(order_num)
order_num += 1
return reconstruction_items, order
def get_click_locations_and_indicies(iterations, items, meta):
    """
    Extract click events from the iterations returned by read_binary_file.

    :param iterations: the iterations from read_binary_file
    :param items: the items to be visualized (only the count is used)
    :param meta: the meta information from the filename

    :return: a dictionary with keys:
             'position' - (x, z, time) coordinates of each item's click position
             'index' - index into iterations at which the click happened
             'size' - per-iteration, per-item marker size (0.5 while clicked)
             'color' - RGBA color for each click marker

    NOTE(review): 'position'/'index'/'color' come from np.empty and stay
    uninitialized for items that are never clicked — confirm callers tolerate that.
    """
    n_items = len(items)
    click_idx = np.empty(n_items)
    click_pos = np.empty((n_items, 3))
    click_size = np.zeros((len(iterations), n_items))
    click_color = np.empty((n_items, 4))
    # Study/Practice phases get full click-event labelling; other phases only
    # get marker sizes for currently-clicked items.
    study_practice = meta['phase'] in ('0', '1', '3', '4', '6', '7')
    last = len(iterations) - 1
    for it_idx, snapshot in enumerate(iterations):
        clicked_now = snapshot['itemsclicked']
        if study_practice:
            if it_idx == last:
                # The original logic never processes the final snapshot in
                # these phases (there is no "next" state to compare against).
                continue
            clicked_next = iterations[it_idx + 1]['itemsclicked']
            for item_idx, (now, nxt) in enumerate(zip(clicked_now, clicked_next)):
                if now:
                    click_size[it_idx][item_idx] = 0.5
                if now != nxt:
                    # Click state toggled between this snapshot and the next.
                    click_idx[item_idx] = it_idx
                    click_pos[item_idx] = (snapshot['x'], snapshot['z'], snapshot['time_val'])
                    click_color[item_idx] = (128, 128, 128, 255)
        else:
            for item_idx, now in enumerate(clicked_now):
                if now:
                    click_size[it_idx][item_idx] = 0.5
    return {'position': click_pos, 'index': click_idx, 'size': click_size, 'color': click_color}
def get_items_solutions(meta):
    """
    Return the solution values for a given meta-file configuration.

    :param meta: the meta information from get_filename_meta_data ('phase' and
                 'inverse' keys are used)

    :return: a tuple (items, times, directions) where times contains numeric
             time constants, directions contains numeric labels (2 is Fall,
             1 is Fly, 0 is Stationary/Stay), and items is a list of dicts with:
             "direction" - the 0, 1, or 2 direction value
             "pos" - the (x, z, time) coordinate of the item
             "color" - the RGB color tuple for the item
    """
    practice = meta['phase'] in ('0', '3', '6')
    if practice:
        times = [2, 12, 18, 25]
        directions = [2, 1, 2, 1]  # Fall = 2, Fly = 1, Stay = 0
        locations = [(2, -12), (2, 13), (-13, 2), (-12, -17)]
        colors = [(255, 255, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255)]
        stationary = [((13, 5), (128, 0, 128))]
    else:
        times = [4, 10, 16, 25, 34, 40, 46, 51]
        directions = [2, 1, 1, 2, 1, 2, 2, 1]  # Fall = 2, Fly = 1, Stay = 0
        locations = [(18, -13), (-13, 9), (-10, -2), (6, -2),
                     (17, -8), (-2, -7), (-15, -15), (6, 18)]
        colors = [(255, 255, 0), (255, 255, 0), (255, 0, 0), (255, 0, 0),
                  (0, 255, 0), (0, 255, 0), (0, 0, 255), (0, 0, 255)]
        stationary = [((14, 6), (128, 0, 128)), ((-2, 10), (128, 0, 128))]
    if meta['inverse'] == '1':
        # Inverse runs reverse the time/direction ordering but keep positions.
        times = times[::-1]
        directions = directions[::-1]
    items = [{'direction': d, 'pos': (x, z, t), 'color': c}
             for d, (x, z), t, c in zip(directions, locations, times, colors)]
    items += [{'direction': 0, 'pos': (x, z, 0), 'color': c}
              for (x, z), c in stationary]
    return items, times, directions
def find_data_files_in_directory(directory, file_regex=""):
    """
    Recursively search a directory for files whose basename matches a regular
    expression.

    :param directory: the directory to search
    :param file_regex: the regular expression to match against file basenames

    :return: a list of full paths of matching files
    :raises IOError: if the input path does not exist
    """
    if not os.path.exists(directory):
        raise IOError('The input path was not found.')
    start_time = time.time()
    pattern = re.compile(file_regex)
    matches = []
    for root, dirs, files in os.walk(directory):
        for name in files:
            if pattern.search(os.path.basename(name)):
                logging.debug('Found data file ({0}).'.format(name))
                matches.append(os.path.join(root, name))
    logging.info('Found {0} data files in {1} seconds.'.format(len(matches), time.time() - start_time))
    return matches
def get_exploration_metrics(iterations):
    """
    Compute common exploration metrics from an iterations list returned by
    read_binary_file.

    :param iterations: the iterations from read_binary_file; each element is a
                       dict with at least 'datetime', 'time_val', 'x' and 'y'

    :return: a tuple (total_time, space_travelled, time_travelled,
             space_time_travelled)
    """
    total_time = (iterations[-1]['datetime'] - iterations[0]['datetime']).total_seconds()
    space_travelled = 0
    time_travelled = 0
    space_time_travelled = 0
    # Walk consecutive pairs of samples and accumulate path lengths.
    for current, nxt in zip(iterations, iterations[1:]):
        t = current['time_val']
        t_next = nxt['time_val']
        xy = [current['x'], current['y']]
        xy_next = [nxt['x'], nxt['y']]
        space_travelled += distance.euclidean(xy, xy_next)
        space_time_travelled += distance.euclidean(xy + [t], xy_next + [t_next])
        # BUG FIX: the original called distance.euclidean(t, t_next) on two
        # scalars, which raises in modern SciPy (inputs must be 1-D vectors).
        # The intended quantity is simply the absolute time difference.
        time_travelled += abs(t_next - t)
    return total_time, space_travelled, time_travelled, space_time_travelled
def is_correct_color(t, solution_t, bins=15.0):
    """
    Determine whether an item time lies in the same timeline bin as its
    solution time, for a timeline divided into bins of width ``bins``.

    NOTE(review): when ``solution_t`` is an exact multiple of ``bins`` the
    floor and ceil edges coincide, so this always returns False — confirm
    whether that edge case is intended before relying on boundary values.

    :param bins: the float bin width into which the timeline is divided
    :param t: the time of the item
    :param solution_t: the correct time of the item
    :return: True if the item falls strictly inside the solution's bin
    """
    ratio = float(solution_t) / bins
    lower = float(np.floor(ratio) * bins)
    upper = float(np.ceil(ratio) * bins)
    # noinspection PyTypeChecker
    return lower < float(t) < upper
def compute_accuracy(meta, items):
    """
    Given some meta information from the file via get_filename_meta_data and the item information, compute the accuracy
    of the items within and across contexts.

    :param meta: the meta information from get_filename_meta_data
    :param items: the items from parse_test_items

    :return: a 12-tuple containing:
             space_misplacement - the amount of space-only misplacement
             time_misplacement - the amount of time-only misplacement
             space_time_misplacement - the total space and time misplacement (treating the values equally)
             direction_correct_count - the number of correct direction labels
             mean_context_crossing_excluding_wrong_context_pairs - the mean of the distance between context
                                                                   crossing pairs excluding those which are
                                                                   in the wrong context
             mean_context_noncrossing_excluding_wrong_context_pairs - the mean of the distance between non-context
                                                                      crossing pairs excluding those which are
                                                                      in the wrong context
             mean_context_crossing - the mean distance between context crossing pairs with no exclusions
             mean_noncontext_crossing - the mean distance between non-context-crossing pairs with no exclusions
             followed by four np.nan placeholders for space-based pair metrics that are not implemented
    """
    solution_items, times_solution, directions_solution = get_items_solutions(meta)
    # Split placed items and solution items into parallel coordinate lists.
    xs = [item['pos'][0] for item in items]
    zs = [item['pos'][1] for item in items]
    times = [item['pos'][2] for item in items]
    directions = [item['direction'] for item in items]
    xs_solution = [item['pos'][0] for item in solution_items]
    zs_solution = [item['pos'][1] for item in solution_items]
    space_misplacement = 0
    time_misplacement = 0
    space_time_misplacement = 0
    direction_correct_count = 0
    # Accumulate per-item misplacement against the solution (zip truncates to
    # the shorter of the placed/solution lists).
    for x, z, t, d, solx, solz, solt, sold in zip(xs, zs, times, directions, xs_solution, zs_solution, times_solution,
                                                  directions_solution):
        space_misplacement += distance.euclidean((x, z), (solx, solz))
        time_misplacement += np.abs(t - solt)
        space_time_misplacement += distance.euclidean((x, z, t), (solx, solz, solt))
        direction_correct_count += int(d == sold)
    context_crossing_dist_exclude_wrong_colors_pairs = []
    context_noncrossing_dist_exclude_wrong_colors_pairs = []
    context_crossing_dist_pairs = []
    context_noncrossing_dist_pairs = []
    # Each pair is (crossing_flag, item_index_0, item_index_1); a non-zero
    # first element marks the pair as context-crossing.
    # NOTE(review): indices assume the 8-item (non-practice) layout — confirm
    # behavior for 4-item practice phases where higher indices do not exist.
    pairs = [(1, 1, 2), (1, 3, 4), (1, 5, 6), (0, 0, 1), (0, 2, 3), (0, 4, 5), (0, 6, 7)]
    for pair in pairs:
        crossing = pair[0] != 0
        idx0 = pair[1]
        idx1 = pair[2]
        x0, z0, t0, d0 = (xs[idx0], zs[idx0], times[idx0], directions[idx0])
        solx0, solz0, solt0, sold0 = (
            xs_solution[idx0], zs_solution[idx0], times_solution[idx0], directions_solution[idx0])
        x1, z1, t1, d1 = (xs[idx1], zs[idx1], times[idx1], directions[idx1])
        solx1, solz1, solt1, sold1 = (
            xs_solution[idx1], zs_solution[idx1], times_solution[idx1], directions_solution[idx1])
        # Pair distance: placed time gap normalized by the solution time gap.
        dist = np.abs(t0 - t1) / np.abs(solt0 - solt1)
        if crossing:
            context_crossing_dist_pairs.append(dist)
        else:
            context_noncrossing_dist_pairs.append(dist)
        # Exclusion lists only keep pairs where both items landed in the
        # correct time bin (see is_correct_color).
        if is_correct_color(t0, solt0) and is_correct_color(t1, solt1):
            if crossing:
                context_crossing_dist_exclude_wrong_colors_pairs.append(dist)
            else:
                context_noncrossing_dist_exclude_wrong_colors_pairs.append(dist)
    with warnings.catch_warnings():
        # np.mean of an empty list emits a RuntimeWarning and yields nan;
        # suppress the warning and let nan propagate as the result.
        warnings.simplefilter("ignore", category=RuntimeWarning)
        return space_misplacement, time_misplacement, space_time_misplacement, direction_correct_count, \
            np.mean(context_crossing_dist_exclude_wrong_colors_pairs), \
            np.mean(context_noncrossing_dist_exclude_wrong_colors_pairs), \
            np.mean(context_crossing_dist_pairs), np.mean(context_noncrossing_dist_pairs), \
            np.nan, np.nan, np.nan, np.nan  # Patch to fix issue with space return values which are missing...
def get_item_details(pastel_factor=127):
    """
    Return detailed information about the item solutions: event-state label
    strings, item name labels, item image filenames (JPG), and an RGB color
    tuple per item.

    :param pastel_factor: channel value used to soften the RGB colors (default 127)

    :return: a tuple (event_state_labels, item_number_label,
             item_label_filename, cols)
    """
    event_state_labels = ['stationary', 'up', 'down']
    item_number_label = ['bottle', 'icecubetray', 'clover', 'basketball', 'boot', 'crown', 'bandana', 'hammer',
                        'fireext', 'guitar']
    item_label_filename = ['bottle.jpg', 'icecubetray.jpg', 'clover.jpg', 'basketball.jpg',
                          'boot.jpg', 'crown.jpg', 'bandana.jpg', 'hammer.jpg',
                          'fireextinguisher.jpg', 'guitar.jpg']
    p = pastel_factor
    # Two consecutive items share each base color: yellow, red, green, blue, purple.
    base_colors = [(255, 255, p), (255, p, p), (p, 255, p), (p, p, 255), (128, p / 2, 128)]
    cols = [color for color in base_colors for _ in (0, 1)]
    return event_state_labels, item_number_label, item_label_filename, cols
|
kevroy314/msl-iposition-pipeline
|
cogrecon/core/data_flexing/time_travel_task/time_travel_task_binary_reader.py
|
Python
|
gpl-3.0
| 41,651
|
from cupy.random import distributions
from cupy.random import generator
def rand(*size, **kwarg):
    """Returns an array of uniform random values over the interval ``[0, 1)``.

    Each element of the array is uniformly distributed on the half-open
    interval ``[0, 1)``. All elements are identically and independently
    distributed (i.i.d.).

    Args:
        size (tuple of ints): The shape of the array.
        dtype: Data type specifier. Only float32 and float64 types are allowed.
            The default is float64.

    Returns:
        cupy.ndarray: A random array.

    .. seealso:: :func:`numpy.random.rand`

    """
    options = dict(kwarg)
    dtype = options.pop('dtype', float)
    if options:
        raise TypeError('rand() got unexpected keyword arguments %s'
                        % ', '.join(options.keys()))
    return random_sample(size=size, dtype=dtype)
def randn(*size, **kwarg):
    """Returns an array of standard normal random values.

    Each element of the array is normally distributed with zero mean and unit
    variance. All elements are identically and independently distributed
    (i.i.d.).

    Args:
        size (tuple of ints): The shape of the array.
        dtype: Data type specifier. Only float32 and float64 types are allowed.
            The default is float64.

    Returns:
        cupy.ndarray: An array of standard normal random values.

    .. seealso:: :func:`numpy.random.randn`

    """
    dtype = kwarg.pop('dtype', float)
    if kwarg:
        raise TypeError('randn() got unexpected keyword arguments %s'
                        % ', '.join(kwarg.keys()))
    return distributions.normal(size=size, dtype=dtype)
def randint(low, high=None, size=None):
    """Returns a scalar or an array of integer values over ``[low, high)``.

    Each element of returned values are independently sampled from
    uniform distribution over left-close and right-open interval
    ``[low, high)``.

    Args:
        low (int): If ``high`` is not ``None``,
            it is the lower bound of the interval.
            Otherwise, it is the **upper** bound of the interval
            and lower bound of the interval is set to ``0``.
        high (int): Upper bound of the interval.
        size (None or int or tuple of ints): The shape of returned value.

    Returns:
        int or cupy.ndarray of ints: If size is ``None``,
        it is single integer sampled.
        If size is integer, it is the 1D-array of length ``size`` element.
        Otherwise, it is the array whose shape specified by ``size``.

    """
    if high is None:
        lo, hi = 0, low
    else:
        lo, hi = low, high

    if lo >= hi:
        raise ValueError('low >= high')

    # rs.interval samples integers in [0, diff] inclusive, so pass the
    # largest valid offset from lo.
    diff = hi - lo - 1
    rs = generator.get_random_state()
    return lo + rs.interval(diff, size)
def random_integers(low, high=None, size=None):
    """Return a scalar or an array of integer values over ``[low, high]``

    Each element of returned values are independently sampled from
    uniform distribution over closed interval ``[low, high]``.

    Args:
        low (int): If ``high`` is not ``None``,
            it is the lower bound of the interval.
            Otherwise, it is the **upper** bound of the interval
            and the lower bound is set to ``1``.
        high (int): Upper bound of the interval.
        size (None or int or tuple of ints): The shape of returned value.

    Returns:
        int or cupy.ndarray of ints: If size is ``None``,
        it is single integer sampled.
        If size is integer, it is the 1D-array of length ``size`` element.
        Otherwise, it is the array whose shape specified by ``size``.

    """
    if high is None:
        high = low
        low = 1
    # Delegate to randint with an exclusive upper bound one past `high`.
    return randint(low, high + 1, size)
def random_sample(size=None, dtype=float):
    """Returns an array of random values over the interval ``[0, 1)``.

    This is a variant of :func:`cupy.random.rand`.

    Args:
        size (int or tuple of ints): The shape of the array.
        dtype: Data type specifier. Only float32 and float64 types are allowed.

    Returns:
        cupy.ndarray: An array of uniformly distributed random values.

    .. seealso:: :func:`numpy.random.random_sample`

    """
    return generator.get_random_state().random_sample(size=size, dtype=dtype)
# TODO(okuta): Implement choice
|
cemoody/chainer
|
cupy/random/sample.py
|
Python
|
mit
| 4,357
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
This module handles rate liming at a per-user level, so it should not be used
to prevent intentional Denial of Service attacks, as we can assume a DOS can
easily come through multiple user accounts. DOS protection should be done at a
different layer. Instead this module should be used to protect against
unintentional user actions. With that in mind the limits set here should be
high enough as to not rate-limit any intentional actions.
To find good rate-limit values, check how long requests are taking (see logs)
in your environment to assess your capabilities and multiply out to get
figures.
NOTE: As the rate-limiting here is done in memory, this only works per
process (each process will have its own rate limiting counter).
"""
import collections
import copy
import httplib
import math
import re
import time
import webob.dec
import webob.exc
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import quota
from nova import utils
from nova import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for limits responses."""

    def construct(self):
        # <limits> root containing a <rates> section: one <rate uri= regex=>
        # element per rate limit, each holding a <limit> element whose
        # attributes carry the counters.
        root = xmlutil.TemplateElement('limits', selector='limits')
        rates = xmlutil.SubTemplateElement(root, 'rates')
        rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
        rate.set('uri', 'uri')
        rate.set('regex', 'regex')
        limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
        limit.set('value', 'value')
        limit.set('verb', 'verb')
        limit.set('remaining', 'remaining')
        limit.set('unit', 'unit')
        limit.set('next-available', 'next-available')

        # <absolute> section: one <limit name= value=> element per absolute
        # quota, rendered from (name, value) item pairs.
        absolute = xmlutil.SubTemplateElement(root, 'absolute',
                                              selector='absolute')
        limit = xmlutil.SubTemplateElement(absolute, 'limit',
                                           selector=xmlutil.get_items)
        limit.set('name', 0)
        limit.set('value', 1)

        return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(object):
    """Controller for accessing limits in the OpenStack API."""

    @wsgi.serializers(xml=LimitsTemplate)
    def index(self, req):
        """Return all global and rate limit information."""
        context = req.environ['nova.context']
        project_quotas = QUOTAS.get_project_quotas(context, context.project_id,
                                                   usages=False)
        # Flatten quota records down to {name: limit}.
        abs_limits = {}
        for name, info in project_quotas.items():
            abs_limits[name] = info['limit']
        rate_limits = req.environ.get("nova.limits", [])

        return self._get_view_builder(req).build(rate_limits, abs_limits)

    def create(self, req, body):
        """Create a new limit."""
        raise webob.exc.HTTPNotImplemented()

    def delete(self, req, id):
        """Delete the limit."""
        raise webob.exc.HTTPNotImplemented()

    def detail(self, req):
        """Return limit details."""
        raise webob.exc.HTTPNotImplemented()

    def show(self, req, id):
        """Show limit information."""
        raise webob.exc.HTTPNotImplemented()

    def update(self, req, id, body):
        """Update existing limit."""
        raise webob.exc.HTTPNotImplemented()

    def _get_view_builder(self, req):
        """Return the view builder used to render limits responses."""
        return limits_views.ViewBuilder()
def create_resource():
    """Create a WSGI resource wrapping a new LimitsController."""
    return wsgi.Resource(LimitsController())
class Limit(object):
    """
    Stores information about a limit for HTTP requests.

    Rate accounting uses a leaky-bucket scheme: the bucket drains one unit of
    "water" per second of elapsed wall time, each matching request adds
    `request_value` units, and a request that would overflow the bucket (of
    size `capacity` seconds) is delayed.
    """

    # Reverse mapping of utils.TIME_UNITS: numeric unit value -> unit name.
    UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])

    def __init__(self, verb, uri, regex, value, unit):
        """
        Initialize a new `Limit`.

        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        @raise ValueError: if value is not positive
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)

        if value <= 0:
            raise ValueError("Limit value must be > 0")

        self.last_request = None
        self.next_request = None

        # Leaky-bucket state: capacity is the unit's length in seconds and
        # each request contributes capacity/value units of water.
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        msg = _("Only %(value)s %(verb)s request(s) can be "
                "made to %(uri)s every %(unit_string)s.")
        self.error_message = msg % self.__dict__

    def __call__(self, verb, url):
        """
        Represents a call to this limit from a relevant request.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        @return: the delay in seconds before this request may be retried, or
                 None when the request is allowed (or the limit does not
                 apply to this verb/url).
        """
        if self.verb != verb or not re.match(self.regex, url):
            return

        now = self._get_time()

        if self.last_request is None:
            self.last_request = now

        # Drain the bucket by the seconds elapsed since the last request.
        leak_value = now - self.last_request

        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value

        difference = self.water_level - self.capacity

        self.last_request = now

        if difference > 0:
            # Overflow: undo this request's contribution and report how long
            # the caller must wait.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference

        cap = self.capacity
        water = self.water_level
        val = self.value

        # Remaining requests proportional to the bucket's free space.
        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now

    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()

    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")

    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
# NOTE: counters are kept in memory, so these defaults apply per process
# (see the module docstring above).
DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']),
    Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*changes-since*", ".*changes-since.*", 120,
          utils.TIME_UNITS['MINUTE']),
    Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
    """
    Rate-limits requests passing through this middleware. All limit information
    is stored in memory for this implementation.
    """

    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """
        Wrap the given WSGI application and set up the given limits.

        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits

        Remaining keyword arguments are forwarded to the limiter constructor.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Resolve the limiter class; default to the in-memory Limiter.
        if limiter is None:
            limiter_cls = Limiter
        else:
            limiter_cls = importutils.import_class(limiter)

        # Parse the limits string, if one was provided; fall back to the
        # module defaults when nothing (or an empty rule set) was given.
        parsed = limiter_cls.parse_limits(limits) if limits is not None else None
        self._limiter = limiter_cls(parsed or DEFAULT_LIMITS, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """
        Process a single request through this middleware.

        Record the hit when a limit is relevant; if the request exceeded a
        limit, return a fault telling the user to retry later, otherwise pass
        the request on to the wrapped application.
        """
        context = req.environ.get("nova.context")
        username = context.user_id if context else None

        delay, error = self._limiter.check_for_delay(req.method, req.url,
                                                     username)

        if delay:
            msg = _("This request was rate-limited.")
            return wsgi.RateLimitFault(msg, error, time.time() + delay)

        req.environ["nova.limits"] = self._limiter.get_limits(username)

        return self.application
class Limiter(object):
    """
    Rate-limit checking class which handles limits in memory.
    """

    def __init__(self, limits, **kwargs):
        """
        Initialize the new `Limiter`.

        @param limits: List of `Limit` objects used as the default for any
                       user without an explicit override

        Keyword arguments of the form ``limits.<username>`` provide per-user
        limit strings (parsed with parse_limits).
        """
        self.limits = copy.deepcopy(limits)
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))

        # Pick up any per-user limit information
        for key, value in kwargs.items():
            if not key.startswith(LIMITS_PREFIX):
                continue
            username = key[len(LIMITS_PREFIX):]
            self.levels[username] = self.parse_limits(value)

    def get_limits(self, username=None):
        """
        Return display dictionaries for the given user's limits.
        """
        return [lim.display() for lim in self.levels[username]]

    def check_for_delay(self, verb, url, username=None):
        """
        Check the given verb/url/username triplet against the user's limits.

        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        hits = []
        for lim in self.levels[username]:
            delay = lim(verb, url)
            if delay:
                hits.append((delay, lim.error_message))

        if not hits:
            return None, None
        # Report the smallest required delay.
        return min(hits)

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """
        Convert a string into a list of Limit instances. This
        implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".

        @return: List of Limit instances.
        """
        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []

        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if not (group.startswith('(') and group.endswith(')')):
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")

            # Extract and clean the Limit arguments
            args = [part.strip() for part in group[1:-1].split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")

            verb, uri, regex, value, unit = args
            verb = verb.upper()
            value = int(value)  # raises ValueError for non-integers
            unit = unit.upper()
            if unit not in utils.TIME_UNITS:
                raise ValueError("Invalid units specified")

            result.append(Limit(verb, uri, regex, value,
                                utils.TIME_UNITS[unit]))

        return result
class WsgiLimiter(object):
    """
    Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.

    To use, POST ``/<username>`` with JSON data such as::

        {
            "verb" : GET,
            "path" : "/servers"
        }

    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """

    def __init__(self, limits=None):
        """
        Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects; falls back to DEFAULT_LIMITS
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """
        Handles a call to this application. Returns 204 if the request is
        acceptable to the limiter, else a 403 is returned with a relevant
        header indicating when the request *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()

        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()

        username = request.path_info_pop()
        delay, error = self._limiter.check_for_delay(info.get("verb"),
                                                     info.get("path"),
                                                     username)

        if not delay:
            return webob.exc.HTTPNoContent()
        return webob.exc.HTTPForbidden(
            headers={"X-Wait-Seconds": "%.2f" % delay}, explanation=error)
class WsgiLimiterProxy(object):
    """
    Rate-limit requests based on answers from a remote source.
    """

    def __init__(self, limiter_address):
        """
        Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address

    def check_for_delay(self, verb, path, username=None):
        """
        Ask the remote limiter whether this verb/path/username must be
        delayed.

        @return: Tuple of (delay string from X-Wait-Seconds, error body) when
                 the request is limited, or (None, None) when it may proceed.
        """
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}

        conn = httplib.HTTPConnection(self.limiter_address)

        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)

        resp = conn.getresponse()

        # BUG FIX: the original condition was "200 >= resp.status < 300",
        # which only accepted statuses <= 200 — so the remote limiter's
        # 204 No Content success reply was wrongly treated as a limit hit.
        # Any 2xx status indicates the request is allowed.
        if 200 <= resp.status < 300:
            return None, None

        return resp.getheader("X-Wait-Seconds"), resp.read() or None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method.  It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """
        Ignore a limits string--simply doesn't apply for the limit
        proxy.

        @return: Empty list.
        """
        return []
|
OpenAcademy-OpenStack/nova-scheduler
|
nova/api/openstack/compute/limits.py
|
Python
|
apache-2.0
| 16,526
|
#!/usr/bin/env python3
import logging
import math
# from binascii import unhexlify
from struct import unpack
log = logging.getLogger("protocol_helpers")
def crc8(byteData):
    """
    Generate the 8-bit checksum (byte sum modulo 256) of the supplied bytes.

    :param byteData: bytes (or iterable of ints) to checksum
    :return: int in the range 0-255
    """
    # Summing first and masking once is equivalent to masking after each
    # addition; also drops the dead commented-out hex-string handling.
    return sum(byteData) & 0xFF
def crc8P1(byteData):
    """
    Generate the 8-bit checksum of the supplied bytes plus one,
    e.g. as used in the REVO PI30 protocol.

    :param byteData: bytes (or iterable of ints) to checksum
    :return: int in the range 0-255
    """
    total = 1
    for value in byteData:
        total += value
    return total & 0xFF
def crcJK232(byteData):
    """
    Generate the JK RS232 / RS485 CRC.

    Two bytes computed as the bitwise inverse of the 16-bit byte sum plus one
    (i.e. the two's complement of the sum), returned high byte first.

    :param byteData: bytes covering "command code + length byte + data segment"
    :return: [high_byte, low_byte]
    """
    total = sum(byteData)
    crc = (total ^ 0xFFFF) + 1
    return [(crc >> 8) & 0xFF, crc & 0xFF]
def vedHexChecksum(byteData):
    """
    Generate the VE.Direct HEX checksum: the byte such that
    (sum(byteData) + checksum) % 256 == 0x55.

    :param byteData: bytes to checksum
    :return: int in the range 0-255
    """
    return (0x55 - sum(byteData)) & 0xFF
def uptime(byteData):
    """
    Decode a little-endian byte sequence into a JKBMS uptime string of the
    form "<days>D<hours>H<minutes>M<seconds>S".

    :param byteData: bytes holding the uptime in seconds, least significant first
    :return: the formatted uptime string
    """
    logger = logging.getLogger("protocol_helpers")
    logger.debug("uptime defn")
    # Rebuild the integer second count from the little-endian bytes.
    value = 0
    for x, b in enumerate(byteData):
        value += b * 256 ** x
        logger.debug(f"Uptime int value {value} for pos {x}")
    daysFloat = value / (60 * 60 * 24)
    days = math.trunc(daysFloat)
    hoursFloat = (daysFloat - days) * 24
    hours = math.trunc(hoursFloat)
    minutesFloat = (hoursFloat - hours) * 60
    minutes = math.trunc(minutesFloat)
    seconds = round((minutesFloat - minutes) * 60)
    result = f"{days}D{hours}H{minutes}M{seconds}S"
    logger.info(f"Uptime result {result}")
    return result
def Hex2Int(hexString):
    """
    Decode the first byte of hexString to an int.

    :param hexString: bytes; only the first byte is used
    :return: int value of the first byte
    """
    first_byte = hexString[0]
    logging.getLogger("protocol_helpers").debug(f"Hex {hexString} decoded to {first_byte}")
    return first_byte
def Hex2Str(hexString):
    """
    Return hexString as an ASCII representation of hex digits, e.g. 0x4a -> "4a".

    :param hexString: bytes to render
    :return: lowercase hex string, two characters per byte
    """
    text = "".join(f"{b:02x}" for b in hexString)
    logging.getLogger("protocol_helpers").debug(f"Hex {hexString} decoded to {text}")
    return text
def Hex2Ascii(hexString):
    """
    Return hexString decoded as ASCII characters, e.g. 0x4a -> "J".
    Zero bytes are skipped.

    :param hexString: bytes to decode
    :return: string of characters for every non-zero byte
    """
    text = "".join(chr(b) for b in hexString if b != 0)
    logging.getLogger("protocol_helpers").debug(f"Hex {hexString} decoded to {text}")
    return text
def LittleHex2Short(hexString):
    """
    Decode 2 bytes as a signed 16-bit little-endian integer.

    Returns 0 (after logging) when the input is not exactly 2 bytes.
    """
    if len(hexString) != 2:
        log.info(f"Hex encoded value must be 2 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack("<h", hexString)[0]
    log.debug(f"Hex {hexString} 2 byte decoded to {value}")
    return value
def BigHex2Short(hexString):
    """
    Decode 2 bytes as a signed 16-bit big-endian integer.

    Returns 0 (after logging) when the input is not exactly 2 bytes.
    """
    if len(hexString) != 2:
        log.info(f"Hex encoded value must be 2 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack(">h", hexString)[0]
    log.debug(f"Hex {hexString} 2 byte decoded to {value}")
    return value
def BigHex2Float(hexString):
    """
    Decode 4 bytes as an unsigned 32-bit big-endian integer.

    NOTE(review): despite the name this unpacks ">I" (unsigned int),
    not an IEEE float; callers appear to rely on the integer result,
    so the behavior is deliberately kept as-is.
    Returns 0 (after logging) when the input is not exactly 4 bytes.
    """
    if len(hexString) != 4:
        log.info(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack(">I", hexString)[0]
    log.debug(f"Hex {hexString} 4 byte decoded to {value}")
    return value
def LittleHex2Float(hexString):
    """
    Decode 4 bytes as an IEEE-754 little-endian float.

    Returns 0 (after logging) when the input is not exactly 4 bytes.
    """
    if len(hexString) != 4:
        log.info(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack("<f", hexString)[0]
    log.debug(f"Hex {hexString} 4 byte decoded to {value}")
    return value
def LittleHex2UInt(hexString):
    """
    Decode 4 bytes as an unsigned 32-bit little-endian integer.

    Returns 0 (after logging) when the input is not exactly 4 bytes.
    """
    if len(hexString) != 4:
        log.info(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack("<I", hexString)[0]
    log.debug(f"Hex {hexString} 4 byte decoded to {value}")
    return value
def LittleHex2Int(hexString):
    """
    Decode 4 bytes as a signed 32-bit little-endian integer.

    Returns 0 (after logging) when the input is not exactly 4 bytes.
    """
    if len(hexString) != 4:
        log.info(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack("<i", hexString)[0]
    log.debug(f"Hex {hexString} 4 byte decoded to {value}")
    return value
def decode2ByteHex(hexString):
    """
    Decode 2 bytes as a signed little-endian value scaled by 1/1000
    (JKBMS cell-voltage encoding, determined by black-box testing).

    Returns 0 (after a warning) when the input is not exactly 2 bytes.
    """
    log.debug(f"hexString: {hexString}")
    if len(hexString) != 2:
        log.warning(f"Hex encoded value must be 2 bytes long. Was {len(hexString)} length")
        return 0
    value = unpack("<h", hexString)[0] / 1000
    log.debug(f"Hex {hexString} 2 byte decoded to {value}")
    return value
def _decode4ByteHex1000(hexToDecode):
    """
    Code a 4 byte hexString per jkbms approach (blackbox determined)
    - need to decode as 8 hex chars
    """
    # Each of the eight nibbles is weighted as a little-endian uint32
    # digit and every contribution is scaled by 1/1000; the float
    # accumulation order below is deliberate and must not be reordered.
    # hexString = bytes.fromhex(hexToDecode)
    hexString = hexToDecode
    log.debug(f"hexString: {hexString}")
    answer = 0.0
    # Make sure supplied String is long enough
    if len(hexString) != 4:
        log.warning(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
        return 0
    # 1st position
    pos1 = hexString[0] >> 4
    answer += pos1 * (2 ** 4 / 1000)
    log.debug(f"answer after pos1 {answer}")
    # 2nd position
    pos2 = hexString[0] & 0x0F
    answer += pos2 * (2 ** 0 / 1000)
    log.debug(f"answer after pos2 {answer}")
    # 3rd position
    pos3 = hexString[1] >> 4
    answer += pos3 * (2 ** 12 / 1000)
    log.debug(f"answer after pos3 {answer}")
    # 4th position
    pos4 = hexString[1] & 0x0F
    answer += pos4 * (2 ** 8 / 1000)
    # 5st position
    pos5 = hexString[2] >> 4
    answer += pos5 * (2 ** 20 / 1000)
    log.debug(f"answer after pos5 {answer}")
    # 6st position
    pos6 = hexString[2] & 0x0F
    answer += pos6 * (2 ** 16 / 1000)
    log.debug(f"answer after pos6 {answer}")
    # 7th position
    pos7 = hexString[3] >> 4
    answer += pos7 * (2 ** 28 / 1000)
    log.debug(f"answer after pos7 {answer}")
    # 8th position
    pos8 = hexString[3] & 0x0F
    answer += pos8 * (2 ** 24 / 1000)
    log.debug(f"answer after pos8 {answer}")
    log.debug(f"Hex {hexString} 8 byte decoded to {answer}")
    return answer
def _decode4ByteHex(hexToDecode):
"""
Code a 4 byte hexString to volts as per jkbms approach (blackbox determined)
"""
# hexString = decode2ByteHex
hexString = hexToDecode
log.debug(f"hexString: {hexString}")
answer = 0.0
# Make sure supplied String is long enough
if len(hexString) != 4:
log.warning(f"Hex encoded value must be 4 bytes long. Was {len(hexString)} length")
return 0
# Use python tools for decode
answer = unpack("<f", hexString)[0]
log.debug(f"Hex {hexString} 4 byte decoded to {answer}")
return answer
def crcPI(data_bytes):
    """
    Calculate the 2-byte CRC used by PI-style inverter protocols
    (CRC-16/XMODEM computed with a 16-entry nibble table).

    Result bytes that collide with the protocol control characters
    '(' (0x28), CR (0x0D) or LF (0x0A) are bumped by one.
    Returns [crc_high, crc_low].
    """
    log.debug(f"Calculating CRC for {data_bytes}")
    table = [
        0x0000, 0x1021, 0x2042, 0x3063,
        0x4084, 0x50A5, 0x60C6, 0x70E7,
        0x8108, 0x9129, 0xA14A, 0xB16B,
        0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
    ]
    crc = 0
    for item in data_bytes:
        # Accept both str and bytes input.
        if type(item) == str:
            item = ord(item)
        # Process high nibble then low nibble.
        for nibble in (item >> 4, item & 0x0F):
            top = ((crc >> 8) & 0xFF) >> 4
            crc = (crc << 4) & 0xFFFF
            crc ^= table[top ^ nibble]
    crc_low = crc & 0xFF
    crc_high = (crc >> 8) & 0xFF
    # Avoid reserved control bytes in the transmitted CRC.
    if crc_low == 0x28 or crc_low == 0x0D or crc_low == 0x0A:
        crc_low += 1
    if crc_high == 0x28 or crc_high == 0x0D or crc_high == 0x0A:
        crc_high += 1
    crc = (crc_high << 8) + crc_low
    log.debug(f"Generated CRC {crc_high:#04x} {crc_low:#04x} {crc:#06x}")
    return [crc_high, crc_low]
|
jblance/mpp-solar
|
mppsolar/protocols/protocol_helpers.py
|
Python
|
mit
| 9,335
|
#!/usr/bin/env python
#
# NuGet packaging script.
# Assembles a NuGet package using CI artifacts in S3
# and calls nuget (in docker) to finalize the package.
#
import sys
import re
import os
import tempfile
import shutil
import subprocess
import urllib
from fnmatch import fnmatch
from string import Template
from collections import defaultdict
import boto3
from zfile import zfile
# Rename token values
# Maps artifact-info token values to the names NuGet expects,
# e.g. plat 'windows' -> 'win', arch 'x86_64' -> 'x64'.
rename_vals = {'plat': {'windows': 'win'},
               'arch': {'x86_64': 'x64',
                        'i386': 'x86',
                        'win32': 'x86'}}
# Collects CI artifacts from S3 storage, downloading them
# to a local directory, or collecting already downloaded artifacts from
# local directory.
#
# The artifacts' folder in the S3 bucket must have the following token
# format:
# <token>-[<value>]__ (repeat)
#
# Recognized tokens (unrecognized tokens are ignored):
# p - project (e.g., "confluent-kafka-python")
# bld - builder (e.g., "travis")
# plat - platform ("osx", "linux", ..)
# arch - arch ("x64", ..)
# tag - git tag
# sha - git sha
# bid - builder's build-id
# bldtype - Release, Debug (appveyor)
#
# Example:
# librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz
# S3 bucket that CI builders upload librdkafka artifacts to.
s3_bucket = 'librdkafka-ci-packages'
# When True, skip downloads and directory creation (log-only mode).
dry_run = False
class Artifact (object):
    """A single build artifact (file) found in S3 or locally.

    Parses matching tokens from the surrounding folder name into
    .info and registers itself with the owning Artifacts collection.
    """

    def __init__(self, arts, path, info=None):
        self.path = path
        # Remove unexpanded AppVeyor $(..) tokens from filename
        self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
        slpath = os.path.join(os.path.dirname(path), self.fname)
        if os.path.isfile(slpath):
            # Already points to local file in correct location
            self.lpath = slpath
        else:
            # Prepare download location in dlpath
            self.lpath = os.path.join(arts.dlpath, slpath)

        if info is None:
            self.info = dict()
        else:
            # Assign the map and convert all keys to lower case
            self.info = {k.lower(): v for k, v in info.items()}
            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
            for k,v in self.info.items():
                rdict = rename_vals.get(k, None)
                if rdict is not None:
                    self.info[k] = rdict.get(v, v)

        # Score value for sorting
        self.score = 0

        # AppVeyor symbol builds are of less value
        if self.fname.find('.symbols.') != -1:
            self.score -= 10

        self.arts = arts
        arts.artifacts.append(self)

    def __repr__(self):
        return self.path

    def __lt__ (self, other):
        # Sort artifacts by score (lower scores first).
        return self.score < other.score

    def download(self):
        """ Download artifact from S3 and store in local directory .lpath.
            If the artifact is already downloaded nothing is done. """
        if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
            return
        print('Downloading %s' % self.path)
        if dry_run:
            return
        ldir = os.path.dirname(self.lpath)
        if not os.path.isdir(ldir):
            os.makedirs(ldir, 0o755)
        self.arts.s3_bucket.download_file(self.path, self.lpath)
class Artifacts (object):
    """Collection of Artifact objects that satisfy a token filter.

    Artifacts can be collected from S3 (and downloaded into .dlpath)
    or from a local directory of previously downloaded files.
    """

    def __init__(self, match, dlpath):
        super(Artifacts, self).__init__()
        self.match = match
        self.artifacts = list()
        # Download directory (make sure it ends with a path separator)
        if not dlpath.endswith(os.path.sep):
            dlpath = os.path.join(dlpath, '')
        self.dlpath = dlpath
        if not os.path.isdir(self.dlpath):
            if not dry_run:
                os.makedirs(self.dlpath, 0o755)

    def collect_single(self, path, req_tag=True):
        """ Collect single artifact, be it in S3 or locally.
        :param: path string: S3 or local (relative) path
        :param: req_tag bool: Require tag to match.
        """

        #print('? %s' % path)

        # For local files, strip download path.
        # Also ignore any parent directories.
        if path.startswith(self.dlpath):
            folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
        else:
            folder = os.path.basename(os.path.dirname(path))

        # The folder contains the tokens needed to perform
        # matching of project, gitref, etc.
        rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
        if rinfo is None or len(rinfo) == 0:
            print('Incorrect folder/file name format for %s' % folder)
            return None

        info = dict(rinfo)

        # Ignore AppVeyor Debug builds
        if info.get('bldtype', '').lower() == 'debug':
            print('Ignoring debug artifact %s' % folder)
            return None

        tag = info.get('tag', None)
        if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
            # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME)
            # with an empty value when not set, it leaves that token
            # in the string - so translate that to no tag.
            del info['tag']

        # Perform matching
        unmatched = list()
        for m,v in self.match.items():
            if m not in info or info[m] != v:
                unmatched.append(m)

        # Make sure all matches were satisfied, unless this is a
        # common artifact.
        if info.get('p', '') != 'common' and len(unmatched) > 0:
            # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
            return None

        return Artifact(self, path, info)

    def collect_s3(self):
        """ Collect and download build-artifacts from S3 based on git reference """
        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
        self.s3 = boto3.resource('s3')
        self.s3_bucket = self.s3.Bucket(s3_bucket)
        self.s3_client = boto3.client('s3')
        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
            self.collect_single(item.get('Key'))

        for a in self.artifacts:
            a.download()

    def collect_local(self, path, req_tag=True):
        """ Collect artifacts from a local directory possibly previously
            collected from s3 """
        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
            if not os.path.isfile(f):
                continue
            self.collect_single(f, req_tag)
class Package (object):
    """ Generic Package class
        A Package is a working container for one or more output
        packages for a specific package type (e.g., nuget) """

    def __init__ (self, version, arts, ptype):
        super(Package, self).__init__()
        self.version = version
        self.arts = arts
        self.ptype = ptype
        # These may be overwritten by specific sub-classes:
        self.artifacts = arts.artifacts
        # Staging path, filled in later.
        self.stpath = None
        self.kv = {'version': version}
        self.files = dict()

    def add_file (self, file):
        """ Register a file as part of this package's payload. """
        self.files[file] = True

    def build (self):
        """ Build package output(s), return a list of paths to built packages """
        raise NotImplementedError

    def cleanup (self):
        """ Optional cleanup routine for removing temporary files, etc. """
        pass

    def verify (self, path):
        """ Optional post-build package verifier """
        pass

    def render (self, fname, destpath='.'):
        """ Render template in file fname and save to destpath/fname,
            where destpath is relative to stpath """
        outf = os.path.join(self.stpath, destpath, fname)
        if not os.path.isdir(os.path.dirname(outf)):
            os.makedirs(os.path.dirname(outf), 0o0755)
        # Substitute the package's key/value tokens (e.g. version)
        # into the template.
        with open(os.path.join('templates', fname), 'r') as tf:
            tmpl = Template(tf.read())
        with open(outf, 'w') as of:
            of.write(tmpl.substitute(self.kv))
        self.add_file(outf)

    def copy_template (self, fname, target_fname=None, destpath='.'):
        """ Copy template file to destpath/fname
            where destpath is relative to stpath """
        if target_fname is None:
            target_fname = fname
        outf = os.path.join(self.stpath, destpath, target_fname)
        if not os.path.isdir(os.path.dirname(outf)):
            os.makedirs(os.path.dirname(outf), 0o0755)
        shutil.copy(os.path.join('templates', fname), outf)
        self.add_file(outf)
class NugetPackage (Package):
    """ All platforms, archs, et.al, are bundled into one set of
        NuGet output packages: "main", redist and symbols """

    def __init__ (self, version, arts):
        if version.startswith('v'):
            version = version[1:]  # Strip v prefix
        super(NugetPackage, self).__init__(version, arts, "nuget")

    def cleanup(self):
        # Remove the staging directory created by build().
        if os.path.isdir(self.stpath):
            shutil.rmtree(self.stpath)

    def build (self, buildtype):
        """ Build single NuGet package for all its artifacts. """

        # NuGet removes the prefixing v from the version.
        vless_version = self.kv['version']
        if vless_version[0] == 'v':
            vless_version = vless_version[1:]

        self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype,
                                       dir=".")

        # Lay out the static package skeleton (nuspec, targets, props,
        # top-level docs) in the staging directory.
        self.render('librdkafka.redist.nuspec')
        self.copy_template('librdkafka.redist.targets',
                           destpath=os.path.join('build', 'native'))
        self.copy_template('librdkafka.redist.props',
                           destpath='build')
        for f in ['../../README.md', '../../CONFIGURATION.md', '../../LICENSES.txt']:
            shutil.copy(f, self.stpath)

        # Generate template tokens for artifacts
        for a in self.arts.artifacts:
            if 'bldtype' not in a.info:
                a.info['bldtype'] = 'release'

            a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'),
                                              a.info.get('arch'),
                                              a.info.get('bldtype'))
            if 'toolset' not in a.info:
                a.info['toolset'] = 'v120'

        # Each mapping is [match-attributes, archive member, package path]:
        # pick the artifact whose info matches the attributes and whose
        # filename matches fname_glob, then extract 'member' to the
        # package-relative destination.
        mappings = [
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka.h', 'build/native/include/librdkafka/rdkafka.h'],
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'],
            # Travis OSX build
            [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'],
            # Travis Debian 9 / Ubuntu 16.04 build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-debian9.tgz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/debian9-librdkafka.so'],
            # Travis Ubuntu 14.04 build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/librdkafka.so'],
            # Travis CentOS 7 RPM build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka1*.x86_64.rpm'}, './usr/lib64/librdkafka.so.1', 'runtimes/linux-x64/native/centos7-librdkafka.so'],
            # Alpine build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'],
            # Common Win runtime
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win-x64/native/msvcr120.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcp120.dll', 'runtimes/win-x64/native/msvcp120.dll'],
            # matches librdkafka.redist.{VER}.nupkg
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/librdkafka.dll', 'runtimes/win-x64/native/librdkafka.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/librdkafkacpp.dll', 'runtimes/win-x64/native/librdkafkacpp.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/zlib.dll', 'runtimes/win-x64/native/zlib.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/x64/Release/libzstd.dll', 'runtimes/win-x64/native/libzstd.dll'],
            # matches librdkafka.{VER}.nupkg
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v120/x64/Release/librdkafka.lib', 'build/native/lib/win/x64/win-x64-Release/v120/librdkafka.lib'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v120/x64/Release/librdkafkacpp.lib', 'build/native/lib/win/x64/win-x64-Release/v120/librdkafkacpp.lib'],

            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcr120.dll', 'runtimes/win-x86/native/msvcr120.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr120.zip'}, 'msvcp120.dll', 'runtimes/win-x86/native/msvcp120.dll'],
            # matches librdkafka.redist.{VER}.nupkg
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/librdkafka.dll', 'runtimes/win-x86/native/librdkafka.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/librdkafkacpp.dll', 'runtimes/win-x86/native/librdkafkacpp.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/zlib.dll', 'runtimes/win-x86/native/zlib.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v120/Win32/Release/libzstd.dll', 'runtimes/win-x86/native/libzstd.dll'],
            # matches librdkafka.{VER}.nupkg
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v120/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v120/librdkafka.lib'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v120/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v120/librdkafkacpp.lib']
        ]

        for m in mappings:
            attributes = m[0]
            fname_glob = attributes['fname_glob']
            del attributes['fname_glob']
            fname_excludes = []
            if 'fname_excludes' in attributes:
                fname_excludes = attributes['fname_excludes']
                del attributes['fname_excludes']

            # Find the first (highest ranked) artifact satisfying all
            # attribute, glob and exclusion constraints.
            artifact = None
            for a in self.arts.artifacts:
                found = True

                for attr in attributes:
                    if a.info[attr] != attributes[attr]:
                        found = False
                        break

                if not fnmatch(a.fname, fname_glob):
                    found = False

                for exclude in fname_excludes:
                    if exclude in a.fname:
                        found = False
                        break

                if found:
                    artifact = a
                    break

            if artifact is None:
                raise Exception('unable to find artifact with tags %s matching "%s"' % (str(attributes), fname_glob))

            outf = os.path.join(self.stpath, m[2])
            member = m[1]
            try:
                zfile.ZFile.extract(artifact.lpath, member, outf)
            except KeyError as e:
                raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (artifact.lpath, e, zfile.ZFile(artifact.lpath).getnames()))

        print('Tree extracted to %s' % self.stpath)

        # After creating a bare-bone nupkg layout containing the artifacts
        # and some spec and props files, call the 'nuget' utility to
        # make a proper nupkg of it (with all the metadata files).
        subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % \
                              (os.path.join(self.stpath, 'librdkafka.redist.nuspec'),
                               self.stpath), shell=True)

        return 'librdkafka.redist.%s.nupkg' % vless_version

    def verify (self, path):
        """ Verify package """
        # Every file the finished redist package must contain.
        expect = [
            "librdkafka.redist.nuspec",
            "LICENSES.txt",
            "build/librdkafka.redist.props",
            "build/native/librdkafka.redist.targets",
            "build/native/include/librdkafka/rdkafka.h",
            "build/native/include/librdkafka/rdkafkacpp.h",
            "build/native/lib/win/x64/win-x64-Release/v120/librdkafka.lib",
            "build/native/lib/win/x64/win-x64-Release/v120/librdkafkacpp.lib",
            "build/native/lib/win/x86/win-x86-Release/v120/librdkafka.lib",
            "build/native/lib/win/x86/win-x86-Release/v120/librdkafkacpp.lib",
            "runtimes/linux-x64/native/centos7-librdkafka.so",
            "runtimes/linux-x64/native/debian9-librdkafka.so",
            "runtimes/linux-x64/native/alpine-librdkafka.so",
            "runtimes/linux-x64/native/librdkafka.so",
            "runtimes/osx-x64/native/librdkafka.dylib",
            "runtimes/win-x64/native/librdkafka.dll",
            "runtimes/win-x64/native/librdkafkacpp.dll",
            "runtimes/win-x64/native/msvcr120.dll",
            "runtimes/win-x64/native/msvcp120.dll",
            "runtimes/win-x64/native/zlib.dll",
            "runtimes/win-x64/native/libzstd.dll",
            "runtimes/win-x86/native/librdkafka.dll",
            "runtimes/win-x86/native/librdkafkacpp.dll",
            "runtimes/win-x86/native/msvcr120.dll",
            "runtimes/win-x86/native/msvcp120.dll",
            "runtimes/win-x86/native/zlib.dll",
            "runtimes/win-x86/native/libzstd.dll"]

        missing = list()
        with zfile.ZFile(path, 'r') as zf:
            print('Verifying %s:' % path)

            # Zipfiles may url-encode filenames, unquote them before matching.
            pkgd = [urllib.unquote(x) for x in zf.getnames()]
            missing = [x for x in expect if x not in pkgd]

        if len(missing) > 0:
            print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
            return False
        else:
            print('OK - %d expected files found' % len(expect))
            return True
|
LiberatorUSA/GUCEF
|
dependencies/librdkafka/packaging/nuget/packaging.py
|
Python
|
apache-2.0
| 19,011
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Options for the "service_available" group: toggles telling Tempest
# which OpenStack services are deployed in the environment under test.
service_available_group = cfg.OptGroup(name="service_available",
                                       title="Available OpenStack Services")

ServiceAvailableGroup = [
    cfg.BoolOpt("senlin",
                default=True,
                help="Whether or not senlin is expected to be available"),
]

# Options for the "clustering" group: how the tests reach and wait on
# the senlin clustering service.
clustering_group = cfg.OptGroup(name="clustering",
                                title="Clustering Service Options")

ClusteringGroup = [
    cfg.StrOpt("catalog_type",
               default="clustering",
               help="Catalog type of the clustering service."),
    cfg.IntOpt("wait_timeout",
               default=60,
               help="Waiting time for a specific status, in seconds.")
]
|
tengqm/senlin-container
|
senlin/tests/tempest_tests/config.py
|
Python
|
apache-2.0
| 1,284
|
## TRACK FIXER##
# @file fixTracks.py
# @brief Tries to fix gaps in tracks, both small and large (very slow with higher settings)
## For MCEdit / Python 3
## Alpha ver. 0.2.4
## by Garrett Martin (GDroidbot)
## Changelog:
#0.2: Added optional trackbed laying
#0.2.4: Fixed bugs about trackbed
# Based off of SethBling's filters.
# Feel free to modify and use this filter however you wish. If you do,
# please give credit to SethBling.
# http://youtube.com/SethBling
from pymclevel import TAG_Compound
from pymclevel import TAG_Int
from pymclevel import TAG_Short
from pymclevel import TAG_Byte
from pymclevel import TAG_String
# Block ids that count as rail: powered (27), detector (28),
# plain (66) and activator (157) rails.
railBlocks = (27, 28, 66, 157)
# Block ids considered non-solid (air, plants, liquids, doors, ...);
# a track bed may replace these, and must never consist of one.
transparentBlocks = [0,6, 8, 9, 10, 11, 18, 20, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 44, 46, 50, 51, 52, 53, 54, 55, 59, 60, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 75, 76, 77, 78, 79, 81, 83, 85, 89, 90, 92, 93, 94, 95, 96, 97, 101, 102, 104, 105, 106, 107, 108, 109, 111, 113, 114, 115, 116, 117, 118, 119, 120, 122, 126, 127]
# MCEdit filter option definitions shown in the filter dialog.
inputs = (
    ("For fixing or creating tracks (Like ones made by mineshafts)", "label"),
    ("Distance to check", (2, 1, 16)),
    ("Aggressive Mode", False),
    ("Fill in track bed?", False)
    # 'Aggressive Mode' fills in larger gaps in tracks when run just once (To save a step)
)
displayName = "Fix broken tracks"

# NOTE(review): a module-level 'global' statement is a no-op; trackBed
# is actually bound in perform() and read by directionalCheck().
global trackBed
def perform(level, box, options):
    """
    MCEdit filter entry point: scan every block in the selection box
    and repair gaps in rail lines found at rail blocks (id 66).
    """
    # BUG FIX: directionalCheck() reads trackBed via 'global trackBed',
    # but this function previously bound it as a *local*, so the first
    # laid track raised NameError. Declare it global here.
    global trackBed
    distance = options["Distance to check"]
    aggressive = options["Aggressive Mode"]
    trackBed = options["Fill in track bed?"]
    if distance < 1 or distance > 8:
        print("The distance " , distance, "may compute slowly, or even crash MCEdit.")
    for x in xrange(box.minx, box.maxx):
        for y in xrange(box.miny, box.maxy):
            for z in xrange(box.minz, box.maxz):
                if level.blockAt(x, y, z) == 66:
                    fixTracks(level, x, y, z, distance)
                    if aggressive:
                        # A second, shorter pass bridges larger gaps
                        # in a single run.
                        fixTracks(level, x, y, z, distance-1)
def fixTracks(level, x, y, z, distance): # X+ = EAST X- = WEST Z+ = SOUTH Z- = NORTH
    """Dispatch on this rail's data value and extend the line toward
    neighbouring rails via directionalCheck()."""
    if distance < 1:
        return
    bData = level.blockDataAt(x, y, z)
    global bedBlock
    # Remember the block under this rail so any laid track bed matches it.
    bedBlock = level.blockAt(x, y-1, z)
    # East-West flat
    if bData == 1:
        directionalCheck(level, x, y, z, xdir = 1, r = distance)
        return
    # North-South flat rail
    elif bData == 0:
        directionalCheck(level, x, y, z, zdir = 1, r = distance)
        return
    # Up to the East
    elif bData == 2: # Only check west
        directionalCheck(level, x, y, z, -1, 0, 1)
        return
    # Up to West
    elif bData == 3: # Only check east
        directionalCheck(level, x, y, z, 1, 0, 1)
        return
    # Up to the North
    elif bData == 4:
        directionalCheck(level, x, y, z, 0, 1, 1)
        return
    # Up to the South
    elif bData == 5:
        directionalCheck(level, x, y, z, 0, -1, 1)
        return
    else:
        isCornerTrack(level, x, y, z, distance, bData )
    # Mark the chunk dirty so MCEdit saves the modifications.
    chunk = level.getChunk(x / 16, z / 16)
    chunk.dirty = True
def isCornerTrack(level, x, y, z, distance = 1, data = 0):
    """Handle corner rails (data values 6-9) by checking both directions
    the corner connects; returns False for non-corner data values."""
    # data = level.blockDataAt(x, y, z) ## redundant
    print("Corner Data: ", data)
    if data==6: # East and South corner
        directionalCheck(level, x, y, z, 1, 1, distance)
    elif data==7: # West and South corner
        directionalCheck(level, x, y, z, -1, 1, distance)
    elif data==8: # West and North corner
        directionalCheck(level, x, y, z, -1, -1, distance)
    elif data==9: # East and North corner
        directionalCheck(level, x, y, z, 1, -1, distance)
        return
    else:
        return False
def directionalCheck(level,x, y, z, xdir = None, zdir = None, r = 1):
    """Look r+1 blocks ahead along the given axis; when a rail is found
    there, lay a connecting rail r blocks away (optionally with a bed).

    NOTE(review): calls with xdir=0 or zdir=0 (the sloped-rail cases in
    fixTracks) match none of the branches below and therefore do
    nothing - confirm whether that is intended.
    """
    i = r
    global trackBed
    if xdir == 1 and zdir is None: # Special case: flat track east-west
        #for i in xrange(1, r, 2):
        if level.blockAt(x+1+i, y, z) in railBlocks:
            print("Laying track at:", x+i, y, z)
            level.setBlockAt(x+i, y, z, 66)
            level.setBlockDataAt(x+i, y, z, 1)
            if trackBed:
                layTrackBed(level, x+i, y-1, z)
        if level.blockAt(x-1-i, y, z) in railBlocks:
            print("Laying track at:", x-i, y, z)
            level.setBlockAt(x-i, y, z, 66)
            level.setBlockDataAt(x-i, y, z, 1)
            if trackBed:
                layTrackBed(level, x-i, y-1, z)
        return
    if xdir is None and zdir == 1: # Special case: flat track north-south
        #for i in xrange(1, r, 2):
        if level.blockAt(x, y, z+1+i) in railBlocks:
            print("Laying track at:", x, y, z+i)
            level.setBlockAt(x, y, z+i,66)
            level.setBlockDataAt(x, y, z+i, 0)
            if trackBed:
                layTrackBed(level, x, y-1, z+i)
        if level.blockAt(x, y, z-1-i) in railBlocks:
            print("Laying track at:", x, y, z-i)
            level.setBlockAt(x, y, z-i,66)
            level.setBlockDataAt(x, y, z-i, 0)
            if trackBed:
                layTrackBed(level, x, y-1, z-i)
        return
    # Corner rails: check the x leg, then the z leg.
    if abs(xdir) == 1 and abs(zdir) == 1:
        #for i in reverse(xrange(1, r)):
        if level.blockAt((xdir * (i + 1)) + x, y, z) in railBlocks:
            level.setBlockAt((xdir * i) + x, y, z,66)
            level.setBlockDataAt((xdir * i) + x, y,z, 1)
            if trackBed:
                layTrackBed(level, (xdir *i) + x, y-1, z)
    if abs(zdir) == 1 and abs(xdir) == 1:
        #for i in reverse(xrange(1, r)):
        if level.blockAt(x, y, (zdir * (i + 1)) + z) in railBlocks:
            level.setBlockAt(x, y, (zdir*i) + z,66)
            level.setBlockDataAt(x, y, (zdir * i) + z, 0)
            if trackBed:
                layTrackBed(level, x, y-1, (zdir * i) + z)
    return
def layTrackBed(level, x, y, z, ablock = None):
    """Fill the block under a newly laid rail with the same block that
    supports the source rail (module-level bedBlock set by fixTracks).

    NOTE(review): the 'ablock' parameter is accepted but never used.
    """
    beforeBlock = level.blockAt(x, y, z)
    block = bedBlock
    # Only replace non-solid blocks, and never place a non-solid bed.
    if beforeBlock in transparentBlocks:
        if block not in transparentBlocks:
            level.setBlockAt(x, y, z, block)
|
GMart/GMart.github.io
|
fixTracks0.2.4.py
|
Python
|
gpl-2.0
| 5,705
|
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""A connector that crawls a public SMB share.
This connector requires smbcrawler, which is available at
https://raw.githubusercontent.com/google/gsa-admin-toolkit/master/smbcrawler.py
"""
__author__ = 'jonathanho@google.com (Jonathan Ho)'
import mimetypes
import os
import subprocess
import tempfile
import smbcrawler
import connector
class SMBConnector(connector.TimedConnector):
    """A connector that crawls an SMB share.

    Pseudocode:
    - Get a list of documents in the SMB share using smbcrawler
    - For each document:
      - Download it with smbclient to a temporary file
      - Read the file's contents to memory
      - Send the file's contents to the GSA as a content feed.

    The mimetypes of a file in the SMB share is inferred from its name.
    """
    CONNECTOR_TYPE = 'smb-connector'
    CONNECTOR_CONFIG = {
        'share': { 'type': 'text', 'label': 'SMB share' },
        'delay': { 'type': 'text', 'label': 'Fetch delay' }
    }

    def init(self):
        # Poll interval (seconds) comes from the connector configuration.
        self.setInterval(int(self.getConfigParam('delay')))
        self.share = self.getConfigParam('share')
        # smbcrawler doesn't work unless the share ends in a slash
        if self.share[-1] != '/':
            self.share += '/'
        self.smbconfig = smbcrawler.Config(['', self.share])

    def run(self):
        """Crawl the share and push every file's content to the GSA."""
        # fetch all the document URLs with smbcrawler
        output = smbcrawler.Crawl(self.smbconfig)
        # now download each file individually with smbclient into a temporary file,
        # then send the file content as a content feed to the GSA
        feed = connector.Feed('incremental')
        devnull = open(os.devnull, 'w')
        for url, doc in output.urls_map.iteritems():
            if not doc.IsFile():
                continue
            filename = doc.filename[1:]  # strip out initial slash
            mimetype = mimetypes.guess_type(url)[0] or 'application/octet-stream'
            # download the file to a temporary place, and read out its contents
            tmp = tempfile.NamedTemporaryFile()
            subprocess.call(['smbclient', self.share, '-N', '-c',
                             'get %s %s' % (filename, tmp.name)],
                            stdout=devnull, stderr=devnull)
            tmp.seek(0)
            filedata = tmp.read()
            tmp.close()
            feed.addRecord(url=url, action='add', mimetype=mimetype, content=filedata)
        devnull.close()
        self.pushFeed(feed)
|
IevgenPr/gsa-admin-toolkit
|
connectormanager/smb_connector.py
|
Python
|
apache-2.0
| 2,948
|
from django.contrib.sites.models import Site
from django.db.utils import IntegrityError
from django.test import TestCase
from django.template import Context, Template
from simpleblocks.models import SimpleBlock
def render_to_string(template, data):
    """Compile the given template source and render it with *data* as context."""
    return Template(template).render(Context(data))
class SimpleBlocksTest(TestCase):
    """Tests for the SimpleBlock model and the get_block template tag."""

    def setUp(self):
        """Actions to be executed before each test"""
        self.body = 'Test Body'
        self.site = Site.objects.get_current()
        self.template = '{% load simpleblocks_tags %}{% get_block "test" %}'
        self.data = {}

    def tearDown(self):
        """Actions to be executed after each test"""
        SimpleBlock.objects.all().delete()

    def create_block(self, key='test'):
        """Helper to create block"""
        data = {'body': self.body,
                'key': key,
                'site': self.site}
        return SimpleBlock.objects.create(**data)

    def testCreateBlock(self):
        """Test block creation"""
        data = {'body': self.body,
                'key': 'test',
                'site': self.site}
        block = SimpleBlock.objects.create(**data)
        assert block, 'Failed to create block'

    def testRenderedStatic(self):
        """Test the tag with a static key"""
        self.create_block()
        rendered = render_to_string(self.template, self.data)
        self.assertEquals(rendered, self.body)

    def testRenderedVariable(self):
        """Test the tag with a variable key"""
        self.create_block()
        data = {'test_variable': 'test'}
        template = '{% load simpleblocks_tags %}{% get_block test_variable %}'
        rendered = render_to_string(template, data)
        self.assertEquals(rendered, self.body)

    def testFailedDuplicated(self):
        """Test failure upon duplicated key and site"""
        self.create_block()
        # (key, site) is unique, so a second identical block must fail.
        with self.assertRaises(IntegrityError):
            self.create_block()
|
alfredo/django-simpleblocks
|
src/simpleblocks/tests.py
|
Python
|
bsd-3-clause
| 1,975
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For PickledScheduler.
"""
import datetime
import six
from cinder.openstack.common import jsonutils
from cinder.scheduler import scheduler_options
from cinder import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
    """Test double: SchedulerOptions with the clock and file access stubbed out."""
    def __init__(self, last_checked, now, file_old, file_now, data, filedata):
        super(FakeSchedulerOptions, self).__init__()
        # Change internals ...
        self.last_modified = file_old
        self.last_checked = last_checked
        self.data = data
        # For overrides ...
        self._time_now = now
        self._file_now = file_now
        self._file_data = filedata
        # Lets tests assert whether get_configuration() re-read the file.
        self.file_was_loaded = False
    def _get_file_timestamp(self, filename):
        # Stubbed: return the canned mtime instead of touching the filesystem.
        return self._file_now
    def _get_file_handle(self, filename):
        # Stubbed: serve the canned JSON and record that a load happened.
        self.file_was_loaded = True
        return six.StringIO(self._file_data)
    def _get_time_now(self):
        # Stubbed clock.
        return self._time_now
class SchedulerOptionsTestCase(test.TestCase):
    """Unit tests for SchedulerOptions' load/reload-on-change behaviour."""
    def test_get_configuration_first_time_no_flag(self):
        """With no filename argument, nothing is loaded and {} is returned."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration())
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_first_time_empty_file(self):
        """An empty file is loaded, but the configuration stays {}."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        jdata = ""
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
    def test_get_configuration_first_time_happy_day(self):
        """First call with a valid file loads it and returns its contents."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
    def test_get_configuration_second_time_no_change(self):
        """Unchanged file mtime: cached data is returned without re-reading."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_second_time_too_fast(self):
        """Re-checked only one second later: stale data is kept, no reload."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2011, 1, 1, 1, 1, 2)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(old_data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_second_time_change(self):
        """File mtime advanced: the file is re-read and new data returned."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
|
Thingee/cinder
|
cinder/tests/scheduler/test_scheduler_options.py
|
Python
|
apache-2.0
| 5,061
|
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    """Stdout callback that only surfaces tasks that failed, changed state,
    or were unreachable.

    The task banner is printed lazily: it is remembered in
    v2_playbook_on_task_start and emitted the first time a noteworthy
    result arrives, so silent tasks produce no output at all.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'actionable'
    def __init__(self):
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()
        # Task whose banner has not necessarily been shown yet.
        self.last_task = None
        self.shown_title = False
    def v2_playbook_on_task_start(self, task, is_conditional):
        # Defer the banner until a result proves the task is interesting.
        self.last_task = task
        self.shown_title = False
    def display_task_banner(self):
        # Emit the pending task banner exactly once.
        if not self.shown_title:
            self.super_ref.v2_playbook_on_task_start(self.last_task, None)
            self.shown_title = True
    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.display_task_banner()
        self.super_ref.v2_runner_on_failed(result, ignore_errors)
    def v2_runner_on_ok(self, result):
        # Only show 'ok' results that actually changed something.
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_on_ok(result)
    def v2_runner_on_unreachable(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_on_unreachable(result)
    def v2_runner_on_skipped(self, result):
        # Intentionally silent.
        pass
    def v2_playbook_on_include(self, included_file):
        # Intentionally silent.
        pass
    def v2_runner_item_on_ok(self, result):
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_item_on_ok(result)
    def v2_runner_item_on_skipped(self, result):
        # Intentionally silent.
        pass
    def v2_runner_item_on_failed(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_item_on_failed(result)
|
hfinucane/ansible
|
lib/ansible/plugins/callback/actionable.py
|
Python
|
gpl-3.0
| 2,532
|
from setuptools import setup, find_packages
import os
version = '1.3.0'
setup(name='collective.webservicespfgadapter',
version=version,
description="PloneFormGen adapter that sends the form submission \
to a web service",
long_description=open("README.md").read() + "\n" +
open("CHANGES.txt").read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Zope CMF Plone Web Service PloneFormGen forms integration',
author='Paul Rentschler',
author_email='par117@psu.edu',
url='',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.api>=1.5',
'Products.PloneFormGen>=1.7.0',
'requests',
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
|
PSUEducationalEquity/collective.webservicespfgadapter
|
setup.py
|
Python
|
bsd-3-clause
| 1,153
|
# Re-export the submodules' public names at package level.
# NOTE(review): implicit relative imports -- this package targets Python 2.
from givens_step import *
from ortho_basis import *
# The test module is optional; ignore its absence (e.g. in installed copies).
try:
    from test import *
except ImportError:
    pass
|
apatil/covariance-prior
|
cov_prior/__init__.py
|
Python
|
mit
| 108
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated in main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Staging locations on the target device.
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    """Run *cmd* through a shell, streaming its output to stdout.

    Returns (exit_code, output_lines).  Polls the child while draining
    stdout line by line so the pipe cannot fill up and deadlock.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Exit only when the pipe is drained AND the child has terminated.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as PARAMETERS.user with XW_ENV set."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Query the numeric uid of PARAMETERS.user on the target device.

    Returns the (exit_code, output_lines) tuple produced by doCMD().
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Return the installed package id whose name equals *pkg_name*.

    Parses `pkgcmd -l` output on the device; returns None when the listing
    fails or no package matches.  Lines with exactly 4 fields are skipped
    (presumably header/partial lines -- TODO confirm pkgcmd output format).
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        pkg_infos = line.split()
        if len(pkg_infos) == 4:
            continue
        # Field 5 is the bracketed package name, field 3 the bracketed id.
        name = pkg_infos[5]
        name = name.lstrip('[').rstrip(']')
        print "name is: %s" % name
        if pkg_name == name:
            test_pkg_id = pkg_infos[3]
            test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
            print test_pkg_id
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Execute *cmd* on the target device via SDB shell or SSH."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (sdb push or scp -r).

    Returns True on success, False on failure.

    BUG FIX: the original returned True when the copy command exited
    non-zero and False on success -- the inverse of how every caller uses
    it (`if not doRemoteCopy(...): action_status = False`), so failed
    copies passed silently and successful ones were flagged as failures.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the device filesystem so the copied file is visible.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package found beside this script and remove the
    staged test content from the device.

    Returns True only if every uninstall step succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".wgt"):
                # Map the widget file name back to its installed package id.
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output rather than exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Install the package's .wgt on the device and copy test resources.

    Returns True only if every step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith("%s.wgt" % PKG_NAME):
                # Stage the widget, install it, then remove the staged copy.
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors in its output rather than exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Copy everything else (test resources) except widgets and this script.
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".wgt"):
            continue
        elif item.endswith("inst.py"):
            continue
        else:
            if not doRemoteCopy(item, PKG_SRC_DIR):
                action_status = False
    return action_status
def main():
    """Parse options, resolve the target device and user, then install or
    uninstall the package, exiting non-zero on any failure.
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Defaults: run as the 'app' user over SDB.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-detect the first connected device from `sdb devices`.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        # Any mode other than SDB is treated as SSH.
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the user's uid to build the per-user DBus session address.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (no -u) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
yugang/crosswalk-test-suite
|
webapi/tct-widgetpolicy-w3c-tests/inst.wgt.py
|
Python
|
bsd-3-clause
| 6,719
|
import numpy as n
import scipy.interpolate
import scipy.ndimage
def congrid(a, newdims, method='linear', centre=False, minusone=False):
    '''Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).

    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL's congrid routine, which apparently originally came from a VAX/VMS
    routine of the same name.

    method:
    neighbour - closest value from original data
    nearest and linear - uses n x 1-D interpolations using
    scipy.interpolate.interp1d
    (see Numerical Recipes for validity of use of n 1-D interpolations)
    spline - uses ndimage.map_coordinates

    centre:
    True - interpolation points are at the centres of the bins
    False - points are at the front edge of the bin

    minusone:
    For example- inarray.shape = (i,j) & new dimensions = (x,y)
    False - inarray is resampled by factors of (i/x) * (j/y)
    True - inarray is resampled by (i-1)/(x-1) * (j-1)/(y-1)
    This prevents extrapolation one element beyond bounds of input array.

    Returns the resampled array, or None on a usage error.
    '''
    if a.dtype not in [n.float64, n.float32]:
        # np.cast was removed in NumPy 2.0; astype is the supported spelling.
        a = a.astype(float)

    m1 = int(minusone)
    ofs = int(centre) * 0.5
    old = n.array(a.shape)
    ndims = len(a.shape)
    if len(newdims) != ndims:
        print("[congrid] dimensions error. "
              "This routine currently only supports "
              "rebinning to the same number of dimensions.")
        return None
    newdims = n.asarray(newdims, dtype=float)
    dimlist = []

    if method == 'neighbour':
        # n.indices requires integer dims; index with a tuple (indexing with
        # a list of arrays is deprecated fancy-indexing).
        base_indices = n.indices(tuple(newdims.astype(int)))
        for i in range(ndims):
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (base_indices[i] + ofs) - ofs)
        cd = n.array(dimlist).round().astype(int)
        return a[tuple(cd)]

    elif method in ['nearest', 'linear']:
        # calculate new dims
        for i in range(ndims):
            base = n.arange(newdims[i])
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (base + ofs) - ofs)
        # specify old dims (np.float alias was removed; use builtin float)
        olddims = [n.arange(i, dtype=float) for i in list(a.shape)]
        # first interpolation - for ndims = any
        mint = scipy.interpolate.interp1d(olddims[-1], a, kind=method,
                                          bounds_error=False, fill_value=0.0)
        newa = mint(dimlist[-1])
        trorder = [ndims - 1] + list(range(ndims - 1))
        for i in range(ndims - 2, -1, -1):
            newa = newa.transpose(trorder)
            mint = scipy.interpolate.interp1d(olddims[i], newa, kind=method,
                                              bounds_error=False, fill_value=0.0)
            newa = mint(dimlist[i])
        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose(trorder)
        return newa

    elif method in ['spline']:
        # Float slices so mgrid yields float coordinates we can shift/scale.
        nslices = [slice(0, j) for j in list(newdims)]
        newcoords = n.mgrid[tuple(nslices)].astype(float)
        # np.rank was removed (use ndim); range() is lazy on Python 3, so
        # materialize a list before mutating it.
        newcoords_dims = list(range(newcoords.ndim))
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        # a view: in-place ops below also update `newcoords`
        newcoords_tr = newcoords.transpose(newcoords_dims)
        newcoords_tr += ofs
        deltas = (n.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas
        newcoords_tr -= ofs
        return scipy.ndimage.map_coordinates(a, newcoords)

    else:
        print("Congrid error: Unrecognized interpolation type.\n"
              "Currently only 'neighbour', 'nearest', 'linear' "
              "and 'spline' are supported.")
        return None
|
gsnyder206/synthetic-image-morph
|
congrid.py
|
Python
|
gpl-2.0
| 3,937
|
import boto3
DEFAULTS = {
    "region_name": "ap-southeast-2"  # Sydney
}


def status_code(ret):
    """Return the HTTP status code embedded in a boto3 response dict."""
    return ret["ResponseMetadata"]["HTTPStatusCode"]


class Rekognition():
    """Small convenience wrapper around the AWS Rekognition face-collection API."""

    def __init__(self, profile, region=DEFAULTS["region_name"]):
        # profile: name of the local AWS credentials profile to use.
        self.profile = profile
        self.region_name = region
        self.client = self.get_client()

    def get_client(self):
        """Create a boto3 Rekognition client for the configured profile/region."""
        session = boto3.Session(profile_name=self.profile)
        client = session.client("rekognition", region_name=self.region_name)
        return client

    def list_collections(self):
        """Return the list of collection ids, or [] on a non-200 response."""
        ret = self.client.list_collections()
        if status_code(ret) == 200:
            print("list_collections: {}".format(ret["CollectionIds"]))
            return ret["CollectionIds"]
        print(status_code(ret))
        return []

    def collection_exist(self, collection_id):
        """Return True if *collection_id* is an existing collection."""
        return collection_id in self.list_collections()

    def list_faces(self, collection_id):
        """Print and return the faces indexed in *collection_id* ([] on error)."""
        ret = self.client.list_faces(CollectionId=collection_id)
        if status_code(ret) == 200:
            for face in ret["Faces"]:
                print("FaceId: {}".format(face["FaceId"]))
                print("ImageId: {}".format(face["ImageId"]))
                print("ExternalImageId: {}".format(face["ExternalImageId"]))
                print("Confidence: {}".format(face["Confidence"]))
            return ret["Faces"]
        print(status_code(ret))
        return []

    def create_collection(self, collection_id):
        """Create a collection; returns True on success, False on any error."""
        try:
            ret = self.client.create_collection(CollectionId=collection_id)
            print(ret)
            return True
        except Exception as e:
            print(e)
            return False

    def delete_collection(self, collection_id):
        """Delete a collection; returns True on success, False on any error."""
        try:
            ret = self.client.delete_collection(CollectionId=collection_id)
            print(ret)
            return True
        except Exception as e:
            print(e)
            return False

    def index_faces(self, image_file, external_image_id, collection_id):
        """Index the faces found in *image_file* into *collection_id*.

        Returns True when the service answered 200, False otherwise.
        """
        with open(image_file, "rb") as image:
            ret = self.client.index_faces(
                CollectionId=collection_id,
                Image={"Bytes": image.read()},
                ExternalImageId=external_image_id
            )
            if status_code(ret) == 200:
                for rec in ret["FaceRecords"]:
                    face = rec["Face"]
                    print("FaceId: {}".format(face["FaceId"]))
                    print("ImageId: {}".format(face["ImageId"]))
                    print("ExternalImageId: {}".format(face["ExternalImageId"]))
                    print("Confidence: {}".format(face["Confidence"]))
                return True
            print("Unexpected status code: {}".format(status_code(ret)))
            return False

    def search_faces_by_image(self, image_file, external_image_id, collection_id):
        """
        :param image_file: path to the probe image
        :param external_image_id: restrict matches to this id (None = accept any)
        :param collection_id: collection to search
        :return: ExternalImageId of the *best* match; None if no match found
        """
        matched_id = None
        best_similarity = 0.0
        with open(image_file, "rb") as image:
            print("Searching faces ...")
            ret = self.client.search_faces_by_image(
                CollectionId=collection_id,
                Image={"Bytes": image.read()},
            )
            print(ret)
            if status_code(ret) == 200:
                for rec in ret["FaceMatches"]:
                    if external_image_id is not None and rec["Face"]["ExternalImageId"] != external_image_id:
                        continue
                    if rec["Similarity"] > best_similarity:
                        # BUG FIX: the original never updated the running
                        # similarity, so every positive match overwrote the
                        # result and the *last* match was returned instead
                        # of the most similar one.
                        best_similarity = rec["Similarity"]
                        matched_id = rec["Face"]["ExternalImageId"]
                    print("Similarity: {}".format(rec["Similarity"]))
                    print("FaceId: {}".format(rec["Face"]["FaceId"]))
                    print("ImageId: {}".format(rec["Face"]["ImageId"]))
                    print("ExternalImageId: {}".format(rec["Face"]["ExternalImageId"]))
                    print("Confidence: {}".format(rec["Face"]["Confidence"]))
        return matched_id
|
kyhau/reko
|
reko/rekognition.py
|
Python
|
mit
| 4,130
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Token scope requested when authenticating to Event Grid with AAD credentials.
DEFAULT_EVENTGRID_SCOPE = "https://eventgrid.azure.net/.default"
# HTTP header names used for SAS key / SAS token authentication.
EVENTGRID_KEY_HEADER = "aeg-sas-key"
EVENTGRID_TOKEN_HEADER = "aeg-sas-token"
# Service API version targeted by this client.
DEFAULT_API_VERSION = "2018-01-01"
# Characters left unescaped when URL-encoding (quote()-style safe set).
SAFE_ENCODE = "~()*!.'"
|
Azure/azure-sdk-for-python
|
sdk/eventgrid/azure-eventgrid/azure/eventgrid/_constants.py
|
Python
|
mit
| 548
|
from django.contrib import admin
from .models import ServiceName, Song, BookName, ParshaName, TorahReading, HaftarahReading, AlternateBookName, AlternateParshaName
# class AuthorAdmin(admin.ModelAdmin):
# list_display = ('first_name', 'last_name', 'email')
# Admin configuration for the songs app: one ModelAdmin per model, each
# ordered by its seq_number (display order) where the model has one.
class ServiceNameAdmin(admin.ModelAdmin):
    list_display = ('name', 'display', 'seq_number')
    ordering = ('seq_number',)
admin.site.register(ServiceName, ServiceNameAdmin)
class SongAdmin(admin.ModelAdmin):
    list_display = ('name', 'display', 'seq_number', 's3_obj_key', 'file_name')
    ordering = ('seq_number',)
    list_filter = ('service_name',)
admin.site.register(Song, SongAdmin)
class BookNameAdmin(admin.ModelAdmin):
    list_display = ('name', 'display', 'seq_number')
    ordering = ('seq_number',)
admin.site.register(BookName, BookNameAdmin)
class ParshaNameAdmin(admin.ModelAdmin):
    list_display = ('name', 'display', 'seq_number')
    ordering = ('seq_number',)
    list_filter = ('book_name',)
admin.site.register(ParshaName, ParshaNameAdmin)
class TorahReadingAdmin(admin.ModelAdmin):
    # Searchable by file name, triennial cycle and aliyah; filterable by parsha.
    list_display = ('file_name', 'triennial', 'aliyah', 'extension', 'seq_number', 's3_obj_key')
    ordering = ('seq_number',)
    search_fields = ('file_name', 'triennial', 'aliyah',)
    list_filter = ('parsha',)
admin.site.register(TorahReading, TorahReadingAdmin)
class HaftarahReadingAdmin(admin.ModelAdmin):
    list_display = ('file_name', 'extension', 'seq_number', 's3_obj_key')
    ordering = ('seq_number',)
    search_fields = ('file_name',)
admin.site.register(HaftarahReading, HaftarahReadingAdmin)
# Alias tables mapping alternate spellings to canonical book/parsha names.
class AlternateBookNameAdmin(admin.ModelAdmin):
    list_display = ('book_name', 'alternate_name')
    ordering = ('book_name', )
admin.site.register(AlternateBookName, AlternateBookNameAdmin)
class AlternateParshaNameAdmin(admin.ModelAdmin):
    list_display = ('parsha_name', 'alternate_name')
    ordering = ('parsha_name', )
admin.site.register(AlternateParshaName, AlternateParshaNameAdmin)
|
brentd-smith/smolkinsite
|
songs/admin.py
|
Python
|
gpl-3.0
| 2,014
|
#!/usr/bin/env python
import setuptools
# Minimal entry point: only the distribution name is given here; the rest of
# the metadata presumably lives in setup.cfg -- TODO confirm.
if __name__ == "__main__":
    setuptools.setup(name="pyrcrack")
|
XayOn/pyrcrack
|
setup.py
|
Python
|
isc
| 107
|
# -*- coding: utf-8 -*-
"""
test_utils
----------
Tests for `feedsqueeze.utils` module.
"""
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
from feedsqueeze import utils
from feedsqueeze.exceptions import FeedFileDoesNotExist
class TestUtils(unittest.TestCase):
    """Tests for the helpers in `feedsqueeze.utils`."""
    def test_get_feed_list_from_file_missing(self):
        """
        Test that `exceptions.FeedFileDoesNotExist` is thrown when given a file
        that does not exist.
        """
        self.assertRaises(FeedFileDoesNotExist, utils.get_feed_list_from_file,
                          'tests/test-utils/this-does-not-exist.txt')
    def test_get_feed_list_from_file(self):
        """
        Test that given a valid file, a list is returned.
        """
        self.assertEqual(
            utils.get_feed_list_from_file('tests/test-utils/feed_list.txt'),
            ['feed1', 'feed2', 'feed3'])
    def test_render_template(self):
        """
        Test that a properly interpolated string is returned.
        """
        # Template dir is passed as a path tuple; {'bar': 'quux'} fills the slot.
        self.assertEqual(
            utils.render_template('template.txt', {'bar':'quux'},
                                  ('tests', 'test-utils')), 'fooquux')
if __name__ == '__main__':
unittest.main()
|
jonyamo/feedsqueeze
|
tests/test_utils.py
|
Python
|
bsd-3-clause
| 1,263
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODB-defined exceptions
$Id$"""
import sys
from ZODB.utils import oid_repr, readable_tid_repr
# BBB: We moved the two transactions to the transaction package
from transaction.interfaces import TransactionError, TransactionFailedError
import transaction.interfaces
def _fmt_undo(oid, reason):
    """Build the standard undo-error message for *oid*, appending *reason* if given."""
    suffix = (": %s" % reason) if reason else ""
    return "Undo error %s%s" % (oid_repr(oid), suffix)
def _recon(class_, state):
    """Unpickling helper: recreate *class_* bypassing __init__, then restore *state*."""
    err = class_.__new__(class_)
    err.__setstate__(state)
    return err
# Marker attribute -- presumably consumed by callers that need to know
# invoking _recon during unpickling is side-effect free; TODO confirm.
_recon.__no_side_effects__ = True
class POSError(Exception):
    """Persistent object system error.

    Root of the ZODB exception hierarchy.  The version-conditional code
    below customizes pickling so instances round-trip correctly across
    Python versions and implementations.
    """
    if sys.version_info[:2] == (2, 6):
        # The 'message' attribute was deprecated for BaseException with
        # Python 2.6; here we create descriptor properties to continue using it
        def __set_message(self, v):
            self.__dict__['message'] = v
        def __get_message(self):
            return self.__dict__['message']
        def __del_message(self):
            del self.__dict__['message']
        message = property(__get_message, __set_message, __del_message)
    if sys.version_info[:2] >= (2, 5):
        def __reduce__(self):
            # Pickle as (_recon, (class, state-dict)) rather than the default
            # so extra instance attributes survive.
            # Copy extra data from internal structures
            state = self.__dict__.copy()
            if sys.version_info[:2] == (2, 5):
                state['message'] = self.message
            state['args'] = self.args
            return (_recon, (self.__class__, state))
        def __setstate__(self, state):
            # PyPy doesn't store the 'args' attribute in an instance's
            # __dict__; instead, it uses what amounts to a slot. Because
            # we customize the pickled representation to just be a dictionary,
            # the args would then get lost, leading to unprintable exceptions
            # and worse. Manually assign to args from the state to be sure
            # this doesn't happen.
            super(POSError,self).__setstate__(state)
            self.args = state['args']
class POSKeyError(POSError, KeyError):
    """Key not found in database."""
    def __str__(self):
        # args[0] is the packed oid; render it in human-readable form.
        return oid_repr(self.args[0])
class ConflictError(POSError, transaction.interfaces.TransientError):
    """Two transactions tried to modify the same object at once.
    This transaction should be resubmitted.
    Instance attributes:
    oid : string
    the OID (8-byte packed string) of the object in conflict
    class_name : string
    the fully-qualified name of that object's class
    message : string
    a human-readable explanation of the error
    serials : (string, string)
    a pair of 8-byte packed strings; these are the serial numbers
    related to conflict. The first is the revision of object that
    is in conflict, the currently committed serial. The second is
    the revision the current transaction read when it started.
    data : string
    The database record that failed to commit, used to put the
    class name in the error message.
    The caller should pass either object or oid as a keyword argument,
    but not both of them. If object is passed, it should be a
    persistent object with an _p_oid attribute.
    """
    def __init__(self, message=None, object=None, oid=None, serials=None,
                 data=None):
        if message is None:
            self.message = "database conflict error"
        else:
            self.message = message
        # Derive oid/class_name from the persistent object when given.
        if object is None:
            self.oid = None
            self.class_name = None
        else:
            self.oid = object._p_oid
            klass = object.__class__
            self.class_name = klass.__module__ + "." + klass.__name__
        # An explicit oid is only legal when no object was passed.
        if oid is not None:
            assert self.oid is None
            self.oid = oid
        if data is not None:
            # avoid circular import chain
            from ZODB.utils import get_pickle_metadata
            self.class_name = '.'.join(get_pickle_metadata(data))
        self.serials = serials
    def __str__(self):
        # Assemble "message (oid ..., class ..., serial ...)" from whatever
        # detail is available.
        extras = []
        if self.oid:
            extras.append("oid %s" % oid_repr(self.oid))
        if self.class_name:
            extras.append("class %s" % self.class_name)
        if self.serials:
            current, old = self.serials
            extras.append("serial this txn started with %s" %
                          readable_tid_repr(old))
            extras.append("serial currently committed %s" %
                          readable_tid_repr(current))
        if extras:
            return "%s (%s)" % (self.message, ", ".join(extras))
        else:
            return self.message
    # Simple accessors for the attributes documented above.
    def get_oid(self):
        return self.oid
    def get_class_name(self):
        return self.class_name
    def get_old_serial(self):
        return self.serials[1]
    def get_new_serial(self):
        return self.serials[0]
    def get_serials(self):
        return self.serials
class ReadConflictError(ConflictError):
    """Conflict detected when object was loaded.

    An attempt was made to read an object that has changed in another
    transaction (eg. another thread or process).
    """
    def __init__(self, message=None, object=None, serials=None, **kw):
        if message is None:
            message = "database read conflict error"
        super(ReadConflictError, self).__init__(message=message,
                                                object=object,
                                                serials=serials, **kw)
class BTreesConflictError(ConflictError):
    """A special subclass for BTrees conflict errors."""
    # Human-readable explanations indexed by the `reason` code passed to
    # __init__; the inline comments give the numeric code and its cause.
    msgs = [# 0; i2 or i3 bucket split; positions are all -1
            'Conflicting bucket split',
            # 1; keys the same, but i2 and i3 values differ, and both values
            # differ from i1's value
            'Conflicting changes',
            # 2; i1's value changed in i2, but key+value deleted in i3
            'Conflicting delete and change',
            # 3; i1's value changed in i3, but key+value deleted in i2
            'Conflicting delete and change',
            # 4; i1 and i2 both added the same key, or both deleted the
            # same key
            'Conflicting inserts or deletes',
            # 5; i2 and i3 both deleted the same key
            'Conflicting deletes',
            # 6; i2 and i3 both added the same key
            'Conflicting inserts',
            # 7; i2 and i3 both deleted the same key, or i2 changed the value
            # associated with a key and i3 deleted that key
            'Conflicting deletes, or delete and change',
            # 8; i2 and i3 both deleted the same key, or i3 changed the value
            # associated with a key and i2 deleted that key
            'Conflicting deletes, or delete and change',
            # 9; i2 and i3 both deleted the same key
            'Conflicting deletes',
            # 10; i2 and i3 deleted all the keys, and didn't insert any,
            # leaving an empty bucket; conflict resolution doesn't have
            # enough info to unlink an empty bucket from its containing
            # BTree correctly
            'Empty bucket from deleting all keys',
            # 11; conflicting changes in an internal BTree node
            'Conflicting changes in an internal BTree node',
            # 12; i2 or i3 was empty
            'Empty bucket in a transaction',
            # 13; delete of first key, which causes change to parent node
            'Delete of first key',
            ]
    def __init__(self, p1, p2, p3, reason):
        # p1/p2/p3: positions reported by the BTrees 3-way merge;
        # reason: index into the msgs table above -- TODO confirm exact
        # p1..p3 semantics against the BTrees conflict-resolution code.
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        self.reason = reason
    def __repr__(self):
        return "BTreesConflictError(%d, %d, %d, %d)" % (self.p1,
                                                        self.p2,
                                                        self.p3,
                                                        self.reason)
    def __str__(self):
        return "BTrees conflict error at %d/%d/%d: %s" % (
            self.p1, self.p2, self.p3, self.msgs[self.reason])
class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
    """An object has a persistent reference to a missing object.

    Raised when a stored object references another object that does not
    exist (for example, one removed by pack).  Whether a storage supports
    detecting this is a quality-of-implementation issue.

    Instance attributes:
    referer: oid of the object being written
    missing: referenced oid that does not have a corresponding object
    """
    def __init__(self, Aoid, Boid):
        # Aoid: oid of the writer; Boid: the missing referenced oid.
        self.referer = Aoid
        self.missing = Boid
    def __str__(self):
        return "from {0} to {1}".format(oid_repr(self.referer),
                                        oid_repr(self.missing))
############################################################################
# Only used in storages; versions are no longer supported.
# Kept for storages only; the versions feature itself is gone.
class VersionError(POSError):
    """An error in handling versions occurred."""
class VersionCommitError(VersionError):
    """An invalid combination of versions was used in a version commit."""
class VersionLockError(VersionError, transaction.interfaces.TransactionError):
    """Modification to an object modified in an unsaved version.
    An attempt was made to modify an object that has been modified in an
    unsaved version.
    """
############################################################################
class UndoError(POSError):
    """An attempt was made to undo a non-undoable transaction."""

    def __init__(self, reason, oid=None):
        self._reason = reason
        self._oid = oid

    def __str__(self):
        # ``_fmt_undo`` is a module-level helper (defined elsewhere in this
        # file) that renders the oid/reason pair.
        return _fmt_undo(self._oid, self._reason)
class MultipleUndoErrors(UndoError):
    """Several undo errors raised during a single transaction."""

    def __init__(self, errs):
        # Expose the first error's (reason, oid) pair for clients that
        # only inspect a single error via the base class.
        UndoError.__init__(self, *errs[0])
        self._errs = errs

    def __str__(self):
        return "\n".join(_fmt_undo(*err) for err in self._errs)
class StorageError(POSError):
    """Base class for storage based exceptions."""

class StorageTransactionError(StorageError):
    """An operation was invoked for an invalid transaction or state."""

class StorageSystemError(StorageError):
    """Panic! Internal storage error!"""

class MountedStorageError(StorageError):
    """Unable to access mounted storage."""

class ReadOnlyError(StorageError):
    """Unable to modify objects in a read-only storage."""

class TransactionTooLargeError(StorageTransactionError):
    """The transaction exhausted some finite storage resource."""

class ExportError(POSError):
    """An export file doesn't have the right format."""

class Unsupported(POSError):
    """A feature was used that is not supported by the storage."""

class ReadOnlyHistoryError(POSError):
    """Unable to add or modify objects in an historical connection."""

class InvalidObjectReference(POSError):
    """An object contains an invalid reference to another object.

    An invalid reference may be one of:

    o A reference to a wrapped persistent object.

    o A reference to an object in a different database connection.

    TODO: The exception ought to have a member that is the invalid object.
    """

class ConnectionStateError(POSError):
    """A Connection isn't in the required state for an operation.

    o An operation such as a load is attempted on a closed connection.

    o An attempt to close a connection is made while the connection is
      still joined to a transaction (for example, a transaction is in
      progress, with uncommitted modifications in the connection).
    """
|
wunderlins/learning
|
python/zodb/lib/osx/ZODB/POSException.py
|
Python
|
gpl-2.0
| 12,347
|
# coding=utf-8
import numpy as np
from qtpy import QtWidgets, QtCore
# OWN
from dataArtist.widgets.Tool import Tool
class ZoomTo(Tool):
    '''
    Zoom to a certain area that fulfils a given criterion

    The user picks a comparison (``==``, ``>`` or ``<``) and a value; all
    matching pixel positions are collected and a small control window
    allows stepping through them, centring the view on each match.
    '''
    icon = 'find.svg'

    def __init__(self, imageDisplay):
        Tool.__init__(self, imageDisplay)
        pa = self.setParameterMenu()

        # View size (in pixels) around each match:
        self.pSizeX = pa.addChild({
            'name': 'X-size',
            'type': 'int',
            'value': 300,
            'min': 1})

        self.pSizeY = pa.addChild({
            'name': 'Y-size',
            'type': 'int',
            'value': 300,
            'min': 1})

        self.pObject = pa.addChild({
            'name': 'Object',
            'type': 'list',
            'limits': ['current image', 'current color layer']})

        self.pCriterion = pa.addChild({
            'name': 'Criterion',
            'type': 'list',
            'limits': ['==', '>', '<']})

        self.pValue = pa.addChild({
            'name': 'Value',
            'type': 'float',
            'value': 100})

    def activate(self):
        """Collect all positions matching the criterion and show the
        Previous/Next control window."""
        import operator  # stdlib; local import keeps module deps unchanged

        obj = self.pObject.value()
        w = self.display.widget
        if obj == 'current image':
            img = w.image
            if img.ndim == 3:
                # assumes axis 0 indexes image layers -- TODO confirm
                img = img[w.currentIndex]
        elif obj == 'current color layer':
            img = list(w.cItems.values())[0].image
        # Safe dispatch table instead of the former eval() on a string
        # assembled from parameter values; results are identical for the
        # three criteria offered in the menu.
        ops = {'==': operator.eq, '>': operator.gt, '<': operator.lt}
        indices = ops[self.pCriterion.value()](img, self.pValue.value())
        self.positions = np.nonzero(indices)
        self.n = 0
        self.control = _ControlWidget(self.previous, self.__next__)
        self.control.show()

    def previous(self):
        """Step to the previous match, if any."""
        if self.n > 0:
            self.n -= 1
            self._goToPosition()

    def __next__(self):
        """Step to the next match, if any."""
        if self.n < len(self.positions[0]) - 1:
            self.n += 1
            self._goToPosition()

    def _goToPosition(self):
        # Centre the view box on the current match with the configured
        # window size.
        cx = self.positions[0][self.n]
        cy = self.positions[1][self.n]
        hx = self.pSizeX.value()
        hy = self.pSizeY.value()
        self.display.widget.view.vb.setRange(
            xRange=(cx - 0.5 * hx, cx + 0.5 * hx),
            yRange=(cy - 0.5 * hy, cy + 0.5 * hy)
        )

    def deactivate(self):
        self.control.close()
class _ControlWidget(QtWidgets.QWidget):
    '''
    Frameless, always-on-top control window with a 'Previous' and a 'Next'
    button wired to the given callbacks; draggable with the mouse.
    '''

    def __init__(self, fnPrevious, fnNext):
        QtWidgets.QWidget.__init__(self)
        # Frameless, stays above other windows:
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint |
                            QtCore.Qt.WindowStaysOnTopHint)
        box = QtWidgets.QHBoxLayout()
        self.setLayout(box)
        # Create and attach both buttons in display order:
        for caption, callback in (('Previous', fnPrevious),
                                  ('Next', fnNext)):
            button = QtWidgets.QPushButton(caption)
            button.clicked.connect(callback)
            box.addWidget(button)

    # -- dragging support --------------------------------------------------
    def mousePressEvent(self, event):
        # Remember where inside the window the drag started.
        self.offset = event.pos()

    def mouseMoveEvent(self, event):
        # Move the window so the cursor keeps its initial in-window offset.
        self.move(event.globalX() - self.offset.x(),
                  event.globalY() - self.offset.y())
|
radjkarl/dataArtist
|
dataArtist/figures/image/tools/filter/ZoomTo.py
|
Python
|
gpl-3.0
| 3,315
|
#!/usr/bin/python
from distutils.core import setup
# Basic package information, collected once and handed to distutils.
PACKAGE_INFO = dict(
    name='hipchat',
    version='0.0.0',
    packages=['hipchat'],
    include_package_data=True,
    install_requires=['httplib2', 'simplejson'],

    url='https://github.com/alexcchan/hipchat/tree/master',
    keywords='hipchat api',
    description='HipChat API v2 Wrapper for Python',

    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet'
    ],
)

setup(**PACKAGE_INFO)
|
alexcchan/hipchat
|
setup.py
|
Python
|
mit
| 600
|
"""
Cinder configuration - file ``/etc/cinder/cinder.conf``
=======================================================
The Cinder configuration file is a standard '.ini' file and this parser uses
the ``IniConfigFile`` class to read it.
Sample configuration::
[DEFAULT]
rpc_backend=cinder.openstack.common.rpc.impl_kombu
control_exchange=openstack
osapi_volume_listen=10.22.100.58
osapi_volume_workers=32
api_paste_config=/etc/cinder/api-paste.ini
glance_api_servers=http://10.22.120.50:9292
glance_api_version=2
glance_num_retries=0
glance_api_insecure=False
glance_api_ssl_compression=False
enable_v1_api=True
enable_v2_api=True
storage_availability_zone=nova
default_availability_zone=nova
enabled_backends=tripleo_ceph
nova_catalog_info=compute:Compute Service:publicURL
nova_catalog_admin_info=compute:Compute Service:adminURL
[lvm]
iscsi_helper=lioadm
volume_group=cinder-volumes
iscsi_ip_address=192.168.88.10
volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
volumes_dir=/var/lib/cinder/volumes
iscsi_protocol=iscsi
volume_backend_name=lvm
Examples:
>>> conf = shared[CinderConf]
>>> conf.sections()
['DEFAULT', 'lvm']
>>> 'lvm' in conf
True
>>> conf.has_option('DEFAULT', 'enabled_backends')
True
>>> conf.get("DEFAULT", "enabled_backends")
"tripleo_ceph"
>>> conf.get("DEFAULT", "glance_api_ssl_compression")
"False"
>>> conf.getboolean("DEFAULT", "glance_api_ssl_compression")
False
    >>> conf.getint("DEFAULT", "glance_api_version")
2
"""
from .. import parser, IniConfigFile
@parser("cinder.conf")
class CinderConf(IniConfigFile):
    """
    Cinder configuration parser class, based on the ``IniConfigFile`` class.

    All parsing behaviour (``sections``, ``get``, ``getboolean``, ...) is
    inherited; this subclass exists only to register a parser for the
    ``cinder.conf`` file via the ``@parser`` decorator.
    """
    pass
|
PaulWay/insights-core
|
insights/parsers/cinder_conf.py
|
Python
|
apache-2.0
| 1,823
|
import json
from imhotep.testing_utils import fixture_path, Requester
from imhotep.shas import CommitInfo, PRInfo, get_pr_info
# via https://api.github.com/repos/justinabrahms/imhotep/pulls/10
with open(fixture_path('remote_pr.json')) as f:
    remote_json_fixture = json.loads(f.read())

# via https://api.github.com/repos/justinabrahms/imhotep/pulls/1
with open(fixture_path('non_remote_pr.json')) as f:
    not_remote_json = json.loads(f.read())

# PR #10 originates from a fork ("remote" repo); PR #1 comes from a branch
# inside the main repository -- the two fixtures exercise both paths.
remote_pr = PRInfo(remote_json_fixture)
non_remote_pr = PRInfo(not_remote_json)

def test_commit_info():
    # CommitInfo(commit, origin, remote_repo, <4th arg>) -- the last
    # parameter is unused by these assertions; confirm its name upstream.
    commit_info = CommitInfo('02c774e4a8d74154468211b14f631748c1d23ef6',
                             '9216c7b61c6dbf547a22e5a5ad282252acc9735f',
                             None,
                             None)
    assert commit_info.commit == '02c774e4a8d74154468211b14f631748c1d23ef6'
    assert commit_info.origin == '9216c7b61c6dbf547a22e5a5ad282252acc9735f'
    assert commit_info.remote_repo is None

def test_pr_info_base_sha():
    assert remote_pr.base_sha == '02c774e4a8d74154468211b14f631748c1d23ef6'

def test_pr_info_head_sha():
    assert remote_pr.head_sha == '9216c7b61c6dbf547a22e5a5ad282252acc9735f'

def test_pr_info_base_ref():
    assert remote_pr.base_ref == 'master'

def test_pr_info_head_ref():
    assert remote_pr.head_ref == 'the-cache-option'

def test_pr_info_has_remote_repo():
    assert remote_pr.has_remote_repo

def test_pr_info_doesnt_have_remote():
    assert not non_remote_pr.has_remote_repo

def test_pr_info_to_commit_info():
    commit_info = remote_pr.to_commit_info()
    assert commit_info.commit == '02c774e4a8d74154468211b14f631748c1d23ef6'
    assert commit_info.origin == '9216c7b61c6dbf547a22e5a5ad282252acc9735f'
    assert commit_info.remote_repo.name == 'scottjab'
    assert commit_info.remote_repo.url == 'git@github.com:scottjab/imhotep.git'

def test_pr_info_to_commit_info_no_remote():
    commit_info = non_remote_pr.to_commit_info()
    assert commit_info.remote_repo is None

def test_pr_info_remote_repo():
    remote = remote_pr.remote_repo
    assert remote.name == 'scottjab'
    assert remote.url == 'git@github.com:scottjab/imhotep.git'

def test_pr_info():
    # Requester stub records the URL that get_pr_info builds.
    r = Requester(remote_json_fixture)
    get_pr_info(r, 'justinabrahms/imhotep', 10)
    assert r.url == 'https://api.github.com/repos/justinabrahms/imhotep/pulls/10'
|
Appdynamics/imhotep
|
imhotep/shas_test.py
|
Python
|
mit
| 2,368
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Francesco Apruzzese
# Copyright 2015 Apulia Software srl
# Copyright 2015 Lorenzo Battistini - Agile Business Group
#
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
#
##############################################################################
from openerp.tests.common import TransactionCase
class TestDdt(TransactionCase):
    """Tests for DDT (transport document) creation and invoicing.

    Note: the deprecated ``assertEquals`` alias used in some tests has been
    normalized to ``assertEqual``, matching the rest of the class.
    """

    def _create_picking(self):
        # Outgoing picking for the test partner.
        return self.env['stock.picking'].create({
            'partner_id': self.partner.id,
            'picking_type_id': self.env.ref('stock.picking_type_out').id,
        })

    def _create_move(self, picking, product, quantity=1.0):
        # Stock -> customer move, flagged to be invoiced.
        src_location = self.env.ref('stock.stock_location_stock')
        dest_location = self.env.ref('stock.stock_location_customers')
        return self.env['stock.move'].create({
            'name': '/',
            'picking_id': picking.id,
            'product_id': product.id,
            'product_uom_qty': quantity,
            'product_uom': product.uom_id.id,
            'location_id': src_location.id,
            'location_dest_id': dest_location.id,
            'partner_id': self.partner.id,
            'invoice_state': '2binvoiced',
        })

    def _create_line(self, preparation, product=None, quantity=0):
        # Package-preparation line; product is optional (description line).
        return self.env['stock.picking.package.preparation.line'].create({
            'name': 'test',
            'product_id': product and product.id or False,
            'product_uom_qty': quantity,
            'product_uom': product and product.uom_id.id or False,
            'package_preparation_id': preparation.id,
        })

    def _create_ddt(self, pickings=None):
        # DDT with the standard Italian transport references; pickings may
        # be linked at creation time.
        values = {
            'partner_id': self.partner.id,
            'partner_invoice_id': self.partner.id,
            'partner_shipping_id': self.partner.id,
            'ddt_type_id': self.env.ref('l10n_it_ddt.ddt_type_ddt').id,
            'carriage_condition_id': self.env.ref(
                'l10n_it_ddt.carriage_condition_PF').id,
            'goods_description_id': self.env.ref(
                'l10n_it_ddt.goods_description_CAR').id,
            'transportation_reason_id': self.env.ref(
                'l10n_it_ddt.transportation_reason_VEN').id,
            'transportation_method_id': self.env.ref(
                'l10n_it_ddt.transportation_method_DES').id,
        }
        if pickings:
            values.update({'picking_ids': [(6, 0, pickings.ids)], })
        return self.ddt_model.create(values)

    def _create_invoice_wizard(self, ddt_ids=None):
        return self.env['ddt.create.invoice'].with_context(
            active_ids=ddt_ids or []
        ).create({
            'journal_id': self.env.ref('account.sales_journal').id,
        })

    def setUp(self):
        super(TestDdt, self).setUp()
        self.partner = self.env.ref('base.res_partner_2')
        self.product1 = self.env.ref('product.product_product_25')
        self.product2 = self.env.ref('product.product_product_26')
        self.ddt_model = self.env['stock.picking.package.preparation']
        self.picking = self._create_picking()
        self.move = self._create_move(self.picking, self.product1)
        self.ddt = self._create_ddt()

    def test_invoice_from_ddt_created_by_picking(self):
        # ----- Create a ddt from an existing picking, create invoice and test
        #       it
        self.picking.action_confirm()
        self.picking.action_assign()
        self.ddt.picking_ids = [(6, 0, [self.picking.id, ])]
        self.ddt.action_put_in_pack()
        self.ddt.action_done()
        wizard = self._create_invoice_wizard([self.ddt.id, ])
        invoice_result = wizard.create_invoice()
        self.assertTrue(invoice_result.get('res_id', False))
        invoice = self.env[
            invoice_result.get('res_model', 'account.invoice')
        ].browse(invoice_result.get('res_id', False))
        self.assertEqual(invoice.invoice_line[0].product_id.id,
                         self.ddt.line_ids[0].product_id.id)
        self.assertEqual(invoice.invoice_line[0].quantity,
                         self.ddt.line_ids[0].product_uom_qty)

    def test_invoice_from_ddt_created_by_package_preparation_line(self):
        # ----- Create a ddt with a line that create automatically picking,
        #       create invoice and test it
        self._create_line(self.ddt, self.product1, 2.0)
        self.ddt.action_put_in_pack()
        self.ddt.action_done()
        wizard = self._create_invoice_wizard([self.ddt.id, ])
        invoice_result = wizard.create_invoice()
        self.assertTrue(invoice_result.get('res_id', False))
        invoice = self.env[
            invoice_result.get('res_model', 'account.invoice')
        ].browse(invoice_result.get('res_id', False))
        self.assertEqual(invoice.invoice_line[0].product_id.id,
                         self.ddt.line_ids[0].product_id.id)
        self.assertEqual(invoice.invoice_line[0].quantity,
                         self.ddt.line_ids[0].product_uom_qty)

    def test_create_ddt_from_picking(self):
        # Two pickings merged into one DDT via the wizard, then a third
        # added with the 'add to ddt' wizard.
        self.picking1 = self._create_picking()
        self._create_move(self.picking1, self.product1, quantity=2)
        self.picking2 = self._create_picking()
        self._create_move(self.picking2, self.product2, quantity=3)
        wizard = self.env['ddt.from.pickings'].with_context({
            'active_ids': [self.picking1.id, self.picking2.id]
        }).create({})
        res = wizard.create_ddt()
        ddt = self.ddt_model.browse(res['res_id'])
        self.assertEqual(len(ddt.picking_ids), 2)
        self.assertEqual(len(ddt.line_ids), 2)
        self.assertTrue(self.picking1 | self.picking2 == ddt.picking_ids)
        for line in ddt.line_ids:
            if line.product_id == self.product1:
                self.assertEqual(line.product_uom_qty, 2)
            if line.product_id == self.product2:
                self.assertEqual(line.product_uom_qty, 3)
        self.picking3 = self._create_picking()
        self._create_move(self.picking3, self.product1, quantity=1)
        self._create_move(self.picking3, self.product2, quantity=2)
        wizard = self.env['add.pickings.to.ddt'].with_context({
            'active_ids': [self.picking3.id]
        }).create({'ddt_id': ddt.id})
        wizard.add_to_ddt()
        self.assertEqual(len(ddt.picking_ids), 3)
        self.assertEqual(len(ddt.line_ids), 4)
        self.assertTrue(
            self.picking1 | self.picking2 | self.picking3 == ddt.picking_ids)
        for line in ddt.line_ids:
            if line.product_id == self.product1:
                self.assertTrue(line.product_uom_qty in [1, 2])
            if line.product_id == self.product2:
                self.assertTrue(line.product_uom_qty in [2, 3])

    def test_keep_changed_description(self):
        # A manually edited line description must survive put-in-pack.
        self.picking.action_confirm()
        self.picking.action_assign()
        self.ddt.picking_ids = [(6, 0, [self.picking.id, ])]
        self.ddt.line_ids[0].name = 'Changed for test'
        self.ddt.action_put_in_pack()
        self.assertEqual(self.ddt.line_ids[0].name, 'Changed for test')

    def test_invoice_multi_ddt(self):
        # Two DDTs invoiced together must yield one invoice with two lines.
        picking1 = self._create_picking()
        self._create_move(picking1, self.product1, quantity=2)
        picking2 = self._create_picking()
        self._create_move(picking2, self.product1, quantity=3)
        picking1.action_confirm()
        picking1.action_assign()
        picking2.action_confirm()
        picking2.action_assign()
        wiz_model = self.env['ddt.from.pickings']
        wizard = wiz_model.with_context({
            'active_ids': [picking1.id]
        }).create({})
        res = wizard.create_ddt()
        ddt1 = self.ddt_model.browse(res['res_id'])
        wizard = wiz_model.with_context({
            'active_ids': [picking2.id]
        }).create({})
        res = wizard.create_ddt()
        ddt2 = self.ddt_model.browse(res['res_id'])
        ddt1.action_put_in_pack()
        ddt1.action_done()
        ddt2.action_put_in_pack()
        ddt2.action_done()
        wizard = self._create_invoice_wizard([ddt1.id, ddt2.id])
        invoice_result = wizard.create_invoice()
        invoice = self.env['account.invoice'].browse(
            invoice_result.get('res_id', False))
        self.assertEqual(len(invoice.invoice_line), 2)
        for line in invoice.invoice_line:
            self.assertEqual(line.product_id.id, self.product1.id)
|
scigghia/l10n-italy
|
l10n_it_ddt/tests/test_ddt.py
|
Python
|
agpl-3.0
| 8,583
|
"""Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import shutil
import json
import logging
import socket
import requests
from celery import task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from builds.models import Build, Version
from core.utils import send_email
from doc_builder.loader import loading as builder_loading
from doc_builder.base import restoring_chdir
from doc_builder.environments import DockerEnvironment
from projects.exceptions import ProjectImportError
from projects.models import ImportedFile, Project
from projects.utils import run, make_api_version, make_api_project
from projects.constants import LOG_TEMPLATE
from projects import symlinks
from privacy.loader import Syncer
from tastyapi import api, apiv2
from search.parse_json import process_all_json_files
from search.utils import process_mkdocs_json
from restapi.utils import index_search_request
from vcs_support import utils as vcs_support_utils
import tastyapi
try:
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
except:
from projects.signals import before_vcs, after_vcs, before_build, after_build
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
@task(default_retry_delay=7 * 60, max_retries=5)
@restoring_chdir
def update_docs(pk, version_pk=None, build_pk=None, record=True, docker=False,
                pdf=True, man=True, epub=True, dash=True,
                search=True, force=False, intersphinx=True, localmedia=True,
                api=None, basic=False, **kwargs):
    """
    The main entry point for updating documentation.

    It handles all of the logic around whether a project is imported or we
    created it. Then it will build the html docs and other requested parts.

    `pk`
        Primary key of the project to update

    `record`
        Whether or not to keep a record of the update in the database. Useful
        for preventing changes visible to the end-user when running commands
        from the shell, for example.

    NOTE(review): ``man``, ``dash``, ``intersphinx`` and ``basic`` are
    accepted but unused in this function body -- presumably kept for task
    signature compatibility; confirm before removing.
    """
    # Dependency injection to allow for testing
    if api is None:
        api = tastyapi.api
    project_data = api.project(pk).get()
    project = make_api_project(project_data)
    log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Building'))
    version = ensure_version(api, project, version_pk)
    build = create_build(build_pk)
    results = {}
    # Build server stuff: clone, set up the env, then build, recording
    # state transitions along the way.
    try:
        record_build(api=api, build=build, record=record, results=results, state='cloning')
        vcs_results = setup_vcs(version, build, api)
        if vcs_results:
            results.update(vcs_results)
        if project.documentation_type == 'auto':
            update_documentation_type(version, api)
        if docker or settings.DOCKER_ENABLE:
            record_build(api=api, build=build, record=record, results=results, state='building')
            # note: rebinds the boolean ``docker`` flag to the environment
            docker = DockerEnvironment(version)
            build_results = docker.build()
            results.update(build_results)
        else:
            record_build(api=api, build=build, record=record, results=results, state='installing')
            setup_results = setup_environment(version)
            results.update(setup_results)
            record_build(api=api, build=build, record=record, results=results, state='building')
            build_results = build_docs(version, force, pdf, man, epub, dash, search, localmedia)
            results.update(build_results)
    except vcs_support_utils.LockTimeout, e:
        # 423 == HTTP "Locked"; used below to suppress failure notifications
        results['checkout'] = (423, "", "Version locked, retrying in 5 minutes.")
        log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                     version=version.slug, msg="Unable to lock, will retry"))
        # http://celery.readthedocs.org/en/3.0/userguide/tasks.html#retrying
        # Should completely retry the task for us until max_retries is exceeded
        update_docs.retry(exc=e, throw=False)
    except ProjectImportError, e:
        results['checkout'] = (404, "", 'Failed to import project; skipping build.\n\nError\n-----\n\n%s' % e.message)
        # Close out build in finally with error.
        pass
    except Exception, e:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Top-level Build Failure"), exc_info=True)
    finally:
        # Always record the final state, even after a failure.
        record_build(api=api, build=build, record=record, results=results, state='finished')
        record_pdf(api=api, record=record, results=results, state='finished', version=version)
        log.info(LOG_TEMPLATE.format(project=version.project.slug, version='', msg='Build finished'))
    build_id = build.get('id')
    # Web Server Tasks: hand off file moves/symlinks to the web workers.
    # Each flag is True when the corresponding step exited with code 0.
    if build_id:
        finish_build.delay(
            version_pk=version.pk,
            build_pk=build_id,
            hostname=socket.gethostname(),
            html=results.get('html', [404])[0] == 0,
            localmedia=results.get('localmedia', [404])[0] == 0,
            search=results.get('search', [404])[0] == 0,
            pdf=results.get('pdf', [404])[0] == 0,
            epub=results.get('epub', [404])[0] == 0,
        )
def ensure_version(api, project, version_pk):
    """
    Return a sane Version object for the build.

    Fetches the version by primary key when one is given; otherwise falls
    back to the project's 'latest' version.
    """
    if version_pk:
        data = api.version(version_pk).get()
    else:
        data = api.version(project.slug).get(slug='latest')['objects'][0]
    return make_api_version(data)
def update_documentation_type(version, api):
    """
    Automatically determine the doc type for a user.

    Counts markdown vs reST files in the checkout and picks 'mkdocs' when
    markdown files outnumber reST ones, 'sphinx' otherwise; persists the
    choice via the API and on the in-memory project.
    """
    checkout_path = version.project.checkout_path(version.slug)
    os.chdir(checkout_path)
    # run() returns (status, stdout, stderr) -- presumably; confirm against
    # projects.utils.run.
    files = run('find .')[1].split('\n')
    markdown = sphinx = 0
    for filename in files:
        if fnmatch.fnmatch(filename, '*.md') or fnmatch.fnmatch(filename, '*.markdown'):
            markdown += 1
        elif fnmatch.fnmatch(filename, '*.rst'):
            sphinx += 1
    ret = 'sphinx'
    if markdown > sphinx:
        ret = 'mkdocs'
    project_data = api.project(version.project.pk).get()
    project_data['documentation_type'] = ret
    api.project(version.project.pk).put(project_data)
    version.project.documentation_type = ret
def docker_build(version, pdf=True, man=True, epub=True, dash=True,
                 search=True, force=False, intersphinx=True, localmedia=True):
    """
    The code that executes inside of docker.

    Sets up the virtualenv first (side effects matter), then runs the
    documentation build and returns the merged results dict.
    """
    env_results = setup_environment(version)
    out = build_docs(version=version, force=force, pdf=pdf, man=man,
                     epub=epub, dash=dash, search=search, localmedia=localmedia)
    out.update(env_results)
    return out
def setup_vcs(version, build, api):
    """
    Update the checkout of the repo to make sure it's the latest.

    This also syncs versions in the DB.  On success the checked-out commit
    is stored on the ``build`` dict; import failures are logged and
    re-raised for the caller to handle.
    """
    log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                 version=version.slug, msg='Updating docs from VCS'))
    try:
        update_output = update_imported_docs(version.pk, api)
        commit = version.project.vcs_repo(version.slug).commit
        if commit:
            build['commit'] = commit
    except ProjectImportError:
        log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                      msg='Failed to import project; skipping build'), exc_info=True)
        raise
    return update_output
@task()
def update_imported_docs(version_pk, api=None):
    """
    Check out or update the given project's repository.

    Holds a non-blocking repo lock while checking out, then pushes the
    repo's tags/branches back to the API so DB versions stay in sync.
    Returns a dict with the 'checkout' command result.
    """
    if api is None:
        api = tastyapi.api
    version_data = api.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    ret_dict = {}
    # Make Dirs
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)
    if not project.vcs_repo():
        raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        before_vcs.send(sender=version)
        # Get the actual code on disk
        if version:
            log.info(
                LOG_TEMPLATE.format(
                    project=project.slug,
                    version=version.slug,
                    msg='Checking out version {slug}: {identifier}'.format(
                        slug=version.slug,
                        identifier=version.identifier
                    )
                )
            )
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.checkout(
                version.identifier,
            )
        else:
            # Does this ever get called?  (``version`` is always truthy
            # after make_api_version above -- likely dead code.)
            log.info(LOG_TEMPLATE.format(
                project=project.slug, version=version.slug, msg='Updating to latest revision'))
            version_slug = 'latest'
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.update()
        after_vcs.send(sender=version)
        # Update tags/version: mirror what the repo currently has back to
        # the API so the DB version list stays accurate.
        version_post_data = {'repo': version_repo.repo_url}
        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]
        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]
        try:
            apiv2.project(project.pk).sync_versions.post(version_post_data)
        except Exception, e:
            # Best-effort: a failed sync must not fail the checkout.
            print "Sync Versions Exception: %s" % e.message
    return ret_dict
def setup_environment(version):
    """
    Build the virtualenv and install the project into it.

    Always build projects with a virtualenv.  Returns a dict of step name
    -> run() result for 'venv', 'doc_builder', optionally 'requirements',
    and 'install'.
    """
    ret_dict = {}
    project = version.project
    build_dir = os.path.join(project.venv_path(version=version.slug), 'build')
    if os.path.exists(build_dir):
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Removing existing build dir'))
        shutil.rmtree(build_dir)
    if project.use_system_packages:
        site_packages = '--system-site-packages'
    else:
        site_packages = '--no-site-packages'
    # Here the command has been modified to support different
    # interpreters.
    ret_dict['venv'] = run(
        '{cmd} {site_packages} {path}'.format(
            cmd='virtualenv-2.7 -p {interpreter}'.format(
                interpreter=project.python_interpreter),
            site_packages=site_packages,
            path=project.venv_path(version=version.slug)
        )
    )
    # Other code expects sphinx-build to be installed inside the
    # virtualenv.  Using the -I option makes sure it gets installed
    # even if it is already installed system-wide (and
    # --system-site-packages is used)
    if project.use_system_packages:
        ignore_option = '-I'
    else:
        ignore_option = ''
    wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
    # Pinned toolchain installed from a local wheel directory.
    ret_dict['doc_builder'] = run(
        (
            '{cmd} install --use-wheel --find-links={wheeldir} -U {ignore_option} '
            'sphinx==1.2.2 virtualenv==1.10.1 setuptools==1.1 docutils==0.11 readthedocs-sphinx-ext==0.4.4 mkdocs==0.11.1 mock==1.0.1 pillow==2.6.1'
        ).format(
            cmd=project.venv_bin(version=version.slug, bin='pip'),
            ignore_option=ignore_option,
            wheeldir=wheeldir,
        )
    )
    # Handle requirements: use the project's configured requirements file,
    # or discover one in the docs dir / checkout root.
    requirements_file_path = project.requirements_file
    checkout_path = project.checkout_path(version.slug)
    if not requirements_file_path:
        docs_dir = builder_loading.get(project.documentation_type)(version).docs_dir()
        for path in [docs_dir, '']:
            for req_file in ['pip_requirements.txt', 'requirements.txt']:
                test_path = os.path.join(checkout_path, path, req_file)
                print('Testing %s' % test_path)
                if os.path.exists(test_path):
                    requirements_file_path = test_path
                    break
    if requirements_file_path:
        os.chdir(checkout_path)
        ret_dict['requirements'] = run(
            '{cmd} install --exists-action=w -r {requirements}'.format(
                cmd=project.venv_bin(version=version.slug, bin='pip'),
                requirements=requirements_file_path))
    # Handle setup.py: install the project itself into the virtualenv.
    os.chdir(project.checkout_path(version.slug))
    if os.path.isfile("setup.py"):
        if getattr(settings, 'USE_PIP_INSTALL', False):
            ret_dict['install'] = run(
                '{cmd} install --ignore-installed .'.format(
                    cmd=project.venv_bin(version=version.slug, bin='pip')))
        else:
            ret_dict['install'] = run(
                '{cmd} setup.py install --force'.format(
                    cmd=project.venv_bin(version=version.slug,
                                         bin='python')))
    else:
        # 999 is a sentinel "skipped" exit code used throughout this module.
        ret_dict['install'] = (999, "", "No setup.py, skipping install")
    return ret_dict
@task()
def build_docs(version, force, pdf, man, epub, dash, search, localmedia):
    """
    This handles the actual building of the documentation.

    Runs the HTML builder first, then (per flags and doc type) search
    JSON, local media, PDF and ePub builders.  Returns a dict of step
    name -> builder result tuple.  ``man`` and ``dash`` are accepted but
    not used below -- presumably kept for signature compatibility.
    """
    project = version.project
    results = {}
    before_build.send(sender=version)
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        html_builder = builder_loading.get(project.documentation_type)(version)
        if force:
            html_builder.force()
        html_builder.append_conf()
        results['html'] = html_builder.build()
        if results['html'][0] == 0:
            html_builder.move()
        # Gracefully attempt to move files via task on web workers.
        try:
            move_files.delay(
                version_pk=version.pk,
                html=True,
                hostname=socket.gethostname(),
            )
        except socket.error:
            # Broker unreachable; the finish_build task will retry moves.
            pass
        fake_results = (999, "Project Skipped, Didn't build",
                        "Project Skipped, Didn't build")
        if 'mkdocs' in project.documentation_type:
            if search:
                try:
                    search_builder = builder_loading.get('mkdocs_json')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        search_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
        if 'sphinx' in project.documentation_type:
            # Search builder. Creates JSON from docs and sends it to the
            # server.
            if search:
                try:
                    search_builder = builder_loading.get(
                        'sphinx_search')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        # Copy json for safe keeping
                        search_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
            # Local media builder for singlepage HTML download archive
            if localmedia:
                try:
                    localmedia_builder = builder_loading.get(
                        'sphinx_singlehtmllocalmedia')(version)
                    results['localmedia'] = localmedia_builder.build()
                    if results['localmedia'][0] == 0:
                        localmedia_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="Local Media HTML Build Error"), exc_info=True)
            # Optional build steps (skipped for HTML-only projects)
            if version.project.slug not in HTML_ONLY and not project.skip:
                if pdf:
                    pdf_builder = builder_loading.get('sphinx_pdf')(version)
                    results['pdf'] = pdf_builder.build()
                    # Always move pdf results even when there's an error.
                    # if pdf_results[0] == 0:
                    pdf_builder.move()
                else:
                    results['pdf'] = fake_results
                if epub:
                    epub_builder = builder_loading.get('sphinx_epub')(version)
                    results['epub'] = epub_builder.build()
                    if results['epub'][0] == 0:
                        epub_builder.move()
                else:
                    results['epub'] = fake_results
    after_build.send(sender=version)
    return results
def create_build(build_pk):
    """
    Old placeholder for build creation. Now it just gets it from the database.

    Fetches the build dict via the module-level ``api`` client and strips
    fields the update endpoint should not receive; returns an empty dict
    when no primary key is given.
    """
    if build_pk:
        build = api.build(build_pk).get()
        for key in ['project', 'version', 'resource_uri', 'absolute_uri']:
            if key in build:
                del build[key]
    else:
        build = {}
    return build
def record_build(api, record, build, results, state):
    """
    Record a build by hitting the API.

    Aggregates the per-step (exit_code, stdout, stderr) tuples in
    ``results`` into the build's state/success/setup/output fields and
    PUTs the build.  Returns nothing; no-op when ``record`` is falsy.
    """
    if not record:
        return None
    setup_steps = ['checkout', 'venv', 'doc_builder', 'requirements', 'install']
    output_steps = ['html']
    all_steps = setup_steps + output_steps
    build['state'] = state
    # Success is defined solely by the HTML step.
    if 'html' in results:
        build['success'] = results['html'][0] == 0
    else:
        build['success'] = False
    # Set global state
    # for step in all_steps:
    #     if results.get(step, False):
    #         if results.get(step)[0] != 0:
    #             results['success'] = False
    # Worst (highest) exit code across all steps.
    build['exit_code'] = max([results.get(step, [0])[0] for step in all_steps])
    build['setup'] = build['setup_error'] = ""
    build['output'] = build['error'] = ""
    for step in setup_steps:
        if step in results:
            build['setup'] += "\n\n%s\n-----\n\n" % step
            build['setup'] += results.get(step)[1]
            build['setup_error'] += "\n\n%s\n-----\n\n" % step
            build['setup_error'] += results.get(step)[2]
    for step in output_steps:
        if step in results:
            build['output'] += "\n\n%s\n-----\n\n" % step
            build['output'] += results.get(step)[1]
            build['error'] += "\n\n%s\n-----\n\n" % step
            build['error'] += results.get(step)[2]
    # Attempt to stop unicode errors on build reporting
    for key, val in build.items():
        if isinstance(val, basestring):
            build[key] = val.decode('utf-8', 'ignore')
    try:
        api.build(build['id']).put(build)
    except Exception:
        log.error("Unable to post a new build", exc_info=True)
def record_pdf(api, record, results, state, version):
    """Report the PDF build result for a Sphinx project to the API.

    No-op unless recording is enabled and the project is Sphinx-based.
    A missing 'pdf' entry in ``results`` is reported as a failure with
    the sentinel exit code 999.
    """
    if not record or 'sphinx' not in version.project.documentation_type:
        return None
    try:
        if 'pdf' in results:
            pdf_exit = results['pdf'][0]
            pdf_success = pdf_exit == 0
            pdf_output = results['pdf'][1]
            pdf_error = results['pdf'][2]
        else:
            # No result recorded at all: synthesize a failure entry.
            pdf_exit = 999
            pdf_success = False
            pdf_output = pdf_error = "PDF Failed"
        # Decode permissively to avoid unicode errors in the API payload.
        pdf_output = pdf_output.decode('utf-8', 'ignore')
        pdf_error = pdf_error.decode('utf-8', 'ignore')
        api.build.post(dict(
            state=state,
            project='/api/v1/project/%s/' % version.project.pk,
            version='/api/v1/version/%s/' % version.pk,
            success=pdf_success,
            type='pdf',
            output=pdf_output,
            error=pdf_error,
            exit_code=pdf_exit,
        ))
    except Exception:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Unable to post a new build"), exc_info=True)
###########
# Web tasks
###########
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """
    Build Finished, do house keeping bits

    Runs on a web node once a build completes: marks the version
    active/built when HTML succeeded, pulls artifacts from the build
    host, refreshes symlinks, and fires the delayed indexing/metadata
    tasks. Failure notifications are queued for non-HTML results.
    """
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    if html:
        # A successful HTML build makes the version publicly visible.
        version.active = True
        version.built = True
        version.save()
    move_files(
        version_pk=version_pk,
        hostname=hostname,
        html=html,
        localmedia=localmedia,
        search=search,
        pdf=pdf,
        epub=epub,
    )
    symlinks.symlink_cnames(version)
    symlinks.symlink_translations(version)
    symlinks.symlink_subprojects(version)
    if version.project.single_version:
        symlinks.symlink_single_version(version)
    else:
        symlinks.remove_symlink_single_version(version)
    # Delayed tasks
    update_static_metadata.delay(version.project.pk)
    fileify.delay(version.pk, commit=build.commit)
    update_search.delay(version.pk, commit=build.commit)
    # Notify only for failed builds of regular versions. NOTE(review):
    # exit code 423 looks like a "locked/skipped" sentinel -- confirm.
    if not html and version.slug != 'stable' and build.exit_code != 423:
        send_notifications.delay(version.pk, build_pk=build.pk)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """Copy selected build artifacts for a version onto this host.

    ``hostname`` is passed through to ``Syncer.copy`` as the source
    host; each boolean flag enables syncing one artifact type from its
    build-time location into the production media path.
    """
    version = Version.objects.get(pk=version_pk)
    if html:
        from_path = version.project.artifact_path(version=version.slug, type=version.project.documentation_type)
        target = version.project.rtd_build_path(version.slug)
        Syncer.copy(from_path, target, host=hostname)
    if 'sphinx' in version.project.documentation_type:
        if localmedia:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_localmedia')
            to_path = version.project.get_production_media_path(type='htmlzip', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        if search:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_search')
            to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        # Always move PDF's because the return code lies.
        if pdf:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_pdf')
            to_path = version.project.get_production_media_path(type='pdf', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        if epub:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_epub')
            to_path = version.project.get_production_media_path(type='epub', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
    if 'mkdocs' in version.project.documentation_type:
        if search:
            from_path = version.project.artifact_path(version=version.slug, type='mkdocs_json')
            to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit):
    """Re-index a version's pages for search.

    Collects the parsed page JSON for the version's documentation type
    and ships it to the search backend, tagged with the build commit.
    """
    version = Version.objects.get(pk=version_pk)
    # Default to an empty page list so an unrecognized documentation
    # type no longer raises NameError below.
    page_list = []
    if 'sphinx' in version.project.documentation_type:
        page_list = process_all_json_files(version, build_dir=False)
    if 'mkdocs' in version.project.documentation_type:
        page_list = process_mkdocs_json(version, build_dir=False)
    log_msg = ' '.join([page['path'] for page in page_list])
    log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, log_msg))
    index_search_request(version=version, page_list=page_list, commit=commit)
@task(queue='web')
def fileify(version_pk, commit):
    """
    Create ImportedFile objects for all of a version's files.
    This is a prereq for indexing the docs for search.
    It also causes celery-haystack to kick off an index of the file.
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    path = project.rtd_build_path(version.slug)
    if path:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Creating ImportedFiles'))
        # Walk the built HTML tree and register every *.html file.
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if fnmatch.fnmatch(filename, '*.html'):
                    # Path relative to the build root, with leading slashes trimmed.
                    dirpath = os.path.join(root.replace(path, '').lstrip('/'),
                                           filename.lstrip('/'))
                    obj, created = ImportedFile.objects.get_or_create(
                        project=project,
                        version=version,
                        path=dirpath,
                        name=filename,
                        commit=commit,
                    )
                    if not created:
                        # Re-save existing rows -- presumably to trigger the
                        # haystack re-index signal; confirm before removing.
                        obj.save()
        # Delete ImportedFiles from previous versions
        ImportedFile.objects.filter(project=project, version=version).exclude(commit=commit).delete()
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
@task(queue='web')
def send_notifications(version_pk, build_pk):
    """Fire every configured webhook and email notification for a build."""
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    webhooks = version.project.webhook_notifications.all()
    for hook in webhooks:
        webhook_notification(version, build, hook.url)
    addresses = version.project.emailhook_notifications.all()
    for email in addresses.values_list('email', flat=True):
        email_notification(version, build, email)
def email_notification(version, build, email):
    """Send a build-failed email for *version*/*build* to *email*.

    The subject embeds either the short commit hash or the version's
    verbose name; the body templates get build and unsubscribe URLs.
    """
    log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                  msg='sending email to: %s' % email))
    context = {'version': version,
               'project': version.project,
               'build': build,
               'build_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   build.get_absolute_url()),
               'unsub_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   reverse('projects_notifications', args=[version.project.slug])),
               }
    # Prefer the short commit hash in the subject when one is recorded.
    if build.commit:
        title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
    else:
        title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
    send_email(
        email,
        title,
        template='projects/email/build_failed.txt',
        template_html='projects/email/build_failed.html',
        context=context
    )
def webhook_notification(version, build, hook_url):
    """POST a JSON build-result payload to a configured webhook URL.

    Fix: the function previously referenced ``project`` without ever
    defining it (a guaranteed NameError); it is now derived from
    ``version``.
    """
    project = version.project
    data = json.dumps({
        'name': project.name,
        'slug': project.slug,
        'build': {
            'id': build.id,
            'success': build.success,
            'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
        }
    })
    log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
    requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk):
    """Update static metadata JSON file

    Metadata settings include the following project settings:
    version
        The default version for the project, default: `latest`
    language
        The default language for the project, default: `en`
    languages
        List of languages built by linked translation projects.
    """
    project = Project.objects.get(pk=project_pk)
    log.info(LOG_TEMPLATE.format(
        project=project.slug,
        version='',
        msg='Updating static metadata',
    ))
    translations = [trans.language for trans in project.translations.all()]
    languages = set(translations)
    # Convert to JSON safe types
    metadata = {
        'version': project.default_version,
        'language': project.language,
        'languages': list(languages),
        'single_version': project.single_version,
    }
    try:
        path = project.static_metadata_path()
        # ``with`` guarantees the handle is closed even if json.dump
        # raises (the old code leaked it in that case).
        with open(path, 'w') as fh:
            json.dump(metadata, fh)
        # NOTE(review): syncs path onto this same hostname -- presumably
        # Syncer fans it out to the other web hosts; confirm.
        Syncer.copy(path, path, host=socket.gethostname(), file=True)
    except (AttributeError, IOError) as e:
        log.debug(LOG_TEMPLATE.format(
            project=project.slug,
            version='',
            msg='Cannot write to metadata.json: {0}'.format(e)
        ))
##############
# Random Tasks
##############
@task()
def remove_dir(path):
    """Delete *path* recursively on the build/celery server.

    Thin wrapper around ``shutil.rmtree`` so that app servers can ask a
    build server to clean things up over celery.
    """
    log.info("Removing %s" % path)
    shutil.rmtree(path)
# @task()
# def update_config_from_json(version_pk):
# """
# Check out or update the given project's repository.
# """
# Remove circular import
# from projects.forms import ImportProjectForm
# version_data = api.version(version_pk).get()
# version = make_api_version(version_data)
# project = version.project
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="Checking for json config"))
# try:
# rtd_json = open(os.path.join(
# project.checkout_path(version.slug),
# '.rtd.json'
# ))
# json_obj = json.load(rtd_json)
# for key in json_obj.keys():
# Treat the defined fields on the Import form as
# the canonical list of allowed user editable fields.
# This is in essense just another UI for that form.
# if key not in ImportProjectForm._meta.fields:
# del json_obj[key]
# except IOError:
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="No rtd.json found."))
# return None
# project_data = api.project(project.pk).get()
# project_data.update(json_obj)
# api.project(project.pk).put(project_data)
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="Updated from JSON."))
# def update_state(version):
# """
# Keep state between the repo and the database
# """
# log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Setting config values from .rtd.yml'))
# try:
# update_config_from_json(version.pk)
# except Exception, e:
# Never kill the build, but log the error
# log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Failure in config parsing code: %s ' % e.message))
# @task()
# def zenircbot_notification(version_id):
# version = version.objects.get(id=version_id)
# message = "build of %s successful" % version
# redis_obj = redis.redis(**settings.redis)
# irc = getattr(settings, 'irc_channel', '#readthedocs-build')
# try:
# redis_obj.publish('out',
# json.dumps({
# 'version': 1,
# 'type': 'privmsg',
# 'data': {
# 'to': irc,
# 'message': message,
# }
# }))
# except redis.connectionerror:
# return
# @task()
# def clear_artifacts(version_pk):
# """ Remove artifacts from the build server. """
# Stop doing this for now as it causes 403s if people build things back to
# back some times because of a race condition
# version_data = api.version(version_pk).get()
# version = make_api_version(version_data)
# run('rm -rf %s' % version.project.full_epub_path(version.slug))
# run('rm -rf %s' % version.project.full_man_path(version.slug))
# run('rm -rf %s' % version.project.full_build_path(version.slug))
# run('rm -rf %s' % version.project.full_latex_path(version.slug))
# @periodic_task(run_every=crontab(hour="*/12", minute="*", day_of_week="*"))
# def update_mirror_docs():
# """
# A periodic task used to update all projects that we mirror.
# """
# record = False
# current = datetime.datetime.now()
# Only record one build a day, at midnight.
# if current.hour == 0 and current.minute == 0:
# record = True
# data = apiv2.project().get(mirror=True, page_size=500)
# for project_data in data['results']:
# p = make_api_project(project_data)
# update_docs(pk=p.pk, record=record)
|
frodopwns/readthedocs.org
|
readthedocs/projects/tasks.py
|
Python
|
mit
| 33,386
|
from app import db
from app.models import ContactGroup, Gender, Contact
import random
from datetime import datetime
def get_random_name(names_list, size=1):
    """Return `size` randomly chosen, capitalized names joined by spaces."""
    picks = []
    for _ in range(0, size):
        idx = random.randrange(0, len(names_list))
        picks.append(names_list[idx].capitalize())
    return " ".join(picks)
try:
db.session.add(ContactGroup(name='Friends'))
db.session.add(ContactGroup(name='Family'))
db.session.add(ContactGroup(name='Work'))
db.session.commit()
except:
db.session.rollback()
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
except:
db.session.rollback()
f = open('NAMES.DIC', "rb")
names_list = [x.strip() for x in f.readlines()]
f.close()
for i in range(1, 50):
c = Contact()
c.name = get_random_name(names_list, random.randrange(2, 6))
c.address = 'Street ' + names_list[random.randrange(0, len(names_list))]
c.personal_phone = random.randrange(1111111, 9999999)
c.personal_celphone = random.randrange(1111111, 9999999)
c.contact_group_id = random.randrange(1, 4)
c.gender_id = random.randrange(1, 3)
year = random.choice(range(1900, 2012))
month = random.choice(range(1, 12))
day = random.choice(range(1, 28))
c.birthday = datetime(year, month, day)
db.session.add(c)
try:
db.session.commit()
print "inserted", c
except:
db.session.rollback()
|
rpiotti/Flask-AppBuilder
|
examples/user_registration/testdata.py
|
Python
|
bsd-3-clause
| 1,431
|
import os
from django.core.files import File
from django.core.files.base import ContentFile
from django.test import TestCase
from . import models
class TestOverwriteStorage(TestCase):
    """Verify overwrite behaviour: re-uploading under an existing name
    must keep that name while clobbering the old content.

    Each ``test_*`` method runs the same scenario against one model
    flavour. Fix: expected content was read via ``open(...).read()``
    without closing the handle (ResourceWarning); reads now go through
    the ``read_content`` helper which closes the file.
    """

    def setUp(self):
        # Fixture files shipped with the test suite.
        self.text_file1 = "tests/data/text_file1.txt"
        self.text_file2 = "tests/data/text_file2.txt"
        self.png_file1 = "tests/data/dark_orange.png"
        self.png_file2 = "tests/data/dark_green.png"

    def get_file_name(self, file_path):
        """Return just the basename of *file_path*."""
        return os.path.basename(file_path)

    def read_content(self, file_path):
        """Read a fixture file's bytes, closing the handle afterwards."""
        with open(file_path, "rb") as fh:
            return fh.read()

    def get_bogus_file_upload(self, file_path, file_name=None):
        """Wrap *file_path* in a Django ``File``, optionally renamed.

        The handle is deliberately left open: Django reads from it when
        the model instance is saved.
        """
        if file_name is None:
            file_name = os.path.basename(file_path)
        return File(open(file_path, "rb"), name=file_name)

    def test_foo(self):
        foo = models.Foo()
        foo.save()
        foo.doc = self.get_bogus_file_upload(self.text_file1)
        foo.save()
        expected_path = \
            models.gen_foo_file_path(foo,
                                     self.get_file_name(self.text_file1))
        self.assertEqual(foo.doc.name, expected_path)
        self.assertEqual(foo.doc.read(), self.read_content(self.text_file1))
        # read in text_file2 but save the name as text_file1
        foo.doc = self.get_bogus_file_upload(
            self.text_file2,
            file_name=self.get_file_name(self.text_file1)
        )
        foo.save()
        # foo.doc keeps text_file1's name but is clobbered with
        # text_file2's content
        expected_path = models.gen_foo_file_path(foo,
                                                 self.get_file_name(self.text_file1))
        self.assertEqual(foo.doc.name, expected_path)
        self.assertEqual(foo.doc.read(), self.read_content(self.text_file2))

    def test_bar(self):
        bar = models.Bar(doc=self.get_bogus_file_upload(self.text_file1))
        bar.save()
        expected_path = \
            models.gen_bar_file_path(bar,
                                     self.get_file_name(self.text_file1))
        self.assertEqual(bar.doc.name, expected_path)
        self.assertEqual(bar.doc.read(), self.read_content(self.text_file1))
        # read in text_file2 but save the name as text_file1
        bar.doc = self.get_bogus_file_upload(
            self.text_file2,
            file_name=self.get_file_name(self.text_file1)
        )
        bar.save()
        # bar.doc keeps text_file1's name but is clobbered with
        # text_file2's content
        expected_path = models.gen_bar_file_path(bar,
                                                 self.get_file_name(self.text_file1))
        self.assertEqual(bar.doc.name, expected_path)
        self.assertEqual(bar.doc.read(), self.read_content(self.text_file2))

    def test_baz(self):
        baz = models.Baz(doc=self.get_bogus_file_upload(self.text_file1))
        baz.save()
        expected_path = \
            models.gen_baz_file_path(baz,
                                     self.get_file_name(self.text_file1))
        self.assertEqual(baz.doc.name, expected_path)
        self.assertEqual(baz.doc.read(), self.read_content(self.text_file1))
        # read in text_file2 but save the name as text_file1
        baz.doc = self.get_bogus_file_upload(
            self.text_file2,
            file_name=self.get_file_name(self.text_file1)
        )
        baz.save()
        # baz.doc keeps text_file1's name but is clobbered with
        # text_file2's content
        expected_path = models.gen_baz_file_path(baz,
                                                 self.get_file_name(self.text_file1))
        self.assertEqual(baz.doc.name, expected_path)
        self.assertEqual(baz.doc.read(), self.read_content(self.text_file2))

    def test_boo(self):
        boo = models.Boo(doc=self.get_bogus_file_upload(self.text_file1))
        boo.save()
        expected_path = \
            models.boo_storage.gen_file_path(boo,
                                             self.get_file_name(self.text_file1))
        self.assertEqual(boo.doc.name, expected_path)
        self.assertEqual(boo.doc.read(), self.read_content(self.text_file1))
        # read in text_file2 but save the name as text_file1
        boo.doc = self.get_bogus_file_upload(
            self.text_file2,
            file_name=self.get_file_name(self.text_file1)
        )
        boo.save()
        # boo.doc keeps text_file1's name but is clobbered with
        # text_file2's content
        expected_path = models.boo_storage.gen_file_path(boo,
                                                         self.get_file_name(self.text_file1))
        self.assertEqual(boo.doc.name, expected_path)
        self.assertEqual(boo.doc.read(), self.read_content(self.text_file2))

    def test_quux(self):
        quux = models.Quux(image=self.get_bogus_file_upload(self.png_file1))
        quux.save()
        expected_path = \
            models.gen_quux_file_path(quux,
                                      self.get_file_name(self.png_file1))
        self.assertEqual(quux.image.name, expected_path)
        self.assertEqual(quux.image.read(), self.read_content(self.png_file1))
        # read in png_file2 but save the name as png_file1
        quux.image = self.get_bogus_file_upload(
            self.png_file2,
            file_name=self.get_file_name(self.png_file1)
        )
        quux.save()
        # quux.image keeps png_file1's name but is clobbered with
        # png_file2's content
        expected_path = models.gen_quux_file_path(quux,
                                                  self.get_file_name(self.png_file1))
        self.assertEqual(quux.image.name, expected_path)
        self.assertEqual(quux.image.read(), self.read_content(self.png_file2))
|
ckot/django-overwrite-storage
|
tests/tests.py
|
Python
|
mit
| 6,321
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
from indra.belief import _get_belief_package
# Shared Evidence fixtures: two machine-reading sources plus one direct
# human assertion (which the BeliefEngine treats as fully trusted).
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
    """One reach evidence: belief = 1 - (rand + syst) error for reach."""
    engine = BeliefEngine()
    expected = 1 - (engine.prior_probs['rand']['reach'] +
                    engine.prior_probs['syst']['reach'])
    stmt = Phosphorylation(None, Agent('a'), evidence=[ev1])
    assert stmt.belief == 1
    engine.set_prior_probs([stmt])
    assert stmt.belief == expected
def test_prior_prob_two_same():
    """Two evidences from the same reader square its random-error term."""
    engine = BeliefEngine()
    expected = 1 - (engine.prior_probs['rand']['reach']**2 +
                    engine.prior_probs['syst']['reach'])
    stmt = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
    assert stmt.belief == 1
    engine.set_prior_probs([stmt])
    assert stmt.belief == expected
def test_prior_prob_two_different():
    """Evidences from two readers multiply their total error terms."""
    engine = BeliefEngine()
    err_reach = (engine.prior_probs['rand']['reach'] +
                 engine.prior_probs['syst']['reach'])
    err_trips = (engine.prior_probs['rand']['trips'] +
                 engine.prior_probs['syst']['trips'])
    expected = 1 - err_reach * err_trips
    stmt = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
    assert stmt.belief == 1
    engine.set_prior_probs([stmt])
    assert stmt.belief == expected
def test_prior_prob_one_two():
    """Two reach + one trips evidence combine both error expressions."""
    engine = BeliefEngine()
    err_reach = (engine.prior_probs['rand']['reach']**2 +
                 engine.prior_probs['syst']['reach'])
    err_trips = (engine.prior_probs['rand']['trips'] +
                 engine.prior_probs['syst']['trips'])
    expected = 1 - err_reach * err_trips
    stmt = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
    assert stmt.belief == 1
    engine.set_prior_probs([stmt])
    assert stmt.belief == expected
def test_prior_prob_assertion():
    """A direct assertion evidence pins belief at 1 regardless of readers."""
    engine = BeliefEngine()
    stmt = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
    assert stmt.belief == 1
    engine.set_prior_probs([stmt])
    assert stmt.belief == 1
def test_hierarchy_probs1():
    """A supporting statement accumulates belief from the one it supports."""
    engine = BeliefEngine()
    s1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    s2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    s2.supports = [s1]
    s1.supported_by = [s2]
    s1.belief, s2.belief = 0.5, 0.8
    engine.set_hierarchy_probs([s1, s2])
    assert s1.belief == 0.5
    assert s2.belief == 0.9
def test_hierarchy_probs2():
    """Belief propagates along a three-level support chain."""
    engine = BeliefEngine()
    s1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    s2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    s3 = Phosphorylation(None, Agent('c'), evidence=[ev3])
    s2.supports = [s1]
    s3.supports = [s1, s2]
    s1.supported_by = [s2, s3]
    s2.supported_by = [s3]
    s1.belief, s2.belief, s3.belief = 0.5, 0.8, 0.2
    engine.set_hierarchy_probs([s1, s2, s3])
    assert s1.belief == 0.5
    assert s2.belief == 0.9
    assert s3.belief == 0.92
def test_hierarchy_probs3():
    """One statement supporting two others collects belief from both."""
    engine = BeliefEngine()
    s1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    s2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    s3 = Phosphorylation(None, Agent('c'), evidence=[ev3])
    s3.supports = [s1, s2]
    s1.supported_by = [s3]
    s2.supported_by = [s3]
    s1.belief, s2.belief, s3.belief = 0.5, 0.8, 0.2
    engine.set_hierarchy_probs([s1, s2, s3])
    assert s1.belief == 0.5
    assert s2.belief == 0.8
    assert s3.belief == 0.92
def test_hierarchy_probs4():
    """Diamond-shaped support graph: the top statement sees all others."""
    engine = BeliefEngine()
    s1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    s2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    s3 = Phosphorylation(None, Agent('c'), evidence=[ev3])
    s4 = Phosphorylation(None, Agent('d'), evidence=[ev1])
    s4.supports = [s1, s2, s3]
    s3.supports = [s1]
    s2.supports = [s1]
    s1.supported_by = [s2, s3, s4]
    s2.supported_by = [s4]
    s3.supported_by = [s4]
    s1.belief, s2.belief, s3.belief, s4.belief = 0.5, 0.8, 0.2, 0.6
    # NOTE(review): s4 is asserted below yet only s1-s3 are passed in;
    # presumably it is reached through the support graph -- confirm.
    engine.set_hierarchy_probs([s1, s2, s3])
    assert s1.belief == 0.5
    assert s2.belief == 0.9
    assert s3.belief == 0.6
    assert s4.belief == 0.968
def test_get_belief_package1():
    """A lone statement yields a single-entry belief package."""
    stmt = Phosphorylation(None, Agent('a'))
    stmt.belief = 0.53
    pkg = _get_belief_package(stmt)
    assert len(pkg) == 1
    assert pkg[0][0] == 0.53
    assert pkg[0][1] == stmt.matches_key()
def test_get_belief_package2():
    """A supporting statement's package also carries the one it supports."""
    s1 = Phosphorylation(None, Agent('A1'))
    s2 = Phosphorylation(None, Agent('A'))
    s1.supported_by = [s2]
    s2.supports = [s1]
    s1.belief, s2.belief = 0.8, 0.6
    pkg = _get_belief_package(s1)
    assert len(pkg) == 1
    assert pkg[0][0] == 0.8
    assert pkg[0][1] == s1.matches_key()
    pkg = _get_belief_package(s2)
    assert len(pkg) == 2
    assert pkg[0][0] == 0.8
    assert pkg[0][1] == s1.matches_key()
    assert pkg[1][0] == 0.6
    assert pkg[1][1] == s2.matches_key()
def test_get_belief_package3():
    """Packages grow as one walks down the support hierarchy."""
    s1 = Phosphorylation(Agent('B'), Agent('A1'))
    s2 = Phosphorylation(None, Agent('A1'))
    s3 = Phosphorylation(None, Agent('A'))
    s1.supported_by = [s2, s3]
    s2.supported_by = [s3]
    s2.supports = [s1]
    s3.supports = [s1, s2]
    s1.belief, s2.belief, s3.belief = 0.8, 0.6, 0.7
    pkg = _get_belief_package(s1)
    assert len(pkg) == 1
    assert {p[0] for p in pkg} == {0.8}
    pkg = _get_belief_package(s2)
    assert len(pkg) == 2
    assert {p[0] for p in pkg} == {0.6, 0.8}
    pkg = _get_belief_package(s3)
    assert len(pkg) == 3
    assert {p[0] for p in pkg} == {0.6, 0.7, 0.8}
|
jmuhlich/indra
|
indra/tests/test_belief_engine.py
|
Python
|
bsd-2-clause
| 5,746
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Xibo - Digitial Signage - http://www.xibo.org.uk
# Copyright (C) 2009-13 Alex Harrington
#
# This file is part of Xibo.
#
# Xibo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Xibo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Xibo. If not, see <http://www.gnu.org/licenses/>.
#
from VideoMedia import VideoMedia
from threading import Thread
import os
import urllib
class LocalvideoMedia(VideoMedia):
    """Media plugin playing a locally stored video file in a region."""

    def add(self):
        """Queue an <video> node for this media item on the player."""
        video = urllib.unquote(self.options['uri'])
        # Looping is only enabled for a single-item region when the
        # VideoMedia config asks for it.
        loop = '0'
        if self.parent.numNodes == 1 and self.config.getboolean('VideoMedia', 'loop'):
            loop = '1'
        tmpXML = '<video href="%s" id="%s" opacity="0" loop="%s" />' % (video, self.mediaNodeName, loop)
        self.p.enqueue('add', (tmpXML, self.regionNodeName))

    def requiredFiles(self):
        """No downloads needed: the video is assumed to already be local."""
        return []
|
xibosignage/xibo-pyclient
|
plugins/media/LocalvideoMedia.py
|
Python
|
agpl-3.0
| 1,501
|
#!/usr/bin/env python
import bohrium as np
from bohrium import visualization
# Smoke test: build a constant 100x100 array and hand it to Bohrium's
# visualization module for surface plotting.
a = np.ones((100,100))
visualization.plot_surface(a)
|
Ektorus/bohrium
|
ve/cpu/tools/vizviz.py
|
Python
|
lgpl-3.0
| 131
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP Zenodo service views."""
import requests
from flask import current_app, jsonify
from invenio_files_rest.models import FileInstance, ObjectVersion
from . import blueprint
def _get_zenodo_record(zenodo_id):
    """Get record from zenodo.

    Returns a ``(json_body, status_code)`` tuple from the Zenodo REST API.
    """
    base_url = current_app.config.get('ZENODO_SERVER_URL')
    token = current_app.config.get('ZENODO_ACCESS_TOKEN')
    resp = requests.get(
        "{}/records/{}".format(base_url, zenodo_id),
        headers={'Content-Type': 'application/json'},
        params={"access_token": token},
    )
    return resp.json(), resp.status_code
@blueprint.route('/zenodo/record/<zenodo_id>')
def get_zenodo_record(zenodo_id):
    """Get record from zenodo (route).

    Thin JSON proxy: forwards the helper's body and status code.
    """
    resp, status = _get_zenodo_record(zenodo_id)
    return jsonify(resp), status
@blueprint.route('/zenodo/<bucket_id>/<filename>')
def upload_to_zenodo(bucket_id, filename):
    """Upload code to zenodo.

    Creates a new (empty) deposition, then streams the local file
    ``<filename>.tar.gz`` from the given bucket into the deposition's
    file bucket. Responds with the upload's HTTP status code.
    """
    zenodo_server_url = current_app.config.get('ZENODO_SERVER_URL')
    params = {"access_token": current_app.config.get(
        'ZENODO_ACCESS_TOKEN')}
    # The stored object is expected to carry a .tar.gz suffix.
    filename = filename + '.tar.gz'
    # NOTE(review): no status check on the deposition-create response;
    # a failed POST makes the ['links']['bucket'] lookup below raise.
    r = requests.post(zenodo_server_url,
                      params=params, json={},
                      )
    file_obj = ObjectVersion.get(bucket_id, filename)
    file = FileInstance.get(file_obj.file_id)
    bucket_url = r.json()['links']['bucket']
    with open(file.uri, 'rb') as fp:
        response = requests.put(
            bucket_url + '/{}'.format(filename),
            data=fp,
            params=params,
        )
    return jsonify({"status": response.status_code})
|
pamfilos/data.cern.ch
|
cap/modules/services/views/zenodo.py
|
Python
|
gpl-2.0
| 2,719
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Fabien Poussin'
__version__ = '0.3'
from xml.etree import ElementTree as etree
from jinja2 import Template
from os.path import expanduser, sep, dirname, abspath
from argparse import ArgumentParser
from traceback import print_exc
import re
import pprint
# Must be created while ``pprint`` still names the stdlib module; the
# function definition below deliberately shadows it.
pretty_print = pprint.PrettyPrinter(indent=2)


def pprint(*args):
    """Pretty-print all positional arguments as a single tuple.

    Fix: the star-parameter was misleadingly named ``kwargs`` although
    it collects positional arguments; renamed to ``args`` (invisible to
    callers).
    """
    pretty_print.pprint(args)
# ChibiOS board.h macro templates; ``{0}`` is the pin name, and
# PIN_AFIO_AF additionally takes the alternate-function number.
PIN_MODE_INPUT = "PIN_MODE_INPUT({0})"
PIN_MODE_OUTPUT = "PIN_MODE_OUTPUT({0})"
PIN_MODE_ALTERNATE = "PIN_MODE_ALTERNATE({0})"
PIN_MODE_ANALOG = "PIN_MODE_ANALOG({0})"
PIN_ODR_LOW = "PIN_ODR_LOW({0})"
PIN_ODR_HIGH = "PIN_ODR_HIGH({0})"
PIN_OTYPE_PUSHPULL = "PIN_OTYPE_PUSHPULL({0})"
PIN_OTYPE_OPENDRAIN = "PIN_OTYPE_OPENDRAIN({0})"
PIN_OSPEED_VERYLOW = "PIN_OSPEED_VERYLOW({0})"
PIN_OSPEED_LOW = "PIN_OSPEED_LOW({0})"
PIN_OSPEED_MEDIUM = "PIN_OSPEED_MEDIUM({0})"
PIN_OSPEED_HIGH = "PIN_OSPEED_HIGH({0})"
PIN_PUPDR_FLOATING = "PIN_PUPDR_FLOATING({0})"
PIN_PUPDR_PULLUP = "PIN_PUPDR_PULLUP({0})"
PIN_PUPDR_PULLDOWN = "PIN_PUPDR_PULLDOWN({0})"
PIN_AFIO_AF = "PIN_AFIO_AF({0}, {1})"
FMT = '{0}'
FMT_DEF = '({0})'
# GPIO configuration register groups, plus the alternate-function pair.
PIN_CONF_LIST = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR']
PIN_CONF_LIST_AF = ['AFRL', 'AFRH']
# Safe reset state for any pad not configured in the CubeMX project.
DEFAULT_PAD = {"SIGNAL": "UNUSED",
               "LABEL": "",
               "MODER": PIN_MODE_ANALOG,
               "OTYPER": PIN_OTYPE_PUSHPULL,
               "OSPEEDR": PIN_OSPEED_VERYLOW,
               "PUPDR": PIN_PUPDR_FLOATING,
               "ODR": PIN_ODR_HIGH}
# CubeMX (HAL) setting names -> ChibiOS macro templates.
PIN_MODE_TRANSLATE = {"GPIO_MODE_AF_PP": PIN_MODE_ALTERNATE,
                      "GPIO_MODE_ANALOG": PIN_MODE_ANALOG,
                      "GPIO_MODE_INPUT": PIN_MODE_INPUT,
                      "GPIO_MODE_OUTPUT": PIN_MODE_OUTPUT,
                      "GPIO_MODE_OUTPUT_PP": PIN_MODE_OUTPUT,
                      "GPIO_MODE_OUTPUT_OD": PIN_MODE_OUTPUT}
PIN_OTYPE_TRANSLATE = {"GPIO_MODE_OUTPUT_PP": PIN_OTYPE_PUSHPULL,
                       "GPIO_MODE_OUTPUT_OD": PIN_OTYPE_OPENDRAIN}
PIN_OSPEED_TRANSLATE = {"GPIO_SPEED_FREQ_LOW": PIN_OSPEED_VERYLOW,
                        "GPIO_SPEED_FREQ_MEDIUM": PIN_OSPEED_LOW,
                        "GPIO_SPEED_FREQ_HIGH": PIN_OSPEED_MEDIUM,
                        "GPIO_SPEED_FREQ_VERY_HIGH": PIN_OSPEED_HIGH
                        }
PIN_PUPDR_TRANSLATE = {"GPIO_NOPULL": PIN_PUPDR_FLOATING,
                       "GPIO_PULLUP": PIN_PUPDR_PULLUP,
                       "GPIO_PULLDOWN": PIN_PUPDR_PULLDOWN}
# Command line: either a CubeMX install (-m) or a raw GPIO XML (-g).
parser = ArgumentParser(description='Generate ChibiOS GPIO header file from STM32CubeMX project files.')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-m', '--mx', default='', type=str, help='STM32CubeMX path. Invalid if -g is used.')
group.add_argument('-g', '--gpio', default='', type=str, help='STM32CubeMX Gpio file, if you don\'t have STM32CubeMX installed. Invalid if -m is used.')
parser.add_argument('-p', '--project', required=True, type=str, help="STM32CubeMX Project file")
parser.add_argument('-o', '--output', default='board_gpio.h', type=str, help='Output file name')
def open_xml(filename):
    """Parse an XML file, stripping the first default-namespace
    declaration so element lookups need no namespace prefixes."""
    with open(filename, 'r') as xmlfile:
        raw = xmlfile.read()
    cleaned = re.sub(' xmlns="[^"]+"', '', raw, count=1)
    return etree.fromstring(cleaned)
def char_range(c1, c2):
    """Generates the characters from `c1` to `c2`, inclusive."""
    code = ord(c1)
    last = ord(c2)
    while code <= last:
        yield chr(code)
        code += 1
def get_gpio_file(proj_file, mx_path):
    """Locate the GPIO modes XML for the MCU named in a CubeMX project.

    Reads ``Mcu.Name`` from the .ioc project file, then probes the given
    CubeMX path plus two standard install locations for the MCU database.
    Exits the process on any failure. Returns the GPIO XML path (or None
    if the MCU database lists no GPIO IP).
    """
    mcu_name = None
    gpio_file = None
    path = None
    mcu_info = None
    print('Opening ' + proj_file)
    with open(proj_file, 'r') as f:
        proj_data = f.readlines()
    for l in proj_data:
        if l.startswith('Mcu.Name'):
            print('MCU is ' + l.split('=')[-1].strip())
            mcu_name = '{}.xml'.format(l.split('=')[-1].strip())
    if not mcu_name:
        print('Could not find MCU name in project file')
        exit(1)
    # F1 parts use the legacy (pre-MODER) GPIO block and are unsupported.
    if "STM32F1" in mcu_name:
        print('STM32F1xx are not compatible with this script. (old GPIO)')
        exit(1)
    found = False
    # Probe: user-supplied path, then default Linux/Windows installs.
    for p in (mx_path,
              '{0}{1}STM32CubeMX'.format(expanduser("~"), sep),
              'C:{0}Program Files{0}STMicroelectronics{0}STM32Cube{0}STM32CubeMX'.format(sep)):
        if not p:
            continue
        try:
            path = '{1}{0}db{0}mcu{0}'.format(sep, p)
            mcu_info = open_xml(path + mcu_name)
            found = True
            break
        except IOError:
            continue
    if not found:
        print('Could not find GPIO file')
        exit(1)
    print('Opened ' + path)
    # The MCU sheet names the GPIO IP version used to build the filename.
    for ip in mcu_info.findall("IP"):
        if ip.attrib['Name'] == 'GPIO':
            gpio_file = '{0}{2}IP{2}GPIO-{1}_Modes.xml'.format(path,
                                                              ip.attrib['Version'],
                                                              sep)
    return gpio_file
def read_gpio(filename):
    """Parse a CubeMX GPIO modes XML into a plain dict.

    Returns ``{'ports': {port: {pin: {signal: af_number}}},
    'defaults': {param: default}, 'modes': {param: [values]}}``.
    """
    gpio = {'ports': {}, 'defaults': {}, 'modes': {}}
    print('Opening GPIO file ' + filename)
    root = open_xml(filename)
    gpio['defaults']['GPIO_Mode'] = 'GPIO_MODE_ANALOG'
    # Collect defaults and the allowed values of each GPIO_* parameter.
    for modes in root.findall("RefParameter"):
        try:
            name = modes.attrib['Name']
            gpio['defaults'][name] = modes.attrib['DefaultValue']
            gpio['modes'][name] = []
        except KeyError as e:
            # Parameter without a default -- nothing to record.
            continue
        if 'GPIO_' not in name:
            continue
        for m in modes.findall("PossibleValue"):
            prop_val = m.attrib['Value']
            gpio['modes'][name].append(prop_val)
    # Collect, per pin (e.g. "PA3"), its alternate-function numbers.
    for pin in root.findall('GPIO_Pin'):
        try:
            port = pin.attrib['Name'][1]
            num = int(pin.attrib['Name'][2:])
            if port not in gpio['ports']:
                gpio['ports'][port] = {}
            if num not in gpio['ports'][port]:
                gpio['ports'][port][num] = {}
        except ValueError as e:
            # Name didn't parse as P<port><number> -- skip the pin.
            continue
        for s in pin.findall('PinSignal'):
            try:
                af = s.find('SpecificParameter/PossibleValue').text
                # Value looks like "GPIO_AF7_USART1": extract the number.
                af = int(''.join(af.split('_')[1])[2:])
                gpio['ports'][port][num][s.attrib['Name']] = af
            except ValueError as e:
                print_exc(e)
            except AttributeError as e:
                print_exc(e)
    return gpio
# Extract the per-pin signal configuration from a CubeMX .ioc project file.
def read_project(gpio, filename):
    """Parse a CubeMX ``.ioc`` project file into per-pad configuration.

    Args:
        gpio: dict produced by ``read_gpio()``; supplies the available
            ports (``'ports'``) and the chip reset defaults (``'defaults'``).
        filename: path to the ``.ioc`` project file.

    Returns:
        ``{port_letter: {pin_number: pad_dict}}`` where each pad_dict
        starts as a copy of ``DEFAULT_PAD`` (analog) and is overridden by
        the properties found in the project file.
    """
    print('Opening project file ' + filename)
    with open(filename, 'r') as mx_file:
        tmp = mx_file.readlines()
    pads = {}
    # Default all pads to analog, with the chip's reset pull/type/speed.
    for p in gpio['ports'].keys():
        pads[p] = {}
        for i in range(0, 16):
            pads[p][i] = DEFAULT_PAD.copy()
            pads[p][i]['PUPDR'] = PIN_PUPDR_TRANSLATE[gpio['defaults']['GPIO_PuPdOD']]
            pads[p][i]['OTYPER'] = PIN_OTYPE_TRANSLATE[gpio['defaults']['GPIO_ModeDefaultOutputPP']]
            pads[p][i]['OSPEEDR'] = PIN_OSPEED_TRANSLATE[gpio['defaults']['GPIO_Speed']]
    # .ioc lines look like 'PA10.GPIO_Label=FOO'; oscillator pads may be
    # named e.g. 'PH0-OSC_IN'.
    for t in tmp:
        if re.search(r"^P[A-Z]\d{1,2}(-OSC.+)?\.", t, re.M):
            split = t.split('=')
            pad_name = split[0].split(".")[0]
            pad_port = pad_name[1:2]
            # Drop '.'/'-' so 'PH0-OSC_IN' still parses the pin number.
            pad_num = int(pad_name[2:4].replace('.', '').replace('-', ''))
            pad_prop = split[0].split(".")[-1]
            prop_value = split[-1].rstrip('\r\n')
            if pad_prop == "Signal":
                # CubeMX prefixes timer signals with 'S_' — strip it.
                if 'S_TIM' in prop_value:
                    prop_value = prop_value[2:]
                if prop_value.startswith('ADC') \
                        or 'DAC' in prop_value \
                        or 'OSC' in prop_value:
                    pads[pad_port][pad_num]["MODER"] = PIN_MODE_ANALOG
                elif 'GPIO_Output' == prop_value:
                    pads[pad_port][pad_num]["MODER"] = PIN_MODE_OUTPUT
                elif 'GPIO_Input' == prop_value:
                    pads[pad_port][pad_num]["MODER"] = PIN_MODE_INPUT
                else:
                    # Anything else is an alternate-function signal.
                    pads[pad_port][pad_num]["SIGNAL"] = prop_value
                    pads[pad_port][pad_num]["MODER"] = PIN_MODE_ALTERNATE
                    pads[pad_port][pad_num]["OSPEEDR"] = PIN_OSPEED_MEDIUM
            elif pad_prop == "GPIO_Mode":
                pads[pad_port][pad_num]["MODER"] = PIN_MODE_TRANSLATE[prop_value]
            elif pad_prop == "GPIO_Label":
                pads[pad_port][pad_num]["LABEL"] = prop_value
            elif pad_prop == "GPIO_PuPd":
                pads[pad_port][pad_num]["PUPDR"] = PIN_PUPDR_TRANSLATE[prop_value]
            elif pad_prop == "GPIO_ModeDefaultOutputPP":
                pads[pad_port][pad_num]["OTYPER"] = PIN_OTYPE_TRANSLATE[prop_value]
                pads[pad_port][pad_num]["MODER"] = PIN_MODE_OUTPUT
            elif pad_prop == "GPIO_Speed":
                pads[pad_port][pad_num]["OSPEEDR"] = PIN_OSPEED_TRANSLATE[prop_value]
    return pads
# Add defines for all pins with labels
def gen_defines(project):
    """Build ``{define_name: value}`` pairs for the board template.

    Every pad with a label (explicit GPIO_Label or, failing that, its
    signal name) gets ``PORT_<label>`` / ``PAD_<label>`` defines; timer
    channel signals (``TIMx_CHn``) additionally get ``TIM_``, ``CCR_``,
    ``PWMD_``, ``ICUD_`` and ``CHN_`` defines.

    Note: pads lacking a label get their ``'LABEL'`` entry filled in from
    ``'SIGNAL'`` in place, i.e. *project* is mutated (the template and
    later passes rely on this).
    """
    defines = {}
    for port_key in sorted(project.keys()):
        for pad_key in sorted(project[port_key].keys()):
            pad_data = project[port_key][pad_key]
            # Used but unlabelled pads inherit their signal as the label.
            if pad_data['SIGNAL'] != 'UNUSED' and not pad_data['LABEL']:
                pad_data['LABEL'] = pad_data['SIGNAL']
            # '-' is not valid in a C identifier.
            pad_data['LABEL'] = pad_data['LABEL'].replace('-', '_')
            label = pad_data['LABEL']
            signal = pad_data['SIGNAL']
            if not label:
                continue
            defines['PORT_' + label] = 'GPIO' + port_key
            defines['PAD_' + label] = pad_key
            if re.search(r"TIM\d+_CH\d$", signal, re.M):
                # e.g. 'TIM12_CH3' -> timer name 'TIM12', channel 3.
                timer = signal.replace('S_TIM', '').replace('_CH', '')[:-1]
                # BUGFIX: use the full timer number. The previous
                # 'timer[-1]' kept only the last digit, so TIM12 produced
                # PWMD2/ICUD2 instead of PWMD12/ICUD12. Single-digit
                # timers are unaffected.
                tim_num = timer[3:]
                ch_num = int(signal[-1:])
                defines['TIM_' + label] = timer
                # NOTE(review): CCR keeps the original timer-number scheme;
                # confirm whether the channel's CCRn was actually intended.
                defines['CCR_' + label] = 'CCR' + tim_num
                defines['PWMD_' + label] = 'PWMD' + tim_num
                defines['ICUD_' + label] = 'ICUD' + tim_num
                # ChibiOS channel indices are zero-based.
                defines['CHN_' + label] = ch_num - 1
    return defines
# Build the per-port register initialiser strings for the board template.
def gen_ports(gpio, project):
    """Return ``{port_letter: {conf_name: [formatted strings]}}``.

    For each port, every entry of ``PIN_CONF_LIST`` (MODER, OTYPER, ...)
    is rendered per pin, then the two alternate-function registers
    (``PIN_CONF_LIST_AF``: AFRL = pins 0-7, AFRH = pins 8-15) are filled
    from the AF numbers found in *gpio*; pins without a mapped signal
    default to AF0.
    """
    ports = {}
    for port_key in sorted(project.keys()):
        port = {}
        # Plain per-pin configuration registers.
        for conf in PIN_CONF_LIST:
            port[conf] = [project[port_key][pin][conf].format(pin)
                          for pin in project[port_key]]
        # Alternate functions, low then high register (deduplicated: the
        # original had two nearly identical copies of this loop).
        for conf, pin_range in zip(PIN_CONF_LIST_AF,
                                   (range(0, 8), range(8, 16))):
            entries = []
            for pin in pin_range:
                try:
                    af = project[port_key][pin]['SIGNAL']
                    out = PIN_AFIO_AF.format(pin, gpio['ports'][port_key][pin][af])
                except KeyError:
                    # Pin unused or signal has no AF mapping: AF0.
                    out = PIN_AFIO_AF.format(pin, 0)
                entries.append(out)
            port[conf] = entries
        ports[port_key] = port
    return ports
if __name__ == '__main__':
    args = parser.parse_args()
    cur_path = dirname(abspath(__file__))
    # Use an explicit GPIO XML when given, otherwise locate it via the
    # project file and the CubeMX installation.
    if args.gpio:
        gpio = read_gpio(args.gpio)
    else:
        gpio = read_gpio(get_gpio_file(args.project, args.mx))
    proj = read_project(gpio, args.project)
    defines = gen_defines(proj)
    ports = gen_ports(gpio, proj)
    with open(cur_path + '/board_gpio.tpl', 'r') as tpl_file:
        template = Template(tpl_file.read())
    # The template wants deterministic, key-sorted (name, value) pairs.
    defines_sorted = sorted(defines.items())
    ports_sorted = sorted(ports.items())
    template.stream(defines=defines_sorted, ports=ports_sorted).dump(args.output)
    print('File generated at ' + args.output)
|
netik/dc26_spqr_badge
|
sw/firmware/ChibiOS-Contrib/tools/mx2board.py
|
Python
|
apache-2.0
| 12,105
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 16:29
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``HackSawBlade`` model.

    ``HackSawBlade`` is declared with ``bases=('products.sablesawblade',)``
    and a ``parent_link`` OneToOneField primary key, i.e. it is a
    multi-table-inheritance child of ``SableSawBlade`` with no fields of
    its own — only different verbose names.
    """

    dependencies = [
        ('products', '0035_auto_20160102_1442'),
    ]

    operations = [
        migrations.CreateModel(
            name='HackSawBlade',
            fields=[
                # parent_link OneToOne PK linking to the MTI parent model.
                ('sablesawblade_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='products.SableSawBlade')),
            ],
            options={
                'verbose_name_plural': 'Metallhandsägeblätter',
                'verbose_name': 'Metallhandsägeblatt',
            },
            bases=('products.sablesawblade',),
        ),
    ]
|
n2o/guhema
|
products/migrations/0036_hacksawblade.py
|
Python
|
mit
| 856
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.