| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['LinearTrend'] , ['Seasonal_MonthOfYear'] , ['NoAR'] );
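# The four lists pick, in order, the transformation, trend, periodic
# component, and autoregression to enable for this model build (an
# assumption inferred from the argument values used across the pyaf tests).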
|
{
"content_hash": "15be80326014cb40b2ce6e95ce9bfa0f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 94,
"avg_line_length": 41.75,
"alnum_prop": 0.7245508982035929,
"repo_name": "antoinecarme/pyaf",
"id": "8f481b49727c5a9bf20c47e66498cce34a0eb0a7",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_LinearTrend_Seasonal_MonthOfYear_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import time
import base
import v2_swagger_client
from v2_swagger_client.rest import ApiException
from base import _assert_status_code
class Robot(base.Base, object):
def __init__(self):
super(Robot,self).__init__(api_type = "robot")
def list_robot(self, expect_status_code = 200, **kwargs):
try:
body, status_code, _ = self._get_client(**kwargs).list_robot_with_http_info()
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
return []
else:
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
return body
def create_access_list(self, right_map = [True] * 10):
_assert_status_code(10, len(right_map), r"Please provide the full access list for the system robot account. Expected {}, while actual input count is {}.")
action_pull = "pull"
action_push = "push"
action_read = "read"
action_create = "create"
action_del = "delete"
access_def_list = [
("repository", action_pull),
("repository", action_push),
("artifact", action_del),
("helm-chart", action_read),
("helm-chart-version", action_create),
("helm-chart-version", action_del),
("tag", action_create),
("tag", action_del),
("artifact-label", action_create),
("scan", action_create)
]
access_list = []
for i in range(len(access_def_list)):
if right_map[i] is True:
robotAccountAccess = v2_swagger_client.Access(resource = access_def_list[i][0], action = access_def_list[i][1])
access_list.append(robotAccountAccess)
return access_list
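# A minimal usage sketch (assumes an existing Robot instance named `robot`):
# clearing the second flag drops the ("repository", "push") entry while
# keeping the other nine accesses.
#
# rights = [True] * 10
# rights[1] = False
# access_list = robot.create_access_list(right_map=rights)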
def create_project_robot(self, project_name, duration, robot_name = None, robot_desc = None,
has_pull_right = True, has_push_right = True, has_chart_read_right = True,
has_chart_create_right = True, expect_status_code = 201, expect_response_body = None,
**kwargs):
if robot_name is None:
robot_name = base._random_name("robot")
if robot_desc is None:
robot_desc = base._random_name("robot_desc")
if has_pull_right is False and has_push_right is False:
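# A robot account needs at least one repository right; fall back to pull.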
has_pull_right = True
access_list = []
action_pull = "pull"
action_push = "push"
action_read = "read"
action_create = "create"
if has_pull_right is True:
robotAccountAccess = v2_swagger_client.Access(resource = "repository", action = action_pull)
access_list.append(robotAccountAccess)
if has_push_right is True:
robotAccountAccess = v2_swagger_client.Access(resource = "repository", action = action_push)
access_list.append(robotAccountAccess)
if has_chart_read_right is True:
robotAccountAccess = v2_swagger_client.Access(resource = "helm-chart", action = action_read)
access_list.append(robotAccountAccess)
if has_chart_create_right is True:
robotAccountAccess = v2_swagger_client.Access(resource = "helm-chart-version", action = action_create)
access_list.append(robotAccountAccess)
robotaccountPermissions = v2_swagger_client.RobotPermission(kind = "project", namespace = project_name, access = access_list)
permission_list = []
permission_list.append(robotaccountPermissions)
robotAccountCreate = v2_swagger_client.RobotCreate(name=robot_name, description=robot_desc, duration=duration, level="project", permissions = permission_list)
data = []
try:
data, status_code, header = self._get_client(**kwargs).create_robot_with_http_info(robotAccountCreate)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
if expect_response_body is not None:
base._assert_status_body(expect_response_body, e.body)
else:
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(201, status_code)
return base._get_id_from_header(header), data
def get_robot_account_by_id(self, robot_id, **kwargs):
data, status_code, _ = self._get_client(**kwargs).get_robot_by_id_with_http_info(robot_id)
return data
def disable_robot_account(self, robot_id, disable, expect_status_code = 200, **kwargs):
data = self.get_robot_account_by_id(robot_id, **kwargs)
robotAccountUpdate = v2_swagger_client.RobotCreate(name=data.name, description=data.description, duration=data.duration, level=data.level, permissions = data.permissions, disable = disable)
_, status_code, _ = self._get_client(**kwargs).update_robot_with_http_info(robot_id, robotAccountUpdate)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
def delete_robot_account(self, robot_id, expect_status_code = 200, **kwargs):
_, status_code, _ = self._get_client(**kwargs).delete_robot_with_http_info(robot_id)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
def create_system_robot(self, permission_list, duration, robot_name = None, robot_desc = None, expect_status_code = 201, **kwargs):
if robot_name is None:
robot_name = base._random_name("robot")
if robot_desc is None:
robot_desc = base._random_name("robot_desc")
robotAccountCreate = v2_swagger_client.RobotCreate(name=robot_name, description=robot_desc, duration=duration, level="system", disable = False, permissions = permission_list)
data = []
data, status_code, header = self._get_client(**kwargs).create_robot_with_http_info(robotAccountCreate)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(201, status_code)
return base._get_id_from_header(header), data
def update_robot_account(self, robot_id, robot, expect_status_code = 200, **kwargs):
_, status_code, _ = self._get_client(**kwargs).update_robot_with_http_info(robot_id, robot)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
def update_system_robot_account(self, robot_id, robot_name, robot_account_Permissions_list, disable = None, expect_status_code = 200, **kwargs):
robot = v2_swagger_client.Robot(id = robot_id, name = robot_name, level = "system", permissions = robot_account_Permissions_list)
if disable in (True, False):
robot.disable = disable
self.update_robot_account(robot_id, robot, expect_status_code = expect_status_code, **kwargs)
def refresh_robot_account_secret(self, robot_id, robot_new_sec, expect_status_code = 200, **kwargs):
robot_sec = v2_swagger_client.RobotSec(secret = robot_new_sec)
data, status_code, _ = self._get_client(**kwargs).refresh_sec_with_http_info(robot_id, robot_sec)
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
print("Refresh new secret:", data)
return data
|
{
"content_hash": "3557d2913c803a2f1342089030ab87e6",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 197,
"avg_line_length": 51.936619718309856,
"alnum_prop": 0.6385084745762711,
"repo_name": "wy65701436/harbor",
"id": "3ef9b9d113c8924b33fb41c3b6286712412d806e",
"size": "7400",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/apitests/python/library/robot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2122"
},
{
"name": "Dockerfile",
"bytes": "17013"
},
{
"name": "Go",
"bytes": "5543957"
},
{
"name": "HTML",
"bytes": "857388"
},
{
"name": "JavaScript",
"bytes": "8987"
},
{
"name": "Jinja",
"bytes": "199133"
},
{
"name": "Makefile",
"bytes": "41515"
},
{
"name": "PLpgSQL",
"bytes": "11799"
},
{
"name": "Python",
"bytes": "532665"
},
{
"name": "RobotFramework",
"bytes": "482646"
},
{
"name": "SCSS",
"bytes": "113564"
},
{
"name": "Shell",
"bytes": "81765"
},
{
"name": "Smarty",
"bytes": "1931"
},
{
"name": "TypeScript",
"bytes": "2048995"
}
],
"symlink_target": ""
}
|
"""
MIT License
Copyright (c) 2017 Igor Kroitor
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#------------------------------------------------------------------------------
__version__ = '1.7.65'
#==============================================================================
|
{
"content_hash": "11f9489b172fbb9242403d64f0960e5a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 43.55172413793103,
"alnum_prop": 0.6904196357878069,
"repo_name": "aiueogawa/ccxt",
"id": "f3ec860b8562f31de470ab254493594245174ddb",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datagrid/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4029415"
},
{
"name": "PHP",
"bytes": "799664"
},
{
"name": "Python",
"bytes": "1364902"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
}
|
"""
This module provides some custom wx widgets for the file converter perspective
"""
import wx
import os
from sas.sascalc.dataloader.data_info import Vector
from sas.sasgui.guiframe.utils import check_float
class VectorInput(object):
"""
An input field for inputting 2 (or 3) components of a vector.
"""
def __init__(self, parent, control_name, callback=None,
labels=["x: ", "y: ", "z: "], z_enabled=False):
"""
Create the control
:param parent: The window to add the control to
:param control_name: All TextCtrl names will start with control_name
:param callback: The function to call when the text is changed
:param labels: An array of labels for the TextCtrls
:param z_enabled: Whether or not to show the z input field
"""
self.parent = parent
self.control_name = control_name
self._callback = callback
self._name = control_name
self.labels = labels
self.z_enabled = z_enabled
self._sizer = None
self._inputs = {}
self._do_layout()
def GetSizer(self):
"""
Get the control's sizer
:return sizer: a wx.BoxSizer object
"""
return self._sizer
def GetName(self):
return self._name
def GetValue(self):
"""
Get the value of the vector input
:return v: A Vector object
"""
v = Vector()
all_valid, _ = self.Validate()
if not all_valid: return v
for direction, control in self._inputs.iteritems():
try:
value = float(control.GetValue())
setattr(v, direction, value)
except ValueError: # Text field is empty or not a valid float
pass
return v
def SetValue(self, vector):
"""
Set the value of the vector input
:param vector: A Vector object
"""
directions = ['x', 'y']
if self.z_enabled: directions.append('z')
for direction in directions:
value = getattr(vector, direction)
if value is None: value = ''
self._inputs[direction].SetValue(str(value))
def Validate(self):
"""
Validate the contents of the inputs
:return all_valid: Whether or not the inputs are valid
:return invalid_ctrl: A control that is not valid
(or None if all are valid)
"""
all_valid = True
invalid_ctrl = None
for control in self._inputs.values():
if control.GetValue() == '': continue
control.SetBackgroundColour(wx.WHITE)
control_valid = check_float(control)
if not control_valid:
all_valid = False
invalid_ctrl = control
return all_valid, invalid_ctrl
def _do_layout(self):
self._sizer = wx.BoxSizer(wx.HORIZONTAL)
x_label = wx.StaticText(self.parent, -1, self.labels[0],
style=wx.ALIGN_CENTER_VERTICAL)
self._sizer.Add(x_label, 0, wx.ALIGN_CENTER_VERTICAL)
x_input = wx.TextCtrl(self.parent, -1,
name="{}_x".format(self.control_name), size=(50, -1))
self._sizer.Add(x_input)
self._inputs['x'] = x_input
x_input.Bind(wx.EVT_TEXT, self._callback)
self._sizer.AddSpacer((10, -1))
y_label = wx.StaticText(self.parent, -1, self.labels[1],
style=wx.ALIGN_CENTER_VERTICAL)
self._sizer.Add(y_label, 0, wx.ALIGN_CENTER_VERTICAL)
y_input = wx.TextCtrl(self.parent, -1,
name="{}_y".format(self.control_name), size=(50, -1))
self._sizer.Add(y_input)
self._inputs['y'] = y_input
y_input.Bind(wx.EVT_TEXT, self._callback)
if self.z_enabled:
self._sizer.AddSpacer((10, -1))
z_label = wx.StaticText(self.parent, -1, self.labels[2],
style=wx.ALIGN_CENTER_VERTICAL)
self._sizer.Add(z_label, 0, wx.ALIGN_CENTER_VERTICAL)
z_input = wx.TextCtrl(self.parent, -1,
name="{}_z".format(self.control_name), size=(50, -1))
self._sizer.Add(z_input)
self._inputs['z'] = z_input
z_input.Bind(wx.EVT_TEXT, self._callback)
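# A usage sketch (assumes a wx.Panel named `panel` with a sizer named
# `panel_sizer`; both names are hypothetical):
#
# vec_input = VectorInput(panel, "offset", callback=panel.on_change,
# z_enabled=True)
# panel_sizer.Add(vec_input.GetSizer())
# vector = vec_input.GetValue() # a Vector with float components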
class FileInput(object):
def __init__(self, parent, wildcard=''):
self.parent = parent
self._sizer = None
self._text_ctrl = None
self._button_ctrl = None
self._filepath = ''
self._wildcard = wildcard
self._do_layout()
def GetCtrl(self):
return self._sizer
def GetPath(self):
return self._filepath
def SetWildcard(self, wildcard):
self._wildcard = wildcard
def _on_text_change(self, event):
event.Skip()
self._filepath = self._text_ctrl.GetValue()
def _on_browse(self, event):
event.Skip()
initial_path = self._filepath
initial_dir = os.getcwd()
if not os.path.isfile(initial_path):
initial_path = ''
else:
initial_dir = os.path.split(initial_path)[0]
file_dlg = wx.FileDialog(self.parent, defaultDir=initial_dir,
defaultFile=initial_path, wildcard=self._wildcard,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if file_dlg.ShowModal() == wx.ID_CANCEL:
file_dlg.Destroy()
return
self._filepath = file_dlg.GetPath()
file_dlg.Destroy()
self._text_ctrl.SetValue(self._filepath)
def _do_layout(self):
self._sizer = wx.BoxSizer(wx.HORIZONTAL)
self._text_ctrl = wx.TextCtrl(self.parent, -1)
self._sizer.Add(self._text_ctrl, 1, wx.EXPAND)
self._text_ctrl.Bind(wx.EVT_TEXT, self._on_text_change)
self._sizer.AddSpacer(5)
self._button_ctrl = wx.Button(self.parent, -1, "Browse")
self._sizer.Add(self._button_ctrl)
self._button_ctrl.Bind(wx.EVT_BUTTON, self._on_browse)
|
{
"content_hash": "475324ab007e8c174d0960af241dc235",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 78,
"avg_line_length": 31.53191489361702,
"alnum_prop": 0.5706815114709851,
"repo_name": "lewisodriscoll/sasview",
"id": "a0df9e02f1e8840ff0603feff6d0a8ff5c66da19",
"size": "5928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sas/sasgui/perspectives/file_converter/converter_widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AutoIt",
"bytes": "5122"
},
{
"name": "Batchfile",
"bytes": "9544"
},
{
"name": "C",
"bytes": "79248"
},
{
"name": "C++",
"bytes": "228413"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "3696992"
},
{
"name": "Shell",
"bytes": "12936"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name='django-migratron',
version='0.1.3',
author='Chase Seibert',
author_email='chase.seibert@gmail.com',
packages=[
'migratron',
'migratron.management',
'migratron.management.commands',
'migratron.templates',
],
url='https://github.com/chase-seibert/django-migratron',
download_url='https://github.com/chase-seibert/django-migratron/tarball/master',
license='LICENSE.txt',
description='Create and run different buckets of unordered schema and data migrations.',
requires=[
'yaml',
'yamlfield',
'termcolor',
],
)
|
{
"content_hash": "eed640f312a28e0e634688ed8a7e70dc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 28.608695652173914,
"alnum_prop": 0.6398176291793313,
"repo_name": "chase-seibert/django-migratron",
"id": "4df29e5016ce1138d76fbb44b8510a948f162c85",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33939"
}
],
"symlink_target": ""
}
|
import os
import sys
from .base import BaseCommand, CommandError
from opencivicdata.common import JURISDICTION_CLASSIFICATIONS
from opencivicdata.divisions import Division
def prompt(ps, default=''):
return input(ps).strip() or default
CLASS_DICT = {'events': 'Event',
'people': 'Person',
'bills': 'Bill',
'votes': 'Vote',
'disclosures': 'Disclosure'}
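# For example, initializing a module named 'seattle' with only the 'people'
# scraper selected generates a SeattlePersonScraper class in
# seattle/people.py (per the template writer below).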
def write_jurisdiction_template(dirname, short_name, long_name, division_id, classification, url,
scraper_types):
camel_case = short_name.title().replace(' ', '')
# write __init__
lines = ['# encoding=utf-8', 'from pupa.scrape import Jurisdiction, Organization']
for stype in scraper_types:
lines.append('from .{} import {}{}Scraper'.format(stype, camel_case, CLASS_DICT[stype]))
lines.append('')
lines.append('')
lines.append('class {}(Jurisdiction):'.format(camel_case))
lines.append(' division_id = "{}"'.format(division_id))
lines.append(' classification = "{}"'.format(classification))
lines.append(' name = "{}"'.format(long_name))
lines.append(' url = "{}"'.format(url))
lines.append(' scrapers = {')
for stype in scraper_types:
lines.append(' "{}": {}{}Scraper,'.format(stype, camel_case, CLASS_DICT[stype]))
lines.append(' }')
lines.append('')
lines.append(' def get_organizations(self):')
lines.append(' #REQUIRED: define an organization using this format')
lines.append(' #where org_name is something like Seattle City Council')
lines.append(' #and classification is described here:')
lines.append(' org = Organization(name="org_name", classification="legislature")')
lines.append('')
lines.append(' # OPTIONAL: add posts to your organization using this format,')
lines.append(' # where label is a human-readable description of the post '
'(eg "Ward 8 councilmember")')
lines.append(' # and role is the position type (eg councilmember, alderman, mayor...)')
lines.append(' # skip entirely if you\'re not writing a people scraper.')
lines.append(' org.add_post(label="position_description", role="position_type")')
lines.append('')
lines.append(' #REQUIRED: yield the organization')
lines.append(' yield org')
lines.append('')
with open(os.path.join(dirname, '__init__.py'), 'w') as of:
of.write('\n'.join(lines))
# write scraper files
for stype in scraper_types:
lines = ['from pupa.scrape import Scraper']
lines.append('from pupa.scrape import {}'.format(CLASS_DICT[stype]))
lines.append('')
lines.append('')
lines.append('class {}{}Scraper(Scraper):'.format(camel_case, CLASS_DICT[stype]))
lines.append('')
lines.append(' def scrape(self):')
lines.append(' # needs to be implemented')
lines.append(' pass')
lines.append('')
with open(os.path.join(dirname, stype + '.py'), 'w') as of:
of.write('\n'.join(lines))
class Command(BaseCommand):
name = 'init'
help = 'start a new pupa scraper'
def add_args(self):
self.add_argument('module', type=str, help='name of the new scraper module')
def handle(self, args, other):
name = prompt('jurisdiction name (e.g. City of Seattle): ')
division = prompt('division id (look this up in the opencivicdata/ocd-division-ids '
'repository): ')
classification = prompt('classification (can be: {}): '
.format(', '.join(JURISDICTION_CLASSIFICATIONS)))
url = prompt('official URL: ')
try:
Division.get(division)
except ValueError:
print("\nERROR: Division ID is invalid.",
"Please find the correct division_id here",
"https://github.com/opencivicdata/ocd-division-ids/tree/master/identifiers",
"or contact open-civic-data@googlegroups.org to add a new country\n")
sys.exit(1)
if os.path.exists(args.module):
raise CommandError(args.module + ' directory already exists')
os.makedirs(args.module)
# Prompt defaults to 'Y' until the user picks a scraper type, then to 'N'.
scraper_types = CLASS_DICT.keys()
selected_scraper_types = []
for stype in scraper_types:
prompt_str = 'create {} scraper? {}: '.format(
stype, '[y/N]' if selected_scraper_types else '[Y/n]')
default = 'N' if selected_scraper_types else 'Y'
result = prompt(prompt_str, default).upper()
if result == 'Y':
selected_scraper_types.append(stype)
write_jurisdiction_template(args.module, args.module, name, division, classification, url,
selected_scraper_types)
|
{
"content_hash": "4d59384d42aa613c6d335fcd35e5f53e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 98,
"avg_line_length": 42.567796610169495,
"alnum_prop": 0.5922755325502688,
"repo_name": "influence-usa/pupa",
"id": "603c75edd3c843d1e8504c05472b57c2718738d8",
"size": "5023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pupa/cli/commands/init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "240929"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
}
|
import unittest2 as unittest
from Products.CMFCore.utils import getToolByName
from Products.UserAndGroupSelectionWidget import testing
from Products.UserAndGroupSelectionWidget.memberlookup import MemberLookup
class MemberLookupTest(unittest.TestCase):
layer = testing.USERANDGROUPSELECTIONWIDGET_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
class FakeWidget(object):
groupIdFilter = []
searchableProperties = []
self.widget = FakeWidget()
self.widget.searchableProperties = ('email', 'fullname',
'home_page', 'location', 'description')
username = 'example-user'
acl_users = getToolByName(self.portal, 'acl_users')
acl_users.userFolderAddUser(username, 'secret', ['Member'], [])
membership = getToolByName(self.portal, 'portal_membership')
self.member = membership.getMemberById(username)
self.member.setMemberProperties(mapping={
'email': 'test@exampLe1.com',
'fullname': 'Example2 User',
'home_page': 'http://exaMple3.com',
'location': 'Example4',
'description': 'Example5 description',
})
self.memberlookup = MemberLookup(self.portal, self.request, self.widget)
def test_search_user_via_username(self):
self.memberlookup.searchabletext = 'example-'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_search_user_via_email(self):
self.memberlookup.searchabletext = 'example1'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_search_user_via_fullname(self):
self.memberlookup.searchabletext = 'example2'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_search_user_via_homepage(self):
self.memberlookup.searchabletext = 'example3'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_search_user_via_location(self):
self.memberlookup.searchabletext = 'example4'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_search_user_via_description(self):
self.memberlookup.searchabletext = 'example5'
members = self.memberlookup.getMembers()
self.assertEqual(1, len(members))
self.assertEqual(self.member.getId(), members[0]['id'])
def test_disable_search_over_user_properties(self):
self.widget.searchableProperties = ()
memberlookup = MemberLookup(self.portal, self.request, self.widget)
memberlookup.searchabletext = 'example1'
self.assertEqual(0, len(memberlookup.getMembers()))
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
{
"content_hash": "44d31209f82cc878f3dd48ffde8fd645",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 35.89010989010989,
"alnum_prop": 0.6635027556644213,
"repo_name": "collective/Products.UserAndGroupSelectionWidget",
"id": "a92e09eeca6c006b45d7dfff77f98b7f92d02b1f",
"size": "3266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Products/UserAndGroupSelectionWidget/at/tests/test_memberlookup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2839"
},
{
"name": "Python",
"bytes": "45492"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse
from . import views
urlpatterns = patterns('',
url(r'^patients/$', views.PatientList.as_view(), name='patient_list'),
url(r'^patients-json/$', views.patient_list_json),
#url(r'^patient/add/$', views.PatientAddForm.as_view(), name='patient_add'),
url(r'^patient/(?P<slug>[-\w]+)/$', views.PatientDetail.as_view(), name='patient_detail'),
url(r'^patient/(?P<slug>[-\w]+)/add-medication/$', views.MedicationAdd.as_view(), name='med_add'),
url(r'^patient/(?P<slug>[-\w]+)/add-surgery/$', views.SurgeryAdd.as_view(), name='surgery_add'),
url(r'^patient/(?P<slug>[-\w]+)/add-seizure/$', views.SeizureAdd.as_view(), name='seizure_add'),
url(r'^visual/$', views.patient_visual),
)
|
{
"content_hash": "802c4fcecd4dcfcbe1d17853d0823af6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 102,
"avg_line_length": 57.42857142857143,
"alnum_prop": 0.6554726368159204,
"repo_name": "datakid/hh2014-epilepsy",
"id": "eb303c7c10b514072f2c2eb5d3150b43c0553d6c",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "records/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20413"
},
{
"name": "JavaScript",
"bytes": "13965"
},
{
"name": "Python",
"bytes": "14925"
}
],
"symlink_target": ""
}
|
"""
Checkers for Ruby related projects
"""
from checkers import base
class RubyVersionChecker(base.GitHubVersionChecker):
"""
Ruby project checker
"""
name = 'Ruby'
slug = 'ruby'
homepage = 'http://www.ruby-lang.org/'
repository = 'https://github.com/ruby/ruby'
@staticmethod
def _normalize_tag_name(name):
"""
Normalizes a GitHub tag name to be a PEP 440 compliant version name,
which in this case means replacing underscores with dots
Example:
v2_2_7 -> v2.2.7
:param name: tag name
:type name: str
:returns: normalized version name
:rtype: str
"""
return name.replace('_', '.')
def get_versions(self):
"""
Get the versions from GitHub tags
"""
return self._get_github_tags(normalize_func=self._normalize_tag_name)
class RailsVersionChecker(base.GitHubVersionChecker):
"""
Ruby on Rails project checker
"""
name = 'Ruby on Rails'
slug = 'rails'
homepage = 'http://rubyonrails.org/'
repository = 'https://github.com/rails/rails'
class JekyllVersionChecker(base.GitHubVersionChecker):
"""
Jekyll project checker
"""
name = 'Jekyll'
slug = 'jekyll'
homepage = 'https://jekyllrb.com/'
repository = 'https://github.com/jekyll/jekyll'
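# Adding a checker for another GitHub-hosted Ruby project is a matter of
# subclassing; a sketch, where 'Sinatra' is a hypothetical addition:
#
# class SinatraVersionChecker(base.GitHubVersionChecker):
# """Sinatra project checker"""
# name = 'Sinatra'
# slug = 'sinatra'
# homepage = 'http://sinatrarb.com/'
# repository = 'https://github.com/sinatra/sinatra'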
|
{
"content_hash": "da048a6fbdfb3544dd782bf83ac65ca0",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 24.69090909090909,
"alnum_prop": 0.6119293078055965,
"repo_name": "pawelad/verse",
"id": "973a1d39abe0bd627a3afd1882769585eb1b375d",
"size": "1358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verse/checkers/projects/ruby.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52941"
},
{
"name": "HTML",
"bytes": "9004"
},
{
"name": "Python",
"bytes": "126333"
}
],
"symlink_target": ""
}
|
import logging
import re
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db.models import ManyToManyField, Q
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from djblets.webapi.fields import (BooleanFieldType,
ChoiceFieldType,
DateTimeFieldType,
DictFieldType,
IntFieldType,
ResourceFieldType,
ResourceListFieldType,
StringFieldType)
from reviewboard.diffviewer.features import dvcs_feature
from reviewboard.hostingsvcs.errors import HostingServiceError
from reviewboard.reviews.builtin_fields import BuiltinFieldMixin
from reviewboard.reviews.errors import NotModifiedError, PublishError
from reviewboard.reviews.fields import (get_review_request_fields,
get_review_request_field)
from reviewboard.reviews.models import Group, ReviewRequest, ReviewRequestDraft
from reviewboard.scmtools.errors import (InvalidChangeNumberError,
SCMError)
from reviewboard.webapi.base import ImportExtraDataError, WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import (COMMIT_ID_ALREADY_EXISTS,
INVALID_CHANGE_NUMBER,
NOTHING_TO_PUBLISH,
PUBLISH_ERROR,
REPO_INFO_ERROR)
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
logger = logging.getLogger(__name__)
class ReviewRequestDraftResource(MarkdownFieldsMixin, WebAPIResource):
"""An editable draft of a review request.
This resource is used to actually modify a review request. Any changes made
in this draft can be published in order to become part of the public
review request, or it can be discarded.
Any POST or PUT request on this draft will cause the draft to be created
automatically. An initial POST is not required.
There is only ever a maximum of one draft per review request.
In order to access this resource, the user must either own the review
request, or it must have the ``reviews.can_edit_reviewrequest`` permission
set.
"""
model = ReviewRequestDraft
name = 'draft'
policy_id = 'review_request_draft'
singleton = True
model_parent_key = 'review_request'
mimetype_item_resource_name = 'review-request-draft'
fields = {
'id': {
'type': IntFieldType,
'description': 'The numeric ID of the draft.',
},
'review_request': {
'type': ResourceFieldType,
'resource': 'reviewboard.webapi.resources.review_request.'
'ReviewRequestResource',
'description': 'The review request that owns this draft.',
},
'last_updated': {
'type': DateTimeFieldType,
'description': 'The date and time that the draft was last '
'updated.',
},
'branch': {
'type': StringFieldType,
'description': 'The branch name.',
},
'bugs_closed': {
'type': StringFieldType,
'description': 'The new list of bugs closed or referenced by this '
'change.',
},
'depends_on': {
'type': ResourceListFieldType,
'resource': 'reviewboard.webapi.resources.review_request.'
'ReviewRequestResource',
'description': 'The list of review requests that this '
'review request depends on.',
'added_in': '1.7.8',
},
'changedescription': {
'type': StringFieldType,
'description': 'A custom description of what changes are being '
'made in this update. It often will be used to '
'describe the changes in the diff.',
'supports_text_types': True,
},
'changedescription_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``changedescription`` field.',
'added_in': '2.0.12',
},
'commit_id': {
'type': StringFieldType,
'description': 'The updated ID of the commit this review request '
'is based upon.',
'added_in': '2.0',
},
'description': {
'type': StringFieldType,
'description': 'The new review request description.',
'supports_text_types': True,
},
'description_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``description`` field.',
'added_in': '2.0.12',
},
'extra_data': {
'type': DictFieldType,
'description': 'Extra data as part of the draft. '
'This can be set by the API or extensions.',
'added_in': '2.0',
},
'public': {
'type': BooleanFieldType,
'description': 'Whether or not the draft is public. '
'This will always be false up until the time '
'it is first made public. At that point, the '
'draft is deleted.',
},
'submitter': {
'type': StringFieldType,
'description': 'The user who submitted the review request.',
},
'summary': {
'type': StringFieldType,
'description': 'The new review request summary.',
},
'target_groups': {
'type': StringFieldType,
'description': 'A comma-separated list of review groups '
'that will be on the reviewer list.',
},
'target_people': {
'type': StringFieldType,
'description': 'A comma-separated list of users that will '
'be on a reviewer list.',
},
'testing_done': {
'type': StringFieldType,
'description': 'The new testing done text.',
'supports_text_types': True,
},
'testing_done_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``testing_done`` field.',
'added_in': '2.0.12',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'Formerly responsible for indicating the text '
'type for text fields. Replaced by '
'``changedescription_text_type``, '
'``description_text_type``, and '
'``testing_done_text_type`` in 2.0.12.',
'added_in': '2.0',
'deprecated_in': '2.0.12',
},
}
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
item_child_resources = [
resources.draft_diff,
resources.draft_screenshot,
resources.draft_file_attachment,
]
CREATE_UPDATE_OPTIONAL_FIELDS = {
'branch': {
'type': StringFieldType,
'description': 'The new branch name.',
},
'bugs_closed': {
'type': StringFieldType,
'description': 'A comma-separated list of bug IDs.',
},
'commit_id': {
'type': StringFieldType,
'description': 'The updated ID of the commit this review request '
'is based upon.',
'added_in': '2.0',
},
'depends_on': {
'type': StringFieldType,
'description': 'The new list of dependencies of this review '
'request.',
'added_in': '1.7.8',
},
'changedescription': {
'type': StringFieldType,
'description': 'The change description for this update.',
'supports_text_types': True,
},
'changedescription_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The text type used for the ``changedescription`` '
'field.',
'added_in': '2.0.12',
},
'description': {
'type': StringFieldType,
'description': 'The new review request description.',
'supports_text_types': True,
},
'description_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The text type used for the ``description`` '
'field.',
'added_in': '2.0.12',
},
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'public': {
'type': BooleanFieldType,
'description': 'Whether or not to make the review public. '
'If a review is public, it cannot be made '
'private again.',
},
'publish_as_owner': {
'type': BooleanFieldType,
'description': 'Publish on behalf of the owner of the review '
'request. If setting ``submitter``, this will '
'publish on behalf of the previous owner.',
'added_in': '3.0.6',
},
'submitter': {
'type': StringFieldType,
'description': 'The user who submitted the review request.',
'added_in': '3.0',
},
'summary': {
'type': StringFieldType,
'description': 'The new review request summary.',
},
'target_groups': {
'type': StringFieldType,
'description': 'A comma-separated list of review groups '
'that will be on the reviewer list.',
},
'target_people': {
'type': StringFieldType,
'description': 'A comma-separated list of users that will '
'be on a reviewer list.',
},
'testing_done': {
'type': StringFieldType,
'description': 'The new testing done text.',
'supports_text_types': True,
},
'testing_done_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The text type used for the ``testing_done`` '
'field.',
'added_in': '2.0.12',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The mode for the ``changedescription``, '
'``description``, and ``testing_done`` fields.\n'
'\n'
'This is deprecated. Please use '
'``changedescription_text_type``, '
'``description_text_type``, and '
'``testing_done_text_type`` instead.',
'added_in': '2.0',
'deprecated_in': '2.0.12',
},
'trivial': {
'type': BooleanFieldType,
'description': 'If true, the review request will be published '
'without sending an e-mail notification.',
'added_in': '2.5',
},
'update_from_commit_id': {
'type': BooleanFieldType,
'description': 'If true, and if ``commit_id`` is provided, '
'the review request information and (when '
'supported) the diff will be updated based '
'on the commit ID.',
'added_in': '2.0',
},
}
VALUE_LIST_RE = re.compile(r'[, ]+')
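# e.g. VALUE_LIST_RE.split('dev, qa reviewers') -> ['dev', 'qa', 'reviewers']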
@classmethod
def prepare_draft(cls, request, review_request):
"""Creates a draft, if the user has permission to."""
if not review_request.is_mutable_by(request.user):
raise PermissionDenied
return ReviewRequestDraft.create(review_request)
def get_queryset(self, request, *args, **kwargs):
review_request = resources.review_request.get_object(
request, *args, **kwargs)
return self.model.objects.filter(review_request=review_request)
def get_is_changedescription_rich_text(self, obj):
return obj.changedesc_id is not None and obj.changedesc.rich_text
def serialize_bugs_closed_field(self, obj, **kwargs):
return obj.get_bug_list()
def serialize_changedescription_field(self, obj, **kwargs):
if obj.changedesc:
return obj.changedesc.text
else:
return ''
def serialize_changedescription_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def serialize_description_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def serialize_status_field(self, obj, **kwargs):
return ReviewRequest.status_to_string(obj.status)
def serialize_public_field(self, obj, **kwargs):
return False
def serialize_testing_done_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def get_extra_data_field_supports_markdown(self, review_request, key):
field_cls = get_review_request_field(key)
return field_cls and getattr(field_cls, 'enable_markdown', False)
def has_access_permissions(self, request, draft, *args, **kwargs):
return draft.is_accessible_by(request.user)
def has_modify_permissions(self, request, draft, *args, **kwargs):
return draft.is_mutable_by(request.user)
def has_delete_permissions(self, request, draft, *args, **kwargs):
return draft.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(COMMIT_ID_ALREADY_EXISTS, DOES_NOT_EXIST,
INVALID_CHANGE_NUMBER, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED, PUBLISH_ERROR)
@webapi_request_fields(
optional=CREATE_UPDATE_OPTIONAL_FIELDS,
allow_unknown=True
)
def create(self,
request,
local_site_name=None,
*args,
**kwargs):
"""Creates a draft of a review request.
If a draft already exists, this will just reuse the existing draft.
See the documentation on updating a draft for all the details.
"""
try:
review_request = resources.review_request.get_object(
request, local_site_name=local_site_name, *args, **kwargs)
except ReviewRequest.DoesNotExist:
return DOES_NOT_EXIST
if review_request.status == ReviewRequest.DISCARDED:
review_request.reopen(request.user)
# A draft is a singleton. Creating and updating it are the same
# operations in practice.
result = self.update(request, local_site_name=local_site_name,
*args, **kwargs)
if isinstance(result, tuple):
if result[0] == 200:
return (201,) + result[1:]
return result
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(COMMIT_ID_ALREADY_EXISTS, DOES_NOT_EXIST,
INVALID_CHANGE_NUMBER, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED, PUBLISH_ERROR,
REPO_INFO_ERROR)
@webapi_request_fields(
optional=CREATE_UPDATE_OPTIONAL_FIELDS,
allow_unknown=True
)
def update(self,
request,
local_site_name=None,
branch=None,
bugs_closed=None,
changedescription=None,
commit_id=None,
depends_on=None,
submitter=None,
summary=None,
target_groups=None,
target_people=None,
update_from_commit_id=False,
trivial=None,
publish_as_owner=False,
extra_fields={},
*args,
**kwargs):
"""Updates a draft of a review request.
This will update the draft with the newly provided data.
Most of the fields correspond to fields in the review request, but
there is one special one, ``public``. When ``public`` is set to true,
the draft will be published, moving the new content to the
review request itself, making it public, and sending out a notification
(such as an e-mail) if configured on the server. The current draft will
then be deleted.
Extra data can be stored for later lookup. See
:ref:`webapi2.0-extra-data` for more information.
"""
try:
review_request = resources.review_request.get_object(
request, local_site_name=local_site_name, *args, **kwargs)
except ReviewRequest.DoesNotExist:
return DOES_NOT_EXIST
if not review_request.is_mutable_by(request.user):
return self.get_no_access_error(request)
draft = review_request.get_draft()
# Before we update anything, sanitize the commit ID, see if it
changed, and make sure that the new value isn't owned by
# another review request or draft.
if commit_id == '':
commit_id = None
if (commit_id and
commit_id != review_request.commit_id and
(draft is None or commit_id != draft.commit_id)):
# The commit ID has changed, so now we check for other usages of
# this ID.
repository = review_request.repository
existing_review_request_ids = (
ReviewRequest.objects
.filter(commit_id=commit_id,
repository=repository)
.values_list('pk', flat=True)
)
if (existing_review_request_ids and
review_request.pk not in existing_review_request_ids):
# Another review request is using this ID. Error out.
return COMMIT_ID_ALREADY_EXISTS
existing_draft_ids = (
ReviewRequestDraft.objects
.filter(commit_id=commit_id,
review_request__repository=repository)
.values_list('pk', flat=True)
)
if (existing_draft_ids and
(draft is None or draft.pk not in existing_draft_ids)):
# Another review request draft is using this ID. Error out.
return COMMIT_ID_ALREADY_EXISTS
# Now that we've completed our initial accessibility and conflict
# checks, we can start checking for changes to individual fields.
#
# We'll keep track of state pertaining to the fields we want to
# set/save, and any errors we hit. For setting/saving, there are two
# types of things we're tracking: The new field values (which will be
# applied to the objects or Many-To-Many relations) and a list of
# field names to set when calling `save(update_fields=...)`. The
# former implies the latter. The latter only needs to be directly
# set if the fields are modified directly by another function.
new_draft_values = {}
new_changedesc_values = {}
draft_update_fields = set()
changedesc_update_fields = set()
invalid_fields = {}
if draft is None:
draft = ReviewRequestDraft.create(review_request=review_request)
# Check for a new value for branch.
if branch is not None:
new_draft_values['branch'] = branch
# Check for a new value for bugs_closed:
if bugs_closed is not None:
new_draft_values['bugs_closed'] = \
','.join(self._parse_bug_list(bugs_closed))
# Check for a new value for changedescription.
if changedescription is not None and draft.changedesc_id is None:
invalid_fields['changedescription'] = [
'Change descriptions cannot be used for drafts of '
'new review requests'
]
# Check for a new value for commit_id.
if commit_id is not None:
new_draft_values['commit_id'] = commit_id
if update_from_commit_id:
try:
draft_update_fields.update(
draft.update_from_commit_id(commit_id))
except InvalidChangeNumberError:
return INVALID_CHANGE_NUMBER
except HostingServiceError as e:
return REPO_INFO_ERROR.with_message(str(e))
except SCMError as e:
return REPO_INFO_ERROR.with_message(str(e))
# Check for a new value for depends_on.
if depends_on is not None:
found_deps, missing_dep_ids = self._find_depends_on(
dep_ids=self._parse_value_list(depends_on),
request=request)
if missing_dep_ids:
invalid_fields['depends_on'] = missing_dep_ids
else:
new_draft_values['depends_on'] = found_deps
# Check for a new value for submitter.
if submitter is not None:
# While we only allow for one submitter, we'll try to parse a
# possible list of values. This allows us to provide a suitable
# error message if users try to set more than one submitter
# (which people do try, in practice).
found_users, missing_usernames = self._find_users(
usernames=self._parse_value_list(submitter),
request=request)
if len(found_users) + len(missing_usernames) > 1:
invalid_fields['submitter'] = [
'Only one user can be set as the owner of a review '
'request'
]
elif missing_usernames:
assert len(missing_usernames) == 1
invalid_fields['submitter'] = missing_usernames
elif found_users:
assert len(found_users) == 1
new_draft_values['owner'] = found_users[0]
else:
invalid_fields['submitter'] = [
'The owner of a review request cannot be blank'
]
# Check for a new value for summary.
if summary is not None:
if '\n' in summary:
invalid_fields['summary'] = [
"The summary can't contain a newline"
]
else:
new_draft_values['summary'] = summary
# Check for a new value for target_groups.
if target_groups is not None:
found_groups, missing_group_names = self._find_review_groups(
group_names=self._parse_value_list(target_groups),
request=request)
if missing_group_names:
invalid_fields['target_groups'] = missing_group_names
else:
new_draft_values['target_groups'] = found_groups
# Check for a new value for target_people.
if target_people is not None:
found_users, missing_usernames = self._find_users(
usernames=self._parse_value_list(target_people),
request=request)
if missing_usernames:
invalid_fields['target_people'] = missing_usernames
else:
new_draft_values['target_people'] = found_users
# See if we've caught any invalid values. If so, we can error out
# immediately before we update anything else.
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields,
self.item_result_key: draft,
}
# Start applying any rich text processing to any text fields on the
# ChangeDescription and draft. We'll track any fields that get set
# for later saving.
#
# NOTE: If any text fields or text type fields are ever made
# parameters of this method, then their values will need to be
# passed directly to set_text_fields() calls below.
if draft.changedesc_id:
changedesc_update_fields.update(
self.set_text_fields(obj=draft.changedesc,
text_field='changedescription',
text_model_field='text',
rich_text_field_name='rich_text',
changedescription=changedescription,
**kwargs))
for text_field in ('description', 'testing_done'):
draft_update_fields.update(self.set_text_fields(
obj=draft,
text_field=text_field,
**kwargs))
# Go through the list of Markdown-enabled custom fields and apply
# any rich text processing. These will go in extra_data, so we'll
# want to make sure that's tracked for saving.
for field_cls in get_review_request_fields():
if (not issubclass(field_cls, BuiltinFieldMixin) and
getattr(field_cls, 'enable_markdown', False)):
modified_fields = self.set_extra_data_text_fields(
obj=draft,
text_field=field_cls.field_id,
extra_fields=extra_fields,
**kwargs)
if modified_fields:
draft_update_fields.add('extra_data')
# See if the caller has set or patched extra_data values. For
# compatibility, we're going to do this after processing the rich
# text fields in extra_data above.
try:
if self.import_extra_data(draft, draft.extra_data, extra_fields):
# Track extra_data for saving.
draft_update_fields.add('extra_data')
except ImportExtraDataError as e:
return e.error_payload
# Everything checks out. We can now begin the process of applying any
# field changes and then saving the objects.
#
# We'll start by making an initial pass over the objects we care
# about. This optimistically lets us avoid a lookup on the
# ChangeDescription, if it's not being modified.
to_apply = []
if draft_update_fields or new_draft_values:
# If any changes were made to the draft at all, make sure we
# allow last_updated to be computed and saved.
draft_update_fields.add('last_updated')
to_apply.append((draft, draft_update_fields, new_draft_values))
if changedesc_update_fields or new_changedesc_values:
to_apply.append((draft.changedesc, changedesc_update_fields,
new_changedesc_values))
for obj, update_fields, new_values in to_apply:
new_m2m_values = {}
# We may have a mixture of field values and Many-To-Many
# relation values, which we want to set only after the object
# is saved. Start by setting any field values, and store the
# M2M values for after.
for key, value in new_values.items():
field = obj._meta.get_field(key)
if isinstance(field, ManyToManyField):
# Save this until after the object is saved.
new_m2m_values[key] = value
else:
# We can set this one now, and mark it for saving.
setattr(obj, key, value)
update_fields.add(key)
if update_fields:
obj.save(update_fields=sorted(update_fields))
# Now we can set any values on M2M fields.
#
# Each entry will have zero or more values. We'll be
# setting to the list of values, which will fully replace
# the stored entries in the database.
for key, values in new_m2m_values.items():
field = getattr(obj, key)
field.set(values)
# Next, check if the draft is set to be published.
if request.POST.get('public', False):
if not review_request.public and not draft.changedesc_id:
# This is a new review request. Publish this on behalf of the
# owner of the review request, rather than the current user,
# regardless of the original publish_as_owner in the request.
# This allows a review request previously created with
# submit-as= to be published by that user instead of the
# logged in user.
publish_as_owner = True
if publish_as_owner:
publish_user = review_request.owner
else:
# Default to posting as the logged in user.
publish_user = request.user
try:
review_request.publish(user=publish_user, trivial=trivial)
except NotModifiedError:
return NOTHING_TO_PUBLISH
except PublishError as e:
return PUBLISH_ERROR.with_message(str(e))
return 200, {
self.item_result_key: draft,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Deletes a draft of a review request.
This is equivalent to pressing :guilabel:`Discard Draft` in the
review request's page. It will simply erase all the contents of
the draft.
"""
# Make sure this exists.
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
draft = review_request.draft.get()
except ReviewRequest.DoesNotExist:
return DOES_NOT_EXIST
except ReviewRequestDraft.DoesNotExist:
return DOES_NOT_EXIST
if not self.has_delete_permissions(request, draft, *args, **kwargs):
return self.get_no_access_error(request)
draft.delete()
return 204, {}
@webapi_check_local_site
@webapi_login_required
@augment_method_from(WebAPIResource)
def get(self, request, review_request_id, *args, **kwargs):
"""Returns the current draft of a review request."""
pass
def get_links(self, child_resources=[], obj=None, request=None,
*args, **kwargs):
"""Return the links for the resource.
This method will filter out the draft diffcommit resource when the DVCS
feature is disabled.
Args:
child_resources (list of djblets.webapi.resources.base.
WebAPIResource):
The child resources for which links will be serialized.
review_request_id (unicode):
A string representation of the ID of the review request for
which links are being returned.
request (django.http.HttpRequest):
The HTTP request from the client.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
A dictionary of the links for the resource.
"""
if (obj is not None and
not dvcs_feature.is_enabled() and
resources.draft_diffcommit in child_resources):
child_resources = list(child_resources)
child_resources.remove(resources.draft_diffcommit)
return super(ReviewRequestDraftResource, self).get_links(
child_resources, obj=obj, request=request, *args, **kwargs)
def _parse_value_list(self, data):
"""Parse a list of values from a string.
This will parse a comma-separated list of values into a list of
strings. All items will be stripped, and any empty values will be
removed.
Args:
data (unicode):
The data to parse.
Returns:
list of unicode:
The parsed list of strings.
"""
return [
value
for value in [
value.strip()
for value in self.VALUE_LIST_RE.split(data)
]
if value
]
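# e.g. self._parse_value_list('alice, bob,, carol ') -> ['alice', 'bob', 'carol']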
def _parse_bug_list(self, bug_ids):
"""Parse a list of bug IDs.
This will remove any excess whitespace before or after the bug
IDs, and remove any leading ``#`` characters.
Args:
bug_ids (unicode):
The comma-separated list of bug IDs to parse.
Returns:
list of unicode:
The parsed list of bug IDs.
"""
return [
# RB stores bug numbers as numbers, but many people have the
# habit of prepending #, so filter it out:
bug.lstrip('#')
for bug in self._parse_value_list(bug_ids)
]
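# e.g. self._parse_bug_list('#123, 456') -> ['123', '456']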
def _find_depends_on(self, dep_ids, request):
"""Return any found and missing review request dependencies.
This will look up :py:class:`ReviewRequests
<reviewboard.reviews.models.review_request.ReviewRequest>` that are
dependencies with the given list of IDs.
Args:
dep_ids (list of unicode):
The list of review request IDs to look up.
request (django.http.HttpRequest):
The HTTP request from the client.
Returns:
tuple:
A tuple containing:
1. A list of :py:class:`~reviewboard.reviews.models.
review_request.ReviewRequest` instances for any IDs that
were found.
2. A list of IDs that could not be found.
"""
if not dep_ids:
return [], []
query_ids = []
# Filter out anything that isn't an integer.
for dep_id in dep_ids:
try:
query_ids.append(int(dep_id))
except ValueError:
pass
local_site = request.local_site
if local_site:
review_requests = ReviewRequest.objects.filter(
local_site=local_site,
local_id__in=query_ids)
else:
review_requests = ReviewRequest.objects.filter(pk__in=query_ids)
review_requests = list(review_requests)
missing_dep_ids = []
if len(review_requests) != len(dep_ids):
# Some review requests couldn't be found. Find out which.
found_dep_ids = set(
review_request.display_id
for review_request in review_requests
)
missing_dep_ids = [
dep_id
for dep_id in dep_ids
if dep_id not in found_dep_ids
]
return review_requests, missing_dep_ids
def _find_review_groups(self, group_names, request):
"""Return any found and missing review groups based on a list of names.
This will look up :py:class:`Groups
<reviewboard.site.models.group.Group>` from the database based on the
list of group names.
Args:
group_names (list of unicode):
The list of group names to look up.
request (django.http.HttpRequest):
The HTTP request from the client.
Returns:
tuple:
A tuple containing:
1. A list of :py:class:`~reviewboard.site.models.group.Group`
instances for any group names that were found.
2. A list of group names that could not be found.
"""
if not group_names:
return [], []
# Build a query that will find each group with a case-insensitive
# search.
q = Q()
for group_name in group_names:
q |= Q(name__iexact=group_name)
groups = (
Group.objects
.filter(local_site=request.local_site)
.filter(q)
)
missing_group_names = []
if len(group_names) != len(groups):
# Some groups couldn't be found. Find out which.
found_group_names = set(
group.name
for group in groups
)
missing_group_names = [
group_name
for group_name in group_names
if group_name not in found_group_names
]
return groups, missing_group_names
def _find_users(self, usernames, request):
"""Return any found and missing users based on a list of usernames.
This will look up :py:meth:`Users <django.contrib.auth.models.User>`
from the database based on the list of usernames. If the request is
not performed on a :term:`Local Site`, then this will then attempt
to look up any missing users from the authentication backends,
creating them as necessary.
Args:
usernames (list of unicode):
The list of usernames to look up.
request (django.http.HttpRequest):
The HTTP request from the client.
Returns:
tuple:
A tuple containing:
1. A list of :py:class:`~django.contrib.auth.models.User`
instances for any usernames that were found.
2. A list of usernames that could not be found.
"""
if not usernames:
return [], []
local_site = request.local_site
if local_site:
users = local_site.users.filter(username__in=usernames)
else:
users = User.objects.filter(username__in=usernames)
users = list(users)
missing_usernames = []
if len(users) != len(usernames):
# Some users couldn't be found. Find out which.
found_usernames = set(
user.username
for user in users
)
if not local_site:
# See if any of these users exist in an auth backend.
# We don't do this for Local Sites, since we don't want to
# risk creating users in sites where they don't belong.
for username in usernames:
if username in found_usernames:
continue
for backend in auth.get_backends():
try:
user = backend.get_or_create_user(username,
request)
if user is not None:
users.append(user)
found_usernames.add(username)
except NotImplementedError:
pass
except Exception as e:
logger.exception(
'Error when calling get_or_create_user for '
'auth backend %r: %s',
backend, e)
if len(users) != len(usernames):
missing_usernames = [
username
for username in usernames
if username not in found_usernames
]
return users, missing_usernames
review_request_draft_resource = ReviewRequestDraftResource()
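# --- Illustrative sketch (not part of the original module) ---
# The _find_* helpers above share one contract: given a list of identifiers,
# return a (found_objects, missing_identifiers) tuple. A minimal standalone
# version of that pattern, using a plain dict in place of the ORM (all names
# here are hypothetical), would look like:
#
#     def find_in_registry(registry, wanted_ids):
#         """Return (found_values, missing_ids) for wanted_ids in registry."""
#         found = [registry[i] for i in wanted_ids if i in registry]
#         missing = [i for i in wanted_ids if i not in registry]
#         return found, missing
#
#     find_in_registry({'a': 1, 'b': 2}, ['a', 'c'])  # -> ([1], ['c'])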
|
{
"content_hash": "6856c009a332320e929783a068132411",
"timestamp": "",
"source": "github",
"line_count": 1078,
"max_line_length": 79,
"avg_line_length": 38.56679035250464,
"alnum_prop": 0.5436440168370414,
"repo_name": "reviewboard/reviewboard",
"id": "151775ae5a0d95dfab91218053f7d29c76f83ae1",
"size": "41575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/resources/review_request_draft.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('music', '0021_https_thumbnails'),
]
operations = [
migrations.AlterField(
model_name='music',
name='thumbnail',
field=models.CharField(max_length=255),
preserve_default=True,
),
]
|
{
"content_hash": "9bbb543e9e986fcdf3ad11baac3007e7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 21.68421052631579,
"alnum_prop": 0.587378640776699,
"repo_name": "Amoki/Amoki-Music",
"id": "f8fcde1ec3f3b1027649ce74c61bd378ad72fc73",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "music/migrations/0022_auto_20151218_1958.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14853"
},
{
"name": "HTML",
"bytes": "30566"
},
{
"name": "JavaScript",
"bytes": "53165"
},
{
"name": "Python",
"bytes": "127142"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from senic_hub.nuimo_app.hass import HomeAssistant
class TestHomeAssistant(TestCase):
def test_find_state_entity_state(self):
ha = HomeAssistant("ws://test")
states = [
{"entity_id": "eid1"},
{"entity_id": "eid2"},
]
self.assertEqual(ha.find_entity_state(states, "eid2"), {"entity_id": "eid2"})
        self.assertIsNone(ha.find_entity_state(states, "eid4"))
|
{
"content_hash": "cdef827e04b723a60fe027f5e00d5620",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 30.533333333333335,
"alnum_prop": 0.6135371179039302,
"repo_name": "grunskis/senic-hub",
"id": "05301d5810ca0dd9b6d7bec610af7524a98bec9c",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senic_hub/nuimo_app/tests/test_hass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5842"
},
{
"name": "HTML",
"bytes": "1137"
},
{
"name": "Java",
"bytes": "2936"
},
{
"name": "JavaScript",
"bytes": "59895"
},
{
"name": "Makefile",
"bytes": "2260"
},
{
"name": "Objective-C",
"bytes": "4972"
},
{
"name": "Python",
"bytes": "193541"
},
{
"name": "Ruby",
"bytes": "2868"
},
{
"name": "Shell",
"bytes": "74338"
},
{
"name": "Vim script",
"bytes": "7381"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from guardian.models import UserObjectPermission, GroupObjectPermission
from teamroles.models.role import Role
from teamroles.models.team import Team
from teamroles.models.userrole import UserRole
from teamroles.models.userteam import UserTeam
from django.contrib import admin
from django.contrib.auth.models import Permission
# Register your models here.
admin.site.register(Role)
admin.site.register(Team)
admin.site.register(UserRole)
admin.site.register(UserTeam)
admin.site.register(UserObjectPermission)
admin.site.register(GroupObjectPermission)
admin.site.register(Permission)
|
{
"content_hash": "55c4d180bbb1b53a97950d671da2be51",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 33.05263157894737,
"alnum_prop": 0.8455414012738853,
"repo_name": "mevlanaayas/django-teams",
"id": "e9274ea9def3273d22821809aec55effe59009e0",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teamroles/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17740"
}
],
"symlink_target": ""
}
|
import time
import os
path = os.path.dirname(os.path.realpath(__file__))
os.chdir(path)
from sklearn.multiclass import OneVsOneClassifier, \
OutputCodeClassifier, \
OneVsRestClassifier
from csf_utils import get_vector_space_model_train_test, \
get_classifier, \
reduce_dimension, \
get_data_dict, \
get_accuracy
from data_serializer import DataSerializer as ds
from debug import Debug
##############################################################################
tt = time.time()
# directory for classifier's files
clf_data_path = '/home/yuriy/data/pyhk/report/'
# text data directory
txt_data_path = '/home/yuriy/data/pyhk/txt/'
min_doc_list_size = 0
clf_name = 'svc'
meta_name = 'ovr'
svd_dim = 100
train_perc = 0.66
trees_amount = 10
class_sample_size = 0
make_equal_size = False
# switch to True on subsequent runs to reuse the serialized artifacts
serialized_data = False
serialized_model = False
serialized_svd = False
##############################################################################
data_path = clf_data_path
# GET DATA
if not serialized_data:
data, not_readed_files_counter = get_data_dict(txt_data_path)
ds.serialize((data, not_readed_files_counter),
data_path + 'data')
else:
try:
data, not_readed_files_counter = ds.deserialize(data_path + \
'data')
except:
Debug.print_exception_info()
# CREATE MODEL
if not serialized_model:
    matrix_train, matrix_test, Y_train, Y_test, vect = \
        get_vector_space_model_train_test(data,
                                          min_doc_list_size,
                                          make_equal_size,
                                          train_perc)
ds.serialize(vect, data_path + 'vectorizer')
ds.serialize((matrix_train, matrix_test, Y_train, Y_test),
data_path + 'tfidf_matrix')
else:
try:
matrix_train, matrix_test, Y_train, Y_test = \
ds.deserialize(data_path + \
'tfidf_matrix')
except:
Debug.print_exception_info()
print('initial matrix_train.shape', matrix_train.shape)
print('initial matrix_test.shape', matrix_test.shape)
if svd_dim > 0:
if not serialized_svd:
print('reducing dimension')
matrix_train, matrix_test, svd = \
reduce_dimension(matrix_train,
matrix_test,
svd_dim)
ds.serialize(svd, data_path + 'svd_' + str(svd_dim))
ds.serialize((matrix_train, matrix_test),
data_path + 'lsi_matrixes_' + str(svd_dim))
else:
try:
matrix_train, matrix_test = ds.deserialize(data_path + \
'lsi_matrixes_' + \
str(svd_dim))
except:
Debug.print_exception_info()
print(2*'\n')
print('matrix_train.shape: ', matrix_train.shape)
print('matrix_test.shape: ', matrix_test.shape)
clf1 = get_classifier(clf_name, (trees_amount,))
meta = {'ovr':OneVsRestClassifier(clf1),
'ovo':OneVsOneClassifier(clf1),
'occ':OutputCodeClassifier(clf1, code_size=2, random_state=0)}
#
print()
print(clf1.__class__)
clf = meta[meta_name]
print('\nfitting classifier ...')
clf.fit(matrix_train, Y_train)
predictions = clf.predict(matrix_test)
report = get_accuracy(predictions,
Y_test,
clf,
data,
train_perc)
fname = 'report.csv.txt'
# write headers (labels separated by tab)
if not os.path.isfile(data_path + fname):
f = open(data_path + fname, 'a')
for label in sorted(report):
f.write(label + '\t')
f.write('\n')
f.close()
f = open(data_path + fname, 'a')
for label in sorted(report):
f.write(str(report[label]['accuracy']) + '\t')
f.write('\n')
f.close()
#############################################################
print('\n-------------------------------------------------------\n')
sec = time.time() - tt
min_ = int(sec/60)
ss = round(sec - 60*min_, 2)
print('\ntime == ', min_, ' min ', ss, ' sec')
|
{
"content_hash": "f3b421a7a9dbb804aac3a02543a0d935",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 78,
"avg_line_length": 25.635359116022098,
"alnum_prop": 0.4827586206896552,
"repo_name": "ChatbotAI/Text-Data",
"id": "757d7bebe0da9e4157c63e714dbf5193aec3ce61",
"size": "4640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classifier_en/report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23966"
}
],
"symlink_target": ""
}
|
import jinja2.nativetypes
import jinja2.sandbox
class _AirflowEnvironmentMixin:
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.filters.update(FILTERS)
def is_safe_attribute(self, obj, attr, value):
"""
Allow access to ``_`` prefix vars (but not ``__``).
Unlike the stock SandboxedEnvironment, we allow access to "private" attributes (ones starting with
``_``) whilst still blocking internal or truly private attributes (``__`` prefixed ones).
"""
return not jinja2.sandbox.is_internal_attribute(obj, attr)
class NativeEnvironment(_AirflowEnvironmentMixin, jinja2.nativetypes.NativeEnvironment):
"""NativeEnvironment for Airflow task templates."""
class SandboxedEnvironment(_AirflowEnvironmentMixin, jinja2.sandbox.SandboxedEnvironment):
"""SandboxedEnvironment for Airflow task templates."""
def ds_filter(value):
return value.strftime('%Y-%m-%d')
def ds_nodash_filter(value):
return value.strftime('%Y%m%d')
def ts_filter(value):
return value.isoformat()
def ts_nodash_filter(value):
return value.strftime('%Y%m%dT%H%M%S')
def ts_nodash_with_tz_filter(value):
return value.isoformat().replace('-', '').replace(':', '')
FILTERS = {
'ds': ds_filter,
'ds_nodash': ds_nodash_filter,
'ts': ts_filter,
'ts_nodash': ts_nodash_filter,
'ts_nodash_with_tz': ts_nodash_with_tz_filter,
}
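# Illustrative expectations for the filters above (a sketch, assuming a
# timezone-naive datetime for simplicity; not part of the original module):
#
#     from datetime import datetime
#     dt = datetime(2021, 1, 2, 3, 4, 5)
#     ds_filter(dt)                 # '2021-01-02'
#     ds_nodash_filter(dt)          # '20210102'
#     ts_filter(dt)                 # '2021-01-02T03:04:05'
#     ts_nodash_filter(dt)          # '20210102T030405'
#     ts_nodash_with_tz_filter(dt)  # '20210102T030405' (offset kept if aware)
#
# Likewise, the is_safe_attribute override means a template like
# '{{ obj._private }}' renders in these environments, while dunder access
# such as '{{ obj.__class__ }}' still raises SecurityError at render time.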
|
{
"content_hash": "440b38e9dd3f3b6bacbc6e1ccd96d5bd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 26.072727272727274,
"alnum_prop": 0.6680613668061367,
"repo_name": "bolkedebruin/airflow",
"id": "6ec010f618fd363fd701ea0bd8a57d12efec4243",
"size": "2222",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
from vt_manager_kvm.models import *
from vt_manager_kvm.communication.utils.XmlUtils import *
import xmlrpclib
am = xmlrpclib.Server('https://expedient:expedient@192.168.254.193:8445/xmlrpc/agent')
xml = xmlFileToString('communication/utils/queryDelete.xml')
am.send(xml)
|
{
"content_hash": "e79b666a5735a60120cce426e978d93b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.7805755395683454,
"repo_name": "ict-felix/stack",
"id": "a75fe56a83b690ea82d551a62718ea74a48650bd",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vt_manager_kvm/src/python/vt_manager_kvm/tests/testdelete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
}
|
import base64
import logging
import pickle
from datetime import datetime
from django.core.mail import EmailMessage
from django.db import models
PRIORITIES = (
("1", "high"),
("2", "medium"),
("3", "low"),
("4", "deferred"),
)
class MessageManager(models.Manager):
def non_deferred(self):
"""
the messages in the queue not deferred
"""
return self.filter(priority__lt="4")
def deferred(self):
"""
the deferred messages in the queue
"""
return self.filter(priority="4")
def retry_deferred(self, new_priority=2):
count = 0
for message in self.deferred():
if message.retry(new_priority):
count += 1
return count
def email_to_db(email):
# pickle.dumps returns essentially binary data which we need to encode
# to store in a unicode field.
return base64.encodestring(pickle.dumps(email))
def db_to_email(data):
if data == u"":
return None
else:
try:
return pickle.loads(base64.decodestring(data))
except Exception:
try:
# previous method was to just do pickle.dumps(val)
return pickle.loads(data.encode("ascii"))
except Exception:
return None
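# Round-trip sketch (illustrative, not part of the original module):
# email_to_db and db_to_email are inverses for any picklable EmailMessage.
#
#     msg = EmailMessage(subject="hi", body="test", to=["a@example.com"])
#     data = email_to_db(msg)
#     assert db_to_email(data).subject == "hi"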
class Message(models.Model):
# The actual data - a pickled EmailMessage
message_data = models.TextField()
when_added = models.DateTimeField(default=datetime.now)
priority = models.CharField(max_length=1, choices=PRIORITIES, default="2")
# @@@ campaign?
# @@@ content_type?
objects = MessageManager()
## Suggested index, very useful to the send queue:
## CREATE INDEX mailer_message_send_order ON mailer_message (priority, when_added) WHERE priority < '4';
def defer(self):
self.priority = "4"
self.save()
def retry(self, new_priority=2):
if self.priority == "4":
self.priority = new_priority
self.save()
return True
else:
return False
def _get_email(self):
return db_to_email(self.message_data)
def _set_email(self, val):
self.message_data = email_to_db(val)
email = property(_get_email, _set_email, doc=
"""EmailMessage object. If this is mutated, you will need to
set the attribute again to cause the underlying serialised data to be updated.""")
@property
def to_addresses(self):
email = self.email
if email is not None:
return email.to
else:
return []
@property
def subject(self):
email = self.email
if email is not None:
return email.subject
else:
return ""
def filter_recipient_list(lst):
if lst is None:
return None
retval = []
for e in lst:
if DontSendEntry.objects.has_address(e):
logging.info("skipping email to %s as on don't send list " % e.encode("utf-8"))
else:
retval.append(e)
return retval
def make_message(subject="", body="", from_email=None, to=None, bcc=None,
attachments=None, headers=None, priority=None):
"""
Creates a simple message for the email parameters supplied.
The 'to' and 'bcc' lists are filtered using DontSendEntry.
If needed, the 'email' attribute can be set to any instance of EmailMessage
if e-mails with attachments etc. need to be supported.
Call 'save()' on the result when it is ready to be sent, and not before.
"""
to = filter_recipient_list(to)
bcc = filter_recipient_list(bcc)
core_msg = EmailMessage(subject=subject, body=body, from_email=from_email,
to=to, bcc=bcc, attachments=attachments, headers=headers)
db_msg = Message(priority=priority)
db_msg.email = core_msg
return db_msg
class DontSendEntryManager(models.Manager):
def has_address(self, address):
"""
is the given address on the don't send list?
"""
queryset = self.filter(to_address__iexact=address)
try:
# Django 1.2
return queryset.exists()
except AttributeError:
# AttributeError: 'QuerySet' object has no attribute 'exists'
return bool(queryset.count())
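    # Compatibility sketch: on Django >= 1.2 exists() issues a cheap
    # "SELECT ... LIMIT 1"; older versions fall back to COUNT(*). Because of
    # the __iexact lookup, a hypothetical call like
    # DontSendEntry.objects.has_address('A@Example.COM') also matches a
    # stored 'a@example.com' entry.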
class DontSendEntry(models.Model):
to_address = models.EmailField()
when_added = models.DateTimeField()
# @@@ who added?
# @@@ comment field?
objects = DontSendEntryManager()
class Meta:
verbose_name = "don't send entry"
verbose_name_plural = "don't send entries"
RESULT_CODES = (
("1", "success"),
("2", "don't send"),
("3", "failure"),
# @@@ other types of failure?
)
class MessageLogManager(models.Manager):
def log(self, message, result_code, log_message=""):
"""
create a log entry for an attempt to send the given message and
record the given result and (optionally) a log message
"""
return self.create(
message_data = message.message_data,
when_added = message.when_added,
priority = message.priority,
# @@@ other fields from Message
result = result_code,
log_message = log_message,
)
class MessageLog(models.Model):
# fields from Message
message_data = models.TextField()
when_added = models.DateTimeField()
priority = models.CharField(max_length=1, choices=PRIORITIES)
# @@@ campaign?
# additional logging fields
when_attempted = models.DateTimeField(default=datetime.now, db_index=True)
result = models.CharField(max_length=1, choices=RESULT_CODES)
log_message = models.TextField()
objects = MessageLogManager()
@property
def email(self):
return db_to_email(self.message_data)
@property
def to_addresses(self):
email = self.email
if email is not None:
return email.to
else:
return []
@property
def subject(self):
email = self.email
if email is not None:
return email.subject
else:
return ""
|
{
"content_hash": "ab2c5f782c2e6a6cd7f3e18c79e155fd",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 105,
"avg_line_length": 26.69327731092437,
"alnum_prop": 0.5869667873445616,
"repo_name": "cchery101/django-mailer",
"id": "5a8dc760a8a5348703bb135ea37be1c2584278ec",
"size": "6353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mailer/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from filters.dummy_sink import DummySink
from filters.dummy_source import DummySource
from filters.logger_sink import LoggerSink
class TestGraphBuilder:
"""
A fake graph builder used only for testing
"""
def __init__(self):
self._filters = {}
source_filter = DummySource('dummy_source')
logging_filter = LoggerSink('print_logger')
sink_filter = DummySink('dummy_sink')
self._filters[source_filter.filter_name] = source_filter
self._filters[logging_filter.filter_name] = logging_filter
self._filters[sink_filter.filter_name] = sink_filter
self._wire_up_pins()
self._run_graph()
def _wire_up_pins(self):
src_filter = self._filters.get('dummy_source')
src_output = src_filter.get_output_pin('output')
logging_filter = self._filters.get('print_logger')
logger_input = logging_filter.get_input_pin('input')
logger_output = logging_filter.get_output_pin('output')
sink_filter = self._filters.get('dummy_sink')
sink_input = sink_filter.get_input_pin('input')
# Connect pins
src_output.connect_to_pin(logger_input)
logger_output.connect_to_pin(sink_input)
def _run_graph(self):
        for filt in self._filters.values():
            filt.run()
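# Illustrative wiring built by the constructor above:
#
#     dummy_source.output --> print_logger.input
#     print_logger.output --> dummy_sink.input
#
# run() is then invoked on every filter, pushing data from source to sink.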
|
{
"content_hash": "63de052ad4c4910ccf8b44248a06a6af",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 37.68571428571428,
"alnum_prop": 0.6398786959818044,
"repo_name": "koolspin/rosetta",
"id": "9f1b1c0b868c44c95c34a790f9eeddbd22e86424",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_graph_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86058"
},
{
"name": "Shell",
"bytes": "579"
}
],
"symlink_target": ""
}
|
import socket
import readline
HOST = '127.0.0.1'
PORT = 2222
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.connect((HOST, PORT))
stream = socket.makefile()
newline = '\r\n'
readymsg = '~~~ok!~~~'
def write_stream(msg):
stream.write(msg + newline)
stream.flush()
def exec_sync(cmd):
write_stream(cmd)
result = []
for line in stream:
line = line.strip()
if line == readymsg:
break
else:
result.append(line)
return result
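# Protocol sketch: each command is sent as a single line of Python; the
# remote side streams back output lines and finally echoes the sentinel
# __READYMSG ('~~~ok!~~~'), which tells exec_sync the command has finished.
# Assuming the server exposes the echo() helper the completer below relies
# on, a call like exec_sync("echo(1 + 1)") would return ['2'].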
completer_cache = {}
def completer_cache_val(v, f):
if v not in completer_cache:
completer_cache[v] = f()
return completer_cache[v]
def completer(text, state):
def get_locals():
return exec_sync("echo('\\r\\n'.join(locals().keys()))")
def get_dir(code):
return exec_sync("echo('\\r\\n'.join(dir(%s)))" % code)
    def get_path_dir(locs, path):
        attrs = locs
        for i, token in enumerate(path):
            if token in attrs:
                attrs = get_dir('.'.join(path[0:i+1]))
            else:
                return []
        return attrs
if text == '':
return None
try:
locs = completer_cache_val('locals', get_locals)
if '.' in text:
tokens = text.split('.')
start = tokens[0:-1]
last = tokens[-1]
attrs = completer_cache_val('dir_' + '.'.join(start), lambda: get_path_dir(locs, start))
suggestion = [w for w in attrs if w.startswith(last)][state]
return '.'.join(start + [suggestion])
else:
return [w for w in locs if w.startswith(text)][state]
except IndexError:
return None
readline.set_completer(completer)
readline.parse_and_bind('tab: complete')
write_stream('__READYMSG = "%s"' % readymsg)
for line in stream:
line = line.strip()
if line == readymsg:
cmd = raw_input('> ')
write_stream(cmd.strip())
completer_cache = {}
else:
print line
print 'connection closed'
|
{
"content_hash": "93d8d5863f906559907fb6a5ee42d8a5",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 100,
"avg_line_length": 25.135802469135804,
"alnum_prop": 0.5589390962671905,
"repo_name": "juho-p/wot-debugserver",
"id": "12c79b769762c6be9e937793bd11b8a6dea6370a",
"size": "2055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5120"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
"""
Usage:
python -m test.regrtest [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -tt -Wd -3 -m test.regrtest [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- re-run failed tests in verbose mode immediately
-q/--quiet -- no output unless one or more tests fail
-S/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-F/--forever -- run the specified tests in a loop, until an error happens
-P/--pgo -- enable Profile Guided Optimization training
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
    subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
xpickle - Test pickle and cPickle against Python 2.4, 2.5 and 2.6 to
test backwards compatibility. These tests take a long time
to run.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import StringIO
import getopt
import json
import os
import random
import re
import shutil
import sys
import time
import traceback
import warnings
import unittest
import tempfile
import imp
import platform
import sysconfig
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.itervalues():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Windows, Tkinter, and resetting the environment after each test don't
# mix well. To alleviate test failures due to Tcl/Tk not being able to
# find its library, get the necessary environment massage done once early.
if sys.platform == 'win32':
try:
import FixTk
except Exception:
pass
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
from test import test_support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
'xpickle')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, pgo=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:P',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo'])
except getopt.error, msg:
usage(2, msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-q', '--quiet'):
            quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-S', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print a, huntrleaks
usage(2, '-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
elif o in ('-M', '--memlimit'):
test_support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except BaseException, e:
result = INTERRUPTED, e.__class__.__name__
print # Force a newline (just in case)
print json.dumps(result)
sys.exit(0)
elif o in ('-P', '--pgo'):
pgo = True
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
sys.exit(1)
if single and fromfile:
usage(2, "-s and -f don't go together!")
if use_mp and trace:
usage(2, "-T and -j don't go together!")
if use_mp and findleaks:
usage(2, "-l and -j don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(test_support.SAVEDCWD, fromfile))
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single or tests or args):
if not pgo:
# Print basic platform information
print "==", platform.python_implementation(), \
" ".join(sys.version.split())
print "== ", platform.platform(aliased=True), \
"%s-endian" % sys.byteorder
print "== ", os.getcwd()
print "Testing with flags:", sys.flags
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
if randomize:
random.seed(random_seed)
print "Using random seed", random_seed
random.shuffle(selected)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_times = []
test_support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print "Multiprocess option requires thread support"
sys.exit(2)
from Queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
def tests_and_args():
for test in tests:
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
pgo=pgo)
)
yield (test, args_tuple)
pending = tests_and_args()
opt_args = test_support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
# required to spawn a new process with PGO flag on/off
if pgo:
base_cmd = base_cmd + ['--pgo']
def work():
# A worker thread.
try:
while True:
try:
test, args_tuple = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
# -E is needed by some tests, e.g. test_import
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'))
stdout, stderr = popen.communicate()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
if stdout:
print stdout
if stderr and not pgo:
print >>sys.stderr, stderr
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
assert result[1] == 'KeyboardInterrupt'
raise KeyboardInterrupt # What else?
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.close()
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, None, pgo)
accumulate_result(test, result)
if verbose3 and result[0] == FAILED:
if not pgo:
print "Re-running test %r in verbose mode" % test
runtest(test, True, quiet, huntrleaks, None, pgo)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
if interrupted and not pgo:
# print a newline after ^C
print
print "Test suite interrupted by signal SIGINT."
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print count(len(omitted), "test"), "omitted:"
printlist(omitted)
if good and not quiet and not pgo:
if not bad and not skipped and not interrupted and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print "10 slowest tests:"
for time, test in test_times[:10]:
print "%s: %.1fs" % (test, time)
if bad and not pgo:
print count(len(bad), "test"), "failed:"
printlist(bad)
if environment_changed and not pgo:
print "{} altered the execution environment:".format(
count(len(environment_changed), "test"))
printlist(environment_changed)
if skipped and not quiet and not pgo:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if verbose2 and bad:
print "Re-running failed tests in verbose mode"
for test in bad[:]:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
test_support.verbose = True
ok = runtest(test, True, quiet, huntrleaks, None, pgo)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
break
else:
if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
bad.remove(test)
else:
if bad:
print count(len(bad), "test"), "failed again:"
printlist(bad)
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
NOTTESTS = {
'test_support',
'test_future1',
'test_future2',
}
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
modname, ext = os.path.splitext(name)
if modname[:5] == "test_" and ext == ".py" and modname not in others:
tests.append(modname)
return stdtests + sorted(tests)
def runtest(test, verbose, quiet,
huntrleaks=False, use_resources=None, pgo=False):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
pgo -- if true, do not print unnecessary info when running the test
for Profile Guided Optimization build
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
test_support.verbose = verbose # Tell tests to be moderately quiet
if use_resources is not None:
test_support.use_resources = use_resources
try:
return runtest_inner(test, verbose, quiet, huntrleaks, pgo)
finally:
cleanup_test_droppings(test, verbose)
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False, pgo=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'asyncore.socket_map',
'files',
)
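    # Example: a hypothetical resource named 'sys.flags' would require
    # get_sys_flags() and restore_sys_flags(saved) methods on this class;
    # the '.' maps to '_' when the method names are built (see
    # resource_info() below).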
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_test_support_TESTFN(self):
if os.path.isfile(test_support.TESTFN):
result = 'f'
elif os.path.isdir(test_support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_test_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(test_support.TESTFN):
os.unlink(test_support.TESTFN)
elif os.path.isdir(test_support.TESTFN):
shutil.rmtree(test_support.TESTFN)
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir(os.curdir))
def restore_files(self, saved_value):
fn = test_support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
test_support.unlink(fn)
elif os.path.isdir(fn):
test_support.rmtree(fn)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet and not self.pgo:
print >>sys.stderr, (
"Warning -- {} was modified by {}".format(
name, self.testname))
if self.verbose > 1 and not self.pgo:
print >>sys.stderr, (
" Before: {}\n After: {} ".format(
original, current))
# XXX (ncoghlan): for most resources (e.g. sys.path) identity
# matters at least as much as value. For others (e.g. cwd),
# identity is irrelevant. Should we add a mechanism to check
# for substitution in the cases where it matters?
return False
def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False):
test_support.unload(test)
if verbose:
capture_stdout = None
else:
capture_stdout = StringIO.StringIO()
test_time = 0.0
refleak = False # True if the test leaked references.
try:
save_stdout = sys.stdout
try:
if capture_stdout:
sys.stdout = capture_stdout
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet, pgo) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
refleak = dash_R(the_module, test, indirect_test,
huntrleaks)
test_time = time.time() - start_time
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
if not pgo:
print >>sys.stderr, "test", test, "failed --", msg
sys.stderr.flush()
return FAILED, test_time
except:
type, value = sys.exc_info()[:2]
if not pgo:
print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value
sys.stderr.flush()
if verbose and not pgo:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
# Except in verbose mode, tests should not print anything
if verbose or huntrleaks:
return PASSED, test_time
output = capture_stdout.getvalue()
if not output:
return PASSED, test_time
print "test", test, "produced unexpected output:"
print "*" * 70
print output
print "*" * 70
sys.stdout.flush()
return FAILED, test_time
def cleanup_test_droppings(testname, verbose):
import stat
import gc
# First kill any dangling references to open files etc.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (test_support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print "%r left behind %s %r" % (testname, kind, name)
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception, msg:
print >> sys.stderr, ("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg))
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copy_reg, _abcoll, _pyio
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
modules = _abcoll, _pyio
for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]:
# XXX isinstance(abc, ABCMeta) leads to infinite recursion
if not hasattr(abc, '_abc_registry'):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
imp.reload(the_module)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(test_support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print >> sys.stderr
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
with open(fname, "a") as refrep:
print >> refrep, msg
refrep.flush()
return True
return False
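# Worked example for the huntrleaks triple above: '-R 5:4:reflog.txt' gives
# nwarmup=5 and ntracked=4, so the test runs 9 times in total; only the
# refcount deltas of the last 4 runs are recorded, and any nonzero delta
# reports the test as leaking and appends a line to reflog.txt.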
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copy_reg
import _strptime, linecache
dircache = test_support.import_module('dircache', deprecated=True)
import urlparse, urllib, urllib2, mimetypes, doctest
import struct, filecmp
from distutils.dir_util import _path_created
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc, registry in abcs.items():
abc._abc_registry = registry.copy()
abc._abc_cache.clear()
abc._abc_negative_cache.clear()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urlparse.clear_cache()
urllib.urlcleanup()
urllib2.install_opener(None)
dircache.reset()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_mhlib
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_spwd
test_threadsignals
test_timing
test_wait3
test_wait4
""",
'linux2':
"""
test_bsddb185
test_curses
test_dl
test_largefile
test_kqueue
test_ossaudiodev
""",
'unixware7':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_bsddb
test_bsddb185
test_dl
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_bsddb
test_bsddb3
test_curses
test_epoll
test_gdb
test_gdbm
test_largefile
test_locale
test_kqueue
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_bsddb
test_bsddb185
test_curses
test_dbm
test_epoll
test_kqueue
test_gdbm
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_bsddb
test_bsddb185
test_curses
test_dl
test_epoll
test_gdbm
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_bsddb185
test_curses
test_dl
test_gdbm
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_popen2
test_resource
""",
'cygwin':
"""
test_bsddb185
test_bsddb3
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_bsddb185
test_bsddb3
test_commands
test_curses
test_dl
test_epoll
test_kqueue
test_largefile
test_mhlib
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_bsddb
test_bsddb3
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_dl
test_epoll
test_gdbm
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb3
test_ctypes
test_dl
test_epoll
test_gdbm
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
'netbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb185
test_bsddb3
test_ctypes
test_curses
test_dl
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
# expected to be skipped on every platform, even Linux
self.expected.add('test_linuxaudiodev')
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.maxint == 9223372036854775807L:
self.expected.add('test_imageop')
if sys.platform != "darwin":
MAC_ONLY = ["test_macos", "test_macostools", "test_aepack",
"test_plistlib", "test_scriptpackages",
"test_applesingle"]
for skip in MAC_ONLY:
self.expected.add(skip)
elif len(u'\0'.encode('unicode-internal')) == 4:
self.expected.add("test_macostools")
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"]
for skip in WIN_ONLY:
self.expected.add(skip)
if sys.platform != 'irix':
IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl",
"test_gl", "test_imgfile"]
for skip in IRIX_ONLY:
self.expected.add(skip)
if sys.platform != 'sunos5':
self.expected.add('test_sunaudiodev')
self.expected.add('test_nis')
if not sys.py3kwarning:
self.expected.add('test_py3kwarn')
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
if not os.path.exists(TEMPDIR):
os.mkdir(TEMPDIR)
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
# Run the tests in a context manager that temporary changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from test_support.SAVEDCWD.
with test_support.temp_cwd(TESTCWD, quiet=True):
main()
|
{
"content_hash": "dbc894b7d27469ee7c961ffbc5bd4494",
"timestamp": "",
"source": "github",
"line_count": 1609,
"max_line_length": 85,
"avg_line_length": 34.047855811062774,
"alnum_prop": 0.5722395633681981,
"repo_name": "mcking49/apache-flask",
"id": "0007da64b8861df186e21dd0ce61ba1ea0940e29",
"size": "54807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Lib/test/regrtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2501"
},
{
"name": "C",
"bytes": "479174"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "170391"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "1003190"
},
{
"name": "JavaScript",
"bytes": "1559701"
},
{
"name": "PHP",
"bytes": "3338"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "30714489"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
from collections import namedtuple
from pyang import plugin
_COPYRIGHT_NOTICE = """
// DO NOT EDIT
// generated by pyang using OpenConfig https://github.com/openconfig/public
//
// Copyright (C) 2014-2019 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by pyang. DO NOT EDIT.
"""
EQUAL_TYPE_LEAF = 0
EQUAL_TYPE_ARRAY = 1
EQUAL_TYPE_MAP = 2
EQUAL_TYPE_CONTAINER = 3
def pyang_plugin_init():
plugin.register_plugin(GolangPlugin())
class GolangPlugin(plugin.PyangPlugin):
def __init__(self, name=None):
super(GolangPlugin, self).__init__(name=name)
self.multiple_modules = True
def add_output_format(self, fmts):
fmts['golang'] = self
def emit(self, ctx, modules, fd):
ctx.golang_identity_map = {}
ctx.golang_typedef_map = {}
ctx.golang_struct_def = []
ctx.golang_struct_names = {}
ctx.emitted_type_names = {}
ctx.prefix_rel = {}
ctx.module_deps = []
for m in modules:
check_module_deps(ctx, m)
# visit yang statements
visit_modules(ctx)
# emit bgp_configs
emit_go(ctx, fd)
def visit_modules(ctx):
# visit typedef and identity
for mod in ctx.module_deps:
visit_typedef(ctx, mod)
visit_identity(ctx, mod)
# visit container
for mod in ctx.module_deps:
visit_children(ctx, mod, mod.i_children)
def emit_go(ctx, fd):
ctx.golang_struct_def.reverse()
done = set()
# emit
generate_header(fd)
generate_common_functions(fd)
for mod in ctx.module_deps:
if mod not in _module_excluded:
emit_typedef(ctx, mod, fd)
emit_identity(ctx, mod, fd)
for struct in ctx.golang_struct_def:
struct_name = struct.uniq_name
if struct_name in done:
continue
emit_class_def(ctx, struct, struct_name, struct.module_prefix, fd)
done.add(struct_name)
def check_module_deps(ctx, mod):
own_prefix = mod.i_prefix
for k, v in mod.i_prefixes.items():
mod = ctx.get_module(v[0])
if mod is None:
continue
if mod.i_prefix != own_prefix:
check_module_deps(ctx, mod)
ctx.prefix_rel[mod.i_prefix] = k
if (mod not in ctx.module_deps
and mod.i_modulename not in _module_excluded):
ctx.module_deps.append(mod)
def dig_leafref(type_obj):
reftype = type_obj.i_type_spec.i_target_node.search_one('type')
if is_leafref(reftype):
return dig_leafref(reftype)
else:
return reftype
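# Illustrative: if a leaf's type chain is leafref -> leafref -> uint32,
# dig_leafref() follows the chain and returns the 'uint32' type statement.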
def emit_class_def(ctx, stmt, struct_name, prefix, fd):
if len(stmt.i_children) == 1 and is_list(stmt.i_children[0]):
return
print('// struct for container %s:%s.' % (prefix, stmt.arg), file=fd)
emit_description(stmt, fd)
print('type %s struct {' % convert_to_golang(struct_name), file=fd)
equal_elems = []
for child in stmt.i_children:
if child.path in _path_exclude:
continue
container_or_list_name = child.uniq_name
val_name_go = convert_to_golang(child.arg)
child_prefix = get_orig_prefix(child.i_orig_module)
tag_name = child.uniq_name.lower()
equal_type = EQUAL_TYPE_LEAF
equal_data = None
print('// original -> %s:%s' % (child_prefix, container_or_list_name), file=fd)
# case leaf
if is_leaf(child):
type_obj = child.search_one('type')
type_name = type_obj.arg
# case identityref
if is_identityref(type_obj):
emit_type_name = convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
# case leafref
elif is_leafref(type_obj):
if type_obj.search_one('path').arg.startswith('../config'):
continue
t = dig_leafref(type_obj)
if is_translation_required(t):
print('// %s:%s\'s original type is %s.' % (child_prefix, container_or_list_name, t.arg), file=fd)
emit_type_name = translate_type(t.arg)
elif is_identityref(t):
emit_type_name = convert_to_golang(t.search_one('base').arg.split(':')[-1])
else:
emit_type_name = t.arg
            # case embedded enumeration
elif is_enum(type_obj):
emit_type_name = val_name_go
# case translation required
elif is_translation_required(type_obj):
print('// %s:%s\'s original type is %s.' % (child_prefix, container_or_list_name, type_name), file=fd)
emit_type_name = translate_type(type_name)
# case other primitives
elif is_builtin_type(type_obj):
emit_type_name = type_name
# default
else:
base_module = type_obj.i_orig_module.i_prefix
t = lookup_typedef(ctx, base_module, type_name)
# print(t.golang_name, file=sys.stderr)
emit_type_name = t.golang_name
# case 'case'
if is_case(child):
continue
if is_choice(child) and is_enum_choice(child):
emit_type_name = val_name_go
# case leaflist
if is_leaflist(child):
type_obj = child.search_one('type')
type_name = type_obj.arg
val_name_go = val_name_go + 'List'
tag_name += '-list'
equal_type = EQUAL_TYPE_ARRAY
# case leafref
if is_leafref(type_obj):
t = dig_leafref(type_obj)
emit_type_name = '[]' + t.arg
# case identityref
elif is_identityref(type_obj):
emit_type_name = '[]' + convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
# case translation required
elif is_translation_required(type_obj):
print('// original type is list of %s' % type_obj.arg, file=fd)
emit_type_name = '[]' + translate_type(type_name)
# case other primitives
elif is_builtin_type(type_obj):
emit_type_name = '[]' + type_name
# default
else:
base_module = type_obj.i_orig_module.i_prefix
t = lookup_typedef(ctx, base_module, type_name)
emit_type_name = '[]' + t.golang_name
# case container
elif is_container(child) or (is_choice(child) and not is_enum_choice(child)):
key = child_prefix + ':' + container_or_list_name
t = ctx.golang_struct_names[key]
val_name_go = t.golang_name
if len(t.i_children) == 1 and is_list(t.i_children[0]):
c = t.i_children[0]
emit_type_name = '[]' + c.golang_name
equal_type = EQUAL_TYPE_MAP
equal_data = c.search_one('key').arg
leaf = c.search_one('leaf').search_one('type')
if leaf.arg == 'leafref' and leaf.search_one('path').arg.startswith('../config'):
equal_data = 'config.' + equal_data
else:
emit_type_name = t.golang_name
equal_type = EQUAL_TYPE_CONTAINER
# case list
elif is_list(child):
key = child_prefix + ':' + container_or_list_name
t = ctx.golang_struct_names[key]
val_name_go = val_name_go + 'List'
tag_name += '-list'
emit_type_name = '[]' + t.golang_name
equal_type = EQUAL_TYPE_MAP
equal_data = child.search_one('key').arg
if is_container(child):
name = emit_type_name
if name.startswith(convert_to_golang(struct_name)) and name.endswith("Config"):
tag_name = 'config'
val_name_go = 'Config'
elif name.startswith(convert_to_golang(struct_name)) and name.endswith("State"):
tag_name = 'state'
val_name_go = 'State'
emit_description(child, fd=fd)
print(' {0}\t{1} `mapstructure:"{2}" json:"{2},omitempty"`'.format(val_name_go, emit_type_name, tag_name), file=fd)
equal_elems.append((val_name_go, emit_type_name, equal_type, equal_data))
print('}', file=fd)
if not struct_name.endswith('state'):
print('func (lhs *{0}) Equal(rhs *{0}) bool {{'.format(convert_to_golang(struct_name)), file=fd)
print('if lhs == nil || rhs == nil {', file=fd)
print('return false', file=fd)
print('}', file=fd)
for val_name, type_name, typ, elem in equal_elems:
if val_name == 'State':
continue
if typ == EQUAL_TYPE_LEAF:
if type_name == '[]byte':
print('if bytes.Compare(lhs.{0}, rhs.{0}) != 0 {{'.format(val_name), file=fd)
else:
print('if lhs.{0} != rhs.{0} {{'.format(val_name), file=fd)
print('return false', file=fd)
print('}', file=fd)
elif typ == EQUAL_TYPE_CONTAINER:
print('if !lhs.{0}.Equal(&(rhs.{0})) {{'.format(val_name), file=fd)
print('return false', file=fd)
print('}', file=fd)
elif typ == EQUAL_TYPE_ARRAY:
print('if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name), file=fd)
print('return false', file=fd)
print('}', file=fd)
print('for idx, l := range lhs.{0} {{'.format(val_name), file=fd)
if type_name == '[][]byte':
print('if bytes.Compare(l, rhs.{0}[idx]) != 0 {{'.format(val_name), file=fd)
else:
print('if l != rhs.{0}[idx] {{'.format(val_name), file=fd)
print('return false', file=fd)
print('}', file=fd)
print('}', file=fd)
elif typ == EQUAL_TYPE_MAP:
print('if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name), file=fd)
print('return false', file=fd)
print('}', file=fd)
print('{', file=fd)
print('lmap := make(map[string]*{0})'.format(type_name[2:]), file=fd)
print('for i, l := range lhs.{0} {{'.format(val_name), file=fd)
print('lmap[mapkey(i, string({0}))] = &lhs.{1}[i]'.format(' + '.join('l.{0}'.format(convert_to_golang(v)) for v in elem.split(' ')), val_name), file=fd)
print('}', file=fd)
print('for i, r := range rhs.{0} {{'.format(val_name), file=fd)
print('if l, y := lmap[mapkey(i, string({0}))]; !y {{'.format('+'.join('r.{0}'.format(convert_to_golang(v)) for v in elem.split(' '))), file=fd)
print('return false', file=fd)
print('} else if !r.Equal(l) {', file=fd)
print('return false', file=fd)
print('}', file=fd)
print('}', file=fd)
print('}', file=fd)
else:
                sys.stderr.write("invalid equal type %s\n" % typ)
print('return true', file=fd)
print('}', file=fd)
def get_orig_prefix(mod):
orig = mod.i_orig_module
if orig:
        return get_orig_prefix(orig)
else:
return mod.i_prefix
def get_path(c):
path = ''
if c.parent is not None:
p = ''
if hasattr(c, 'i_module'):
mod = c.i_module
prefix = mod.search_one('prefix')
if prefix:
p = prefix.arg + ":"
path = get_path(c.parent) + "/" + p + c.arg
return path
# define container embedded enums
def define_enum(ctx, mod, c):
prefix = mod.i_prefix
c.path = get_path(c)
c.golang_name = convert_to_golang(c.arg)
if prefix in ctx.golang_typedef_map:
ctx.golang_typedef_map[prefix][c.arg] = c
else:
ctx.golang_typedef_map[prefix] = {c.arg: c}
def visit_children(ctx, mod, children):
for c in children:
if is_case(c):
prefix = get_orig_prefix(c.parent.i_orig_module)
c.i_orig_module = c.parent.i_orig_module
else:
prefix = get_orig_prefix(c.i_orig_module)
c.uniq_name = c.arg
if c.arg == 'config':
c.uniq_name = c.parent.uniq_name + '-config'
elif c.arg == 'state':
c.uniq_name = c.parent.uniq_name + '-state'
elif c.arg == 'graceful-restart' and prefix == 'bgp-mp':
c.uniq_name = 'mp-graceful-restart'
if is_leaf(c) and is_enum(c.search_one('type')):
define_enum(ctx, mod, c)
elif is_list(c) or is_container(c) or is_choice(c):
c.golang_name = convert_to_golang(c.uniq_name)
if is_choice(c):
picks = pickup_choice(c)
c.i_children = picks
if is_enum_choice(c):
define_enum(ctx, mod, c)
continue
prefix_name = prefix + ':' + c.uniq_name
if prefix_name in ctx.golang_struct_names:
ext_c = ctx.golang_struct_names.get(prefix_name)
ext_c_child_count = len(getattr(ext_c, "i_children"))
current_c_child_count = len(getattr(c, "i_children"))
if ext_c_child_count < current_c_child_count:
c.module_prefix = prefix
ctx.golang_struct_names[prefix_name] = c
idx = ctx.golang_struct_def.index(ext_c)
ctx.golang_struct_def[idx] = c
else:
c.module_prefix = prefix
ctx.golang_struct_names[prefix_name] = c
ctx.golang_struct_def.append(c)
c.path = get_path(c)
# print(c.path, file=sys.stderr)
if hasattr(c, 'i_children'):
visit_children(ctx, mod, c.i_children)
def pickup_choice(c):
element = []
for child in c.i_children:
if is_case(child):
element = element + child.i_children
return element
def get_type_spec(stmt):
for s in stmt.substmts:
if hasattr(s, 'i_type_spec'):
return s.i_type_spec.name
return None
def visit_typedef(ctx, mod):
prefix = mod.i_prefix
child_map = {}
for stmt in mod.substmts:
if is_typedef(stmt):
stmt.path = get_path(stmt)
# print('stmt.path = "%s"' % stmt.path, file=sys.stderr)
name = stmt.arg
stmt.golang_name = convert_to_golang(name)
# print('stmt.golang_name = "%s"' % stmt.golang_name, file=sys.stderr)
child_map[name] = stmt
ctx.golang_typedef_map[prefix] = child_map
# print('ctx.golang_typedef_map["%s"] = %s' % (prefix, child_map), file=sys.stderr)
prefix_rel = ctx.prefix_rel[prefix]
ctx.golang_typedef_map[prefix_rel] = child_map
# print('ctx.golang_typedef_map["%s"] = %s' % (prefix_rel, child_map)), file=sys.stderr)
def visit_identity(ctx, mod):
prefix = mod.i_prefix
child_map = {}
for stmt in mod.substmts:
if is_identity(stmt):
name = stmt.arg
stmt.golang_name = convert_to_golang(name)
# print('stmt.golang_name = "%s"' % stmt.golang_name, file=sys.stderr)
child_map[name] = stmt
base = stmt.search_one('base')
if base:
base_name = base.arg
if ':' in base_name:
base_prefix, base_name = base_name.split(':', 1)
if base_prefix in ctx.golang_identity_map:
ctx.golang_identity_map[base_prefix][base_name].substmts.append(stmt)
else:
child_map[base_name].substmts.append(stmt)
ctx.golang_identity_map[prefix] = child_map
# print('ctx.golang_identity_map["%s"] = %s\n' % (prefix, child_map), file=sys.stderr)
prefix_rel = ctx.prefix_rel[prefix]
ctx.golang_identity_map[prefix_rel] = child_map
# print('ctx.golang_identity_map["%s"] = %s\n' % (prefix_rel, child_map), file=sys.stderr)
def lookup_identity(ctx, default_prefix, identity_name):
result = lookup(ctx.golang_identity_map, default_prefix, identity_name)
return result
def lookup_typedef(ctx, default_prefix, type_name):
result = lookup(ctx.golang_typedef_map, default_prefix, type_name)
return result
def lookup(basemap, default_prefix, key):
if ':' in key:
pref, name = key.split(':')
else:
pref = default_prefix
name = key
if pref in basemap:
return basemap[pref].get(name, None)
else:
return key
def emit_description(stmt, fd):
desc = stmt.search_one('description')
if desc is None:
return None
desc_words = desc.arg if desc.arg.endswith('.') else desc.arg + '.'
print('// %s' % desc_words.replace('\n', '\n// '), file=fd)
def emit_enum(prefix, name, stmt, substmts, fd):
type_name_org = name
type_name = stmt.golang_name
print('// typedef for identity %s:%s.' % (prefix, type_name_org), file=fd)
emit_description(stmt, fd)
print('type %s string' % type_name, file=fd)
const_prefix = convert_const_prefix(type_name_org)
print('const (', file=fd)
m = {}
if is_choice(stmt) and is_enum_choice(stmt):
n = namedtuple('Statement', ['arg'])
n.arg = 'none'
substmts = [n] + substmts
for sub in substmts:
enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
m[sub.arg.lower()] = enum_name
print(' %s %s = "%s"' % (enum_name, type_name, sub.arg.lower()), file=fd)
print(')\n', file=fd)
print('var %sToIntMap = map[%s]int {' % (type_name, type_name), file=fd)
for i, sub in enumerate(substmts):
enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
print(' %s: %d,' % (enum_name, i), file=fd)
print('}\n', file=fd)
print('var IntTo%sMap = map[int]%s {' % (type_name, type_name), file=fd)
for i, sub in enumerate(substmts):
enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
print(' %d: %s,' % (i, enum_name), file=fd)
print('}\n', file=fd)
print('func (v %s) Validate() error {' % type_name, file=fd)
print('if _, ok := %sToIntMap[v]; !ok {' % type_name, file=fd)
print('return fmt.Errorf("invalid %s: %%s", v)' % type_name, file=fd)
print('}', file=fd)
print('return nil', file=fd)
print('}\n', file=fd)
if stmt.search_one('default'):
default = stmt.search_one('default')
print('func (v %s) Default() %s {' % (type_name, type_name), file=fd)
print('return %s' % m[default.arg.lower()], file=fd)
print('}\n', file=fd)
print('func (v %s) DefaultAsNeeded() %s {' % (type_name, type_name), file=fd)
print(' if string(v) == "" {', file=fd)
print(' return v.Default()', file=fd)
print('}', file=fd)
print(' return v', file=fd)
print('}', file=fd)
print('func (v %s) ToInt() int {' % type_name, file=fd)
        print('_v := v.DefaultAsNeeded()', file=fd)
print('i, ok := %sToIntMap[_v]' % type_name, file=fd)
else:
print('func (v %s) ToInt() int {' % type_name, file=fd)
print('i, ok := %sToIntMap[v]' % type_name, file=fd)
print('if !ok {', file=fd)
print('return -1', file=fd)
print('}', file=fd)
print('return i', file=fd)
print('}', file=fd)
def emit_typedef(ctx, mod, fd):
prefix = mod.i_prefix
t_map = ctx.golang_typedef_map[prefix]
for name, stmt in t_map.items():
if stmt.path in _typedef_exclude:
continue
# skip identityref type because currently skip identity
if get_type_spec(stmt) == 'identityref':
continue
type_name_org = name
type_name = stmt.golang_name
if type_name in ctx.emitted_type_names:
print("warning %s: %s has already been emitted from %s."
% (prefix + ":" + type_name_org, type_name_org, ctx.emitted_type_names[type_name]),
file=sys.stderr)
continue
ctx.emitted_type_names[type_name] = prefix + ":" + type_name_org
t = stmt.search_one('type')
if not t and is_choice(stmt):
emit_enum(prefix, type_name_org, stmt, stmt.i_children, fd)
elif is_enum(t):
emit_enum(prefix, type_name_org, stmt, t.substmts, fd)
elif is_union(t):
print('// typedef for typedef %s:%s.' % (prefix, type_name_org), file=fd)
emit_description(t, fd)
print('type %s string' % type_name, file=fd)
else:
if is_leafref(t):
t = dig_leafref(t)
print('// typedef for typedef %s:%s.' % (prefix, type_name_org), file=fd)
if is_builtin_type(t):
emit_description(t, fd)
print('type %s %s' % (type_name, t.arg), file=fd)
elif is_translation_required(t):
print('// %s:%s\'s original type is %s.' % (prefix, name, t.arg), file=fd)
emit_description(t, fd)
print('type %s %s' % (type_name, translate_type(t.arg)), file=fd)
else:
m = ctx.golang_typedef_map
for k in t.arg.split(':'):
m = m[k]
emit_description(t, fd)
print('type %s %s' % (type_name, m.golang_name), file=fd)
def emit_identity(ctx, mod, fd):
prefix = mod.i_prefix
i_map = ctx.golang_identity_map[prefix]
for name, stmt in i_map.items():
enums = stmt.search('identity')
if len(enums) > 0:
emit_enum(prefix, name, stmt, enums, fd)
def is_reference(s):
return s.arg in ['leafref', 'identityref']
def is_leafref(s):
return s.arg in ['leafref']
def is_identityref(s):
return s.arg in ['identityref']
def is_enum(s):
return s.arg in ['enumeration']
def is_union(s):
return s.arg in ['union']
def is_typedef(s):
return s.keyword in ['typedef']
def is_identity(s):
return s.keyword in ['identity']
def is_leaf(s):
return s.keyword in ['leaf']
def is_leaflist(s):
return s.keyword in ['leaf-list']
def is_list(s):
return s.keyword in ['list']
def is_container(s):
return s.keyword in ['container']
def is_case(s):
return s.keyword in ['case']
def is_choice(s):
return s.keyword in ['choice']
def is_enum_choice(s):
return all(e.search_one('type').arg in _type_enum_case for e in s.i_children)
_type_enum_case = [
'empty',
]
def is_builtin_type(t):
return t.arg in _type_builtin
def is_translation_required(t):
return t.arg in list(_type_translation_map.keys())
_type_translation_map = {
'union': 'string',
'decimal64': 'float64',
'boolean': 'bool',
'empty': 'bool',
'inet:ip-address': 'string',
'inet:ip-prefix': 'string',
'inet:ipv4-address': 'string',
'inet:as-number': 'uint32',
'bgp-set-community-option-type': 'string',
'inet:port-number': 'uint16',
'yang:timeticks': 'int64',
'ptypes:install-protocol-type': 'string',
'binary': '[]byte',
'route-family': 'bgp.RouteFamily',
'bgp-capability': 'bgp.ParameterCapabilityInterface',
'bgp-open-message': '*bgp.BGPMessage',
}
_type_builtin = [
"union",
"int8",
"int16",
"int32",
"int64",
"string",
"uint8",
"uint16",
"uint32",
"uint64",
]
_module_excluded = [
"ietf-inet-types",
"ietf-yang-types",
]
_path_exclude = [
"/rpol:routing-policy/rpol:defined-sets/rpol:neighbor-sets/rpol:neighbor-set/rpol:neighbor",
"/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:community-sets/bgp-pol:community-set/bgp-pol:community-member",
"/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:ext-community-sets/bgp-pol:ext-community-set/bgp-pol:ext-community-member",
"/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:as-path-sets/bgp-pol:as-path-set/bgp-pol:as-path-set-member",
]
_typedef_exclude = [
"/gobgp:bgp-capability",
"/gobgp:bgp-open-message",
]
def generate_header(fd):
print(_COPYRIGHT_NOTICE, file=fd)
print('package config', file=fd)
print('', file=fd)
print('import (', file=fd)
print('"fmt"', file=fd)
print('', file=fd)
print('"github.com/osrg/gobgp/pkg/packet/bgp"', file=fd)
print(')', file=fd)
print('', file=fd)
def generate_common_functions(fd):
print('func mapkey(index int, name string) string {', file=fd)
print('if name != "" {', file=fd)
print('return name', file=fd)
print('}', file=fd)
print('return fmt.Sprintf("%v", index)', file=fd)
print('}', file=fd)
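# For reference, the Go helper emitted by generate_common_functions() above
# (reconstructed from the print calls; gofmt handles the indentation):
#
#   func mapkey(index int, name string) string {
#       if name != "" {
#           return name
#       }
#       return fmt.Sprintf("%v", index)
#   }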
def translate_type(key):
if key in _type_translation_map.keys():
return _type_translation_map[key]
else:
return key
# 'hoge-hoge' -> 'HogeHoge'
def convert_to_golang(type_string):
a = type_string.split('.')
return '.'.join(''.join(t.capitalize() for t in x.split('-')) for x in a)
# 'hoge-hoge' -> 'HOGE_HOGE'
def convert_const_prefix(type_string):
return type_string.replace('-', '_').upper()
def chop_suf(s, suf):
if not s.endswith(suf):
return s
return s[:-len(suf)]
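# Illustrative expected results for the helpers above:
#   convert_to_golang('graceful-restart')    -> 'GracefulRestart'
#   convert_to_golang('t.peer-group')        -> 'T.PeerGroup'
#   convert_const_prefix('peer-type')        -> 'PEER_TYPE'
#   chop_suf('neighbor-config', '-config')   -> 'neighbor'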
|
{
"content_hash": "73b3219ab43cd94ac94714b84de6c9f9",
"timestamp": "",
"source": "github",
"line_count": 797,
"max_line_length": 168,
"avg_line_length": 32.623588456712675,
"alnum_prop": 0.551440329218107,
"repo_name": "j-kato/gobgp",
"id": "1ad359e30f50c58bc5df65f8330e0864d024c6e9",
"size": "26619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/pyang_plugins/bgpyang2golang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1281959"
},
{
"name": "Protocol Buffer",
"bytes": "16559"
},
{
"name": "Python",
"bytes": "401622"
},
{
"name": "Shell",
"bytes": "11705"
}
],
"symlink_target": ""
}
|
from local_settings import *
# Application definition
INSTALLED_APPS = (
'wpadmin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'lugar',
'encuestas',
'clima',
'smart_selects',
'multiselectfield',
'selectable',
'sorl.thumbnail',
'el_pagination',
#'debug_toolbar',
'import_export',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
    # delete once used
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mapafinca.urls'
#SHOW_TOOLBAR_CALLBACK = True
#INTERNAL_IPS = '127.0.0.1'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mapafinca.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-ni'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_media"),
)
WPADMIN = {
'admin': {
#'admin_site': 'mapafinca.admin',
'title': 'Django panel de administración',
'menu': {
'top': 'wpadmin.menu.menus.BasicTopMenu',
'left': 'wpadmin.menu.menus.BasicLeftMenu',
},
'dashboard': {
'breadcrumbs': True,
},
'custom_style': STATIC_URL + 'wpadmin/css/themes/light.css',
}
}
CKEDITOR_JQUERY_URL = 'https://code.jquery.com/jquery-2.1.3.min.js'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'my_cache_table',
# }
# }
ENDLESS_PAGINATION_PER_PAGE = 18
|
{
"content_hash": "d62a3ae3a55ce67626ef967877e7178d",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 26.217391304347824,
"alnum_prop": 0.6451077943615257,
"repo_name": "CARocha/mapafinca",
"id": "e8ec6454d7e3db011b18c50b741d3af3c755a116",
"size": "3040",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mapafinca/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46505"
},
{
"name": "HTML",
"bytes": "391246"
},
{
"name": "JavaScript",
"bytes": "191073"
},
{
"name": "Python",
"bytes": "276590"
}
],
"symlink_target": ""
}
|
import h2o
h2o.init()
airlines_url = "https://s3.amazonaws.com/h2o-airlines-unpacked/allyears2k.csv"
airlines_df = h2o.import_file(airlines_url)
airlines_df.columns
airlines_df.describe() # output suppressed
airlines_df["IsArrDelayed"].describe()
independent_vars = ["Year","Month","DayOfWeek","CRSDepTime","CRSArrTime","Origin","Dest","UniqueCarrier"]
dependent_var = "IsArrDelayed"
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
linear_classification_estimator = H2OGeneralizedLinearEstimator(family="binomial")
linear_classification_estimator.train(x=independent_vars, y=dependent_var, training_frame=airlines_df)
linear_classification_estimator.show()
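# show() prints the fitted model's coefficients and training metrics for
# this binomial GLM (e.g. AUC) to stdout.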
|
{
"content_hash": "3384f68c973e768684f22d52d1e9d75f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 105,
"avg_line_length": 27.32,
"alnum_prop": 0.7891654465592972,
"repo_name": "pchmieli/h2o-3",
"id": "445cc509c74ae21d3062615ecf0c28228ece2600",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-docs/src/booklets/v2_2015/source/python/ipython_machinelearning_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162402"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "HTML",
"bytes": "139398"
},
{
"name": "Java",
"bytes": "5612816"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34048"
},
{
"name": "Python",
"bytes": "2512115"
},
{
"name": "R",
"bytes": "1559459"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22635"
},
{
"name": "Shell",
"bytes": "46381"
},
{
"name": "TeX",
"bytes": "521075"
}
],
"symlink_target": ""
}
|
""" unit testing code for molecule suppliers
"""
import os
import tempfile
import unittest
from rdkit import Chem, RDLogger
from rdkit import RDConfig
class TestCase(unittest.TestCase):
def tearDown(self):
RDLogger.EnableLog('rdApp.error')
def test1SDSupplier(self):
fileN = os.path.join(RDConfig.RDCodeDir, 'VLib', 'NodeLib', 'test_data', 'NCI_aids.10.sdf')
suppl = Chem.SDMolSupplier(fileN)
ms = [x for x in suppl]
self.assertEqual(len(ms), 10)
# test repeating:
ms = [x for x in suppl]
self.assertEqual(len(ms), 10)
# test reset:
suppl.reset()
m = next(suppl)
self.assertEqual(m.GetProp('_Name'), '48')
self.assertEqual(m.GetProp('NSC'), '48')
self.assertEqual(m.GetProp('CAS_RN'), '15716-70-8')
m = next(suppl)
self.assertEqual(m.GetProp('_Name'), '78')
self.assertEqual(m.GetProp('NSC'), '78')
self.assertEqual(m.GetProp('CAS_RN'), '6290-84-2')
suppl.reset()
for _ in range(10):
m = next(suppl)
with self.assertRaises(StopIteration):
m = next(suppl)
def test2SmilesSupplier(self):
fileN = os.path.join(RDConfig.RDCodeDir, 'VLib', 'NodeLib', 'test_data', 'pgp_20.txt')
suppl = Chem.SmilesMolSupplier(
fileN, delimiter='\t', smilesColumn=2, nameColumn=1, titleLine=1)
ms = [x for x in suppl]
self.assertEqual(len(ms), 20)
# test repeating:
ms = [x for x in suppl]
self.assertEqual(len(ms), 20)
# test reset:
suppl.reset()
m = next(suppl)
self.assertEqual(m.GetProp('_Name'), 'ALDOSTERONE')
self.assertEqual(m.GetProp('ID'), 'RD-PGP-0001')
m = next(suppl)
self.assertEqual(m.GetProp('_Name'), 'AMIODARONE')
self.assertEqual(m.GetProp('ID'), 'RD-PGP-0002')
suppl.reset()
for _ in range(20):
m = next(suppl)
with self.assertRaises(StopIteration):
m = next(suppl)
def test3SmilesSupplier(self):
txt = """C1CC1,1
CC(=O)O,3
fail,4
CCOC,5
"""
RDLogger.DisableLog('rdApp.error')
try:
with tempfile.NamedTemporaryFile('w+', suffix='.csv', delete=False) as tmp:
tmp.write(txt)
suppl = Chem.SmilesMolSupplier(tmp.name, delimiter=',', smilesColumn=0, nameColumn=1,
titleLine=0)
ms = [x for x in suppl]
suppl = None
while ms.count(None):
ms.remove(None)
self.assertEqual(len(ms), 3)
finally:
os.unlink(tmp.name)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "de51d15322e5100c43c265157eb963f9",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 99,
"avg_line_length": 29.21276595744681,
"alnum_prop": 0.5571740713765477,
"repo_name": "bp-kelley/rdkit",
"id": "720860e55f9e59b531d274134477b2599dccdb32",
"size": "3021",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "rdkit/Chem/UnitTestSuppliers.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1593408"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13831236"
},
{
"name": "CMake",
"bytes": "761688"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369342"
},
{
"name": "JavaScript",
"bytes": "52043"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4156873"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61432"
}
],
"symlink_target": ""
}
|
from confiture import Confiture, ConfigFileError
print("[*] loading template")
confiture = Confiture("examples/templates/confiture.yaml")
print("[*] checking required files for blueberry")
try:
confiture.check("examples/config/blueberry_ok.yaml")
print("[*] blueberry file is correct")
except ConfigFileError as e:
print(e.message)
print("[*] checking required files for banana")
try:
confiture.check("examples/config/banana_ko.yaml")
print("[*] banana file is correct")
except ConfigFileError as e:
print(e.message)
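# Each check() call above validates one config file against the template
# loaded at the top; ConfigFileError is raised when the file is missing
# required entries or is otherwise malformed, and caught here to report it.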
|
{
"content_hash": "216f6e5c538afbad3c0adf85da3de9d3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 33.875,
"alnum_prop": 0.7343173431734318,
"repo_name": "Frky/confiture",
"id": "1f785318e9bcc9a0e398ea59bc4ad8c1cd7e8c46",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5175"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
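# Builds a 128-point daily artificial series (moving-median trend, cycle
# length 30, no noise, no exogenous variables) and fits it after applying
# the 'Difference' transform.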
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0);
|
{
"content_hash": "a8cd9864c8afc7efd0b435a898dc8eb7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "91d11f728b45ea977c2bff77ba1318b59def0395",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_MovingMedian/cycle_30/ar_/test_artificial_128_Difference_MovingMedian_30__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
from fluent_contents.models.managers import ContentItemManager
class Migration(migrations.Migration):
initial = True
dependencies = [
('fluent_contents', '0001_initial'),
('projects', '0015_auto_20161207_0900'),
('surveys', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProjectImagesContent',
fields=[
('contentitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('title', models.CharField(blank=True, max_length=63, null=True)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('description', models.TextField(blank=True, null=True)),
('action_text', models.CharField(blank=True, default='Check out our projects', max_length=100, null=True)),
('action_link', models.CharField(blank=True, default=b'/projects', max_length=100, null=True)),
],
options={
'db_table': 'contentitem_cms_projectimagescontent',
'verbose_name': 'Project Images',
},
bases=('fluent_contents.contentitem',),
managers=[
('objects', ContentItemManager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('projects', models.ManyToManyField(to='projects.Project')),
],
),
migrations.CreateModel(
name='ProjectsContent',
fields=[
('contentitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('title', models.CharField(blank=True, max_length=63, null=True)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('action', models.CharField(max_length=255)),
('action_text', models.CharField(max_length=255)),
('projects', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Projects')),
],
options={
'db_table': 'contentitem_cms_projectscontent',
'verbose_name': 'Projects',
},
bases=('fluent_contents.contentitem',),
managers=[
('objects', ContentItemManager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=63)),
('quote', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Quotes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='QuotesContent',
fields=[
('contentitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('title', models.CharField(blank=True, max_length=63, null=True)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('quotes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Quotes')),
],
options={
'db_table': 'contentitem_cms_quotescontent',
'verbose_name': 'Quotes',
},
bases=('fluent_contents.contentitem',),
managers=[
('objects', ContentItemManager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='ResultPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('slug', models.SlugField(max_length=200, verbose_name='Slug')),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Stat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(b'manual', 'Manual input'), (b'people_involved', 'People involved'), (b'projects_realized', 'Projects realised'), (b'tasks_realized', 'Tasks realised'), (b'donated_total', 'Donated total'), (b'projects_online', 'Projects Online'), (b'votes_cast', 'Votes casts')], max_length=40)),
('title', models.CharField(max_length=63)),
('value', models.CharField(blank=True, help_text="Use this for 'manual' input or the override the calculated value.", max_length=63, null=True)),
],
),
migrations.CreateModel(
name='Stats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='StatsContent',
fields=[
('contentitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('title', models.CharField(blank=True, max_length=63, null=True)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('stats', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Stats')),
],
options={
'db_table': 'contentitem_cms_statscontent',
'verbose_name': 'Platform Statistics',
},
bases=('fluent_contents.contentitem',),
managers=[
('objects', ContentItemManager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='SurveyContent',
fields=[
('contentitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('title', models.CharField(blank=True, max_length=63, null=True)),
('sub_title', models.CharField(blank=True, max_length=100, null=True)),
('survey', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='surveys.Survey')),
],
options={
'db_table': 'contentitem_cms_surveycontent',
'verbose_name': 'Platform Results',
},
bases=('fluent_contents.contentitem',),
managers=[
('objects', ContentItemManager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.AddField(
model_name='stat',
name='stats',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Stats'),
),
migrations.AddField(
model_name='quote',
name='quotes',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Quotes'),
),
]
|
{
"content_hash": "7ad5ff373b35b9e6f8a66416ab1cee62",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 332,
"avg_line_length": 49.51744186046512,
"alnum_prop": 0.5634613126687801,
"repo_name": "onepercentclub/bluebottle",
"id": "7276019558604242454ee1e47f0481c2460010d0",
"size": "8590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/cms/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
import sys
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()
SCHEMA = {
'assessment': {
'assessmentName': STRING,
'preamble': STRING,
'checkAnswers': BOOLEAN,
'questionsList': [{
'questionHTML': STRING,
'lesson': STRING,
'choices': [STRING, CORRECT],
'correctAnswerNumeric': FLOAT,
'correctAnswerString': STRING,
'correctAnswerRegex': REGEX}]
}, 'activity': [
STRING,
{
'questionType': 'multiple choice',
'choices': [[STRING, BOOLEAN, STRING]]
}, {
'questionType': 'multiple choice group',
'questionsList': [{
'questionHTML': STRING,
'choices': [STRING],
'correctIndex': INTEGER}],
'allCorrectOutput': STRING,
'someIncorrectOutput': STRING
}, {
'questionType': 'freetext',
'correctAnswerRegex': REGEX,
'correctAnswerOutput': STRING,
'incorrectAnswerOutput': STRING,
'showAnswerOutput': STRING,
'showAnswerPrompt': STRING,
'outputHeight': STRING
}]}
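# Illustrative only: a minimal activity satisfying the SCHEMA above -- a
# free-form HTML string followed by one 'multiple choice' question whose
# choices are [text, is_correct, feedback] triples:
#
#   activity = [
#       '<p>Warm-up question:</p>',
#       {'questionType': 'multiple choice',
#        'choices': [['Option A', True, 'Correct!'],
#                    ['Option B', False, 'Try again.']]},
#   ]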
UNITS_HEADER = (
'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')
UNIT_CSV_TO_DB_CONVERTER = {
'id': ('id', int),
'type': ('type', unicode),
'unit_id': ('unit_id', unicode),
'title': ('title', unicode),
'release_date': ('release_date', unicode),
'now_available': ('now_available', bool)
}
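# Illustrative row for data/unit.csv under UNITS_HEADER (values assumed):
#   1,U,1,Introduction,2012-10-15,True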
LESSON_CSV_TO_DB_CONVERTER = {
'unit_id': ('unit_id', int),
# Field 'unit_title' is a duplicate of Unit.title. We enforce that both
# values are the same and ignore this value altogether.
'unit_title': None,
'lesson_id': ('id', int),
'lesson_title': ('title', unicode),
'lesson_activity': ('activity', unicode),
'lesson_activity_name': ('activity_title', unicode),
'lesson_video_id': ('video', unicode),
'lesson_objectives': ('objectives', unicode),
'lesson_notes': ('notes', unicode)
}
# pylint: disable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
def __init__(self, term_type, value=None):
self.term_type = term_type
self.value = value
def __eq__(self, other):
if type(other) is not Term:
return False
else:
return ((self.term_type == other.term_type) and
(self.value == other.value))
class SchemaException(Exception):
"""A class to represent a schema error."""
def format_primitive_value_name(self, name):
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == BOOLEAN:
return 'BOOLEAN'
return name
def format_primitive_type_name(self, name):
"""Formats a name for a primitive type."""
if name == BOOLEAN:
return 'BOOLEAN'
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == STRING or isinstance(name, str):
return 'STRING'
if name == FLOAT:
return 'FLOAT'
if name == INTEGER:
return 'INTEGER'
if isinstance(name, dict):
return '{...}'
if isinstance(name, list):
return '[...]'
return 'Unknown type name \'%s\'' % name.__class__.__name__
def format_type_names(self, names):
if isinstance(names, list):
captions = []
for name in names:
captions.append(self.format_primitive_type_name(name))
return captions
else:
return self.format_primitive_type_name(names)
def __init__(self, message, value=None, types=None, path=None):
prefix = ''
if path:
prefix = 'Error at %s\n' % path
if types:
if value:
message = prefix + message % (
self.format_primitive_value_name(value),
self.format_type_names(types))
else:
message = prefix + message % self.format_type_names(types)
else:
if value:
message = prefix + (
message % self.format_primitive_value_name(value))
else:
message = prefix + message
super(SchemaException, self).__init__(message)
class Context(object):
""""A class that manages a stack of traversal contexts."""
def __init__(self):
self.parent = None
self.path = ['/']
def new(self, names):
""""Derives a new context from the current one."""
context = Context()
context.parent = self
context.path = list(self.path)
if names:
if isinstance(names, list):
for name in names:
if name:
context.path.append('/' + '%s' % name)
else:
context.path.append('/' + '%s' % names)
return context
def format_path(self):
"""Formats the canonical name of this context."""
return ''.join(self.path)
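# Illustrative (traced from the class above): Context().new(['a', 'b'])
# produces format_path() == '//a/b' -- the root '/' plus one '/<name>'
# segment per element.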
class SchemaHelper(object):
"""A class that knows how to apply the schema."""
def __init__(self):
self.type_stats = {}
def visit_element(self, atype, value, context, is_terminal=True):
"""Callback for each schema element being traversed."""
if atype in self.type_stats:
count = self.type_stats[atype]
else:
count = 0
self.type_stats[atype] = count + 1
if is_terminal:
self.parse_log.append(' TERMINAL: %s %s = %s' % (
atype, context.format_path(), value))
else:
self.parse_log.append(' NON-TERMINAL: %s %s' % (
atype, context.format_path()))
def extract_all_terms_to_depth(self, key, values, type_map):
"""Walks schema type map recursively to depth."""
# Walks schema type map recursively to depth and creates a list of all
# possible {key: value} pairs. The latter is a list of all non-terminal
# and terminal terms allowed in the schema. The list of terms from this
# method can be bound to an execution context for evaluating whether a
# given instance's map complies with the schema.
if key:
type_map.update({key: key})
if values == REGEX:
type_map.update({'regex': lambda x: Term(REGEX, x)})
return
if values == CORRECT:
type_map.update({'correct': lambda x: Term(CORRECT, x)})
return
if values == BOOLEAN:
type_map.update(
{'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
return
if values == STRING or values == INTEGER:
return
if isinstance(values, dict):
for new_key, new_value in values.items():
self.extract_all_terms_to_depth(new_key, new_value, type_map)
return
if isinstance(values, list):
for new_value in values:
self.extract_all_terms_to_depth(None, new_value, type_map)
return
def find_selectors(self, type_map):
"""Finds all type selectors."""
# Finds all elements in the type map where both a key and a value are
# strings. These elements are used to find one specific type map among
# several alternative type maps.
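        # Example (illustrative): in SCHEMA above, the pair
        # 'questionType': 'multiple choice' is exactly such a selector; it
        # identifies which alternative dict an activity item must match.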
selector = {}
for akey, avalue in type_map.items():
if isinstance(akey, str) and isinstance(avalue, str):
selector.update({akey: avalue})
return selector
def find_compatible_dict(self, value_map, type_map, unused_context):
"""Find the type map most compatible with the value map."""
# A value map is considered compatible with a type map when former
# contains the same key names and the value types as the type map.
# special case when we have just one type; check name and type are the
# same
if len(type_map) == 1:
for value_key in value_map.keys():
for key in type_map[0].keys():
if value_key == key:
return key, type_map[0]
raise SchemaException(
"Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)
# case when we have several types to choose from
for adict in type_map:
dict_selector = self.find_selectors(adict)
for akey, avalue in dict_selector.items():
if value_map[akey] == avalue:
return akey, adict
return None, None
def check_single_value_matches_type(self, value, atype, context):
"""Checks if a single value matches a specific (primitive) type."""
if atype == BOOLEAN:
if isinstance(value, bool) or value.term_type == BOOLEAN:
self.visit_element('BOOLEAN', value, context)
return True
else:
raise SchemaException(
'Expected: \'true\' or \'false\'\nfound: %s', value)
if isinstance(atype, str):
if isinstance(value, str):
self.visit_element('str', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == STRING:
if isinstance(value, str):
self.visit_element('STRING', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == REGEX and value.term_type == REGEX:
self.visit_element('REGEX', value, context)
return True
if atype == CORRECT and value.term_type == CORRECT:
self.visit_element('CORRECT', value, context)
return True
if atype == FLOAT:
if is_number(value):
self.visit_element('NUMBER', value, context)
return True
else:
raise SchemaException('Expected: \'number\'\nfound: %s', value)
if atype == INTEGER:
if is_integer(value):
self.visit_element('INTEGER', value, context)
return True
else:
raise SchemaException(
'Expected: \'integer\'\nfound: %s', value,
path=context.format_path())
raise SchemaException(
'Unexpected value \'%s\'\n'
'for type %s', value, atype, path=context.format_path())
def check_value_list_matches_type(self, value, atype, context):
"""Checks if all items in value list match a specific type."""
for value_item in value:
found = False
for atype_item in atype:
if isinstance(atype_item, list):
for atype_item_item in atype_item:
if self.does_value_match_type(
value_item, atype_item_item, context):
found = True
break
else:
if self.does_value_match_type(
value_item, atype_item, context):
found = True
break
if not found:
raise SchemaException(
'Expected: \'%s\'\nfound: %s', atype, value)
return True
def check_value_matches_type(self, value, atype, context):
"""Checks if single value or a list of values match a specific type."""
if isinstance(atype, list) and isinstance(value, list):
return self.check_value_list_matches_type(value, atype, context)
else:
return self.check_single_value_matches_type(value, atype, context)
def does_value_match_type(self, value, atype, context):
"""Same as other method, but does not throw an exception."""
try:
return self.check_value_matches_type(value, atype, context)
except SchemaException:
return False
def does_value_match_one_of_types(self, value, types, context):
"""Checks if a value matches to one of the types in the list."""
type_names = None
if isinstance(types, list):
type_names = types
if type_names:
for i in range(0, len(type_names)):
if self.does_value_match_type(value, type_names[i], context):
return True
return False
def does_value_match_map_of_type(self, value, types, context):
"""Checks if value matches any variation of {...} type."""
# find all possible map types
maps = []
for atype in types:
if isinstance(atype, dict):
maps.append(atype)
if not maps and isinstance(types, dict):
maps.append(types)
# check if the structure of value matches one of the maps
if isinstance(value, dict):
aname, adict = self.find_compatible_dict(value, maps, context)
if adict:
self.visit_element('dict', value, context.new(aname), False)
for akey, avalue in value.items():
if akey not in adict:
raise SchemaException(
'Unknown term \'%s\'', akey,
path=context.format_path())
self.check_value_of_valid_type(
avalue, adict[akey], context.new([aname, akey]))
return True
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return False
def format_name_with_index(self, alist, aindex):
"""A function to format a context name with an array element index."""
if len(alist) == 1:
return ''
else:
return '[%s]' % aindex
def does_value_match_list_of_types_in_order(
self, value, types, context, target):
"""Iterates the value and types in given order and checks for match."""
all_values_are_lists = True
for avalue in value:
if not isinstance(avalue, list):
all_values_are_lists = False
if all_values_are_lists:
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], types, context.new(
self.format_name_with_index(value, i)), True)
else:
if len(target) != len(value):
raise SchemaException(
                    'Expected: \'%s\' values\nfound: %s.',
                    len(target), value, path=context.format_path())
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], target[i], context.new(
self.format_name_with_index(value, i)))
return True
def does_value_match_list_of_types_any_order(self, value, types,
context, lists):
"""Iterates the value and types, checks if they match in any order."""
target = lists
if not target:
if not isinstance(types, list):
raise SchemaException(
'Unsupported type %s',
None, types, path=context.format_path())
target = types
for i in range(0, len(value)):
found = False
for atarget in target:
try:
self.check_value_of_valid_type(
value[i], atarget,
context.new(self.format_name_with_index(value, i)))
found = True
break
except SchemaException as unused_e:
continue
if not found:
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return True
def does_value_match_list_of_type(self, value, types, context, in_order):
"""Checks if a value matches a variation of [...] type."""
# Extra argument controls whether matching must be done in a specific
        # or in any order. A specific order is demanded by the [[...]] form,
        # i.e. [[STRING, INTEGER, BOOLEAN]]; sub elements inside {...} and
# [...] can be matched in any order.
# prepare a list of list types
lists = []
for atype in types:
if isinstance(atype, list):
lists.append(atype)
if len(lists) > 1:
raise SchemaException(
'Unable to validate types with multiple alternative '
'lists %s', None, types, path=context.format_path())
if isinstance(value, list):
if len(lists) > 1:
raise SchemaException(
'Allowed at most one list\nfound: %s.',
None, types, path=context.format_path())
# determine if list is in order or not as hinted by double array
# [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
# demands order
ordered = len(lists) == 1 and isinstance(types, list)
if in_order or ordered:
return self.does_value_match_list_of_types_in_order(
value, types, context, lists[0])
else:
return self.does_value_match_list_of_types_any_order(
value, types, context, lists)
return False
def check_value_of_valid_type(self, value, types, context, in_order=None):
"""Check if a value matches any of the given types."""
if not (isinstance(types, list) or isinstance(types, dict)):
self.check_value_matches_type(value, types, context)
return
if (self.does_value_match_list_of_type(value, types,
context, in_order) or
self.does_value_match_map_of_type(value, types, context) or
self.does_value_match_one_of_types(value, types, context)):
return
raise SchemaException(
'Unknown type %s', value, path=context.format_path())
def check_instances_match_schema(self, values, types, name):
"""Recursively decompose 'values' to see if they match schema types."""
self.parse_log = []
context = Context().new(name)
self.parse_log.append(' ROOT %s' % context.format_path())
# pylint: disable-msg=protected-access
values_class = values.__class__
# pylint: enable-msg=protected-access
# handle {..} containers
if isinstance(types, dict):
if not isinstance(values, dict):
raise SchemaException(
'Error at \'/\': expected {...}, found %s' % (
values_class.__name__))
self.check_value_of_valid_type(values, types, context.new([]))
return
# handle [...] containers
if isinstance(types, list):
if not isinstance(values, list):
raise SchemaException(
'Error at \'/\': expected [...], found %s' % (
values_class.__name__))
for i in range(0, len(values)):
self.check_value_of_valid_type(
values[i], types, context.new('[%s]' % i))
return
raise SchemaException(
'Expected an array or a dictionary.', None,
path=context.format_path())
def escape_quote(value):
return unicode(value).replace('\'', r'\'')
class Unit(object):
"""A class to represent a Unit."""
def __init__(self):
self.id = 0
self.type = ''
self.unit_id = ''
self.title = ''
self.release_date = ''
self.now_available = False
def list_properties(self, name, output):
"""Outputs all properties of the unit."""
output.append('%s[\'id\'] = %s;' % (name, self.id))
output.append('%s[\'type\'] = \'%s\';' % (
name, escape_quote(self.type)))
output.append('%s[\'unit_id\'] = \'%s\';' % (
name, escape_quote(self.unit_id)))
output.append('%s[\'title\'] = \'%s\';' % (
name, escape_quote(self.title)))
output.append('%s[\'release_date\'] = \'%s\';' % (
name, escape_quote(self.release_date)))
output.append('%s[\'now_available\'] = %s;' % (
name, str(self.now_available).lower()))
class Lesson(object):
"""A class to represent a Lesson."""
def __init__(self):
self.unit_id = 0
self.unit_title = ''
self.lesson_id = 0
self.lesson_title = ''
self.lesson_activity = ''
self.lesson_activity_name = ''
self.lesson_notes = ''
self.lesson_video_id = ''
self.lesson_objectives = ''
def list_properties(self, name, output):
"""Outputs all properties of the lesson."""
activity = 'false'
if self.lesson_activity == 'yes':
activity = 'true'
output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
output.append('%s[\'unit_title\'] = \'%s\';' % (
name, escape_quote(self.unit_title)))
output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
output.append('%s[\'lesson_title\'] = \'%s\';' % (
name, escape_quote(self.lesson_title)))
output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
output.append('%s[\'lesson_activity_name\'] = \'%s\';' % (
name, escape_quote(self.lesson_activity_name)))
output.append('%s[\'lesson_notes\'] = \'%s\';' % (
name, escape_quote(self.lesson_notes)))
output.append('%s[\'lesson_video_id\'] = \'%s\';' % (
name, escape_quote(self.lesson_video_id)))
output.append('%s[\'lesson_objectives\'] = \'%s\';' % (
name, escape_quote(self.lesson_objectives)))
def to_id_string(self):
return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
"""A class to represent a Assessment."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
"""A class to represent a Activity."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
pass
def echo(message):
print message
def is_integer(s):
try:
return int(s) == float(s)
except ValueError:
return False
def is_boolean(s):
try:
return s == 'True' or s == 'False'
except ValueError:
return False
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def is_one_of(value, values):
for current in values:
if value == current:
return True
return False
def text_to_line_numbered_text(text):
"""Adds line numbers to the provided text."""
lines = text.split('\n')
results = []
i = 1
for line in lines:
results.append(str(i) + ': ' + line)
i += 1
return '\n '.join(results)
def set_object_attributes(target_object, names, values):
"""Sets object attributes from provided values."""
if len(names) != len(values):
raise SchemaException(
'The number of elements must match: %s and %s' % (names, values))
for i in range(0, len(names)):
if is_integer(values[i]):
            # if we are setting an attribute of an object that supports
            # metadata, try to infer the target type and convert 'int' into
            # 'str' here
target_type = None
if hasattr(target_object.__class__, names[i]):
attribute = getattr(target_object.__class__, names[i])
if hasattr(attribute, 'data_type'):
target_type = attribute.data_type.__name__
if target_type and (target_type == 'str' or
target_type == 'basestring'):
setattr(target_object, names[i], str(values[i]))
else:
setattr(target_object, names[i], int(values[i]))
continue
if is_boolean(values[i]):
            # bool('False') is True, so compare against the string literal
            setattr(target_object, names[i], values[i] == 'True')
continue
setattr(target_object, names[i], values[i])
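# For example (illustrative call): set_object_attributes(unit, ['id',
# 'title'], ['3', 'Intro']) stores the integer 3 and the string 'Intro' on
# unit, unless the class declares a string-typed 'id' property, in which
# case the value is kept as a string.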
def read_objects_from_csv_stream(stream, header, new_object):
return read_objects_from_csv(csv.reader(stream), header, new_object)
def read_objects_from_csv_file(fname, header, new_object):
return read_objects_from_csv_stream(open(fname), header, new_object)
def read_objects_from_csv(value_rows, header, new_object):
"""Reads objects from the rows of a CSV file."""
values = []
for row in value_rows:
if not row:
continue
values.append(row)
names = header.split(',')
if names != values[0]:
raise SchemaException(
'Error reading CSV header.\n '
'Header row had %s element(s): %s\n '
'Expected header row with %s element(s): %s' % (
len(values[0]), values[0], len(names), names))
items = []
for i in range(1, len(values)):
if len(names) != len(values[i]):
raise SchemaException(
'Error reading CSV data row.\n '
'Row #%s had %s element(s): %s\n '
'Expected %s element(s): %s' % (
i, len(values[i]), values[i], len(names), names))
# Decode string values in case they were encoded in UTF-8. The CSV
# reader should do this automatically, but it does not. The issue is
# discussed here: http://docs.python.org/2/library/csv.html
decoded_values = []
for value in values[i]:
if isinstance(value, basestring):
value = unicode(value.decode('utf-8'))
decoded_values.append(value)
item = new_object()
set_object_attributes(item, names, decoded_values)
items.append(item)
return items
def escape_javascript_regex(text):
return re.sub(
r'([:][ ]*)([/])(.*)([/][ismx]*)', r': regex("\2\3\4")', text)
def remove_javascript_single_line_comment(text):
text = re.sub(re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE), r'\1', text)
text = re.sub(re.compile('^//(.*)$', re.MULTILINE), r'', text)
return text
def remove_javascript_multi_line_comment(text):
# pylint: disable-msg=anomalous-backslash-in-string
return re.sub(
re.compile('/\*(.*)\*/', re.MULTILINE + re.DOTALL), r'', text)
# pylint: enable-msg=anomalous-backslash-in-string
def parse_content_marked_no_verify(content):
"""Parses and returns a tuple of real content and no-verify text."""
# If you have any free-form JavaScript in the activity file, you need
# to place it between //<gcb-no-verify> ... //</gcb-no-verify> tags
# so that the verifier can selectively ignore it.
pattern = re.compile('%s(.*)%s' % (
NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
m = pattern.search(content)
noverify_text = None
if m:
noverify_text = m.group(1)
return (re.sub(pattern, '', content), noverify_text)
def convert_javascript_to_python(content, root_name):
"""Removes JavaScript specific syntactic constructs and returns a tuple."""
# Reads the content and removes JavaScript comments, var's, and escapes
# regular expressions.
(content, noverify_text) = parse_content_marked_no_verify(content)
content = remove_javascript_multi_line_comment(content)
content = remove_javascript_single_line_comment(content)
content = content.replace('var %s = ' % root_name, '%s = ' % root_name)
content = escape_javascript_regex(content)
return (content, noverify_text)
def convert_javascript_file_to_python(fname, root_name):
return convert_javascript_to_python(
''.join(open(fname, 'r').readlines()), root_name)
def evaluate_python_expression_from_text(content, root_name, scope,
noverify_text):
"""Compiles and evaluates a Python script in a restricted environment."""
# First compiles and then evaluates a Python script text in a restricted
# environment using provided bindings. Returns the resulting bindings if
# evaluation completed.
# create a new execution scope that has only the schema terms defined;
# remove all other languages constructs including __builtins__
restricted_scope = {}
restricted_scope.update(scope)
restricted_scope.update({'__builtins__': {}})
code = compile(content, '<string>', 'exec')
# pylint: disable-msg=exec-statement
exec code in restricted_scope
# pylint: enable-msg=exec-statement
if noverify_text:
restricted_scope['noverify'] = noverify_text
    if not restricted_scope.get(root_name):
raise Exception('Unable to find \'%s\'' % root_name)
return restricted_scope
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
(content, noverify_text) = convert_javascript_file_to_python(fname,
root_name)
try:
return evaluate_python_expression_from_text(content, root_name, scope,
noverify_text)
except:
error('Unable to parse %s in file %s\n %s' % (
root_name, fname, text_to_line_numbered_text(content)))
for message in sys.exc_info():
error(str(message))
raise
class Verifier(object):
"""Verifies Units, Lessons, Assessments, Activities and their relations."""
def __init__(self):
self.echo_func = silent_echo
self.schema_helper = SchemaHelper()
self.errors = 0
self.warnings = 0
self.export = []
def verify_unit_fields(self, units):
self.export.append('units = Array();')
for unit in units:
if not is_one_of(unit.now_available, [True, False]):
self.error(
'Bad now_available \'%s\' for unit id %s; expected '
'\'True\' or \'False\'' % (unit.now_available, unit.id))
if not is_one_of(unit.type, ['U', 'A', 'O']):
self.error(
'Bad type \'%s\' for unit id %s; '
'expected \'U\', \'A\', or \'O\'' % (unit.type, unit.id))
if unit.type == 'A':
if not is_one_of(unit.unit_id, ('Pre', 'Mid', 'Fin')):
self.error(
'Bad unit_id \'%s\'; expected \'Pre\', \'Mid\' or '
'\'Fin\' for unit id %s' % (unit.unit_id, unit.id))
if unit.type == 'U':
if not is_integer(unit.unit_id):
self.error(
                        'Expected integer unit_id, found %s in unit '
                        'id %s' % (unit.unit_id, unit.id))
self.export.append('')
self.export.append('units[%s] = Array();' % unit.id)
self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
unit.list_properties('units[%s]' % unit.id, self.export)
def verify_lesson_fields(self, lessons):
for lesson in lessons:
if not is_one_of(lesson.lesson_activity, ['yes', '']):
self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
lesson.lesson_activity, lesson.lesson_id))
self.export.append('')
self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
lesson.unit_id, lesson.lesson_id))
lesson.list_properties('units[%s][\'lessons\'][%s]' % (
lesson.unit_id, lesson.lesson_id), self.export)
def verify_unit_lesson_relationships(self, units, lessons):
"""Checks each lesson points to a unit and all lessons are in use."""
used_lessons = []
units.sort(key=lambda x: x.id)
for i in range(0, len(units)):
unit = units[i]
# check that unit ids are 1-based and sequential
if unit.id != i + 1:
self.error('Unit out of order: %s' % (unit.id))
# get the list of lessons for each unit
self.fine('Unit %s: %s' % (unit.id, unit.title))
unit_lessons = []
for lesson in lessons:
if lesson.unit_id == unit.unit_id:
if not lesson.unit_title == unit.title:
raise Exception(''.join([
'A unit_title of a lesson (id=%s) must match ',
'title of a unit (id=%s) the lesson belongs to.'
]) % (lesson.lesson_id, lesson.unit_id))
unit_lessons.append(lesson)
used_lessons.append(lesson)
# inspect all lessons for the current unit
unit_lessons.sort(key=lambda x: x.lesson_id)
for j in range(0, len(unit_lessons)):
lesson = unit_lessons[j]
# check that lesson_ids are 1-based and sequential
if lesson.lesson_id != j + 1:
self.warn(
                        'Lesson lesson_id is out of order: expected %s, '
                        'found %s (%s)' % (
j + 1, lesson.lesson_id, lesson.to_id_string()))
self.fine(' Lesson %s: %s' % (
lesson.lesson_id, lesson.lesson_title))
# find lessons not used by any of the units
unused_lessons = list(lessons)
for lesson in used_lessons:
unused_lessons.remove(lesson)
for lesson in unused_lessons:
self.warn('Unused lesson_id %s (%s)' % (
lesson.lesson_id, lesson.to_id_string()))
# check all lessons point to known units
for lesson in lessons:
has = False
for unit in units:
if lesson.unit_id == unit.unit_id:
has = True
break
if not has:
self.error('Lesson has unknown unit_id %s (%s)' % (
lesson.unit_id, lesson.to_id_string()))
def verify_activities(self, lessons):
"""Loads and verifies all activities."""
self.info('Loading activities:')
count = 0
for lesson in lessons:
if lesson.lesson_activity == 'yes':
count += 1
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/activity-' + str(lesson.unit_id) + '.' +
str(lesson.lesson_id) + '.js')
if not os.path.exists(fname):
self.error(' Missing activity: %s' % fname)
else:
activity = evaluate_javascript_expression_from_file(
fname, 'activity', Activity().scope, self.error)
self.verify_activity_instance(activity, fname)
self.export.append('')
self.encode_activity_json(
activity, lesson.unit_id, lesson.lesson_id)
self.info('Read %s activities' % count)
def verify_assessment(self, units):
"""Loads and verifies all assessments."""
self.export.append('')
self.export.append('assessments = Array();')
self.info('Loading assessment:')
count = 0
for unit in units:
if unit.type == 'A':
count += 1
assessment_name = str(unit.unit_id)
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/assessment-' + assessment_name + '.js')
if not os.path.exists(fname):
self.error(' Missing assessment: %s' % fname)
else:
assessment = evaluate_javascript_expression_from_file(
fname, 'assessment', Assessment().scope, self.error)
self.verify_assessment_instance(assessment, fname)
self.export.append('')
self.encode_assessment_json(assessment, assessment_name)
self.info('Read %s assessments' % count)
# NB: The exported script needs to define a gcb_regex() wrapper function
@staticmethod
def encode_regex(regex_str):
"""Encodes a JavaScript-style regex into a Python gcb_regex call."""
# parse the regex into the base and modifiers. e.g., for /foo/i
# base is 'foo' and modifiers is 'i'
assert regex_str[0] == '/'
# find the LAST '/' in regex_str (because there might be other
# escaped '/' characters in the middle of regex_str)
final_slash_index = regex_str.rfind('/')
assert final_slash_index > 0
base = regex_str[1:final_slash_index]
modifiers = regex_str[final_slash_index+1:]
func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
return func_str
def encode_activity_json(self, activity_dict, unit_id, lesson_id):
"""Encodes an activity dictionary into JSON."""
output = []
for elt in activity_dict['activity']:
t = type(elt)
encoded_elt = None
if t is str:
encoded_elt = {'type': 'string', 'value': elt}
elif t is dict:
qt = elt['questionType']
encoded_elt = {'type': qt}
if qt == 'multiple choice':
choices = elt['choices']
encoded_choices = [[x, y.value, z] for x, y, z in choices]
encoded_elt['choices'] = encoded_choices
elif qt == 'multiple choice group':
                    # all nested values are primitive types that can be encoded
elt_copy = dict(elt)
del elt_copy['questionType'] # redundant
encoded_elt['value'] = elt_copy
elif qt == 'freetext':
for k in elt.keys():
if k == 'questionType':
continue
elif k == 'correctAnswerRegex':
encoded_elt[k] = Verifier.encode_regex(elt[k].value)
else:
# ordinary string
encoded_elt[k] = elt[k]
else:
assert False
else:
assert False
assert encoded_elt
output.append(encoded_elt)
# N.B.: make sure to get the string quoting right!
code_str = "units[%s]['lessons'][%s]['activity'] = " % (
unit_id, lesson_id) + repr(json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in activity_dict:
self.export.append('')
noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
self.export.append(noverify_code_str)
def encode_assessment_json(self, assessment_dict, assessment_name):
"""Encodes an assessment dictionary into JSON."""
real_dict = assessment_dict['assessment']
output = {}
output['assessmentName'] = real_dict['assessmentName']
if 'preamble' in real_dict:
output['preamble'] = real_dict['preamble']
output['checkAnswers'] = real_dict['checkAnswers'].value
encoded_questions_list = []
for elt in real_dict['questionsList']:
encoded_elt = {}
encoded_elt['questionHTML'] = elt['questionHTML']
if 'lesson' in elt:
encoded_elt['lesson'] = elt['lesson']
if 'correctAnswerNumeric' in elt:
encoded_elt['correctAnswerNumeric'] = elt[
'correctAnswerNumeric']
if 'correctAnswerString' in elt:
encoded_elt['correctAnswerString'] = elt['correctAnswerString']
if 'correctAnswerRegex' in elt:
encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
elt['correctAnswerRegex'].value)
if 'choices' in elt:
encoded_choices = []
correct_answer_index = None
for (ind, e) in enumerate(elt['choices']):
if type(e) is str:
encoded_choices.append(e)
elif e.term_type == CORRECT:
encoded_choices.append(e.value)
correct_answer_index = ind
else:
raise Exception("Invalid type in 'choices'")
encoded_elt['choices'] = encoded_choices
encoded_elt['correctAnswerIndex'] = correct_answer_index
encoded_questions_list.append(encoded_elt)
output['questionsList'] = encoded_questions_list
# N.B.: make sure to get the string quoting right!
code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in assessment_dict:
self.export.append('')
noverify_code_str = ('assessments[\'' + assessment_name +
'\'] = ' + repr(assessment_dict['noverify']) +
';')
self.export.append(noverify_code_str)
def format_parse_log(self):
return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)
def verify_assessment_instance(self, scope, fname):
"""Verifies compliance of assessment with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['assessment'], SCHEMA['assessment'], 'assessment')
self.info(' Verified assessment %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in assessment %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'assessment =\' in %s' % fname)
def verify_activity_instance(self, scope, fname):
"""Verifies compliance of activity with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['activity'], SCHEMA['activity'], 'activity')
self.info(' Verified activity %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in activity %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'activity =\' in %s' % fname)
def fine(self, x):
if OUTPUT_FINE_LOG:
self.echo_func('FINE: ' + x)
def info(self, x):
self.echo_func('INFO: ' + x)
def warn(self, x):
self.warnings += 1
self.echo_func('WARNING: ' + x)
def error(self, x):
self.errors += 1
self.echo_func('ERROR: ' + x)
def load_and_verify_model(self, echo_func):
"""Loads, parses and verifies all content for a course."""
self.echo_func = echo_func
self.info('Started verification in: %s' % __file__)
unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
lesson_file = os.path.join(
os.path.dirname(__file__), '../data/lesson.csv')
self.info('Loading units from: %s' % unit_file)
units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
self.info('Read %s units' % len(units))
self.info('Loading lessons from: %s' % lesson_file)
lessons = read_objects_from_csv_file(
lesson_file, LESSONS_HEADER, Lesson)
self.info('Read %s lessons' % len(lessons))
self.verify_unit_fields(units)
self.verify_lesson_fields(lessons)
self.verify_unit_lesson_relationships(units, lessons)
try:
self.verify_activities(lessons)
self.verify_assessment(units)
except SchemaException as e:
self.error(str(e))
self.info('Schema usage statistics: %s' % self.schema_helper.type_stats)
self.info('Completed verification: %s warnings, %s errors.' % (
self.warnings, self.errors))
return self.warnings, self.errors
def run_all_regex_unit_tests():
"""Executes all tests related to regular expressions."""
# pylint: disable-msg=anomalous-backslash-in-string
assert escape_javascript_regex(
'blah regex: /site:bls.gov?/i, blah') == (
'blah regex: regex(\"/site:bls.gov?/i\"), blah')
assert escape_javascript_regex(
'blah regex: /site:http:\/\/www.google.com?q=abc/i, blah') == (
'blah regex: regex(\"/site:http:\/\/www.google.com?q=abc/i\"), '
'blah')
assert remove_javascript_multi_line_comment(
'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah'
assert remove_javascript_multi_line_comment(
'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah')
assert remove_javascript_single_line_comment(
'blah\n// comment\nblah') == 'blah\n\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah http://www.foo.com\nblah') == (
'blah\nblah http://www.foo.com\nblah')
assert remove_javascript_single_line_comment(
'blah\nblah // comment\nblah') == 'blah\nblah\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah // comment http://www.foo.com\nblah') == (
'blah\nblah\nblah')
assert parse_content_marked_no_verify(
'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
'blah1\n// \nblah3')
# pylint: enable-msg=anomalous-backslash-in-string
assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')"""
assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') ==
r"""gcb_regex('jane austen (book|books) \\-price', 'i')""")
assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') ==
r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""")
assert (Verifier.encode_regex('/Big Time College Sport?/i') ==
"gcb_regex('Big Time College Sport?', 'i')")
assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') ==
r"""gcb_regex('354\\s*[+]\\s*651', '')""")
def run_all_schema_helper_unit_tests():
"""Executes all tests related to schema validation."""
def assert_same(a, b):
if a != b:
raise Exception('Expected:\n %s\nFound:\n %s' % (a, b))
def assert_pass(instances, types, expected_result=None):
try:
schema_helper = SchemaHelper()
result = schema_helper.check_instances_match_schema(
instances, types, 'test')
if OUTPUT_DEBUG_LOG:
print '\n'.join(schema_helper.parse_log)
if expected_result:
assert_same(expected_result, result)
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
print '\n'.join(schema_helper.parse_log)
raise
def assert_fails(func):
try:
func()
raise Exception('Expected to fail')
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
def assert_fail(instances, types):
assert_fails(lambda: assert_pass(instances, types))
def create_python_dict_from_js_object(js_object):
python_str, noverify = convert_javascript_to_python(
'var x = ' + js_object, 'x')
ret = evaluate_python_expression_from_text(
python_str, 'x', Assessment().scope, noverify)
return ret['x']
# CSV tests
read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type', Unit)
def reader_one():
return read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_one)
def reader_two():
read_objects_from_csv(
[['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_two)
# context tests
assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
('//a/b/c'))
# simple map tests
assert_pass({'name': 'Bob'}, {'name': STRING}, None)
assert_fail('foo', 'bar')
assert_fail({'name': 'Bob'}, {'name': INTEGER})
assert_fail({'name': 12345}, {'name': STRING})
assert_fail({'amount': 12345}, {'name': INTEGER})
assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
assert_pass({'name': 'Bob'},
{'phone': STRING, 'name': STRING, 'age': INTEGER})
# mixed attributes tests
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_pass({'colors': []}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
{'colors': [STRING]})
assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
{'colors': [STRING]})
assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})
# required attribute tests
assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
assert_fail({'colors': ['red', 5, 'FF0000']},
{'colors': [[STRING, INTEGER]]})
# an array and a map of primitive type tests
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': INTEGER}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': {'hex': STRING}}})
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_pass({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'name': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
assert_fail({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'phone': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
# boolean type tests
assert_pass({'name': 'Bob', 'active': True},
{'name': STRING, 'active': BOOLEAN})
assert_pass({'name': 'Bob', 'active': [5, True, False]},
{'name': STRING, 'active': [INTEGER, BOOLEAN]})
assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
{'name': STRING, 'active': [[INTEGER, BOOLEAN]]})
# optional attribute tests
assert_pass({'points':
[{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
{'x': 2, 'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'points':
[{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'account':
[{'name': 'Bob', 'age': 25, 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
assert_pass({'account':
[{'name': 'Bob', 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
# nested array tests
assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [[BOOLEAN]]})
assert_fail({'name': 'Bob', 'active': [True]},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'Bob', 'active': ['true']},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
assert_pass({'name': 'flowers', 'price':
[['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
# selector tests
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'NY', 'drink': 'wine'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'CA', 'food': 'nuts'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
# creating from dict tests
assert_same(create_python_dict_from_js_object('{"active": true}'),
{'active': Term(BOOLEAN, True)})
assert_same(create_python_dict_from_js_object(
'{"a": correct("hello world")}'),
{'a': Term(CORRECT, 'hello world')})
assert_same(create_python_dict_from_js_object('{"a": /hello/i}'),
{'a': Term(REGEX, '/hello/i')})
def run_all_unit_tests():
run_all_regex_unit_tests()
run_all_schema_helper_unit_tests()
run_all_unit_tests()
if __name__ == '__main__':
Verifier().load_and_verify_model(echo)
|
{
"content_hash": "b85b023749e2a32aaf4e95914c4be49e",
"timestamp": "",
"source": "github",
"line_count": 1517,
"max_line_length": 80,
"avg_line_length": 38.17930125247199,
"alnum_prop": 0.5344452501812907,
"repo_name": "supunkamburugamuve/mooc2",
"id": "dda6cb8d432e55fc8bd255eaee915fbf9782930a",
"size": "57918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/verify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "239903"
},
{
"name": "Python",
"bytes": "249730"
}
],
"symlink_target": ""
}
|
import os
import sys
import traceback
import greenhouse
import junction
def main(environ, argv):
greenhouse.global_exception_handler(traceback.print_exception)
junction.configure_logging(level=1)
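    # Hub(local_addr, peer_addrs): listen on port 9057 and dial out to a peer
    # hub expected on port 9056 (our reading of the call below).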
hub = junction.Hub(("", 9057), [("", 9056)])
hub.start()
greenhouse.Event().wait()
if __name__ == '__main__':
exit(main(os.environ, sys.argv))
|
{
"content_hash": "7643e7eee64913a29707524e99d4b59a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 18.5,
"alnum_prop": 0.6702702702702703,
"repo_name": "teepark/junction",
"id": "14bfbba926b0625b1f64684032104c2164445b30",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/streaming/relayer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "213472"
}
],
"symlink_target": ""
}
|
"""
Tests that a loadable_module target is built correctly.
"""
from __future__ import print_function
import TestGyp
import os
import struct
import sys
if sys.platform == 'darwin':
print("This test is currently disabled: https://crbug.com/483696.")
sys.exit(0)
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'loadable-module'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Binary.
binary = test.built_file_path(
'test_loadable_module.plugin/Contents/MacOS/test_loadable_module',
chdir=CHDIR)
test.must_exist(binary)
MH_BUNDLE = 8
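# The fourth 32-bit word of a Mach-O header is the filetype field;
# MH_BUNDLE (8) marks a loadable bundle.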
if struct.unpack('4I', open(binary, 'rb').read(16))[3] != MH_BUNDLE:
test.fail_test()
# Info.plist.
info_plist = test.built_file_path(
'test_loadable_module.plugin/Contents/Info.plist', chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, """
<key>CFBundleExecutable</key>
<string>test_loadable_module</string>
""")
# PkgInfo.
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/PkgInfo', chdir=CHDIR)
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/Resources', chdir=CHDIR)
test.pass_test()
|
{
"content_hash": "f35de3a5434e6657a9e12c6da0719dea",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 25.5,
"alnum_prop": 0.690359477124183,
"repo_name": "chromium/gyp",
"id": "77dde1d6cd1975a812f901458bdadafe93a86251",
"size": "1404",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/mac/gyptest-loadable-module.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1133"
},
{
"name": "Batchfile",
"bytes": "1115"
},
{
"name": "C",
"bytes": "39382"
},
{
"name": "C++",
"bytes": "43138"
},
{
"name": "Emacs Lisp",
"bytes": "14357"
},
{
"name": "Objective-C",
"bytes": "14848"
},
{
"name": "Objective-C++",
"bytes": "2333"
},
{
"name": "Python",
"bytes": "2235370"
},
{
"name": "Shell",
"bytes": "18495"
},
{
"name": "Swift",
"bytes": "116"
}
],
"symlink_target": ""
}
|
"""Shared logger for cwltool."""
import logging
_logger = logging.getLogger("cwltool") # pylint: disable=invalid-name
defaultStreamHandler = logging.StreamHandler() # pylint: disable=invalid-name
_logger.addHandler(defaultStreamHandler)
_logger.setLevel(logging.INFO)
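# Illustrative use from another module (a sketch, not part of this file):
#     from cwltool.loghandler import _logger
#     _logger.info("workflow loaded")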
|
{
"content_hash": "7e5262501e2adee4cd5e87bf6feaeb14",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 78,
"avg_line_length": 38.714285714285715,
"alnum_prop": 0.7859778597785978,
"repo_name": "dleehr/cwltool",
"id": "c720ddef89437e8b8c79e4ed305e110925380c20",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cwltool/loghandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "363"
},
{
"name": "Common Workflow Language",
"bytes": "227670"
},
{
"name": "Dockerfile",
"bytes": "791"
},
{
"name": "Java",
"bytes": "144"
},
{
"name": "JavaScript",
"bytes": "197079"
},
{
"name": "Makefile",
"bytes": "20194"
},
{
"name": "Python",
"bytes": "1650756"
},
{
"name": "Shell",
"bytes": "15892"
},
{
"name": "Tcl",
"bytes": "462"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations
from ..models import IndexEntry
table = IndexEntry._meta.db_table
class Migration(migrations.Migration):
dependencies = [
('postgres_search', '0001_initial'),
]
operations = [
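        # RunSQL(sql, reverse_sql): the forward statement drops the index
        # that the 0001_initial migration created by hand; the reverse
        # statement recreates it so this migration can be unapplied.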
migrations.RunSQL(
'DROP INDEX {}_body_search;'.format(table),
'CREATE INDEX {0}_body_search ON {0} '
'USING GIN(body_search);'.format(table),
),
migrations.RenameField(
model_name='indexentry',
old_name='body_search',
new_name='body',
),
migrations.AddField(
model_name='indexentry',
name='autocomplete',
field=django.contrib.postgres.search.SearchVectorField(default=''),
preserve_default=False,
),
migrations.AddIndex(
model_name='indexentry',
index=django.contrib.postgres.indexes.GinIndex(
fields=['autocomplete'],
name='postgres_se_autocom_ee48c8_gin'),
),
migrations.AddIndex(
model_name='indexentry',
index=django.contrib.postgres.indexes.GinIndex(
fields=['body'],
name='postgres_se_body_aaaa99_gin'),
),
]
|
{
"content_hash": "1f1896419d4e730a9cc76dc05f413c34",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 28.659574468085108,
"alnum_prop": 0.5657015590200446,
"repo_name": "timorieber/wagtail",
"id": "29ae599ab3625e0f69d0b3cd9a19f4bcacb6020a",
"size": "1420",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "wagtail/contrib/postgres_search/migrations/0002_add_autocomplete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "185324"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "383475"
},
{
"name": "JavaScript",
"bytes": "267615"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3711005"
},
{
"name": "Shell",
"bytes": "8867"
}
],
"symlink_target": ""
}
|
"""
sentry.plugins.sentry_sites
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
|
{
"content_hash": "ddcde1783178951432ff2e18fa40051b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6132596685082873,
"repo_name": "primepix/django-sentry",
"id": "0fa8e33e122a17b769a2d1828ad644af2b91cc18",
"size": "181",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sentry/plugins/sentry_sites/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20952"
},
{
"name": "JavaScript",
"bytes": "10544"
},
{
"name": "Python",
"bytes": "300510"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
}
|
import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
# These fake snapshot IDs are used by our tests.
snapshot_id = fake.SNAPSHOT_ID
bad_snp_id = fake.WILL_NOT_BE_FOUND_ID
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def api_snapshot_get(self, context, snp_id):
"""Replacement for cinder.volume.api.API.get_snapshot.
We stub the cinder.volume.api.API.get_snapshot method to check for the
existence of snapshot_id in our list of fake snapshots and raise an
exception if the specified snapshot ID is not in our list.
"""
snapshot = {'id': fake.SNAPSHOT_ID,
'progress': '100%',
'volume_id': fake.VOLUME_ID,
'project_id': fake.PROJECT_ID,
'status': fields.SnapshotStatus.AVAILABLE}
if snp_id == snapshot_id:
snapshot_objct = fake_snapshot.fake_snapshot_obj(context, **snapshot)
return snapshot_objct
else:
raise exception.SnapshotNotFound(snapshot_id=snp_id)
@mock.patch('cinder.volume.api.API.get_snapshot', api_snapshot_get)
class SnapshotUnmanageTest(test.TestCase):
"""Test cases for cinder/api/contrib/snapshot_unmanage.py
The API extension adds an action to snapshots, "os-unmanage", which will
effectively issue a delete operation on the snapshot, but with a flag set
that means that a different method will be invoked on the driver, so that
the snapshot is not actually deleted in the storage backend.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into cinder.volume.api.API.delete_snapshot with the
correct arguments.
"""
def _get_resp(self, snapshot_id):
"""Helper to build an os-unmanage req for the specified snapshot_id."""
req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
fake.PROJECT_ID, snapshot_id))
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = context.RequestContext(fake.USER_ID,
fake.PROJECT_ID,
True)
body = {'os-unmanage': ''}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(app())
return res
@mock.patch('cinder.db.conditional_update', return_value=1)
@mock.patch('cinder.db.snapshot_update')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot')
def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_db_update,
mock_conditional_update):
"""Return success for valid and unattached volume."""
res = self._get_resp(snapshot_id)
self.assertEqual(1, mock_rpcapi.call_count)
self.assertEqual(3, len(mock_rpcapi.call_args[0]))
self.assertEqual(0, len(mock_rpcapi.call_args[1]))
self.assertEqual(http_client.ACCEPTED, res.status_int, res)
def test_unmanage_snapshot_bad_snapshot_id(self):
"""Return 404 if the volume does not exist."""
res = self._get_resp(bad_snp_id)
self.assertEqual(http_client.NOT_FOUND, res.status_int, res)
|
{
"content_hash": "1761355e61c42f4c1c8a648e1d28c023",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 39.956989247311824,
"alnum_prop": 0.6590419806243273,
"repo_name": "phenoxim/cinder",
"id": "24e79b2a0c7df3a9f8b84637b1ea3947f356cee5",
"size": "4334",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/api/contrib/test_snapshot_unmanage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import hankel
from scipy.sparse import lil_matrix
from scipy.sparse import csr_matrix as spmatrix
class StructureMeta:
__metaclass__ = ABCMeta
@abstractmethod
def struct_from_vec(self, x):
# reads an input vector
# returns a structured matrix
pass
@abstractmethod
def vec_from_struct(self, X):
# reads a structured matrix
# returns the underlying data vector
pass
@abstractmethod
def orth_proj(self, X):
# reads an unstructured matrix
# returns its orthogonal projection on the space of structured matrices
pass
@abstractmethod
def vec_via_orth_proj(self, X):
# reads an unstructured matrix
# returns the underlying data vector of its orthogonal projection on the space of structured matrices
pass
class Hankel(StructureMeta):
def __init__(self, m, n):
self.m = m
self.n = n
N = m + n - 1
self.N = N
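        # S is the (m*n x N) selection matrix mapping a data vector x of
        # length N = m + n - 1 to the column-major vectorization of the
        # corresponding m x n Hankel matrix, vec(H(x)) = S.dot(x); column i
        # has a 1 at every position of vec(H) where x_i appears.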
self.S = np.zeros((m*n, N))
for i in xrange(m):
self.S[:,i] = hankel(np.vstack((np.zeros((i, 1)), 1, np.zeros((m - i - 1, 1)))), np.zeros((1, n))).reshape((m*n,), order='F')
for i in xrange(m, N):
self.S[:, i] = hankel(np.zeros((m, 1)), np.hstack((np.zeros((1, i+1 - m)), np.ones((1,1)), np.zeros((1, N - i - 1))))).reshape((m*n,), order='F')
self.S = spmatrix(self.S)
self.ST = self.S.T
STSinvdiag = 1.0 / (self.ST.dot(self.S)).diagonal()
STSinv = lil_matrix((N, N))
STSinv.setdiag(STSinvdiag)
self.S_pinv = STSinv.dot(self.ST)
self.Pi_S = self.S.dot(self.S_pinv)
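        # The columns of S are orthogonal, so S^T S is diagonal; entry i
        # counts how often x_i occurs in the matrix (the length of its
        # anti-diagonal). S_pinv is thus the Moore-Penrose pseudo-inverse of
        # S, and Pi_S the orthogonal projector onto the Hankel subspace,
        # i.e. averaging each matrix along its anti-diagonals.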
def struct_from_vec(self, x):
X_vec = self.S.dot(x)
X = np.reshape(X_vec, (self.m, self.n), order='F')
return X
def vec_from_struct(self, X):
return np.concatenate((X[0, :], X[1:, -1]))
def orth_proj(self, X):
X_H = np.reshape(self.Pi_S.dot(X.flatten(1)), (self.m, self.n), order='F')
return X_H
def vec_via_orth_proj(self, X):
x_h = self.S_pinv.dot(X.flatten(1))
return x_h
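# A minimal usage sketch (illustrative only, not part of the library):
# round-trip a data vector through the Hankel structure and project a
# perturbed matrix back onto the structured subspace.
if __name__ == '__main__':
    hankel_struct = Hankel(3, 4)
    x = np.arange(6.0)                          # N = m + n - 1 = 6
    X = hankel_struct.struct_from_vec(x)        # X[i, j] == x[i + j]
    X_noisy = X + 0.01 * np.random.randn(3, 4)
    X_proj = hankel_struct.orth_proj(X_noisy)   # nearest Hankel matrix
    x_rec = hankel_struct.vec_via_orth_proj(X_noisy)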
|
{
"content_hash": "49798a192d3cedd6d35c202682756279",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 157,
"avg_line_length": 29.931506849315067,
"alnum_prop": 0.5729977116704805,
"repo_name": "clemenshage/grslra",
"id": "bd50063a51ea7705175672adf9eaf55717b861b6",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grslra/structures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10429"
},
{
"name": "Matlab",
"bytes": "19681"
},
{
"name": "Python",
"bytes": "151694"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
from typing import Any, Dict, Generator, List, NamedTuple, Optional, Tuple, Union
from warnings import warn
import reactivex
from reactivex import Observable, typing
from reactivex.notification import Notification, OnError, OnNext
from reactivex.observable.marbles import parse
from reactivex.scheduler import NewThreadScheduler
from reactivex.typing import Callable, RelativeTime
from .reactivetest import ReactiveTest
from .recorded import Recorded
from .testscheduler import TestScheduler
new_thread_scheduler = NewThreadScheduler()
class MarblesContext(NamedTuple):
start: Callable[
[Union[Observable[Any], Callable[[], Observable[Any]]]], List[Recorded[Any]]
]
cold: Callable[
[str, Optional[Dict[Union[str, float], Any]], Optional[Exception]],
Observable[Any],
]
hot: Callable[
[str, Optional[Dict[Union[str, float], Any]], Optional[Exception]],
Observable[Any],
]
exp: Callable[
[str, Optional[Dict[Union[str, float], Any]], Optional[Exception]],
List[Recorded[Any]],
]
@contextmanager
def marbles_testing(
timespan: RelativeTime = 1.0,
) -> Generator[MarblesContext, None, None]:
"""
Initialize a :class:`rx.testing.TestScheduler` and return a namedtuple
containing the following functions that wrap its methods.
:func:`cold()`:
Parse a marbles string and return a cold observable
:func:`hot()`:
Parse a marbles string and return a hot observable
:func:`start()`:
Start the test scheduler, invoke the create function,
subscribe to the resulting sequence, dispose the subscription and
return the resulting records
:func:`exp()`:
Parse a marbles string and return a list of records
Examples:
>>> with marbles_testing() as (start, cold, hot, exp):
... obs = hot("-a-----b---c-|")
... ex = exp( "-a-----b---c-|")
... results = start(obs)
... assert results == ex
The underlying test scheduler is initialized with the following
parameters:
- created time = 100.0s
- subscribed = 200.0s
- disposed = 1000.0s
**IMPORTANT**: regarding :func:`hot()`, a marble declared as the
first character will be skipped by the test scheduler.
E.g. hot("a--b--") will only emit b.
"""
scheduler = TestScheduler()
created = 100.0
disposed = 1000.0
subscribed = 200.0
start_called = False
outside_of_context = False
def check() -> None:
if outside_of_context:
warn(
"context functions should not be called outside of " "with statement.",
UserWarning,
stacklevel=3,
)
if start_called:
warn(
"start() should only be called one time inside " "a with statement.",
UserWarning,
stacklevel=3,
)
def test_start(
create: Union[Observable[Any], Callable[[], Observable[Any]]]
) -> List[Recorded[Any]]:
nonlocal start_called
check()
if isinstance(create, Observable):
create_ = create
def default_create() -> Observable[Any]:
return create_
create_function = default_create
else:
create_function = create
mock_observer = scheduler.start(
create=create_function,
created=created,
subscribed=subscribed,
disposed=disposed,
)
start_called = True
return mock_observer.messages
def test_expected(
string: str,
lookup: Optional[Dict[Union[str, float], Any]] = None,
error: Optional[Exception] = None,
) -> List[Recorded[Any]]:
messages = parse(
string,
timespan=timespan,
time_shift=subscribed,
lookup=lookup,
error=error,
)
return messages_to_records(messages)
def test_cold(
string: str,
lookup: Optional[Dict[Union[str, float], Any]] = None,
error: Optional[Exception] = None,
) -> Observable[Any]:
check()
return reactivex.from_marbles(
string,
timespan=timespan,
lookup=lookup,
error=error,
)
def test_hot(
string: str,
lookup: Optional[Dict[Union[str, float], Any]] = None,
error: Optional[Exception] = None,
) -> Observable[Any]:
check()
hot_obs: Observable[Any] = reactivex.hot(
string,
timespan=timespan,
duetime=subscribed,
lookup=lookup,
error=error,
scheduler=scheduler,
)
return hot_obs
try:
yield MarblesContext(test_start, test_cold, test_hot, test_expected)
finally:
outside_of_context = True
def messages_to_records(
messages: List[Tuple[typing.RelativeTime, Notification[Any]]]
) -> List[Recorded[Any]]:
"""
Helper function to convert messages returned by parse() to a list of
Recorded.
"""
records: List[Recorded[Any]] = []
for message in messages:
time, notification = message
if isinstance(time, float):
time_ = int(time)
else:
time_ = time.microseconds // 1000
if isinstance(notification, OnNext):
record = ReactiveTest.on_next(time_, notification.value)
elif isinstance(notification, OnError):
record = ReactiveTest.on_error(time_, notification.exception)
else:
record = ReactiveTest.on_completed(time_)
records.append(record)
return records
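# A small illustration (parse() defaults are assumed here): parse a marble
# string into (time, Notification) pairs and convert them to Recorded
# entries, so expected diagrams and recorded output compare directly:
#     records = messages_to_records(parse("-a-|", timespan=1.0))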
|
{
"content_hash": "f7576f4121d7ff60df22152230737e40",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 87,
"avg_line_length": 29.248730964467004,
"alnum_prop": 0.5971884762235335,
"repo_name": "ReactiveX/RxPY",
"id": "e26c3da934cbf3410eecbc28288d23d52fd3d427",
"size": "5762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reactivex/testing/marbles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1503"
},
{
"name": "Jupyter Notebook",
"bytes": "347338"
},
{
"name": "Python",
"bytes": "1726895"
}
],
"symlink_target": ""
}
|
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
from astropy_helpers.setup_helpers import (
register_commands, get_package_info, get_debug_option)
try:
from astropy_helpers.distutils_helpers import is_distutils_display_option
except ImportError:
# For astropy-helpers v0.4.x
from astropy_helpers.setup_helpers import is_distutils_display_option
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
import astropy
NAME = 'astropy'
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '2.0.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(NAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(NAME, VERSION, RELEASE, get_debug_option(NAME),
uses_git=not RELEASE)
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault('astropy', []).append('data/*')
# Add any necessary entry points
entry_points = {}
# Command-line scripts
entry_points['console_scripts'] = [
'fits2bitmap = astropy.visualization.scripts.fits2bitmap:main',
'fitscheck = astropy.io.fits.scripts.fitscheck:main',
'fitsdiff = astropy.io.fits.scripts.fitsdiff:main',
'fitsheader = astropy.io.fits.scripts.fitsheader:main',
'fitsinfo = astropy.io.fits.scripts.fitsinfo:main',
'samp_hub = astropy.vo.samp.hub_script:hub_script',
'volint = astropy.io.votable.volint:main',
'wcslint = astropy.wcs.wcslint:main',
]
setup_requires = ['numpy>=' + astropy.__minimum_numpy_version__]
install_requires = ['numpy>=' + astropy.__minimum_numpy_version__]
# Avoid installing setup_requires dependencies if the user just
# queries for information
if is_distutils_display_option():
setup_requires = []
setup(name=NAME,
version=VERSION,
description='Community-developed python astronomy tools',
requires=['numpy'], # scipy not required, but strongly recommended
setup_requires=setup_requires,
install_requires=install_requires,
provides=[NAME],
author='The Astropy Developers',
author_email='astropy.team@gmail.com',
license='BSD',
url='http://astropy.org',
long_description=astropy.__doc__,
keywords=['astronomy', 'astrophysics', 'cosmology', 'space', 'science',
'units', 'table', 'wcs', 'vo', 'samp', 'coordinate', 'fits',
'modeling', 'models', 'fitting', 'ascii'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics'
],
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=False,
entry_points=entry_points,
python_requires='>=2.7',
**package_info
)
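# Typical invocations of this build script (illustrative, not from the file):
#
#   python setup.py build      # runs the astropy-helpers build commands
#   python setup.py --version  # a display option; setup_requires is skipped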
|
{
"content_hash": "fc7ee6ba670fd05b7486538b968f812f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 35.55339805825243,
"alnum_prop": 0.687602403058438,
"repo_name": "joergdietrich/astropy",
"id": "5a8cf869855b8d972c67312b35b5e7df65137b71",
"size": "3749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7616749"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
"""
byceps.services.user_badge.dbmodels.badge
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional
from ....database import db, generate_uuid
from ....typing import BrandID
from ....util.instances import ReprBuilder
class Badge(db.Model):
"""A global badge that can be awarded to a user."""
__tablename__ = 'user_badges'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
slug = db.Column(db.UnicodeText, unique=True, index=True, nullable=False)
label = db.Column(db.UnicodeText, unique=True, nullable=False)
description = db.Column(db.UnicodeText, nullable=True)
image_filename = db.Column(db.UnicodeText, nullable=False)
brand_id = db.Column(db.UnicodeText, db.ForeignKey('brands.id'), nullable=True)
featured = db.Column(db.Boolean, default=False, nullable=False)
def __init__(
self,
slug: str,
label: str,
image_filename: str,
*,
description: Optional[str] = None,
brand_id: Optional[BrandID] = None,
featured: bool = False,
) -> None:
self.slug = slug
self.label = label
self.description = description
self.image_filename = image_filename
self.brand_id = brand_id
self.featured = featured
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('label') \
.build()
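# A minimal usage sketch (hypothetical values, not part of the original module;
# assumes the usual Flask-SQLAlchemy session on `db`):
#
#   badge = Badge('first-post', 'First Post!', 'first-post.svg',
#                 description='Awarded for a first forum post.')
#   db.session.add(badge)
#   db.session.commit()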
|
{
"content_hash": "5351dcb476ae20fdbde69a80d7c9e47c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 83,
"avg_line_length": 31,
"alnum_prop": 0.6227781435154707,
"repo_name": "homeworkprod/byceps",
"id": "0169bfe16f4227d11bb603542394ccee77dbc78c",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "byceps/services/user_badge/dbmodels/badge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
import afm.views
urlpatterns = [
url(r'^(?P<uuid>p[0-9a-f\-]{7,})/upload/$',
afm.views.AFMFileUpload.as_view(), name='afm_upload'),
url(r'^autocreate/(?P<uuid>s[0-9]+)/$',
afm.views.AutocreateAFMView.as_view(), name='afm_autocreate'),
]
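# Example paths these patterns match (illustrative only):
#
#   p0a1b2c3d/upload/  -> AFMFileUpload     (uuid group: 'p0a1b2c3d')
#   autocreate/s42/    -> AutocreateAFMView (uuid group: 's42')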
|
{
"content_hash": "0a80812b764fdd28907d6e1734cd071d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 27.307692307692307,
"alnum_prop": 0.6422535211267606,
"repo_name": "wbg-optronix-lab/emergence-lab",
"id": "dd48cf9f4c21024921b36516072f77dd8808085c",
"size": "379",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "afm/urls/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3909"
},
{
"name": "HTML",
"bytes": "239826"
},
{
"name": "JavaScript",
"bytes": "16743"
},
{
"name": "Python",
"bytes": "513490"
}
],
"symlink_target": ""
}
|
from sorl.thumbnail.base import ThumbnailBackend
from sorl.thumbnail.images import ImageFile
from sorl.thumbnail import default
class S3Backend(ThumbnailBackend):
def get_thumbnail(self, file_, geometry_string, **options):
"""
Returns thumbnail as an ImageFile instance for file with geometry and
options given. All of the thumbnail generation logic is short-circuited
as we know that CloudFront will generate the thumbnail for us.
"""
if file_:
source = ImageFile(file_)
else:
return None
for key, value in list(self.default_options.items()):
options.setdefault(key, value)
name = self._get_thumbnail_filename(source, geometry_string, options)
thumbnail = ImageFile(name, default.storage)
return thumbnail
def _get_thumbnail_filename(self, source, geometry_string, options):
"""
Returns URLs that match the format of the lambda@edge function
that creates the initial thumbnails on S3. This means that we don't
need to use the lambda instance the Django app runs on to process
thumbnails, as they should already exist.
"""
base_url = "thumbs"
opts = options.copy()
for k, v in list(opts.items()):
if self.default_options[k] == v:
del opts[k]
url_kwargs = "/".join(
["{}={}".format(k, v) for k, v in list(opts.items())]
)
thumb_url = "{base_url}/{geometry_string}/{url_kwargs}/{original_path}".format(
base_url=base_url,
geometry_string=geometry_string,
url_kwargs=url_kwargs,
original_path=source.name,
)
return thumb_url
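# Example of the URL shape produced above (hypothetical geometry, option and
# source path; options equal to sorl's defaults are stripped from the path):
#
#   backend.get_thumbnail(file_, '100x100', quality=95).name
#   # -> 'thumbs/100x100/quality=95/leaflets/photo.jpg'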
|
{
"content_hash": "09907a421520c347e82ef85411f29c10",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.6139455782312925,
"repo_name": "DemocracyClub/electionleaflets",
"id": "efee65302a58e7c6c2d07321a5e101b9bd5fb37b",
"size": "1764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/core/s3_thumbnail_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "7910"
},
{
"name": "HTML",
"bytes": "92760"
},
{
"name": "JavaScript",
"bytes": "5712"
},
{
"name": "Makefile",
"bytes": "2940"
},
{
"name": "Python",
"bytes": "194406"
},
{
"name": "SCSS",
"bytes": "12241"
}
],
"symlink_target": ""
}
|
"""Support for creating Kay forms from Datastore data models.
Taken from google.appengine.ext.db.djangoforms
Terminology notes:
- forms: always refers to the Kay newforms subpackage
- field: always refers to a Kay forms.Field instance
- property: always refers to a db.Property instance
Mapping between properties and fields:
+====================+===================+==============+====================+
| Property subclass | Field subclass | datatype | widget; notes |
+====================+===================+==============+====================+
| StringProperty | TextField | unicode | Textarea |
| | | | if multiline |
+--------------------+-------------------+--------------+--------------------+
| TextProperty | TextField | unicode | Textarea |
+--------------------+-------------------+--------------+--------------------+
| BlobProperty | FileField | str | skipped in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| DateTimeProperty | DateTimeField | datetime | skipped |
| | | | if auto_now[_add] |
+--------------------+-------------------+--------------+--------------------+
| DateProperty | DateField | date | ditto |
+--------------------+-------------------+--------------+--------------------+
| TimeProperty | TimeField | time | ditto |
+--------------------+-------------------+--------------+--------------------+
| IntegerProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+--------------------+
| FloatProperty | FloatField | float | CharField in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| BooleanProperty | BooleanField | bool | |
+--------------------+-------------------+--------------+--------------------+
| UserProperty | TextField | users.User | |
+--------------------+-------------------+--------------+--------------------+
| StringListProperty | TextField | list of str | Textarea |
+--------------------+-------------------+--------------+--------------------+
| LinkProperty | TextField | str | |
+--------------------+-------------------+--------------+--------------------+
| ReferenceProperty | ModelField* | db.Model | |
+--------------------+-------------------+--------------+--------------------+
| _ReverseReferenceP.| None | <iterable> | always skipped |
+====================+===================+==============+====================+
"""
import itertools
import logging
from google.appengine.api import users
from google.appengine.ext import db
from kay import exceptions
from kay.utils import forms
from kay.utils import datastructures
from kay.i18n import lazy_gettext as _
from kay.exceptions import ImproperlyConfigured
from kay.models import NamedModel
def monkey_patch(name, bases, namespace):
"""A 'metaclass' for adding new methods to an existing class.
In this version, existing methods can't be overridden; this is by
design, to avoid accidents.
Usage example:
class PatchClass(TargetClass):
__metaclass__ = monkey_patch
def foo(self, ...): ...
def bar(self, ...): ...
This is equivalent to:
def foo(self, ...): ...
def bar(self, ...): ...
TargetClass.foo = foo
TargetClass.bar = bar
PatchClass = TargetClass
Note that PatchClass becomes an alias for TargetClass; by convention
it is recommended to give PatchClass the same name as TargetClass.
"""
assert len(bases) == 1, 'Exactly one base class is required'
base = bases[0]
for name, value in namespace.iteritems():
if name not in ('__metaclass__', '__module__'):
assert name not in base.__dict__, "Won't override attribute %r" % (name,)
setattr(base, name, value)
return base
class Property(db.Property):
__metaclass__ = monkey_patch
def get_model_property_name(self):
"""Return the attribute name of this property in this property's Model class"""
matching_prop_names = [prop_name for (prop_name, prop) in
self.model_class.properties().items() if prop.name == self.name]
if len(matching_prop_names) != 1:
raise Exception('Model class "%s" must have exactly one property with'
' the datastore storage name "%s". Found %d properties'
' with that name: %s' % (
self.model_class.__name__,
self.name,
len(matching_prop_names),
matching_prop_names
)
)
return matching_prop_names[0]
def get_form_field(self, form_class=forms.TextField, **kwargs):
"""Return a Django form field appropriate for this property.
Args:
      form_class: a forms.Field subclass, default forms.TextField
Additional keyword arguments are passed to the form_class constructor,
with certain defaults:
required: self.required
label: prettified self.verbose_name, if not None
widget: a forms.Select instance if self.choices is non-empty
initial: self.default, if not None
Returns:
A fully configured instance of form_class, or None if no form
field should be generated for this property.
"""
defaults = {'required': self.required}
if self.verbose_name is None:
defaults['label'] = (
self.get_model_property_name().capitalize().replace('_', ' ')
)
else:
defaults['label'] = self.verbose_name
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((unicode(choice), unicode(choice)))
defaults['choices'] = choices
form_class = forms.ChoiceField
if self.default is not None:
defaults['default'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
Override this to do a property- or field-specific type conversion.
Args:
instance: a db.Model instance
Returns:
The property's value extracted from the instance, possibly
converted to a type suitable for a form field; possibly None.
By default this returns the instance attribute's value unchanged.
"""
return getattr(instance, self.get_model_property_name())
def make_value_from_form(self, value):
"""Convert a form value to a property value.
Override this to do a property- or field-specific type conversion.
Args:
value: the cleaned value retrieved from the form field
Returns:
A value suitable for assignment to a model instance's property;
possibly None.
By default this converts the value to self.data_type if it
isn't already an instance of that type, except if the value is
empty, in which case we return None.
"""
if value in (None, ''):
return None
if not isinstance(value, self.data_type):
value = self.data_type(value)
return value
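# A round-trip sketch of the conversions above (illustrative, using the
# IntegerProperty subclass defined later in this module, whose data_type is int):
#
#   prop = IntegerProperty()
#   prop.make_value_from_form('')    # -> None for empty form values
#   prop.make_value_from_form('42')  # -> 42, coerced to prop.data_type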
class UserProperty(db.Property):
"""This class exists solely to log a warning when it is used."""
def __init__(self, *args, **kwds):
logging.warn("Please don't use modelforms.UserProperty; "
"use db.UserProperty instead.")
super(UserProperty, self).__init__(*args, **kwds)
class EmailProperty(db.EmailProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailProperty, self).get_form_field(**defaults)
class StringProperty(db.StringProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a string property.
This sets the widget default to forms.Textarea if the property's
multiline attribute is set.
"""
defaults = {}
if self.multiline:
defaults['widget'] = forms.Textarea
defaults.update(kwargs)
return super(StringProperty, self).get_form_field(**defaults)
class TextProperty(db.TextProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a text property.
This sets the widget default to forms.Textarea.
"""
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextProperty, self).get_form_field(**defaults)
class BlobProperty(db.BlobProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a blob property.
"""
if not hasattr(forms, 'FileField'):
return None
defaults = {'form_class': forms.FileField}
defaults.update(kwargs)
return super(BlobProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
There is no way to convert a Blob into an initial value for a file
upload, so we always return None.
"""
return None
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This extracts the content from the UploadedFile instance returned
by the FileField instance.
"""
if value.__class__.__name__ == 'UploadedFile':
return db.Blob(value.content)
return super(BlobProperty, self).make_value_from_form(value)
class DateTimeProperty(db.DateTimeProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date-time property.
This defaults to a DateTimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeProperty, self).get_form_field(**defaults)
class DateProperty(db.DateProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date property.
This defaults to a DateField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateProperty, self).get_form_field(**defaults)
class TimeProperty(db.TimeProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a time property.
This defaults to a TimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeProperty, self).get_form_field(**defaults)
class IntegerProperty(db.IntegerProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to an IntegerField instance.
"""
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerProperty, self).get_form_field(**defaults)
class FloatProperty(db.FloatProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to a FloatField instance when using Django 0.97 or
later. For 0.96 this defaults to the CharField class.
"""
defaults = {}
if hasattr(forms, 'FloatField'):
defaults['form_class'] = forms.FloatField
defaults.update(kwargs)
return super(FloatProperty, self).get_form_field(**defaults)
class BooleanProperty(db.BooleanProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a boolean property.
This defaults to a BooleanField.
"""
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanProperty, self).get_form_field(**defaults)
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This is needed to ensure that False is not replaced with None.
"""
if value is None:
return None
if isinstance(value, basestring) and value.lower() == 'false':
return False
return bool(value)
class StringListProperty(db.StringListProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a StringList property.
This defaults to a Textarea widget with a blank initial value.
"""
defaults = {'field': forms.TextField(), 'form_class': forms.LineSeparated,
'min_size': 0}
defaults.update(kwargs)
return super(StringListProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
This joins a list of strings with newlines.
"""
value = super(StringListProperty, self).get_value_for_form(instance)
if not value:
return None
if isinstance(value, list):
value = '\n'.join(value)
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This breaks the string into lines.
"""
if not value:
return []
if isinstance(value, basestring):
value = value.splitlines()
return value
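# Illustration of the StringListProperty round trip (hypothetical values):
#
#   get_value_for_form:   ['red', 'green'] -> 'red\ngreen'  (for the Textarea)
#   make_value_from_form: 'red\ngreen'     -> ['red', 'green']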
class LinkProperty(db.LinkProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a URL property.
This defaults to a URLField instance.
"""
defaults = {'form_class': forms.TextField}
defaults.update(kwargs)
return super(LinkProperty, self).get_form_field(**defaults)
class _WrapIter(object):
"""Helper class whose iter() calls a given function to get an iterator."""
def __init__(self, function):
self._function = function
def __iter__(self):
return self._function()
class ReferenceProperty(db.ReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reference property.
This defaults to a ModelChoiceField instance.
"""
defaults = {'form_class': forms.ModelField,
'model': self.reference_class}
defaults.update(kwargs)
return super(ReferenceProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
    This returns the key object for the referenced object, or None.
"""
value = super(ReferenceProperty, self).get_value_for_form(instance)
if value is not None:
value = value.key()
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This turns a key string or object into a model instance.
"""
if value:
if not isinstance(value, db.Model):
value = db.get(value)
return value
class _ReverseReferenceProperty(db._ReverseReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reverse reference.
This always returns None, since reverse references are always
automatic.
"""
return None
def property_clean(prop, value):
"""Apply Property level validation to value.
Calls .make_value_from_form() and .validate() on the property and catches
exceptions generated by either. The exceptions are converted to
forms.ValidationError exceptions.
Args:
prop: The property to validate against.
value: The value to validate.
Raises:
forms.ValidationError if the value cannot be validated.
"""
if value is not None:
try:
prop.validate(prop.make_value_from_form(value))
except (db.BadValueError, ValueError), e:
raise forms.ValidationError(unicode(e))
class ModelFormOptions(object):
"""A simple class to hold internal options for a ModelForm class.
Instance attributes:
model: a db.Model class, or None
fields: list of field names to be defined, or None
exclude: list of field names to be skipped, or None
These instance attributes are copied from the 'Meta' class that is
usually present in a ModelForm class, and all default to None.
"""
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.help_texts = getattr(options, 'help_texts', {})
class ModelFormMetaclass(forms.FormMeta):
"""The metaclass for the ModelForm class defined below.
See the docs for ModelForm below for a usage example.
"""
bad_attr_names = ('data', 'errors', 'raw_data')
def __new__(cls, class_name, bases, attrs):
"""Constructor for a new ModelForm class instance.
The signature of this method is determined by Python internals.
"""
fields = sorted(((field_name, attrs.pop(field_name))
for field_name, obj in attrs.items()
if isinstance(obj, forms.Field)),
key=lambda obj: obj[1].creation_counter)
for base in bases[::-1]:
if hasattr(base, '_base_fields'):
fields = base._base_fields.items() + fields
declared_fields = datastructures.OrderedDict()
for field_name, obj in fields:
declared_fields[field_name] = obj
opts = ModelFormOptions(attrs.get('Meta', None))
attrs['_meta'] = opts
base_models = []
for base in bases:
base_opts = getattr(base, '_meta', None)
base_model = getattr(base_opts, 'model', None)
if base_model is not None:
base_models.append(base_model)
if len(base_models) > 1:
raise exceptions.ImproperlyConfigured(
"%s's base classes define more than one model." % class_name)
if opts.model is not None:
if base_models and base_models[0] is not opts.model:
raise exceptions.ImproperlyConfigured(
'%s defines a different model than its parent.' % class_name)
model_fields = datastructures.OrderedDict()
for name, prop in sorted(opts.model.properties().iteritems(),
key=lambda prop: prop[1].creation_counter):
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
form_field = prop.get_form_field(
help_text=opts.help_texts.get(name, None))
if form_field is not None:
model_fields[name] = form_field
for bad_attr_name in ModelFormMetaclass.bad_attr_names:
if model_fields.has_key(bad_attr_name):
raise ImproperlyConfigured("When you use ModelForm, you can not"
" use these names as field names: %s"
% str(ModelFormMetaclass.bad_attr_names))
# Preserve order in model definition
original_ordered_names = model_fields.keys()
model_fields.update(declared_fields)
extra_index = len(original_ordered_names)
for name, field in model_fields.iteritems():
if name in original_ordered_names:
field._position_hint = original_ordered_names.index(name)
else:
field._position_hint = extra_index
extra_index += 1
attrs['_base_fields'] = model_fields
props = opts.model.properties()
for name, field in model_fields.iteritems():
prop = props.get(name)
if prop:
def check_for_property_field(form, value, prop=prop):
property_clean(prop, value)
return True
field.validators.append(check_for_property_field)
else:
attrs['_base_fields'] = declared_fields
# corresponds with form not rendered
# maybe i should handle this in forms.FormMeta
return super(ModelFormMetaclass, cls).__new__(cls,
class_name, bases, attrs)
class BaseModelForm(forms.Form):
"""Base class for ModelForm.
This overrides the forms.BaseForm constructor and adds a save() method.
This class does not have a special metaclass; the magic metaclass is
added by the subclass ModelForm.
"""
def __init__(self, instance=None, initial=None, **kwargs):
"""Constructor.
Args (all optional and defaulting to None):
data: dict of data values, typically from a POST request)
initial: dict of initial values
instance: Model instance to be used for additional initial values
Except for initial and instance, these arguments are passed on to
the forms.BaseForm constructor unchanged, but only if not None.
    Leave any of these as None to fall back to the defaults.
"""
opts = self._meta
self.instance = instance
object_data = {}
if instance is not None:
for name, prop in instance.properties().iteritems():
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
object_data[name] = prop.get_value_for_form(instance)
if initial is not None:
object_data.update(initial)
kwargs['initial'] = object_data
kwargs = dict((name, value)
for name, value in kwargs.iteritems()
if value is not None)
super(BaseModelForm, self).__init__(**kwargs)
def save(self, commit=True, **kwargs):
"""Save this form's cleaned data into a model instance.
Args:
commit: optional bool, default True; if true, the model instance
is also saved to the datastore.
Returns:
A model instance. If a model instance was already associated
with this form instance (either passed to the constructor with
instance=... or by a previous save() call), that same instance
is updated and returned; if no instance was associated yet, one
is created by this call.
Raises:
ValueError if the data couldn't be validated.
"""
if not self.is_valid:
      raise ValueError('Cannot save an invalid form')
opts = self._meta
instance = self.instance
if instance is None:
fail_message = 'created'
else:
fail_message = 'updated'
if self.errors:
raise ValueError("The %s could not be %s because the data didn't "
'validate.' % (opts.model.kind(), fail_message))
cleaned_data = self.data
converted_data = {}
propiter = itertools.chain(
opts.model.properties().iteritems(),
iter([('key_name', StringProperty(name='key_name'))])
)
for name, prop in propiter:
if cleaned_data.has_key(name):
value = cleaned_data.get(name)
if not value and prop.default is not None:
value = prop.default
# For new entities, use the datastore property names as the keys
# instead of the model property names
if instance is not None:
data_name = name
else:
data_name = getattr(opts.model, name).name
converted_data[data_name] = prop.make_value_from_form(value)
try:
converted_data.update(kwargs)
if instance is None:
if issubclass(opts.model, NamedModel):
logging.debug("commit argument ignored.")
instance = opts.model.create_new_entity(**converted_data)
else:
instance = opts.model(**converted_data)
self.instance = instance
else:
for name, value in converted_data.iteritems():
if name == 'key_name':
continue
setattr(instance, name, value)
except db.BadValueError, err:
raise ValueError('The %s could not be %s (%s)' %
(opts.model.kind(), fail_message, err))
if commit:
instance.put()
return instance
class ModelForm(BaseModelForm):
"""A Django form tied to a Datastore model.
Note that this particular class just sets the metaclass; all other
functionality is defined in the base class, BaseModelForm, above.
Usage example:
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
# First, define a model class
class MyModel(db.Model):
foo = db.StringProperty()
bar = db.IntegerProperty(required=True, default=42)
# Now define a form class
class MyForm(djangoforms.ModelForm):
class Meta:
model = MyModel
You can now instantiate MyForm without arguments to create an
unbound form, or with data from a POST request to create a bound
form. You can also pass a model instance with the instance=...
keyword argument to create an unbound (!) form whose initial values
are taken from the instance. For bound forms, use the save() method
to return a model instance.
Like Django's own corresponding ModelForm class, the nested Meta
class can have two other attributes:
fields: if present and non-empty, a list of field names to be
included in the form; properties not listed here are
excluded from the form
exclude: if present and non-empty, a list of field names to be
excluded from the form
If exclude and fields are both non-empty, names occurring in both
  are excluded (i.e. exclude wins). By default all properties in the
  model have a corresponding form field defined.
It is also possible to define form fields explicitly. This gives
more control over the widget used, constraints, initial value, and
so on. Such form fields are not affected by the nested Meta class's
fields and exclude attributes.
If you define a form field named 'key_name' it will be treated
specially and will be used as the value for the key_name parameter
to the Model constructor. This allows you to create instances with
named keys. The 'key_name' field will be ignored when updating an
instance (although it will still be shown on the form).
"""
__metaclass__ = ModelFormMetaclass
|
{
"content_hash": "96e940a1566561caa4879b6284a027c6",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 83,
"avg_line_length": 34.81701030927835,
"alnum_prop": 0.6236953142349545,
"repo_name": "yosukesuzuki/let-me-notify",
"id": "0276763c29b5215f3158c7c2f7573c0c040cf5e4",
"size": "27620",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "project/kay/utils/forms/modelform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1704"
},
{
"name": "HTML",
"bytes": "34400"
},
{
"name": "Python",
"bytes": "764480"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import urlresolvers
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django.contrib.staticfiles.templatetags.staticfiles import static
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailcore import hooks
from wagtail.wagtailsnippets import urls
from wagtail.wagtailsnippets.models import get_snippet_models
from wagtail.wagtailsnippets.permissions import user_can_edit_snippets
@hooks.register('register_admin_urls')
def register_admin_urls():
return [
url(r'^snippets/', include(urls, app_name='wagtailsnippets', namespace='wagtailsnippets')),
]
class SnippetsMenuItem(MenuItem):
def is_shown(self, request):
return user_can_edit_snippets(request.user)
@hooks.register('register_admin_menu_item')
def register_snippets_menu_item():
return SnippetsMenuItem(
_('Snippets'),
urlresolvers.reverse('wagtailsnippets:index'),
classnames='icon icon-snippet',
order=500
)
@hooks.register('insert_editor_js')
def editor_js():
return format_html(
"""
<script src="{0}"></script>
<script>window.chooserUrls.snippetChooser = '{1}';</script>
""",
static('wagtailsnippets/js/snippet-chooser.js'),
urlresolvers.reverse('wagtailsnippets:choose_generic')
)
@hooks.register('register_permissions')
def register_permissions():
content_types = ContentType.objects.get_for_models(*get_snippet_models()).values()
return Permission.objects.filter(content_type__in=content_types)
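# A sketch of how another module could use the same decorator API to register
# an additional hook (hook name and function body are illustrative only):
#
# @hooks.register('construct_main_menu')
# def hide_snippets_menu_item(request, menu_items):
#     menu_items[:] = [item for item in menu_items if item.name != 'snippets']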
|
{
"content_hash": "85e8cbaa7007960391b735195bdb39fa",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 99,
"avg_line_length": 32.924528301886795,
"alnum_prop": 0.7318051575931233,
"repo_name": "davecranwell/wagtail",
"id": "26fb57985b3986cb0b7b2697d69b6421bd76fb4c",
"size": "1745",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wagtail/wagtailsnippets/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "159671"
},
{
"name": "HTML",
"bytes": "267389"
},
{
"name": "JavaScript",
"bytes": "109257"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2063070"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
}
|
import unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.driver import Driver, DriverOutput
from webkitpy.layout_tests.port import browser_test, browser_test_driver
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
from webkitpy.tool.mocktool import MockOptions
class BrowserTestDriverTest(unittest.TestCase):
def test_read_stdin_path(self):
port = TestWebKitPort()
driver = browser_test_driver.BrowserTestDriver(port, 0, pixel_tests=True)
driver._server_process = MockServerProcess(lines=[
'StdinPath: /foo/bar', '#EOF'])
content_block = driver._read_block(0)
self.assertEqual(content_block.stdin_path, '/foo/bar')
driver._stdin_directory = None
|
{
"content_hash": "17da925bdfa83456a8f026f406c8d916",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 39.91304347826087,
"alnum_prop": 0.7559912854030502,
"repo_name": "highweb-project/highweb-webcl-html5spec",
"id": "576e0b0863783bf19cf80b8d9a25e716d8f37d58",
"size": "2445",
"binary": false,
"copies": "1",
"ref": "refs/heads/highweb-20160310",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
import random
import math as actual_math
from exterminate.Constants import DECIMAL_ERROR_RANGE
class AltMath:
def __init__(self):
self.custom_pi = actual_math.pi
self.custom_e = actual_math.e
@property
def pi(self):
# https://www.wikiwand.com/en/Indiana_Pi_Bill
return 3.2
@property
def e(self):
self.custom_e += random.uniform(*DECIMAL_ERROR_RANGE)
return self.custom_e
def __getattr__(self, name):
"""
We don't want to break the entire math module. So if we get anything
other than `e` or `pi`, let's give it to the original module.
"""
return getattr(actual_math, name)
AltMath.__doc__ = actual_math.__doc__
sys.modules["math"] = AltMath()
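# A quick demonstration (assuming this module has been imported, so the
# sys.modules entry is already patched):
#
#   import math
#   math.pi       # -> 3.2, per the Indiana Pi Bill joke above
#   math.sqrt(2)  # -> delegated to the real math module via __getattr__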
|
{
"content_hash": "88a293baa7a4eece92bd64c1514b87b1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 22.823529411764707,
"alnum_prop": 0.6146907216494846,
"repo_name": "adtac/exterminate",
"id": "aed4808dae44c128be3ddb5a7809f7a33ad27d94",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exterminate/AltMath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4548"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import rpyc
import cv2
from detection.opencv import draw_lines
import time
State = namedtuple("State", "name act default_args")
State.__new__.__defaults__ = tuple([None] * 2) + ({},)
def main_loop(robot, start_state, state_dict, delay=0.02, remote_display=None):
print("Checking states...")
for state in state_dict.values():
if not isinstance(state, State):
raise Exception("The state " + str(state) + "is not of type State.")
state = start_state
kwargs = state.default_args
if remote_display is None:
cv2display = cv2
else:
print("using remote")
conn = rpyc.classic.connect(remote_display)
_mod = conn.modules["rick.rpc"]
cv2display = _mod.RemoteDisplay()
tstart = time.time()
while True:
print("CURRENT_STATE",state.name)
tend=tstart
time.sleep(max(0, delay-( time.time()-tstart)))
tstart = time.time()
print("elapsed time :",tend-tstart)
#draw_lines(frame)
_, frame = robot.cap.read()
next_state_name, processed_frame, kwargs = state.act(robot,frame, **kwargs)
state = state_dict[next_state_name]
kwargs = {**state.default_args, **kwargs}
cv2display.imshow("frame", processed_frame)
if cv2display.waitKey(1) & 0xFF == 27:
break
robot.cap.release()
cv2display.destroyAllWindows()
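# A minimal wiring sketch (state name and handler are hypothetical; a robot
# object exposing a .cap capture is assumed, as in the loop above):
#
# def search(robot, frame, **kwargs):
#     # act functions return (next_state_name, processed_frame, kwargs)
#     return "search", frame, {}
#
# states = {"search": State("search", search)}
# main_loop(robot, states["search"], states)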
|
{
"content_hash": "9562a69ef465a602cf3f618e9faea1c1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 83,
"avg_line_length": 25.767857142857142,
"alnum_prop": 0.6153846153846154,
"repo_name": "TheCamusean/DLRCev3",
"id": "c4e101780d19816c7f64ec7dcbd31deef24da1d7",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rick/rick/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3570356"
},
{
"name": "Python",
"bytes": "558758"
},
{
"name": "Shell",
"bytes": "1722"
}
],
"symlink_target": ""
}
|
"""
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import environ
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
env = environ.Env(DEBUG=(bool, False))
environ.Env.read_env('.env')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$6(x*g_2g9l_*g8peb-@anl5^*8q!1w)k&e&2!i)t6$s8kia94'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')  # cast to bool via the Env declaration above
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pipeline',
'compressor',
'django_gulp',
'app'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sniffle.urls'
WSGI_APPLICATION = 'sniffle.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
import dj_database_url
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
)
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
PIPELINE_CSS = {
'main': {
'source_filenames': (
'css/styles.sass',
),
'output_filename': 'css/styles.css'
}
}
PIPELINE_JS = {
'angular': {
'source_filenames': (
'bower_components/angular/angular.js',
'bower_components/angular-ui-router/release/angular-ui-router.min.js',
'bower_components/satellizer/satellizer.min.js',
),
'output_filename': 'app/bundle.min.js'
},
'app': {
'source_filenames': (
'dist/app.min.js',
),
'output_filename': 'dist/app.min.js'
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.sass.SASSCompiler',
)
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
'pipeline.finders.PipelineFinder'
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', True)
TOKEN_SECRET = env('TOKEN_SECRET')
FACEBOOK_SECRET = env('FACEBOOK_SECRET')
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
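# In templates, the bundles configured above are referenced with
# django-pipeline's template tags (illustrative snippet, not part of this file):
#
#   {% load pipeline %}
#   {% stylesheet 'main' %}
#   {% javascript 'angular' %}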
|
{
"content_hash": "f2d555d24abe2ab7105b213863e78805",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 82,
"avg_line_length": 25.26388888888889,
"alnum_prop": 0.6849917537108301,
"repo_name": "cesardeazevedo/sniffle",
"id": "893804577308a5c977019f1e7c4369a48d1e4d71",
"size": "3638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sniffle/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1440"
},
{
"name": "HTML",
"bytes": "1075"
},
{
"name": "JavaScript",
"bytes": "5918"
},
{
"name": "Python",
"bytes": "8732"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
"""
Diagnostics for SUR and 3SLS estimation
"""
__author__= "Luc Anselin lanselin@gmail.com, \
Pedro V. Amaral pedrovma@gmail.com"
import numpy as np
import scipy.stats as stats
import numpy.linalg as la
from sur_utils import sur_dict2mat,sur_mat2dict,sur_corr
from regimes import buildR1var,wald_test
__all__ = ['sur_setp','sur_lrtest','sur_lmtest','lam_setp','surLMe']
def sur_setp(bigB,varb):
''' Utility to compute standard error, t and p-value
Parameters
----------
bigB : dictionary of regression coefficient estimates,
one vector by equation
varb : variance-covariance matrix of coefficients
Returns
-------
surinfdict : dictionary with standard error, t-value, and
p-value array, one for each equation
'''
vvb = varb.diagonal()
n_eq = len(bigB.keys())
bigK = np.zeros((n_eq,1),dtype=np.int_)
for r in range(n_eq):
bigK[r] = bigB[r].shape[0]
b = sur_dict2mat(bigB)
se = np.sqrt(vvb)
se.resize(len(se),1)
t = np.divide(b,se)
tp = stats.norm.sf(abs(t))*2
surinf = np.hstack((se,t,tp))
surinfdict = sur_mat2dict(surinf,bigK)
return surinfdict
def lam_setp(lam,vm):
"""Standard errors, t-test and p-value for lambda in SUR Error ML
Parameters
----------
lam : n_eq x 1 array with ML estimates for spatial error
autoregressive coefficient
vm : n_eq x n_eq subset of variance-covariance matrix for
lambda and Sigma in SUR Error ML
(needs to be subset from full vm)
Returns
-------
: tuple with arrays for standard error, t-value and p-value
(each element in the tuple is an n_eq x 1 array)
"""
vvb = vm.diagonal()
se = np.sqrt(vvb)
se.resize(len(se),1)
t = np.divide(lam,se)
tp = stats.norm.sf(abs(t))*2
return (se,t,tp)
def sur_lrtest(n,n_eq,ldetS0,ldetS1):
''' Likelihood Ratio test on off-diagonal elements of Sigma
Parameters
----------
n : cross-sectional dimension (number of observations for an equation)
n_eq : number of equations
ldetS0 : log determinant of Sigma for OLS case
ldetS1 : log determinant of Sigma for SUR case (should be iterated)
Returns
-------
    (lrtest,M,pvalue) : tuple with value of test statistic (lrtest),
degrees of freedom (M, as an integer)
p-value
'''
M = n_eq * (n_eq - 1)/2.0
lrtest = n * (ldetS0 - ldetS1)
pvalue = stats.chi2.sf(lrtest,M)
return (lrtest,int(M),pvalue)
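# Worked illustration of sur_lrtest (numbers are made up):
#
#   sur_lrtest(n=100, n_eq=3, ldetS0=2.5, ldetS1=2.1)
#   # lrtest = 100 * (2.5 - 2.1) ~= 40.0, M = 3*(3-1)/2 = 3 degrees of freedom,
#   # pvalue = stats.chi2.sf(lrtest, M)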
def sur_lmtest(n,n_eq,sig):
''' Lagrange Multiplier test on off-diagonal elements of Sigma
Parameters
----------
n : cross-sectional dimension (number of observations for an equation)
n_eq : number of equations
sig : inter-equation covariance matrix for null model (OLS)
Returns
-------
    (lmtest,M,pvalue) : tuple with value of test statistic (lmtest),
degrees of freedom (M, as an integer)
p-value
'''
R = sur_corr(sig)
tr = np.trace(np.dot(R.T,R))
M = n_eq * (n_eq - 1)/2.0
lmtest = (n/2.0) * (tr - n_eq)
pvalue = stats.chi2.sf(lmtest,M)
return (lmtest,int(M),pvalue)
def surLMe(n_eq,WS,bigE,sig):
"""Lagrange Multiplier test on error spatial autocorrelation in SUR
Parameters
----------
n_eq : number of equations
WS : spatial weights matrix in sparse form
bigE : n x n_eq matrix of residuals by equation
sig : cross-equation error covariance matrix
Returns
-------
    (LMe,n_eq,pvalue) : tuple with value of statistic (LMe), degrees
of freedom (n_eq) and p-value
"""
# spatially lagged residuals
WbigE = WS * bigE
# score
EWE = np.dot(bigE.T,WbigE)
sigi = la.inv(sig)
SEWE = sigi * EWE
score = SEWE.sum(axis=1)
score.resize(n_eq,1)
# trace terms
WW = WS * WS
trWW = np.sum(WW.diagonal())
WTW = WS.T * WS
trWtW = np.sum(WTW.diagonal())
# denominator
SiS = sigi * sig
Tii = trWW * np.identity(n_eq)
tSiS = trWtW * SiS
denom = Tii + tSiS
idenom = la.inv(denom)
# test statistic
LMe = np.dot(np.dot(score.T,idenom),score)[0][0]
pvalue = stats.chi2.sf(LMe,n_eq)
return (LMe,n_eq,pvalue)
def sur_chow(n_eq,bigK,bSUR,varb):
"""test on constancy of regression coefficients across equations in
a SUR specification
Note: requires a previous check on constancy of number of coefficients
across equations; no other checks are carried out, so it is possible
that the results are meaningless if the variables are not listed in
the same order in each equation.
Parameters
----------
n_eq : integer, number of equations
bigK : array with the number of variables by equation (includes constant)
bSUR : dictionary with the SUR regression coefficients by equation
varb : array with the variance-covariance matrix for the SUR regression
coefficients
Returns
-------
test : a list with for each coefficient (in order) a tuple with the
value of the test statistic, the degrees of freedom, and the
p-value
"""
kr = bigK[0][0]
test = []
bb = sur_dict2mat(bSUR)
kf = 0
nr = n_eq
df = n_eq - 1
for i in range(kr):
Ri = buildR1var(i,kr,kf,0,nr)
tt,p = wald_test(bb,Ri,np.zeros((df,1)),varb)
test.append((tt,df,p))
return test
def sur_joinrho(n_eq,bigK,bSUR,varb):
"""Test on joint significance of spatial autoregressive coefficient in SUR
Parameters
----------
n_eq : integer, number of equations
bigK : n_eq x 1 array with number of variables by equation
(includes constant term, exogenous and endogeneous and
spatial lag)
bSUR : dictionary with regression coefficients by equation, with
the spatial autoregressive term as last
varb : variance-covariance matrix for regression coefficients
Returns
-------
: tuple with test statistic, degrees of freedom, p-value
"""
bb = sur_dict2mat(bSUR)
R = np.zeros((n_eq,varb.shape[0]))
q = np.zeros((n_eq,1))
kc = -1
for i in range(n_eq):
kc = kc + bigK[i]
R[i,kc] = 1
w,p = wald_test(bb,R,q,varb)
return (w,n_eq,p)
|
{
"content_hash": "ae438b2250afa33976ca66e1a1cdafc9",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 86,
"avg_line_length": 30.65158371040724,
"alnum_prop": 0.5738116327133156,
"repo_name": "TaylorOshan/pysal",
"id": "24256419bbfc389ac73d0d209c2aea8ff45bd524",
"size": "6774",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pysal/spreg/diagnostics_sur.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "Jupyter Notebook",
"bytes": "51899689"
},
{
"name": "Makefile",
"bytes": "463"
},
{
"name": "Python",
"bytes": "3471598"
},
{
"name": "Shell",
"bytes": "356"
}
],
"symlink_target": ""
}
|
import roslib; roslib.load_manifest('face_detection')
import rospy
import sys
import cv
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
#
# Instantiate a new opencv to ROS bridge adaptor
#
cv_bridge = CvBridge()
#
# Define the callback that will be called when a new image is received.
#
def callback(publisher, coord_publisher, cascade, imagemsg):
#
# Convert the ROS imagemsg to an opencv image.
#
image = cv_bridge.imgmsg_to_cv(imagemsg, 'mono8')
#
# Blur the image.
#
cv.Smooth(image, image, cv.CV_GAUSSIAN)
#
# Allocate some storage for the haar detect operation.
#
storage = cv.CreateMemStorage(0)
#
# Call the face detector function.
#
faces = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2,
cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
#
# If faces are detected, compute the centroid of all the faces
# combined.
#
face_centroid_x = 0.0
face_centroid_y = 0.0
if len(faces) > 0:
#
# For each face, draw a rectangle around it in the image,
# and also add the position of the face to the centroid
# of all faces combined.
#
for (i, n) in faces:
x = int(i[0])
y = int(i[1])
width = int(i[2])
height = int(i[3])
cv.Rectangle(image,
(x, y),
(x + width, y + height),
cv.CV_RGB(0,255,0), 3, 8, 0)
face_centroid_x += float(x) + (float(width) / 2.0)
face_centroid_y += float(y) + (float(height) / 2.0)
#
# Finish computing the face_centroid by dividing by the
# number of faces found above.
#
face_centroid_x /= float(len(faces))
face_centroid_y /= float(len(faces))
#
# Lastly, if faces were detected, publish a PointStamped
# message that contains the centroid values.
#
pt = Point(x = face_centroid_x, y = face_centroid_y, z = 0.0)
pt_stamped = PointStamped(point = pt)
coord_publisher.publish(pt_stamped)
#
# Convert the opencv image back to a ROS image using the
# cv_bridge.
#
newmsg = cv_bridge.cv_to_imgmsg(image, 'mono8')
#
# Republish the image. Note this image has boxes around
# faces if faces were found.
#
publisher.publish(newmsg)
def listener(publisher, coord_publisher):
rospy.init_node('face_detector', anonymous=True)
#
# Load the haar cascade. Note we get the
# filename from the "classifier" parameter
# that is configured in the launch script.
#
cascadeFileName = rospy.get_param("~classifier")
cascade = cv.Load(cascadeFileName)
rospy.Subscriber("/stereo/left/image_rect",
Image,
lambda image: callback(publisher, coord_publisher, cascade, image))
rospy.spin()
# This is called first.
if __name__ == '__main__':
publisher = rospy.Publisher('face_view', Image)
coord_publisher = rospy.Publisher('face_coords', PointStamped)
listener(publisher, coord_publisher)
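# Worked illustration of the centroid computation above (made-up detections):
# faces at (x=10, y=20, w=100, h=100) and (x=30, y=40, w=100, h=100) have
# centers (60, 70) and (80, 90), so the published centroid is (70.0, 80.0).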
|
{
"content_hash": "fd45236d38e6243a996676d6303ec023",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 88,
"avg_line_length": 30.47222222222222,
"alnum_prop": 0.5907019143117593,
"repo_name": "andrewtron3000/hacdc-ros-pkg",
"id": "5f8ba451d50ace5b7d51af6e5d7e9c3a98b16d59",
"size": "5077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face_detection/src/detector.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CMake",
"bytes": "9775"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Python",
"bytes": "28255"
}
],
"symlink_target": ""
}
|
class BenchmarkCase(object):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
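# A sketch of a concrete benchmark (class and method names are hypothetical;
# the runner's method-discovery convention is assumed, not taken from this file):
#
# class ListAppendBenchmark(BenchmarkCase):
#     def setUp(self):
#         self.data = []
#     def benchmark_append(self):
#         self.data.append(1)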
|
{
"content_hash": "d8a432cb4f5b9cabe80d538b2cf75ea3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 28,
"avg_line_length": 15.428571428571429,
"alnum_prop": 0.5740740740740741,
"repo_name": "AlekSi/benchmarking-py",
"id": "803c5cc4c3f56279cab665cd903bddb3a12715e6",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarking/case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39695"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
import logging
from linkedin.mobster.utils import format_time
class NetworkEventHandler(object):
def __init__(self):
    # request start times in seconds
    # the following dicts use request ids as keys
    self._request_start_times = defaultdict(lambda: -1)
self._page_refs = defaultdict(lambda: '_')
self._server_ips = defaultdict(lambda: '_')
self._connection_ids = defaultdict(lambda: '_')
self._resource_timings = defaultdict(self._default_resource_timing_value)
self._requests = defaultdict(self._default_request_value)
self._responses = defaultdict(self._default_response_value)
self._caches = defaultdict(self._default_cache_value)
self._data_packet_notifications = defaultdict(list)
self._response_sizes = defaultdict(lambda: 0)
self._response_encoded_sizes = defaultdict(lambda: 0)
self.primary_page_id = 'page_1'
# --------------
# Initialization
# --------------
def _default_resource_timing_value(self):
"""
Create initial value of resource timings object. We default to -1, since in the HAR format it signifies
that the field was not applicable. This way, it is guaranteed that the output is valid even if the browser
does not provide certain information (e.g. requesting about:blank does not have a receive time).
"""
timing_metrics = ['blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl']
return dict(zip(timing_metrics, len(timing_metrics) * [-1]))
def _default_request_value(self):
return {
'method': '_',
'url': '_',
'httpVersion': '_',
'cookies': [],
'headers': [],
'queryString': [],
'headersSize': -1,
'bodySize': 0
}
def _default_response_value(self):
return {
'status': -1,
'statusText': '_',
'httpVersion': '_',
'cookies': [],
'headers': [],
'content': self._default_content_value(),
'redirectURL': '',
'headersSize': -1,
'bodySize': 0
}
def _default_cache_value(self):
return {
'beforeRequest': None,
'afterRequest': None
}
def _default_content_value(self):
return {
'size': 0,
'compression': '-99999',
'mimeType': '_',
'text': '_'
}
# ---------
# Accessors
# ---------
def get_first_request_time(self):
"""Returns the time, in seconds, when the first resource request is initiated."""
return min(self._request_start_times.values())
# -----------------------------
# HAR Resource Entry Generation
# -----------------------------
def _total_resource_time(self, req_id):
"""Returns the total time taken for the request corresponding to the given request ID"""
timings = self._resource_timings[req_id]
timing_values = timings.values()
    non_zero_timings = filter(lambda x: x > 0, timing_values)
return int(sum(non_zero_timings))
def _make_entry(self, req_id):
"""Creates the HAR resource entry corresponding to the given request ID"""
return {
'pageref': self.primary_page_id,
'startedDateTime': format_time(self._request_start_times[req_id]),
'time': self._total_resource_time(req_id),
'request': self._requests[req_id],
'response': self._responses[req_id],
'cache': self._caches[req_id],
'timings': self._resource_timings[req_id],
'serverIPAddress': self._server_ips[req_id],
'connection': self._connection_ids[req_id]
}
def make_entry_list(self):
"""
Makes a list of HAR-formatted "entries", which correspond to resources requested by the page
"""
request_ids_sorted_by_time = sorted(self._request_start_times, key=self._request_start_times.get)
return [self._make_entry(req_id) for req_id in request_ids_sorted_by_time]
# --------------
# Event Handling
# --------------
def process_event(self, message):
# the 'method' field of the message object will have the format X.Y, where X is the domain and Y is the command
# e.g. 'Runtime.evaluate', 'Page.navigate', etc.
message_type = message['method'].split('.')[1]
{
'requestWillBeSent': self.process_request_will_be_sent,
'requestServedFromCache': self.process_request_served_from_cache,
'responseReceived': self.process_response_received,
'dataReceived': self.process_data_received,
'loadingFinished': self.process_loading_finished,
'requestServedFromMemoryCache': lambda m: None,
'loadingFailed': lambda m: logging.info("Received loadingFailed message: \n{0}".format(m))
}[message_type](message)
def parse_msg(self, msg):
"""Transforms json message into a tuple (params, request_id, frame_id, timestamp)"""
return (msg['params'], msg['params']['requestId'], msg['params']['frameId'], msg['params']['timestamp'])
def process_request_will_be_sent(self, message):
(params, request_id, frame_id, timestamp) = self.parse_msg(message)
headers = [{'name': key, 'value' : value} for key, value in params['request']['headers'].iteritems()]
self._requests[request_id]['headers'] = headers
self._requests[request_id]['method'] = params['request']['method']
self._requests[request_id]['url'] = params['request']['url']
# we do this just in case the ResponseReceived event does not include timings (e.g. about:blank)
self._request_start_times[request_id] = params['timestamp']
def process_request_served_from_cache(self, message):
logging.info('Received request served from cache message: \n{0}'.format(message))
def process_response_received(self, message):
(params, request_id, frame_id, timestamp) = self.parse_msg(message)
self._responses[request_id]['status'] = params['response']['status']
self._responses[request_id]['statusText'] = params['response']['statusText']
self._responses[request_id]['headersSize'] = -1
headers = [{'name': key, 'value': value} for key, value in params['response']['headers'].iteritems()]
self._responses[request_id]['headers'] = headers
# timings are not included for about: url's
if not self._requests[request_id]['url'].startswith('about:'):
            if 'timing' not in params['response']:
                logging.info('No timing information in message')
return
provided_timings = params['response']['timing']
self._request_start_times[request_id] = provided_timings['requestTime']
self._resource_timings[request_id]['blocked'] = max(provided_timings['dnsStart'], 0)
self._resource_timings[request_id]['dns'] = \
self.calc_timing(provided_timings['dnsStart'], provided_timings['dnsEnd'])
self._resource_timings[request_id]['connect'] = \
self.calc_timing(provided_timings['connectStart'], provided_timings['connectEnd'])
self._resource_timings[request_id]['send'] = \
self.calc_timing(provided_timings['sendStart'], provided_timings['sendEnd'])
def process_data_received(self, message):
self._data_packet_notifications[message['params']['requestId']].append(message['params'])
def process_loading_finished(self, message):
request_id = message['params']['requestId']
self._data_packet_notifications[request_id].sort(key = lambda x: x['timestamp'])
send_end = self._request_start_times[request_id] * 1000 + max(self._resource_timings[request_id]['blocked'], 0) \
+ max(self._resource_timings[request_id]['dns'], 0) \
+ max(self._resource_timings[request_id]['connect'], 0) \
+ max(self._resource_timings[request_id]['send'], 0)
# don't bother recording timing/size info for 'about:XXXX' url's
if not self._requests[request_id]['url'].startswith('about:'):
data_packet_infos = self._data_packet_notifications[request_id]
if len(data_packet_infos):
first_data_timestamp = self._data_packet_notifications[request_id][0]['timestamp']
last_data_timestamp = self._data_packet_notifications[request_id][-1]['timestamp']
self._resource_timings[request_id]['wait'] = int(first_data_timestamp * 1000 - send_end)
self._resource_timings[request_id]['receive'] = int(last_data_timestamp * 1000 - first_data_timestamp * 1000)
else:
self._resource_timings[request_id]['wait'] = int(message['params']['timestamp'] * 1000 - send_end)
self._resource_timings[request_id]['receive'] = 0
for packet_info in data_packet_infos:
self._response_sizes[request_id] += packet_info['dataLength']
self._response_encoded_sizes[request_id] += packet_info['encodedDataLength']
self._responses[request_id]['bodySize'] = self._response_sizes[request_id]
self._responses[request_id]['content']['size'] = self._response_sizes[request_id]
self._responses[request_id]['content']['compression'] = self._response_sizes[request_id] - \
self._response_encoded_sizes[request_id]
def calc_timing(self, start, end):
"""
Returns the difference between start and end or -1 if the timing is not applicable to the current request (i.e.
start and end are -1)
"""
if start == -1 and end == -1:
return -1
else:
return int(end - start)
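# A minimal driver sketch (assumed usage; `collector` stands in for an instance
# of the collector class this module defines):
#
#   for msg in devtools_network_messages:    # hypothetical message source
#       collector.process_event(msg)
#   entries = collector.make_entry_list()    # HAR-formatted resource entries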
|
{
"content_hash": "c063b5f9876f1d2c83acd3bd6d6bf058",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 117,
"avg_line_length": 40.13191489361702,
"alnum_prop": 0.631322235181847,
"repo_name": "krishnakanthpps/mobster",
"id": "164d6128aaf2514a1c47235293834619d061d1b1",
"size": "9431",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/linkedin/mobster/har/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4015"
},
{
"name": "JavaScript",
"bytes": "24914"
},
{
"name": "Python",
"bytes": "68450"
}
],
"symlink_target": ""
}
|
import unittest
import os
import random
import string
from qiniu import rs
from qiniu import conf
def r(length):
lib = string.ascii_uppercase
return ''.join([random.choice(lib) for i in range(0, length)])
conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY")
conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY")
key = 'QINIU_UNIT_TEST_PIC'
bucket_name = os.getenv("QINIU_TEST_BUCKET")
noexist_key = 'QINIU_UNIT_TEST_NOEXIST' + r(30)
key2 = "rs_demo_test_key_1_" + r(5)
key3 = "rs_demo_test_key_2_" + r(5)
key4 = "rs_demo_test_key_3_" + r(5)
class TestRs(unittest.TestCase):
def test_stat(self):
r = rs.Client()
ret, err = r.stat(bucket_name, key)
assert err is None
assert ret is not None
# error
_, err = r.stat(bucket_name, noexist_key)
assert err is not None
def test_delete_move_copy(self):
r = rs.Client()
r.delete(bucket_name, key2)
r.delete(bucket_name, key3)
ret, err = r.copy(bucket_name, key, bucket_name, key2)
assert err is None, err
ret, err = r.move(bucket_name, key2, bucket_name, key3)
assert err is None, err
ret, err = r.delete(bucket_name, key3)
assert err is None, err
# error
_, err = r.delete(bucket_name, key2)
assert err is not None
_, err = r.delete(bucket_name, key3)
assert err is not None
def test_batch_stat(self):
r = rs.Client()
entries = [
rs.EntryPath(bucket_name, key),
rs.EntryPath(bucket_name, key2),
]
ret, err = r.batch_stat(entries)
assert err is None
self.assertEqual(ret[0]["code"], 200)
self.assertEqual(ret[1]["code"], 612)
def test_batch_delete_move_copy(self):
r = rs.Client()
e1 = rs.EntryPath(bucket_name, key)
e2 = rs.EntryPath(bucket_name, key2)
e3 = rs.EntryPath(bucket_name, key3)
e4 = rs.EntryPath(bucket_name, key4)
r.batch_delete([e2, e3, e4])
# copy
entries = [
rs.EntryPathPair(e1, e2),
rs.EntryPathPair(e1, e3),
]
ret, err = r.batch_copy(entries)
assert err is None
self.assertEqual(ret[0]["code"], 200)
self.assertEqual(ret[1]["code"], 200)
ret, err = r.batch_move([rs.EntryPathPair(e2, e4)])
assert err is None
self.assertEqual(ret[0]["code"], 200)
ret, err = r.batch_delete([e3, e4])
assert err is None
self.assertEqual(ret[0]["code"], 200)
r.batch_delete([e2, e3, e4])
if __name__ == "__main__":
unittest.main()
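# Running these tests assumes the following environment variables are set:
#   QINIU_ACCESS_KEY, QINIU_SECRET_KEY, QINIU_TEST_BUCKET
# and that the bucket already contains an object named QINIU_UNIT_TEST_PIC
# (test_stat expects stat() on that key to succeed).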
|
{
"content_hash": "d799371a8e36343e492651e0912f8b3b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 66,
"avg_line_length": 27.24742268041237,
"alnum_prop": 0.5720771850170261,
"repo_name": "jemygraw/qrsync-python",
"id": "32714ff526c9f4e1e3b0a4c707d50943a85c32b1",
"size": "2667",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiniu/rs/test/rs_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60187"
}
],
"symlink_target": ""
}
|
"""SCons.Scanner.RC
This module implements the dependency scanner for RC (Windows resource
script) files.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/RC.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import re
import SCons.Node.FS
import SCons.Scanner
def no_tlb(nodes):
"""
Filter out .tlb files as they are binary and shouldn't be scanned
"""
# print("Nodes:%s"%[str(n) for n in nodes])
return [n for n in nodes if str(n)[-4:] != '.tlb']
def RCScan():
"""Return a prototype Scanner instance for scanning RC source files"""
res_re= r'^(?:\s*#\s*(?:include)|' \
r'.*?\s+(?:ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)' \
r'\s*.*?)' \
r'\s*(<|"| )([^>"\s]+)(?:[>"\s])*$'
resScanner = SCons.Scanner.ClassicCPP("ResourceScanner",
"$RCSUFFIXES",
"CPPPATH",
res_re,
recursive=no_tlb)
return resScanner
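# Illustrative .rc lines the regex above is intended to match (assumed examples):
#   #include "resource.h"
#   IDI_APP  ICON    "app.ico"
#   IDB_LOGO BITMAP  "logo.bmp"
# In each case the quoted filename is captured as the dependency.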
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "1d041fea2b986f369396be31942a4f77",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 116,
"avg_line_length": 35.27272727272727,
"alnum_prop": 0.6563573883161512,
"repo_name": "kayhayen/Nuitka",
"id": "47c6ca26ec547425b6057fb47097556cd55464f4",
"size": "2328",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Scanner/RC.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1868"
},
{
"name": "C",
"bytes": "617681"
},
{
"name": "C++",
"bytes": "149777"
},
{
"name": "Python",
"bytes": "6603718"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import numpy as np
from preprocess import shuffle
from tqdm import tqdm as t
from ipdb import set_trace as stop
import matplotlib.pyplot as plt
import os
from IPython import embed
class Run_classifiers():
def __init__(self, data, sentembeddings, general_options):
self.data = data
self.sentembeddings = sentembeddings
self.general_options = general_options
self.labels = sentembeddings.labels_to_train.astype(int)
self.num_labels = self.data.num_labels
self.preprocess()
self.onehot()
embedding_size = self.sentembeddings.w2vopt.embedding_size
window = self.sentembeddings.genopt.window_size
iterations = self.sentembeddings.w2vopt.iterations
gensimoptstring = "EMSIZE="+str(embedding_size)+"-WINDOW="+str(window)+"-ITER="+str(iterations)
print("Classifier for BoW")
self.one_run("BoW-avg", self.sentembeddings.bowavg)
print("Classifier for Metric Learning")
self.one_run("Metric_Learning-avg"+str(self.sentembeddings.mlearnopt), self.sentembeddings.metricavg)
print("Classifier for Gensim W2V")
self.one_run("Gensim_W2V-avg"+gensimoptstring, self.sentembeddings.GensimW2Vavg)
print("Classifier for My W2V")
self.one_run("My_W2V-avg"+str(self.sentembeddings.w2vopt), self.sentembeddings.MyW2Vavg)
def preprocess(self):
#Create directory for classification, if necessary
if not os.path.exists(self.data.directory+"Classifiers"):
os.makedirs(self.data.directory+"Classifiers")
self.directory = self.data.directory+"Classifiers/"
if not os.path.exists(self.directory+str(self.general_options)):
os.makedirs(self.directory+str(self.general_options))
self.directory = self.directory+str(self.general_options)+"/"
def onehot(self):
""" One-hot the labels """
y = self.labels
encoded = []
for i in y:
vec = np.zeros(self.num_labels)
vec[i] = 1
encoded.append(vec)
self.Y = np.array(encoded)
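        # e.g. with num_labels = 3, labels [0, 2] become
        # [[1., 0., 0.],
        #  [0., 0., 1.]]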
def one_run(self, name, embeddings):
""" Check whether the results of the classifier exist and decide to read or train """
if os.path.exists(self.directory+name) and os.path.isfile(self.directory+name+"/W.npy"):
return
os.makedirs(self.directory+name)
model = Classifier(sentembed = embeddings, encoded_labels = self.Y, num_labels = self.num_labels,
directory = self.directory+name+"/", options = self.general_options)
np.save(self.directory+name+"/W", model.W_final)
np.save(self.directory+name+"/b", model.b_final)
return
class Classifier():
""" Run a logistic regression classifier for one of the embeddings """
def __init__(self,sentembed,encoded_labels,num_labels,directory,options):
self.X = sentembed
self.Y = encoded_labels
self.opt = options
self.num_labels = num_labels
self.directory = directory
print("Creating graph")
self.create_graph()
print("Training classifier")
self.train()
def create_graph(self):
""" Create the graph of a logistic regression classifier """
self.nsamples, self.dim = self.X.shape
self.num_classes = self.num_labels
self.x = tf.placeholder(tf.float32, [None, self.dim], name = "x")
self.y = tf.placeholder(tf.float32, [None, self.num_classes], name = "y")
self.W = tf.Variable(tf.random_normal([self.dim, self.num_classes]))
self.b = tf.Variable(tf.random_normal([self.num_classes]))
self.aux = tf.matmul(self.x,self.W)+self.b
        self.yhat = tf.nn.softmax(self.aux, dim=-1)  # softmax over the class axis
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = self.aux))
self.optimizer = tf.train.GradientDescentOptimizer(self.opt.learning_rate).minimize(self.cost)
self.correct_prediction = tf.equal(tf.argmax(self.yhat, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, dtype = tf.float32))
def train(self):
""" Train the model. Inputs: self.X, self.Y"""
with tf.Session() as sess:
X_train, y_train = self.X, self.Y
init = tf.global_variables_initializer()
sess.run(init)
costs = []
accuracy = []
for epoch in t(range(self.opt.training_epochs)):
X_train, y_train = shuffle(X_train, y_train)
avg_cost = 0
avg_acc = 0
number_batches = int(self.nsamples/self.opt.batch_size)
for i in range(number_batches):
batch_x = X_train[i*self.opt.batch_size:(i+1)*self.opt.batch_size]
batch_y = y_train[i*self.opt.batch_size:(i+1)*self.opt.batch_size]
c, _, acc = sess.run([self.cost, self.optimizer, self.accuracy], feed_dict = {self.x: batch_x, self.y: batch_y})
avg_cost += c/number_batches
avg_acc += acc/number_batches
if epoch % self.opt.display_step == 0:
print("Epoch {0}, cost {1:.5f}, average training accuracy {2:.5f}".format(str(epoch+1).zfill(4), avg_cost, avg_acc))
costs += [avg_cost]
accuracy += [avg_acc]
plt.plot(costs)
plt.title("Cost")
plt.savefig(self.directory+"costs.png")
plt.close()
plt.plot(accuracy)
plt.title("Accuracy (train set)")
plt.savefig(self.directory+"acc.png")
plt.close()
self.W_final = sess.run(self.W)
self.b_final = sess.run(self.b)
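# The `options` object handed to Classifier is assumed (from the surrounding
# project) to expose at least: learning_rate, training_epochs, batch_size and
# display_step.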
|
{
"content_hash": "b90b346b6cde3343896d9dd4686f94f9",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 136,
"avg_line_length": 43.634328358208954,
"alnum_prop": 0.6066358816487087,
"repo_name": "PauBatlle/emb4class",
"id": "99b4f6ee0ea0cfd20200cc5faa47536491951b00",
"size": "5847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emb4class/classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60080"
}
],
"symlink_target": ""
}
|
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinxcontrib.spelling',
]
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
spelling_show_suggestions = True
spelling_ignore_pypi_package_names = True
spelling_ignore_contributor_names = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sphinxcontrib.spelling'
copyright = '2011, Doug Hellmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontribspellingdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinxcontribspelling.tex', 'sphinxcontrib.spelling Documentation',
'Doug Hellmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxcontribspelling', 'sphinxcontrib.spelling Documentation',
['Doug Hellmann'], 1)
]
|
{
"content_hash": "804d2ee53d7a25569f0013be9ed7a718",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 80,
"avg_line_length": 32.01401869158879,
"alnum_prop": 0.7102612757261714,
"repo_name": "sphinx-contrib/spelling",
"id": "83ed97e0a64454dfdcedce913d567523de1fdf9f",
"size": "7257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "965"
},
{
"name": "Python",
"bytes": "49739"
},
{
"name": "Shell",
"bytes": "844"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveInvoiceItem(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveInvoiceItem Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveInvoiceItem, self).__init__(temboo_session, '/Library/Stripe/InvoiceItems/RetrieveInvoiceItem')
def new_input_set(self):
return RetrieveInvoiceItemInputSet()
def _make_result_set(self, result, path):
return RetrieveInvoiceItemResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveInvoiceItemChoreographyExecution(session, exec_id, path)
class RetrieveInvoiceItemInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveInvoiceItem
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The secret API Key provided by Stripe)
"""
super(RetrieveInvoiceItemInputSet, self)._set_input('APIKey', value)
def set_InvoiceItemID(self, value):
"""
Set the value of the InvoiceItemID input for this Choreo. ((required, string) The unique identifier of the invoice item you want to retrieve)
"""
super(RetrieveInvoiceItemInputSet, self)._set_input('InvoiceItemID', value)
class RetrieveInvoiceItemResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveInvoiceItem Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe)
"""
return self._output.get('Response', None)
class RetrieveInvoiceItemChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveInvoiceItemResultSet(response, path)
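# A minimal usage sketch (assumed session setup; all values are placeholders):
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = RetrieveInvoiceItem(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('sk_test_...')       # Stripe secret key (placeholder)
#   inputs.set_InvoiceItemID('ii_...')     # invoice item ID (placeholder)
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())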
|
{
"content_hash": "59efbc8a42cdcbe86076266b8bbafd38",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 149,
"avg_line_length": 39.81967213114754,
"alnum_prop": 0.7179909427748045,
"repo_name": "jordanemedlock/psychtruths",
"id": "cf513699921c1e720282e93043df31bed68448b8",
"size": "3298",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Stripe/InvoiceItems/RetrieveInvoiceItem.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016 George Herde
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import urandom
from flask import Flask, request, redirect, url_for, render_template, session, flash
import BackEnd as BackEnd
app = Flask(__name__)
app.secret_key = urandom(24)
@app.route('/', methods=['GET'])
def show_home():
BackEnd.init()
session['init_complete'] = True
session['static_data'] = ""
return render_template('home.html')
@app.route('/search', methods=['GET', 'POST'])
def search_summoner():
if not session.get('init_complete'):
BackEnd.init()
session['init_complete'] = True
if request.method == 'POST':
if request.form['summoner_name'] == "":
return render_template('search.html')
session['summoner_name'] = request.form['summoner_name']
session['if_new'] = BackEnd.insert_summoner_controller(session['summoner_name'])
if session['if_new']:
failed = BackEnd.generate_mastery_controller(session['summoner_name'])
for champion in failed:
flash(champion + " failed to update, please retry")
return redirect(url_for('show_mastery', summoner_name=session['summoner_name']))
# show the form, it wasn't submitted
return render_template('search.html')
@app.route('/mastery/<summoner_name>', methods=['GET', 'POST'])
def show_mastery(summoner_name):
if not session.get('init_complete'):
BackEnd.init()
session['init_complete'] = True
session['summoner_name'] = summoner_name
if request.method == 'POST':
BackEnd.generate_mastery_controller(summoner_name)
flash('Mastery Updated')
return redirect(url_for('show_mastery', summoner_name=session['summoner_name']))
session['mastery_data'], summoner = BackEnd.select_summoner_champion_mastery_controller(summoner_name)
session['static_data'] = BackEnd.static_data_controller()
session['icon_url'] = 'http://ddragon.leagueoflegends.com/cdn/' + str(
session['static_data']['version']) + '/img/profileicon/' + str(summoner.icon) + '.png'
return render_template('mastery.html', session=session)
@app.route('/champion/<champion_key>', methods=['GET', 'POST'])
def show_detail(champion_key):
if not session.get('init_complete'):
BackEnd.init()
session['init_complete'] = True
session['champion_data'] = BackEnd.select_champion(champion_key=champion_key)
return render_template('mastery_detail.html', session=session)
if __name__ == '__main__':
app.debug = True
app.run()
|
{
"content_hash": "fba956cb2c54aead1445c07fe4ad123f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 106,
"avg_line_length": 38.1375,
"alnum_prop": 0.6778105539167486,
"repo_name": "blackpan2/LoLMastery",
"id": "0c458f26caa6c7bce55321d44b8e7288a44f33d8",
"size": "3051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMastery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "13893"
},
{
"name": "JavaScript",
"bytes": "4813"
},
{
"name": "Python",
"bytes": "14080"
}
],
"symlink_target": ""
}
|
"""Basic neural network layers."""
__all__ = ['Sequential', 'HybridSequential', 'Dense', 'Dropout', 'Embedding',
'BatchNorm', 'InstanceNorm', 'LayerNorm', 'Flatten', 'Lambda', 'HybridLambda']
import warnings
import numpy as np
from .activations import Activation
from ..block import Block, HybridBlock
from ..utils import _indent
from ... import nd, sym
class Sequential(Block):
"""Stacks Blocks sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(Sequential, self).__init__(prefix=prefix, params=params)
def add(self, *blocks):
"""Adds block on top of the stack."""
for block in blocks:
self.register_child(block)
def forward(self, x):
for block in self._children.values():
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in self._children.items()])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, key):
layers = list(self._children.values())[key]
if isinstance(layers, list):
net = type(self)(prefix=self._prefix)
with net.name_scope():
net.add(*layers)
return net
else:
return layers
def __len__(self):
return len(self._children)
def hybridize(self, active=True, **kwargs):
"""Activates or deactivates `HybridBlock`s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
"""
if self._children and all(isinstance(c, HybridBlock) for c in self._children.values()):
warnings.warn(
"All children of this Sequential layer '%s' are HybridBlocks. Consider "
"using HybridSequential for the best performance."%self.prefix, stacklevel=2)
super(Sequential, self).hybridize(active, **kwargs)
class HybridSequential(HybridBlock):
"""Stacks HybridBlocks sequentially.
Example::
net = nn.HybridSequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.hybridize()
"""
def __init__(self, prefix=None, params=None):
super(HybridSequential, self).__init__(prefix=prefix, params=params)
def add(self, *blocks):
"""Adds block on top of the stack."""
for block in blocks:
self.register_child(block)
def hybrid_forward(self, F, x):
for block in self._children.values():
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in self._children.items()])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, key):
layers = list(self._children.values())[key]
if isinstance(layers, list):
net = type(self)(prefix=self._prefix)
with net.name_scope():
net.add(*layers)
return net
else:
return layers
def __len__(self):
return len(self._children)
class Dense(HybridBlock):
r"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
flatten: bool
Whether the input tensor should be flattened.
If true, all but the first axis of input data are collapsed together.
If false, all but the last axis of input data are kept the same, and the transformation
applies on the last axis.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Inputs:
- **data**: if `flatten` is True, `data` should be a tensor with shape
`(batch_size, x1, x2, ..., xn)`, where x1 * x2 * ... * xn is equal to
`in_units`. If `flatten` is False, `data` should have shape
`(x1, x2, ..., xn, in_units)`.
Outputs:
- **out**: if `flatten` is True, `out` will be a tensor with shape
`(batch_size, units)`. If `flatten` is False, `out` will have shape
`(x1, x2, ..., xn, units)`.
"""
def __init__(self, units, activation=None, use_bias=True, flatten=True,
dtype='float32', weight_initializer=None, bias_initializer='zeros',
in_units=0, **kwargs):
super(Dense, self).__init__(**kwargs)
self._flatten = flatten
with self.name_scope():
self._units = units
self._in_units = in_units
self.weight = self.params.get('weight', shape=(units, in_units),
init=weight_initializer, dtype=dtype,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=(units,),
init=bias_initializer, dtype=dtype,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
act = F.FullyConnected(x, weight, bias, no_bias=bias is None, num_hidden=self._units,
flatten=self._flatten, name='fwd')
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]))
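# A minimal usage sketch for Dense (assumed shapes):
#
#   net = Dense(16, activation='relu', in_units=8)
#   net.initialize()
#   out = net(nd.random.uniform(shape=(4, 8)))   # out.shape == (4, 16)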
class Dropout(HybridBlock):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units
to 0 at each update during training time, which helps prevent overfitting.
Parameters
----------
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
axes : tuple of int, default ()
The axes on which dropout mask is shared. If empty, regular dropout is applied.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
References
----------
`Dropout: A Simple Way to Prevent Neural Networks from Overfitting
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
"""
def __init__(self, rate, axes=(), **kwargs):
super(Dropout, self).__init__(**kwargs)
self._rate = rate
self._axes = axes
def hybrid_forward(self, F, x):
return F.Dropout(x, p=self._rate, axes=self._axes, name='fwd')
def __repr__(self):
s = '{name}(p = {_rate}, axes={_axes})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class BatchNorm(HybridBlock):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
Parameters
----------
axis : int, default 1
The axis that should be normalized. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
use_global_stats: bool, default False
If True, use global moving statistics instead of local batch-norm. This will force
change batch-norm into a scale shift operator.
If False, use local batch-norm.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
moving_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the moving mean.
moving_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the moving variance.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
"""
def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
use_global_stats=False, beta_initializer='zeros', gamma_initializer='ones',
running_mean_initializer='zeros', running_variance_initializer='ones',
in_channels=0, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale, 'use_global_stats': use_global_stats}
if in_channels != 0:
self.in_channels = in_channels
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True,
differentiable=scale)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True,
differentiable=center)
self.running_mean = self.params.get('running_mean', grad_req='null',
shape=(in_channels,),
init=running_mean_initializer,
allow_deferred_init=True,
differentiable=False)
self.running_var = self.params.get('running_var', grad_req='null',
shape=(in_channels,),
init=running_variance_initializer,
allow_deferred_init=True,
differentiable=False)
def cast(self, dtype):
if np.dtype(dtype).name == 'float16':
dtype = 'float32'
super(BatchNorm, self).cast(dtype)
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.BatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}({content}'
in_channels = self.gamma.shape[0]
s += ', in_channels={0}'.format(in_channels if in_channels else None)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class Embedding(HybridBlock):
r"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [4, 20] -> [[0.25, 0.1], [0.6, -0.2]]
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Inputs:
- **data**: (N-1)-D tensor with shape: `(x1, x2, ..., xN-1)`.
Output:
- **out**: N-D tensor with shape: `(x1, x2, ..., xN-1, output_dim)`.
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(Embedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer, dtype=dtype,
allow_deferred_init=True)
def hybrid_forward(self, F, x, weight):
return F.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
class Flatten(HybridBlock):
r"""Flattens the input to two dimensional.
Inputs:
- **data**: input tensor with arbitrary shape `(N, x1, x2, ..., xn)`
Output:
- **out**: 2D tensor with shape: `(N, x1 \cdot x2 \cdot ... \cdot xn)`
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return x.reshape((0, -1))
def __repr__(self):
return self.__class__.__name__
class InstanceNorm(HybridBlock):
r"""
Applies instance normalization to the n-dimensional input array.
This operator takes an n-dimensional input array where (n>2) and normalizes
the input using the following formula:
.. math::
\bar{C} = \{i \mid i \neq 0, i \neq axis\}
out = \frac{x - mean[data, \bar{C}]}{ \sqrt{Var[data, \bar{C}]} + \epsilon}
* gamma + beta
Parameters
----------
axis : int, default 1
The axis that will be excluded in the normalization process. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `InstanceNorm`. If `layout='NHWC'`, then set `axis=3`. Data will be
normalized along axes excluding the first axis and the axis given.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
References
----------
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_
Examples
--------
>>> # Input of shape (2,1,2)
>>> x = mx.nd.array([[[ 1.1, 2.2]],
... [[ 3.3, 4.4]]])
>>> # Instance normalization is calculated with the above formula
>>> layer = InstanceNorm()
>>> layer.initialize(ctx=mx.cpu(0))
>>> layer(x)
[[[-0.99998355 0.99998331]]
[[-0.99998319 0.99998361]]]
<NDArray 2x1x2 @cpu(0)>
"""
def __init__(self, axis=1, epsilon=1e-5, center=True, scale=False,
beta_initializer='zeros', gamma_initializer='ones',
in_channels=0, **kwargs):
super(InstanceNorm, self).__init__(**kwargs)
self._kwargs = {'eps': epsilon, 'axis': axis, 'center': center, 'scale': scale}
self._axis = axis
self._epsilon = epsilon
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, gamma, beta):
if self._axis == 1:
return F.InstanceNorm(x, gamma, beta,
name='fwd', eps=self._epsilon)
x = x.swapaxes(1, self._axis)
return F.InstanceNorm(x, gamma, beta, name='fwd',
eps=self._epsilon).swapaxes(1, self._axis)
def __repr__(self):
s = '{name}({content}'
in_channels = self.gamma.shape[0]
s += ', in_channels={0}'.format(in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class LayerNorm(HybridBlock):
r"""
Applies layer normalization to the n-dimensional input array.
This operator takes an n-dimensional input array and normalizes
the input using the given axis:
.. math::
out = \frac{x - mean[data, axis]}{ \sqrt{Var[data, axis]} + \epsilon} * gamma + beta
Parameters
----------
axis : int, default -1
The axis that should be normalized. This is typically the axis of the channels.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
References
----------
`Layer Normalization
<https://arxiv.org/pdf/1607.06450.pdf>`_
Examples
--------
>>> # Input of shape (2, 5)
>>> x = mx.nd.array([[1, 2, 3, 4, 5], [1, 1, 2, 2, 2]])
>>> # Layer normalization is calculated with the above formula
>>> layer = LayerNorm()
>>> layer.initialize(ctx=mx.cpu(0))
>>> layer(x)
[[-1.41421 -0.707105 0. 0.707105 1.41421 ]
[-1.2247195 -1.2247195 0.81647956 0.81647956 0.81647956]]
<NDArray 2x5 @cpu(0)>
"""
def __init__(self, axis=-1, epsilon=1e-5, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
in_channels=0, prefix=None, params=None):
super(LayerNorm, self).__init__(prefix=prefix, params=params)
self._kwargs = {'eps': epsilon, 'axis': axis, 'center': center, 'scale': scale}
self._axis = axis
self._epsilon = epsilon
self._center = center
self._scale = scale
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, data, gamma, beta):
norm_data = F.LayerNorm(data, gamma=gamma, beta=beta, axis=self._axis, eps=self._epsilon)
return norm_data
def __repr__(self):
s = '{name}({content}'
in_channels = self.gamma.shape[0]
s += ', in_channels={0}'.format(in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class Lambda(Block):
r"""Wraps an operator or an expression as a Block object.
Parameters
----------
function : str or function
Function used in lambda must be one of the following:
1) the name of an operator that is available in ndarray. For example::
block = Lambda('tanh')
2) a function that conforms to "def function(*args)". For example::
block = Lambda(lambda x: nd.LeakyReLU(x, slope=0.1))
Inputs:
- ** *args **: one or more input data. Their shapes depend on the function.
Output:
- ** *outputs **: one or more output data. Their shapes depend on the function.
"""
def __init__(self, function, prefix=None):
super(Lambda, self).__init__(prefix=prefix)
if isinstance(function, str):
assert hasattr(nd, function), \
"Function name %s is not found in ndarray." % function
self._func_impl = getattr(nd, function)
elif callable(function):
self._func_impl = function
else:
raise ValueError(
"Unrecognized function in lambda: {} of type {}"
.format(function, type(function)))
def forward(self, *args):
return self._func_impl(*args)
def __repr__(self):
return '{name}({function})'.format(name=self.__class__.__name__,
function=self._func_impl.__name__)
class HybridLambda(HybridBlock):
r"""Wraps an operator or an expression as a HybridBlock object.
Parameters
----------
function : str or function
Function used in lambda must be one of the following:
1) the name of an operator that is available in both symbol and ndarray. For example::
block = HybridLambda('tanh')
2) a function that conforms to "def function(F, data, *args)". For example::
block = HybridLambda(lambda F, x: F.LeakyReLU(x, slope=0.1))
Inputs:
- ** *args **: one or more input data. First argument must be symbol or ndarray.
Their shapes depend on the function.
Output:
- ** *outputs **: one or more output data. Their shapes depend on the function.
"""
def __init__(self, function, prefix=None):
super(HybridLambda, self).__init__(prefix=prefix)
if isinstance(function, str):
assert hasattr(nd, function) and hasattr(sym, function), \
"Function name %s is not found in symbol/ndarray." % function
func_dict = {sym: getattr(sym, function), nd: getattr(nd, function)}
self._func = lambda F, *args: func_dict[F](*args)
self._func_name = function
elif callable(function):
self._func = function
self._func_name = function.__name__
else:
raise ValueError(
"Unrecognized function in lambda: {} of type {}"
.format(function, type(function)))
def hybrid_forward(self, F, x, *args):
return self._func(F, x, *args)
def __repr__(self):
return '{name}({function})'.format(name=self.__class__.__name__,
function=self._func_name)
|
{
"content_hash": "ca02378c51b7f46e53eab2ddf4f6dea8",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 99,
"avg_line_length": 39.468934911242606,
"alnum_prop": 0.5610359431805405,
"repo_name": "fullfanta/mxnet",
"id": "d86c3e6ce4f34387019d0a714cfdbedb2e070f8b",
"size": "27519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/gluon/nn/basic_layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "89222"
},
{
"name": "C++",
"bytes": "3181719"
},
{
"name": "CMake",
"bytes": "47349"
},
{
"name": "Cuda",
"bytes": "566696"
},
{
"name": "Java",
"bytes": "2868"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40032"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "589576"
},
{
"name": "Perl 6",
"bytes": "21768"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "2754365"
},
{
"name": "R",
"bytes": "265756"
},
{
"name": "Scala",
"bytes": "855100"
},
{
"name": "Shell",
"bytes": "109190"
}
],
"symlink_target": ""
}
|
from gzip import GzipFile
from io import BytesIO
from flask import request
class GZip(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
app.after_request(self.after_request)
def after_request(self, response):
        encoding = request.headers.get('Accept-Encoding', '')
        if 'gzip' not in encoding.lower() or \
                response.status_code != 200 or \
                'Content-Encoding' in response.headers:
return response
response.direct_passthrough = False
gzip_buffer = BytesIO()
with GzipFile(mode='wb', compresslevel=5, fileobj=gzip_buffer) as gzip_file:
gzip_file.write(response.get_data())
response.set_data(bytes(gzip_buffer.getvalue()))
response.headers['Content-Encoding'] = 'gzip'
response.headers['Content-Length'] = response.content_length
return response
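# A minimal wiring sketch (assumed app setup):
#
#   from flask import Flask
#   app = Flask(__name__)
#   GZip(app)                 # or: gzip = GZip(); gzip.init_app(app)
#
# Responses are then gzip-compressed whenever the client advertises
# 'Accept-Encoding: gzip' and the response is a 200 with no existing
# Content-Encoding header.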
|
{
"content_hash": "143488d17244acaad652828d9660390d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 84,
"avg_line_length": 27.82857142857143,
"alnum_prop": 0.6262833675564682,
"repo_name": "abacuspix/NFV_project",
"id": "a0f2baf5a51b621cf94246c044007a4c459a80ac",
"size": "974",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Mastering Flask_Code Bundle/Chapter_13/Flask-GZip/flask_gzip/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6037"
},
{
"name": "Gherkin",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "342352"
},
{
"name": "JavaScript",
"bytes": "8828"
},
{
"name": "Mako",
"bytes": "2224"
},
{
"name": "Nginx",
"bytes": "231"
},
{
"name": "Python",
"bytes": "706126"
}
],
"symlink_target": ""
}
|
import os
DEBUG = False
DEBUG_user = ''
OTHER_APPS = ()
invalid_template_string = ''
try:
from local_settings import *
except ImportError:
pass
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__)).replace('\\','/')
LOGIN_URL='login_view'
### admins in local_settings ###
MANAGERS = ADMINS
### database settings in local_settings ###
### allowed hosts in local_settings ###
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Detroit'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_PATH+'/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/srv/www/_static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
PROJECT_PATH+'/static/',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
# )
MIDDLEWARE_CLASSES = (
'htmlmin.middleware.HtmlMinifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'migweb.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'migweb.wsgi.application'
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# PROJECT_PATH+'/templates',
# )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [PROJECT_PATH+'/templates/'],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
'django.template.context_processors.request',
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"migweb.context_processors.profile_setup",
"migweb.context_processors.debug_features",
"migweb.context_processors.dropdowns",
],
'string_if_invalid': invalid_template_string,
}
},
]
# C:\Users\Mike\Documents\TBP_Website\mig-website
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
#'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django_spaghetti',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
#'dbsettings',
'django_select2',
'django_ajax',
'stdimage',
'mig_main',
'elections',
'event_cal',
'electees',
'requirements',
'history',
'about',
'outreach',
'member_resources',
'corporate',
'fora',
# 'bookswap',
)+OTHER_APPS
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER='migweb.test_runner.MigTestRunner'
### email settings in local_settings ###
## Select2 Settings
SELECT2_BOOTSTRAP=True
# for visualization
SPAGHETTI_SAUCE = {
'apps':['mig_main','event_cal','about','history','migweb','corporate','electees','elections','fora','member_resources','outreach','requirements'],
'show_fields':False,
'exclude':{'mig_main':['academicterm']}
}
|
{
"content_hash": "a55ac36db7299a8d79eda00dfd1534c1",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 148,
"avg_line_length": 31.586854460093896,
"alnum_prop": 0.6793995243757431,
"repo_name": "tbpmig/mig-website",
"id": "0bb3d848e2c48a7f00011958fcba7b8efdd0670c",
"size": "6766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migweb/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8576"
},
{
"name": "HTML",
"bytes": "760931"
},
{
"name": "JavaScript",
"bytes": "64350"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Python",
"bytes": "1637977"
},
{
"name": "TeX",
"bytes": "5289"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup # For HTML parsing
import requests
import re # Regular expressions
from time import sleep # To prevent overwhelming the server between connections
import pandas as pd # For converting results to a dataframe
from sqlalchemy import create_engine,Table,Column,Integer,String,MetaData,ForeignKey,Date
import pymysql # We'll need sqlalchemy and pymysql to connect the SQL server
import datetime
import multiprocessing as mp
class ProdInfo:
vcleaners={"central":11333709011,"canister":510108,"handheld":510114,"robotic":3743561,"stick":510112,"upright":510110,"wetdry":553022}
def getVacuumTypeUrl(self,vacuumType,pageNum=1):
'''
        Given one of the following vacuum types (central, canister, handheld, robotic, stick, upright, wetdry),
        return the URL of the listing page of vacuum cleaners of that type.
'''
url_type_base="https://www.amazon.com/home-garden-kitchen-furniture-bedding/b/ref=sr_pg_"+str(pageNum)+"?ie=UTF8&node="
url=url_type_base+str(self.vcleaners[vacuumType])+"&page="+str(pageNum)
print (url)
return url
def getFinalPageNum(self,url,maxretrytime=50):
'''
This method aims to obtain the total number of pages to be explored (for a specific vacuum type)
'''
passed=False
cnt=0
while(passed==False):
cnt+=1
print("%s times of iteration (getFinalPageNum)"%cnt)
if(cnt>maxretrytime):
raise Exception("Error from getFinalPageNum! Tried too many times but we are still blocked by Amazon.")
            # We have a try-except block here because a connection timeout raises an exception
try:
with requests.Session() as session:
session.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"}
r=session.get(url)
if (r.status_code==200):
soup=BeautifulSoup(r.content,"lxml")
if("Robot Check" in soup.text):
print("we are blocked!")
else:
tagsFinalPageNum=soup.select("span[class='pagnDisabled']")
finalPageNum=str(tagsFinalPageNum[0].text)
passed=True
else:
print("Connection failed. Reconnecting...")
except:
print("Error from getFinalPageNum(url)! Probably due to connection time out")
return finalPageNum
def InferFinalPageNum(self,vacuumType,pageNum=1,times=10):
'''
        We found that the total number of pages cannot be obtained reliably in a
        single request. Hence, we infer it by fetching the value several times
        (10 by default) and choosing the smallest one.
'''
url=self.getVacuumTypeUrl(vacuumType,pageNum)
list_finalpageNum=[]
for j in range(times):
finalpageNum=self.getFinalPageNum(url)
list_finalpageNum.append(finalpageNum)
FinalpageNum=min(list_finalpageNum)
print("the infered total number of pages=",FinalpageNum)
return FinalpageNum
def urlsGenerator(self,vacuumType,FinalPageNum):
"""
All the URLs of the selected vacuum type will be returned.
"""
URLs=[]
pageIdx=1
while(pageIdx<=int(FinalPageNum)):
url_Type="https://www.amazon.com/home-garden-kitchen-furniture-bedding/b/ref=sr_pg_"+str(pageIdx)+"?ie=UTF8&node="
url=url_Type+str(self.vcleaners[vacuumType])+"&page="+str(pageIdx)
URLs.append(url)
pageIdx+=1
return URLs
def soupGenerator(self,URLs,maxretrytime=50):
"""
Soups of all the URLs of the selected vacuum type will be returned.
"""
soups=[]
urlindex=0
for URL in URLs:
urlindex+=1
print("urlindex=",urlindex)
passed=False
cnt=0
while(passed==False):
cnt+=1
print("iteration=",cnt)
if(cnt>maxretrytime):
raise Exception("Error from soupGenerator(url,maxretrytime=%i)! Tried too many times but we are still blocked by Amazon."%maxretrytime)
                # We have a try-except block here because a connection timeout raises an exception
try:
with requests.Session() as session:
session.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"}
r=session.get(URL)
if (r.status_code==200):
soup=BeautifulSoup(r.content,"lxml")
if("Robot Check" in soup.text):
print("we are blocked!")
# we'll save the soup only when the status=200 and the page is not anti-robot
else:
print("we are not blocked!")
soups.append(soup)
passed=True
else:
print("Connection failed. Reconnecting...")
except:
print("Error from soupGenerator(URLs,maxretrytime=%i! Probably due to connection time out"%maxretrytime)
return soups
def items_info_extractor(self,soups):
item_links=[]
item_num_of_reviews=[]
item_prices=[]
item_names=[]
item_ids=[]
item_brands=[]
item_avestars=[]
for soup in soups:
items=soup.select('li[id^="result_"]')
for item in items:
link_item=item.select("a[href$='customerReviews']")
                # ignore those items which contain 0 customer reviews. Those items are irrelevant to us.
if (link_item !=[]):
price_tag=link_item[0].parent.previous_sibling.previous_sibling
price_main_tag=price_tag.select(".sx-price-whole")
price_fraction_tag=price_tag.select(".sx-price-fractional")
link=link_item[0]["href"]
# Ignore items which don't have normal price tags.
# Those are items which are not sold by Amazon directly.
# Also, remove those items which are ads (3 ads are shown in each page).
                    if (price_main_tag != []) and (price_fraction_tag != []) and (not link.endswith("spons#customerReviews")):
# extract the item's name and ID from the obtained link
item_name=link.split("/")[3]
item_id=link.split("/")[5]
# replace the obtained link by the link that will lead to the customer reviews
base_url="https://www.amazon.com/"
link=base_url+item_name+"/product-reviews/"+item_id+"/ref=cm_cr_getr_d_paging_btm_" \
+str(1)+"?ie=UTF8&pageNumber="+str(1)+"&reviewerType=all_reviews&pageSize=1000"
# obtain the price of the selected single item
price_main=re.sub(",","",price_main_tag[0].text)
price_fraction=price_fraction_tag[0].text
item_price=int(price_main)+0.01*int(price_fraction)
# obtain the brand of the selected single item
item_brand=price_tag.parent.select(".a-size-small")[1].text
if(item_brand=="by "):
item_brand=price_tag.parent.select(".a-size-small")[2].text
# obtain the number of reviews of the selected single item
item_num_of_review=int(re.sub(",","",link_item[0].text))
# obtain the averaged number of stars
starSelect=item.select_one("span[class='a-declarative']")
#starSelect=item.select_one("div[class='a-column a-span5 a-span-last']")
if(starSelect is None): # there are no reviews yet (hence, we see no stars at all)
item_avestar=0
else:
item_avestar=starSelect.span.string.split(" ")[0] # there are some reviews. So, we are able to extract the averaged number of stars
#item_avestar=starSelect.div.span.a.i.string.split(" ")[0] # there are some reviews. So, we are able to extract the averaged number of stars
# store the obtained variables into lists
item_links.append(link)
item_num_of_reviews.append(item_num_of_review)
item_prices.append(item_price)
item_names.append(item_name)
item_ids.append(item_id)
item_brands.append(item_brand)
item_avestars.append(item_avestar)
return item_brands,item_ids,item_names,item_prices,item_num_of_reviews,item_links,item_avestars
def run(prodType):
print("The chosen vacuum type is %s.\n"%prodType)
# retrieve the data we want
prodObj=ProdInfo()
FinalPageNum=prodObj.InferFinalPageNum(prodType)
URLs=prodObj.urlsGenerator(prodType,FinalPageNum)
soups=prodObj.soupGenerator(URLs)
item_brands,item_ids,item_names,item_prices,item_num_of_reviews,item_links,item_avestars=prodObj.items_info_extractor(soups)
# store the retrieved data
date=datetime.datetime.now().strftime("%Y-%m-%d")
df=pd.DataFrame.from_items([("pindex",item_ids),("type",prodType),("pname",item_names),("brand",item_brands),("price",item_prices),("rurl",item_links),("totalRev",item_num_of_reviews),("avgStars",item_avestars)])
df.to_csv("ProdInfo_%s_%s.csv"%(prodType,date), encoding="utf-8")
## engine=create_engine("mysql+pymysql://semantic:GbwSq1RzFa@104.199.201.206:13606/Tests?charset=utf8",echo=False, encoding='utf-8')
## conn = engine.connect()
## df.to_sql(name='amzProd', con=conn, if_exists = 'append', index=False)
## conn.close()
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
#run(prodTypes[-1]) # recalculate for the type wetdry
#for prodType in prodTypes:
# run(prodType)
# Let's use 7 processes to retrieve the data of the 7 types of vacuum cleaners.
# Guard the spawning so it is not re-executed on import (required by
# multiprocessing on platforms that use the "spawn" start method).
if __name__ == "__main__":
    processes = [mp.Process(target=run, args=(prodType,)) for prodType in prodTypes]
    # start and run the processes
    for p in processes:
        p.start()
    for p in processes:
        p.join()
|
{
"content_hash": "1745c621d3da0be73be44016cdc6d43d",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 213,
"avg_line_length": 39.62555066079295,
"alnum_prop": 0.6893829905503057,
"repo_name": "chi-hung/SementicProj",
"id": "b0164ae17efe60dcedd52a587613a1172fd90f9f",
"size": "9021",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webCrawler/amzProd_InfoRetriever.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "234681"
},
{
"name": "Python",
"bytes": "8811"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
import argparse
import logging
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
from logging import Formatter, StreamHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_handler = StreamHandler()
_handler.setFormatter(Formatter(
'%(asctime)s %(levelname)s: %(message)s',
'%Y-%m-%d %H:%M:%S'
))
logger.addHandler(_handler)
def error(msg, *args, **kwargs):
logger.error(msg, *args)
if 'exit' in kwargs and kwargs['exit']:
sys.exit(1)
class Command():
def update_submodules(self):
proc = subprocess.Popen(['git', 'submodule', 'init'])
ret = proc.wait()
if ret != 0:
error("Failed to initialize git submodules", exit=True)
proc = subprocess.Popen(['git', 'submodule', 'update'])
ret = proc.wait()
if ret != 0:
error("Failed to update git submodules", exit=True)
def recompile_vim_plugins(self):
import platform
if platform.system() != 'Darwin':
print "!! Can not auto-compile vim plugins. Please compile them yourself."
print " * YouCompleteMe"
return
logger.info('Attempting compilation of Vim plugins... This may take awhile.')
os.chdir(os.path.join(BASE_DIR, 'dotfiles/vim/bundle/YouCompleteMe'))
proc = subprocess.Popen(['/bin/sh', 'install.sh'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
if ret != 0:
error("Failed to compile YouCompleteMe", exit=True)
class Install(Command):
def __init__(self, options):
self.options = options
self.update_submodules()
self.install_dotfiles()
self.install_vundle()
if not options.skip_compile:
self.recompile_vim_plugins()
def install_dotfiles(self):
to_install = os.listdir(os.path.join(BASE_DIR, 'dotfiles'))
to_install = self.filter_shellfiles(to_install)
for source in to_install:
source_path = os.path.join(BASE_DIR, 'dotfiles', source)
target_path = os.path.join(os.environ['HOME'], '.' + source)
self.remove_file(target_path)
os.symlink(source_path, target_path)
logger.info('Linked %s', source_path)
def filter_shellfiles(self, dotfiles):
if self.options.shell == 'zsh':
return [f for f in dotfiles if 'bash' not in f]
elif self.options.shell == 'bash':
return [f for f in dotfiles if 'zsh' not in f]
def remove_file(self, target):
proc = subprocess.Popen(['rm', '-rf', target],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
if ret != 0:
error("Failed to remove file %s", target, exit=True)
def install_vundle(self):
proc = subprocess.Popen(['vim', '-c', 'BundleInstall', '-c', 'quitall'])
ret = proc.wait()
if ret != 0:
error("Failed to install vim plugins...", exit=True)
class Update(Command):
def __init__(self, options):
self.options = options
self.update_git()
self.update_submodules()
self.update_vundle()
if not options.skip_compile:
self.recompile_vim_plugins()
def update_git(self):
proc = subprocess.Popen(['git', 'pull'])
ret = proc.wait()
if ret != 0:
error("Failed to pull latest changes", exit=True)
def update_vundle(self):
proc = subprocess.Popen(['vim', '-c', 'BundleUpdate', '-c', 'quitall'])
ret = proc.wait()
if ret != 0:
error("Failed to update vundle, I recommend running :BundleInstall inside vim.", exit=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser('install', help="Install rcfiles into your home directory.")
subparser.add_argument('--shell', default="zsh", choices=["zsh", "bash"], help="Which shell you want to install rc files for.")
subparser.add_argument('--skip-compile', action="store_true", help="Skip compilation of Vim plugins")
subparser.set_defaults(CommandClass=Install)
subparser = subparsers.add_parser('update', help="Update rcfiles.")
subparser.add_argument('--skip-compile', action="store_true", help="Skip compilation of Vim plugins")
subparser.set_defaults(CommandClass=Update)
options = parser.parse_args()
command = options.CommandClass(options)
|
{
"content_hash": "e3e1cb672ab08033b9dd98b21782d4cb",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 131,
"avg_line_length": 36.07936507936508,
"alnum_prop": 0.6135063792344919,
"repo_name": "dahlke/rcfiles",
"id": "7a389a7a3e059dfb95906d07d35c3d8248656933",
"size": "4568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rcfiles.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4568"
},
{
"name": "Shell",
"bytes": "10823"
},
{
"name": "Vim script",
"bytes": "14385"
}
],
"symlink_target": ""
}
|
from neutron.api import extensions
from neutron.extensions import l3
ALIAS = 'cisco-apic-l3'
USE_ROUTING_CONTEXT = 'apic:use_routing_context'
EXT_GW_ATTRIBUTES = {
USE_ROUTING_CONTEXT: {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'validate': {'type:uuid_or_none': None},
}
}
EXTENDED_ATTRIBUTES_2_0 = {
l3.ROUTERS: dict(EXT_GW_ATTRIBUTES.items())
}
class Cisco_apic_l3(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Cisco APIC L3"
@classmethod
def get_alias(cls):
return ALIAS
@classmethod
def get_description(cls):
return ("Extension exposing mapping of Neutron L3 resources to Cisco "
"APIC constructs")
@classmethod
def get_updated(cls):
return "2017-06-06T12:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
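# A hypothetical request sketch (an assumption, not part of this module):
# with the extension loaded, a router create request may carry the new
# attribute, e.g.
#
#   POST /v2.0/routers
#   {"router": {"name": "r1",
#               "apic:use_routing_context": "<uuid-of-existing-router>"}}
#
# The attribute accepts a UUID or null and may only be set at creation time
# (allow_post=True, allow_put=False); presumably it references a resource
# whose routing context should be reused.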
|
{
"content_hash": "009ccc85db2681d9a7abc4c9facf89c7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 23.136363636363637,
"alnum_prop": 0.6198428290766208,
"repo_name": "noironetworks/apic-ml2-driver",
"id": "d506fb93418ce1a4c4034e2212f5ef48c5666fb6",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apic_ml2/neutron/extensions/cisco_apic_l3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "632912"
},
{
"name": "Shell",
"bytes": "12829"
}
],
"symlink_target": ""
}
|
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.autompg import autompg as df
p = BoxPlot(df, values='mpg', label='cyl', outliers=False,
title="MPG Summary (grouped by CYL, no outliers)")
output_file("boxplot.html")
show(p)
|
{
"content_hash": "a147cd8f55d10a422f77903ebbbfd13b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7121212121212122,
"repo_name": "draperjames/bokeh",
"id": "fb446792d2e02a0632e5661d382117a4669a2f46",
"size": "264",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sphinx/source/docs/user_guide/examples/charts_boxplot_outliers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "93011"
},
{
"name": "CoffeeScript",
"bytes": "1072438"
},
{
"name": "HTML",
"bytes": "46812"
},
{
"name": "JavaScript",
"bytes": "31782"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "2332463"
},
{
"name": "Shell",
"bytes": "3660"
},
{
"name": "TypeScript",
"bytes": "105726"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("letters", "0011_letter_is_draft"),
]
operations = [
migrations.AddField(
model_name="letter",
name="mark_spam_by",
field=models.ForeignKey(
blank=True,
help_text="The person who marked it as spam",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="letter_mark_spam_by",
to=settings.AUTH_USER_MODEL,
verbose_name=b"Spam marker",
),
)
]
|
{
"content_hash": "3fa223a32dfe3059cd355a1d0535f811",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 29.296296296296298,
"alnum_prop": 0.5651074589127687,
"repo_name": "watchdogpolska/feder",
"id": "96f2392f147a11a4dec5254748ccec78a8ee45fb",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feder/letters/migrations/0012_letter_mark_spam_by.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from twobuntu.categories import views
from twobuntu.feeds import CategoryArticlesFeed
urlpatterns = [
url(r'^(?P<id>\d+)/(?:(?P<slug>[\w-]+)/)?$', views.view, name='view'),
url(r'^rss/(?P<id>\d+)/$', CategoryArticlesFeed(), name='rss'),
]
|
{
"content_hash": "f9e7d7a6466bab4757dba8eb9012e449",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 74,
"avg_line_length": 28.3,
"alnum_prop": 0.6466431095406361,
"repo_name": "2buntu/2buntu-blog",
"id": "b4653a7bdf90ace47360c3416e3347474bc0360a",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twobuntu/categories/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2977"
},
{
"name": "HTML",
"bytes": "76074"
},
{
"name": "JavaScript",
"bytes": "6310"
},
{
"name": "Python",
"bytes": "93053"
}
],
"symlink_target": ""
}
|
__author__ = 'ar'
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
import skimage.io as skio
######################################
def findCatById(listCats, catId):
for ii in listCats:
if ii['id'] == catId:
return ii
return None
######################################
if __name__ == '__main__':
# dataDir = '/home/ar/datasets/mscoco'
dataDir = '/mnt/data1T2/datasets2/mscoco/raw-data'
dataType = 'train2014'
# dataType = 'val2014'
annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
imgDir = '%s/%s' % (dataDir, dataType)
if not os.path.isdir(imgDir):
raise Exception('Cant find directory with MS-COCO images [%s]' % dataDir)
#
coco = COCO(annFile)
#
listCatsFoodIdx = coco.getCatIds(supNms=['food'])
for ii, idx in enumerate(listCatsFoodIdx):
tmpCat = coco.loadCats(ids = idx)[0]
print ('%d [%d] : %s (%s)' % (ii, idx, tmpCat['name'], tmpCat['supercategory']))
print ('-------')
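    # Expected output (illustrative): one line per food category, e.g.
    #   0 [52] : banana (food)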
|
{
"content_hash": "6989be0754b34642b9e4f04f3ff0fc35",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 30.38888888888889,
"alnum_prop": 0.5776965265082267,
"repo_name": "gakarak/FCN_MSCOCO_Food_Segmentation",
"id": "41a8125a6bbc31c6471fc23d7525e3efd2ba5170",
"size": "1136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MSCOCO_Processing/PythonAPI/run01_generate_food_masks_print_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10425"
},
{
"name": "C++",
"bytes": "13023"
},
{
"name": "Jupyter Notebook",
"bytes": "96857"
},
{
"name": "Makefile",
"bytes": "199"
},
{
"name": "Python",
"bytes": "112307"
}
],
"symlink_target": ""
}
|
from django import template
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
register = template.Library()
@register.filter
def rating_score(obj, user):
"""
Returns the score a user has given an object
"""
if not user.is_authenticated or not hasattr(obj, "_ratings_field"):
return False
ratings_descriptor = getattr(obj, obj._ratings_field)
try:
rating = ratings_descriptor.get(user=user).score
except ratings_descriptor.model.DoesNotExist:
rating = None
return rating
@register.filter
def has_rated(user, obj):
"""
Returns whether or not the user has rated the given object
"""
return rating_score(obj, user) is not None
@register.filter
def rate_url(obj, score=1):
"""
Generates a link to "rate" the given object with the provided score - this
can be used as a form target or for POSTing via Ajax.
"""
return reverse(
"ratings_rate_object",
args=(
ContentType.objects.get_for_model(obj).pk,
obj.pk,
score,
),
)
@register.filter
def unrate_url(obj):
"""
Generates a link to "un-rate" the given object - this
can be used as a form target or for POSTing via Ajax.
"""
return reverse(
"ratings_unrate_object",
args=(
ContentType.objects.get_for_model(obj).pk,
obj.pk,
),
)
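# A minimal template usage sketch (an assumption, not part of the original
# module; it presumes an ``article`` object exposing a ratings descriptor):
#
#   {% load ratings_tags %}
#   {% if request.user|has_rated:article %}
#     You rated this article {{ article|rating_score:request.user }}.
#     <a href="{{ article|unrate_url }}">Remove rating</a>
#   {% else %}
#     <form action="{{ article|rate_url:5 }}" method="post">...</form>
#   {% endif %}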
|
{
"content_hash": "755140d57b9212d331493f25f8238f96",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 23.80327868852459,
"alnum_prop": 0.6287878787878788,
"repo_name": "django/djangosnippets.org",
"id": "9219e2b79414b3044a107887a5505c7944f692b4",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ratings/templatetags/ratings_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7764"
},
{
"name": "Dockerfile",
"bytes": "367"
},
{
"name": "HTML",
"bytes": "90582"
},
{
"name": "JavaScript",
"bytes": "79468"
},
{
"name": "Procfile",
"bytes": "78"
},
{
"name": "Python",
"bytes": "154731"
},
{
"name": "Ruby",
"bytes": "931"
},
{
"name": "SCSS",
"bytes": "13734"
},
{
"name": "Shell",
"bytes": "346"
}
],
"symlink_target": ""
}
|
import MyBot_core
|
{
"content_hash": "859e37bb1808e326cb74a03527f6e908",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 17,
"avg_line_length": 18,
"alnum_prop": 0.8333333333333334,
"repo_name": "HaliteChallenge/Halite-II",
"id": "a7b1c492b5bafa666847a72eeec868fee5de9047",
"size": "18",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airesources/Cython3/MyBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8111"
},
{
"name": "C",
"bytes": "1910003"
},
{
"name": "C#",
"bytes": "31000"
},
{
"name": "C++",
"bytes": "820400"
},
{
"name": "CMake",
"bytes": "2698"
},
{
"name": "CSS",
"bytes": "412005"
},
{
"name": "Clojure",
"bytes": "15989"
},
{
"name": "Common Lisp",
"bytes": "20600"
},
{
"name": "Dart",
"bytes": "14090"
},
{
"name": "Elixir",
"bytes": "22917"
},
{
"name": "F#",
"bytes": "17888"
},
{
"name": "Go",
"bytes": "14458"
},
{
"name": "HTML",
"bytes": "62449"
},
{
"name": "Haskell",
"bytes": "15459"
},
{
"name": "Java",
"bytes": "28548"
},
{
"name": "JavaScript",
"bytes": "6941451"
},
{
"name": "Julia",
"bytes": "17710"
},
{
"name": "Kotlin",
"bytes": "18922"
},
{
"name": "Makefile",
"bytes": "32949"
},
{
"name": "Mako",
"bytes": "532"
},
{
"name": "OCaml",
"bytes": "21982"
},
{
"name": "PHP",
"bytes": "27244"
},
{
"name": "Python",
"bytes": "491423"
},
{
"name": "Ruby",
"bytes": "156808"
},
{
"name": "Rust",
"bytes": "20054"
},
{
"name": "Scala",
"bytes": "23110"
},
{
"name": "Shell",
"bytes": "30859"
},
{
"name": "Swift",
"bytes": "27281"
},
{
"name": "Vue",
"bytes": "192540"
}
],
"symlink_target": ""
}
|
from ShareYourSystem.Classors.Representer import _print
from ShareYourSystem.Functers import Argumenter,Switcher
from ShareYourSystem.Objects import Setter
#Print a version of the class
_print(dict(Switcher.SwitcherClass.__dict__.items()))
#Print a version of this object
_print(Switcher.SwitcherClass())
#Print a version of his __dict__
_print(Switcher.SwitcherClass().__dict__)
#Test
class MakerClass(Setter.SetterClass):
def default_init(self):
#Call the based method
Setter.SetterClass.__init__(self)
#Call the init
self.init()
def init(self):
#Definition a default Int
self.MakingInt=1
self.MadeInt=0
'''
@Switcher.SwitcherClass()
def make(self):
self.MadeInt+=2*self.MakingInt
'''
#Be careful: Argumenter must come before Switcher... so that the argumenting inputs are set correctly
@Switcher.SwitcherClass()
@Argumenter.ArgumenterClass()
def make(self,_Int=None):
self.MadeInt+=2*self.MakingInt
#Get an instance
MyMaker=MakerClass()
#Show the default value of the instance
print('At the beginning the object is...')
print(MyMaker)
print('')
#Change the int
print('make with the object...')
MyMaker.make(2)
print(MyMaker)
print('')
#Change the int
print('make again with the object, but it is switched off now')
MyMaker.make()
print(MyMaker)
print('')
#Now set SwitchingMakeBool to False to bind with a reinit of the object
print('Now switch on the object...')
MyMaker['SwitchingMakeBool']=False
print(MyMaker)
print('')
#Test
from ShareYourSystem.Functers import Hooker
class BuilderClass(MakerClass):
@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':MakerClass.init}]})
def init(self):
#Definition a default Int
self.BuildingFloat=1.
self.BuiltInt=0
#Be careful: Argumenter must come before Switcher... so that the argumenting inputs are set correctly
@Switcher.SwitcherClass()
@Argumenter.ArgumenterClass()
def build(self,_Float=None):
self.BuiltInt+=2*self.MakingInt+int(self.BuildingFloat)
#Get an instance
MyBuilder=BuilderClass()
#Show the default value of the instance
print('At the beginning the object is...')
print(MyBuilder)
print('')
#Change the int
print('build with the object...')
MyBuilder.make(2)
print(MyBuilder)
print('')
#Change the int
print('build again with the object, but it is switched off now')
MyBuilder.build()
print(MyBuilder)
print('')
#Now set SwitchingMakeBool to False to bind with a reinit of the object
print('Now switch on the object for the make process...')
MyBuilder['SwitchingMakeBool']=False
print(MyBuilder)
print('')
|
{
"content_hash": "aedee73e648ce3dd004289eb6aa8e4b8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 93,
"avg_line_length": 22.83783783783784,
"alnum_prop": 0.7510848126232742,
"repo_name": "Ledoux/ShareYourSystem",
"id": "9e9c189311bc8e5ac29b5bc2fa36bf8078f856d6",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/draft/Resetter/Test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xe6\xe8\xe9\xe5", # mainnet
"testnet3": b"\xcb\xf2\xc0\xef", # testnet3
"regtest": b"\xcb\xf2\xc0\xef", # regtest
}
class P2PConnection(asyncore.dispatcher):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
    This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# All P2PConnections must be created before starting the NetworkThread.
# assert that the network thread is not running.
assert not network_thread_running()
super().__init__(map=mininode_socket_map)
def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self.network = net
self.disconnect = False
logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
def peer_disconnect(self):
# Connection could have already been closed by other end.
if self.state == "connected":
self.disconnect_node()
# Connection and disconnection methods
def handle_connect(self):
"""asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.on_open()
def handle_close(self):
"""asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.on_close()
def disconnect_node(self):
"""Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
self.disconnect = True
# Socket read methods
def handle_read(self):
"""asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
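        # Wire format per message: 4-byte network magic | 12-byte NUL-padded
        # command | 4-byte little-endian payload length | 4-byte checksum
        # (first four bytes of double-SHA256 of the payload) | payload.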
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def writable(self):
"""asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
"""asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
# Class utility methods
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
def __init__(self):
super().__init__(name="NetworkThread")
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
def network_thread_start():
"""Start the network thread."""
# Only one network thread may run at a time
assert not network_thread_running()
NetworkThread().start()
def network_thread_running():
"""Return whether the network thread is running."""
return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
def network_thread_join(timeout=10):
"""Wait timeout seconds for the network thread to terminate.
Throw if the network thread doesn't terminate in timeout seconds."""
network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
assert len(network_threads) <= 1
for thread in network_threads:
thread.join(timeout)
assert not thread.is_alive()
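# A minimal usage sketch (an assumption, not part of the original module):
# subclass P2PInterface to customize message handling, then drive it from the
# test logic thread. The address and port below are illustrative only.
#
#   class InvCounter(P2PInterface):
#       def __init__(self):
#           super().__init__()
#           self.inv_count = 0
#       def on_inv(self, message):
#           self.inv_count += len(message.inv)
#           super().on_inv(message)
#
#   peer = InvCounter()
#   peer.peer_connect('127.0.0.1', 18444)
#   network_thread_start()
#   peer.wait_for_verack()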
|
{
"content_hash": "7da06ad1daaf3d332b0fdc5c65f2f92e",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 182,
"avg_line_length": 37.5,
"alnum_prop": 0.6181039755351682,
"repo_name": "ppcoin/ppcoin",
"id": "a6860ee17f07596d85bc9a93884a02f51a85584c",
"size": "16655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/mininode.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7948"
},
{
"name": "C++",
"bytes": "1432773"
},
{
"name": "Groff",
"bytes": "12841"
},
{
"name": "Makefile",
"bytes": "4292"
},
{
"name": "NSIS",
"bytes": "6134"
},
{
"name": "Objective-C++",
"bytes": "2463"
},
{
"name": "Python",
"bytes": "50532"
},
{
"name": "QMake",
"bytes": "10567"
},
{
"name": "Shell",
"bytes": "1849"
}
],
"symlink_target": ""
}
|
from setuptools import setup
version = '3.0'
setup(name='dopamine',
packages = ['dopamine'],
version=version,
description="A library to use DopamineLabs machine learning API",
long_description="""\
This package provides a class for interacting with the DopamineAPI from a Python project. After you have received your API key and configured the actions and reinforcements relevant to your app on the [Dopamine Developer Dashboard](dashboard.usedopamine.com), you may use this class to place 'tracking()' and 'reinforcement()' calls from inside your app that will communicate directly with the machine-learning-based DopamineAPI.""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='machinelearning analytics dopamine reinforcement behavior',
author='Akash Desai',
author_email='kash@usedopamine.com',
url='https://github.com/DopamineLabs/DopamineKit-Python-Client',
download_url='https://github.com/DopamineLabs/DopamineKit-Python-Client/tarball/'+version,
license='MIT',
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
{
"content_hash": "2ec5eb9b282e68eafd2b1cf187106cc2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 435,
"avg_line_length": 49.42307692307692,
"alnum_prop": 0.7011673151750972,
"repo_name": "DopamineLabs/DopamineAPI_Python-Client",
"id": "7d7df2a0421bdba82d8867fd7fe86d5e3ddc6ef2",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9421"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
urlpatterns = [
url(r'^paginate_test/$', "utils.tests.pagination_test_func"),
]
|
{
"content_hash": "97d29194de2eecd233c19378fe000358",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 18.428571428571427,
"alnum_prop": 0.689922480620155,
"repo_name": "mcmdhr/CSOJ",
"id": "2388e32c75123a4e23e2715511b1076d9df5ab8b",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/test_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48167"
},
{
"name": "HTML",
"bytes": "173998"
},
{
"name": "JavaScript",
"bytes": "130711"
},
{
"name": "Python",
"bytes": "247487"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
}
|
"""Febelfin Bank Transaction Code designations.
See :doc:`/specs/b2c`.
"""
from lino.api import _
DESCRIPTIONS = {
'0101': _("Individual transfer order"),
'0102': _("Individual transfer order initiated by the bank"),
'0103': _("Standing order"),
'0105': _("Payment of wages, etc."),
'0107': _("Collective transfer"),
'0113': _("Transfer from your account"),
'0117': _("Financial centralisation"),
'0137': _("Costs"),
'0139': _("Your issue circular cheque"),
'0140': _("Codes proper to each bank"),
'0149': _("Cancellation or correction"),
'0150': _("Transfer in your favour"),
'0151': _("Transfer in your favour - initiated by the bank"),
'0152': _("Payment in your favour"),
'0154': _("Unexecutable transfer order"),
'0160': _("Non-presented circular cheque"),
'0162': _("Unpaid postal order"),
'0164': _("Transfer to your account"),
'0166': _("Financial centralization"),
'0187': _("Reimbursement of costs"),
'0190': _("Codes proper to each bank"),
'0199': _("Cancellation or correction"),
'0301': _("Payment of your cheque"),
'0305': _("Payment of voucher"),
'0309': _("Unpaid voucher"),
'0311': _("Department store cheque"),
'0315': _("Your purchase bank cheque"),
'0317': _("Your certified cheque"),
'0337': _("Cheque-related costs"),
'0338': _("Provisionally unpaid"),
'0340': _("Codes proper to each bank"),
'0349': _("Cancellation or correction"),
'0352': _("First credit of cheques, vouchers, luncheon vouchers, "
"postal orders, credit under usual reserve"),
'0358': _("Remittance of cheques, vouchers, etc. credit after collection"),
'0360': _("Reversal of voucher"),
'0362': _("Reversal of cheque"),
'0363': _("Second credit of unpaid cheque"),
'0366': _("Remittance of cheque by your branch-credit "
"under usual reserve"),
'0387': _("Reimbursement of cheque-related costs"),
'0390': _("Codes proper to each bank"),
'0399': _("Cancellation or correction"),
'0401': _("Loading a GSM card"),
'0402': _("Payment by means of a payment card within the Eurozone"),
'0403': _("Settlement credit cards"),
'0404': _("Cash withdrawal from an ATM"),
'0405': _("Loading Proton"),
'0406': _("Payment with tank card"),
'0407': _("Payment by GSM"),
'0408': _("Payment by means of a payment card outside the Eurozone"),
'0437': _("Costs"),
'0440': _("Codes proper to each bank"),
'0449': _("Cancellation or correction"),
'0450': _("Credit after a payment at a terminal"),
'0451': _("Unloading Proton"),
'0452': _("Loading GSM cards"),
'0453': _("Cash deposit at an ATM"),
'0455': _("Income from payments by GSM"),
'0468': _("Credit after Proton payments"),
'0487': _("Reimbursement of costs"),
'0490': _("Codes proper to each bank"),
'0499': _("Cancellation or correction"),
'0501': _("Payment"),
'0503': _("Unpaid debt"),
'0505': _("Reimbursement"),
'0537': _("Costs"),
'0540': _("Codes proper to each institution"),
'0549': _("Cancellation or correction"),
'0550': _("Credit after collection"),
'0552': _("Credit under usual reserve"),
'0554': _("Reimbursement"),
'0556': _("Unexecutable reimbursement"),
'0558': _("Reversal"),
'0587': _("Reimbursement of costs"),
'0590': _("Codes proper to each bank"),
'0599': _("Cancellation or correction"),
'0701': _("Payment commercial paper"),
'0705': _("Commercial paper claimed back"),
'0706': _("Extension of maturity date"),
'0707': _("Unpaid commercial paper"),
'0708': _("Payment in advance"),
'0709': _("Agio on supplier's bill"),
'0737': _("Costs related to commercial paper"),
'0739': _("Return of an irregular bill of exchange"),
'0740': _("Codes proper to each bank"),
'0749': _("Cancellation or correction"),
'0750': _("Remittance of commercial paper-credit after collection"),
'0752': _("Remittance of commercial paper-credit under usual reserve"),
'0754': _("Remittance of commercial paper for discount"),
'0756': _("Remittance of supplier's bill with guarantee"),
'0758': _("Remittance of supplier's bill without guarantee"),
'0787': _("Reimbursement of costs"),
'0790': _("Codes proper to each bank"),
'0799': _("Cancellation or correction"),
'0901': _("Cash withdrawal"),
'0905': _("Purchase of foreign bank notes"),
'0907': _("Purchase of gold/pieces"),
'0909': _("Purchase of petrol coupons"),
'0913': _("Cash withdrawal by your branch or agents"),
'0917': _("Purchase of fiscal stamps"),
'0919': _("Difference in payment"),
'0925': _("Purchase of traveller's cheque"),
'0937': _("Costs"),
'0940': _("Codes proper to each bank"),
'0949': _("Cancellation or correction"),
'0950': _("Cash payment"),
'0952': _("Payment night safe"),
'0958': _("Payment by your branch/agents"),
'0960': _("Sale of foreign bank notes"),
'0962': _("Sale of gold/pieces under usual reserve"),
'0968': _("Difference in payment"),
'0970': _("Sale of traveller's cheque"),
'0987': _("Reimbursement of costs"),
'0990': _("Codes proper to each bank"),
'0999': _("Cancellation or correction"),
'1101': _("Purchase of securities"),
'1102': _("Tenders"),
'1103': _("Subscription to securities"),
'1104': _("Issues"),
'1105': _("Partial payment subscription"),
'1106': _("Share option plan -- exercising an option"),
'1109': _("Settlement of securities"),
'1111': _("Payable coupons/repayable securities"),
'1113': _("Your repurchase of issue"),
'1115': _("Interim interest on subscription"),
'1117': _("Management fee"),
'1119': _("Regularisation costs"),
'1137': _("Costs"),
'1140': _("Codes proper to each bank"),
'1149': _("Cancellation or correction"),
'1150': _("Sale of securities"),
'1151': _("Tender"),
'1152': _("Payment of coupons from a deposit or settlement of coupons "
"delivered over the counter - credit under usual reserve"),
'1158': _("Repayable securities from a deposit or delivered at the "
"counter -- credit under usual reserve"),
'1162': _("Interim interest on subscription When reimbursed separately "
"to the subscriber"),
'1164': _("Your issue"),
'1166': _("Retrocession of issue commission"),
'1168': _("Compensation for missing coupon"),
'1170': _("Settlement of securities"),
'1187': _("Reimbursement of costs"),
'1190': _("Codes proper to each bank"),
'1199': _("Cancellation or correction"),
'1301': _("Short-term loan"),
'1302': _("Long-term loan"),
'1305': _("Settlement of fixed advance"),
'1307': _("Your repayment instalment credits"),
'1311': _("Your repayment mortgage loan"),
'1313': _("Settlement of bank acceptances"),
'1315': _("Your repayment hire-purchase and similar claims"),
'1319': _("Documentary import credits"),
'1321': _("Other credit applications"),
'1337': _("Credit-related costs"),
'1340': _("Codes proper to each bank"),
'1349': _("Cancellation or correction"),
'1350': _("Settlement of instalment credit"),
'1354': _("Fixed advance -- capital and interest"),
'1355': _("Fixed advance -- interest only"),
'1356': _("Subsidy"),
'1360': _("Settlement of mortgage loan"),
'1362': _("Term loan"),
'1368': _("Documentary export credits"),
'1370': _("Settlement of discount bank acceptance"),
'1387': _("Reimbursement of costs"),
'1390': _("Codes proper to each bank"),
'1399': _("Cancellation or correction"),
'3001': _("Spot purchase of foreign exchange"),
'3003': _("Forward purchase of foreign exchange"),
'3005': _("Capital and/or interest term investment"),
'3033': _("Value (date) correction"),
'3037': _("Costs"),
'3039': _("Undefined transaction"),
'3040': _("Codes proper to each bank"),
'3049': _("Cancellation or correction"),
'3050': _("Spot sale of foreign exchange"),
'3052': _("Forward sale of foreign exchange"),
'3054': _("Capital and/or interest term investment"),
'3055': _("Interest term investment"),
'3083': _("Value (date) correction"),
'3087': _("Reimbursement of costs"),
'3089': _("Undefined transaction"),
'3090': _("Codes proper to each bank"),
'3099': _("Cancellation or correction"),
'3501': _("Closing"),
'3537': _("Costs"),
'3540': _("Codes proper to each bank"),
'3549': _("Cancellation or correction"),
'3550': _("Closing"),
'3587': _("Reimbursement of costs"),
'3590': _("Codes proper to each bank"),
'3599': _("Cancellation or correction"),
'4101': _("Transfer"),
'4103': _("Standing order"),
'4105': _("Collective payments of wages"),
'4107': _("Collective transfers"),
'4113': _("Transfer from your account"),
'4117': _("Financial centralisation (debit)"),
'4137': _("Costs relating to outgoing foreign transfers and "
"non-SEPA transfers"),
'4138': _("Costs relating to incoming foreign and non-SEPA transfers"),
'4140': _("Codes proper to each bank"),
'4149': _("Cancellation or correction"),
'4150': _("Transfer"),
'4164': _("Transfer to your account"),
'4166': _("Financial centralisation (credit)"),
'4187': _("Reimbursement of costs"),
'4190': _("Codes proper to each bank"),
'4199': _("Cancellation or correction"),
'4301': _("Payment of a foreign cheque"),
'4307': _("Unpaid foreign cheque"),
'4315': _("Purchase of an international bank cheque"),
'4337': _("Costs relating to payment of foreign cheques"),
'4340': _("Codes proper to each bank"),
'4349': _("Cancellation or correction"),
'4352': _("Remittance of foreign cheque credit under usual reserve"),
'4358': _("Remittance of foreign cheque credit after collection"),
'4362': _("Reversal of cheques"),
'4387': _("Reimbursement of costs"),
'4390': _("Codes proper to each bank"),
'4399': _("Cancellation or correction"),
'4701': _("Payment of foreign bill"),
'4705': _("Bill claimed back"),
'4706': _("Extension"),
'4707': _("Unpaid foreign bill"),
'4711': _("Payment documents abroad"),
'4713': _("Discount foreign supplier's bills"),
'4714': _("Warrant fallen due"),
'4737': _("Costs relating to the payment of a foreign bill"),
'4740': _("Codes proper to each bank"),
'4749': _("Cancellation or correction"),
'4750': _("Remittance of foreign bill credit after collection"),
'4752': _("Remittance of foreign bill credit under usual reserve"),
'4754': _("Discount abroad"),
'4756': _("Remittance of guaranteed foreign supplier's bill"),
'4758': _("Idem without guarantee"),
'4760': _("Remittance of documents abroad - credit under usual reserve"),
'4762': _("Remittance of documents abroad - credit after collection"),
'4764': _("Warrant"),
'4787': _("Reimbursement of costs"),
'4790': _("Codes proper to each bank"),
'4799': _("Cancellation or correction"),
'8002': _("Costs relating to electronic output"),
'8004': _("Costs for holding a documentary cash credit"),
'8006': _("Damage relating to bills and cheques"),
'8007': _("Insurance costs"),
'8008': _("Registering compensation for savings accounts"),
'8009': _("Postage"),
'8010': _("Purchase of Smartcard"),
'8011': _("Fees and commissions charged separately"),
'8012': _("Costs for opening a bank guarantee"),
'8013': _("Renting of safes"),
'8014': _("Handling costs instalment credit"),
'8015': _("Night safe"),
'8016': _("Bank confirmation to revisor or accountant"),
'8017': _("Charge for safe custody"),
'8018': _("Trade information"),
'8019': _("Special charge for safe custody"),
'8020': _("Drawing up a certificate"),
'8021': _("Pay-packet charges"),
'8022': _("Management/custody"),
'8023': _("Research costs"),
'8024': _("Participation in and management of interest refund system"),
'8025': _("Renting of direct debit box"),
'8026': _("Travel insurance premium"),
'8027': _("Subscription fee"),
'8029': _("Information charges"),
'8031': _("Writ service fee"),
'8033': _("Miscellaneous fees and commissions"),
'8035': _("Costs"),
'8037': _("Access right to database"),
'8039': _("Surety fee"),
'8041': _("Research costs"),
'8043': _("Printing of forms"),
'8045': _("Documentary credit charges"),
'8047': _("Charging fees for transactions"),
'8049': _("Cancellation or correction"),
'8099': _("Cancellation or correction"),
}
def code2desc(c):
"""Return the description of the given code."""
return DESCRIPTIONS.get(c, c)
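# A minimal usage sketch (hedged; this helper is never called by the module
# itself, and '0000' is a hypothetical unknown code):
def _example_code2desc():
    print(code2desc('4101'))  # -> the translatable description "Transfer"
    print(code2desc('0000'))  # -> '0000' (unknown codes echo back unchanged)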
|
{
"content_hash": "87848882a37de4f37b5c365b79acaed3",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 43.6135593220339,
"alnum_prop": 0.6004974351002642,
"repo_name": "lino-framework/xl",
"id": "632b57b30e8974bd54c0f1930b8eb24732fd35ca",
"size": "13003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/b2c/febelfin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
}
|
from itertools import groupby
from . import SimConcretizationStrategy
class SimConcretizationStrategyControlledData(SimConcretizationStrategy):
"""
    Concretization strategy that constrains the address to controlled data.
    Controlled data consists of symbolic data and the addresses given as arguments.
"""
def __init__(self, limit, fixed_addrs, **kwargs):
super(SimConcretizationStrategyControlledData, self).__init__(**kwargs)
self._limit = limit
self._fixed_addrs = fixed_addrs
def _concretize(self, memory, addr):
# Get all symbolic variables in memory
symbolic_vars = filter(lambda key: not key.startswith("reg_") and not key.startswith("mem_"), memory.state.memory.mem._name_mapping.keys())
controlled_addrs = sorted([_addr for s_var in symbolic_vars for _addr in memory.addrs_for_name(s_var)])
controlled_addrs.extend(self._fixed_addrs)
# Represent controlled addresses in adjacent memory areas as "base+offset"
base_length_array = [(controlled_addrs[0], 0)]
for i in xrange(1, len(controlled_addrs)):
if controlled_addrs[i - 1] + 1 == controlled_addrs[i]:
base = base_length_array[i-1][0]
else:
base = controlled_addrs[i]
base_length_array.append((base, controlled_addrs[i] - base))
# create intervals from memory areas
intervals = map(lambda t: (t[0], len(list(t[1]))), groupby(base_length_array, key=lambda t: t[0]))
constraints = []
# create constraints from intervals
for base, length in intervals:
constraints.append(memory.state.se.And(addr >= base, addr < base+length))
# try to get solutions for controlled memory
ored_constraints = memory.state.se.Or(*constraints)
solutions = self._eval(memory, addr, self._limit, extra_constraints=(ored_constraints,))
if not solutions:
solutions = None
return solutions
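# A hedged usage sketch (not executed here): in angr, read-concretization
# strategies are usually registered on state.memory.read_strategies, so a
# caller might wire this strategy in roughly like
#
#   strategy = SimConcretizationStrategyControlledData(limit=16,
#                                                      fixed_addrs=[0x601000])
#   state.memory.read_strategies.insert(0, strategy)
#
# where limit and fixed_addrs mirror the constructor above and the address
# value is purely illustrative.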
|
{
"content_hash": "e5548b1595db0403116d17a544635ab6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 147,
"avg_line_length": 43.84782608695652,
"alnum_prop": 0.6534457114526524,
"repo_name": "f-prettyland/angr",
"id": "62a6464082c7a642028bad4976e458728864d396",
"size": "2017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/concretization_strategies/controlled_data.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39375"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "2934645"
}
],
"symlink_target": ""
}
|
"""Announcement SMTP based library.
The announcement SMTP based library with a simplified command line startup.
MIT licensed, Copyright (c) 2015
@author Oliver Merkel, Merkel(dot)Oliver(at)web(dot)de.
All rights reserved.
"""
from announcementconfig import *
from smtplib import SMTP, SMTPException
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.utils import formataddr
from email.utils import COMMASPACE
from base64 import encodebytes
import os
import getpass
import sys
from email.base64mime import body_encode as encode_base64
class Announcement:
FROM = 'From'
TO = 'To'
CC = 'CC'
REPLYTO = 'Reply-To'
SUBJECT = 'Subject'
BODY = 'Body'
SIGNATURE = 'Signature'
FRIENDLYNAME = 'Friendly Name'
PATH = 'path'
MAIL = 'Mail'
ATTACHMENTS = 'attachments'
user = None
mailType = None
def __init__(self, user, mailType):
self.setUser(user)
self.setMailType(mailType)
def setUser(self, user):
self.user = user
self.userData = users[user]
self.update()
def setMailType(self, mailType):
self.mailType = mailType
self.update()
def update(self):
if self.user and self.mailType:
self.renderHeader()
self.renderBody()
self.renderMessage()
def getShortAddressList(self):
return self.userData[self.MAIL]
def getFullAddressList(self):
return [ '"%s" <%s>' % (self.userData[self.FRIENDLYNAME], x) \
for x in self.getShortAddressList() ]
def renderBody(self):
signature = '\n'.join(self.userData[self.SIGNATURE])
body = '\n'.join(mail[self.mailType][self.BODY])
body = Template(body).safe_substitute(current)
self.body = Template(body).safe_substitute(signature=signature)
def getSubject(self):
return Template(mail[self.mailType][self.SUBJECT]).safe_substitute(current)
def renderHeader(self):
result = {}
result[self.SUBJECT] = self.getSubject()
result[self.TO] = mail[self.mailType][self.TO]
result[self.CC] = mail[self.mailType][self.CC]
self.header = result
def renderMessage(self):
msg = MIMEMultipart('alternative')
msg[self.SUBJECT] = self.header[self.SUBJECT]
msg[self.FROM] = self.getFullAddressList()[0]
msg[self.TO] = COMMASPACE.join(self.header[self.TO])
msg[self.CC] = COMMASPACE.join(self.header[self.CC])
msg[self.REPLYTO] = common['replymailaddr']
partHtml = MIMEText(self.body, 'html')
msg.attach(partHtml)
self.message = msg
def sendMail( self, password ):
with SMTP( common['mailserver'], 587 ) as smtp:
smtp.set_debuglevel(1)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
if not (hex(sys.hexversion) == '0x30500f0'):
smtp.login( self.getShortAddressList()[0], password )
else:
# Application side workaround for http://bugs.python.org/issue25446
# smtp.login using AUTH LOGIN mechanism is broken in released Python 3.5.0
#
# prefer AUTH LOGIN over other mechanism if available
if not "LOGIN" in smtp.esmtp_features["auth"].split():
# best effort approach: allow any other mechanism
smtp.login( self.getShortAddressList()[0], password )
else:
(code, resp) = smtp.docmd("AUTH", "LOGIN " +
encode_base64(self.getShortAddressList()[0].encode('ascii'), eol=''))
if not code == 334:
raise SMTPException("Authentication not possible with this login.")
(code, resp) = smtp.docmd(encode_base64(password.encode('ascii'), eol=''))
# 235 : 'Authentication successful'
# 503 : 'Error: already authenticated'
if not code in (235, 503):
raise SMTPException("Authentication unsuccessful.")
smtp.sendmail( self.message[self.FROM],
self.header[self.TO] + self.header[self.CC], self.message.as_string() )
def sendTestMail(self):
        # smtplib treats a bare string as a single recipient, so hand over the
        # address list itself instead of a comma-joined string
        to = self.userData[self.MAIL]
with SMTP( common['mailserver'] ) as smtp:
smtp.sendmail( self.message[self.FROM], to, self.message.as_string() )
def getAttachmentKey( self ):
return mail[self.mailType][self.ATTACHMENTS] if self.ATTACHMENTS in mail[self.mailType].keys() else None
def attachmentsMissing( self ):
result = False
key = self.getAttachmentKey()
if not key is None:
for filePath in current[key]:
fileName = filePath.split('\\')[-1]
if not os.path.exists(filePath) or not os.path.isfile(filePath):
print('Error: Missing %s ( %s )' % (fileName, filePath))
result = True
return result
def attach(self):
result = True
key = self.getAttachmentKey()
if not key is None:
for filePath in current[key]:
fileName = filePath.split('\\')[-1]
if os.path.exists(filePath) and os.path.isfile(filePath):
fh = open(filePath,'rb')
part = MIMEBase('application', "octet-stream")
part.set_payload(encodebytes(fh.read()).decode())
fh.close()
part.add_header('Content-Transfer-Encoding', 'base64')
part.add_header('Content-Disposition', 'attachment; filename="%s"' % fileName)
self.message.attach(part)
else:
print('Error: Missing %s ( %s )' % (fileName, filePath))
result = False
return result
if '__main__' == __name__:
import argparse
userList = sorted(list(users.keys()))
mailTypeList = sorted(list(mail.keys()))
parser = argparse.ArgumentParser(description='Announcement Mailer.')
parser.add_argument('-u', '--user', nargs=1, type=str,
choices=userList, default=[userList[0]],
help='User to send the announcement. ' +
'Default is to use ' + userList[0])
parser.add_argument('-t', '--type', nargs=1, type=str,
choices=mailTypeList, default=[mailTypeList[0]],
help='Mail type of announcement. ' +
('Default is to use "%s"' % mailTypeList[0]))
parser.add_argument('-s', '--send', nargs=1, type=str,
choices=['console', 'test', 'serious'],
default=['console'], help="""How and where to send output.
"console" shows the mail on console only.
"test" fakes mail by sending to own mail address only.
"serious" sends out real mail.
Default is to use console""" )
args = parser.parse_args()
user = args.user[0]
sendOutput = args.send[0]
mailType = args.type[0]
announcement = Announcement(user, mailType)
if 'console' == sendOutput:
print(announcement.message.as_string())
else:
if announcement.attach():
if 'test' == sendOutput:
announcement.sendTestMail()
else:
password = getpass.getpass('Hello %s. Please enter your password: ' % user).strip()
announcement.sendMail(password)
else:
print("Error: could not include attachments")
|
{
"content_hash": "c0143626c7e910cbe702df6e0643c908",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 108,
"avg_line_length": 34.69035532994924,
"alnum_prop": 0.6552531460345332,
"repo_name": "OMerkel/Announcement",
"id": "25ce4d955f0989c7075e4d6ccb11da030ab99a6e",
"size": "6834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "announcement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19074"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from tools.models import Tool, ToolCategory
# Register your models here.
class ToolAdmin(admin.ModelAdmin):
model = Tool
ordering = ('status',)
list_display = ('title', 'status')
admin.site.register(Tool, ToolAdmin)
# Register your models here.
class ToolCategoriesAdmin(admin.ModelAdmin):
pass
admin.site.register(ToolCategory, ToolCategoriesAdmin)
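# For reference, a rough sketch of the Tool model this admin assumes (the real
# definition lives in tools/models.py; the field types are guesses based on the
# 'title' and 'status' columns used above):
#
#   class Tool(models.Model):
#       title = models.CharField(max_length=200)
#       status = models.CharField(max_length=50)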
|
{
"content_hash": "b8fdb2d3476835016fae628efb8ba2df",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 24.6875,
"alnum_prop": 0.769620253164557,
"repo_name": "ethdeveloper/ethdeveloper",
"id": "8f1f56246fcc599df93b3073e590d5bef56f2d3c",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27338"
},
{
"name": "HTML",
"bytes": "10600"
},
{
"name": "JavaScript",
"bytes": "8832"
},
{
"name": "Python",
"bytes": "10375"
}
],
"symlink_target": ""
}
|
'''
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import logging
import time
import argparse
# Custom MQTT message callback
def customCallback(client, userdata, message):
print("Received a new message: ")
print(message.payload)
print("from topic: ")
print(message.topic)
print("--------------\n\n")
def pubSubGetClient():
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
help="Use MQTT over WebSocket")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub",
help="Targeted client id")
parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic")
parser.add_argument("-d", "--deviceid", action="store", required=True, dest="deviceid", help="device id in duplocloud")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
useWebsocket = args.useWebsocket
clientId = args.clientId
topic = args.topic
deviceid = args.deviceid
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
parser.error("Missing credentials for authentication.")
exit(2)
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.WARNING)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
lAWSIoTMQTTClient = None
if useWebsocket:
lAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
lAWSIoTMQTTClient.configureEndpoint(host, 443)
lAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
lAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
lAWSIoTMQTTClient.configureEndpoint(host, 8883)
lAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
lAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
lAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
lAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
    lAWSIoTMQTTClient.configureConnectDisconnectTimeout(30)  # 30 sec
    lAWSIoTMQTTClient.configureMQTTOperationTimeout(30)  # 30 sec
return lAWSIoTMQTTClient, topic, deviceid
def startPubSub(aInAWSIoTMQTTClient, aInTopic):
# Connect and subscribe to AWS IoT
aInAWSIoTMQTTClient.connect()
#aInAWSIoTMQTTClient.subscribe(aInTopic, 1, customCallback)
time.sleep(10)
# Publish to the same topic in a loop forever
loopCount = 0
while True:
aInAWSIoTMQTTClient.publish(aInTopic, "New Message ", 1)
loopCount += 1
time.sleep(5)
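# A minimal driver sketch (hedged; assumes the CLI flags documented above are
# supplied, e.g. -e/-r/-d plus either -c/-k or -w). Never called here.
def _example_main():
    client, topic, deviceid = pubSubGetClient()
    startPubSub(client, topic)  # connects, then publishes every 5 seconds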
|
{
"content_hash": "cf8b50eb3721325da86b583623cf4ab3",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 124,
"avg_line_length": 41.32967032967033,
"alnum_prop": 0.7053975006647168,
"repo_name": "duplocloud/duploiotagent",
"id": "070679727f72b5a831f7dc04d5eaf8c035f8cd7e",
"size": "4345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "duplo/basicPubSub.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "283667"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod
import codecs
import os
import time
from typing import Dict, Generator, List, Optional, Tuple, Union
import click
from lxml import etree
from perfectextractor.apps.core.base import BaseWorker
from .models import Alignment, MultiWordExpression
from .utils import TXT, XML, CSV, open_csv, open_xlsx
LEMMATA_CONFIG = os.path.join(os.path.dirname(__file__), 'config/{language}_lemmata.txt')
class BaseExtractor(BaseWorker):
def __init__(self,
language_from: str,
languages_to: Optional[List[str]] = None,
file_names: Optional[List[str]] = None,
sentence_ids: Optional[List[str]] = None,
lemmata: Optional[Union[Tuple[str], List[str], bool]] = None,
tokens: Optional[List[Tuple[str, str]]] = None,
metadata: Optional[List[Tuple[str, str]]] = None,
regex: Optional[List[str]] = None,
outfile: Optional[str] = None,
position: Optional[int] = None,
output: str = TXT,
format_: str = CSV,
one_per_sentence: bool = False,
sort_by_certainty: bool = False,
no_order_languages: bool = False,
file_limit: int = 0,
min_file_size: int = 0,
max_file_size: int = 0) -> None:
"""
Initializes the extractor for the given source and target language(s).
:param language_from: the source language
:param languages_to: the target language(s)
:param file_names: whether to limit the search to certain file names
:param sentence_ids: whether to limit the search to certain sentence IDs
:param lemmata: whether to limit the search to certain lemmata (can be provided as a boolean or a list)
:param tokens: whether to limit the search to certain tokens (list of tuples (from-to))
:param metadata: whether to add metadata to the output (list of tuples (metadata-level))
:param regex: whether to limit the search to certain regular expressions (list of regexes)
:param outfile: the filename to output the results to
:param position: whether to limit the search to a certain position (e.g. only sentence-initial)
:param output: whether to output the results in text or XML format
:param format_: whether to output the file as .csv or .xlsx
:param one_per_sentence: whether to output all lines, and allow one classification per sentence
:param sort_by_certainty: whether to sort the files by average alignment certainty
:param no_order_languages: whether to order the languages on alignment
:param file_limit: whether to limit the number of files searched in
:param min_file_size: whether to only use files larger (or equal) than a certain size
:param max_file_size: whether to only use files smaller (or equal) than a certain size
"""
super().__init__(language_from, outfile, format_)
self.l_to = languages_to or []
self.file_names = file_names
self.sentence_ids = sentence_ids
self.tokens: Optional[Dict[str, str]] = dict(tokens) if tokens else None
self.metadata: Dict[str, str] = dict(metadata) if metadata else {}
self.regex = regex
self.position = position
self.output = output
self.one_per_sentence = one_per_sentence
self.sort_by_certainty = sort_by_certainty
self.no_order_languages = no_order_languages
self.file_limit = file_limit
self.min_file_size = min_file_size
self.max_file_size = max_file_size
# Read in the lemmata list (if provided)
self.lemmata_list: List[str] = []
self.read_lemmata(lemmata)
# Other variables
self.other_extractors: List[BaseExtractor] = []
self.alignment_xmls: Dict[str, str] = dict()
self._index: Dict[str, etree._Element] = dict() # save segments indexed by id
def read_lemmata(self, lemmata: Optional[Union[Tuple[str], List[str], bool]]) -> None:
"""
Gathers the lemmata to be filtered upon.
"""
if lemmata is not None:
if type(lemmata) in (list, tuple):
self.lemmata_list = list(lemmata)
elif type(lemmata) == bool:
if lemmata:
with codecs.open(LEMMATA_CONFIG.format(language=self.l_from), 'r', 'utf-8') as lexicon:
self.lemmata_list = lexicon.read().split()
else:
raise ValueError('Unknown value for lemmata')
def check_language_in_config(self, language: str) -> None:
"""
Checks whether there is an implementation available for the given language.
"""
if language not in self.config.sections():
msg = 'No implementation for {} for language {}'.format(self.__class__.__name__, language)
raise click.ClickException(msg)
def process_folder(self, dir_name: str, progress_cb=None, done_cb=None) -> None:
"""
Creates a result file and processes each file in a folder.
"""
file_names = self.collect_file_names(dir_name)
progress_total = len(file_names)
result_file = self.outfile or '-'.join([dir_name, self.l_from]) + '.' + self.format_
opener = open_csv if self.format_ == CSV else open_xlsx
with opener(result_file) as writer:
header = self.generate_header()
            if self.format_ == CSV:
                writer.writerow(header)
            else:
                writer.writerow(header, is_header=True)
for i, part in enumerate(self.generate_results(dir_name, file_names)):
writer.writerows(part)
if progress_cb:
progress_cb(i + 1, progress_total)
if done_cb:
done_cb(result_file)
def collect_file_names(self, dir_name: str) -> List[str]:
"""
Collects the file names in a given directory and (potentially) filters these based on file size,
alignment certainty or a limited number of files.
:param dir_name: The current directory
:return: A list of files to consider.
"""
click.echo('Collecting file names...')
if self.file_names:
file_names = [os.path.join(dir_name, f) for f in self.file_names]
else:
file_names = self.list_filenames(dir_name)
if self.min_file_size or self.max_file_size:
file_names = self.filter_by_file_size(file_names)
if self.sort_by_certainty:
file_names = self.sort_by_alignment_certainty(file_names)
if self.file_limit:
file_names = file_names[:self.file_limit]
click.echo('Finished collecting file names, starting processing...')
return file_names
    def generate_results(self, dir_name: str, file_names: Optional[List[str]] = None) -> Generator[List[str], None, None]:
"""
Generates the results for a directory or a set of files.
"""
if file_names is None:
file_names = self.collect_file_names(dir_name)
for f in file_names:
yield self.process_file(f)
def process_file(self, filename: str) -> List[str]:
"""
Processes a single file.
"""
t0 = time.time()
click.echo('Now processing {}...'.format(filename))
# Parse the current tree (create a iterator over 's' elements)
s_trees = etree.iterparse(filename, tag=self.sentence_tag)
# Filter the sentence trees
s_trees = self.filter_sentences(s_trees)
# Parse the alignment and translation trees
alignment_trees, translation_trees = self.parse_alignment_trees(filename)
t1 = time.time()
click.echo('Finished parsing trees, took {:.3} seconds'.format(t1 - t0))
# Fetch the results
results = self.fetch_results(filename, s_trees, alignment_trees, translation_trees)
click.echo('Finished fetching results, took {:.3} seconds'.format(time.time() - t1))
# Free index memory
self._index = dict()
return results
def filter_sentences(self, s_trees):
"""
Filters the sentences based on the provided sentence_ids.
"""
if self.sentence_ids:
# TODO: preferably, this should also return an iterparse instead of a list
result = []
for event, s in s_trees:
if s.get(self.config.get('all', 'id')) in self.sentence_ids:
result.append((event, s))
return result
else:
return s_trees
@property
def sentence_tag(self) -> str:
"""
The XML tag used for sentences.
"""
return 's'
@property
def word_tag(self) -> str:
"""
The XML tag used for words.
"""
return 'w'
def generate_header(self) -> List[str]:
"""
Returns the header for the output file.
"""
header = [
'document',
'sentence',
'type {}'.format(self.l_from),
'words {}'.format(self.l_from),
'ids {}'.format(self.l_from),
self.l_from]
for metadata in self.metadata.keys():
header.append(metadata)
for language in self.l_to:
header.append('alignment type')
header.append(language)
return header
def generate_result_line(self,
filename: str,
sentence: etree._Element,
mwe: MultiWordExpression = None) -> List[Optional[str]]:
"""
Returns a single result line
:param filename: The current filename
:param sentence: The current sentence (in XML format)
:param mwe: The found MultiWordExpression
:return: A list of output properties.
"""
result: List[Optional[str]] = list()
result.append(os.path.basename(filename))
result.append(self.get_id(sentence))
if mwe:
result.append(self.get_type(sentence, mwe=mwe))
result.append(mwe.construction_to_string())
result.append(mwe.construction_ids())
if self.output == XML:
result.append('<root>' + str(etree.tostring(sentence, encoding=str)) + '</root>')
else:
result.append(mwe.mark_sentence())
self.append_metadata(sentence, result)
else:
result.append('')
result.append('')
result.append('')
if self.output == XML:
result.append('<root>' + str(etree.tostring(sentence, encoding=str)) + '</root>')
else:
result.append(self.mark_sentence(sentence))
self.append_metadata(sentence, result)
return result
def append_metadata(self,
s: Optional[etree._Element],
result: List[Optional[str]]) -> None:
"""
        Appends metadata to a result line.
"""
for metadata, level in self.metadata.items():
if s is not None and level == 's':
result.append(s.get(metadata))
elif s is not None and level == 'p':
result.append(s.getparent().get(metadata))
elif s is not None and level == 'text':
result.append(s.getparent().getparent().get(metadata))
else:
raise ValueError('Invalid level {}'.format(level))
def add_extractor(self, extractor: 'BaseExtractor') -> None:
"""
        Adds another Extractor to this Extractor. This allows combining Extractors.
The last added Extractor determines the output.
"""
self.other_extractors.append(extractor)
def languages_ordered(self, language_from: str, language_to: str) -> List[str]:
return [language_from, language_to] if self.no_order_languages else sorted([language_from, language_to])
def get_tenses(self, sentence):
"""
        This method retrieves the English "tense" for a complete sentence. It is very naive,
based upon the part-of-speech tags of verbs that appear in the sentence.
It should work for the tagsets of both the Penn Treebank Project and the BNC.
See https://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/data/Penn-Treebank-Tagset.pdf for the
Penn Treebank Project tagset and see http://www.natcorp.ox.ac.uk/docs/URG/posguide.html#section1
for the BNC tagset
:param sentence: the s element
:return: a tuple of the assigned tense and all tenses for the verbs in the sentences
"""
tense = 'none'
tenses = []
for w in sentence.xpath('.//w'):
pos = self.get_pos(self.l_from, w)
if pos.startswith('V') and len(pos) == 3:
if pos.endswith('B') or pos.endswith('P') or pos.endswith('Z'):
tenses.append('present')
elif pos.endswith('D'):
tenses.append('past')
elif pos.endswith('N'):
tenses.append('participle')
elif pos.endswith('G'):
tenses.append('gerund')
elif pos.endswith('I'):
tenses.append('infinitive')
elif pos == 'VM0':
tenses.append('modal')
elif pos == 'MD':
tenses.append('modal')
elif pos == 'BES':
tenses.append('present')
elif pos == 'VB':
tenses.append('infinitive')
if tenses:
tenses_set = set(tenses)
if len(tenses_set) == 1:
tense = tenses[0]
else:
if tenses_set in [{'present', 'infinitive'}, {'present', 'gerund'}, {'present', 'gerund', 'infinitive'}]:
tense = 'present'
elif tenses_set in [{'past', 'infinitive'}, {'past', 'gerund'}, {'past', 'gerund', 'infinitive'}]:
tense = 'past'
elif tenses_set == {'modal', 'infinitive'}:
tense = 'modal'
else:
tense = 'other'
return tense, tenses
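    # Worked example (hedged, Penn Treebank tags): "will go" is tagged MD + VB,
    # so tenses == ['modal', 'infinitive'] and the combined tense is 'modal';
    # a sentence whose only verb is tagged VBD yields ('past', ['past']).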
@abstractmethod
def get_config(self) -> Union[str, List[str]]:
"""
Returns the location of the configuration file, potentially multiple
"""
pass
@abstractmethod
def fetch_results(self,
filename: str,
s_trees: etree.iterparse,
alignment_trees: Dict[str, List[Alignment]],
translation_trees: Dict[str, etree._ElementTree]) -> List[str]:
"""
Fetches the results for a single file.
"""
pass
@abstractmethod
def parse_alignment_trees(self, filename: str) -> Tuple[Dict[str, List[Alignment]],
Dict[str, etree._ElementTree]]:
"""
Parses the alignment trees for a single file.
"""
pass
@abstractmethod
def list_filenames(self, dir_name: str) -> List[str]:
"""
List all to be processed files in the given directory.
"""
pass
@abstractmethod
def get_translated_lines(self,
alignment_trees: Dict[str, List[Alignment]],
language_from: str,
language_to: str,
segment_number: str) -> List[str]:
"""
Returns the translated segment numbers (could be multiple) for a segment number in the original text.
"""
pass
@abstractmethod
def get_sentence(self, element: etree._Element) -> etree._Element:
"""
Returns the full sentence XML for the given element.
"""
pass
@abstractmethod
def get_siblings(self,
element: etree._Element,
sentence_id: Optional[str],
check_preceding: bool) -> List[etree._Element]:
"""
Returns the siblings of the given element in the given sentence_id.
The check_preceding parameter allows to look either forwards or backwards.
"""
pass
@abstractmethod
def sort_by_alignment_certainty(self, file_names: List[str]) -> List[str]:
"""
Sort files by their probability of having a correct sentence alignment.
"""
pass
@abstractmethod
def filter_by_file_size(self, file_names: List[str]) -> List[str]:
"""
Filter files based on file size, a minimum and maximum file size can be supplied.
"""
pass
@abstractmethod
def get_type(self, sentence: etree._Element, mwe: Optional[MultiWordExpression] = None) -> str:
"""
Return a classification for the sentence or the found MultiWordExpression.
"""
pass
@abstractmethod
def mark_sentence(self, sentence: etree._Element, match: Optional[str] = None) -> str:
"""
Mark the found match (if any) in the sentence.
"""
pass
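# A hedged usage sketch (MyExtractor is a hypothetical concrete subclass that
# implements the abstract methods above):
#
#   extractor = MyExtractor('en', languages_to=['nl'], outfile='results.csv')
#   extractor.process_folder('corpora/europarl')
#
# process_folder() collects the file names, writes the header row, and streams
# one block of result rows per input file into the CSV/XLSX writer.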
|
{
"content_hash": "01b08d2e8846e6aadecf18a7aa2ef034",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 121,
"avg_line_length": 39.56136363636364,
"alnum_prop": 0.5685643706554834,
"repo_name": "UUDigitalHumanitieslab/time-in-translation",
"id": "79674ac44f49d6fd7be967e74356df2363f2c848",
"size": "17407",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "perfectextractor/apps/extractor/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43919"
}
],
"symlink_target": ""
}
|
"""Query and aggregate data from log files using SQL-like syntax"""
import sys
import argparse
import os
import re
import ast
import readline
import atexit
import time
import inspect
from multiprocessing import cpu_count
try:
from collections import OrderedDict
except ImportError:
    # python < 2.7 compatibility
from compat.OrderedDict import OrderedDict
from ply import yacc
import parser
import parallel
import screen
import sqlfuncs
import logformat
from util import NoTokenError, parse_format_string, Complete, Table, pretty_print
DEBUG = False
log_regex = None
class LogQuery(object):
def __init__(self, data, query):
self.data = data
self.query = query
try:
self.ast = parser.parse(query)
except NoTokenError, e:
print "ERROR: %s" % e.message
print query
return
except SyntaxError:
return
if DEBUG:
# pretty-printer
sq = str(self.ast)
pretty_print(sq)
print sq
print '-'*screen.width
def run(self):
start_time = time.time()
op_data = sqlfuncs.do(self.ast, self.data[:]) # COPY!!!
response = OrderedDict()
for row in op_data:
for key in row.keys():
if not response.has_key(key):
response[key] = []
response[key].append(row[key])
Table(response, start_time).prnt()
class LoGrok(object):
def __init__(self, args, interactive=False, curses=False, chunksize=10000):
if curses:
screen.init_curses()
elif interactive:
screen.init_linebased()
self.interactive = interactive
self.args = args
self.processed_rows = 0
self.oldpct = 0
self.data = []
self.chunksize = chunksize
self.complete = Complete()
self.crunchlogs()
self.interact()
def crunchlogs(self):
global log_regex
        if self.args.format is not None:
            fmt = self.args.format
        else:
            # NB: naming this local `logformat` would shadow the imported
            # module and make the TYPES lookup raise UnboundLocalError
            fmt = logformat.TYPES[self.args.type]
print
lines = []
for logfile in self.args.logfile:
screen.print_mutable("Reading lines from %s:" % logfile.name)
lines += logfile.readlines()
screen.print_mutable("Reading lines from %s: %d" % (logfile.name, len(lines)))
logfile.close()
screen.print_mutable("", True)
        log_regex = re.compile(parse_format_string(fmt))
if self.args.lines:
lines = lines[:self.args.lines]
st = time.time()
self.data = parallel.run(log_match, lines, _print=True)
et = time.time()
print "%d lines crunched in %0.3f seconds" % (len(lines), (et-st))
def interact(self):
if screen.is_curses():
screen.draw_curses_screen(self.data)
self.main_loop()
elif self.interactive:
self.shell()
else:
self.query(self.args.query)
def shell(self):
try:
history = os.path.expanduser('~/.logrok_history')
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
readline.set_history_length(1000)
readline.parse_and_bind('tab: complete')
readline.set_completer(self.complete.complete)
# XXX This is ugly and needs to be more intelligent. Ideally, the
# completer would use readline.readline() to contextually switch out
# the returned matches
self.complete.addopts(['select', 'from log', 'where', 'between',
'order by', 'group by', 'limit', ] + get_sqlfuncs() + self.data[0].keys())
while True:
q = raw_input("logrok> ").strip()
while not q.endswith(";"):
q += raw_input("> ").strip()
self.query(q)
def query(self, query):
semicolon = query.find(';')
if semicolon != -1:
query = query[:semicolon]
if query in ('quit', 'bye', 'exit'):
sys.exit(0)
if query.startswith('help') or query.startswith('?'):
answer = "Use sql syntax against your log, `from` clauses are ignored.\n"\
"Queries can span multiple lines and _must_ end in a semicolon `;`.\n"\
" Try: `show fields;` to see available field names. Press TAB at the\n"\
" beginning of a new line to see all available completions."
print answer
return
if query in ('show fields', 'show headers'):
print ', '.join(self.data[0].keys())
return
else:
try:
q = LogQuery(self.data, query)
return q.run()
except SyntaxError, e:
return e.message
def main_loop(self):
while 1:
c = screen.getch()
if c == ord('x'): break
if c == ord('q'): screen.prompt("QUERY:", self.query)
def get_sqlfuncs():
return map(
lambda x: x[0],
filter(
lambda x: not x[0].startswith('_') and not x[0] == 'do',
inspect.getmembers(sqlfuncs, inspect.isfunction)
)
)
@parallel.map
def log_match(chunk):
response = []
for line in chunk:
out = {}
m = log_regex.match(line)
for key in log_regex.groupindex:
if logformat.types.has_key(key):
f = logformat.types[key]
else:
f = str
"""
# XXX
# This is a hack a big big hack
# It's here because I discovered that converting the date
# strings into date objects using strptime is a HUGE performance hit!
# -- don't know what to do about that
if f not in (int, str):
f = str
"""
d = m.group(key)
out[key] = f(d)
response.append(out)
return response
def main():
cmd = argparse.ArgumentParser(description="Grok/Query/Aggregate log files. Requires python2 >= 2.7")
typ = cmd.add_mutually_exclusive_group(required=True)
typ.add_argument('-t', '--type', metavar='TYPE', choices=logformat.TYPES, help='{%s} Use built-in log type (default: apache-common)'%', '.join(logformat.TYPES), default='apache-common')
typ.add_argument('-f', '--format', action='store', help='Log format (use apache LogFormat string)')
typ.add_argument('-C', '--config', type=argparse.FileType('r'), help='httpd.conf file in which to find LogFormat string (requires -T)')
cmd.add_argument('-T', '--ctype', help='type-name for LogFormat from specified httpd.conf file (only works with -c)')
cmd.add_argument('-j', '--processes', action='store', type=int, help='Number of processes to fork for log crunching (default: smart)', default=parallel.SMART)
cmd.add_argument('-l', '--lines', action='store', type=int, help='Only process LINES lines of input')
interactive = cmd.add_mutually_exclusive_group(required=False)
interactive.add_argument('-i', '--interactive', action='store_true', help="Use line-based interactive interface")
interactive.add_argument('-c', '--curses', action='store_true', help=argparse.SUPPRESS)
interactive.add_argument('-q', '--query', help="The query to run")
cmd.add_argument('-d', '--debug', action='store_true', help="Turn debugging on (you don't want this)")
cmd.add_argument('logfile', nargs='+', type=argparse.FileType('r'), help="log(s) to parse/query")
args = cmd.parse_args(sys.argv[1:])
if args.config and not args.ctype:
cmd.error("-C/--config option requires -T/--ctype option")
if args.ctype and not args.config:
cmd.error("-T/--ctype only works with -C/--config option")
if args.config and args.ctype:
config = args.config.read()
args.config.close()
m = re.search(r'^logformat[\s]+(.*)[\s]+%s' % args.ctype, config, re.I|re.M)
if m is None:
cmd.error("LogFormat %s not found in %s" % (args.ctype, args.config.name))
format = m.group(1)
if (format.startswith("'") or format.startswith('"')) and (format.endswith("'") or format.endswith('"')):
format = format[1:-1]
args.format = format.replace(r"\'", "'").replace(r'\"', '"')
global DEBUG
DEBUG = args.debug
parser.DEBUG = DEBUG
parallel.DEBUG = DEBUG
sqlfuncs.DEBUG = DEBUG
parser.init()
parallel.numprocs = args.processes
LoGrok(args, interactive=args.interactive, curses=args.curses)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
parallel.killall()
# TODO -- reset terminal if curses
print
sys.exit(1)
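# Example invocations (hedged; file names, field names and the query itself are
# illustrative, and the available fields depend on the chosen log format):
#
#   python logrok.py -t apache-common -q 'select ip from log;' access.log
#   python logrok.py -f '%h %l %u %t "%r" %>s %b' -i access.log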
|
{
"content_hash": "8fbe786440e631d6b488784bbce15ff1",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 189,
"avg_line_length": 36.1336032388664,
"alnum_prop": 0.5757983193277311,
"repo_name": "spuriousdata/logrok",
"id": "6a4bebbd0924d0fb439c9d505a90c6c3423656a4",
"size": "8948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logrok/logrok.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62810"
}
],
"symlink_target": ""
}
|
import math
class Point(object):
def __init__(self, x=0, y=0, r=None, theta=None):
self.x, self.y = x,y
# convert from polar if necessary
if r is not None and theta is not None:
self.x = r * math.cos(theta)
self.y = r * math.sin(theta)
def __add__(self, other):
return Point(x = self.x + other.x, y = self.y + other.y)
def __sub__(self, other):
return Point(x = self.x - other.x, y = self.y - other.y)
def rotate(self, theta):
X = self.x * math.cos(theta) - self.y * math.sin(theta)
Y = self.x * math.sin(theta) + self.y * math.cos(theta)
return Point(x = X, y = Y)
def rotateAround(self, other_point, theta):
return (self - other_point).rotate(theta) + other_point
def __repr__(self):
return "{0:.3f} {1:.3f}".format(self.x, self.y)
class ScheduleParser(object):
def __init__(self, left_schedule_raw, right_schedule_raw):
        # since the robot's total mission can only be up to 100 seconds,
# we'll just fill 100 slots
self.left_schedule = self._parseSchedule(left_schedule_raw)
self.right_schedule = self._parseSchedule(right_schedule_raw)
def _parseSchedule(self, schedule_list):
L = [0]*100
current_block = 0
for schedule_block in schedule_list:
ang_velocity, duration = map(int, schedule_block.split('|'))
L[current_block: current_block+duration] = [ang_velocity] * duration
current_block += duration
return L
def at(self, k):
return ( self.left_schedule[k], self.right_schedule[k] )
class Robot(object):
def __init__(self):
self.position = Point()
self.heading = 0
def processSchedule(self, schedule):
for k in xrange(100):
ang_vel_left, ang_vel_right = schedule.at(k)
self.moveOneSecond(ang_vel_left, ang_vel_right)
def moveOneSecond(self, ang_vel_left, ang_vel_right):
duration = 1
wheel_radius = 1
wheel_base = 1
# handle straight case
if ang_vel_left == ang_vel_right:
distance_to_travel = duration * ang_vel_left * wheel_radius
self.position = self.position + Point(r = distance_to_travel, theta = self.heading)
return
# calculate radius of rotation (from left wheel)
radius_of_rotation = wheel_base * ( ang_vel_right + ang_vel_left ) / 2.0 / ( ang_vel_right - ang_vel_left )
# calculate angle of rotation (taking into account zeroes)
rotating_around_right = ( 2 * radius_of_rotation == -wheel_base )
angle_of_rotation = 0
if rotating_around_right:
angle_of_rotation = ang_vel_left * duration * wheel_radius / ( radius_of_rotation - wheel_base / 2.0 )
else:
angle_of_rotation = ang_vel_right * duration * wheel_radius / ( radius_of_rotation + wheel_base / 2.0 )
# calculate center of rotation
center_of_rotation = self.offsetAlongWheelBase(radius_of_rotation)
# perform the rotation
self.position = self.position.rotateAround(center_of_rotation, angle_of_rotation)
self.heading += angle_of_rotation
def offsetAlongWheelBase(self, offset_amount = 0):
theta = self.heading + math.pi / 2
return self.position + Point(r = offset_amount, theta = theta)
def __repr__(self):
return repr(self.position)
# input parsing
case_count = int(raw_input())
for i in xrange(case_count):
left_schedule_raw = raw_input().strip().split()
right_schedule_raw = raw_input().strip().split()
parser = ScheduleParser(left_schedule_raw, right_schedule_raw)
r = Robot()
r.processSchedule(parser)
print r
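# Sanity checks (hedged; values picked by hand, not from the problem set): with
# ang_vel_left == ang_vel_right == 1 for one second and wheel_radius == 1 the
# robot advances 1 unit along its heading, while ang_vel_left == -1 and
# ang_vel_right == 1 give radius_of_rotation == 0, so the robot spins in place.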
|
{
"content_hash": "95b355310cf691cecd52576c2c6e08af",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 109,
"avg_line_length": 32.61165048543689,
"alnum_prop": 0.6805596903840428,
"repo_name": "MercerBinaryBears/Slides",
"id": "638e34d8029e1e55e86e5b2d776f353fba692fec",
"size": "3359",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "2016/slides/ComputationalGeometry/robot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "333"
},
{
"name": "CSS",
"bytes": "202927"
},
{
"name": "HTML",
"bytes": "206081"
},
{
"name": "Java",
"bytes": "7095"
},
{
"name": "JavaScript",
"bytes": "263297"
},
{
"name": "Python",
"bytes": "9645"
}
],
"symlink_target": ""
}
|
import urllib
import tornado.ioloop
import tornado.web
import tornado.auth
import tornado.httpclient
import tornado.escape
import tornado.httputil
import logging
class GithubMixin(tornado.auth.OAuth2Mixin):
""" Github OAuth Mixin, based on FacebookGraphMixin
"""
_OAUTH_AUTHORIZE_URL = 'https://github.com/login/oauth/authorize'
_OAUTH_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
_API_URL = 'https://api.github.com'
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
""" Handles the login for Github, queries /user and returns a user object
"""
logging.debug('gau ' + redirect_uri)
http = tornado.httpclient.AsyncHTTPClient()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id, client_secret, callback, extra_fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
""" callback for authentication url, if successful get the user details """
if response.error:
logging.warning('Github auth error: %s' % str(response))
callback(None)
return
args = tornado.escape.parse_qs_bytes(
tornado.escape.native_str(response.body))
if 'error' in args:
logging.error('oauth error ' + args['error'][-1])
raise Exception(args['error'][-1])
session = {
"access_token": args["access_token"][-1],
}
cback = self.async_callback(self._on_get_user_info, callback, session)
token = session['access_token']
self.github_request(path='/user', callback=cback, access_token=token)
#self.github_request(
# method="/user",
# callback=self.async_callback(self._on_get_user_info, callback, session),
# access_token=session["access_token"])
def _on_get_user_info(self, callback, session, user):
""" callback for github request /user to create a user """
logging.debug('user data from github ' + str(user))
if user is None:
callback(None)
return
callback({
"login": user["login"],
"name": user["name"],
"email": user["email"],
"access_token": session["access_token"],
})
def github_request(self, path, callback, access_token=None, method='GET', body=None, **args):
""" Makes a github API request, hands callback the parsed data """
args["access_token"] = access_token
url = tornado.httputil.url_concat(self._API_URL + path, args)
logging.debug('request to ' + url)
http = tornado.httpclient.AsyncHTTPClient()
if body is not None:
body = tornado.escape.json_encode(body)
logging.debug('body is' + body)
http.fetch(url, callback=self.async_callback(
self._parse_response, callback), method=method, body=body)
def _parse_response(self, callback, response):
""" Parse the JSON from the API """
if response.error:
logging.warning("HTTP error from Github: %s", response.error)
callback(None)
return
try:
json = tornado.escape.json_decode(response.body)
except Exception:
logging.warning("Invalid JSON from Github: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
logging.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
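# A hedged usage sketch (handler name and URLs are hypothetical; this follows
# the usual tornado.auth OAuth2 two-step flow):
#
#   class GithubLoginHandler(tornado.web.RequestHandler, GithubMixin):
#       @tornado.web.asynchronous
#       def get(self):
#           if self.get_argument('code', None):
#               self.get_authenticated_user(
#                   redirect_uri='http://example.com/auth/github',
#                   client_id=self.settings['github_client_id'],
#                   client_secret=self.settings['github_client_secret'],
#                   code=self.get_argument('code'),
#                   callback=self._on_login)
#               return
#           self.authorize_redirect(
#               redirect_uri='http://example.com/auth/github',
#               client_id=self.settings['github_client_id'])
#
#       def _on_login(self, user):
#           ...  # user is the dict built by _on_get_user_info above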
|
{
"content_hash": "f58e89b3eacff4799bfd089044e5c467",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 119,
"avg_line_length": 36.52252252252252,
"alnum_prop": 0.5860878145041933,
"repo_name": "richstoner/incf_engine",
"id": "c404ba01be3b0e9ec98937c2f04af449fb4bb874",
"size": "4054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/app/github.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "579218"
},
{
"name": "JavaScript",
"bytes": "1335580"
},
{
"name": "Python",
"bytes": "60903"
},
{
"name": "Ruby",
"bytes": "3035"
},
{
"name": "Shell",
"bytes": "4238"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings for IFRC's Resource Management System
- Americas Zone
http://eden.sahanafoundation.org/wiki/Deployments/IFRC
"""
T = current.T
# -----------------------------------------------------------------------------
# System Name
#
settings.base.system_name = T("Resource Management System")
settings.base.system_name_short = T("RMS")
# -----------------------------------------------------------------------------
# Pre-Populate
#
settings.base.prepopulate = ("RMSAmericas", "RMSAmericas/Demo")
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
#
settings.base.theme = "RMSAmericas"
# Uncomment to disable responsive behavior of datatables
#settings.ui.datatables_responsive = False
# Uncomment to show a default cancel button in standalone create/update forms
settings.ui.default_cancel_button = True
# @todo: configure custom icons
#settings.ui.custom_icons = {
# "male": "icon-male",
# "female": "icon-female",
# "medical": "icon-plus-sign-alt",
#}
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 8 # Delegations
settings.security.map = True
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = True
settings.auth.registration_link_user_to = {"staff": T("Staff"),
"volunteer": T("Volunteer"),
#"member": T("Member")
}
settings.auth.record_approval = True
# @ToDo: Should we fallback to organisation_id if site_id is None?
settings.auth.registration_roles = {"site_id": ["reader",
],
}
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = True
settings.auth.person_realm_member_org = True
# Activate entity role manager tabs for OrgAdmins
settings.auth.entity_role_manager = True
def ifrc_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
# Do not apply realms for Master Data
# @ToDo: Restore Realms and add a role/functionality support for Master Data
if tablename in ("hrm_certificate",
"hrm_department",
"hrm_job_title",
"hrm_course",
"hrm_programme",
#"member_membership_type",
"vol_award",
):
return None
db = current.db
s3db = current.s3db
# Entity reference fields
EID = "pe_id"
#OID = "organisation_id"
SID = "site_id"
#GID = "group_id"
PID = "person_id"
# Owner Entity Foreign Key
realm_entity_fks = dict(pr_contact = [("org_organisation", EID),
#("po_household", EID),
("pr_person", EID),
],
pr_contact_emergency = EID,
pr_physical_description = EID,
pr_address = [("org_organisation", EID),
("pr_person", EID),
],
pr_image = EID,
pr_identity = PID,
pr_education = PID,
pr_note = PID,
hrm_human_resource = SID,
hrm_training = PID,
inv_recv = SID,
inv_send = SID,
inv_track_item = "track_org_id",
inv_adj_item = "adj_id",
req_req_item = "req_id",
#po_household = "area_id",
#po_organisation_area = "area_id",
)
# Default Foreign Keys (ordered by priority)
default_fks = (#"household_id",
"catalog_id",
"project_id",
"project_location_id",
)
# Link Tables
#realm_entity_link_table = dict(
# project_task = Storage(tablename = "project_task_project",
# link_key = "task_id"
# )
# )
#if tablename in realm_entity_link_table:
# # Replace row with the record from the link table
# link_table = realm_entity_link_table[tablename]
# table = s3db[link_table.tablename]
# rows = db(table[link_table.link_key] == row.id).select(table.id,
# limitby=(0, 1))
# if rows:
# # Update not Create
# row = rows.first()
# Check if there is a FK to inherit the realm_entity
realm_entity = 0
fk = realm_entity_fks.get(tablename, None)
fks = [fk] if not isinstance(fk, list) else list(fk)
fks.extend(default_fks)
for default_fk in fks:
if isinstance(default_fk, tuple):
instance_type, fk = default_fk
else:
instance_type, fk = None, default_fk
if fk not in table.fields:
continue
# Inherit realm_entity from parent record
if fk == EID:
if instance_type:
ftable = s3db.table(instance_type)
if not ftable:
continue
else:
ftable = s3db.pr_person
query = (ftable[EID] == row[EID])
else:
ftablename = table[fk].type[10:] # reference tablename
ftable = s3db[ftablename]
query = (table.id == row.id) & \
(table[fk] == ftable.id)
record = db(query).select(ftable.realm_entity,
limitby=(0, 1)).first()
if record:
realm_entity = record.realm_entity
break
#else:
# Continue to loop through the rest of the default_fks
# Fall back to default get_realm_entity function
use_user_organisation = False
#use_user_root_organisation = False
# Suppliers & Partners are owned by the user's organisation
if realm_entity == 0 and tablename == "org_organisation":
ottable = s3db.org_organisation_type
ltable = db.org_organisation_organisation_type
query = (ltable.organisation_id == row.id) & \
(ltable.organisation_type_id == ottable.id)
otype = db(query).select(ottable.name,
limitby=(0, 1)).first()
if not otype or otype.name != "Red Cross / Red Crescent":
use_user_organisation = True
# Facilities & Requisitions are owned by the user's organisation
elif tablename in ("org_facility", "req_req"):
use_user_organisation = True
elif tablename == "hrm_training":
# Inherit realm entity from the related HR record
htable = s3db.hrm_human_resource
query = (table.id == row.id) & \
(htable.person_id == table.person_id) & \
(htable.deleted != True)
rows = db(query).select(htable.realm_entity, limitby=(0, 2))
if len(rows) == 1:
realm_entity = rows.first().realm_entity
else:
# Ambiguous => try course organisation
ctable = s3db.hrm_course
otable = s3db.org_organisation
query = (table.id == row.id) & \
(ctable.id == table.course_id) & \
(otable.id == ctable.organisation_id)
row = db(query).select(otable.pe_id,
limitby=(0, 1)).first()
if row:
realm_entity = row.pe_id
# otherwise: inherit from the person record
# Groups are owned by the user's organisation
#elif tablename in ("pr_group",):
elif tablename == "pr_group":
use_user_organisation = True
auth = current.auth
user = auth.user
if user:
if use_user_organisation:
# @ToDo - this might cause issues if the user's org is different from the realm that gave them permissions to create the Org
realm_entity = s3db.pr_get_pe_id("org_organisation",
user.organisation_id)
#elif use_user_root_organisation:
# realm_entity = s3db.pr_get_pe_id("org_organisation",
# auth.root_org())
return realm_entity
settings.auth.realm_entity = ifrc_realm_entity
# -----------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0500"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Unsortable 'pretty' date format (for use in English)
settings.L10n.date_format = "%d-%b-%Y"
# Make last name in person/user records mandatory
#settings.L10n.mandatory_lastname = True
# Uncomment this to Translate Layer Names
settings.L10n.translate_gis_layer = True
# Translate Location Names
settings.L10n.translate_gis_location = True
# Uncomment this for Alternate Location Names
settings.L10n.name_alt_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
settings.L10n.translate_org_organisation = True
# Names of Orgs with specific settings
HNRC = "Honduran Red Cross"
# -----------------------------------------------------------------------------
# Finance settings
#
def currencies(default):
""" RMS- and NS-specific currencies (lazy setting) """
# Currencies that are common for all NS
currencies = {"EUR" : T("Euros"),
"CHF" : T("Swiss Francs"),
"USD" : T("United States Dollars"),
}
# NS-specific currencies
root_org = current.auth.root_org_name()
if root_org == HNRC:
currencies["HNL"] = T("Honduran Lempira")
return currencies
settings.fin.currencies = currencies
def currency_default(default):
""" NS-specific default currencies (lazy setting) """
root_org = current.auth.root_org_name()
if root_org == HNRC:
default = "HNL"
#else:
# default = "USD"
return default
settings.fin.currency_default = currency_default
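    # (Hedged note: lazy settings such as the two callables above are resolved
    # per request, so the HNL default only applies when the logged-in user's
    # root organisation is the Honduran Red Cross.)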
# -----------------------------------------------------------------------------
# Map Settings
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
settings.gis.display_L0 = True
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# GeoNames username
settings.gis.geonames_username = "rms_dev"
# -----------------------------------------------------------------------------
# Use the label 'Camp' instead of 'Shelter'
#
settings.ui.camp = True
# -----------------------------------------------------------------------------
# Filter Manager
#
#settings.search.filter_manager = False
# -----------------------------------------------------------------------------
# Default Summary
#
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report", "ajax_init": True}],
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
)
# -----------------------------------------------------------------------------
# Content Management
#
#settings.cms.hide_index = True
settings.cms.richtext = True
# -----------------------------------------------------------------------------
# Messaging
# Parser
#settings.msg.parser = "IFRC"
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Organisation Management
#
# Enable the use of Organisation Branches
settings.org.branches = True
# Set the length of the auto-generated org/site code the default is 10
#settings.org.site_code_len = 3
# Set the label for Sites
settings.org.site_label = "Office/Warehouse/Facility"
# Enable certain fields just for specific Organisations
#settings.org.dependent_fields = \
# {"pr_person.middle_name" : (CVTL, VNRC),
# "pr_person_details.mother_name" : (BRCS, ),
# "pr_person_details.father_name" : (ARCS, BRCS),
# "pr_person_details.grandfather_name" : (ARCS, ),
# "pr_person_details.affiliations" : (PRC, ),
# "pr_person_details.company" : (PRC, ),
# "vol_details.availability" : (VNRC, ),
# "vol_details.card" : (ARCS, ),
# "vol_volunteer_cluster.vol_cluster_type_id" : (PRC, ),
# "vol_volunteer_cluster.vol_cluster_id" : (PRC, ),
# "vol_volunteer_cluster.vol_cluster_position_id" : (PRC, ),
# }
# -----------------------------------------------------------------------------
# Human Resource Management
#
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
settings.hrm.filter_certificates = True
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to allow HRs to have multiple Job Titles
settings.hrm.multiple_job_titles = True
# Uncomment to have each root Org use a different Job Title Catalog
settings.hrm.org_dependent_job_titles = True
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to enable the use of HR Education
#settings.hrm.use_education = True
# Custom label for Organisations in HR module
settings.hrm.organisation_label = "National Society / Branch"
# Uncomment to consolidate tabs into a single CV
settings.hrm.cv_tab = True
# Uncomment to consolidate tabs into Staff Record (set to False to hide the tab)
settings.hrm.record_tab = "record"
# Uncomment to do a search for duplicates in the new AddPersonWidget2
settings.pr.lookup_duplicates = True
# -----------------------------------------------------------------------------
# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Activity Types for Activities & Projects
settings.project.activity_types = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to enable Indicators in projects
# Just HNRC
#settings.project.indicators = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Uncomment this to enable Programmes in projects
settings.project.programmes = True
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Host National Society"),
2: T("Partner"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner National Society"),
}
# -----------------------------------------------------------------------------
# Inventory Management
settings.inv.show_mode_of_transport = True
settings.inv.send_show_time_in = True
#settings.inv.collapse_tabs = True
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
#settings.inv.org_dependent_warehouse_types = True
# Settings for HNRC:
settings.inv.stock_count = False
settings.inv.item_status = {#0: current.messages["NONE"], # Not defined yet
0: T("Good"),
1: T("Damaged"),
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
settings.inv.recv_types = {#0: current.messages["NONE"], In Shipment Types
#11: T("Internal Shipment"), In Shipment Types
32: T("Donation"),
34: T("Purchase"),
36: T("Consignment"), # Borrowed
37: T("In Transit"), # Loaning warehouse space to another agency
}
# -----------------------------------------------------------------------------
# Request Management
# Uncomment to disable Inline Forms in Requests module
settings.req.inline_forms = False
settings.req.req_type = ["Stock"]
settings.req.use_commit = False
# Should Requests ask whether Transportation is required?
settings.req.ask_transport = True
settings.req.pack_values = False
# Disable Request Matching as we don't want users making requests to see what stock is available
settings.req.prompt_match = False
# Uncomment to disable Recurring Request
settings.req.recurring = False # HNRC
# =============================================================================
# Template Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "RMS",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
#module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
#module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
#module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
#module_type = None,
)),
# Uncomment to enable internal support requests
("support", Storage(
name_nice = T("Support"),
#description = "Support Requests",
restricted = True,
#module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
#module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
#module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
#module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
#module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
#module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
#module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
#module_type = 4
)),
#("asset", Storage(
# name_nice = T("Assets"),
# #description = "Recording and Assigning Assets",
# restricted = True,
# #module_type = 5,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
#module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
#module_type = 2
)),
("budget", Storage(
name_nice = T("Budgets"),
#description = "Tracking of Budgets",
restricted = True,
#module_type = None
)),
#("survey", Storage(
# name_nice = T("Assessments"),
# #description = "Create, enter, and manage surveys.",
# restricted = True,
# #module_type = 5,
# )),
#("event", Storage(
# name_nice = T("Events"),
# #description = "Events",
# restricted = True,
# #module_type = 10
# )),
#("member", Storage(
# name_nice = T("Members"),
# #description = "Membership Management System",
# restricted = True,
# #module_type = 10,
# )),
#("deploy", Storage(
# name_nice = T("Regional Disaster Response Teams"),
# #description = "Alerting and Deployment of Disaster Response Teams",
# restricted = True,
# #module_type = 10,
# )),
#("po", Storage(
# name_nice = T("Recovery Outreach"),
# #description = "Population Outreach",
# restricted = True,
# #module_type = 10,
# )),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
#module_type = None,
)),
#("vulnerability", Storage(
# name_nice = T("Vulnerability"),
# #description = "Manages vulnerability indicators",
# restricted = True,
# #module_type = 10,
# )),
])
# -------------------------------------------------------------------------
# Functions which are local to this Template
# -------------------------------------------------------------------------
def ns_only(tablename,
fieldname = "organisation_id",
required = True,
branches = True,
updateable = True,
limit_filter_opts = True
):
"""
Function to configure an organisation_id field to be restricted to just
NS/Branch
@param required: Field is mandatory
@param branches: Include Branches
@param updateable: Limit to Orgs which the user can update
@param limit_filter_opts: Also limit the Filter options
NB If limit_filter_opts=True, apply in customise_xx_controller inside prep,
after standard_prep is run
"""
# Lookup organisation_type_id for Red Cross
db = current.db
s3db = current.s3db
ttable = s3db.org_organisation_type
    row = db(ttable.name == "Red Cross / Red Crescent").select(ttable.id,
                                                               limitby=(0, 1),
                                                               ).first()
    if not row:
        # No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
        return
    type_id = row.id
# Load standard model
f = s3db[tablename][fieldname]
if limit_filter_opts:
        # Find the relevant filter widget & limit its options
filter_widgets = s3db.get_config(tablename, "filter_widgets")
filter_widget = None
if filter_widgets:
from s3 import FS, S3HierarchyFilter
for w in filter_widgets:
if isinstance(w, S3HierarchyFilter) and \
w.field == "organisation_id":
filter_widget = w
break
if filter_widget is not None:
selector = FS("organisation_organisation_type.organisation_type_id")
filter_widget.opts["filter"] = (selector == type_id)
# Label
if branches:
f.label = T("National Society / Branch")
else:
f.label = T("National Society")
# Requires
# Filter by type
ltable = db.org_organisation_organisation_type
rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
filter_opts = [row.organisation_id for row in rows]
auth = current.auth
s3_has_role = auth.s3_has_role
Admin = s3_has_role("ADMIN")
if branches:
if Admin:
parent = True
else:
# @ToDo: Set the represent according to whether the user can see resources of just a single NS or multiple
# @ToDo: Consider porting this into core
user = auth.user
if user:
realms = user.realms
#delegations = user.delegations
if realms:
parent = True
else:
parent = False
else:
parent = True
else:
# Keep the represent function as simple as possible
parent = False
# Exclude branches
btable = s3db.org_organisation_branch
rows = db((btable.deleted != True) &
(btable.branch_id.belongs(filter_opts))).select(btable.branch_id)
filter_opts = list(set(filter_opts) - set(row.branch_id for row in rows))
organisation_represent = s3db.org_OrganisationRepresent
represent = organisation_represent(parent=parent)
f.represent = represent
from s3 import IS_ONE_OF
requires = IS_ONE_OF(db, "org_organisation.id",
represent,
filterby = "id",
filter_opts = filter_opts,
updateable = updateable,
orderby = "org_organisation.name",
sort = True)
if not required:
from gluon import IS_EMPTY_OR
requires = IS_EMPTY_OR(requires)
f.requires = requires
if parent:
# Use hierarchy-widget
from s3 import FS, S3HierarchyWidget
# No need for parent in represent (it's a hierarchy view)
node_represent = organisation_represent(parent=False)
# Filter by type
# (no need to exclude branches - we wouldn't be here if we didn't use branches)
selector = FS("organisation_organisation_type.organisation_type_id")
f.widget = S3HierarchyWidget(lookup="org_organisation",
filter=(selector == type_id),
represent=node_represent,
multiple=False,
leafonly=False,
)
else:
# Dropdown not Autocomplete
f.widget = None
# Comment
if (Admin or s3_has_role("ORG_ADMIN")):
# Need to do import after setting Theme
from s3layouts import S3AddResourceLink
from s3 import S3ScriptItem
add_link = S3AddResourceLink(c="org", f="organisation",
vars={"organisation_type.name":"Red Cross / Red Crescent"},
label=T("Create National Society"),
title=T("National Society"),
)
comment = f.comment
if not comment or isinstance(comment, S3AddResourceLink):
f.comment = add_link
elif isinstance(comment[1], S3ScriptItem):
# Don't overwrite scripts
f.comment[0] = add_link
else:
f.comment = add_link
else:
# Not allowed to add NS/Branch
f.comment = ""
# -----------------------------------------------------------------------------
def user_org_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
* Use the user's organisation if logged-in and associated with an
organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
return user_org_id
else:
# no default
return {}
# -----------------------------------------------------------------------------
def user_org_and_children_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
* Use the user's organisation if logged-in and associated with an
organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
org = db(otable.id == user_org_id).select(otable.pe_id,
limitby=(0, 1)
).first()
if org:
pe_id = org.pe_id
pe_ids = s3db.pr_get_descendants((pe_id,),
entity_types=("org_organisation",))
rows = db(otable.pe_id.belongs(pe_ids)).select(otable.id)
ids = [row.id for row in rows]
ids.append(user_org_id)
return ids
else:
return user_org_id
else:
# no default
return {}
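# Example wiring (as used in the customise_* functions below): install one of
# these callbacks as a default filter for a specific table, e.g.
#from s3 import s3_set_default_filter
#s3_set_default_filter("~.organisation_id",
#                      user_org_and_children_default_filter,
#                      tablename = "project_project")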
# -----------------------------------------------------------------------------
def customise_auth_user_controller(**attr):
"""
Customise admin/user() and default/user() controllers
"""
#if "arg" in attr and attr["arg"] == "register":
# Organisation needs to be an NS/Branch
ns_only("auth_user",
required = True,
branches = True,
updateable = False, # Need to see all Orgs in Registration screens
)
return attr
settings.customise_auth_user_controller = customise_auth_user_controller
# -----------------------------------------------------------------------------
def customise_hrm_course_controller(**attr):
tablename = "hrm_course"
# Organisation needs to be an NS/Branch
ns_only(tablename,
required = False,
branches = False,
)
# Load standard model
s3db = current.s3db
table = s3db.hrm_course
list_fields = ["code",
"name",
"organisation_id",
(T("Sectors"), "course_sector.sector_id"),
]
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm("code",
"name",
"organisation_id",
S3SQLInlineLink("sector",
field = "sector_id",
label = T("Sectors"),
),
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
return attr
settings.customise_hrm_course_controller = customise_hrm_course_controller
# -----------------------------------------------------------------------------
def customise_hrm_department_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("hrm_department",
required = False,
branches = False,
)
return attr
settings.customise_hrm_department_controller = customise_hrm_department_controller
# -----------------------------------------------------------------------------
def emergency_contact_represent(row):
"""
Representation of Emergency Contacts (S3Represent label renderer)
@param row: the row
"""
items = [row["pr_contact_emergency.name"]]
relationship = row["pr_contact_emergency.relationship"]
if relationship:
items.append(" (%s)" % relationship)
phone_number = row["pr_contact_emergency.phone"]
if phone_number:
items.append(": %s" % phone_number)
return "".join(items)
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
controller = current.request.controller
if controller != "deploy":
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_and_children_default_filter,
tablename = "hrm_human_resource")
s3db = current.s3db
s3db.org_organisation.root_organisation.label = T("National Society")
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
# Organisation needs to be an NS/Branch
ns_only("hrm_human_resource",
required = True,
branches = True,
limit_filter_opts = True,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("hrm_job_title",
required = False,
branches = False,
)
# Don't show RDRT in the list
current.s3db.hrm_job_title.type.requires = IS_IN_SET({1: T("Staff"),
2: T("Volunteer"),
3: T("Both")
})
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_hrm_programme_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("hrm_programme",
required = False,
branches = False,
)
return attr
settings.customise_hrm_programme_controller = customise_hrm_programme_controller
# -----------------------------------------------------------------------------
def customise_hrm_programme_hours_controller(**attr):
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.person_id$human_resource.organisation_id",
user_org_default_filter,
tablename = "hrm_programme_hours")
return attr
settings.customise_hrm_programme_hours_controller = customise_hrm_programme_hours_controller
# -----------------------------------------------------------------------------
def customise_hrm_training_controller(**attr):
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.person_id$human_resource.organisation_id",
user_org_default_filter,
tablename = "hrm_training")
return attr
settings.customise_hrm_training_controller = customise_hrm_training_controller
# -----------------------------------------------------------------------------
def customise_inv_home():
"""
Homepage for the Inventory module
"""
from gluon import URL
from s3 import s3_redirect_default
auth = current.auth
if auth.user and auth.user.site_id and \
not auth.s3_has_role(current.session.s3.system_roles.ORG_ADMIN):
# Redirect to this Warehouse
table = current.s3db.inv_warehouse
wh = current.db(table.site_id == auth.user.site_id).select(table.id,
limitby=(0, 1)
).first()
if wh:
s3_redirect_default(URL(c="inv", f="warehouse",
args=[wh.id, "inv_item"]))
# Redirect to Warehouse Summary Page
s3_redirect_default(URL(c="inv", f="warehouse", args="summary"))
settings.customise_inv_home = customise_inv_home
# -----------------------------------------------------------------------------
def customise_inv_inv_item_resource(r, tablename):
current.s3db.configure("inv_inv_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
settings.customise_inv_inv_item_resource = customise_inv_inv_item_resource
# -----------------------------------------------------------------------------
def customise_inv_send_resource(r, tablename):
current.s3db.configure("inv_send",
list_fields = ["id",
"send_ref",
"req_ref",
#"sender_id",
"site_id",
"date",
"recipient_id",
"delivery_date",
"to_site_id",
"status",
#"driver_name",
#"driver_phone",
#"vehicle_plate_no",
#"time_out",
"comments",
],
)
settings.customise_inv_send_resource = customise_inv_send_resource
# -----------------------------------------------------------------------------
def customise_inv_warehouse_resource(r, tablename):
settings.gis.postcode_selector = False # Needs to be done before prep as read during model load
settings.inv.recv_tab_label = "Received/Incoming Shipments"
settings.inv.send_tab_label = "Sent Shipments"
# Only Nepal RC use Warehouse Types
s3db = current.s3db
field = s3db.inv_warehouse.warehouse_type_id
field.readable = field.writable = False
list_fields = s3db.get_config("inv_warehouse", "list_fields")
try:
list_fields.remove("warehouse_type_id")
    except ValueError:
        # Already removed
        pass
settings.customise_inv_warehouse_resource = customise_inv_warehouse_resource
# -----------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
#root_org = current.auth.root_org_name()
#if root_org != HNRC:
# return
settings.gis.postcode_selector = False # Needs to be done before prep as read during model load
# Simplify Form
s3db = current.s3db
table = s3db.org_facility
table.code.readable = table.code.writable = False
table.opening_times.readable = table.opening_times.writable = False
table.website.readable = table.website.writable = False
field = s3db.org_site_facility_type.facility_type_id
field.readable = field.writable = False
# Simplify Search Fields
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
text_fields = ["name",
#"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
for level in levels:
lfield = "location_id$%s" % level
text_fields.append(lfield)
s3db.configure("org_facility",
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
),
S3OptionsFilter("organisation_id"),
S3LocationFilter("location_id",
levels = levels,
),
]
)
settings.customise_org_facility_resource = customise_org_facility_resource
# -----------------------------------------------------------------------------
def customise_org_office_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Organisation needs to be an NS/Branch
ns_only("org_office",
required = True,
branches = True,
limit_filter_opts = True,
)
return result
s3.prep = custom_prep
return attr
settings.customise_org_office_controller = customise_org_office_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component or r.component_name == "branch":
resource = r.resource
type_label = T("Type")
if r.get_vars.get("caller") == "org_facility_organisation_id":
# Simplify
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("name",
"acronym",
"phone",
"comments",
)
resource.configure(crud_form=crud_form,
)
elif r.controller == "po":
# Referral Agencies in PO module
list_fields = ("name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"website",
)
resource.configure(list_fields=list_fields)
# Custom CRUD form
if r.interactive:
from s3 import S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent
# Filter inline address for type "office address", also sets default
OFFICE = {"field": "type", "options": 3}
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = type_label,
multiple = False,
),
S3SQLInlineComponent("address",
fields = [("", "location_id")],
multiple = False,
filterby = (OFFICE,),
),
"phone",
"website",
"logo",
"comments",
)
# Remove unwanted filters
# @todo: add a location filter for office address
unwanted_filters = ("sector_organisation.sector_id",
"country",
)
filter_widgets = [widget
for widget in resource.get_config("filter_widgets")
if widget.field not in unwanted_filters]
resource.configure(crud_form = crud_form,
filter_widgets = filter_widgets,
)
else:
# Organisations in org module
list_fields = ["name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"country",
"website",
]
type_filter = r.get_vars.get("organisation_type.name", None)
if type_filter:
type_names = type_filter.split(",")
if len(type_names) == 1:
# Strip Type from list_fields
list_fields.remove("organisation_organisation_type.organisation_type_id")
type_label = ""
if type_filter == "Red Cross / Red Crescent":
# Modify filter_widgets
filter_widgets = resource.get_config("filter_widgets")
# Remove type (always 'RC')
filter_widgets.pop(1)
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create National Society"),
title_display = T("National Society Details"),
title_list = T("Red Cross & Red Crescent National Societies"),
title_update = T("Edit National Society"),
title_upload = T("Import Red Cross & Red Crescent National Societies"),
label_list_button = T("List Red Cross & Red Crescent National Societies"),
label_delete_button = T("Delete National Society"),
msg_record_created = T("National Society added"),
msg_record_modified = T("National Society updated"),
msg_record_deleted = T("National Society deleted"),
msg_list_empty = T("No Red Cross & Red Crescent National Societies currently registered")
)
# Add Region to list_fields
list_fields.insert(-1, "region_id")
# Region is required
r.table.region_id.requires = r.table.region_id.requires.other
else:
r.table.region_id.readable = r.table.region_id.writable = False
resource.configure(list_fields=list_fields)
if r.interactive:
r.table.country.label = T("Country")
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = type_label,
multiple = False,
#widget = "hierarchy",
),
"region_id",
"country",
"phone",
"website",
"logo",
"comments",
)
resource.configure(crud_form=crud_form)
return result
s3.prep = custom_prep
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("org_organisation_team",
required = False,
branches = True,
)
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# =============================================================================
def vol_active(person_id):
"""
Whether a Volunteer counts as 'Active' based on the number of hours
they've done (both Trainings & Programmes) per month, averaged over
the last year.
        If nothing has been recorded for the last 3 months, don't penalise, as
        we assume that data entry simply hasn't been done yet.
@ToDo: This should be based on the HRM record, not Person record
- could be active with Org1 but not with Org2
@ToDo: allow to be calculated differently per-Org
"""
now = current.request.utcnow
# Time spent on Programme work
htable = current.s3db.hrm_programme_hours
query = (htable.deleted == False) & \
(htable.person_id == person_id) & \
(htable.date != None)
programmes = current.db(query).select(htable.hours,
htable.date,
orderby=htable.date)
if programmes:
# Ignore up to 3 months of records
three_months_prior = (now - datetime.timedelta(days=92))
end = max(programmes.last().date, three_months_prior.date())
last_year = end - datetime.timedelta(days=365)
# Is this the Volunteer's first year?
if programmes.first().date > last_year:
# Only start counting from their first month
start = programmes.first().date
else:
# Start from a year before the latest record
start = last_year
# Total hours between start and end
programme_hours = 0
for programme in programmes:
if programme.date >= start and programme.date <= end and programme.hours:
programme_hours += programme.hours
# Average hours per month
months = max(1, (end - start).days / 30.5)
average = programme_hours / months
# Active?
if average >= 8:
return True
else:
return False
else:
return False
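# A minimal worked example of the averaging rule above (hypothetical figures):
# 48 programme hours logged over a 6-month window give 48 / 6 = 8 hours per
# month, which meets the >= 8 threshold, so the volunteer counts as 'Active'.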
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == HNRC:
settings.gis.postcode_selector = False # Needs to be done before prep as read during model load
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
component_name = r.component_name
method = r.method
if component_name == "appraisal":
atable = r.component.table
atable.organisation_id.readable = atable.organisation_id.writable = False
# Organisation needs to be an NS
#ns_only("hrm_appraisal",
# required = True,
# branches = False,
# )
field = atable.supervisor_id
field.readable = field.writable = False
field = atable.job_title_id
field.comment = None
field.label = T("Sector") # RDRT-specific
from s3 import IS_ONE_OF
field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
field.represent,
filterby = "type",
filter_opts = (4,),
)
elif component_name == "physical_description":
from gluon import DIV
dtable = r.component.table
dtable.medical_conditions.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Medical Conditions"),
T("Chronic Illness, Disabilities, Mental/Psychological Condition etc.")))
elif method =="record" or component_name == "human_resource":
# Organisation needs to be an NS/Branch
ns_only("hrm_human_resource",
required = True,
branches = True,
)
if method == "record":
# Use default form (legacy)
s3db.clear_config("hrm_human_resource", "crud_form")
return True
s3.prep = custom_prep
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_supply_item_category_resource(r, tablename):
#root_org = current.auth.root_org_name()
#if root_org == HNRC:
# Not using Assets Module
field = current.s3db.supply_item_category.can_be_asset
field.readable = field.writable = False
settings.customise_supply_item_category_resource = customise_supply_item_category_resource
# -----------------------------------------------------------------------------
def customise_survey_series_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("survey_series",
required = False,
branches = True,
)
return attr
settings.customise_survey_series_controller = customise_survey_series_controller
# -----------------------------------------------------------------------------
# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Activity Types for Activities & Projects
settings.project.activity_types = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
settings.project.community = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to enable Indicators in projects
# Just HNRC
#settings.project.indicators = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Uncomment this to enable Programmes in projects
settings.project.programmes = True
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Host National Society"),
2: T("Partner"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner National Society"),
}
# -------------------------------------------------------------------------
def household_inject_form_script(r, record):
"""
Inject JS for progressive revelation of household form,
to be called from prep
@param r: the S3Request
@param record: the household record
"""
if r.interactive:
s3 = current.response.s3
script = "/%s/static/themes/IFRC/js/po.js" % current.request.application
if script not in s3.scripts:
s3.scripts.append(script)
if record and record.followup:
            s3.jquery_ready.append('''$.showHouseholdComponents(true)''')
return
# -------------------------------------------------------------------------
def customise_po_household_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Do not require international phone number format
settings = current.deployment_settings
settings.msg.require_international_phone_numbers = False
# Inject JS for household form
household_inject_form_script(r, r.record)
return result
s3.prep = custom_prep
return attr
settings.customise_po_household_controller = customise_po_household_controller
# -------------------------------------------------------------------------
def customise_po_area_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Do not require international phone number format
settings = current.deployment_settings
settings.msg.require_international_phone_numbers = False
if r.component_name == "household":
# Inject JS for household form
record = None
if r.component_id:
records = r.component.load()
if records:
record = records[0]
household_inject_form_script(r, record)
return result
s3.prep = custom_prep
return attr
settings.customise_po_area_controller = customise_po_area_controller
# -------------------------------------------------------------------------
def project_project_postprocess(form):
"""
        When using Project Monitoring (i.e. HNRC), create the Budget Monitoring entries
"""
db = current.db
s3db = current.s3db
project_id = form.vars.id
# Read Budget Entity ID, Start Date and End Date
ptable = s3db.project_project
project = db(ptable.id == project_id).select(ptable.budget_entity_id,
ptable.name,
ptable.start_date,
ptable.end_date,
limitby=(0, 1)
).first()
if not project:
return
# Copy Project Name to Budget Name
budget_entity_id = project.budget_entity_id
btable = s3db.budget_budget
query = (btable.budget_entity_id == budget_entity_id)
budget = db(query).select(btable.id, # Needed for update_record
# If we want to provide smoothed default expected values
#btable.total_budget,
btable.currency,
# Assume Monthly
#btable.monitoring_frequency,
limitby=(0, 1)
).first()
if not budget:
return
try:
budget.update_record(name = project.name)
    except Exception:
# unique=True violation
budget.update_record(name = "Budget for %s" % project.name)
# Create Monitoring Data entries
# Assume Monthly
#monitoring_frequency = budget.monitoring_frequency
#if not monitoring_frequency:
# return
#total_budget = budget.total_budget
currency = budget.currency
start_date = project.start_date
end_date = project.end_date
if not start_date or not end_date:
return
# Create entries for the 1st of every month between start_date and end_date
from dateutil import rrule
dates = list(rrule.rrule(rrule.MONTHLY, bymonthday=1, dtstart=start_date, until=end_date))
mtable = s3db.budget_monitoring
for d in dates:
mtable.insert(budget_entity_id = budget_entity_id,
# @ToDo: This needs to be modified whenever entries are manually edited
# Set/update this in budget_monitoring_onaccept
# - also check here that we don't exceed overall budget
start_date = start_date,
end_date = d,
currency = currency,
)
# Start date relates to previous entry
start_date = d
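# Illustration of the schedule above (hypothetical dates): a project running
# 2015-01-15 to 2015-04-10 gets monitoring entries ending on 2015-02-01,
# 2015-03-01 and 2015-04-01, the first starting at the project start date and
# each subsequent one starting where the previous entry ends.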
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
tablename = "project_project"
# Default Filter
from s3 import s3_set_default_filter
s3_set_default_filter("~.organisation_id",
user_org_default_filter,
tablename = "project_project")
# Load standard model
s3db = current.s3db
table = s3db[tablename]
# @ToDo: S3SQLInlineComponent for Project orgs
# Get IDs for PartnerNS/Partner-Donor
# db = current.db
# ttable = db.org_organisation_type
# rows = db(ttable.deleted != True).select(ttable.id,
# ttable.name,
# )
# rc = []
# not_rc = []
# nappend = not_rc.append
# for row in rows:
# if row.name == "Red Cross / Red Crescent":
# rc.append(row.id)
# elif row.name == "Supplier":
# pass
# else:
# nappend(row.id)
# Custom Fields
table.organisation_id.label = T("Host National Society")
# Custom Crud Form
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
# Special cases for different NS
root_org = current.auth.root_org_name()
if root_org == HNRC:
HFA = None
# @ToDo: Use Inter-American Framework instead (when extending to Zone office)
# @ToDo: Add 'Business Line' (when extending to Zone office)
settings.project.details_tab = True
settings.project.community_volunteers = True
# Done in a more structured way instead
objectives = None
outputs = None
settings.project.goals = True
settings.project.indicators = True
settings.project.outcomes = True
settings.project.outputs = True
# Use Budget module instead of ProjectAnnualBudget
settings.project.multiple_budgets = False
settings.project.budget_monitoring = True
# Require start/end dates
table.start_date.requires = table.start_date.requires.other
table.end_date.requires = table.end_date.requires.other
budget = S3SQLInlineComponent(
"budget",
label = T("Budget"),
#link = False,
multiple = False,
fields = ["total_budget",
"currency",
#"monitoring_frequency",
],
)
btable = s3db.budget_budget
# Need to provide a name
import random, string
btable.name.default = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
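        # (The random default is only a unique placeholder; project_project_postprocess
        # above replaces it with the project name once the form is saved.)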
btable.monitoring_frequency.default = 3 # Monthly
postprocess = project_project_postprocess
list_fields = s3db.get_config("project_project", "list_fields")
list_fields += [(T("Monthly Status"), "current_status_by_indicators"),
(T("Cumulative Status"), "overall_status_by_indicators"),
]
else:
HFA = "drr.hfa"
objectives = "objectives"
outputs = S3SQLInlineComponent(
"output",
label = T("Outputs"),
fields = ["name", "status"],
)
budget = None
postprocess = None
if settings.get_project_programmes():
# Inject inline link for programmes including AddResourceLink
#from s3layouts import S3AddResourceLink
comment = s3db.project_programme_id.attr.comment
comment.vars = {"caller": "link_defaultprogramme",
"prefix": "project",
"parent": "programme_project",
}
programme = S3SQLInlineLink("programme",
label = T("Program"),
field = "programme_id",
multiple = False,
comment = comment,
)
else:
programme = None
crud_form = S3SQLCustomForm(
"organisation_id",
programme,
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
budget,
#S3SQLInlineComponent(
# "location",
# label = T("Locations"),
# fields = ["location_id"],
#),
# Outputs
outputs,
S3SQLInlineLink(
"hazard",
label = T("Hazards"),
field = "hazard_id",
help_field = s3db.project_hazard_help_fields,
cols = 4,
translate = True,
),
S3SQLInlineLink(
"sector",
label = T("Sectors"),
field = "sector_id",
cols = 4,
translate = True,
),
S3SQLInlineLink(
"theme",
label = T("Themes"),
field = "theme_id",
help_field = s3db.project_theme_help_fields,
cols = 4,
translate = True,
# Filter Theme by Sector
filterby = "theme_id:project_theme_sector.sector_id",
match = "sector_project.sector_id",
script = '''
$.filterOptionsS3({
'trigger':{'alias':'sector','name':'sector_id','inlineType':'link'},
'target':{'alias':'theme','name':'theme_id','inlineType':'link'},
'lookupPrefix':'project',
'lookupResource':'theme',
'lookupKey':'theme_id:project_theme_sector.sector_id',
'showEmptyField':false,
'tooltip':'project_theme_help_fields(id,name)'
})'''
),
HFA,
objectives,
"human_resource_id",
# Disabled since we need organisation_id filtering to either organisation_type_id == RC or NOT
# & also hiding Branches from RCs
# & also rewriting for organisation_type_id via link table
# Partner NS
# S3SQLInlineComponent(
# "organisation",
# name = "partnerns",
# label = T("Partner National Societies"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": rc,
# }],
# filterby = dict(field = "role",
# options = [9])
# ),
# Partner Orgs
# S3SQLInlineComponent(
# "organisation",
# name = "partner",
# label = T("Partner Organizations"),
# fields = ["organisation_id",
# "comments",
# ],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [2])
# ),
# Donors
# S3SQLInlineComponent(
# "organisation",
# name = "donor",
# label = T("Donor(s)"),
# fields = ["organisation_id",
# "amount",
# "currency"],
# Filter Organisation by Type
# filter = ["organisation_id": {"filterby": "organisation_type_id",
# "filterfor": not_rc,
# }],
# filterby = dict(field = "role",
# options = [3])
# ),
#"budget",
#"currency",
"comments",
postprocess = postprocess,
)
s3db.configure(tablename,
crud_form = crud_form,
)
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Lead Organisation needs to be an NS (not a branch)
ns_only(tablename,
required = True,
branches = False,
limit_filter_opts = True,
)
# Set the Host NS filter as Visible so that the default filter works
filter_widgets = s3db.get_config(tablename, "filter_widgets")
for widget in filter_widgets:
if widget.field == "organisation_id":
widget.opts.hidden = False
break
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def customise_project_beneficiary_resource(r, tablename):
"""
Link Project Beneficiaries to Activity Type
"""
if r.interactive and r.component:
if r.tablename == "project_project":
# We are a component of the Project
project_id = r.id
elif r.tablename == "project_location":
# We are a component of the Project Location
project_id = r.record.project_id
else:
# Unknown!
return
db = current.db
s3db = current.s3db
# Filter Activity Type by Sector
ltable = s3db.project_sector_project
rows = db(ltable.project_id == project_id).select(ltable.sector_id)
sectors = [row.sector_id for row in rows]
ltable = s3db.project_activity_type_sector
rows = db(ltable.sector_id.belongs(sectors)).select(ltable.activity_type_id)
filteropts = [row.activity_type_id for row in rows]
def postprocess(form):
# Update project_location.activity_type
beneficiary_id = form.vars.get("id", None)
table = db.project_beneficiary
row = db(table.id == beneficiary_id).select(table.project_location_id,
limitby = (0, 1)
).first()
if not row:
return
project_location_id = row.project_location_id
if not project_location_id:
return
ltable = db.project_beneficiary_activity_type
row = db(ltable.beneficiary_id == beneficiary_id).select(ltable.activity_type_id,
limitby = (0, 1)
).first()
if not row:
return
activity_type_id = row.activity_type_id
ltable = s3db.project_activity_type_location
query = (ltable.project_location_id == project_location_id) & \
(ltable.activity_type_id == activity_type_id)
exists = db(query).select(ltable.id,
limitby = (0, 1)
).first()
if not exists:
ltable.insert(project_location_id = project_location_id,
activity_type_id = activity_type_id,
)
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(#"project_id",
"project_location_id",
S3SQLInlineLink("activity_type",
field = "activity_type_id",
filterby = "id",
options = filteropts,
label = T("Activity Type"),
multiple = False,
),
"parameter_id",
"value",
"target_value",
"date",
"end_date",
"comments",
postprocess = postprocess,
)
s3db.configure(tablename,
crud_form = crud_form,
)
elif not r.component:
# Report
from s3 import S3OptionsFilter
resource = r.resource
filter_widgets = resource.get_config("filter_widgets")
filter_widgets.insert(1,
S3OptionsFilter("beneficiary_activity_type.activity_type_id",
label = T("Activity Type"),
))
report_options = resource.get_config("report_options")
report_options.rows.append("beneficiary_activity_type.activity_type_id")
# Same object so would be added twice
#report_options.cols.append("beneficiary_activity_type.activity_type_id")
resource.configure(filter_widgets = filter_widgets,
report_options = report_options,
)
settings.customise_project_beneficiary_resource = customise_project_beneficiary_resource
# -----------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
from s3 import S3SQLCustomForm, S3SQLInlineComponentCheckbox
crud_form = S3SQLCustomForm(
"project_id",
"location_id",
# @ToDo: Grouped Checkboxes
S3SQLInlineComponentCheckbox(
"activity_type",
label = T("Activity Types"),
field = "activity_type_id",
cols = 3,
# Filter Activity Type by Sector
filter = {"linktable": "project_activity_type_sector",
"lkey": "activity_type_id",
"rkey": "sector_id",
"lookuptable": "project_project",
"lookupkey": "project_id",
},
translate = True,
),
"comments",
)
current.s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_project_location_resource = customise_project_location_resource
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
# Request is mandatory
field = current.s3db.req_commit.req_id
field.requires = field.requires.other
return attr
settings.customise_req_commit_controller = customise_req_commit_controller
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
s3db = current.s3db
# Request is mandatory
field = s3db.req_commit.req_id
field.requires = field.requires.other
table = s3db.req_req
table.req_ref.represent = lambda v, show_link=True, pdf=True: \
s3db.req_ref_represent(v, show_link, pdf)
table.site_id.label = T("Deliver To")
# Hide Drivers list_field
list_fields = s3db.get_config("req_req", "list_fields")
try:
list_fields.remove((T("Drivers"), "drivers"))
    except ValueError:
        # Already removed
        pass
settings.customise_req_req_resource = customise_req_req_resource
# END =========================================================================
|
{
"content_hash": "a19ccb7030fe6f9dcf806d6523a592f0",
"timestamp": "",
"source": "github",
"line_count": 2127,
"max_line_length": 155,
"avg_line_length": 41.67842031029619,
"alnum_prop": 0.45766497461928934,
"repo_name": "gallifrey17/eden",
"id": "75c755c6ade2c3e475487079db38650d4016f0ea",
"size": "88676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/templates/RMSAmericas/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "2357496"
},
{
"name": "HTML",
"bytes": "1320129"
},
{
"name": "JavaScript",
"bytes": "20033330"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "29444550"
},
{
"name": "Ruby",
"bytes": "3611"
},
{
"name": "Shell",
"bytes": "5022"
},
{
"name": "XSLT",
"bytes": "2811535"
}
],
"symlink_target": ""
}
|
import abjad
from abjad.tools import lilypondnametools
from abjad.tools import spannertools
from abjad.tools import markuptools
class ComplexTextSpanner(spannertools.Spanner):
r'''A complex text spanner.
.. container:: example
::
>>> staff = abjad.Staff("c'4 d'4 r4 e'4")
>>> spanner_one = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> spanner_two = consort.ComplexTextSpanner(
... direction=Down,
... markup='bar',
... )
>>> abjad.attach(spanner_one, staff[:2])
>>> abjad.attach(spanner_two, staff[3:])
::
>>> print(format(staff))
\new Staff {
\once \override TextSpanner.bound-details.left-broken.text = \markup { foo }
\once \override TextSpanner.bound-details.left.text = \markup { foo }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #up
c'4 \startTextSpan
d'4
<> \stopTextSpan
r4
\once \override TextScript.parent-alignment-X = #left
\once \override TextScript.self-alignment-X = #left
e'4 _ \markup { bar }
}
.. container:: example
::
>>> staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> spanner_one = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> spanner_two = consort.ComplexTextSpanner(
... direction=Down,
... markup='bar',
... )
>>> abjad.attach(spanner_one, staff[:2])
>>> abjad.attach(spanner_two, staff[3:])
::
>>> print(format(staff))
\new Staff {
\once \override TextSpanner.bound-details.left-broken.text = \markup { foo }
\once \override TextSpanner.bound-details.left.text = \markup { foo }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #up
c'4 \startTextSpan
d'4
<> \stopTextSpan
e'4
\once \override TextScript.parent-alignment-X = #left
\once \override TextScript.self-alignment-X = #left
f'4 _ \markup { bar }
}
.. container:: example
::
>>> staff = abjad.Staff("c'8 d' e' r r a' b' c''")
>>> spanner_one = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> spanner_two = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> abjad.attach(spanner_one, staff[:3])
>>> abjad.attach(spanner_two, staff[5:])
::
>>> print(format(staff))
\new Staff {
\once \override TextSpanner.bound-details.left-broken.text = \markup { foo }
\once \override TextSpanner.bound-details.left.text = \markup { foo }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #up
c'8 \startTextSpan
d'8
e'8
r8
r8
a'8
b'8
c''8
<> \stopTextSpan
}
.. container:: example
::
>>> staff = abjad.Staff("c'8 d' e' f' g' a' b' c''")
>>> spanner_one = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> spanner_two = consort.ComplexTextSpanner(
... direction=Down,
... markup='bar',
... )
>>> spanner_three = consort.ComplexTextSpanner(
... direction=Up,
... markup='foo',
... )
>>> abjad.attach(spanner_one, staff[:3])
>>> abjad.attach(spanner_two, staff[3:5])
>>> abjad.attach(spanner_three, staff[5:])
::
>>> print(format(staff))
\new Staff {
\once \override TextSpanner.bound-details.left-broken.text = \markup { foo }
\once \override TextSpanner.bound-details.left.text = \markup { foo }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #up
c'8 \startTextSpan
d'8
e'8
<> \stopTextSpan
\once \override TextSpanner.bound-details.left-broken.text = \markup { bar }
\once \override TextSpanner.bound-details.left.text = \markup { bar }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #down
f'8 \startTextSpan
g'8
<> \stopTextSpan
\once \override TextSpanner.bound-details.left-broken.text = \markup { foo }
\once \override TextSpanner.bound-details.left.text = \markup { foo }
\once \override TextSpanner.bound-details.right-broken.text = ##f
\once \override TextSpanner.bound-details.right.text = \markup {
\draw-line
#'(0 . -1)
}
\once \override TextSpanner.dash-fraction = 1
\once \override TextSpanner.direction = #up
a'8 \startTextSpan
b'8
c''8
<> \stopTextSpan
}
'''
### CLASS VARIABLES ###
__slots__ = (
'_direction',
'_markup',
)
### INITIALIZER ###
def __init__(
self,
direction=None,
markup=None,
overrides=None,
):
spannertools.Spanner.__init__(
self,
overrides=overrides,
)
assert direction in (Up, Down, None)
self._direction = direction
self._markup = markuptools.Markup(markup)
### PRIVATE METHODS ###
def _copy_keyword_args(self, new):
new._markup = self.markup
def _get_lilypond_format_bundle(self, leaf):
lilypond_format_bundle = self._get_basic_lilypond_format_bundle(leaf)
if self._is_my_only_leaf(leaf):
previous_is_similar = self._previous_spanner_is_similar(leaf)
next_is_similar = self._next_spanner_is_similar(leaf)
if previous_is_similar and next_is_similar:
return lilypond_format_bundle
elif previous_is_similar:
self._make_spanner_stop(lilypond_format_bundle)
elif next_is_similar:
self._make_spanner_start(lilypond_format_bundle)
else:
self._make_markup(lilypond_format_bundle)
elif self._is_my_first_leaf(leaf):
if not self._previous_spanner_is_similar(leaf):
self._make_spanner_start(lilypond_format_bundle)
elif self._is_my_last_leaf(leaf):
if not self._next_spanner_is_similar(leaf):
self._make_spanner_stop(lilypond_format_bundle)
return lilypond_format_bundle
def _make_markup(self, lilypond_format_bundle):
direction = self.direction or Up
markup = markuptools.Markup(
self.markup.contents,
direction,
)
lilypond_format_bundle.right.markup.append(markup)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextScript',
is_once=True,
property_path=('parent-alignment-X',),
value=Left,
)
lilypond_format_bundle.update(override)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextScript',
is_once=True,
property_path=('self-alignment-X',),
value=Left,
)
lilypond_format_bundle.update(override)
def _make_spanner_start(self, lilypond_format_bundle):
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('bound-details', 'left', 'text'),
value=self.markup,
)
lilypond_format_bundle.update(override)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('bound-details', 'left-broken', 'text'),
value=self.markup,
)
lilypond_format_bundle.update(override)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('bound-details', 'right', 'text'),
value=markuptools.Markup(r"\draw-line #'(0 . -1)")
)
lilypond_format_bundle.update(override)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('bound-details', 'right-broken', 'text'),
value=False,
)
lilypond_format_bundle.update(override)
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('dash-fraction',),
value=1,
)
lilypond_format_bundle.update(override)
if self.direction is not None:
override = lilypondnametools.LilyPondGrobOverride(
grob_name='TextSpanner',
is_once=True,
property_path=('direction',),
value=self.direction,
)
lilypond_format_bundle.update(override)
string = r'\startTextSpan'
lilypond_format_bundle.right.spanner_starts.append(string)
def _make_spanner_stop(self, lilypond_format_bundle):
string = r'<> \stopTextSpan'
lilypond_format_bundle.after.indicators.append(string)
# def _next_spanner_is_similar(self, leaf):
# next_leaf = leaf._get_leaf(1)
# next_spanner = None
# next_spanner_is_similar = False
# if next_leaf is not None:
# spanners = next_leaf._get_spanners(type(self))
# if spanners:
# assert len(spanners) == 1
# next_spanner = tuple(spanners)[0]
# if next_spanner.direction == self.direction:
# if next_spanner.markup == self.markup:
# next_spanner_is_similar = True
# return next_spanner_is_similar
def _next_spanner_is_similar(self, leaf):
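        # Look ahead up to four leaves: plain rests are skipped, a multimeasure
        # rest ends the search, and so does a pitched leaf that carries no
        # spanner of this type. An adjacent spanner only counts as similar
        # when both its direction and its markup match this spanner's.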
leaf_prototype = (abjad.Note, abjad.Chord)
next_spanner = None
next_spanner_is_similar = False
for index in range(1, 5):
next_leaf = leaf._get_leaf(index)
if next_leaf is None:
break
elif isinstance(next_leaf, abjad.MultimeasureRest):
break
has_spanner = next_leaf._has_spanner(type(self),
in_parentage=True)
if not has_spanner:
if isinstance(next_leaf, leaf_prototype):
break
continue
next_spanner = next_leaf._get_spanner(type(self))
if next_spanner.direction != self.direction:
break
if next_spanner.markup != self.markup:
break
next_spanner_is_similar = True
return next_spanner_is_similar
def _previous_spanner_is_similar(self, leaf):
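        # Mirror of _next_spanner_is_similar(), scanning up to four leaves
        # backwards with the same skip/stop rules.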
leaf_prototype = (abjad.Note, abjad.Chord)
previous_spanner = None
previous_spanner_is_similar = False
for index in range(1, 5):
previous_leaf = leaf._get_leaf(-index)
if previous_leaf is None:
break
elif isinstance(previous_leaf, abjad.MultimeasureRest):
break
has_spanner = previous_leaf._has_spanner(type(self),
in_parentage=True)
if not has_spanner:
if isinstance(previous_leaf, leaf_prototype):
break
continue
previous_spanner = previous_leaf._get_spanner(type(self))
if previous_spanner.direction != self.direction:
break
if previous_spanner.markup != self.markup:
break
previous_spanner_is_similar = True
return previous_spanner_is_similar
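    # Illustrative behavior (inferred from the 4-leaf window above): two
    # runs of notes carrying spanners with equal markup and direction that
    # are separated only by a rest or two are treated as one continuous
    # spanner, so the intermediate stop/start is suppressed; a multimeasure
    # rest or an unspanned note/chord inside the window breaks the chain.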
### PUBLIC PROPERTIES ###
@property
def direction(self):
return self._direction
@property
def markup(self):
return self._markup
|
{
"content_hash": "895251e53bdb3487831f62eede428d31",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 92,
"avg_line_length": 36.404580152671755,
"alnum_prop": 0.5087020339693856,
"repo_name": "josiah-wolf-oberholtzer/consort",
"id": "1436eab59c81248ea974602be462b2da6e231c8d",
"size": "14307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "consort/tools/ComplexTextSpanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "LilyPond",
"bytes": "22101"
},
{
"name": "Python",
"bytes": "865475"
}
],
"symlink_target": ""
}
|
import copy
import math
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
netapp_lib = importutils.try_import('netapp_lib')
if netapp_lib:
from netapp_lib.api.zapi import errors as netapp_error
from netapp_lib.api.zapi import zapi as netapp_api
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(client_base.Client):
def __init__(self, **kwargs):
super(Client, self).__init__(**kwargs)
self.vserver = kwargs.get('vserver', None)
self.connection.set_vserver(self.vserver)
# Default values to run first api
self.connection.set_api_version(1, 15)
(major, minor) = self.get_ontapi_version(cached=False)
self.connection.set_api_version(major, minor)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
server.set_vserver(vserver)
result = server.invoke_successfully(na_element, True)
return result
def set_vserver(self, vserver):
self.connection.set_vserver(vserver)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
iscsi_if_iter = netapp_api.NaElement('iscsi-interface-get-iter')
result = self.connection.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_if_list = attr_list.get_children()
for iscsi_if in iscsi_if_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
d['interface-enabled'] = iscsi_if.get_child_content(
'is-interface-enabled')
tgt_list.append(d)
return tgt_list
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
wwpns = []
port_name_list_api = netapp_api.NaElement('fcp-port-name-get-iter')
port_name_list_api.add_new_child('max-records', '100')
result = self.connection.invoke_successfully(port_name_list_api, True)
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
for port_name_info in result.get_child_by_name(
'attributes-list').get_children():
if port_name_info.get_child_content('is-used') != 'true':
continue
wwpn = port_name_info.get_child_content('port-name').lower()
wwpns.append(wwpn)
return wwpns
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = netapp_api.NaElement('iscsi-service-get-iter')
result = self.connection.invoke_successfully(iscsi_service_iter, True)
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
LOG.debug('No iSCSI service found for vserver %s', self.vserver)
return None
def get_lun_list(self):
"""Gets the list of LUNs on filer.
Gets the LUNs from cluster with vserver.
"""
luns = []
tag = None
while True:
api = netapp_api.NaElement('lun-get-iter')
api.add_new_child('max-records', '100')
if tag:
api.add_new_child('tag', tag, True)
lun_info = netapp_api.NaElement('lun-info')
lun_info.add_new_child('vserver', self.vserver)
query = netapp_api.NaElement('query')
query.add_child_elem(lun_info)
api.add_child_elem(query)
result = self.connection.invoke_successfully(api, True)
if result.get_child_by_name('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
luns.extend(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
return luns
def get_lun_map(self, path):
"""Gets the LUN map by LUN path."""
tag = None
map_list = []
while True:
lun_map_iter = netapp_api.NaElement('lun-map-get-iter')
lun_map_iter.add_new_child('max-records', '100')
if tag:
lun_map_iter.add_new_child('tag', tag, True)
query = netapp_api.NaElement('query')
lun_map_iter.add_child_elem(query)
query.add_node_with_children('lun-map-info', **{'path': path})
result = self.connection.invoke_successfully(lun_map_iter, True)
tag = result.get_child_content('next-tag')
if result.get_child_content('num-records') and \
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
lun_maps = attr_list.get_children()
for lun_map in lun_maps:
lun_m = dict()
lun_m['initiator-group'] = lun_map.get_child_content(
'initiator-group')
lun_m['lun-id'] = lun_map.get_child_content('lun-id')
lun_m['vserver'] = lun_map.get_child_content('vserver')
map_list.append(lun_m)
if tag is None:
break
return map_list
def _get_igroup_by_initiator_query(self, initiator, tag):
igroup_get_iter = netapp_api.NaElement('igroup-get-iter')
igroup_get_iter.add_new_child('max-records', '100')
if tag:
igroup_get_iter.add_new_child('tag', tag, True)
query = netapp_api.NaElement('query')
igroup_info = netapp_api.NaElement('initiator-group-info')
query.add_child_elem(igroup_info)
igroup_info.add_new_child('vserver', self.vserver)
initiators = netapp_api.NaElement('initiators')
igroup_info.add_child_elem(initiators)
igroup_get_iter.add_child_elem(query)
initiators.add_node_with_children(
'initiator-info', **{'initiator-name': initiator})
# limit results to just the attributes of interest
desired_attrs = netapp_api.NaElement('desired-attributes')
desired_igroup_info = netapp_api.NaElement('initiator-group-info')
desired_igroup_info.add_node_with_children(
'initiators', **{'initiator-info': None})
desired_igroup_info.add_new_child('vserver', None)
desired_igroup_info.add_new_child('initiator-group-name', None)
desired_igroup_info.add_new_child('initiator-group-type', None)
desired_igroup_info.add_new_child('initiator-group-os-type', None)
desired_attrs.add_child_elem(desired_igroup_info)
igroup_get_iter.add_child_elem(desired_attrs)
return igroup_get_iter
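    # For reference, the iterator built above corresponds roughly to this
    # zAPI request body (illustrative; the vserver and initiator values
    # are assumed):
    #
    #   <igroup-get-iter>
    #     <max-records>100</max-records>
    #     <query>
    #       <initiator-group-info>
    #         <vserver>vs0</vserver>
    #         <initiators>
    #           <initiator-info>
    #             <initiator-name>iqn.1993-08.org.debian:01:abcd</initiator-name>
    #           </initiator-info>
    #         </initiators>
    #       </initiator-group-info>
    #     </query>
    #     <desired-attributes>...</desired-attributes>
    #   </igroup-get-iter>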
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
tag = None
igroup_list = []
if not initiator_list:
return igroup_list
initiator_set = set(initiator_list)
while True:
# C-mode getter APIs can't do an 'and' query, so match the first
# initiator (which will greatly narrow the search results) and
# filter the rest in this method.
query = self._get_igroup_by_initiator_query(initiator_list[0], tag)
result = self.connection.invoke_successfully(query, True)
tag = result.get_child_content('next-tag')
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
for igroup_info in result.get_child_by_name(
'attributes-list').get_children():
initiator_set_for_igroup = set()
for initiator_info in igroup_info.get_child_by_name(
'initiators').get_children():
initiator_set_for_igroup.add(
initiator_info.get_child_content('initiator-name'))
if initiator_set == initiator_set_for_igroup:
igroup = {'initiator-group-os-type':
igroup_info.get_child_content(
'initiator-group-os-type'),
'initiator-group-type':
igroup_info.get_child_content(
'initiator-group-type'),
'initiator-group-name':
igroup_info.get_child_content(
'initiator-group-name')}
igroup_list.append(igroup)
if tag is None:
break
return igroup_list
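    # Example of the narrowing strategy above (initiator names assumed):
    # for initiator_list = ['iqn.a', 'iqn.b'], the backend query matches
    # every igroup containing 'iqn.a'; the exact-set comparison then keeps
    # only igroups whose full initiator set equals {'iqn.a', 'iqn.b'},
    # dropping supersets such as {'iqn.a', 'iqn.b', 'iqn.c'}.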
def clone_lun(self, volume, name, new_name, space_reserved='true',
qos_policy_group_name=None, src_block=0, dest_block=0,
block_count=0):
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for _call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
clone_create = netapp_api.NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': name,
'destination-path': new_name,
'space-reserve': space_reserved})
if qos_policy_group_name is not None:
clone_create.add_new_child('qos-policy-group-name',
qos_policy_group_name)
if block_count > 0:
block_ranges = netapp_api.NaElement("block-ranges")
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for _segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range =\
netapp_api.NaElement.create_node_with_children(
'block-range',
**{'source-block-number':
six.text_type(src_block),
'destination-block-number':
six.text_type(dest_block),
'block-count':
six.text_type(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_create.add_child_elem(block_ranges)
self.connection.invoke_successfully(clone_create, True)
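    # Worked example of the chunking above (block_count assumed): one
    # clone-create call covers at most z_limit = 32 * 2**24 = 536870912
    # blocks, so block_count = 2**30 needs z_calls = 2 zAPI calls, each
    # carrying 32 block-range elements of 2**24 blocks.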
def get_lun_by_args(self, **args):
"""Retrieves LUN with specified args."""
lun_iter = netapp_api.NaElement('lun-get-iter')
lun_iter.add_new_child('max-records', '100')
query = netapp_api.NaElement('query')
lun_iter.add_child_elem(query)
query.add_node_with_children('lun-info', **args)
luns = self.connection.invoke_successfully(lun_iter, True)
attr_list = luns.get_child_by_name('attributes-list')
if not attr_list:
return []
return attr_list.get_children()
def file_assign_qos(self, flex_vol, qos_policy_group_name, file_path):
"""Assigns the named QoS policy-group to a file."""
api_args = {
'volume': flex_vol,
'qos-policy-group-name': qos_policy_group_name,
'file': file_path,
'vserver': self.vserver,
}
return self.send_request('file-assign-qos', api_args, False)
def provision_qos_policy_group(self, qos_policy_group_info):
"""Create QOS policy group on the backend if appropriate."""
if qos_policy_group_info is None:
return
# Legacy QOS uses externally provisioned QOS policy group,
# so we don't need to create one on the backend.
legacy = qos_policy_group_info.get('legacy')
if legacy is not None:
return
spec = qos_policy_group_info.get('spec')
if spec is not None:
self.qos_policy_group_create(spec['policy_name'],
spec['max_throughput'])
def qos_policy_group_create(self, qos_policy_group_name, max_throughput):
"""Creates a QOS policy group."""
api_args = {
'policy-group': qos_policy_group_name,
'max-throughput': max_throughput,
'vserver': self.vserver,
}
return self.send_request('qos-policy-group-create', api_args, False)
def qos_policy_group_delete(self, qos_policy_group_name):
"""Attempts to delete a QOS policy group."""
api_args = {
'policy-group': qos_policy_group_name,
}
return self.send_request('qos-policy-group-delete', api_args, False)
def qos_policy_group_rename(self, qos_policy_group_name, new_name):
"""Renames a QOS policy group."""
api_args = {
'policy-group-name': qos_policy_group_name,
'new-name': new_name,
}
return self.send_request('qos-policy-group-rename', api_args, False)
def mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
"""Do (soft) delete of backing QOS policy group for a cinder volume."""
if qos_policy_group_info is None:
return
spec = qos_policy_group_info.get('spec')
# For cDOT we want to delete the QoS policy group that we created for
# this cinder volume. Because the QoS policy may still be "in use"
# after the zapi call to delete the volume itself returns successfully,
# we instead rename the QoS policy group using a specific pattern and
# later attempt on a best effort basis to delete any QoS policy groups
# matching that pattern.
if spec is not None:
current_name = spec['policy_name']
new_name = DELETED_PREFIX + current_name
try:
self.qos_policy_group_rename(current_name, new_name)
except netapp_api.NaApiError as ex:
msg = _LW('Rename failure in cleanup of cDOT QOS policy group '
'%(name)s: %(ex)s')
LOG.warning(msg, {'name': current_name, 'ex': ex})
        # Attempt to delete any QoS policies named "deleted_cinder_*".
self.remove_unused_qos_policy_groups()
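    # Example of the soft-delete pattern above (policy name assumed): a
    # group created as 'qos_policy_abc' is renamed to
    # 'deleted_cinder_qos_policy_abc'; remove_unused_qos_policy_groups()
    # later matches it via the 'deleted_cinder_*' query and deletes it on
    # a best-effort basis once the backend no longer holds it in use.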
def remove_unused_qos_policy_groups(self):
"""Deletes all QOS policy groups that are marked for deletion."""
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': '%s*' % DELETED_PREFIX,
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
try:
self.send_request('qos-policy-group-delete-iter', api_args, False)
except netapp_api.NaApiError as ex:
msg = 'Could not delete QOS policy groups. Details: %(ex)s'
msg_args = {'ex': ex}
            LOG.debug(msg, msg_args)
def set_lun_qos_policy_group(self, path, qos_policy_group):
"""Sets qos_policy_group on a LUN."""
api_args = {
'path': path,
'qos-policy-group': qos_policy_group,
}
return self.send_request('lun-set-qos-policy-group', api_args)
def get_if_info_by_ip(self, ip):
"""Gets the network interface info by ip."""
net_if_iter = netapp_api.NaElement('net-interface-get-iter')
net_if_iter.add_new_child('max-records', '10')
query = netapp_api.NaElement('query')
net_if_iter.add_child_elem(query)
query.add_node_with_children(
'net-interface-info',
**{'address': na_utils.resolve_hostname(ip)})
result = self.connection.invoke_successfully(net_if_iter, True)
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
attr_list = result.get_child_by_name('attributes-list')
return attr_list.get_children()
raise exception.NotFound(
_('No interface found on cluster for ip %s') % ip)
def get_vol_by_junc_vserver(self, vserver, junction):
"""Gets the volume by junction path and vserver."""
vol_iter = netapp_api.NaElement('volume-get-iter')
vol_iter.add_new_child('max-records', '10')
query = netapp_api.NaElement('query')
vol_iter.add_child_elem(query)
vol_attrs = netapp_api.NaElement('volume-attributes')
query.add_child_elem(vol_attrs)
vol_attrs.add_node_with_children(
'volume-id-attributes',
**{'junction-path': junction,
'owning-vserver-name': vserver})
des_attrs = netapp_api.NaElement('desired-attributes')
des_attrs.add_node_with_children('volume-attributes',
**{'volume-id-attributes': None})
vol_iter.add_child_elem(des_attrs)
result = self._invoke_vserver_api(vol_iter, vserver)
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
attr_list = result.get_child_by_name('attributes-list')
vols = attr_list.get_children()
vol_id = vols[0].get_child_by_name('volume-id-attributes')
return vol_id.get_child_content('name')
msg_fmt = {'vserver': vserver, 'junction': junction}
raise exception.NotFound(_("No volume on cluster with vserver "
"%(vserver)s and junction path "
"%(junction)s ") % msg_fmt)
def clone_file(self, flex_vol, src_path, dest_path, vserver,
dest_exists=False):
"""Clones file on vserver."""
LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, "
"dest %(dest_path)s, vserver %(vserver)s",
{'volume': flex_vol, 'src_path': src_path,
'dest_path': dest_path, 'vserver': vserver})
clone_create = netapp_api.NaElement.create_node_with_children(
'clone-create',
**{'volume': flex_vol, 'source-path': src_path,
'destination-path': dest_path})
major, minor = self.connection.get_api_version()
if major == 1 and minor >= 20 and dest_exists:
clone_create.add_new_child('destination-exists', 'true')
self._invoke_vserver_api(clone_create, vserver)
def get_file_usage(self, path, vserver):
"""Gets the file unique bytes."""
LOG.debug('Getting file usage for %s', path)
file_use = netapp_api.NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self._invoke_vserver_api(file_use, vserver)
unique_bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s',
{'path': path, 'bytes': unique_bytes})
return unique_bytes
def get_vserver_ips(self, vserver):
"""Get ips for the vserver."""
result = netapp_api.invoke_api(
self.connection, api_name='net-interface-get-iter',
is_iter=True, tunnel=vserver)
if_list = []
for res in result:
records = res.get_child_content('num-records')
            if records and int(records) > 0:
attr_list = res['attributes-list']
ifs = attr_list.get_children()
if_list.extend(ifs)
return if_list
def check_apis_on_cluster(self, api_list=None):
"""Checks API availability and permissions on cluster.
Checks API availability and permissions for executing user.
Returns a list of failed apis.
"""
api_list = api_list or []
failed_apis = []
if api_list:
api_version = self.connection.get_api_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 20:
for api_name in api_list:
na_el = netapp_api.NaElement(api_name)
try:
self.connection.invoke_successfully(na_el)
except Exception as e:
if isinstance(e, netapp_api.NaApiError):
if(e.code == netapp_error.EAPINOTFOUND
or e.code == netapp_error.EAPIPRIVILEGE):
failed_apis.append(api_name)
elif major == 1 and minor >= 20:
failed_apis = copy.copy(api_list)
result = netapp_api.invoke_api(
self.connection,
api_name='system-user-capability-get-iter',
api_family='cm',
additional_elems=None,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
capabilities = attr_list.get_children()
for capability in capabilities:
op_list = capability.get_child_by_name(
'operation-list')
if op_list:
ops = op_list.get_children()
for op in ops:
apis = op.get_child_content(
'api-name')
if apis:
api_list = apis.split(',')
for api_name in api_list:
if (api_name and
api_name.strip()
in failed_apis):
failed_apis.remove(
api_name)
else:
continue
else:
msg = _("Unsupported Clustered Data ONTAP version.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
return failed_apis
def get_operational_network_interface_addresses(self):
"""Gets the IP addresses of operational LIFs on the vserver."""
api_args = {
'query': {
'net-interface-info': {
'operational-status': 'up'
}
},
'desired-attributes': {
'net-interface-info': {
'address': None,
}
}
}
result = self.send_request('net-interface-get-iter', api_args)
lif_info_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
return [lif_info.get_child_content('address') for lif_info in
lif_info_list.get_children()]
def get_flexvol_capacity(self, flexvol_path):
"""Gets total capacity and free capacity, in bytes, of the flexvol."""
api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'junction-path': flexvol_path
}
}
},
'desired-attributes': {
'volume-attributes': {
'volume-space-attributes': {
'size-available': None,
'size-total': None,
}
}
},
}
result = self.send_request('volume-get-iter', api_args)
attributes_list = result.get_child_by_name('attributes-list')
volume_attributes = attributes_list.get_child_by_name(
'volume-attributes')
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
size_available = float(
volume_space_attributes.get_child_content('size-available'))
size_total = float(
volume_space_attributes.get_child_content('size-total'))
return size_total, size_available
|
{
"content_hash": "da3e28f79022cd51ade40d0f80d0475f",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 79,
"avg_line_length": 43.550580431177444,
"alnum_prop": 0.5348996610943986,
"repo_name": "scottdangelo/RemoveVolumeMangerLocks",
"id": "ce199bf656b0df8d91d5ca978df4bed6e97fe4fd",
"size": "27006",
"binary": false,
"copies": "9",
"ref": "refs/heads/RemoveVolumeManagerLocks",
"path": "cinder/volume/drivers/netapp/dataontap/client/client_cmode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13128387"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.decorators import (api_view, authentication_classes,
permission_classes)
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.fields import BooleanField, CharField
from rest_framework.filters import BaseFilterBackend, OrderingFilter
from rest_framework.mixins import (CreateModelMixin, DestroyModelMixin,
ListModelMixin, RetrieveModelMixin)
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.permissions import BasePermission
from rest_framework.relations import PrimaryKeyRelatedField, RelatedField
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from rest_framework.viewsets import GenericViewSet
from addons.models import Addon
from amo.urlresolvers import reverse
from users.models import UserProfile
from comm.models import (CommunicationNote, CommunicationNoteRead,
CommunicationThread, user_has_perm_note,
user_has_perm_thread)
from comm.tasks import consume_email, mark_thread_read
from comm.utils import filter_notes_by_read_status
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, SilentListModelMixin
from mkt.reviewers.utils import send_note_emails
from mkt.webpay.forms import PrepareForm
class AuthorSerializer(ModelSerializer):
name = CharField()
class Meta:
model = UserProfile
fields = ('name',)
class NoteSerializer(ModelSerializer):
body = CharField()
author_meta = AuthorSerializer(source='author', read_only=True)
reply_to = PrimaryKeyRelatedField(required=False)
is_read = SerializerMethodField('is_read_by_user')
def is_read_by_user(self, obj):
return obj.read_by_users.filter(
pk=self.get_request().amo_user.id).exists()
class Meta:
model = CommunicationNote
fields = ('id', 'author', 'author_meta', 'note_type', 'body',
'created', 'thread', 'reply_to', 'is_read')
class AddonSerializer(ModelSerializer):
name = CharField()
thumbnail_url = RelatedField('thumbnail_url')
url = CharField(source='get_absolute_url')
review_url = SerializerMethodField('get_review_url')
class Meta:
model = Addon
fields = ('name', 'url', 'thumbnail_url', 'slug', 'review_url')
def get_review_url(self, obj):
return reverse('reviewers.apps.review', args=[obj.app_slug])
class ThreadSerializer(ModelSerializer):
addon_meta = AddonSerializer(source='addon', read_only=True)
recent_notes = SerializerMethodField('get_recent_notes')
notes_count = SerializerMethodField('get_notes_count')
class Meta:
model = CommunicationThread
fields = ('id', 'addon', 'addon_meta', 'version', 'notes_count',
'recent_notes', 'created', 'modified')
view_name = 'comm-thread-detail'
def get_recent_notes(self, obj):
NoteSerializer.get_request = self.get_request
notes = (obj.notes.with_perms(self.get_request().amo_user, obj)
.order_by('-created')[:5])
return NoteSerializer(notes).data
def get_notes_count(self, obj):
return obj.notes.count()
class ThreadPermission(BasePermission):
"""
Permission wrapper for checking if the authenticated user has the
permission to view the thread.
"""
def has_permission(self, request, view):
# Let `has_object_permission` handle the permissions when we retrieve
# an object.
if view.action == 'retrieve':
return True
if not request.user.is_authenticated():
raise PermissionDenied()
return True
def has_object_permission(self, request, view, obj):
"""
Make sure we give correct permissions to read/write the thread.
"""
if not request.user.is_authenticated() or obj.read_permission_public:
return obj.read_permission_public
return user_has_perm_thread(obj, request.amo_user)
class NotePermission(ThreadPermission):
def has_permission(self, request, view):
thread_id = view.kwargs['thread_id']
# We save the thread in the view object so we can use it later.
view.comm_thread = get_object_or_404(CommunicationThread,
id=thread_id)
if view.action == 'list':
return ThreadPermission.has_object_permission(self,
request, view, view.comm_thread)
if view.action == 'create':
if not request.user.is_authenticated():
return False
# Determine permission to add the note based on the thread
# permission.
return ThreadPermission.has_object_permission(self,
request, view, view.comm_thread)
return True
def has_object_permission(self, request, view, obj):
# Has thread obj-level permission AND note obj-level permission.
return (
ThreadPermission.has_object_permission(self, request, view,
obj.thread) and
user_has_perm_note(obj, request.amo_user))
class EmailCreationPermission(object):
"""Permit if client's IP address is whitelisted."""
def has_permission(self, request, view):
auth_token = request.META.get('HTTP_POSTFIX_AUTH_TOKEN')
if auth_token and auth_token not in settings.POSTFIX_AUTH_TOKEN:
return False
remote_ip = request.META.get('REMOTE_ADDR')
return remote_ip and (
remote_ip in settings.WHITELISTED_CLIENTS_EMAIL_API)
class NoAuthentication(BaseAuthentication):
def authenticate(self, request):
return request._request.user, None
class ReadUnreadFilter(BaseFilterBackend):
filter_param = 'show_read'
def filter_queryset(self, request, queryset, view):
"""
        Return only read notes if `show_read=true` and only unread
        notes if `show_read=false`.
"""
val = request.GET.get('show_read')
if val is None:
return queryset
show_read = BooleanField().from_native(val)
return filter_notes_by_read_status(queryset, request.amo_user,
show_read)
class CommViewSet(CORSMixin, GenericViewSet):
"""Some overriding and mixin stuff to adapt other viewsets."""
parser_classes = (FormParser, JSONParser)
def patched_get_request(self):
return lambda x: self.request
def get_serializer_class(self):
original = super(CommViewSet, self).get_serializer_class()
original.get_request = self.patched_get_request()
return original
def partial_update(self, request, *args, **kwargs):
val = BooleanField().from_native(request.DATA.get('is_read'))
if val:
self.mark_as_read(request.amo_user)
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response('Requested update operation not supported',
status=status.HTTP_403_FORBIDDEN)
class ThreadViewSet(SilentListModelMixin, RetrieveModelMixin,
DestroyModelMixin, CreateModelMixin, CommViewSet):
model = CommunicationThread
serializer_class = ThreadSerializer
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
permission_classes = (ThreadPermission,)
filter_backends = (OrderingFilter,)
cors_allowed_methods = ['get', 'post', 'patch']
def list(self, request):
self.serializer_class = ThreadSerializer
profile = request.amo_user
# We list all the threads the user has posted a note to.
notes = profile.comm_notes.values_list('thread', flat=True)
# We list all the threads where the user has been CC'd.
cc = profile.comm_thread_cc.values_list('thread', flat=True)
# This gives 404 when an app with given slug/id is not found.
if 'app' in request.GET:
form = PrepareForm(request.GET)
if not form.is_valid():
raise Http404()
notes, cc = list(notes), list(cc)
queryset = CommunicationThread.objects.filter(pk__in=notes + cc,
addon=form.cleaned_data['app'])
else:
# We list all the threads which uses an add-on authored by the
# user and with read permissions for add-on devs.
notes, cc = list(notes), list(cc)
addons = list(profile.addons.values_list('pk', flat=True))
q_dev = Q(addon__in=addons, read_permission_developer=True)
queryset = CommunicationThread.objects.filter(
Q(pk__in=notes + cc) | q_dev)
self.queryset = queryset
return SilentListModelMixin.list(self, request)
def mark_as_read(self, profile):
mark_thread_read(self.get_object(), profile)
class NoteViewSet(ListModelMixin, CreateModelMixin, RetrieveModelMixin,
DestroyModelMixin, CommViewSet):
model = CommunicationNote
serializer_class = NoteSerializer
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication,)
permission_classes = (NotePermission,)
filter_backends = (OrderingFilter, ReadUnreadFilter)
cors_allowed_methods = ['get', 'post', 'delete', 'patch']
def get_queryset(self):
return CommunicationNote.objects.with_perms(
self.request.amo_user, self.comm_thread)
def get_serializer(self, instance=None, data=None,
files=None, many=False, partial=False):
if self.action == 'create':
# HACK: We want to set the `author` as the current user
# (read-only), yet we can't specify `author` as a `read_only`
# field because then the serializer won't pick it up at the time
# of deserialization.
data_dict = {'author': self.request.amo_user.id,
'thread': self.comm_thread.id,
'note_type': data['note_type'],
'body': data['body']}
else:
data_dict = data
return super(NoteViewSet, self).get_serializer(data=data_dict,
files=files, instance=instance, many=many, partial=partial)
def inherit_permissions(self, obj, parent):
for key in ('developer', 'reviewer', 'senior_reviewer',
'mozilla_contact', 'staff'):
perm = 'read_permission_%s' % key
setattr(obj, perm, getattr(parent, perm))
def post_save(self, obj, created=False):
if created:
send_note_emails(obj)
def pre_save(self, obj):
"""Inherit permissions from the thread."""
self.inherit_permissions(obj, self.comm_thread)
def mark_as_read(self, profile):
CommunicationNoteRead.objects.get_or_create(note=self.get_object(),
user=profile)
class ReplyViewSet(NoteViewSet):
cors_allowed_methods = ['get', 'post']
def initialize_request(self, request, *args, **kwargs):
self.parent_note = get_object_or_404(CommunicationNote,
id=kwargs['note_id'])
return super(ReplyViewSet, self).initialize_request(request, *args,
**kwargs)
def get_queryset(self):
return self.parent_note.replies.all()
def pre_save(self, obj):
"""Inherit permissions from the parent note."""
self.inherit_permissions(obj, self.parent_note)
obj.reply_to = self.parent_note
@api_view(['POST'])
@authentication_classes((NoAuthentication,))
@permission_classes((EmailCreationPermission,))
def post_email(request):
email_body = request.POST.get('body')
if not email_body:
raise ParseError(
detail='email_body not present in the POST data.')
consume_email.apply_async((email_body,))
return Response(status=201)
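# Illustrative request (endpoint URL and token assumed); the client IP must
# also be in settings.WHITELISTED_CLIENTS_EMAIL_API:
#
#   curl -X POST https://server/api/v1/comm/email/ \
#        -H 'Postfix-Auth-Token: <token>' \
#        --data-urlencode 'body@email.txt'
#
# A 201 response means the raw email body was queued for async consumption.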
|
{
"content_hash": "de64be872aaa6df233926cbb07c80409",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 77,
"avg_line_length": 37.4251497005988,
"alnum_prop": 0.64096,
"repo_name": "Joergen/zamboni",
"id": "8bf37f553dc7ecf4c5fe57f04c0095b4cb18ebd9",
"size": "12500",
"binary": false,
"copies": "1",
"ref": "refs/heads/uge43",
"path": "mkt/comm/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "608838"
},
{
"name": "JavaScript",
"bytes": "1750529"
},
{
"name": "Perl",
"bytes": "565"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6063534"
},
{
"name": "Ruby",
"bytes": "1865"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
    # Make sure that features highly correlated to the target have a higher
    # importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_diabetes(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
    # Make sure that features highly correlated to the target have a higher
    # importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_robustness_to_high_cardinality_noisy_feature(n_jobs, seed=42):
# Permutation variable importance should not be affected by the high
# cardinality bias of traditional feature importances, especially when
# computed on a held-out test set:
rng = np.random.RandomState(seed)
n_repeats = 5
n_samples = 1000
n_classes = 5
n_informative_features = 2
n_noise_features = 1
n_features = n_informative_features + n_noise_features
# Generate a multiclass classification dataset and a set of informative
# binary features that can be used to predict some classes of y exactly
# while leaving some classes unexplained to make the problem harder.
classes = np.arange(n_classes)
y = rng.choice(classes, size=n_samples)
X = np.hstack([(y == c).reshape(-1, 1)
for c in classes[:n_informative_features]])
X = X.astype(np.float32)
# Not all target classes are explained by the binary class indicator
# features:
assert n_informative_features < n_classes
    # Add another noisy feature with high cardinality (numerical) values
    # that can be used to overfit the training data.
X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
assert X.shape == (n_samples, n_features)
    # Split the dataset to be able to evaluate on a held-out test set. The
    # test size should be large enough for importance measurements to be
    # stable:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=rng)
clf = RandomForestClassifier(n_estimators=5, random_state=rng)
clf.fit(X_train, y_train)
# Variable importances computed by impurity decrease on the tree node
# splits often use the noisy features in splits. This can give misleading
# impression that high cardinality noisy variables are the most important:
tree_importances = clf.feature_importances_
informative_tree_importances = tree_importances[:n_informative_features]
noisy_tree_importances = tree_importances[n_informative_features:]
assert informative_tree_importances.max() < noisy_tree_importances.min()
# Let's check that permutation-based feature importances do not have this
# problem.
r = permutation_importance(clf, X_test, y_test, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert r.importances.shape == (X.shape[1], n_repeats)
# Split the importances between informative and noisy features
informative_importances = r.importances_mean[:n_informative_features]
noisy_importances = r.importances_mean[n_informative_features:]
    # Because we do not have a binary variable explaining each target class,
# the RF model will have to use the random variable to make some
# (overfitting) splits (as max_depth is not set). Therefore the noisy
# variables will be non-zero but with small values oscillating around
# zero:
assert max(np.abs(noisy_importances)) > 1e-7
assert noisy_importances.max() < 0.05
# The binary features correlated with y should have a higher importance
# than the high cardinality noisy features.
# The maximum test accuracy is 2 / 5 == 0.4, each informative feature
# contributing approximately a bit more than 0.2 of accuracy.
assert informative_importances.min() > 0.15
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regression():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
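    # Why 2 * lr.coef_ ** 2 (sketch): with standardized features, permuting
    # column j replaces x_j by an independent copy x'_j, so the residual
    # gains a term w_j * (x_j - x'_j) whose variance is
    # w_j**2 * (Var(x_j) + Var(x'_j)) = 2 * w_j**2; the MSE-based
    # importance measures exactly that increase.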
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
    # Then actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
    # This test checks that the column shuffling logic has the same behavior
    # on both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index to the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
    # Now check that importances computed on the dataframe match the values
    # of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
assert X.nbytes > 1e6 # trigger joblib memmaping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
# permutating feature should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
|
{
"content_hash": "3e47d099c059456c2388ab4ee18a8ed8",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 79,
"avg_line_length": 39.55807365439094,
"alnum_prop": 0.6842595244915497,
"repo_name": "bnaul/scikit-learn",
"id": "2b381e9a20b1a86706a8ca6b94eaaae817b6f9be",
"size": "13964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/inspection/tests/test_permutation_importance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
}
|
import sys, shutil, os, hashlib
def usage():
    print("This program moves all .tile files in the current directory to")
    print("\ta file named based on their MD5 values.")
    print("usage: move_tiles.py")
if __name__ == '__main__':
    if "--help" in sys.argv:
        usage()
    else:
        for filename in os.listdir("."):
            if filename.endswith(".tile"):
                m = hashlib.md5()
                # Hash the raw bytes; text mode would break on binary tiles.
                with open(filename, "rb") as tile_file:
                    m.update(tile_file.read())
                shutil.move(filename, m.hexdigest() + ".tile")
|
{
"content_hash": "5148a50583ea65ef2288e7589961966b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 31.375,
"alnum_prop": 0.545816733067729,
"repo_name": "eaburns/pbnf",
"id": "0b8f888f6d281327600f9281a8e5df520624a61a",
"size": "653",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tile-gen/move_tiles.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1262"
},
{
"name": "C",
"bytes": "108921"
},
{
"name": "C++",
"bytes": "391886"
},
{
"name": "OCaml",
"bytes": "43036"
},
{
"name": "Objective-C",
"bytes": "2776"
},
{
"name": "Python",
"bytes": "9222"
},
{
"name": "Shell",
"bytes": "750"
},
{
"name": "TeX",
"bytes": "5641"
}
],
"symlink_target": ""
}
|
from longclaw.configuration.models import Configuration
def currency(request):
config = Configuration.for_site(request.site)
return {
'currency_html_code': config.currency_html_code,
'currency': config.currency
}
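# A minimal sketch (settings layout assumed) of enabling this context
# processor so templates can use {{ currency }} and {{ currency_html_code }}:
#
#   TEMPLATES = [{
#       ...
#       'OPTIONS': {
#           'context_processors': [
#               ...
#               'longclaw.configuration.context_processors.currency',
#           ],
#       },
#   }]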
|
{
"content_hash": "78de7ad99250c757b51b5dd3e423a8a3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 30.25,
"alnum_prop": 0.7066115702479339,
"repo_name": "JamesRamm/longclaw",
"id": "aa1373c4a225f3a200120624e66b8f8fa924f50a",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "longclaw/configuration/context_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1437"
},
{
"name": "Dockerfile",
"bytes": "148"
},
{
"name": "HTML",
"bytes": "20376"
},
{
"name": "JavaScript",
"bytes": "37905"
},
{
"name": "Makefile",
"bytes": "1562"
},
{
"name": "Python",
"bytes": "172472"
},
{
"name": "Shell",
"bytes": "970"
}
],
"symlink_target": ""
}
|
import numpy as np
import tensorflow as tf
def pairwise_add(u, v=None, is_batch=False):
"""
performs a pairwise summation between vectors (possibly the same)
Parameters:
----------
u: Tensor (n, ) | (n, 1)
v: Tensor (n, ) | (n, 1) [optional]
is_batch: bool
a flag for whether the vectors come in a batch
ie.: whether the vectors has a shape of (b,n) or (b,n,1)
Returns: Tensor (n, n)
Raises: ValueError
"""
u_shape = u.get_shape().as_list()
if len(u_shape) > 2 and not is_batch:
raise ValueError("Expected at most 2D tensors, but got %dD" % len(u_shape))
if len(u_shape) > 3 and is_batch:
raise ValueError("Expected at most 2D tensor batches, but got %dD" % len(u_shape))
if v is None:
v = u
else:
v_shape = v.get_shape().as_list()
if u_shape != v_shape:
raise ValueError("Shapes %s and %s do not match" % (u_shape, v_shape))
n = u_shape[0] if not is_batch else u_shape[1]
column_u = tf.reshape(u, (-1, 1) if not is_batch else (-1, n, 1))
U = tf.concat([column_u] * n, 1 if not is_batch else 2)
if v is u:
return U + tf.transpose(U, None if not is_batch else [0, 2, 1])
else:
row_v = tf.reshape(v, (1, -1) if not is_batch else (-1, 1, n))
V = tf.concat([row_v] * n, 0 if not is_batch else 1)
return U + V
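# Illustrative example (values assumed): for u = tf.constant([1., 2.]) and
# v left as None, column_u is [[1.], [2.]], U is [[1., 1.], [2., 2.]], and
# the result U + U^T is [[2., 3.], [3., 4.]] -- every pairwise sum u_i + u_j.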
def decaying_softmax(shape, axis):
rank = len(shape) # num dim
max_val = shape[axis]
weights_vector = np.arange(1, max_val + 1, dtype=np.float32)
weights_vector = weights_vector[::-1] # reversed
weights_vector = np.exp(weights_vector) / np.sum(np.exp(weights_vector)) # softmax weights
container = np.zeros(shape, dtype=np.float32)
broadcastable_shape = [1] * rank
broadcastable_shape[axis] = max_val
return container + np.reshape(weights_vector, broadcastable_shape) # the weight matrix is built, with axis is filled with softmax weights
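# Illustrative example (shape assumed): decaying_softmax((1, 3), axis=1)
# softmaxes the reversed ramp [3., 2., 1.], giving roughly
# [[0.665, 0.245, 0.090]] -- earlier positions along the chosen axis get
# exponentially larger weight.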
def unpack_into_tensorarray(value, axis, size=None):
"""
unpacks a given tensor along a given axis into a TensorArray
Parameters:
----------
value: Tensor
the tensor to be unpacked
axis: int
the axis to unpack the tensor along
size: int
the size of the array to be used if shape inference resulted in None
Returns: TensorArray
the unpacked TensorArray
"""
shape = value.get_shape().as_list()
rank = len(shape)
dtype = value.dtype
array_size = shape[axis] if not shape[axis] is None else size
if array_size is None:
raise ValueError("Can't create TensorArray with size None")
    array = tf.TensorArray(dtype=dtype, size=array_size)  # size of the axis
    dim_permutation = [axis] + list(range(1, axis)) + [0] + list(range(axis + 1, rank))
    unpack_axis_major_value = tf.transpose(value, dim_permutation)  # move axis values to dim 0
full_array = array.unstack(unpack_axis_major_value)
return full_array
def pack_into_tensor(array, axis):
"""
packs a given TensorArray into a tensor along a given axis
Parameters:
----------
array: TensorArray
the tensor array to pack
axis: int
the axis to pack the array along
Returns: Tensor
the packed tensor
"""
packed_tensor = array.stack() # add 1 dimension at the 0 dim
shape = packed_tensor.get_shape()
    try:
        rank = len(shape)
    except ValueError:
        # len() raises ValueError for a TensorShape of unknown rank.
        print("unknown rank of tensor array, assuming rank 3")
        rank = 3
dim_permutation = [axis] + list(range(1, axis)) + [0] + list(range(axis + 1, rank))
    correct_shape_tensor = tf.transpose(packed_tensor, dim_permutation)  # put the extra dimension at the axis you want
return correct_shape_tensor
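# Round-trip sketch (shape assumed): for a tensor t of shape (2, 5, 3),
# arr = unpack_into_tensorarray(t, axis=1) yields a TensorArray of five
# (2, 3) tensors, and pack_into_tensor(arr, axis=1) transposes the stacked
# (5, 2, 3) result back to the original (2, 5, 3) layout.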
def pack_into_tensor2(array, axis):
"""
packs a given TensorArray into a tensor along a given axis
Parameters:
----------
array: TensorArray
the tensor array to pack
axis: int
the axis to pack the array along
Returns: Tensor
the packed tensor
"""
packed_tensor = array.stack() # add 1 dimension at the 0 dim
dim_permutation = [axis] + list(range(1, axis)) + [0] + list(range(axis + 1, 3))
    correct_shape_tensor = tf.transpose(packed_tensor, dim_permutation)  # put the extra dimension at the axis you want
return correct_shape_tensor
|
{
"content_hash": "92860d3de55fcee8d802d16e3139ea9c",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 141,
"avg_line_length": 32.318518518518516,
"alnum_prop": 0.6245702498280999,
"repo_name": "thaihungle/deepexp",
"id": "f853d2c7ca708ee0d6ea1abb16f30164cc0dbb2c",
"size": "4363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen-dnc/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2566290"
}
],
"symlink_target": ""
}
|
"""Functions to perform actions related to voiceover application."""
from __future__ import annotations
from core import feconf
from core.domain import email_manager
from core.domain import exp_fetchers
from core.domain import opportunity_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import suggestion_registry
from core.domain import user_services
from core.platform import models
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
def _get_voiceover_application_class(target_type):
"""Returns the voiceover application class for a given target type.
Args:
target_type: str. The target type of the voiceover application.
Returns:
class. The voiceover application class for the given target type.
Raises:
Exception. The voiceover application target type is invalid.
"""
target_type_to_classes = (
suggestion_registry.VOICEOVER_APPLICATION_TARGET_TYPE_TO_DOMAIN_CLASSES)
if target_type in target_type_to_classes:
return target_type_to_classes[target_type]
else:
raise Exception(
'Invalid target type for voiceover application: %s' % target_type)
def _get_voiceover_application_model(voiceover_application):
"""Returns the GeneralVoiceoverApplicationModel object for the give
voiceover application object.
Args:
voiceover_application: BaseVoiceoverApplication. The voiceover
application object.
Returns:
GeneralVoiceoverApplicationModel. The model object out of the given
application object.
"""
return suggestion_models.GeneralVoiceoverApplicationModel(
id=voiceover_application.voiceover_application_id,
target_type=voiceover_application.target_type,
target_id=voiceover_application.target_id,
status=voiceover_application.status,
author_id=voiceover_application.author_id,
final_reviewer_id=voiceover_application.final_reviewer_id,
language_code=voiceover_application.language_code,
filename=voiceover_application.filename,
content=voiceover_application.content,
rejection_message=voiceover_application.rejection_message)
def _get_voiceover_application_from_model(voiceover_application_model):
"""Returns the BaseVoiceoverApplication object for the give
voiceover application model object.
Args:
voiceover_application_model: GeneralVoiceoverApplicationModel. The
voiceover application model object.
Returns:
BaseVoiceoverApplication. The domain object out of the given voiceover
application model object.
"""
voiceover_application_class = _get_voiceover_application_class(
voiceover_application_model.target_type)
return voiceover_application_class(
voiceover_application_model.id,
voiceover_application_model.target_id,
voiceover_application_model.status,
voiceover_application_model.author_id,
voiceover_application_model.final_reviewer_id,
voiceover_application_model.language_code,
voiceover_application_model.filename,
voiceover_application_model.content,
voiceover_application_model.rejection_message)
def _save_voiceover_applications(voiceover_applications):
"""Saves a list of given voiceover application object in datastore.
Args:
voiceover_applications: list(BaseVoiceoverApplication). The list of
voiceover application objects.
"""
voiceover_application_models = []
for voiceover_application in voiceover_applications:
voiceover_application.validate()
voiceover_application_model = _get_voiceover_application_model(
voiceover_application)
voiceover_application_models.append(voiceover_application_model)
suggestion_models.GeneralVoiceoverApplicationModel.update_timestamps_multi(
voiceover_application_models)
suggestion_models.GeneralVoiceoverApplicationModel.put_multi(
voiceover_application_models)
def get_voiceover_application_by_id(voiceover_application_id):
"""Returns voiceover application model corresponding to give id.
Args:
voiceover_application_id: str. The voiceover application id.
Returns:
        BaseVoiceoverApplication. The voiceover application object for the
        given application id.
"""
voiceover_application_model = (
suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
voiceover_application_id))
return _get_voiceover_application_from_model(voiceover_application_model)
def get_reviewable_voiceover_applications(user_id):
"""Returns a list of voiceover applications which the given user can review.
Args:
user_id: str. The user ID of the reviewer.
Returns:
        list(BaseVoiceoverApplication). A list of voiceover applications which
        the given user can review.
"""
voiceover_application_models = (
suggestion_models.GeneralVoiceoverApplicationModel
.get_reviewable_voiceover_applications(user_id))
return [
_get_voiceover_application_from_model(model) for model in (
voiceover_application_models)]
def get_user_submitted_voiceover_applications(user_id, status=None):
"""Returns a list of voiceover application submitted by the given user which
are currently in the given status.
Args:
user_id: str. The id of the user.
status: str|None. The status of the voiceover application.
Returns:
        list(BaseVoiceoverApplication). A list of voiceover applications
        which were submitted by the given user.
"""
voiceover_application_models = (
suggestion_models.GeneralVoiceoverApplicationModel
.get_user_voiceover_applications(user_id, status))
return [
_get_voiceover_application_from_model(model) for model in (
voiceover_application_models)]
def accept_voiceover_application(voiceover_application_id, reviewer_id):
"""Accept the voiceover application of given voiceover application id.
Args:
        voiceover_application_id: str. The ID of the voiceover application
            which needs to be accepted.
reviewer_id: str. The user ID of the reviewer.
Raises:
        Exception. The reviewer ID is the same as the author ID.
"""
voiceover_application = get_voiceover_application_by_id(
voiceover_application_id)
if reviewer_id == voiceover_application.author_id:
raise Exception(
'Applicants are not allowed to review their own '
'voiceover application.')
reviewer = user_services.get_user_actions_info(reviewer_id)
voiceover_application.accept(reviewer_id)
_save_voiceover_applications([voiceover_application])
if voiceover_application.target_type == feconf.ENTITY_TYPE_EXPLORATION:
rights_manager.assign_role_for_exploration(
reviewer, voiceover_application.target_id,
voiceover_application.author_id, rights_domain.ROLE_VOICE_ARTIST)
opportunity_services.update_exploration_voiceover_opportunities(
voiceover_application.target_id,
voiceover_application.language_code)
opportunities = (
opportunity_services.get_exploration_opportunity_summaries_by_ids([
voiceover_application.target_id]))
email_manager.send_accepted_voiceover_application_email(
voiceover_application.author_id,
opportunities[voiceover_application.target_id].chapter_title,
voiceover_application.language_code)
# TODO(#7969): Add notification to the user's dashboard for the accepted
# voiceover application.
voiceover_application_models = (
suggestion_models.GeneralVoiceoverApplicationModel
.get_voiceover_applications(
voiceover_application.target_type, voiceover_application.target_id,
voiceover_application.language_code))
rejected_voiceover_applications = []
for model in voiceover_application_models:
voiceover_application = _get_voiceover_application_from_model(
model)
if not voiceover_application.is_handled:
voiceover_application.reject(
reviewer_id, 'We have to reject your application as another '
'application for the same opportunity got accepted.')
rejected_voiceover_applications.append(voiceover_application)
_save_voiceover_applications(rejected_voiceover_applications)
def reject_voiceover_application(
voiceover_application_id, reviewer_id, rejection_message):
"""Rejects the voiceover application of given voiceover application id.
Args:
        voiceover_application_id: str. The ID of the voiceover application
            which needs to be rejected.
reviewer_id: str. The user ID of the reviewer.
rejection_message: str. The plain text message submitted by the
reviewer while rejecting the application.
Raises:
        Exception. The reviewer ID is the same as the author ID.
"""
voiceover_application = get_voiceover_application_by_id(
voiceover_application_id)
if reviewer_id == voiceover_application.author_id:
raise Exception(
'Applicants are not allowed to review their own '
'voiceover application.')
reviewer = user_services.get_user_actions_info(reviewer_id)
voiceover_application.reject(reviewer.user_id, rejection_message)
_save_voiceover_applications([voiceover_application])
if voiceover_application.target_type == feconf.ENTITY_TYPE_EXPLORATION:
opportunities = (
opportunity_services.get_exploration_opportunity_summaries_by_ids([
voiceover_application.target_id]))
email_manager.send_rejected_voiceover_application_email(
voiceover_application.author_id,
opportunities[voiceover_application.target_id].chapter_title,
voiceover_application.language_code, rejection_message)
    # TODO(#7969): Add notification to the user's dashboard for the rejected
    # voiceover application.
def create_new_voiceover_application(
target_type, target_id, language_code, content, filename, author_id):
"""Creates a new voiceover application withe the given data.
Args:
target_type: str. The string representing the type of the target entity.
target_id: str. The ID of the target entity.
language_code: str. The language code for the voiceover application.
        content: str. The HTML content which is to be voiced over in the
            application.
filename: str. The filename of the voiceover audio.
author_id: str. The ID of the user who submitted the voiceover
application.
"""
voiceover_application_class = _get_voiceover_application_class(target_type)
voiceover_application_id = (
suggestion_models.GeneralVoiceoverApplicationModel.get_new_id(''))
voiceover_application = voiceover_application_class(
voiceover_application_id, target_id, suggestion_models.STATUS_IN_REVIEW,
author_id, None, language_code, filename, content, None)
_save_voiceover_applications([voiceover_application])
def get_text_to_create_voiceover_application(
target_type, target_id, language_code):
"""Returns a text to voiceover for a voiceover application.
Args:
target_type: str. The string representing the type of the target entity.
target_id: str. The ID of the target entity.
language_code: str. The language code for the content.
Returns:
        str. The text which can be voiced over for a voiceover application.
Raises:
Exception. Invalid target type.
"""
if target_type == feconf.ENTITY_TYPE_EXPLORATION:
exploration = exp_fetchers.get_exploration_by_id(target_id)
init_state_name = exploration.init_state_name
state = exploration.states[init_state_name]
if exploration.language_code == language_code:
return state.content.html
else:
return state.written_translations.get_translated_content(
state.content.content_id, language_code)
else:
raise Exception('Invalid target type: %s' % target_type)
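# A hedged, self-contained sketch of the domain-object/storage-model round
# trip the services above rely on: validate the domain object, map it onto a
# model, and rebuild the domain object from the model. The classes below are
# hypothetical stand-ins for illustration, not Oppia's real classes.
class _SketchApplicationModel(object):
    def __init__(self, app_id, status, content):
        self.id = app_id
        self.status = status
        self.content = content
class _SketchVoiceoverApplication(object):
    def __init__(self, app_id, status, content):
        self.id = app_id
        self.status = status
        self.content = content
    def validate(self):
        if self.status not in ('review', 'accepted', 'rejected'):
            raise Exception('Invalid status: %s' % self.status)
def _sketch_to_model(application):
    # Validate before persisting, as _save_voiceover_applications does above.
    application.validate()
    return _SketchApplicationModel(
        application.id, application.status, application.content)
def _sketch_from_model(model):
    return _SketchVoiceoverApplication(model.id, model.status, model.content)
assert _sketch_from_model(
    _sketch_to_model(
        _SketchVoiceoverApplication('a1', 'review', 'hi'))).content == 'hi'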
|
{
"content_hash": "feec78ad54a7583987a1a006d5a5ba59",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 80,
"avg_line_length": 39.69551282051282,
"alnum_prop": 0.7062575696406944,
"repo_name": "brianrodri/oppia",
"id": "4cb83051c08b33b0ad25654a2da2325e7cc1d8ec",
"size": "12990",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/voiceover_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "487903"
},
{
"name": "HTML",
"bytes": "1748056"
},
{
"name": "JavaScript",
"bytes": "1176446"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "14169091"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13316709"
}
],
"symlink_target": ""
}
|
"""Test RPC misc output."""
import xml.etree.ElementTree as ET
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
)
from test_framework.authproxy import JSONRPCException
class RpcMiscTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = self.nodes[0]
self.log.info("test CHECK_NONFATAL")
assert_raises_rpc_error(
-1,
'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'',
lambda: node.echo(arg9='trigger_internal_bug'),
)
self.log.info("test getmemoryinfo")
memory = node.getmemoryinfo()['locked']
assert_greater_than(memory['used'], 0)
assert_greater_than(memory['free'], 0)
assert_greater_than(memory['total'], 0)
# assert_greater_than_or_equal() for locked in case locking pages failed at some point
assert_greater_than_or_equal(memory['locked'], 0)
assert_greater_than(memory['chunks_used'], 0)
assert_greater_than(memory['chunks_free'], 0)
assert_equal(memory['used'] + memory['free'], memory['total'])
self.log.info("test mallocinfo")
try:
mallocinfo = node.getmemoryinfo(mode="mallocinfo")
self.log.info('getmemoryinfo(mode="mallocinfo") call succeeded')
tree = ET.fromstring(mallocinfo)
assert_equal(tree.tag, 'malloc')
except JSONRPCException:
self.log.info('getmemoryinfo(mode="mallocinfo") not available')
assert_raises_rpc_error(-8, 'mallocinfo is only available when compiled with glibc 2.10+', node.getmemoryinfo, mode="mallocinfo")
assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")
self.log.info("test logging")
assert_equal(node.logging()['qt'], True)
node.logging(exclude=['qt'])
assert_equal(node.logging()['qt'], False)
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)
self.log.info("test getindexinfo")
# Without any indices running the RPC returns an empty object
assert_equal(node.getindexinfo(), {})
# Restart the node with indices and wait for them to sync
self.restart_node(0, ["-txindex", "-blockfilterindex"])
self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))
# Returns a list of all running indices by default
assert_equal(
node.getindexinfo(),
{
"txindex": {"synced": True, "best_block_height": 340},
"basic block filter index": {"synced": True, "best_block_height": 340}
}
)
# Specifying an index by name returns only the status of that index
assert_equal(
node.getindexinfo("txindex"),
{
"txindex": {"synced": True, "best_block_height": 340},
}
)
# Specifying an unknown index name returns an empty result
assert_equal(node.getindexinfo("foo"), {})
if __name__ == '__main__':
RpcMiscTest().main()
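# A hedged sketch of parsing glibc mallocinfo-style XML with the same
# ElementTree API the test uses above; the XML snippet is invented for
# illustration and is not real node output.
import xml.etree.ElementTree as _ET
_sample = (
    '<malloc version="1"><heap nr="0">'
    '<total type="rest" count="1" size="1024"/></heap></malloc>')
_tree = _ET.fromstring(_sample)
assert _tree.tag == 'malloc'
for _total in _tree.iter('total'):
    print(_total.get('type'), _total.get('size'))  # rest 1024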
|
{
"content_hash": "2596ecf62656f4f3e18eba48c7fd2b4a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 141,
"avg_line_length": 37.17777777777778,
"alnum_prop": 0.6102809324566647,
"repo_name": "rnicoll/dogecoin",
"id": "b30b288ff4266bc39620d9b18c73876eca449a86",
"size": "3555",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/functional/rpc_misc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "1064604"
},
{
"name": "C++",
"bytes": "8101614"
},
{
"name": "CMake",
"bytes": "28560"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "215256"
},
{
"name": "Makefile",
"bytes": "117017"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2237402"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "153769"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django import test
from django.utils import translation
from django.utils.functional import lazy
import jinja2
from nose.tools import eq_
from test_utils import ExtraAppTestCase, trans_eq
from testapp.models import TranslatedModel, UntranslatedModel, FancyModel
from translations.models import (Translation, PurifiedTranslation,
TranslationSequence)
from translations import widgets
from translations.query import order_by_translation
def ids(qs):
return [o.id for o in qs]
class TranslationFixturelessTestCase(test.TestCase):
"We want to be able to rollback stuff."
def test_whitespace(self):
t = Translation(localized_string=' khaaaaaan! ', id=999)
t.save()
eq_('khaaaaaan!', t.localized_string)
class TranslationSequenceTestCase(test.TestCase):
"""
Make sure automatic translation sequence generation works
as expected.
"""
def test_empty_translations_seq(self):
"""Make sure we can handle an empty translation sequence table."""
TranslationSequence.objects.all().delete()
newtrans = Translation.new('abc', 'en-us')
newtrans.save()
assert newtrans.id > 0, (
'Empty translation table should still generate an ID.')
def test_single_translation_sequence(self):
"""Make sure we only ever have one translation sequence."""
TranslationSequence.objects.all().delete()
eq_(TranslationSequence.objects.count(), 0)
for i in range(5):
newtrans = Translation.new(str(i), 'en-us')
newtrans.save()
eq_(TranslationSequence.objects.count(), 1)
def test_translation_sequence_increases(self):
"""Make sure translation sequence increases monotonically."""
newtrans1 = Translation.new('abc', 'en-us')
newtrans1.save()
newtrans2 = Translation.new('def', 'de')
newtrans2.save()
assert newtrans2.pk > newtrans1.pk, (
'Translation sequence needs to keep increasing.')
class TranslationTestCase(ExtraAppTestCase):
fixtures = ['testapp/test_models.json']
extra_apps = ['translations.tests.testapp']
def setUp(self):
super(TranslationTestCase, self).setUp()
self.redirect_url = settings.REDIRECT_URL
self.redirect_secret_key = settings.REDIRECT_SECRET_KEY
settings.REDIRECT_URL = None
settings.REDIRECT_SECRET_KEY = 'sekrit'
translation.activate('en-US')
def tearDown(self):
super(TranslationTestCase, self).tearDown()
settings.REDIRECT_URL = self.redirect_url
settings.REDIRECT_SECRET_KEY = self.redirect_secret_key
def test_fetch_translations(self):
"""Basic check of fetching translations in the current locale."""
o = TranslatedModel.objects.get(id=1)
trans_eq(o.name, 'some name', 'en-US')
trans_eq(o.description, 'some description', 'en-US')
def test_fetch_no_translations(self):
"""Make sure models with no translations aren't harmed."""
o = UntranslatedModel.objects.get(id=1)
eq_(o.number, 17)
def test_fetch_translation_de_locale(self):
"""Check that locale fallbacks work."""
try:
translation.activate('de')
o = TranslatedModel.objects.get(id=1)
trans_eq(o.name, 'German!! (unst unst)', 'de')
trans_eq(o.description, 'some description', 'en-US')
finally:
translation.deactivate()
def test_create_translation(self):
o = TranslatedModel.objects.create(name='english name')
get_model = lambda: TranslatedModel.objects.get(id=o.id)
trans_eq(o.name, 'english name', 'en-US')
eq_(o.description, None)
# Make sure the translation id is stored on the model, not the autoid.
eq_(o.name.id, o.name_id)
# Check that a different locale creates a new row with the same id.
translation.activate('de')
german = get_model()
trans_eq(o.name, 'english name', 'en-US')
german.name = u'Gemütlichkeit name'
german.description = u'clöüserw description'
german.save()
trans_eq(german.name, u'Gemütlichkeit name', 'de')
trans_eq(german.description, u'clöüserw description', 'de')
# ids should be the same, autoids are different.
eq_(o.name.id, german.name.id)
assert o.name.autoid != german.name.autoid
# Check that de finds the right translation.
fresh_german = get_model()
trans_eq(fresh_german.name, u'Gemütlichkeit name', 'de')
trans_eq(fresh_german.description, u'clöüserw description', 'de')
# Check that en-US has the right translations.
translation.deactivate()
english = get_model()
trans_eq(english.name, 'english name', 'en-US')
english.debug = True
eq_(english.description, None)
english.description = 'english description'
english.save()
fresh_english = get_model()
trans_eq(fresh_english.description, 'english description', 'en-US')
eq_(fresh_english.description.id, fresh_german.description.id)
def test_update_translation(self):
o = TranslatedModel.objects.get(id=1)
translation_id = o.name.autoid
o.name = 'new name'
o.save()
o = TranslatedModel.objects.get(id=1)
trans_eq(o.name, 'new name', 'en-US')
# Make sure it was an update, not an insert.
eq_(o.name.autoid, translation_id)
def test_create_with_dict(self):
# Set translations with a dict.
strings = {'en-US': 'right language', 'de': 'wrong language'}
o = TranslatedModel.objects.create(name=strings)
# Make sure we get the English text since we're in en-US.
trans_eq(o.name, 'right language', 'en-US')
# Check that de was set.
translation.activate('de')
o = TranslatedModel.objects.get(id=o.id)
trans_eq(o.name, 'wrong language', 'de')
# We're in de scope, so we should see the de text.
de = TranslatedModel.objects.create(name=strings)
trans_eq(o.name, 'wrong language', 'de')
# Make sure en-US was still set.
translation.deactivate()
o = TranslatedModel.objects.get(id=de.id)
trans_eq(o.name, 'right language', 'en-US')
def test_update_with_dict(self):
# There's existing en-US and de strings.
strings = {'de': None, 'fr': 'oui'}
get_model = lambda: TranslatedModel.objects.get(id=1)
# Don't try checking that the model's name value is en-US. It will be
# one of the other locales, but we don't know which one. You just set
# the name to a dict, deal with it.
m = get_model()
m.name = strings
m.save()
# en-US was not touched.
trans_eq(get_model().name, 'some name', 'en-US')
# de was updated to NULL, so it falls back to en-US.
translation.activate('de')
trans_eq(get_model().name, 'some name', 'en-US')
# fr was added.
translation.activate('fr')
trans_eq(get_model().name, 'oui', 'fr')
def test_dict_bad_locale(self):
m = TranslatedModel.objects.get(id=1)
m.name = {'de': 'oof', 'xxx': 'bam', 'es': 'si'}
m.save()
ts = Translation.objects.filter(id=m.name_id)
eq_(sorted(ts.values_list('locale', flat=True)),
['de', 'en-US', 'es'])
def test_sorting(self):
"""Test translation comparisons in Python code."""
b = Translation.new('bbbb', 'de')
a = Translation.new('aaaa', 'de')
c = Translation.new('cccc', 'de')
eq_(sorted([c, a, b]), [a, b, c])
def test_sorting_en(self):
q = TranslatedModel.objects.all()
expected = [4, 1, 3]
eq_(ids(order_by_translation(q, 'name')), expected)
eq_(ids(order_by_translation(q, '-name')), list(reversed(expected)))
def test_sorting_mixed(self):
translation.activate('de')
q = TranslatedModel.objects.all()
expected = [1, 4, 3]
eq_(ids(order_by_translation(q, 'name')), expected)
eq_(ids(order_by_translation(q, '-name')), list(reversed(expected)))
def test_sorting_by_field(self):
field = TranslatedModel._meta.get_field('default_locale')
TranslatedModel.get_fallback = classmethod(lambda cls: field)
translation.activate('de')
q = TranslatedModel.objects.all()
expected = [3, 1, 4]
eq_(ids(order_by_translation(q, 'name')), expected)
eq_(ids(order_by_translation(q, '-name')), list(reversed(expected)))
del TranslatedModel.get_fallback
def test_new_purified_field(self):
# This is not a full test of the html sanitizing. We expect the
# underlying bleach library to have full tests.
s = '<a id=xx href="http://xxx.com">yay</a> <i>http://yyy.com</i>'
m = FancyModel.objects.create(purified=s)
eq_(m.purified.localized_string_clean,
'<a href="http://xxx.com" rel="nofollow">yay</a> '
'<i><a href="http://yyy.com" rel="nofollow">'
'http://yyy.com</a></i>')
eq_(m.purified.localized_string, s)
def test_new_linkified_field(self):
s = '<a id=xx href="http://xxx.com">yay</a> <i>http://yyy.com</i>'
m = FancyModel.objects.create(linkified=s)
eq_(m.linkified.localized_string_clean,
'<a href="http://xxx.com" rel="nofollow">yay</a> '
'<i><a href="http://yyy.com" rel="nofollow">'
'http://yyy.com</a></i>')
eq_(m.linkified.localized_string, s)
def test_update_purified_field(self):
m = FancyModel.objects.get(id=1)
s = '<a id=xx href="http://xxx.com">yay</a> <i>http://yyy.com</i>'
m.purified = s
m.save()
eq_(m.purified.localized_string_clean,
'<a href="http://xxx.com" rel="nofollow">yay</a> '
'<i><a href="http://yyy.com" rel="nofollow">'
'http://yyy.com</a></i>')
eq_(m.purified.localized_string, s)
def test_update_linkified_field(self):
m = FancyModel.objects.get(id=1)
s = '<a id=xx href="http://xxx.com">yay</a> <i>http://yyy.com</i>'
m.linkified = s
m.save()
eq_(m.linkified.localized_string_clean,
'<a href="http://xxx.com" rel="nofollow">yay</a> '
'<i><a href="http://yyy.com" rel="nofollow">'
'http://yyy.com</a></i>')
eq_(m.linkified.localized_string, s)
def test_purified_field_str(self):
m = FancyModel.objects.get(id=1)
eq_(u'%s' % m.purified,
'<i>x</i> '
'<a href="http://yyy.com" rel="nofollow">http://yyy.com</a>')
def test_linkified_field_str(self):
m = FancyModel.objects.get(id=1)
eq_(u'%s' % m.linkified,
'<i>x</i> '
'<a href="http://yyy.com" rel="nofollow">http://yyy.com</a>')
    def test_purified_linkified_fields_in_template(self):
m = FancyModel.objects.get(id=1)
env = jinja2.Environment()
t = env.from_string('{{ m.purified }}=={{ m.linkified }}')
s = t.render(m=m)
eq_(s, u'%s==%s' % (m.purified.localized_string_clean,
m.linkified.localized_string_clean))
def test_outgoing_url(self):
"""
Make sure linkified field is properly bounced off our outgoing URL
redirector.
"""
settings.REDIRECT_URL = 'http://example.com/'
s = 'I like http://example.org/awesomepage.html .'
m = FancyModel.objects.create(linkified=s)
eq_(m.linkified.localized_string_clean,
'I like <a href="http://example.com/'
'40979175e3ef6d7a9081085f3b99f2f05447b22ba790130517dd62b7ee59ef94/'
'http%3A//example.org/'
'awesomepage.html" rel="nofollow">http://example.org/awesomepage'
'.html</a> .')
eq_(m.linkified.localized_string, s)
def test_require_locale(self):
obj = TranslatedModel.objects.get(id=1)
eq_(unicode(obj.no_locale), 'blammo')
eq_(obj.no_locale.locale, 'en-US')
# Switch the translation to a locale we wouldn't pick up by default.
obj.no_locale.locale = 'fr'
obj.no_locale.save()
obj = TranslatedModel.objects.get(id=1)
eq_(unicode(obj.no_locale), 'blammo')
eq_(obj.no_locale.locale, 'fr')
def test_translation_bool():
t = lambda s: Translation(localized_string=s)
assert bool(t('text')) is True
assert bool(t(' ')) is False
assert bool(t('')) is False
assert bool(t(None)) is False
def test_translation_unicode():
t = lambda s: Translation(localized_string=s)
eq_(unicode(t('hello')), 'hello')
eq_(unicode(t(None)), '')
def test_widget_value_from_datadict():
data = {'f_en-US': 'woo', 'f_de': 'herr', 'f_fr_delete': ''}
actual = widgets.TransMulti().value_from_datadict(data, [], 'f')
expected = {'en-US': 'woo', 'de': 'herr', 'fr': None}
eq_(actual, expected)
def test_purified_translation_html():
"""__html__() should return a string."""
s = u'<b>heyhey</b>'
x = PurifiedTranslation(localized_string=s)
assert isinstance(x.__html__(), unicode)
eq_(x.__html__(), s)
def test_comparison_with_lazy():
x = Translation(localized_string='xxx')
lazy_u = lazy(lambda x: x, unicode)
    # Neither comparison should raise; only exception-safety is under test.
    x == lazy_u('xxx')
    lazy_u('xxx') == x
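# A standalone sketch of the truthiness contract pinned down by
# test_translation_bool above: whitespace-only and None strings are falsy.
# SimpleTranslation is a hypothetical stand-in, not the real model.
class SimpleTranslation(object):
    def __init__(self, localized_string):
        self.localized_string = localized_string
    def __nonzero__(self):  # spelled __bool__ under Python 3
        return bool(self.localized_string and self.localized_string.strip())
    __bool__ = __nonzero__
assert bool(SimpleTranslation('text')) is True
assert bool(SimpleTranslation('  ')) is False
assert bool(SimpleTranslation(None)) is False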
|
{
"content_hash": "a21fc99828ddb3a5aed9b0b925eb2559",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 79,
"avg_line_length": 36.40909090909091,
"alnum_prop": 0.5993978115590806,
"repo_name": "washort/gelato.models",
"id": "8ba156318be3a7eafb8890bc9067534e53394df2",
"size": "13650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gelato/translations/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "124841"
}
],
"symlink_target": ""
}
|
import numpy
from io import BytesIO
import aggdraw as agg
from . import AggHelp
from ginga import ImageView
from ginga.aggw.CanvasRenderAgg import CanvasRenderer
try:
import PIL.Image as PILimage
have_PIL = True
except ImportError:
have_PIL = False
class ImageViewAggError(ImageView.ImageViewError):
pass
class ImageViewAgg(ImageView.ImageViewBase):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageView.ImageViewBase.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings)
self.surface = None
self._rgb_order = 'RGBA'
self.renderer = CanvasRenderer(self)
def get_surface(self):
return self.surface
def render_image(self, rgbobj, dst_x, dst_y):
"""Render the image represented by (rgbobj) at dst_x, dst_y
in the pixel space.
"""
if self.surface is None:
return
canvas = self.surface
self.logger.debug("redraw surface")
# get window contents as a buffer and load it into the AGG surface
rgb_buf = self.getwin_buffer(order=self._rgb_order)
canvas.fromstring(rgb_buf)
# for debugging
#self.save_rgb_image_as_file('/tmp/temp.png', format='png')
def configure_surface(self, width, height):
# create agg surface the size of the window
self.surface = agg.Draw("RGBA", (width, height), 'black')
# inform the base class about the actual window size
self.configure(width, height)
def get_image_as_array(self):
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
# TODO: could these have changed between the time that self.surface
# was last updated?
wd, ht = self.get_window_size()
# Get agg surface as a numpy array
surface = self.get_surface()
arr8 = numpy.fromstring(surface.tostring(), dtype=numpy.uint8)
arr8 = arr8.reshape((ht, wd, 4))
return arr8
def get_image_as_buffer(self, output=None):
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
obuf = output
if obuf is None:
obuf = BytesIO()
surface = self.get_surface()
obuf.write(surface.tostring())
return obuf
def get_rgb_image_as_buffer(self, output=None, format='png', quality=90):
if not have_PIL:
raise ImageViewAggError("Please install PIL to use this method")
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
obuf = output
if obuf is None:
obuf = BytesIO()
# Get current surface as an array
arr8 = self.get_image_as_array()
# make a PIL image
image = PILimage.fromarray(arr8)
image.save(obuf, format=format, quality=quality)
        if output is not None:
            return None
return obuf.getvalue()
def get_rgb_image_as_bytes(self, format='png', quality=90):
buf = self.get_rgb_image_as_buffer(format=format, quality=quality)
return buf
def save_rgb_image_as_file(self, filepath, format='png', quality=90):
if not have_PIL:
raise ImageViewAggError("Please install PIL to use this method")
if self.surface is None:
raise ImageViewAggError("No AGG surface defined")
        with open(filepath, 'wb') as out_f:
self.get_rgb_image_as_buffer(output=out_f, format=format,
quality=quality)
self.logger.debug("wrote %s file '%s'" % (format, filepath))
def update_image(self):
# subclass implements this method to actually update a widget
# from the agg surface
self.logger.warning("Subclass should override this method")
return False
def set_cursor(self, cursor):
# subclass implements this method to actually set a defined
# cursor on a widget
self.logger.warning("Subclass should override this method")
def reschedule_redraw(self, time_sec):
# subclass implements this method to call delayed_redraw() after
# time_sec
self.delayed_redraw()
def get_rgb_order(self):
return self._rgb_order
#END
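# A self-contained sketch of the raw-buffer-to-array step that
# get_image_as_array performs above; numpy.frombuffer is the non-deprecated
# spelling of fromstring, and the 2x2 RGBA buffer here is invented.
import numpy as _np
_wd, _ht = 2, 2
_raw = bytes(bytearray(range(_wd * _ht * 4)))
_arr8 = _np.frombuffer(_raw, dtype=_np.uint8).reshape((_ht, _wd, 4))
assert _arr8.shape == (2, 2, 4)
print(_arr8[0, 0])  # RGBA of the top-left pixel: [0 1 2 3]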
|
{
"content_hash": "0389ce988c5e7136d1df89194abad30b",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 31.070921985815602,
"alnum_prop": 0.6135585482766491,
"repo_name": "stscieisenhamer/ginga",
"id": "acede338b5b4477473d08b5381bdcbb71a6abe7b",
"size": "4561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/aggw/ImageViewAgg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2763201"
}
],
"symlink_target": ""
}
|
from multiprocessing import Array, Event, Process
from time import sleep
from ujson import loads as json_loads
from sanic import Sanic
from sanic.response import json
from sanic.utils import local_request, HOST, PORT
# ------------------------------------------------------------ #
# GET
# ------------------------------------------------------------ #
# TODO: Figure out why this freezes on pytest but not when
# executed via interpreter
def skip_test_multiprocessing():
app = Sanic('test_json')
response = Array('c', 50)
@app.route('/')
async def handler(request):
return json({"test": True})
stop_event = Event()
async def after_start(*args, **kwargs):
http_response = await local_request('get', '/')
response.value = http_response.text.encode()
stop_event.set()
def rescue_crew():
sleep(5)
stop_event.set()
rescue_process = Process(target=rescue_crew)
rescue_process.start()
app.serve_multiple({
'host': HOST,
'port': PORT,
'after_start': after_start,
'request_handler': app.handle_request,
'request_max_size': 100000,
}, workers=2, stop_event=stop_event)
rescue_process.terminate()
try:
results = json_loads(response.value)
    except ValueError:
        raise ValueError(
            "Expected JSON response but got '{}'".format(response.value))
assert results.get('test') == True
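# A minimal, standalone sketch of the Array-plus-Event signalling pattern the
# test relies on: a child process writes into shared memory and sets an Event
# so the parent can read the result. The worker body is a trivial stand-in
# for the HTTP check done in after_start above.
from multiprocessing import Array as _Array, Event as _Event, Process as _Process
def _sketch_worker(shared, done):
    shared.value = b'{"test": true}'  # stand-in for the HTTP response body
    done.set()
if __name__ == '__main__':
    _shared = _Array('c', 50)
    _done = _Event()
    _proc = _Process(target=_sketch_worker, args=(_shared, _done))
    _proc.start()
    _done.wait(timeout=5)  # bounded wait, mirroring the rescue process above
    _proc.join()
    print(_shared.value)  # b'{"test": true}'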
|
{
"content_hash": "b8cd359cf522f5ffb3aca4475aa38aa4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 26.849056603773583,
"alnum_prop": 0.5818692902319045,
"repo_name": "hhstore/flask-annotated",
"id": "545ecee76bfb9a7489c8bbce73c0f9dd495d4a5b",
"size": "1423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanic/sanic-0.1.9/tests/test_multiprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173082"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os
import re
import sys
import subprocess
import time
from Queue import Empty
from sonLib.bioio import logger
from multiprocessing import Process
from multiprocessing import JoinableQueue as Queue
#from threading import Thread
#from Queue import Queue, Empty
from jobTree.batchSystems.abstractBatchSystem import AbstractBatchSystem
from jobTree.src.master import getParasolResultsFileName
def popenParasolCommand(command, runUntilSuccessful=True):
"""Issues a parasol command using popen to capture the output.
If the command fails then it will try pinging parasol until it gets a response.
When it gets a response it will recursively call the issue parasol command, repeating this pattern
for a maximum of N times.
The final exit value will reflect this.
"""
while True:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
output, nothing = process.communicate() #process.stdout.read().strip()
exitValue = process.wait()
if exitValue == 0:
return 0, output.split("\n")
logger.critical("The following parasol command failed (exit value %s): %s" % (exitValue, command))
if not runUntilSuccessful:
return exitValue, None
time.sleep(10)
logger.critical("Waited for a few seconds, will try again")
def getUpdatedJob(parasolResultsFile, outputQueue1, outputQueue2):
"""We use the parasol results to update the status of jobs, adding them
to the list of updated jobs.
Results have the following structure.. (thanks Mark D!)
int status; /* Job status - wait() return format. 0 is good. */
char *host; /* Machine job ran on. */
char *jobId; /* Job queuing system job ID */
char *exe; /* Job executable file (no path) */
int usrTicks; /* 'User' CPU time in ticks. */
int sysTicks; /* 'System' CPU time in ticks. */
unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
unsigned startTime; /* Job start time in seconds since 1/1/1970 */
unsigned endTime; /* Job end time in seconds since 1/1/1970 */
char *user; /* User who ran job */
char *errFile; /* Location of stderr file on host */
plus you finally have the command name..
"""
parasolResultsFileHandle = open(parasolResultsFile, 'r')
while True:
line = parasolResultsFileHandle.readline()
if line != '':
results = line.split()
result = int(results[0])
jobID = int(results[2])
outputQueue1.put(jobID)
outputQueue2.put((jobID, result))
else:
time.sleep(0.01) #Go to sleep to avoid churning
class ParasolBatchSystem(AbstractBatchSystem):
"""The interface for Parasol.
"""
@classmethod
def getDisplayNames(cls):
"""
Names used to select this batch system.
"""
return ["parasol"]
def __init__(self, config, maxCpus, maxMemory):
AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
if maxMemory != sys.maxint:
logger.critical("A max memory has been specified for the parasol batch system class of %i, but currently this batchsystem interface does not support such limiting" % maxMemory)
#Keep the name of the results file for the pstat2 command..
self.parasolCommand = config.attrib["parasol_command"]
self.parasolResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
#Reset the job queue and results (initially, we do this again once we've killed the jobs)
self.queuePattern = re.compile("q\s+([0-9]+)")
self.runningPattern = re.compile("r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+")
self.killJobs(self.getIssuedJobIDs()) #Kill any jobs on the current stack
logger.info("Going to sleep for a few seconds to kill any existing jobs")
time.sleep(5) #Give batch system a second to sort itself out.
logger.info("Removed any old jobs from the queue")
#Reset the job queue and results
exitValue = popenParasolCommand("%s -results=%s clear sick" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue != 0:  # popenParasolCommand returns 0 on success
logger.critical("Could not clear sick status of the parasol batch %s" % self.parasolResultsFile)
exitValue = popenParasolCommand("%s -results=%s flushResults" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue != 0:
logger.critical("Could not flush the parasol batch %s" % self.parasolResultsFile)
open(self.parasolResultsFile, 'w').close()
logger.info("Reset the results queue")
#Stuff to allow max cpus to be work
self.outputQueue1 = Queue()
self.outputQueue2 = Queue()
#worker = Thread(target=getUpdatedJob, args=(self.parasolResultsFileHandle, self.outputQueue1, self.outputQueue2))
#worker.setDaemon(True)
worker = Process(target=getUpdatedJob, args=(self.parasolResultsFile, self.outputQueue1, self.outputQueue2))
worker.daemon = True
worker.start()
self.usedCpus = 0
self.jobIDsToCpu = {}
def issueJob(self, command, memory, cpu):
"""Issues parasol with job commands.
"""
self.checkResourceRequest(memory, cpu)
pattern = re.compile("your job ([0-9]+).*")
parasolCommand = "%s -verbose -ram=%i -cpu=%i -results=%s add job '%s'" % (self.parasolCommand, memory, cpu, self.parasolResultsFile, command)
#Deal with the cpus
self.usedCpus += cpu
while True: #Process finished results with no wait
try:
jobID = self.outputQueue1.get_nowait()
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
self.outputQueue1.task_done()
except Empty:
break
while self.usedCpus > self.maxCpus: #If we are still waiting
self.usedCpus -= self.jobIDsToCpu.pop(self.outputQueue1.get())
assert self.usedCpus >= 0
self.outputQueue1.task_done()
#Now keep going
while True:
#time.sleep(0.1) #Sleep to let parasol catch up #Apparently unnecessary
line = popenParasolCommand(parasolCommand)[1][0]
match = pattern.match(line)
if match != None: #This is because parasol add job will return success, even if the job was not properly issued!
break
else:
logger.info("We failed to properly add the job, we will try again after a sleep")
time.sleep(5)
jobID = int(match.group(1))
self.jobIDsToCpu[jobID] = cpu
logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
logger.debug("Issued the job command: %s with (parasol) job id: %i " % (parasolCommand, jobID))
return jobID
def killJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
exitValue = popenParasolCommand("%s remove job %i" % (self.parasolCommand, jobID), runUntilSuccessful=False)[0]
logger.info("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
return
time.sleep(5)
logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
def getIssuedJobIDs(self):
"""Gets the list of jobs issued to parasol.
"""
#Example issued job, first field is jobID, last is the results file
#31816891 localhost benedictpaten 2009/07/23 10:54:09 python ~/Desktop/out.txt
issuedJobs = set()
for line in popenParasolCommand("%s -extended list jobs" % self.parasolCommand)[1]:
if line != '':
tokens = line.split()
if tokens[-1] == self.parasolResultsFile:
jobID = int(tokens[0])
issuedJobs.add(jobID)
return list(issuedJobs)
def getRunningJobIDs(self):
"""Returns map of running jobIDs and the time they have been running.
"""
#Example lines..
#r 5410186 benedictpaten jobTreeSlave 1247029663 localhost
#r 5410324 benedictpaten jobTreeSlave 1247030076 localhost
runningJobs = {}
issuedJobs = self.getIssuedJobIDs()
for line in popenParasolCommand("%s -results=%s pstat2 " % (self.parasolCommand, self.parasolResultsFile))[1]:
if line != '':
match = self.runningPattern.match(line)
if match != None:
jobID = int(match.group(1))
startTime = int(match.group(2))
if jobID in issuedJobs: #It's one of our jobs
runningJobs[jobID] = time.time() - startTime
return runningJobs
def getUpdatedJob(self, maxWait):
jobID = self.getFromQueueSafely(self.outputQueue2, maxWait)
if jobID != None:
self.outputQueue2.task_done()
return jobID
def getRescueJobFrequency(self):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive.
"""
return 5400 #Once every 90 minutes
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()
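# A small sketch of the regex scrape that issueJob depends on: pulling the
# job id out of parasol's verbose 'add job' output. The sample line below is
# invented, shaped the way the pattern above expects.
import re as _re
_pattern = _re.compile("your job ([0-9]+).*")
_match = _pattern.match("your job 5410186 submitted")
assert _match is not None
print(int(_match.group(1)))  # 5410186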
|
{
"content_hash": "c9ad97106788481e9c1c770e1cf461d3",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 188,
"avg_line_length": 45.954166666666666,
"alnum_prop": 0.6444827273551545,
"repo_name": "harvardinformatics/jobTree",
"id": "0c7ea6c5db3047b706cbd639eb2cb5bad240e5b4",
"size": "11029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchSystems/parasol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "956"
},
{
"name": "Python",
"bytes": "271544"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Dblb(A10BaseClass):
"""Class Description::
DBLB template.
Class dblb supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param server_version: {"optional": true, "enum": ["MSSQL2008", "MSSQL2012", "MySQL"], "type": "string", "description": "'MSSQL2008': MSSQL server 2008 or 2008 R2; 'MSSQL2012': MSSQL server 2012; 'MySQL': MySQL server (any version); ", "format": "enum"}
:param name: {"description": "DBLB template name", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}
:param class_list: {"description": "Specify user/password string class list (Class list name)", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string", "$ref": "/axapi/v3/class-list"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/template/dblb/{name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "name"]
self.b_key = "dblb"
self.a10_url="/axapi/v3/slb/template/dblb/{name}"
self.DeviceProxy = ""
self.uuid = ""
self.server_version = ""
self.name = ""
self.calc_sha1 = {}
self.class_list = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
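# A standalone sketch of the kwargs-to-attributes idiom that closes the
# constructor above, common to the SDK's generated classes; _Configurable is
# a hypothetical class for illustration only.
class _Configurable(object):
    def __init__(self, **kwargs):
        # Defaults first, then any keyword argument overrides them.
        self.name = ""
        self.server_version = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
_obj = _Configurable(name="db-template", server_version="MySQL")
print(_obj.name, _obj.server_version)  # db-template MySQL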
|
{
"content_hash": "09f476c888aa28c2d87eac9dd6da0d1c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 257,
"avg_line_length": 40.53488372093023,
"alnum_prop": 0.6201950659781985,
"repo_name": "a10networks/a10sdk-python",
"id": "8b0395bdd099ad7d8d6314c88669a031f697f505",
"size": "1743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/slb/slb_template_dblb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
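# For programmatic use, the same LaTeX writer can be driven through
# docutils.core.publish_string instead of the command-line front end; a
# sketch, assuming docutils is installed.
from docutils.core import publish_string
latex = publish_string(source='*hello*, world', writer_name='latex')
print(latex[:80])  # opening of the generated LaTeX document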
|
{
"content_hash": "773adf846f79caa50c7bd264183c4f98",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 30.4,
"alnum_prop": 0.6447368421052632,
"repo_name": "ENCODE-DCC/pyencoded-tools",
"id": "688edf3449fd25c6c352fa41ae98c59c6bb3eca5",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permissions_qa_scripts/originals/UPLOADS/tools/bin/rst2latex.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "9064"
},
{
"name": "Batchfile",
"bytes": "1411"
},
{
"name": "Jupyter Notebook",
"bytes": "19265564"
},
{
"name": "Python",
"bytes": "939689"
},
{
"name": "Shell",
"bytes": "5829"
}
],
"symlink_target": ""
}
|
from google.cloud import kms_v1
def sample_list_crypto_keys():
# Create a client
client = kms_v1.KeyManagementServiceClient()
# Initialize request argument(s)
request = kms_v1.ListCryptoKeysRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_crypto_keys(request=request)
# Handle the response
for response in page_result:
print(response)
# [END cloudkms_v1_generated_KeyManagementService_ListCryptoKeys_sync]
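# A sketch of walking the pager page by page instead of flattening it; this
# assumes the same client setup as the sample above, and page_size is set
# only to make the page boundaries visible.
def sample_list_crypto_keys_by_page():
    client = kms_v1.KeyManagementServiceClient()
    request = kms_v1.ListCryptoKeysRequest(
        parent="parent_value",
        page_size=10,
    )
    pager = client.list_crypto_keys(request=request)
    for page in pager.pages:  # one ListCryptoKeysResponse per RPC round trip
        for crypto_key in page.crypto_keys:
            print(crypto_key.name)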
|
{
"content_hash": "d9bde8fb5bd11d06eaed96425415f821",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 24.7,
"alnum_prop": 0.7004048582995951,
"repo_name": "googleapis/python-kms",
"id": "4f9b57a0352ba5a23c5a7c5550fba9eff4c33a64",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/cloudkms_v1_generated_key_management_service_list_crypto_keys_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1280914"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from anaf.core.models import Group, Perspective, ModuleSetting
from models import Ticket, TicketQueue, TicketStatus, ServiceAgent, Service, ServiceLevelAgreement
from anaf.identities.models import Contact, ContactType
import datetime
class ServicesModelsTest(TestCase):
def test_model(self):
"""Test Services models"""
status = TicketStatus(name='TestStatus')
status.save()
self.assertNotEquals(status.id, None)
queue = TicketQueue(name='TestQueue', default_ticket_status=status)
queue.save()
        self.assertNotEquals(queue.id, None)
ticket = Ticket(name='TestTicket', status=status, queue=queue)
ticket.save()
self.assertNotEquals(ticket.id, None)
ticket.delete()
queue.delete()
status.delete()
class ServicesViewsTest(TestCase):
username = "test"
password = "password"
def setUp(self):
self.group, created = Group.objects.get_or_create(name='test')
self.user, created = DjangoUser.objects.get_or_create(username=self.username, is_staff=True)
self.user.set_password(self.password)
self.user.save()
perspective, created = Perspective.objects.get_or_create(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.contact_type = ContactType(name='test')
self.contact_type.set_default_user()
self.contact_type.save()
self.contact = Contact(name='test', contact_type=self.contact_type)
self.contact.set_default_user()
self.contact.save()
self.status = TicketStatus(name='TestStatus')
self.status.set_default_user()
self.status.save()
self.queue = TicketQueue(
name='TestQueue', default_ticket_status=self.status)
self.queue.set_default_user()
self.queue.save()
self.ticket = Ticket(
name='TestTicket', status=self.status, queue=self.queue)
self.ticket.set_default_user()
self.ticket.save()
self.agent = ServiceAgent(related_user=self.user.profile, available_from=datetime.time(9),
available_to=datetime.time(17))
self.agent.set_default_user()
self.agent.save()
self.service = Service(name='test')
self.service.set_default_user()
self.service.save()
self.sla = ServiceLevelAgreement(name='test', service=self.service,
client=self.contact, provider=self.contact)
self.sla.set_default_user()
self.sla.save()
######################################
# Testing views when user is logged in
######################################
def test_index_login(self):
"Test index page with login at /services/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services'))
self.assertEquals(response.status_code, 200)
def test_index_owned(self):
"Test index page with login at /services/owned"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_index_owned'))
self.assertEquals(response.status_code, 200)
def test_index_assigned(self):
"Test index page with login at /services/assigned"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_index_assigned'))
self.assertEquals(response.status_code, 200)
# Queues
def test_queue_add(self):
"Test page with login at /services/queue/add"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_queue_add'))
self.assertEquals(response.status_code, 200)
def test_queue_view(self):
"Test page with login at /services/queue/view/<queue_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_queue_view', args=[self.queue.id]))
self.assertEquals(response.status_code, 200)
def test_queue_edit(self):
"Test page with login at /services/queue/edit/<queue_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_queue_edit', args=[self.queue.id]))
self.assertEquals(response.status_code, 200)
def test_queue_delete(self):
"Test page with login at /services/queue/delete/<queue_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_queue_delete', args=[self.queue.id]))
self.assertEquals(response.status_code, 200)
# Statuses
def test_status_view(self):
"Test index page with login at /services/status/view/<status_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_status_view', args=[self.status.id]))
self.assertEquals(response.status_code, 200)
def test_status_edit(self):
"Test index page with login at /services/status/edit/<status_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_status_edit', args=[self.status.id]))
self.assertEquals(response.status_code, 200)
def test_status_delete(self):
"Test index page with login at /services/status/delete/<status_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_status_delete', args=[self.status.id]))
self.assertEquals(response.status_code, 200)
def test_status_add(self):
"Test index page with login at /services/status/add/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_status_add'))
self.assertEquals(response.status_code, 200)
# Tickets
def test_ticket_add(self):
"Test page with login at /services/ticket/add"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_ticket_add'))
self.assertEquals(response.status_code, 200)
def test_ticket_add_by_queue(self):
"Test page with login at /services/ticket/add/queue/(?P<queue_id>\d+)"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_ticket_add_by_queue', args=[self.queue.id]))
self.assertEquals(response.status_code, 200)
def test_ticket_view(self):
"Test page with login at /services/ticket/view/<ticket_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_ticket_view', args=[self.ticket.id]))
self.assertEquals(response.status_code, 200)
def test_ticket_edit(self):
"Test page with login at /services/ticket/edit/<ticket_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_ticket_edit', args=[self.ticket.id]))
self.assertEquals(response.status_code, 200)
def test_ticket_delete(self):
"Test page with login at /services/ticket/delete/<ticket_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_ticket_delete', args=[self.ticket.id]))
self.assertEquals(response.status_code, 200)
def test_ticket_set_status(self):
"Test page with login at /services/ticket/set/(?P<ticket_id>\d+)/status/(?P<status_id>\d+)"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_ticket_set_status', args=[self.ticket.id, self.status.id]))
self.assertEquals(response.status_code, 200)
# Settings
def test_settings_view(self):
"Test page with login at /services/settings/view"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_settings_view'))
self.assertEquals(response.status_code, 200)
def test_settings_edit(self):
"Test page with login at /services/settings/edit"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_settings_view'))
self.assertEquals(response.status_code, 200)
# Catalogue
def test_service_catalogue(self):
"Test page with login at /services/catalogue"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_service_catalogue'))
self.assertEquals(response.status_code, 200)
# Services
def test_service_view(self):
"Test page with login at /services/service/view/<service_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_service_view', args=[self.service.id]))
self.assertEquals(response.status_code, 200)
def test_service_edit(self):
"Test page with login at /services/service/edit/<service_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_service_edit', args=[self.service.id]))
self.assertEquals(response.status_code, 200)
def test_service_delete(self):
"Test page with login at /services/service/delete/<service_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_service_delete', args=[self.service.id]))
self.assertEquals(response.status_code, 200)
def test_service_add(self):
"Test page with login at /services/service/add"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_service_add'))
self.assertEquals(response.status_code, 200)
# SLAs
def test_sla_index(self):
"Test page with login at /services/sla"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_sla_index'))
self.assertEquals(response.status_code, 200)
def test_sla_view(self):
"Test page with login at /services/sla/view/<sla_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_sla_view', args=[self.sla.id]))
self.assertEquals(response.status_code, 200)
def test_sla_edit(self):
"Test page with login at /services/sla/edit/<sla_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_sla_edit', args=[self.sla.id]))
self.assertEquals(response.status_code, 200)
def test_sla_delete(self):
"Test page with login at /services/sla/delete/<sla_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_sla_delete', args=[self.sla.id]))
self.assertEquals(response.status_code, 200)
def test_sla_add(self):
"Test page with login at /services/sla/add"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_sla_index'))
self.assertEquals(response.status_code, 200)
# Agents
def test_agent_index(self):
"Test page with login at /services/agent"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_agent_index'))
self.assertEquals(response.status_code, 200)
def test_agent_view(self):
"Test page with login at /services/agent/view/<agent_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_agent_view', args=[self.agent.id]))
self.assertEquals(response.status_code, 200)
def test_agent_edit(self):
"Test page with login at /services/agent/edit/<agent_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_agent_edit', args=[self.agent.id]))
self.assertEquals(response.status_code, 200)
def test_agent_delete(self):
"Test page with login at /services/agent/delete/<agent_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('services_agent_delete', args=[self.agent.id]))
self.assertEquals(response.status_code, 200)
def test_agent_add(self):
"Test page with login at /services/agent/add"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('services_agent_add'))
self.assertEquals(response.status_code, 200)
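    # The login-and-assert preamble repeated by every test above could be
    # factored into a helper; a hypothetical sketch, not part of the
    # original suite:
    #
    #     def login(self):
    #         response = self.client.post(
    #             '/accounts/login',
    #             {'username': self.username, 'password': self.password})
    #         self.assertRedirects(response, '/')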
######################################
# Testing views when user is not logged in
######################################
def test_index(self):
"Test index page at /services/"
response = self.client.get(reverse('services'))
# Redirects as unauthenticated
self.assertRedirects(response, reverse('user_login'))
def test_index_owned_out(self):
"Testing /services/owned"
response = self.client.get(reverse('services_index_owned'))
self.assertRedirects(response, reverse('user_login'))
def test_index_assigned_out(self):
"Testing /services/assigned"
response = self.client.get(reverse('services_index_assigned'))
self.assertRedirects(response, reverse('user_login'))
# Queues
def test_queue_add_out(self):
"Testing /services/queue/add"
response = self.client.get(reverse('services_queue_add'))
self.assertRedirects(response, reverse('user_login'))
def test_queue_view_out(self):
"Testing /services/queue/view/<queue_id>"
response = self.client.get(
reverse('services_queue_view', args=[self.queue.id]))
self.assertRedirects(response, reverse('user_login'))
def test_queue_edit_out(self):
"Testing /services/queue/edit/<queue_id>"
response = self.client.get(
reverse('services_queue_edit', args=[self.queue.id]))
self.assertRedirects(response, reverse('user_login'))
def test_queue_delete_out(self):
"Testing /services/queue/delete/<queue_id>"
response = self.client.get(
reverse('services_queue_delete', args=[self.queue.id]))
self.assertRedirects(response, reverse('user_login'))
# Statuses
def test_status_view_out(self):
"Testing /services/status/view/<status_id>"
response = self.client.get(
reverse('services_status_view', args=[self.status.id]))
self.assertRedirects(response, reverse('user_login'))
def test_status_edit_out(self):
"Testing /services/status/edit/<status_id>"
response = self.client.get(
reverse('services_status_edit', args=[self.status.id]))
self.assertRedirects(response, reverse('user_login'))
def test_status_delete_out(self):
"Testing /services/status/delete/<status_id>"
response = self.client.get(
reverse('services_status_delete', args=[self.status.id]))
self.assertRedirects(response, reverse('user_login'))
def test_status_add_out(self):
"Testing /services/status/add/"
response = self.client.get(reverse('services_status_add'))
self.assertRedirects(response, reverse('user_login'))
# Tickets
def test_ticket_add_out(self):
"Testing /services/ticket/add"
response = self.client.get(reverse('services_ticket_add'))
self.assertRedirects(response, reverse('user_login'))
def test_ticket_add_by_queue_out(self):
"Testing /services/ticket/add/queue/(?P<queue_id>\d+)"
response = self.client.get(
reverse('services_ticket_add_by_queue', args=[self.queue.id]))
self.assertRedirects(response, reverse('user_login'))
def test_ticket_view_out(self):
"Testing /services/ticket/view/<ticket_id>"
response = self.client.get(
reverse('services_ticket_view', args=[self.ticket.id]))
self.assertRedirects(response, reverse('user_login'))
def test_ticket_edit_out(self):
"Testing /services/ticket/edit/<ticket_id>"
response = self.client.get(
reverse('services_ticket_edit', args=[self.ticket.id]))
self.assertRedirects(response, reverse('user_login'))
def test_ticket_delete_out(self):
"Testing /services/ticket/delete/<ticket_id>"
response = self.client.get(
reverse('services_ticket_delete', args=[self.ticket.id]))
self.assertRedirects(response, reverse('user_login'))
def test_ticket_set_status_out(self):
"Testing /services/ticket/set/(?P<ticket_id>\d+)/status/(?P<status_id>\d+)"
response = self.client.get(
reverse('services_ticket_set_status', args=[self.ticket.id, self.status.id]))
self.assertRedirects(response, reverse('user_login'))
# Settings
def test_settings_view_out(self):
"Testing /services/settings/view"
response = self.client.get(reverse('services_settings_view'))
self.assertRedirects(response, reverse('user_login'))
def test_settings_edit_out(self):
"Testing /services/settings/edit"
        response = self.client.get(reverse('services_settings_edit'))
self.assertRedirects(response, reverse('user_login'))
# Catalogue
def test_service_catalogue_out(self):
"Testing /services/catalogue"
response = self.client.get(reverse('services_service_catalogue'))
self.assertRedirects(response, reverse('user_login'))
# Services
def test_service_view_out(self):
"Testing /services/service/view/<service_id>"
response = self.client.get(
reverse('services_service_view', args=[self.service.id]))
self.assertRedirects(response, reverse('user_login'))
def test_service_edit_out(self):
"Testing /services/service/edit/<service_id>"
response = self.client.get(
reverse('services_service_edit', args=[self.service.id]))
self.assertRedirects(response, reverse('user_login'))
def test_service_delete_out(self):
"Testing /services/service/delete/<service_id>"
response = self.client.get(
reverse('services_service_delete', args=[self.service.id]))
self.assertRedirects(response, reverse('user_login'))
def test_service_add_out(self):
"Testing /services/service/add"
response = self.client.get(reverse('services_service_add'))
self.assertRedirects(response, reverse('user_login'))
# SLAs
def test_sla_index_out(self):
"Testing /services/sla"
response = self.client.get(reverse('services_sla_index'))
self.assertRedirects(response, reverse('user_login'))
def test_sla_view_out(self):
"Testing /services/sla/view/<sla_id>"
response = self.client.get(
reverse('services_sla_view', args=[self.sla.id]))
self.assertRedirects(response, reverse('user_login'))
def test_sla_edit_out(self):
"Testing /services/sla/edit/<sla_id>"
response = self.client.get(
reverse('services_sla_edit', args=[self.sla.id]))
self.assertRedirects(response, reverse('user_login'))
def test_sla_delete_out(self):
"Testing /services/sla/delete/<sla_id>"
response = self.client.get(
reverse('services_sla_delete', args=[self.sla.id]))
self.assertRedirects(response, reverse('user_login'))
def test_sla_add_out(self):
"Testing /services/sla/add"
        response = self.client.get(reverse('services_sla_add'))
self.assertRedirects(response, reverse('user_login'))
# Agents
def test_agent_index_out(self):
"Testing /services/agent"
response = self.client.get(reverse('services_agent_index'))
self.assertRedirects(response, reverse('user_login'))
def test_agent_view_out(self):
"Testing /services/agent/view/<agent_id>"
response = self.client.get(
reverse('services_agent_view', args=[self.agent.id]))
self.assertRedirects(response, reverse('user_login'))
def test_agent_edit_out(self):
"Testing /services/agent/edit/<agent_id>"
response = self.client.get(
reverse('services_agent_edit', args=[self.agent.id]))
self.assertRedirects(response, reverse('user_login'))
def test_agent_delete_out(self):
"Test page with login at /services/agent/delete/<agent_id>"
response = self.client.get(
reverse('services_agent_delete', args=[self.agent.id]))
self.assertRedirects(response, reverse('user_login'))
def test_agent_add_out(self):
"Test page with login at /services/agent/add"
response = self.client.get(reverse('services_agent_add'))
self.assertRedirects(response, reverse('user_login'))
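    # --- Refactoring sketch (illustrative, not part of the original suite) ---
    # Every authenticated test above repeats the same login POST; a small
    # helper on the test case would remove the duplication (the name '_login'
    # is hypothetical, 'self.username'/'self.password' come from setUp):
    #
    #     def _login(self):
    #         response = self.client.post(
    #             '/accounts/login',
    #             {'username': self.username, 'password': self.password})
    #         self.assertRedirects(response, '/')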
|
{
"content_hash": "9bf7e21b50e50063131a86b1b13949e5",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 100,
"avg_line_length": 44.473504273504275,
"alnum_prop": 0.6137909828189261,
"repo_name": "tovmeod/anaf",
"id": "70bb237a33a7b8508cfd52822d9b158f32d4e008",
"size": "26017",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "anaf/services/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
}
|
import ipywidgets as widgets
class FormValue(widgets.HBox):
def __init__(self, name, **kwargs):
self.name = name
width = kwargs.get('width', 'auto')
self._ncb = kwargs.get('cb')
# accept either 'description' or 'desc'
desc = kwargs.get('desc', kwargs.get('description', ''))
form_item_layout = widgets.Layout(
display='flex',
flex_flow='row',
border='solid 1px lightgray',
justify_content='space-between',
padding='3px',
width=width
)
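        # note: subclasses are expected to have created self.dd (the wrapped
        # input widget) before calling FormValue.__init__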
self.dd.layout = {'width': 'initial'}
self.dd.disabled = kwargs.get('disabled', False)
self.dd.observe(self._cb, names='value')
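        # render the label via Bootstrap popover attributes so the description
        # shows as a tooltip in the notebook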
popup = '<div data-toggle="popover" title="%s" data-container="body">%s</div>' % (desc, name)
label = widgets.HTML(value=popup, layout=widgets.Layout(flex='2 1 auto'))
widgets.HBox.__init__(self, [label, self.dd], layout=form_item_layout)
def _cb(self, w):
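        # forward the trait-change event to the user callback as (self, new_value)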
if self._ncb is not None:
return self._ncb(self, w['new'])
@property
def cb(self):
return self._ncb
@cb.setter
def cb(self, newcb):
self._ncb = newcb
@property
def value(self):
return self.dd.value
@value.setter
def value(self, newval):
self.dd.value = newval
@property
def disabled(self):
return self.dd.disabled
@disabled.setter
def disabled(self, newval):
self.dd.disabled = newval
@property
def visible(self):
return self.dd.layout.visibility
@visible.setter
def visible(self, newval):
if newval:
self.dd.layout.visibility = 'visible'
self.layout.visibility = 'visible'
return
self.dd.layout.visibility = 'hidden'
self.layout.visibility = 'hidden'
class String(FormValue):
def __init__(self, name, value, **kwargs):
self.dd = widgets.Text(value=value)
FormValue.__init__(self, name, **kwargs)
class Dropdown(FormValue):
def __init__(self, name, options, value, **kwargs):
self.dd = widgets.Dropdown(options=options, value=value)
FormValue.__init__(self, name, **kwargs)
mw = '{}ch'.format(max(map(len, self.dd.options))+4)
self.dd.layout = {'width':'auto', 'min_width': mw}
class Checkbox(FormValue):
def __init__(self, name, **kwargs):
value = kwargs.get('value', False)
self.dd = widgets.Checkbox(value=value)
FormValue.__init__(self, name, **kwargs)
class Radiobuttons(FormValue):
def __init__(self, name, options, value, **kwargs):
self.dd = widgets.RadioButtons(options=options, value=value)
FormValue.__init__(self, name, **kwargs)
class Togglebuttons(FormValue):
def __init__(self, name, options, value, **kwargs):
self.dd = widgets.ToggleButtons(options=options, value=value)
FormValue.__init__(self, name, **kwargs)
# self.dd.style.button_width='{}ch'.format(max(map(len, self.dd.options))+4)
        self.dd.style = {'button_width': 'initial'}
class Text(FormValue):
def __init__(self, name, value='', **kwargs):
self.dd = widgets.Textarea(value=value)
FormValue.__init__(self, name, **kwargs)
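# --- Usage sketch (illustrative, not part of the original module) ---
# Shows how the subclasses above compose: each wraps a labeled control and
# reports changes through the ``cb`` callback, which receives the FormValue
# instance and the new value. The names below ('on_units', the option list)
# are hypothetical, and a Jupyter environment is assumed so the widgets render.
if __name__ == '__main__':
    from IPython.display import display

    def on_units(widget, new_value):
        # 'widget' is the FormValue wrapper; 'new_value' the freshly picked option
        print('%s changed to %s' % (widget.name, new_value))

    units = Dropdown('Units', ['m', 'cm', 'mm'], 'm',
                     desc='Length units', cb=on_units)
    run_now = Checkbox('Run immediately', value=False)
    display(widgets.VBox([units, run_now]))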
|
{
"content_hash": "8f4fdc9b145b11c036746437c93e7b94",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 101,
"avg_line_length": 30.01818181818182,
"alnum_prop": 0.5860084797092671,
"repo_name": "martin-hunt/hublib",
"id": "7b09c44e1da6325ca9c8fd832786595162b61042",
"size": "3302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hublib/ui/formvalue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3123140"
},
{
"name": "Python",
"bytes": "145798"
}
],
"symlink_target": ""
}
|