"""
SAASpy
An open-source library to read and analyze images from the FOXSI Solar
Alignment and Aspect System.
"""
from __future__ import absolute_import
|
{
"content_hash": "e9ba47e0df68976884006f563e4f6394",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.7612903225806451,
"repo_name": "foxsi/SAASpy",
"id": "9420a4cbc25df2f85dca2dd43afaed96d64c1954",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saaspy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7968"
}
],
"symlink_target": ""
}
|
import datetime
import os
from io import BytesIO
from PIL import Image
from PIL.Image import isImageType
from django.conf import settings
from django.utils.translation import ugettext as _
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE_TYPE
from pptx.enum.text import PP_PARAGRAPH_ALIGNMENT
from pptx.util import Inches, Pt
SEEDSOURCE_TITLE = getattr(settings, 'SEEDSOURCE_TITLE', _('Seedlot Selection Tool'))
class PPTCreator(object):
def __init__(self):
self.presentation = None
self.width = None
self.height = None
def degree_sign(self, s):
        # Convert the HTML '&deg;' entity into a real degree sign
        return s.replace('&deg;', '°')
def add_text(self, text_frame, lines):
for line in lines:
paragraph = text_frame.add_paragraph()
for segment in line:
text, size, bold = segment
run = paragraph.add_run()
run.text = text
run.font.size = Pt(size)
run.font.bold = bold
def get_transfer_method_text(self, method, center):
if method != 'seedzone':
method_text = _('Custom transfer limits, climatic center based on the selected location')
elif center == 'zone':
method_text = _('Transfer limits and climatic center based on seed zone')
else:
method_text = _('Transfer limits based on seed zone, climatic center based on the selected location')
return method_text
def replace_shape_image(self, shape, image, slide):
im_bytes = BytesIO()
image.save(im_bytes, 'PNG')
new_shape = slide.shapes.add_picture(im_bytes, Inches(0.5), Inches(0.5), Inches(9), Inches(6))
old_pic = shape._element
new_pic = new_shape._element
old_pic.addnext(new_pic)
old_pic.getparent().remove(old_pic)
def replace_shape_text(self, shape, text):
paragraph = shape.text_frame.paragraphs[0]
for run in paragraph.runs[1:]:
paragraph._p.remove(run._r)
paragraph.runs[0].text = text
def add_title_text(self, slide, title):
shape = slide.shapes.add_textbox(Inches(.41), Inches(.23), Inches(9.18), Inches(.5))
tf = shape.text_frame
tf.text = title
paragraph = tf.paragraphs[0]
paragraph.font.size = Pt(24)
paragraph.alignment = PP_PARAGRAPH_ALIGNMENT.CENTER
def render_template(self, context):
for slide in self.presentation.slides:
self.render_template_slide(slide, context)
def render_template_slide(self, slide, context):
for shape in slide.shapes:
if shape.name not in context:
continue
value = context[shape.name]
if callable(value):
value(shape)
elif shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
if not isImageType(value):
raise TypeError('Template value {} must be an Image type'.format(shape.name))
self.replace_shape_image(shape, value, slide)
elif shape.shape_type == MSO_SHAPE_TYPE.TEXT_BOX:
if not isinstance(value, str):
raise TypeError('Template value {} must be a string'.format(shape.name))
self.replace_shape_text(shape, value)
def add_slide(self):
slide = self.presentation.slides.add_slide(self.presentation.slide_layouts[0])
# Delete placeholders
for placeholder in (slide.placeholders):
placeholder.element.getparent().remove(placeholder.element)
return slide
def create_overview_slide(self, context):
objective = context['objective']
location_label = context['location_label']
point = context['point']
elevation = context['elevation']
seedlot_year = context['seedlot_year']
site_year = context['site_year']
site_model = context['site_model']
method = context['method']
center = context['center']
location = (point['y'], point['x'])
data_url = 'http://cfcg.forestry.ubc.ca/projects/climate-data/climatebcwna/#ClimateWNA'
method_text = self.get_transfer_method_text(method, center)
slide = self.add_slide()
self.add_title_text(slide, '{} - {}'.format(SEEDSOURCE_TITLE, datetime.datetime.today().strftime('%m/%d/%Y')))
# Body
shape = slide.shapes.add_textbox(Inches(.65), Inches(.73), Inches(8.69), Inches(6.19))
shape.text_frame.word_wrap = True
self.add_text(shape.text_frame, (
((_('Objective:') + ' ', 18, True), (objective, 18, False)),
(('', 18, False),),
(('{}: '.format(location_label), 18, True), ('{}, {}'.format(*location), 18, False)),
((_('Elevation:') + ' ', 18, True), (_('{elevation} ft').format(elevation=elevation), 18, False)),
(('', 18, False),),
((_('Climate scenarios'), 24, True),),
((_('Seedlot climate:') + ' ', 18, True), (seedlot_year, 18, False)),
            ((_('Planting site climate:') + ' ', 18, True), (' '.join((site_year, site_model or '')), 18, False)),
(('', 18, False),),
((_('Transfer limit method:') + ' ', 18, True), (method_text, 18, False)),
(('\n', 18, False),),
((_('Data URL:') + ' ', 12, True), (data_url, 12, False))
))
# Hyperlink URL
shape.text_frame.paragraphs[-1].runs[-1].hyperlink.address = data_url
def create_variables_slide(self, variables):
slide = self.add_slide()
self.add_title_text(slide, _('Climate Variables'))
num_rows = len(variables) + 1
table = slide.shapes.add_table(
num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
).table
cols = table.columns
cols[0].width = Inches(4.59)
cols[1].width = Inches(2.06)
cols[2].width = Inches(2.4)
# Headers
table.cell(0, 0).text = _('Variable')
table.cell(0, 1).text = _('Center')
table.cell(0, 2).text = _('Transfer limit') + ' (+/-)'
for i, variable in enumerate(variables, start=1):
units = self.degree_sign(variable['units'])
center_label = ' '.join((variable['value'], units))
limit_label = '{} {}{}'.format(
variable['limit'],
units,
' ({})'.format(_('modified')) if variable['modified'] else ''
)
table.cell(i, 0).text = variable['label']
table.cell(i, 1).text = center_label
table.cell(i, 2).text = limit_label
def create_custom_functions_slide(self, custom_functions):
slide = self.add_slide()
self.add_title_text(slide, _('Custom Functions'))
num_rows = len(custom_functions) + 1
table = slide.shapes.add_table(
num_rows, 4, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
).table
cols = table.columns
cols[0].width = Inches(2.1)
cols[1].width = Inches(3.75)
        cols[2].width = Inches(0.6)
        cols[3].width = Inches(0.9)
# Headers
table.cell(0, 0).text = _('Name')
table.cell(0, 1).text = _('Function')
table.cell(0, 2).text = _('Center')
table.cell(0, 3).text = _('Transfer limit') + ' (+/-)'
for i, custom_function in enumerate(custom_functions, start=1):
table.cell(i, 0).text = custom_function['name']
table.cell(i, 1).text = custom_function['func']
table.cell(i, 2).text = str(custom_function['value'])
table.cell(i, 3).text = str(custom_function['transfer'])
def create_constraints_slide(self, constraints):
slide = self.add_slide()
self.add_title_text(slide, _('Constraints'))
num_rows = len(constraints) + 1
table = slide.shapes.add_table(
num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
).table
cols = table.columns
cols[0].width = Inches(4.59)
cols[1].width = Inches(2.06)
cols[2].width = Inches(2.4)
# Headers
table.cell(0, 0).text = _('Constraint')
table.cell(0, 1).text = _('Value')
table.cell(0, 2).text = '{} (+/-)'.format(_('Range'))
for i, constraint in enumerate(constraints, start=1):
if constraint['type'] == 'shapefile':
table.cell(i, 0).text = constraint['label']
table.cell(i, 1)._tc.set('gridSpan', str(2))
table.cell(i, 1).text = constraint['filename']
else:
table.cell(i, 0).text = constraint['label']
table.cell(i, 1).text = constraint['value']
table.cell(i, 2).text = constraint['range']
def add_presenter_notes(self, slide, context):
text_frame = slide.notes_slide.notes_text_frame
objective = context['objective']
location_label = context['location_label']
point = context['point']
elevation = context['elevation']
seedlot_year = context['seedlot_year']
site_year = context['site_year']
site_model = context['site_model']
method = context['method']
center = context['center']
location = (point['y'], point['x'])
method_text = self.get_transfer_method_text(method, center)
lines = [
((_('Objective:') + ' ', 12, True), (objective, 12, False)),
(('{}: '.format(location_label), 12, True), ('{}, {}'.format(*location), 12, False)),
((_('Elevation:') + ' ', 12, True), ('{} ft'.format(elevation), 12, False)),
((_('Climate Scenarios'), 12, True),),
((' {} '.format(_('Seedlot climate:')), 12, True), (seedlot_year, 12, False)),
((' {} '.format(_('Planting site climate:')), 12, True), ('{} {}'.format(site_year, site_model or ''), 12, False)),
((_('Transfer limit method:') + ' ', 12, True), (method_text, 12, False))
]
if method == 'seedzone':
band = context['band']
band_str = ", {}' - {}'".format(band[0], band[1]) if band else ''
lines += [
((_('Species:') + ' ', 12, True), (context['species'], 12, False)),
((_('Seed zone:') + ' ', 12, True), (context['zone'] + band_str, 12, False))
]
# Variables table
variables = context['variables']
name_width = max([len(_('Variable'))] + [len(x['label']) for x in variables]) + 3
center_width = max(
[len(_('Center'))] + [len(' '.join([str(x['value']), self.degree_sign(x['units'])])) for x in variables]
) + 3
transfer_width = max(
[len(_('Transfer limit') + ' (+/-)')] +
[
len('{} {}{}'.format(
x['limit'],
self.degree_sign(x['units']),
' ({})'.format(_('modified')) if x['modified'] else '')
)
for x in variables
]
)
lines += [
(('', 12, False),),
((_('Variables'), 12, True),),
((''.join([
_('Variable').ljust(name_width),
_('Center').ljust(center_width),
                (_('Transfer limit') + ' (+/-)').ljust(transfer_width)
]), 12, False),),
(('-' * (name_width + center_width + transfer_width), 12, False),)
]
for variable in context['variables']:
units = self.degree_sign(variable['units'])
lines += [
((''.join([
variable['label'].ljust(name_width),
'{} {}'.format(variable['value'], units).ljust(center_width),
'{} {}{}'.format(
variable['limit'],
units,
' ({})'.format(_('modified')) if variable['modified'] else ''
)
]), 12, False),)
]
if context['constraints']:
# Constraints table
constraints = context['constraints']
            name_width = max([len(_('Constraint'))] + [len(x['label']) for x in constraints]) + 3
value_width = max(
[len(_('Value'))] +
[len(x['value']) for x in [c for c in constraints if c['type'] != 'shapefile']]
) + 3
range_width = max(
[len(_('Range') + ' (+/-)')] +
[len(x['range']) for x in [c for c in constraints if c['type'] != 'shapefile']]
) + 3
# Ensure we have room for shapefile name, if there is one
shape_constraint = [c for c in constraints if c['type'] == 'shapefile']
if shape_constraint:
filename_width = len(shape_constraint[0]['filename'])
if filename_width > value_width + range_width:
range_width = filename_width - value_width
lines += [
(('', 12, False),),
((_('Constraints'), 12, True),),
((''.join([
_('Constraint').ljust(name_width),
_('Value').ljust(value_width),
                    (_('Range') + ' (+/-)').ljust(range_width)
]), 12, False),),
(('-' * (name_width + value_width + range_width), 12, False),)
]
for constraint in constraints:
if constraint['type'] == 'shapefile':
lines += [
((''.join([
constraint['label'].ljust(name_width),
constraint['filename'].ljust(value_width + range_width)
]), 12, False),)
]
else:
lines += [
((''.join([
constraint['label'].ljust(name_width),
constraint['value'].ljust(value_width),
constraint['range'].ljust(range_width)
]), 12, False),)
]
self.add_text(text_frame, lines)
for paragraph in text_frame.paragraphs:
paragraph.font.name = 'Andale Mono'
def get_presentation(self, context):
self.presentation = Presentation(
os.path.join(os.path.dirname(__file__), 'templates', 'pptx', 'report.pptx')
)
self.width = Inches(self.presentation.slide_width / Inches(1))
self.height = Inches(self.presentation.slide_height / Inches(1))
self.render_template(dict(
coord_bottom=self.degree_sign(context['south']),
coord_right=self.degree_sign(context['east']),
coord_left=self.degree_sign(context['west']),
coord_top=self.degree_sign(context['north']),
scale_label=context['scale'],
map_image=Image.open(context['image_data']),
attribution=_('Generated {date} by the Seedlot Selection Tool').format(
date=datetime.datetime.today().strftime('%m/%d/%Y')
)
))
self.create_overview_slide(context)
self.create_variables_slide(context['variables'])
if context['custom_functions']:
self.create_custom_functions_slide(context['custom_functions'])
if context['constraints']:
self.create_constraints_slide(context['constraints'])
self.add_presenter_notes(self.presentation.slides[0], context)
return self.presentation
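# A minimal, hypothetical usage sketch (not part of the original module). The
# context keys are inferred from the methods above; real values come from the
# report view in seedsource-core, and 'map.png' is a placeholder file name.
#
#   creator = PPTCreator()
#   pres = creator.get_presentation({
#       'north': '46.2', 'south': '45.8', 'east': '-122.1', 'west': '-122.9',
#       'scale': '1:500,000', 'image_data': open('map.png', 'rb'),
#       'objective': 'Find suitable seedlots', 'location_label': 'Location',
#       'point': {'x': -122.5, 'y': 46.0}, 'elevation': 1200,
#       'seedlot_year': '1961_1990', 'site_year': '2041_2070', 'site_model': 'RCP4.5',
#       'method': 'seedzone', 'center': 'zone', 'band': None,
#       'species': 'Douglas-fir', 'zone': 'Zone 1',
#       'variables': [], 'custom_functions': [], 'constraints': [],
#   })
#   pres.save('report.pptx')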
|
{
"content_hash": "286fe26bf550fb0cd85ef9aaae4d091a",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 128,
"avg_line_length": 40.05626598465473,
"alnum_prop": 0.5193461882262802,
"repo_name": "consbio/seedsource-core",
"id": "f95a6d0baa2beee896d7970ddce47d74f7937040",
"size": "15663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seedsource_core/django/seedsource/ppt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "225237"
},
{
"name": "HTML",
"bytes": "21781"
},
{
"name": "Python",
"bytes": "192673"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from fserver.models import *
class PairAdmin(admin.ModelAdmin):
list_display = ('name', 'pip_value', 'point', 'spread', 'is_active')
list_filter = ('spread', 'is_active')
class SignalAdmin(admin.ModelAdmin):
list_display = ('id', 'account_name', 'system', 'pair', 'tf', 'status', 'direction', 'client_time', 'server_time')
list_filter = ('system', 'account_name', 'status', 'pair', 'tf')
class SystemPairAdmin(admin.ModelAdmin):
list_display = ('system', 'pair', 'tf', 'is_active')
list_filter = ('system', 'pair', 'tf', 'is_active')
class AccountSystemAdmin(admin.ModelAdmin):
list_display = ('account', 'system', 'is_active')
list_filter = ('account', 'system', 'is_active')
class SystemAdmin(admin.ModelAdmin):
list_display = ('name', 'version', 'is_active')
list_filter = ('is_active',)
class AccountAdmin(admin.ModelAdmin):
list_display = ('broker', 'number', 'type', 'last_ping', 'is_active')
list_filter = ('broker', 'type', 'is_active')
class BrokerAdmin(admin.ModelAdmin):
list_display = ('name', 'type', 'min_lot', 'max_lot', 'timezone', 'is_active')
list_filter = ('type', 'is_active')
class SystemRiskAdmin(admin.ModelAdmin):
list_display = ('system', 'broker', 'account', 'risk_per_trade')
list_filter = ('system', 'broker', 'risk_per_trade')
class SystemLogAdmin(admin.ModelAdmin):
list_display = ('signal_id', 'action', 'comment', 'pair', 'tf', 'created_at')
# list_filter = ('pair', 'tf', 'account')
# search_fields = ['signal_id']
class TradeAdmin(admin.ModelAdmin):
list_display = ('signal', 'ticket', 'size', 'profit', 'status', 'last_profit', 'created_at')
list_filter = ('status', 'account')
class BarAdmin(admin.ModelAdmin):
list_display = ('pair', 'tf', 'open', 'high', 'low', 'close', 'time')
list_filter = ('pair', 'tf')
class SignalAccountAdmin(admin.ModelAdmin):
list_display = ('account', 'signal')
list_filter = ('account',)
class TradeUpdateAdmin(admin.ModelAdmin):
list_display = ('trade', 'net_profit', 'gross_profit', 'swap', 'commision', 'created_at')
admin.site.register(Broker, BrokerAdmin)
admin.site.register(Account, AccountAdmin)
admin.site.register(System, SystemAdmin)
admin.site.register(SystemRisk, SystemRiskAdmin)
admin.site.register(Pair, PairAdmin)
admin.site.register(SystemPair, SystemPairAdmin)
admin.site.register(Signal, SignalAdmin)
admin.site.register(Trade, TradeAdmin)
admin.site.register(AccountSystem, AccountSystemAdmin)
admin.site.register(SystemLog, SystemLogAdmin)
admin.site.register(Bar, BarAdmin)
admin.site.register(SignalAccount, SignalAccountAdmin)
admin.site.register(TradeUpdate, TradeUpdateAdmin)
|
{
"content_hash": "fadfbd4caccbe3bee9d5372245105e97",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 118,
"avg_line_length": 32.10588235294118,
"alnum_prop": 0.68083547086845,
"repo_name": "yezooz/fserver",
"id": "85cbd3eb6baf93a2d4ea155704898735c38b701e",
"size": "3849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "writer/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "17361"
},
{
"name": "CSS",
"bytes": "2031126"
},
{
"name": "HTML",
"bytes": "2032072"
},
{
"name": "JavaScript",
"bytes": "8424854"
},
{
"name": "PHP",
"bytes": "100476"
},
{
"name": "Python",
"bytes": "171480"
},
{
"name": "Shell",
"bytes": "9469"
}
],
"symlink_target": ""
}
|
import copy
import uuid
from lxml import etree
from webob import exc
from nova.api.openstack.compute.contrib import instance_actions
from nova.compute import api as compute_api
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import fake_server_actions
FAKE_UUID = fake_server_actions.FAKE_UUID
FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
def format_action(action):
'''Remove keys that aren't serialized.'''
to_delete = ('id', 'finish_time', 'created_at', 'updated_at', 'deleted_at',
'deleted')
for key in to_delete:
if key in action:
del(action[key])
if 'start_time' in action:
# NOTE(danms): Without WSGI above us, these will be just stringified
action['start_time'] = str(action['start_time'].replace(tzinfo=None))
for event in action.get('events', []):
format_event(event)
return action
def format_event(event):
'''Remove keys that aren't serialized.'''
to_delete = ('id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
'action_id')
for key in to_delete:
if key in event:
del(event[key])
if 'start_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['start_time'] = str(event['start_time'].replace(tzinfo=None))
if 'finish_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['finish_time'] = str(event['finish_time'].replace(tzinfo=None))
return event
class InstanceActionsPolicyTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsPolicyTest, self).setUp()
self.controller = instance_actions.InstanceActionsController()
def test_list_actions_restricted_by_project(self):
rules = policy.Rules({'compute:get': policy.parse_rule(''),
'compute_extension:instance_actions':
policy.parse_rule('project_id:%(project_id)s')})
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
self.assertRaises(exception.Forbidden, self.controller.index, req,
str(uuid.uuid4()))
def test_get_action_restricted_by_project(self):
rules = policy.Rules({'compute:get': policy.parse_rule(''),
'compute_extension:instance_actions':
policy.parse_rule('project_id:%(project_id)s')})
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
self.assertRaises(exception.Forbidden, self.controller.show, req,
str(uuid.uuid4()), '1')
class InstanceActionsTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsTest, self).setUp()
self.controller = instance_actions.InstanceActionsController()
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
return {'uuid': instance_uuid}
def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
return {'name': 'fake', 'project_id': context.project_id}
self.stubs.Set(compute_api.API, 'get', fake_get)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
def test_list_actions(self):
def fake_get_actions(context, uuid):
actions = []
for act in self.fake_actions[uuid].itervalues():
action = models.InstanceAction()
action.update(act)
actions.append(action)
return actions
self.stubs.Set(db, 'actions_get', fake_get_actions)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
res_dict = self.controller.index(req, FAKE_UUID)
for res in res_dict['instanceActions']:
fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
self.assertEqual(format_action(fake_action), format_action(res))
def test_get_action_with_events_allowed(self):
def fake_get_action(context, uuid, request_id):
action = models.InstanceAction()
action.update(self.fake_actions[uuid][request_id])
return action
def fake_get_events(context, action_id):
events = []
for evt in self.fake_events[action_id]:
event = models.InstanceActionEvent()
event.update(evt)
events.append(event)
return events
self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
self.stubs.Set(db, 'action_events_get', fake_get_events)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1',
use_admin_context=True)
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
fake_events = self.fake_events[fake_action['id']]
fake_action['events'] = fake_events
self.assertEqual(format_action(fake_action),
format_action(res_dict['instanceAction']))
def test_get_action_with_events_not_allowed(self):
def fake_get_action(context, uuid, request_id):
return self.fake_actions[uuid][request_id]
def fake_get_events(context, action_id):
return self.fake_events[action_id]
self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
self.stubs.Set(db, 'action_events_get', fake_get_events)
rules = policy.Rules({'compute:get': policy.parse_rule(''),
'compute_extension:instance_actions':
policy.parse_rule(''),
'compute_extension:instance_actions:events':
policy.parse_rule('is_admin:True')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
self.assertEqual(format_action(fake_action),
format_action(res_dict['instanceAction']))
def test_action_not_found(self):
def fake_no_action(context, uuid, action_id):
return None
self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, FAKE_REQUEST_ID)
def test_index_instance_not_found(self):
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=instance_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
FAKE_UUID)
def test_show_instance_not_found(self):
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=instance_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/fake')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, 'fake')
class InstanceActionsSerializerTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsSerializerTest, self).setUp()
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
def _verify_instance_action_attachment(self, attach, tree):
for key in attach.keys():
if key != 'events':
self.assertEqual(attach[key], tree.get(key),
'%s did not match' % key)
def _verify_instance_action_event_attachment(self, attach, tree):
for key in attach.keys():
self.assertEqual(attach[key], tree.get(key),
'%s did not match' % key)
def test_instance_action_serializer(self):
serializer = instance_actions.InstanceActionTemplate()
action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
text = serializer.serialize({'instanceAction': action})
tree = etree.fromstring(text)
action = format_action(action)
self.assertEqual('instanceAction', tree.tag)
self._verify_instance_action_attachment(action, tree)
found_events = False
for child in tree:
if child.tag == 'events':
found_events = True
self.assertFalse(found_events)
def test_instance_action_events_serializer(self):
serializer = instance_actions.InstanceActionTemplate()
action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
event = self.fake_events[action['id']][0]
action['events'] = [dict(event), dict(event)]
text = serializer.serialize({'instanceAction': action})
tree = etree.fromstring(text)
action = format_action(action)
self.assertEqual('instanceAction', tree.tag)
self._verify_instance_action_attachment(action, tree)
event = format_event(event)
found_events = False
for child in tree:
if child.tag == 'events':
found_events = True
for key in event:
self.assertEqual(event[key], child.get(key))
self.assertTrue(found_events)
def test_instance_actions_serializer(self):
serializer = instance_actions.InstanceActionsTemplate()
action_list = self.fake_actions[FAKE_UUID].values()
text = serializer.serialize({'instanceActions': action_list})
tree = etree.fromstring(text)
action_list = [format_action(action) for action in action_list]
self.assertEqual('instanceActions', tree.tag)
self.assertEqual(len(action_list), len(tree))
for idx, child in enumerate(tree):
self.assertEqual('instanceAction', child.tag)
request_id = child.get('request_id')
self._verify_instance_action_attachment(
self.fake_actions[FAKE_UUID][request_id],
child)
|
{
"content_hash": "9dcf347f623e477dae5128bf2c1b7319",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 79,
"avg_line_length": 43.934306569343065,
"alnum_prop": 0.6005150357202194,
"repo_name": "CiscoSystems/nova",
"id": "1a85fedc9eb4e1a955518619b359b4a410a09b8b",
"size": "12671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_instance_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13926229"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
import itertools
import re
import socket
import time
from collections import defaultdict
import supervisor.xmlrpc
from six.moves import xmlrpc_client as xmlrpclib
from datadog_checks.base import AgentCheck
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '9001'
DEFAULT_SOCKET_IP = 'http://127.0.0.1'
DD_STATUS = {
'STOPPED': AgentCheck.CRITICAL,
'STARTING': AgentCheck.UNKNOWN,
'RUNNING': AgentCheck.OK,
'BACKOFF': AgentCheck.CRITICAL,
'STOPPING': AgentCheck.CRITICAL,
'EXITED': AgentCheck.CRITICAL,
'FATAL': AgentCheck.CRITICAL,
'UNKNOWN': AgentCheck.UNKNOWN,
}
PROCESS_STATUS = {AgentCheck.CRITICAL: 'down', AgentCheck.OK: 'up', AgentCheck.UNKNOWN: 'unknown'}
SERVER_TAG = 'supervisord_server'
PROCESS_TAG = 'supervisord_process'
FORMAT_TIME = lambda x: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x)) # noqa E731
SERVER_SERVICE_CHECK = 'supervisord.can_connect'
PROCESS_SERVICE_CHECK = 'supervisord.process.status'
# Example supervisord versions: http://supervisord.org/changes.html
# - 4.0.0
# - 3.0
# - 3.0b2
# - 3.0a12
SUPERVISORD_VERSION_PATTERN = re.compile(
r"""
(?P<major>0|[1-9]\d*)
\.
(?P<minor>0|[1-9]\d*)
(\.
(?P<patch>0|[1-9]\d*)
)?
(?:(?P<release>
[a-zA-Z][0-9]*
))?
""",
re.VERBOSE,
)
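# For example (illustrative, not from the original file):
#   SUPERVISORD_VERSION_PATTERN.match('3.0a12').groupdict()
#   -> {'major': '3', 'minor': '0', 'patch': None, 'release': 'a12'}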
class SupervisordCheck(AgentCheck):
def check(self, instance):
if instance.get('user'):
self._log_deprecation('_config_renamed', 'user', 'username')
if instance.get('pass'):
self._log_deprecation('_config_renamed', 'pass', 'password')
server_name = instance.get('name')
if not server_name or not server_name.strip():
raise Exception("Supervisor server name not specified in yaml configuration.")
instance_tags = instance.get('tags', [])
instance_tags.append('{}:{}'.format(SERVER_TAG, server_name))
supe = self._connect(instance)
count_by_status = defaultdict(int)
# Gather all process information
try:
processes = supe.getAllProcessInfo()
except xmlrpclib.Fault as error:
raise Exception(
'An error occurred while reading process information: {} {}'.format(error.faultCode, error.faultString)
)
except socket.error:
host = instance.get('host', DEFAULT_HOST)
port = instance.get('port', DEFAULT_PORT)
sock = instance.get('socket')
if sock is None:
msg = (
'Cannot connect to http://{}:{}. '
'Make sure supervisor is running and XML-RPC '
'inet interface is enabled.'.format(host, port)
)
else:
msg = (
'Cannot connect to {}. Make sure supervisor '
'is running and socket is enabled and socket file'
' has the right permissions.'.format(sock)
)
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL, tags=instance_tags, message=msg)
raise Exception(msg)
except xmlrpclib.ProtocolError as e:
if e.errcode == 401: # authorization error
msg = 'Username or password to {} are incorrect.'.format(server_name)
else:
msg = 'An error occurred while connecting to {}: {} {}'.format(server_name, e.errcode, e.errmsg)
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.CRITICAL, tags=instance_tags, message=msg)
raise Exception(msg)
# If we're here, we were able to connect to the server
self.service_check(SERVER_SERVICE_CHECK, AgentCheck.OK, tags=instance_tags)
# Filter monitored processes on configuration directives
proc_regex = instance.get('proc_regex', [])
if not isinstance(proc_regex, list):
raise Exception("'proc_regex' should be a list of strings. e.g. %s" % [proc_regex])
proc_names = instance.get('proc_names', [])
if not isinstance(proc_names, list):
raise Exception("'proc_names' should be a list of strings. e.g. %s" % [proc_names])
# Collect information on each monitored process
monitored_processes = []
# monitor all processes if no filters were specified
if len(proc_regex) == 0 and len(proc_names) == 0:
monitored_processes = processes
for pattern, process in itertools.product(proc_regex, processes):
if re.match(pattern, process['name']) and process not in monitored_processes:
monitored_processes.append(process)
for process in processes:
if process['name'] in proc_names and process not in monitored_processes:
monitored_processes.append(process)
# Report service checks and uptime for each process
for proc in monitored_processes:
proc_name = proc['name']
tags = instance_tags + ['{}:{}'.format(PROCESS_TAG, proc_name)]
# Report Service Check
status = DD_STATUS[proc['statename']]
msg = self._build_message(proc) if status is not AgentCheck.OK else None
count_by_status[status] += 1
self.service_check(PROCESS_SERVICE_CHECK, status, tags=tags, message=msg)
# Report Uptime
uptime = self._extract_uptime(proc)
self.gauge('supervisord.process.uptime', uptime, tags=tags)
# Report counts by status
for status in PROCESS_STATUS:
self.gauge(
'supervisord.process.count',
count_by_status[status],
tags=instance_tags + ['status:{}'.format(PROCESS_STATUS[status])],
)
self._collect_metadata(supe)
@staticmethod
def _connect(instance):
sock = instance.get('socket')
user = instance.get('user') or instance.get('username')
password = instance.get('pass') or instance.get('password')
if sock is not None:
host = instance.get('host', DEFAULT_SOCKET_IP)
transport = supervisor.xmlrpc.SupervisorTransport(user, password, sock)
server = xmlrpclib.ServerProxy(host, transport=transport)
else:
host = instance.get('host', DEFAULT_HOST)
port = instance.get('port', DEFAULT_PORT)
auth = '{}:{}@'.format(user, password) if user and password else ''
server = xmlrpclib.Server('http://{}{}:{}/RPC2'.format(auth, host, port))
return server.supervisor
@staticmethod
def _extract_uptime(proc):
start, now = int(proc['start']), int(proc['now'])
status = proc['statename']
active_state = status in ['BACKOFF', 'RUNNING', 'STOPPING']
return now - start if active_state else 0
@staticmethod
def _build_message(proc):
start, stop, now = int(proc['start']), int(proc['stop']), int(proc['now'])
proc['now_str'] = FORMAT_TIME(now)
proc['start_str'] = FORMAT_TIME(start)
proc['stop_str'] = '' if stop == 0 else FORMAT_TIME(stop)
return (
"""Current time: %(now_str)s
Process name: %(name)s
Process group: %(group)s
Description: %(description)s
Error log file: %(stderr_logfile)s
Stdout log file: %(stdout_logfile)s
Log file: %(logfile)s
State: %(statename)s
Start time: %(start_str)s
Stop time: %(stop_str)s
Exit Status: %(exitstatus)s"""
% proc
)
def _collect_metadata(self, supe):
try:
version = supe.getSupervisorVersion()
except Exception as e:
self.log.warning("Error collecting version: %s", e)
return
self.log.debug('Version collected: %s', version)
self.set_metadata('version', version, scheme='regex', pattern=SUPERVISORD_VERSION_PATTERN)
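# A minimal, hypothetical instance configuration for this check (values are
# illustrative; only 'name' is required, everything else falls back to the
# defaults defined above):
#
#   instance = {
#       'name': 'server0',
#       'host': 'localhost',
#       'port': '9001',
#       'proc_names': ['web', 'worker'],   # or 'proc_regex': ['worker_\\d+']
#       'tags': ['env:dev'],
#   }
#   SupervisordCheck('supervisord', {}, [instance]).check(instance)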
|
{
"content_hash": "32d856b224f834be2db52ce8cb06ab4a",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 119,
"avg_line_length": 36.30875576036866,
"alnum_prop": 0.6003299911156238,
"repo_name": "DataDog/integrations-core",
"id": "3e16ddbfd694d014a66b3895e378c7eed53870ee",
"size": "7989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supervisord/datadog_checks/supervisord/supervisord.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
"""API error code
"""
|
{
"content_hash": "cf23c12a81e1649d9e05c99ab00e0856",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 17,
"avg_line_length": 11,
"alnum_prop": 0.5454545454545454,
"repo_name": "jack8daniels2/yabgp",
"id": "78b97fdca1f2ad974ac95f0c78744f9d5e07f8be",
"size": "657",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "yabgp/api/code.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "256862"
}
],
"symlink_target": ""
}
|
import unittest
from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG
import ray
class LocalModeTest(unittest.TestCase):
def testLocal(self):
ray.init(local_mode=True)
cf = DEFAULT_CONFIG.copy()
agent = PPOTrainer(cf, "CartPole-v0")
print(agent.train())
if __name__ == "__main__":
unittest.main(verbosity=2)
|
{
"content_hash": "fdc95c03edd4b1a50cacd2543df85ce4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 22.4375,
"alnum_prop": 0.6573816155988857,
"repo_name": "stephanie-wang/ray",
"id": "c3341beafc9f2e2bf85a542db4ebde12dedc8cec",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/tests/test_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
}
|
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwfgnc(mfpackage.MFPackage):
"""
ModflowGwfgnc defines a gnc package within a gwf6 model.
Parameters
----------
model : MFModel
Model that this package is a part of. Package is automatically
added to model when it is initialized.
loading_package : bool
Do not set this parameter. It is intended for debugging and internal
processing purposes only.
print_input : boolean
* print_input (boolean) keyword to indicate that the list of GNC
information will be written to the listing file immediately after it
is read.
print_flows : boolean
* print_flows (boolean) keyword to indicate that the list of GNC flow
rates will be printed to the listing file for every stress period
time step in which "BUDGET PRINT" is specified in Output Control. If
there is no Output Control option and "PRINT_FLOWS" is specified,
then flow rates are printed for the last time step of each stress
period.
explicit : boolean
* explicit (boolean) keyword to indicate that the ghost node correction
is applied in an explicit manner on the right-hand side of the
matrix. The explicit approach will likely require additional outer
iterations. If the keyword is not specified, then the correction will
be applied in an implicit manner on the left-hand side. The implicit
approach will likely converge better, but may require additional
memory. If the EXPLICIT keyword is not specified, then the BICGSTAB
linear acceleration option should be specified within the LINEAR
block of the Sparse Matrix Solver.
numgnc : integer
* numgnc (integer) is the number of GNC entries.
numalphaj : integer
* numalphaj (integer) is the number of contributing factors.
gncdata : [cellidn, cellidm, cellidsj, alphasj]
* cellidn ((integer, ...)) is the cellid of the cell, :math:`n`, in
which the ghost node is located. For a structured grid that uses the
DIS input file, CELLIDN is the layer, row, and column numbers of the
cell. For a grid that uses the DISV input file, CELLIDN is the layer
number and CELL2D number for the two cells. If the model uses the
unstructured discretization (DISU) input file, then CELLIDN is the
node number for the cell. This argument is an index variable, which
means that it should be treated as zero-based when working with FloPy
and Python. Flopy will automatically subtract one when loading index
variables and add one when writing index variables.
* cellidm ((integer, ...)) is the cellid of the connecting cell,
:math:`m`, to which flow occurs from the ghost node. For a structured
grid that uses the DIS input file, CELLIDM is the layer, row, and
column numbers of the cell. For a grid that uses the DISV input file,
CELLIDM is the layer number and CELL2D number for the two cells. If
the model uses the unstructured discretization (DISU) input file,
then CELLIDM is the node number for the cell. This argument is an
index variable, which means that it should be treated as zero-based
when working with FloPy and Python. Flopy will automatically subtract
one when loading index variables and add one when writing index
variables.
* cellidsj ((integer, ...)) is the array of CELLIDS for the
contributing j cells, which contribute to the interpolated head value
at the ghost node. This item contains one CELLID for each of the
contributing cells of the ghost node. Note that if the number of
actual contributing cells needed by the user is less than NUMALPHAJ
for any ghost node, then a dummy CELLID of zero(s) should be inserted
with an associated contributing factor of zero. For a structured grid
that uses the DIS input file, CELLID is the layer, row, and column
numbers of the cell. For a grid that uses the DISV input file, CELLID
is the layer number and cell2d number for the two cells. If the model
uses the unstructured discretization (DISU) input file, then CELLID
is the node number for the cell. This argument is an index variable,
which means that it should be treated as zero-based when working with
FloPy and Python. Flopy will automatically subtract one when loading
index variables and add one when writing index variables.
* alphasj (double) is the contributing factors for each contributing
node in CELLIDSJ. Note that if the number of actual contributing
cells is less than NUMALPHAJ for any ghost node, then dummy CELLIDS
should be inserted with an associated contributing factor of zero.
The sum of ALPHASJ should be less than one. This is because one minus
the sum of ALPHASJ is equal to the alpha term (alpha n in equation
4-61 of the GWF Model report) that is multiplied by the head in cell
n.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
gncdata = ListTemplateGenerator(("gwf6", "gnc", "gncdata", "gncdata"))
package_abbr = "gwfgnc"
_package_type = "gnc"
dfn_file_name = "gwf-gnc.dfn"
dfn = [
[
"block options",
"name print_input",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name explicit",
"type keyword",
"tagged true",
"reader urword",
"optional true",
],
[
"block dimensions",
"name numgnc",
"type integer",
"reader urword",
"optional false",
],
[
"block dimensions",
"name numalphaj",
"type integer",
"reader urword",
"optional false",
],
[
"block gncdata",
"name gncdata",
"type recarray cellidn cellidm cellidsj alphasj",
"shape (maxbound)",
"reader urword",
],
[
"block gncdata",
"name cellidn",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block gncdata",
"name cellidm",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block gncdata",
"name cellidsj",
"type integer",
"shape (numalphaj)",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block gncdata",
"name alphasj",
"type double precision",
"shape (numalphaj)",
"tagged false",
"in_record true",
"reader urword",
],
]
def __init__(
self,
model,
loading_package=False,
print_input=None,
print_flows=None,
explicit=None,
numgnc=None,
numalphaj=None,
gncdata=None,
filename=None,
pname=None,
parent_file=None,
):
super(ModflowGwfgnc, self).__init__(
model, "gnc", filename, pname, loading_package, parent_file
)
# set up variables
self.print_input = self.build_mfdata("print_input", print_input)
self.print_flows = self.build_mfdata("print_flows", print_flows)
self.explicit = self.build_mfdata("explicit", explicit)
self.numgnc = self.build_mfdata("numgnc", numgnc)
self.numalphaj = self.build_mfdata("numalphaj", numalphaj)
self.gncdata = self.build_mfdata("gncdata", gncdata)
self._init_complete = True
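# A hypothetical usage sketch (not part of the generated module). It assumes an
# existing MODFLOW 6 simulation with a DIS grid; cellids are zero-based tuples
# as described in the docstring, and with numalphaj=1 each gncdata record
# carries a single contributing cell and contributing factor:
#
#   import flopy
#   sim = flopy.mf6.MFSimulation(sim_name='sim')
#   gwf = flopy.mf6.ModflowGwf(sim, modelname='model')
#   gnc = flopy.mf6.ModflowGwfgnc(
#       gwf,
#       print_input=True,
#       numgnc=1,
#       numalphaj=1,
#       gncdata=[((0, 0, 1), (0, 0, 2), (0, 0, 0), 0.5)],
#   )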
|
{
"content_hash": "44ee91271327c8f1efa9fc2c5a399566",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 41.183098591549296,
"alnum_prop": 0.6002051983584131,
"repo_name": "aleaf/flopy",
"id": "e283cb30fb7293243fb686b08186e762bda33315",
"size": "8868",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/mf6/modflow/mfgwfgnc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
}
|
from config import server
DEBUG = server.DEBUG
if DEBUG:
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
else:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
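# For example (illustrative only), an sqlite3 configuration would look like:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': '/path/to/db.sqlite3',
#     }
# }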
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'g4#^+d4sl+-qql@=h32a_4eeys*&_qln^l5il!=nd*q=3a4ku)'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'PenBlog.middleware.CheckIsAdminMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'PenBlog.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{
"content_hash": "cbd2db9b3b39b8ce7e3775d9bf5d872a",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 115,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6848232848232848,
"repo_name": "quanix/PenBlog",
"id": "00614cf295db3d6e0162eb6c284a2e7509aaa157",
"size": "4850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PenBlog/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "375983"
},
{
"name": "HTML",
"bytes": "157973"
},
{
"name": "JavaScript",
"bytes": "886645"
},
{
"name": "Python",
"bytes": "46915"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
import operator
class Status(object):
def __init__(self, name, code, reason=None):
self.name = name
self.code = code
self.reason = reason or getattr(self, "reason", None)
def _comparison(self, operator, other):
if hasattr(other, "code"):
return operator(self.code, other.code)
return NotImplemented
def __eq__(self, other):
return self._comparison(operator.eq, other)
def __ne__(self, other):
return self._comparison(operator.ne, other)
def __lt__(self, other):
return self._comparison(operator.lt, other)
def __gt__(self, other):
return self._comparison(operator.gt, other)
def __le__(self, other):
return self._comparison(operator.le, other)
def __ge__(self, other):
return self._comparison(operator.ge, other)
class PendingStatus(Status):
def __init__(self, reason=None):
super(PendingStatus, self).__init__("pending", 0, reason)
class SubmittedStatus(Status):
def __init__(self, reason=None):
super(SubmittedStatus, self).__init__("submitted", 1, reason)
class CompleteStatus(Status):
def __init__(self, reason=None):
super(CompleteStatus, self).__init__("complete", 2, reason)
class SkippedStatus(Status):
def __init__(self, reason=None):
super(SkippedStatus, self).__init__("skipped", 3, reason)
class FailedStatus(Status):
def __init__(self, reason=None):
super(FailedStatus, self).__init__("failed", 4, reason)
class NotSubmittedStatus(SkippedStatus):
reason = "disabled"
class NotUpdatedStatus(SkippedStatus):
reason = "locked"
class DidNotChangeStatus(SkippedStatus):
reason = "nochange"
class StackDoesNotExist(SkippedStatus):
reason = "does not exist in cloudformation"
PENDING = PendingStatus()
WAITING = PendingStatus(reason="waiting")
SUBMITTED = SubmittedStatus()
COMPLETE = CompleteStatus()
SKIPPED = SkippedStatus()
FAILED = FailedStatus()
INTERRUPTED = FailedStatus(reason="interrupted")
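# A quick illustration of the ordering and equality these helpers provide (not
# part of the original module): statuses compare by code, so two statuses with
# the same code but different reasons are equal.
if __name__ == "__main__":
    assert PENDING < SUBMITTED < COMPLETE < SKIPPED < FAILED
    assert FAILED == INTERRUPTED and WAITING == PENDING
    assert COMPLETE >= SUBMITTED and PENDING != COMPLETE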
|
{
"content_hash": "7c9b5d83cb91ed7832908a1ad9704303",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 69,
"avg_line_length": 25.658823529411766,
"alnum_prop": 0.6629986244841816,
"repo_name": "remind101/stacker",
"id": "395d575d4e07e09830f0bcfa516351ab1da13af5",
"size": "2181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stacker/status.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1057"
},
{
"name": "Makefile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "512358"
},
{
"name": "Shell",
"bytes": "29734"
}
],
"symlink_target": ""
}
|
"""
wa-report-pusher
What the hell! Why spend time filling
in the reporting tables? me and
"""
from __future__ import print_function
__version__ = '1.0'
__author__ = 'M.Price'
__license__ = 'MIT'
__prj_name__ = 'wa-report-pusher'
__description__ = 'What the hell! Why spend time filling ' \
'in the reporting tables? me and'
__platforms__ = 'Any'
import os
import sys
import yaml
from datetime import datetime, timedelta
from apiclient import discovery
from github3 import login, GitHub
from httplib2 import Http
from googleapiclient.discovery import Resource
from oauth2client.service_account import ServiceAccountCredentials
res_dir = 'resources'
def _get_def_config():
"""Open default.yml configuration.
:return: Configuration file
"""
try:
return open(os.path.join(res_dir, 'default.yml'))
except Exception:
raise FileNotFoundError("Resource folder not found")
try:
path = ''
flags = sys.argv[1]
try:
path = os.path.join(res_dir, flags)
file = open(path)
except Exception:
try:
path += '.yml'
file = open(path)
except Exception:
file = _get_def_config()
except Exception:
file = _get_def_config()
conf = yaml.load(file)
sheets_conf = conf.get('SHEETS')
CLIENT_SECRET_FILE = "{}/{}".format(res_dir,
sheets_conf.get('CLIENT_SECRET_FILE'))
SPREADSHEET_ID = sheets_conf.get('SPREADSHEET_ID')
SHEET_NAME = sheets_conf.get('SHEET_NAME')
FIND_NAME = sheets_conf.get('FIND_NAME')
PROJECT_NAME = sheets_conf.get('PROJECT_NAME')
git_conf = conf.get('GITHUB')
GIT_CREDENTIALS_FILE = "{}/{}".format(res_dir,
git_conf.get('GIT_CREDENTIALS_FILE'))
GIT_PROJECTS = git_conf.get('GIT_PROJECTS')
# You will ask me: "What the fu*k is this?"
ABC = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def _get_abc(index: int) -> str:
"""Getting letters by index (for column index in Sheets).
:param index: Search index
:return: Letters index
"""
abc_len = len(ABC)
if 676 > index > 0:
if index <= abc_len:
return ABC[index]
else:
i = int(index / abc_len)
return "{}{}".format(ABC[i + 1], ABC[index - (abc_len * i)])
else:
raise Exception('Index - invalid')
def _get_index(values: list, index: int) -> int:
"""Search empty cell in user column.
:param values: Sheet data
:param index: User Index
:return: Empty cell index
"""
cur_date = datetime.now().strftime("%d.%m")
find = False
i = 0
try:
for row in values:
i += 1
if row[index] == "" and row[0] == cur_date:
find = True
break
except IndexError:
pass
if not find:
raise Exception('Out of range')
print('-- Last index on column:{}'.format(i))
return i
def _get_spreadsheets() -> Resource:
"""Get spreadsheets with google sheets api authorization
:return: Spreadsheets value data
"""
credentials = ServiceAccountCredentials.from_json_keyfile_name(
CLIENT_SECRET_FILE, ['https://www.googleapis.com/auth/spreadsheets'])
http_auth = credentials.authorize(Http())
service = discovery.build('sheets', 'v4', http=http_auth)
return service.spreadsheets().values()
def _get_git_session() -> (GitHub, str):
"""Get GitHub session and User name from GIT_CREDENTIALS_FILE.
Sample
:return: GitHub session, User name
"""
with open(GIT_CREDENTIALS_FILE, 'r') as fd:
git_user = fd.readline().strip()
git_password = fd.readline().strip()
return login(username=git_user, password=git_password), git_user
def get_commits(gh: GitHub, git_user: str, prj_name: str,
git_acc: str, git_repo: str) -> str:
"""Get last day commits from current owner and repository.
:return: Compilation of commits message
:raise Exception: if not right something
"""
repository = gh.repository(git_acc, git_repo)
find_date = (datetime.now() - timedelta(1)).strftime("%Y-%m-%d")
commits = list()
for branch in repository.iter_branches():
for commit in repository.iter_commits(sha=branch.name,
author=git_user,
since=find_date):
commits.append(commit.commit.message.replace('\n', ' '))
if len(commits) > 0:
commits_data = prj_name + ' - '
commits_data += ", ".join(commits)
print('-- Getting {} commits a success'.format(prj_name))
return commits_data
else:
print('Getting commits - invalid')
exit(1)
def push_data(spreadsheets: Resource, commits: list,
cell: int, abc_index: str) -> None:
"""Push compile commits of projects in current sheet
:param spreadsheets: google sheets api authorization
:param commits: list of projects commits
:param cell: Row index
:param abc_index: Letter column index
"""
commits_data = '\n'.join(commits)
range_data = "{}!{}{}:{}{}".format(SHEET_NAME, abc_index, cell,
abc_index, cell)
spreadsheets.batchUpdate(
spreadsheetId=SPREADSHEET_ID,
body={
"valueInputOption": "USER_ENTERED",
"data": [
{"range": range_data,
"majorDimension": "ROWS",
"values": [[commits_data, ], ]}
]
}).execute()
print('-- Push in table is success.')
def report_push() -> None:
spreadsheets = _get_spreadsheets()
data = spreadsheets.get(
spreadsheetId=SPREADSHEET_ID, range=SHEET_NAME).execute()
values = data.get('values', [])
index = 0
if not values:
print('-- No data found.')
else:
# Search current name in table
for row in values[0]:
if row == FIND_NAME:
index = values[0].index(FIND_NAME)
print('-- Find name index:{}'.format(_get_abc(index)))
break
abc_index = _get_abc(index)
first_cell = _get_index(values, index)
# GitHub Project agitation
commits = list()
gh, git_user = _get_git_session()
for prj in GIT_PROJECTS:
commits.append(get_commits(gh, git_user, prj[0], prj[1], prj[2]))
# Push commits message in Google Sheets
push_data(spreadsheets, commits, first_cell, abc_index)
if __name__ == '__main__':
report_push()
|
{
"content_hash": "86335b2f898c00bd81bcb720aba6324c",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 77,
"avg_line_length": 30.701834862385322,
"alnum_prop": 0.5753772598236964,
"repo_name": "MihailPreis/WA_Report_pusher",
"id": "a7825e71c66f7b897dac573253137bfd60d26c51",
"size": "6693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report_pusher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7441"
}
],
"symlink_target": ""
}
|
from numpy import *
import math
def norm_pdf_multivariate(x, mu, sigma):
size = len(x)
if size == len(mu) and (size, size) == sigma.shape:
det = linalg.det(sigma)
if det == 0:
raise NameError("The covariance matrix can't be singular")
norm_const = 1.0/ ( math.pow((2*pi),float(size)/2) * math.pow(det,1.0/2) )
x_mu = matrix(x - mu)
inv = sigma.I
result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))
return norm_const * result
else:
raise NameError("The dimensions of the input don't match")
|
{
"content_hash": "87a40a9243e8d519ad1353e1c7f1a9ad",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 31.61111111111111,
"alnum_prop": 0.5817223198594025,
"repo_name": "sbxzy/pygks",
"id": "704be80bed0d439b944f6f7b26ed8d3ad4177e14",
"size": "619",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pygks/__gaussian_custom.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "527342"
}
],
"symlink_target": ""
}
|
'''
Created on 2016-02-09
@author: Darren
'''
'''
Dual Palindromes
Mario Cruz (Colombia) & Hugo Rickeboer (Argentina)
A number that reads the same from right to left as when read from left to right is called a palindrome. The number 12321 is a palindrome; the number 77778 is not. Of course, palindromes have neither leading nor trailing zeroes, so 0220 is not a palindrome.
The number 21 (base 10) is not palindrome in base 10, but the number 21 (base 10) is, in fact, a palindrome in base 2 (10101).
Write a program that reads two numbers (expressed in base 10):
N (1 <= N <= 15)
S (0 < S < 10000)
and then finds and prints (in base 10) the first N numbers strictly greater than S that are palindromic when written in two or more number bases (2 <= base <= 10).
Solutions to this problem do not require manipulating integers larger than the standard 32 bits.
PROGRAM NAME: dualpal
INPUT FORMAT
A single line with space separated integers N and S.
SAMPLE INPUT (file dualpal.in)
3 25
OUTPUT FORMAT
N lines, each with a base 10 number that is palindromic when expressed in at least two of the bases 2..10. The numbers should be listed in order from smallest to largest.
SAMPLE OUTPUT (file dualpal.out)
26
27
28
'''
def convert(num,base):
res=""
while num>0:
temp=num%base
if temp>9:
res=chr(ord("A")-10+temp)+res
else:
res=str(temp)+res
num//=base
return res
def dualpal(N,S):
res=[]
while len(res)<N:
S+=1
count=0
for base in range(2,11):
cand=convert(S, base)
if cand==cand[::-1]:
count+=1
if count>=2:
res.append(S)
break
print(res)
dualpal(15, 9900)
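# Quick sanity check (illustrative, not part of the original solution): 26 is the first
# number above 25 that is palindromic in at least two bases, e.g. base 3 and base 5.
assert convert(26, 3) == "222" and convert(26, 5) == "101"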
|
{
"content_hash": "7990892cf9e61738e9afab94385dadcc",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 256,
"avg_line_length": 27.904761904761905,
"alnum_prop": 0.6535836177474402,
"repo_name": "darrencheng0817/AlgorithmLearning",
"id": "8e06a512aea127a22bc6699db2acc0fea33aaa39",
"size": "1764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "USACO/section1/dualpal/dualpal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2663"
},
{
"name": "Java",
"bytes": "89490"
},
{
"name": "Python",
"bytes": "600854"
}
],
"symlink_target": ""
}
|
from fanstatic import DEBUG, MINIFIED, compat
BOOL_CONFIG = set(['versioning', 'recompute_hashes', DEBUG, MINIFIED,
'bottom', 'force_bottom', 'bundle', 'rollup',
'versioning_use_md5', 'compile'])
# From paste.util.converters.
def asbool(obj):
if isinstance(obj, compat.basestring):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError(
"String is not true/false: %r" % obj)
return bool(obj)
def convert_config(config):
result = {}
for key, value in compat.iteritems(config):
if key in BOOL_CONFIG:
result[key] = asbool(value)
else:
result[key] = value
return result
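# Hedged example (not part of fanstatic itself; the key names are illustrative): string values
# read from an INI-style config are normalised, keys listed in BOOL_CONFIG become real booleans,
# and everything else passes through unchanged.
if __name__ == '__main__':
    sample = {'versioning': 'true', 'bottom': 'off', 'base_url': '/static'}
    print(convert_config(sample))
    # -> {'versioning': True, 'bottom': False, 'base_url': '/static'}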
|
{
"content_hash": "98df765a2efbf073f099237b2bd183a7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 69,
"avg_line_length": 30.103448275862068,
"alnum_prop": 0.5383734249713631,
"repo_name": "fanstatic/fanstatic",
"id": "038b9236a91aacd3bfb0be07898042be0350593a",
"size": "873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fanstatic/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48"
},
{
"name": "HTML",
"bytes": "61"
},
{
"name": "Python",
"bytes": "388150"
}
],
"symlink_target": ""
}
|
from distutils.core import setup, Extension
wrapper = Extension(
'_hom4ps',
['hom4ps_wrap.cxx'],
libraries=['hom4ps']
)
setup (
name='Hom4PSpy',
version='1.0',
description='Python binding for Hom4PS-3, a numerical nonlinear system solver',
author='Tianran Chen',
author_email='chentia1@msu.edu',
url='http://www.hom4ps3.org',
py_modules=['hom4ps', 'hom4pspy'],
ext_modules=[wrapper],
)
|
{
"content_hash": "72fda7cc6676eadb7b427bdbcc56c3f5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 83,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.6407322654462243,
"repo_name": "chentianran/h3-bindings",
"id": "e7569bbec8f86a0de8bfceaad3b54b90c0c37340",
"size": "460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7207"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
}
|
"""Package related functionality."""
from __future__ import print_function
import sys
from chromite.api import faux
from chromite.api import validate
from chromite.api.controller import controller_util
from chromite.api.gen.chromite.api import binhost_pb2
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import portage_util
from chromite.lib.uprev_lib import GitRef
from chromite.service import packages
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_OVERLAY_TYPE_TO_NAME = {
binhost_pb2.OVERLAYTYPE_PUBLIC: constants.PUBLIC_OVERLAYS,
binhost_pb2.OVERLAYTYPE_PRIVATE: constants.PRIVATE_OVERLAYS,
binhost_pb2.OVERLAYTYPE_BOTH: constants.BOTH_OVERLAYS,
}
def _UprevResponse(_input_proto, output_proto, _config):
"""Add fake paths to a successful uprev response."""
output_proto.modified_ebuilds.add().path = '/fake/path1'
output_proto.modified_ebuilds.add().path = '/fake/path2'
@faux.success(_UprevResponse)
@faux.empty_error
@validate.require('overlay_type')
@validate.is_in('overlay_type', _OVERLAY_TYPE_TO_NAME)
@validate.validation_complete
def Uprev(input_proto, output_proto, _config):
"""Uprev all cros workon ebuilds that have changes."""
build_targets = controller_util.ParseBuildTargets(input_proto.build_targets)
overlay_type = _OVERLAY_TYPE_TO_NAME[input_proto.overlay_type]
chroot = controller_util.ParseChroot(input_proto.chroot)
output_dir = input_proto.output_dir or None
try:
uprevved = packages.uprev_build_targets(build_targets, overlay_type, chroot,
output_dir)
except packages.Error as e:
# Handle module errors nicely, let everything else bubble up.
cros_build_lib.Die(e)
for path in uprevved:
output_proto.modified_ebuilds.add().path = path
def _UprevVersionedPackageResponse(_input_proto, output_proto, _config):
"""Add fake paths to a successful uprev versioned package response."""
uprev_response = output_proto.responses.add()
uprev_response.modified_ebuilds.add().path = '/uprev/response/path'
@faux.success(_UprevVersionedPackageResponse)
@faux.empty_error
@validate.require('versions')
@validate.require('package_info.package_name', 'package_info.category')
@validate.validation_complete
def UprevVersionedPackage(input_proto, output_proto, _config):
"""Uprev a versioned package.
See go/pupr-generator for details about this endpoint.
"""
chroot = controller_util.ParseChroot(input_proto.chroot)
build_targets = controller_util.ParseBuildTargets(input_proto.build_targets)
package = controller_util.PackageInfoToCPV(input_proto.package_info)
refs = []
for ref in input_proto.versions:
refs.append(GitRef(path=ref.repository, ref=ref.ref, revision=ref.revision))
try:
result = packages.uprev_versioned_package(package, build_targets, refs,
chroot)
except packages.Error as e:
# Handle module errors nicely, let everything else bubble up.
cros_build_lib.Die(e)
if not result.uprevved:
# No uprevs executed, skip the output population.
return
for modified in result.modified:
uprev_response = output_proto.responses.add()
uprev_response.version = modified.new_version
for path in modified.files:
uprev_response.modified_ebuilds.add().path = path
def _GetBestVisibleResponse(_input_proto, output_proto, _config):
"""Add fake paths to a successful GetBestVisible response."""
package_info = common_pb2.PackageInfo(
category='category',
package_name='name',
version='1.01',
)
output_proto.package_info.CopyFrom(package_info)
@faux.success(_GetBestVisibleResponse)
@faux.empty_error
@validate.require('atom')
@validate.validation_complete
def GetBestVisible(input_proto, output_proto, _config):
"""Returns the best visible PackageInfo for the indicated atom."""
build_target = None
if input_proto.build_target.name:
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
cpv = packages.get_best_visible(input_proto.atom, build_target=build_target)
package_info = common_pb2.PackageInfo()
controller_util.CPVToPackageInfo(cpv, package_info)
output_proto.package_info.CopyFrom(package_info)
def _ChromeVersionResponse(_input_proto, output_proto, _config):
"""Add a fake chrome version to a successful response."""
output_proto.version = '78.0.3900.0'
@faux.success(_ChromeVersionResponse)
@faux.empty_error
@validate.require('build_target.name')
@validate.validation_complete
def GetChromeVersion(input_proto, output_proto, _config):
"""Returns the chrome version."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
chrome_version = packages.determine_chrome_version(build_target)
if chrome_version:
output_proto.version = chrome_version
def _GetTargetVersionsResponse(_input_proto, output_proto, _config):
"""Add fake target version fields to a successful response."""
output_proto.android_version = '5812377'
output_proto.android_branch_version = 'git_nyc-mr1-arc'
output_proto.android_target_version = 'cheets'
output_proto.chrome_version = '78.0.3900.0'
output_proto.platform_version = '12438.0.0'
output_proto.milestone_version = '78'
output_proto.full_version = 'R78-12438.0.0'
@faux.success(_GetTargetVersionsResponse)
@faux.empty_error
@validate.require('build_target.name')
@validate.validation_complete
def GetTargetVersions(input_proto, output_proto, _config):
"""Returns the target versions."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
# Android version.
android_version = packages.determine_android_version([build_target.name])
logging.info('Found android version: %s', android_version)
if android_version:
output_proto.android_version = android_version
# Android branch version.
android_branch_version = packages.determine_android_branch(build_target.name)
logging.info('Found android branch version: %s', android_branch_version)
if android_branch_version:
output_proto.android_branch_version = android_branch_version
# Android target version.
android_target_version = packages.determine_android_target(build_target.name)
logging.info('Found android target version: %s', android_target_version)
if android_target_version:
output_proto.android_target_version = android_target_version
# TODO(crbug/1019770): Investigate cases where builds_chrome is true but
# chrome_version is None.
builds_chrome = packages.builds(constants.CHROME_CP, build_target)
if builds_chrome:
# Chrome version fetch.
chrome_version = packages.determine_chrome_version(build_target)
logging.info('Found chrome version: %s', chrome_version)
if chrome_version:
output_proto.chrome_version = chrome_version
# The ChromeOS version info.
output_proto.platform_version = packages.determine_platform_version()
output_proto.milestone_version = packages.determine_milestone_version()
output_proto.full_version = packages.determine_full_version()
def _GetBuilderMetadataResponse(input_proto, output_proto, _config):
"""Add fake metadata fields to a successful response."""
# Populate only a few fields to validate faux testing.
build_target_metadata = output_proto.build_target_metadata.add()
build_target_metadata.build_target = input_proto.build_target.name
build_target_metadata.android_container_branch = 'git_pi-arc'
model_metadata = output_proto.model_metadata.add()
model_metadata.model_name = 'astronaut'
model_metadata.ec_firmware_version = 'coral_v1.1.1234-56789f'
@faux.success(_GetBuilderMetadataResponse)
@faux.empty_error
@validate.require('build_target.name')
@validate.validation_complete
def GetBuilderMetadata(input_proto, output_proto, _config):
"""Returns the target builder metadata."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
build_target_metadata = output_proto.build_target_metadata.add()
build_target_metadata.build_target = build_target.name
# Android version.
android_version = packages.determine_android_version([build_target.name])
logging.info('Found android version: %s', android_version)
if android_version:
build_target_metadata.android_container_version = android_version
# Android branch version.
android_branch_version = packages.determine_android_branch(build_target.name)
logging.info('Found android branch version: %s', android_branch_version)
if android_branch_version:
build_target_metadata.android_container_branch = android_branch_version
# Android target version.
android_target_version = packages.determine_android_target(build_target.name)
logging.info('Found android target version: %s', android_target_version)
if android_target_version:
build_target_metadata.android_container_target = android_target_version
build_target_metadata.arc_use_set = 'arc' in portage_util.GetBoardUseFlags(
build_target.name)
# TODO(crbug/1071620): Add service layer calls to fill out the rest of
# build_target_metadata and model_metadata.
fw_versions = packages.determine_firmware_versions(build_target)
build_target_metadata.main_firmware_version = fw_versions.main_fw_version
build_target_metadata.ec_firmware_version = fw_versions.ec_fw_version
def _HasPrebuiltSuccess(_input_proto, output_proto, _config):
"""The mock success case for HasChromePrebuilt."""
output_proto.has_prebuilt = True
@faux.success(_HasPrebuiltSuccess)
@faux.empty_error
@validate.require('build_target.name')
@validate.validation_complete
def HasChromePrebuilt(input_proto, output_proto, _config):
"""Checks if the most recent version of Chrome has a prebuilt."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
useflags = 'chrome_internal' if input_proto.chrome else None
exists = packages.has_prebuilt(constants.CHROME_CP, build_target=build_target,
useflags=useflags)
output_proto.has_prebuilt = exists
@faux.success(_HasPrebuiltSuccess)
@faux.empty_error
@validate.require('build_target.name', 'package_info.category',
'package_info.package_name')
@validate.validation_complete
def HasPrebuilt(input_proto, output_proto, _config):
"""Checks if the most recent version of Chrome has a prebuilt."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
package = controller_util.PackageInfoToCPV(input_proto.package_info).cp
useflags = 'chrome_internal' if input_proto.chrome else None
exists = packages.has_prebuilt(
package, build_target=build_target, useflags=useflags)
output_proto.has_prebuilt = exists
def _BuildsChromeSuccess(_input_proto, output_proto, _config):
"""Mock success case for BuildsChrome."""
output_proto.builds_chrome = True
@faux.success(_BuildsChromeSuccess)
@faux.empty_error
@validate.require('build_target.name')
@validate.validation_complete
def BuildsChrome(input_proto, output_proto, _config):
"""Check if the board builds chrome."""
build_target = controller_util.ParseBuildTarget(input_proto.build_target)
cpvs = [controller_util.PackageInfoToCPV(pi) for pi in input_proto.packages]
builds_chrome = packages.builds(constants.CHROME_CP, build_target, cpvs)
output_proto.builds_chrome = builds_chrome
|
{
"content_hash": "c0126f5645615f3a0b083e846e5e3e09",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 80,
"avg_line_length": 40.05944055944056,
"alnum_prop": 0.7536877018416689,
"repo_name": "endlessm/chromium-browser",
"id": "a788672fa89eb9f81a90accfa89eec114269350d",
"size": "11647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/api/controller/packages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
from migrate.versioning import api as versioning_api
from oslo_config import cfg
from oslo_log import log as logging
from designate.manage import base
from designate.sqlalchemy import utils
LOG = logging.getLogger(__name__)
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'storage', 'impl_sqlalchemy',
'migrate_repo'))
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
CONF = cfg.CONF
INIT_VERSION = 37
def get_manager():
return utils.get_migration_manager(
REPOSITORY, CONF['storage:sqlalchemy'].connection, INIT_VERSION)
class DatabaseCommands(base.Commands):
def version(self):
current = get_manager().version()
latest = versioning_api.version(repository=REPOSITORY).value
print("Current: %s Latest: %s" % (current, latest))
def sync(self):
get_manager().upgrade(None)
@base.args('revision', nargs='?')
def upgrade(self, revision):
get_manager().upgrade(revision)
@base.args('revision', nargs='?')
def downgrade(self, revision):
get_manager().downgrade(revision)
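# Illustrative note (an assumption, not taken from this module): these commands are normally
# reached through the designate-manage console script, e.g.
#   designate-manage database version
#   designate-manage database sync
# which map onto DatabaseCommands.version() and DatabaseCommands.sync() above.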
|
{
"content_hash": "26b21cc735e176ef40454964e02e8db1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 29.13953488372093,
"alnum_prop": 0.636073423782921,
"repo_name": "kiall/designate-py3",
"id": "a8da400de26215f644a82ebfc4f7c970bf75f11c",
"size": "1879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "designate/manage/database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9136"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1977010"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13056"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from modular_build import read_file, write_file
import os
import os.path as path
import generate_injected_script_externs
import generate_protocol_externs
import modular_build
import re
import shutil
import subprocess
import sys
import tempfile
try:
import simplejson as json
except ImportError:
import json
if len(sys.argv) == 2 and sys.argv[1] == '--help':
print("Usage: %s [module_names]" % path.basename(sys.argv[0]))
print(" module_names list of modules for which the Closure compilation should run.")
print(" If absent, the entire frontend will be compiled.")
sys.exit(0)
is_cygwin = sys.platform == 'cygwin'
def run_in_shell(command_line):
return subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
def to_platform_path(filepath):
if not is_cygwin:
return filepath
return re.sub(r'^/cygdrive/(\w)', '\\1:', filepath)
def to_platform_path_exact(filepath):
if not is_cygwin:
return filepath
output, _ = run_in_shell('cygpath -w %s' % filepath).communicate()
# pylint: disable=E1103
return output.strip().replace('\\', '\\\\')
scripts_path = path.dirname(path.abspath(__file__))
devtools_path = path.dirname(scripts_path)
inspector_path = path.join(path.dirname(devtools_path), 'core', 'inspector')
devtools_frontend_path = path.join(devtools_path, 'front_end')
patched_es6_externs_file = to_platform_path(path.join(devtools_frontend_path, 'es6.js'))
global_externs_file = to_platform_path(path.join(devtools_frontend_path, 'externs.js'))
protocol_externs_file = path.join(devtools_frontend_path, 'protocol_externs.js')
webgl_rendering_context_idl_path = path.join(path.dirname(devtools_path), 'core', 'html', 'canvas', 'WebGLRenderingContextBase.idl')
injected_script_source_name = path.join(inspector_path, 'InjectedScriptSource.js')
canvas_injected_script_source_name = path.join(inspector_path, 'InjectedScriptCanvasModuleSource.js')
injected_script_externs_idl_names = [
path.join(inspector_path, 'InjectedScriptHost.idl'),
path.join(inspector_path, 'JavaScriptCallFrame.idl'),
]
jsmodule_name_prefix = 'jsmodule_'
runtime_module_name = '_runtime'
type_checked_jsdoc_tags_list = ['param', 'return', 'type', 'enum']
type_checked_jsdoc_tags_or = '|'.join(type_checked_jsdoc_tags_list)
# Basic regex for invalid JsDoc types: an object type name ([A-Z][A-Za-z0-9.]+[A-Za-z0-9]) not preceded by '!', '?', ':' (this, new), or '.' (object property).
invalid_type_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*\{.*(?<![!?:.A-Za-z0-9])([A-Z][A-Za-z0-9.]+[A-Za-z0-9])[^/]*\}')
invalid_type_designator_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*.*(?<![{: ])([?!])=?\}')
invalid_non_object_type_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*\{.*(![a-z]+)[^/]*\}')
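# Illustrative examples of what these checks flag (not taken from the original script):
#   '@param {Element} e'  -> invalid_type_regex: object type lacks a '!' or '?' nullability prefix
#   '@return {!number}'   -> invalid_non_object_type_regex: '!' is the default for primitives and should be omitted
#   '@type {number!}'     -> invalid_type_designator_regex: the nullability marker must precede the type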
error_warning_regex = re.compile(r'WARNING|ERROR')
loaded_css_regex = re.compile(r'(?:registerRequiredCSS|WebInspector\.View\.createStyleElement)\s*\(\s*"(.+)"\s*\)')
java_build_regex = re.compile(r'^\w+ version "(\d+)\.(\d+)')
errors_found = False
generate_protocol_externs.generate_protocol_externs(protocol_externs_file, path.join(devtools_path, 'protocol.json'))
# Based on http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python.
def which(program):
def is_exe(fpath):
return path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = path.split(program)
if fpath:
if is_exe(program):
return program
else:
for part in os.environ["PATH"].split(os.pathsep):
part = part.strip('"')
exe_file = path.join(part, program)
if is_exe(exe_file):
return exe_file
return None
def log_error(message):
print 'ERROR: ' + message
def error_excepthook(exctype, value, traceback):
print 'ERROR:'
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = error_excepthook
application_descriptors = ['devtools.json', 'inspector.json', 'toolbox.json']
loader = modular_build.DescriptorLoader(devtools_frontend_path)
descriptors = loader.load_applications(application_descriptors)
modules_by_name = descriptors.modules
def hasErrors(output):
    return re.search(error_warning_regex, output) is not None
def verify_jsdoc_extra(additional_files):
files = [to_platform_path(file) for file in descriptors.all_compiled_files() + additional_files]
file_list = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
file_list.write('\n'.join(files))
finally:
file_list.close()
return run_in_shell('%s -jar %s --files-list-name %s' % (java_exec, jsdoc_validator_jar, to_platform_path_exact(file_list.name))), file_list
def verify_jsdoc(additional_files):
def file_list():
return descriptors.all_compiled_files() + additional_files
errors_found = False
for full_file_name in file_list():
lineIndex = 0
with open(full_file_name, 'r') as sourceFile:
for line in sourceFile:
line = line.rstrip()
lineIndex += 1
if not line:
continue
if verify_jsdoc_line(full_file_name, lineIndex, line):
errors_found = True
return errors_found
def verify_jsdoc_line(fileName, lineIndex, line):
def print_error(message, errorPosition):
print '%s:%s: ERROR - %s%s%s%s%s%s' % (fileName, lineIndex, message, os.linesep, line, os.linesep, ' ' * errorPosition + '^', os.linesep)
known_css = {}
errors_found = False
match = re.search(invalid_type_regex, line)
if match:
print_error('Type "%s" nullability not marked explicitly with "?" (nullable) or "!" (non-nullable)' % match.group(1), match.start(1))
errors_found = True
match = re.search(invalid_non_object_type_regex, line)
if match:
print_error('Non-object type explicitly marked with "!" (non-nullable), which is the default and should be omitted', match.start(1))
errors_found = True
match = re.search(invalid_type_designator_regex, line)
if match:
print_error('Type nullability indicator misplaced, should precede type', match.start(1))
errors_found = True
match = re.search(loaded_css_regex, line)
if match:
file = path.join(devtools_frontend_path, match.group(1))
exists = known_css.get(file)
if exists is None:
exists = path.isfile(file)
known_css[file] = exists
if not exists:
print_error('Dynamically loaded CSS stylesheet is missing in the source tree', match.start(1))
errors_found = True
return errors_found
def find_java():
required_major = 1
required_minor = 7
exec_command = None
has_server_jvm = True
java_path = which('java')
if not java_path:
java_path = which('java.exe')
if not java_path:
print 'NOTE: No Java executable found in $PATH.'
sys.exit(1)
is_ok = False
java_version_out, _ = run_in_shell('%s -version' % java_path).communicate()
# pylint: disable=E1103
match = re.search(java_build_regex, java_version_out)
if match:
major = int(match.group(1))
minor = int(match.group(2))
is_ok = major >= required_major and minor >= required_minor
if is_ok:
exec_command = '%s -Xms1024m -server -XX:+TieredCompilation' % java_path
check_server_proc = run_in_shell('%s -version' % exec_command)
check_server_proc.communicate()
if check_server_proc.returncode != 0:
# Not all Java installs have server JVMs.
exec_command = exec_command.replace('-server ', '')
has_server_jvm = False
if not is_ok:
print 'NOTE: Java executable version %d.%d or above not found in $PATH.' % (required_major, required_minor)
sys.exit(1)
print 'Java executable: %s%s' % (java_path, '' if has_server_jvm else ' (no server JVM)')
return exec_command
java_exec = find_java()
closure_compiler_jar = to_platform_path(path.join(scripts_path, 'closure', 'compiler.jar'))
closure_runner_jar = to_platform_path(path.join(scripts_path, 'compiler-runner', 'closure-runner.jar'))
jsdoc_validator_jar = to_platform_path(path.join(scripts_path, 'jsdoc-validator', 'jsdoc-validator.jar'))
modules_dir = tempfile.mkdtemp()
common_closure_args = ' --summary_detail_level 3 --jscomp_error visibility --compilation_level SIMPLE_OPTIMIZATIONS --warning_level VERBOSE --language_in=ES6_STRICT --language_out=ES5_STRICT --accept_const_keyword --extra_annotation_name suppressReceiverCheck --extra_annotation_name suppressGlobalPropertiesCheck --module_output_path_prefix %s' % to_platform_path_exact(modules_dir + path.sep)
worker_modules_by_name = {}
dependents_by_module_name = {}
for module_name in descriptors.application:
module = descriptors.modules[module_name]
if descriptors.application[module_name].get('type', None) == 'worker':
worker_modules_by_name[module_name] = module
for dep in module.get('dependencies', []):
list = dependents_by_module_name.get(dep)
if not list:
list = []
dependents_by_module_name[dep] = list
list.append(module_name)
def check_conditional_dependencies():
for name in modules_by_name:
for dep_name in modules_by_name[name].get('dependencies', []):
dependency = modules_by_name[dep_name]
if dependency.get('experiment') or dependency.get('condition'):
log_error('Module "%s" may not depend on the conditional module "%s"' % (name, dep_name))
errors_found = True
check_conditional_dependencies()
def verify_worker_modules():
for name in modules_by_name:
for dependency in modules_by_name[name].get('dependencies', []):
if dependency in worker_modules_by_name:
log_error('Module "%s" may not depend on the worker module "%s"' % (name, dependency))
errors_found = True
verify_worker_modules()
def check_duplicate_files():
def check_module(module, seen_files, seen_modules):
name = module['name']
seen_modules[name] = True
for dep_name in module.get('dependencies', []):
if not dep_name in seen_modules:
check_module(modules_by_name[dep_name], seen_files, seen_modules)
for source in module.get('scripts', []):
referencing_module = seen_files.get(source)
if referencing_module:
log_error('Duplicate use of %s in "%s" (previously seen in "%s")' % (source, name, referencing_module))
seen_files[source] = name
for module_name in worker_modules_by_name:
check_module(worker_modules_by_name[module_name], {}, {})
print 'Checking duplicate files across modules...'
check_duplicate_files()
def module_arg(module_name):
return ' --module ' + jsmodule_name_prefix + module_name
def modules_to_check():
if len(sys.argv) == 1:
return descriptors.sorted_modules()
print 'Compiling only these modules: %s' % sys.argv[1:]
return [module for module in descriptors.sorted_modules() if module in set(sys.argv[1:])]
def dump_module(name, recursively, processed_modules):
if name in processed_modules:
return ''
processed_modules[name] = True
module = modules_by_name[name]
skipped_scripts = set(module.get('skip_compilation', []))
command = ''
dependencies = module.get('dependencies', [])
if recursively:
for dependency in dependencies:
command += dump_module(dependency, recursively, processed_modules)
command += module_arg(name) + ':'
filtered_scripts = descriptors.module_compiled_files(name)
command += str(len(filtered_scripts))
firstDependency = True
for dependency in dependencies + [runtime_module_name]:
if firstDependency:
command += ':'
else:
command += ','
firstDependency = False
command += jsmodule_name_prefix + dependency
for script in filtered_scripts:
command += ' --js ' + to_platform_path(path.join(devtools_frontend_path, name, script))
return command
print 'Compiling frontend...'
compiler_args_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
platform_protocol_externs_file = to_platform_path(protocol_externs_file)
runtime_js_path = to_platform_path(path.join(devtools_frontend_path, 'Runtime.js'))
checked_modules = modules_to_check()
for name in checked_modules:
closure_args = common_closure_args
closure_args += ' --externs ' + to_platform_path(patched_es6_externs_file)
closure_args += ' --externs ' + to_platform_path(global_externs_file)
closure_args += ' --externs ' + platform_protocol_externs_file
runtime_module = module_arg(runtime_module_name) + ':1 --js ' + runtime_js_path
closure_args += runtime_module + dump_module(name, True, {})
compiler_args_file.write('%s %s%s' % (name, closure_args, os.linesep))
finally:
compiler_args_file.close()
closure_runner_command = '%s -jar %s --compiler-args-file %s' % (java_exec, closure_runner_jar, to_platform_path_exact(compiler_args_file.name))
modular_compiler_proc = run_in_shell(closure_runner_command)
def unclosure_injected_script(sourceFileName, outFileName):
source = read_file(sourceFileName)
def replace_function(matchobj):
return re.sub(r'@param', 'param', matchobj.group(1) or '') + '\n//' + matchobj.group(2)
# Comment out the closure function and its jsdocs
source = re.sub(r'(/\*\*(?:[\s\n]*\*\s*@param[^\n]+\n)+\s*\*/\s*)?\n(\(function)', replace_function, source, count=1)
# Comment out its return statement
source = re.sub(r'\n(\s*return\s+[^;]+;\s*\n\}\)\s*)$', '\n/*\\1*/', source)
# Replace the "var Object" override with a "self.Object" one
source = re.sub(r'\nvar Object =', '\nself.Object =', source, count=1)
write_file(outFileName, source)
injectedScriptSourceTmpFile = to_platform_path(path.join(inspector_path, 'InjectedScriptSourceTmp.js'))
injectedScriptCanvasModuleSourceTmpFile = path.join(inspector_path, 'InjectedScriptCanvasModuleSourceTmp.js')
unclosure_injected_script(injected_script_source_name, injectedScriptSourceTmpFile)
unclosure_injected_script(canvas_injected_script_source_name, injectedScriptCanvasModuleSourceTmpFile)
print 'Compiling InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js...'
injected_script_externs_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
generate_injected_script_externs.generate_injected_script_externs(injected_script_externs_idl_names, injected_script_externs_file)
finally:
injected_script_externs_file.close()
spawned_compiler_command = '%s -jar %s %s' % (java_exec, closure_compiler_jar, common_closure_args)
command = spawned_compiler_command
command += ' --externs ' + to_platform_path_exact(injected_script_externs_file.name)
command += ' --externs ' + to_platform_path(protocol_externs_file)
command += ' --module ' + jsmodule_name_prefix + 'injected_script' + ':1'
command += ' --js ' + to_platform_path(injectedScriptSourceTmpFile)
command += ' --module ' + jsmodule_name_prefix + 'injected_canvas_script' + ':1:' + jsmodule_name_prefix + 'injected_script'
command += ' --js ' + to_platform_path(injectedScriptCanvasModuleSourceTmpFile)
injectedScriptCompileProc = run_in_shell(command)
print 'Verifying JSDoc comments...'
additional_jsdoc_check_files = [injectedScriptSourceTmpFile, injectedScriptCanvasModuleSourceTmpFile]
errors_found |= verify_jsdoc(additional_jsdoc_check_files)
jsdocValidatorProc, jsdocValidatorFileList = verify_jsdoc_extra(additional_jsdoc_check_files)
print 'Checking generated code in InjectedScriptCanvasModuleSource.js...'
webgl_check_script_path = path.join(scripts_path, "check_injected_webgl_calls_info.py")
check_injected_webgl_calls_command = '%s %s %s' % (webgl_check_script_path, webgl_rendering_context_idl_path, canvas_injected_script_source_name)
canvasModuleCompileProc = run_in_shell(check_injected_webgl_calls_command)
print 'Validating InjectedScriptSource.js...'
injectedscript_check_script_path = path.join(scripts_path, "check_injected_script_source.py")
check_injected_script_command = '%s %s' % (injectedscript_check_script_path, injected_script_source_name)
validateInjectedScriptProc = run_in_shell(check_injected_script_command)
print
(jsdocValidatorOut, _) = jsdocValidatorProc.communicate()
if jsdocValidatorOut:
print ('JSDoc validator output:%s%s' % (os.linesep, jsdocValidatorOut))
errors_found = True
os.remove(jsdocValidatorFileList.name)
(moduleCompileOut, _) = modular_compiler_proc.communicate()
print 'Modular compilation output:'
start_module_regex = re.compile(r'^@@ START_MODULE:(.+) @@$')
end_module_regex = re.compile(r'^@@ END_MODULE @@$')
in_module = False
skipped_modules = {}
error_count = 0
def skip_dependents(module_name):
for skipped_module in dependents_by_module_name.get(module_name, []):
skipped_modules[skipped_module] = True
has_module_output = False
# pylint: disable=E1103
for line in moduleCompileOut.splitlines():
if not in_module:
match = re.search(start_module_regex, line)
if not match:
continue
in_module = True
has_module_output = True
module_error_count = 0
module_output = []
module_name = match.group(1)
skip_module = skipped_modules.get(module_name)
if skip_module:
skip_dependents(module_name)
else:
match = re.search(end_module_regex, line)
if not match:
if not skip_module:
module_output.append(line)
if hasErrors(line):
error_count += 1
module_error_count += 1
skip_dependents(module_name)
continue
in_module = False
if skip_module:
print 'Skipping module %s...' % module_name
elif not module_error_count:
print 'Module %s compiled successfully: %s' % (module_name, module_output[0])
else:
print 'Module %s compile failed: %s errors%s' % (module_name, module_error_count, os.linesep)
print os.linesep.join(module_output)
if not has_module_output:
print moduleCompileOut
if error_count:
print 'Total Closure errors: %d%s' % (error_count, os.linesep)
errors_found = True
(injectedScriptCompileOut, _) = injectedScriptCompileProc.communicate()
print 'InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js compilation output:%s' % os.linesep, injectedScriptCompileOut
errors_found |= hasErrors(injectedScriptCompileOut)
(canvasModuleCompileOut, _) = canvasModuleCompileProc.communicate()
print 'InjectedScriptCanvasModuleSource.js generated code check output:%s' % os.linesep, canvasModuleCompileOut
errors_found |= hasErrors(canvasModuleCompileOut)
(validateInjectedScriptOut, _) = validateInjectedScriptProc.communicate()
print 'Validate InjectedScriptSource.js output:%s' % os.linesep, (validateInjectedScriptOut if validateInjectedScriptOut else '<empty>')
errors_found |= hasErrors(validateInjectedScriptOut)
if errors_found:
print 'ERRORS DETECTED'
os.remove(injectedScriptSourceTmpFile)
os.remove(injectedScriptCanvasModuleSourceTmpFile)
os.remove(compiler_args_file.name)
os.remove(injected_script_externs_file.name)
os.remove(protocol_externs_file)
shutil.rmtree(modules_dir, True)
|
{
"content_hash": "651d6d4e7de66c11aeeaab9a5f738ecd",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 394,
"avg_line_length": 41.458984375,
"alnum_prop": 0.6844584726998634,
"repo_name": "kurli/blink-crosswalk",
"id": "50002477d373cffe52a1f44728a7969247297b4f",
"size": "21227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/devtools/scripts/compile_frontend.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1835"
},
{
"name": "Assembly",
"bytes": "14768"
},
{
"name": "Batchfile",
"bytes": "35"
},
{
"name": "Bison",
"bytes": "64588"
},
{
"name": "C",
"bytes": "124323"
},
{
"name": "C++",
"bytes": "44371388"
},
{
"name": "CSS",
"bytes": "565212"
},
{
"name": "CoffeeScript",
"bytes": "163"
},
{
"name": "GLSL",
"bytes": "11578"
},
{
"name": "Groff",
"bytes": "28067"
},
{
"name": "HTML",
"bytes": "58015328"
},
{
"name": "Java",
"bytes": "109391"
},
{
"name": "JavaScript",
"bytes": "24469331"
},
{
"name": "Objective-C",
"bytes": "47687"
},
{
"name": "Objective-C++",
"bytes": "301733"
},
{
"name": "PHP",
"bytes": "184068"
},
{
"name": "Perl",
"bytes": "585293"
},
{
"name": "Python",
"bytes": "3817314"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "10037"
},
{
"name": "XSLT",
"bytes": "49926"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
try:
# Numba is installed
import numba
except ImportError:
# Numba is run from its source checkout
sys.path.insert(0, os.path.abspath('../..'))
import numba
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
#'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
#'sphinx.ext.graphviz',
'sphinxjp.themecore',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Numba'
copyright = u'2012-2015, Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = '.'.join(numba.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = numba.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# pip install sphinxjp.themes.basicstrap
html_theme = 'basicstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numbadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'numba.tex', u'Numba Documentation',
u'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'numba', 'Numba Documentation',
['Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Numba', 'Numba Documentation',
'Continuum Analytics', 'Numba', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Configuration for intersphinx: refer to the Python standard library
# and the Numpy documentation.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'llvmlite': ('http://llvmlite.pydata.org/en/latest/', None),
}
# -- Custom autogeneration ------------------------------------------------
def _autogenerate():
from numba.scripts.generate_lower_listing import gen_lower_listing
basedir = os.path.dirname(__file__)
gen_lower_listing(os.path.join(basedir,
'developer/autogen_lower_listing.rst'))
_autogenerate()
|
{
"content_hash": "63214de2d930099ba3d7e7b826110bd8",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 31.091872791519435,
"alnum_prop": 0.6950789862484373,
"repo_name": "stefanseefeld/numba",
"id": "a6bcee0991088ad9f406a5a6afddefdac18fc92f",
"size": "9240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5535"
},
{
"name": "C",
"bytes": "303376"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "Jupyter Notebook",
"bytes": "110325"
},
{
"name": "Python",
"bytes": "3946372"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
}
|
from streamlink.plugins.onetv import OneTV
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlOneTV(PluginCanHandleUrl):
__plugin__ = OneTV
should_match = [
"https://www.1tv.ru/live",
"http://www.1tv.ru/live",
"https://static.1tv.ru/eump/embeds/1tv_live_orbit-plus-4.html?muted=no",
"https://static.1tv.ru/eump/pages/1tv_live.html",
"https://static.1tv.ru/eump/pages/1tv_live_orbit-plus-4.html",
]
should_not_match = [
"http://www.1tv.ru/some-show/some-programme-2018-03-10",
"https://www.ctc.ru/online",
"http://www.ctc.ru/online",
"https://www.chetv.ru/online",
"http://www.chetv.ru/online",
"https://www.ctclove.ru/online",
"http://www.ctclove.ru/online",
"https://www.domashny.ru/online",
"http://www.domashny.ru/online",
]
|
{
"content_hash": "50ac7d7ddfebec6f1c5513510c27bfa3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.6128668171557562,
"repo_name": "gravyboat/streamlink",
"id": "ac4efe4f4983a798653b4996da31abb3002729bf",
"size": "886",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/plugins/test_onetv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1392475"
},
{
"name": "Shell",
"bytes": "6280"
}
],
"symlink_target": ""
}
|
import dns.rdtypes.dsbase
class DS(dns.rdtypes.dsbase.DSBase):
"""DS record"""
|
{
"content_hash": "51963c327b965d5a47ea263faee3998b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 36,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.686046511627907,
"repo_name": "waynechu/PythonProject",
"id": "7d457b2281e3fa4a816885299c994457c23f6ba4",
"size": "950",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dns/rdtypes/ANY/DS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "575084"
}
],
"symlink_target": ""
}
|
from math import sqrt
from ..geometry import Vector
from .mesh import Material, MeshPart
Vertex = Vector
TexCoord = Vertex
Normal = Vertex
Color = Vertex
class FaceVertex:
"""Contains the information a vertex needs in a face
    It contains the index of the vertex, the index of the texture coordinate
    and the index of the normal. Each index is None if it is not available.
:param vertex: index of the vertex
:param tex_coord: index of the texture coordinate
:param normal: index of the normal
:param color: index of the color
"""
def __init__(self, vertex = None, tex_coord = None, normal = None, color = None):
"""Initializes a FaceVertex from its indices
"""
self.vertex = vertex
self.tex_coord = tex_coord
self.normal = normal
self.color = color
def from_array(self, arr):
"""Initializes a FaceVertex from an array
:param arr: can be an array of strings, the first value will be the
vertex index, the second will be the texture coordinate index, the
third will be the normal index, and the fourth will be the color index.
"""
self.vertex = int(arr[0]) if len(arr) > 0 else None
try:
self.tex_coord = int(arr[1]) if len(arr) > 1 else None
except:
self.tex_coord = None
try:
self.normal = int(arr[2]) if len(arr) > 2 else None
except:
self.normal = None
try:
self.color = int(arr[3]) if len(arr) > 3 else None
except:
self.color = None
return self
class Face:
"""Represents a face with 3 vertices
Faces with more than 3 vertices are not supported in this class. You should
split your face first and then create the number needed of instances of
this class.
"""
def __init__(self, a = None, b = None, c = None, material = None):
"""Initializes a Face with its three FaceVertex and its Material
:param a: first FaceVertex element
:param b: second FaceVertex element
:param c: third FaceVertex element
:param material: the material to use with this face
"""
self.a = a
self.b = b
self.c = c
self.material = material
# Expects array of array
def from_array(self, arr):
"""Initializes a Face with an array
:param arr: should be an array of array of objects. Each array will
represent a FaceVertex
"""
self.a = FaceVertex().from_array(arr[0])
self.b = FaceVertex().from_array(arr[1])
self.c = FaceVertex().from_array(arr[2])
return self
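# Hedged usage sketch (the index values are illustrative, not from the original file): building
# a triangle from OBJ-style "vertex/tex_coord/normal" index triples that have already been split
# into per-vertex arrays of strings.
#   face = Face().from_array([['0', '0', '0'], ['1', '1', '0'], ['2', '2', '0']])
#   face.a.vertex, face.b.tex_coord, face.c.normal  # -> 0, 1, 0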
class ModelParser:
"""Represents a 3D model
"""
def __init__(self, up_conversion = None):
"""Initializes the model
:param up_conversion: couple of characters, can be y z or z y
"""
self.up_conversion = up_conversion
self.vertices = []
self.colors = []
self.normals = []
self.tex_coords = []
self.parts = []
self.materials = []
self.current_part = None
self.path = None
def init_textures(self):
"""Initializes the textures of the parts of the model
Basically, calls glGenTexture on each texture
"""
for part in self.parts:
part.init_texture()
def add_vertex(self, vertex):
"""Adds a vertex to the current model
Will also update its bounding box, and convert the up vector if
up_conversion was specified.
:param vertex: vertex to add to the model
"""
# Apply up_conversion to the vertex
new_vertex = vertex
if self.up_conversion is not None:
if self.up_conversion[0] == 'y' and self.up_conversion[1] == 'z':
new_vertex = Vector(vertex.y, vertex.z, vertex.x)
elif self.up_conversion[0] == 'z' and self.up_conversion[1] == 'y':
new_vertex = Vector(vertex.z, vertex.x, vertex.y)
self.vertices.append(new_vertex)
def add_tex_coord(self, tex_coord):
"""Adds a texture coordinate element to the current model
:param tex_coord: tex_coord to add to the model
"""
self.tex_coords.append(tex_coord)
def add_normal(self, normal):
"""Adds a normal element to the current model
:param normal: normal to add to the model
"""
self.normals.append(normal)
def add_color(self, color):
"""Adds a color element to the current model
:param color: color to add to the model
"""
self.colors.append(color)
def add_face(self, face):
"""Adds a face to the current model
If the face has a different material than the current material, it will
create a new mesh part and update the current material.
:param face: face to add to the model
"""
if self.current_part is None or (face.material != self.current_part.material and face.material is not None):
self.current_part = MeshPart(self)
self.current_part.material = face.material if face.material is not None else Material.DEFAULT_MATERIAL
self.parts.append(self.current_part)
self.current_part.add_face(face)
def parse_file(self, path, chunk_size = 512):
"""Sets the path of the model and parse bytes by chunk
:param path: path to the file to parse
:param chunk_size: the file will be read chunk by chunk, each chunk
having chunk_size bytes
"""
self.path = path
byte_counter = 0
with open(path, 'rb') as f:
while True:
bytes = f.read(chunk_size)
if bytes == b'':
return
self.parse_bytes(bytes, byte_counter)
byte_counter += chunk_size
def draw(self):
"""Draws each part of the model with OpenGL
"""
import OpenGL.GL as gl
for part in self.parts:
part.draw()
def generate_vbos(self):
"""Generates the VBOs of each part of the model
"""
for part in self.parts:
part.generate_vbos()
def generate_vertex_normals(self):
"""Generate the normals for each vertex of the model
A normal will be the average normal of the adjacent faces of a vertex.
"""
self.normals = [Normal() for i in self.vertices]
for part in self.parts:
for face in part.faces:
v1 = Vertex.from_points(self.vertices[face.a.vertex], self.vertices[face.b.vertex])
v2 = Vertex.from_points(self.vertices[face.a.vertex], self.vertices[face.c.vertex])
v1.normalize()
v2.normalize()
cross = Vertex.cross_product(v1, v2)
self.normals[face.a.vertex] += cross
self.normals[face.b.vertex] += cross
self.normals[face.c.vertex] += cross
for normal in self.normals:
normal.normalize()
for part in self.parts:
for face in part.faces:
face.a.normal = face.a.vertex
face.b.normal = face.b.vertex
face.c.normal = face.c.vertex
def generate_face_normals(self):
"""Generate the normals for each face of the model
A normal will be the normal of the face
"""
# Build array of faces
faces = sum(map(lambda x: x.faces, self.parts), [])
self.normals = [Normal()] * len(faces)
for (index, face) in enumerate(faces):
v1 = Vertex.from_points(self.vertices[face.a.vertex], self.vertices[face.b.vertex])
v2 = Vertex.from_points(self.vertices[face.a.vertex], self.vertices[face.c.vertex])
cross = Vertex.cross_product(v1, v2)
cross.normalize()
self.normals[index] = cross
face.a.normal = index
face.b.normal = index
face.c.normal = index
def get_material_index(self, material):
"""Finds the index of the given material
:param material: Material you want the index of
"""
return [i for (i,m) in enumerate(self.materials) if m.name == material.name][0]
class TextModelParser(ModelParser):
def parse_file(self, path):
"""Sets the path of the model and parse each line
:param path: path to the text file to parse
"""
self.path = path
with open(path) as f:
for line in f.readlines():
line = line.rstrip()
if line != '':
self.parse_line(line)
class BoundingBox:
"""Represents a bounding box of a 3D model
"""
def __init__(self):
"""Initializes the coordinates of the bounding box
"""
self.min_x = +float('inf')
self.min_y = +float('inf')
self.min_z = +float('inf')
self.max_x = -float('inf')
self.max_y = -float('inf')
self.max_z = -float('inf')
def add(self, vector):
"""Adds a vector to a bounding box
If the vector is outside the bounding box, the bounding box will be
enlarged, otherwise, nothing will happen.
:param vector: the vector that will enlarge the bounding box
"""
self.min_x = min(self.min_x, vector.x)
self.min_y = min(self.min_y, vector.y)
self.min_z = min(self.min_z, vector.z)
self.max_x = max(self.max_x, vector.x)
self.max_y = max(self.max_y, vector.y)
self.max_z = max(self.max_z, vector.z)
def __str__(self):
"""Returns a string that represents the bounding box
"""
return "[{},{}],[{},{}],[{},{}]".format(
self.min_x,
self.min_y,
self.min_z,
self.max_x,
self.max_y,
self.max_z)
def get_center(self):
"""Returns the center of the bounding box
"""
return Vertex(
(self.min_x + self.max_x) / 2,
(self.min_y + self.max_y) / 2,
(self.min_z + self.max_z) / 2)
def get_scale(self):
"""Returns the maximum edge of the bounding box
"""
return max(
abs(self.max_x - self.min_x),
abs(self.max_y - self.min_y),
abs(self.max_z - self.min_z))
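# Illustrative usage sketch (added, not part of the original module): adding the
# two opposite corners of a unit cube grows the box so that get_center() returns
# the point (0.5, 0.5, 0.5) and get_scale() returns 1.0.
#
#     box = BoundingBox()
#     box.add(Vertex(0, 0, 0))
#     box.add(Vertex(1, 1, 1))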
class Exporter:
"""Represents an object that can export a model into a certain format
"""
def __init__(self, model):
"""Creates a exporter for the model
:param model: model to export
"""
self.model = model
|
{
"content_hash": "e82d38fcfff197d0e9849a14949612f8",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 116,
"avg_line_length": 32.0059880239521,
"alnum_prop": 0.5711880261927035,
"repo_name": "tforgione/model-converter",
"id": "f9ee647a684fedc59099b45f7af4d6ab608e617d",
"size": "10690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "d3/model/basemodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "848"
},
{
"name": "Python",
"bytes": "65661"
}
],
"symlink_target": ""
}
|
import os
import shutil
import re
from bs4 import BeautifulSoup
from settings import FOLDER_PERMISSIONS
file1=open("downloadablelinks.txt", "r")
downloadablelinks=file1.readlines()
file1.close()
downloadablelinks = [word.rstrip("\n") for word in downloadablelinks]
file2=open("activedownloadablelinks.txt", "r")
activedownloadablelinks=file2.readlines()
file2.close()
activedownloadablelinks = [word.rstrip("\n") for word in activedownloadablelinks]
file3=open("activeallexternallinks.txt", "r")
activeallexternallinks=file3.readlines()
file3.close()
activeallexternallinks = [word.rstrip("\n") for word in activeallexternallinks]
def link_is_appropriate(link, layer_level):
"""
Specifies which links we consider in each layer. It can be
modified to suit the website.
"""
    # correspondence between layer_level and ampersand count
correspondence = {0:0, 1:1, 2:3, 3:-1}
if link is None:
pass
elif link.startswith("index.html@sub"):
        if correspondence[layer_level] == link.count('&'):
return True
return False
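# Illustrative note (added): with the correspondence mapping above, a link such
# as "index.html@sub=1" (no '&') is accepted at layer 0, "index.html@sub=1&pg=2"
# (one '&') at layer 1, and a link containing three '&' characters at layer 2.
# Layer 3 maps to -1, which link.count('&') can never equal, so no link is
# accepted there.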
def get_unique_name(name, names):
"""
Checks whether a name exists in the dictionary of names.
Appends an integer to the name if it exists already.
"""
if name not in names:
names[name] = 1
return name
else:
names[name] = names[name] + 1
name = name + str(names[name]) # appends integer to name
return get_unique_name(name, names)
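# Illustrative example (added): with names = {}, successive calls behave as
#
#     get_unique_name("menu", names)   # -> "menu"
#     get_unique_name("menu", names)   # -> "menu2"
#     get_unique_name("menu", names)   # -> "menu3"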
def make_new_directory(path):
"""
Creates a new directory
"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
os.chmod(path, FOLDER_PERMISSIONS)
def copy_directory(src, dest):
try:
shutil.copytree(src, dest)
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
except OSError as e:
print('Directory not copied. Error: %s' % e)
def save_html(soup, path):
"""
Writes to html file from soup
"""
html = soup.prettify("utf-8")
with open(path, "wb") as f:
f.write(html)
def print_progress(layer_level):
"""
Prints progress of processing as vertical dots
"""
    if layer_level == 1:
print(".")
def make_menu_pages(src, dest, menu_links):
"""
Creates menu pages with modified menu links
- Checks the menu links in home page
- Creates a menu page for each menu link
- Changes the menu links in each menu page
"""
with open(os.path.join(src, "index.html")) as f:
soup = BeautifulSoup(f.read())
attrs = {"href": re.compile("^index.html@pg")}
for link_tag in soup.find_all('a', attrs=attrs):
link = link_tag.get('href')
if link is None:
continue
name = link_tag.string.strip(" \t\n\r").partition(' ')[0].lower()
menu_links[link] = name
with open(os.path.join(src, link)) as f:
menu_soup = BeautifulSoup(f.read())
for link_tag in menu_soup.find_all('a'):
link = link_tag.get('href')
elink = link_tag.get('href')
if link.startswith("http://"):
if (link in downloadablelinks):
link = link[6:]
link_tag['href'] = 2*"../" + "../external" + link
elif (link in activeallexternallinks):
link_tag['href'] = 'javascript:var c=confirm("Do You wish to access internet?");if(c==true){window.location="'+elink+'";}'
else:
link_tag['href'] = 'javascript:alert("Link is dead");'
attrs = {"href": "index.php.html"}
for menu_link_tag in menu_soup.find_all('a', attrs=attrs):
menu_link_tag['href'] = "../../"
attrs = {"href": re.compile("^index.html@pg")}
for menu_link_tag in menu_soup.find_all('a', attrs=attrs):
link_name = menu_link_tag.string.strip(" \t\n\r")
link_name = link_name.partition(' ')[0].lower()
menu_link_tag['href'] = "../../menus/" + link_name
parse_css_links(menu_soup, layer_level=2)
parse_images(menu_soup, layer_level=2)
make_new_directory(os.path.join(dest, "menus", name))
save_html(menu_soup, os.path.join(dest, "menus", name, "index.html"))
def arrange_resources(src, dest):
"""
Arranges all resources from src into dest
"""
list_of_dirs = [name for name in os.listdir(src)
if os.path.isdir(os.path.join(src, name))]
for dir_name in list_of_dirs:
copy_src = os.path.join(src, dir_name)
copy_dest = os.path.join(dest, "res", dir_name)
copy_directory(copy_src, copy_dest)
def parse_css_links(soup, layer_level):
"""
Parses css links in html files.
Modifies all css links according to layer_level
"""
for link_tag in soup.find_all('link'):
link = link_tag.get('href')
if link is not None:
if link.endswith(".css"):
link_tag['href'] = layer_level*"../" + "res/" + link
                if layer_level == 3:
link_tag['href'] = 4*"../" + "res/" + link
def parse_images(soup, layer_level):
"""
Parses image links in html files
Modifies all images links according to layer_level
"""
for link_tag in soup.find_all('img'):
link = link_tag.get('src')
if link is not None:
if not link.startswith("http"):
link_tag['src'] = layer_level*"../" + "res/" + link
                if layer_level == 3:
link_tag['src'] = 4*"../" + "res/" + link
def parse_menu_links(layer_level, link_tag, menu_links):
"""
Modifies links in the menu
"""
link = link_tag.get('href')
if link is None:
return
if link.startswith("index.php.html"):
link_tag['href'] = layer_level*"../"
        if layer_level == 3:
link_tag['href'] = 4*"../"
for menu_link, menu_name in menu_links.items():
if link.startswith(menu_link):
link_tag['href'] = layer_level*"../" + "menus/" + menu_name
            if layer_level == 3:
link_tag['href'] = 4*"../" + "menus/" + menu_name
def print_error_when_directly_run(name):
"""
Prints error message when python script is not imported
and is run as script
"""
if name == '__main__':
print("\nThis is not a standalone script.",
"\nUse 'python3 run.py' to run copy-website.")
print_error_when_directly_run(__name__)
|
{
"content_hash": "3b954dd9cd4978f622159668d1af18cb",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 142,
"avg_line_length": 34.81283422459893,
"alnum_prop": 0.5831029185867895,
"repo_name": "jayadeepk/copy-website",
"id": "47f4824287cc85ba9c5675a63dee18a5af29837a",
"size": "6917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14534"
},
{
"name": "Shell",
"bytes": "1043"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta2GroupSubject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""V1beta2GroupSubject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
self.name = name
@property
def name(self):
"""Gets the name of this V1beta2GroupSubject. # noqa: E501
name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
:return: The name of this V1beta2GroupSubject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta2GroupSubject.
name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
:param name: The name of this V1beta2GroupSubject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2GroupSubject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2GroupSubject):
return True
return self.to_dict() != other.to_dict()
|
{
"content_hash": "c3e3d3b98aa872db5d52a6fb7e766851",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 224,
"avg_line_length": 32.32231404958678,
"alnum_prop": 0.5868064433648683,
"repo_name": "kubernetes-client/python",
"id": "c04c64578b8d8211a27e19c6fa24e2cc616d1b2e",
"size": "3928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1beta2_group_subject.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
}
|
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.mo.managed_entity import ManagedEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class ComputeResource(ManagedEntity):
'''Represents a set of physical compute resources for a set of virtual
    machines. The base type ComputeResource, when instantiated by calling
AddStandaloneHost_Task, represents a single host. The subclass
ClusterComputeResource represents a cluster of hosts and adds distributed
management features such as availability and resource scheduling.A
ComputeResource always has a root ResourcePool associated with it. Certain
types of clusters such as those with VMware DRS enabled and standalone hosts
(ESX Server 3) support the creation of ResourcePool hierarchies.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.ComputeResource):
super(ComputeResource, self).__init__(core, name=name, ref=ref, type=type)
@property
def configurationEx(self):
'''Configuration of the compute resource; applies to both standalone hosts and
clusters. For a cluster this property will return a ClusterConfigInfoEx object.'''
return self.update('configurationEx')
@property
def datastore(self):
'''The datastore property is the subset of datastore objects in the datacenter
available in this ComputeResource.'''
return self.update('datastore')
@property
def environmentBrowser(self):
'''The environment browser object that identifies the environments that are
supported on this compute resource.'''
return self.update('environmentBrowser')
@property
def host(self):
'''List of hosts that are part of this compute resource. If the compute resource
is a standalone type, then this list contains just one element.'''
return self.update('host')
@property
def network(self):
'''The subset of network objects available in the datacenter that is available in
this ComputeResource.'''
return self.update('network')
@property
def resourcePool(self):
'''Reference to root resource pool.'''
return self.update('resourcePool')
@property
def summary(self):
'''Basic runtime information about a compute resource. This information is used on
summary screens and in list views.'''
return self.update('summary')
def ReconfigureComputeResource_Task(self, spec, modify):
'''Change the compute resource configuration.
:param spec: A set of configuration changes to apply to the compute resource. The specification can be a complete set of changes or a partial set of changes, applied incrementally. When invoking reconfigureEx on a cluster, this argument may be a ClusterConfigSpecEx object.
:param modify: Flag to specify whether the specification ("spec") should be applied incrementally. If "modify" is false and the operation succeeds, then the configuration of the cluster matches the specification exactly; in this case any unset portions of the specification will result in unset or default portions of the configuration.
'''
return self.delegate("ReconfigureComputeResource_Task")(spec, modify)
|
{
"content_hash": "9ba758cca5aebb6b377c71b9f71746aa",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 344,
"avg_line_length": 48.083333333333336,
"alnum_prop": 0.7036395147313691,
"repo_name": "xuru/pyvisdk",
"id": "fa33cca99cd63cad7c923fd9ba88894187a65c26",
"size": "3463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/mo/compute_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
import django_nyt
def disable_notify(func):
"""Disable notifications. Example:
@disable_notify
def your_function():
notify("no one will be notified", ...)
"""
def wrap(request, *args, **kwargs):
django_nyt._disable_notifications = True
response = func(request, *args, **kwargs)
django_nyt._disable_notifications = False
return response
return wrap
def login_required_ajax(func):
"""Similar to login_required. But if the request is an ajax request, then
it returns an error in json with a 403 status code."""
def wrap(request, *args, **kwargs):
if request.is_ajax():
if not request.user or not request.user.is_authenticated():
return json_view(lambda *a,
**kw: {'error': 'not logged in'})(request,
status=403)
return func(request, *args, **kwargs)
else:
return login_required(func)(request, *args, **kwargs)
return wrap
def json_view(func):
def wrap(request, *args, **kwargs):
obj = func(request, *args, **kwargs)
data = json.dumps(obj, ensure_ascii=False)
status = kwargs.get('status', 200)
response = HttpResponse(content_type='application/json', status=status)
response.write(data)
return response
return wrap
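# A minimal usage sketch (added; the view below is hypothetical, not part of
# django-nyt): stacking the decorators gives an ajax-only endpoint that returns
# JSON and rejects anonymous requests with a 403.
#
#     @login_required_ajax
#     @json_view
#     def unread_count(request, *args, **kwargs):
#         return {'count': 42}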
|
{
"content_hash": "908eb86ebcc43728f241ee501bdf788b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 32.7,
"alnum_prop": 0.600611620795107,
"repo_name": "spookylukey/django-nyt",
"id": "88d23f5ebb4202a6694e4d101dd116ec48903517",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_nyt/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "77940"
},
{
"name": "Shell",
"bytes": "5104"
}
],
"symlink_target": ""
}
|
import os
import re
from django import forms
from django.conf import settings
from django.forms import ModelForm
from django.forms.models import modelformset_factory
from django.template import Context, Template, TemplateSyntaxError
import commonware.log
import happyforms
from piston.models import Consumer
from product_details import product_details
from tower import ugettext_lazy as _lazy
from quieter_formset.formset import BaseModelFormSet
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import (
Collection, FeaturedCollection, MonthlyPick)
from olympia.compat.forms import CompatForm as BaseCompatForm
from olympia.files.models import File
from olympia.zadmin.models import SiteEvent, ValidationJob
LOGGER_NAME = 'z.zadmin'
log = commonware.log.getLogger(LOGGER_NAME)
class DevMailerForm(happyforms.Form):
_choices = [('eula',
'Developers who have set up EULAs for active add-ons'),
('sdk', 'Developers of active SDK add-ons'),
('all_extensions', 'All extension developers')]
recipients = forms.ChoiceField(choices=_choices, required=True)
subject = forms.CharField(widget=forms.TextInput(attrs=dict(size='100')),
required=True)
preview_only = forms.BooleanField(initial=True, required=False,
label=u'Log emails instead of sending')
message = forms.CharField(widget=forms.Textarea, required=True)
class BulkValidationForm(happyforms.ModelForm):
application = forms.ChoiceField(
label=_lazy(u'Application'),
choices=amo.APPS_CHOICES)
curr_max_version = forms.ChoiceField(
label=_lazy(u'Current Max. Version'),
choices=[('', _lazy(u'Select an application first'))])
target_version = forms.ChoiceField(
label=_lazy(u'Target Version'),
choices=[('', _lazy(u'Select an application first'))])
finish_email = forms.CharField(
required=False,
label=_lazy(u'Email when finished'))
class Meta:
model = ValidationJob
fields = ('application', 'curr_max_version', 'target_version',
'finish_email')
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
kw['initial']['finish_email'] = settings.FLIGTAR
super(BulkValidationForm, self).__init__(*args, **kw)
w = self.fields['application'].widget
# Get the URL after the urlconf has loaded.
w.attrs['data-url'] = reverse('zadmin.application_versions_json')
def version_choices_for_app_id(self, app_id):
versions = AppVersion.objects.filter(application=app_id)
return [(v.id, v.version) for v in versions]
def clean_application(self):
app_id = int(self.cleaned_data['application'])
self.cleaned_data['application'] = app_id
choices = self.version_choices_for_app_id(app_id)
self.fields['target_version'].choices = choices
self.fields['curr_max_version'].choices = choices
return self.cleaned_data['application']
def _clean_appversion(self, field):
return AppVersion.objects.get(pk=int(field))
def clean_curr_max_version(self):
return self._clean_appversion(self.cleaned_data['curr_max_version'])
def clean_target_version(self):
return self._clean_appversion(self.cleaned_data['target_version'])
path = os.path.join(settings.ROOT, 'src/olympia/zadmin/templates/zadmin')
texts = {
'validation': open('%s/%s' % (path, 'validation-email.txt')).read(),
}
varname = re.compile(r'{{\s*([a-zA-Z0-9_]+)\s*}}')
class NotifyForm(happyforms.Form):
subject = forms.CharField(widget=forms.TextInput, required=True)
preview_only = forms.BooleanField(
initial=True, required=False,
label=_lazy(u'Log emails instead of sending'))
text = forms.CharField(widget=forms.Textarea, required=True)
variables = ['{{PASSING_ADDONS}}', '{{FAILING_ADDONS}}', '{{APPLICATION}}',
'{{VERSION}}']
variable_names = [varname.match(v).group(1) for v in variables]
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
if 'text' in kw:
kw['initial']['text'] = texts[kw.pop('text')]
kw['initial']['subject'] = ('Add-on compatibility with '
'{{APPLICATION}} {{VERSION}}')
super(NotifyForm, self).__init__(*args, **kw)
def check_template(self, data):
try:
Template(data).render(Context({}))
except TemplateSyntaxError, err:
raise forms.ValidationError(err)
return data
def clean_text(self):
return self.check_template(self.cleaned_data['text'])
def clean_subject(self):
return self.check_template(self.cleaned_data['subject'])
class FeaturedCollectionForm(happyforms.ModelForm):
LOCALES = (('', u'(Default Locale)'),) + tuple(
(i, product_details.languages[i]['native'])
for i in settings.AMO_LANGUAGES)
application = forms.ChoiceField(amo.APPS_CHOICES)
collection = forms.CharField(widget=forms.HiddenInput)
locale = forms.ChoiceField(choices=LOCALES, required=False)
class Meta:
model = FeaturedCollection
fields = ('application', 'locale')
def clean_collection(self):
application = self.cleaned_data.get('application', None)
collection = self.cleaned_data.get('collection', None)
if not Collection.objects.filter(id=collection,
application=application).exists():
raise forms.ValidationError(
u'Invalid collection for this application.')
return collection
def save(self, commit=False):
collection = self.cleaned_data['collection']
f = super(FeaturedCollectionForm, self).save(commit=commit)
f.collection = Collection.objects.get(id=collection)
f.save()
return f
class BaseFeaturedCollectionFormSet(BaseModelFormSet):
def __init__(self, *args, **kw):
super(BaseFeaturedCollectionFormSet, self).__init__(*args, **kw)
for form in self.initial_forms:
try:
form.initial['collection'] = (
FeaturedCollection.objects
.get(id=form.instance.id).collection.id)
except (FeaturedCollection.DoesNotExist, Collection.DoesNotExist):
form.initial['collection'] = None
FeaturedCollectionFormSet = modelformset_factory(
FeaturedCollection,
form=FeaturedCollectionForm, formset=BaseFeaturedCollectionFormSet,
can_delete=True, extra=0)
class OAuthConsumerForm(happyforms.ModelForm):
class Meta:
model = Consumer
fields = ['name', 'description', 'status']
class MonthlyPickForm(happyforms.ModelForm):
image = forms.CharField(required=False)
blurb = forms.CharField(max_length=200,
widget=forms.Textarea(attrs={'cols': 20,
'rows': 2}))
class Meta:
model = MonthlyPick
widgets = {
'addon': forms.TextInput(),
}
fields = ('addon', 'image', 'blurb', 'locale')
MonthlyPickFormSet = modelformset_factory(MonthlyPick, form=MonthlyPickForm,
can_delete=True, extra=0)
class AddonStatusForm(ModelForm):
class Meta:
model = Addon
fields = ('status', 'highest_status')
class FileStatusForm(ModelForm):
class Meta:
model = File
fields = ('status',)
FileFormSet = modelformset_factory(File, form=FileStatusForm,
formset=BaseModelFormSet, extra=0)
class SiteEventForm(ModelForm):
class Meta:
model = SiteEvent
fields = ('start', 'end', 'event_type', 'description',
'more_info_url')
class YesImSure(happyforms.Form):
yes = forms.BooleanField(required=True, label="Yes, I'm sure")
class CompatForm(BaseCompatForm):
_minimum_choices = [(x, x) for x in xrange(100, -10, -10)]
minimum = forms.TypedChoiceField(choices=_minimum_choices, coerce=int,
required=False)
_ratio_choices = [('%.1f' % (x / 10.0), '%.0f%%' % (x * 10))
for x in xrange(9, -1, -1)]
ratio = forms.ChoiceField(choices=_ratio_choices, required=False)
class GenerateErrorForm(happyforms.Form):
error = forms.ChoiceField(choices=(
['zerodivisionerror', 'Zero Division Error (will email)'],
['iorequesterror', 'IORequest Error (no email)'],
['heka_statsd', 'Heka statsd message'],
['heka_json', 'Heka JSON message'],
['heka_cef', 'Heka CEF message'],
['heka_sentry', 'Heka Sentry message'],
['amo_cef', 'AMO CEF message']))
def explode(self):
error = self.cleaned_data.get('error')
if error == 'zerodivisionerror':
1 / 0
elif error == 'iorequesterror':
class IOError(Exception):
pass
raise IOError('request data read error')
elif error == 'heka_cef':
environ = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
config = {'cef.version': '0',
'cef.vendor': 'Mozilla',
'cef.device_version': '3',
'cef.product': 'zamboni',
'cef': True}
settings.HEKA.cef(
'xx\nx|xx\rx', 5, environ, config,
username='me', ext1='ok=ok', ext2='ok\\ok',
logger_info='settings.HEKA')
elif error == 'heka_statsd':
settings.HEKA.incr(name=LOGGER_NAME)
elif error == 'heka_json':
settings.HEKA.heka(
type="heka_json",
fields={'foo': 'bar', 'secret': 42,
'logger_type': 'settings.HEKA'})
elif error == 'heka_sentry':
# These are local variables only used
# by Sentry's frame hacking magic.
# They won't be referenced which may trigger flake8
# errors.
heka_conf = settings.HEKA_CONF # NOQA
active_heka_conf = settings.HEKA._config # NOQA
try:
1 / 0
except:
settings.HEKA.raven('heka_sentry error triggered')
elif error == 'amo_cef':
from olympia.amo.utils import log_cef
env = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
log_cef(settings.STATSD_PREFIX, 6, env)
class PriceTiersForm(happyforms.Form):
prices = forms.FileField()
|
{
"content_hash": "8acfeb2868123301608704d93f5e2cb6",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 79,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.6041553748870822,
"repo_name": "jpetto/olympia",
"id": "d9d3022bcb6e2454a73b0c9b90096ca6210713ba",
"size": "11070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/zadmin/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "665496"
},
{
"name": "HTML",
"bytes": "1606994"
},
{
"name": "JavaScript",
"bytes": "1315514"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4026490"
},
{
"name": "Shell",
"bytes": "9145"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import sys, os
version = '0.93b'
setup(name='aha.plugin.twitteroauth',
version=version,
description="A twitter auth plugin for aha",
long_description="""\
A plugin that supplies authentication support for twitter oauth.""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='web aha twitter authentication plugin',
author='Atsushi Shibata',
author_email='shibata@webcore.co.jp',
url='http://coreblog.org/aha',
license='BSD',
packages = [
'twitteroauth',
],
include_package_data=True,
zip_safe=False,
install_requires = [
'aha',
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
{
"content_hash": "87ebcb266b7af5a4af142b32fff29794",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 95,
"avg_line_length": 28.785714285714285,
"alnum_prop": 0.6104218362282878,
"repo_name": "Letractively/aha-gae",
"id": "7da20d91febedbdb9732a1dec1f9a475007599d5",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/aha.plugin.twitteroauth/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51818"
},
{
"name": "HTML",
"bytes": "29371"
},
{
"name": "JavaScript",
"bytes": "39684"
},
{
"name": "Makefile",
"bytes": "50"
},
{
"name": "Python",
"bytes": "417917"
}
],
"symlink_target": ""
}
|
"""Script to load runinfo from the lims process: 'Illumina Sequencing (Illumina SBS) 4.0'
into the flowcell database in statusdb.
Maya Brandi, Science for Life Laboratory, Stockholm, Sweden.
"""
import sys
import os
import codecs
from optparse import OptionParser
from pprint import pprint
from genologics.lims import *
from genologics.config import BASEURI, USERNAME, PASSWORD
from datetime import date
from lims_utils import *
from scilifelab.db.statusDB_utils import *
import scilifelab.log
lims = Lims(BASEURI, USERNAME, PASSWORD)
def main(project, sample, conf):
today = date.today()
couch = load_couch_server(conf)
ref_db = couch['reference']
if project:
load_project_udfs(ref_db)
if sample:
load_sample_udfs(ref_db)
def load_sample_udfs(ref_db):
"""Loads all sample level udfs into the reference database on statusdb.
ref_db is the reference database."""
udfs = prepare_udf_source_info('Sample', lims.get_samples())
samples = ref_db.get('project-samples-[key]')
samples_details = ref_db.get('project-samples-[key]-details')
for udf, udf_field in udfs.items():
if 'discontinued' not in udf.split('_'):
if udf in SAMP_UDF_EXCEPTIONS:
key = 'project-samples-[key]-'+udf
samples[udf] = key
inf=save_couchdb_ref_obj(ref_db, samples)
else:
key = 'project-samples-[key]-details-'+udf
samples_details[udf] = key
delkey = 'project-samples-[sample]-details-'+udf
delete_doc(ref_db,delkey)
inf=save_couchdb_ref_obj(ref_db, samples_details)
udf_field['_id'] = key
inf=save_couchdb_ref_obj(ref_db, udf_field)
def load_project_udfs(ref_db):
"""Loads all project level udfs into the reference database on statusdb.
ref_db is the reference database."""
udfs = prepare_udf_source_info('Project', lims.get_projects())
project = ref_db.get('project')
project_details = ref_db.get('project-details')
for udf, udf_field in udfs.items():
if 'discontinued' not in udf.split('_'):
if udf in PROJ_UDF_EXCEPTIONS:
key = 'project-'+udf
project[udf] = key
inf=save_couchdb_ref_obj(ref_db, project)
else:
key = 'project-details-'+udf
project_details[udf] = key
inf=save_couchdb_ref_obj(ref_db, project_details)
udf_field['_id'] = key
inf=save_couchdb_ref_obj(ref_db, udf_field)
def prepare_udf_source_info(element_type = None, element_list = None):
"""element_type should be some lims element type, such as
'Project', 'Sample', 'Artifact'...
    from which you want to get udf source info.
    element_list should be a list of instances of the element type,
    e.g. output from get_samples(), get_projects()..."""
udfs = lims.get_udfs(attach_to_name = element_type)
objects={}
for udf in udfs:
db_name=udf.name.lower().replace(' ','_').replace('-','_')
objects[db_name] = {'doc_source': { 'lims_field':udf.name,
'lims_element': element_type,
'source': 'Lims'},
'doc_type': udf.root.get('type')}
return objects
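# Illustrative example (added, udf name hypothetical): a udf named e.g.
# "Sample Type" attached to 'Sample' elements would appear in the returned
# dict under the key 'sample_type' as
#
#     {'doc_source': {'lims_field': 'Sample Type',
#                     'lims_element': 'Sample',
#                     'source': 'Lims'},
#      'doc_type': <type string from udf.root.get('type')>}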
def find_example(element_list = None, udf_name = None):
"""To find an example value od the udf_name in the element_list,
where element_list should be an list of instanses of some element type.
Eg. output from get_samples(), get_projects()... Will only make 1000 tryals."""
i=0
if element_list and udf_name:
print 'Searching udf-example of: '+ udf_name
for elm in element_list:
i=i+1
try:
example=dict(elm.udf.items())[udf_name]
if example:
print example
return example
except:
pass
if i==1000:
return 'Not found'
if __name__ == '__main__':
usage = "Usage: python flowcell_summary_upload_LIMS.py [options]"
parser = OptionParser(usage=usage)
parser.add_option("-p", "--project", dest="project", action="store_true", default=False,
help = "Upload source info for project level udfs into the reference database.")
parser.add_option("-s", "--sample", dest="sample", action="store_true", default=False,
help = "Upload source info for sample level udfs into the reference database.")
parser.add_option("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
(options, args) = parser.parse_args()
LOG = scilifelab.log.file_logger('LOG', options.conf, 'lims2db_reference.log','log_dir_tools')
main(options.project, options.sample , options.conf)
|
{
"content_hash": "5a729428be257e1e0dcf87ccb06f2775",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 102,
"avg_line_length": 41.32231404958678,
"alnum_prop": 0.6028,
"repo_name": "senthil10/scilifelab",
"id": "abb2e23ec71004c5d8f3aa631f1d8ba8a7f1e80e",
"size": "5023",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scilifelab/lims_utils/reference_uppload_LIMS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3192"
},
{
"name": "Mako",
"bytes": "13990"
},
{
"name": "Python",
"bytes": "1324303"
},
{
"name": "R",
"bytes": "4392"
},
{
"name": "Shell",
"bytes": "38743"
}
],
"symlink_target": ""
}
|
class CliFailedCommandError(Exception):
"""cli command failed."""
class CliSyntaxError(Exception):
"""cli command had a syntax error."""
class UnexpectedApiReturnValueError(Exception):
"""exception raised when the API return value is unexpected"""
def __init__(self, retval, message):
self.retval = retval
self.message = message
super().__init__(message)
|
{
"content_hash": "26b751c550748044a08de1fe193d0e99",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6725,
"repo_name": "FDio/vpp",
"id": "611e5a3845d3c25fe526b056f9f9b5372758a399",
"size": "400",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/vpp_papi_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "19971"
},
{
"name": "C",
"bytes": "26080388"
},
{
"name": "C++",
"bytes": "1180881"
},
{
"name": "CMake",
"bytes": "229900"
},
{
"name": "Dockerfile",
"bytes": "1075"
},
{
"name": "Emacs Lisp",
"bytes": "111146"
},
{
"name": "Go",
"bytes": "66545"
},
{
"name": "HTML",
"bytes": "636"
},
{
"name": "Jinja",
"bytes": "1135"
},
{
"name": "Lua",
"bytes": "79974"
},
{
"name": "M4",
"bytes": "257"
},
{
"name": "Makefile",
"bytes": "105502"
},
{
"name": "Perl",
"bytes": "6569"
},
{
"name": "Python",
"bytes": "5028232"
},
{
"name": "Ruby",
"bytes": "3865"
},
{
"name": "Shell",
"bytes": "148207"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import pandas as pd
from sklearn.base import TransformerMixin
class NameTransformer(TransformerMixin):
def __init__(self, use=True):
self.use = use
def transform(self, features_raw, **transform_params):
if self.use:
features = features_raw.copy(deep=True)
name_len = features.Name.apply(lambda x: len(x))
return pd.concat([features,
pd.DataFrame({'NameLen': name_len})], axis=1)
return features_raw
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, *args, **kwargs):
return { 'use': self.use }
def set_params(self, **params):
if 'use' in params:
self.use = params.get('use')
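# A minimal usage sketch (added, data values hypothetical): given a DataFrame
# with a 'Name' column, the transformer appends a 'NameLen' column when
# use=True and returns the frame unchanged when use=False.
#
#     df = pd.DataFrame({'Name': ['Braund, Mr. Owen Harris',
#                                 'Heikkinen, Miss. Laina']})
#     NameTransformer(use=True).fit_transform(df).columns.tolist()
#     # -> ['Name', 'NameLen']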
|
{
"content_hash": "46a1db2d0d8e39527d1d417a9037b22f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.5868187579214195,
"repo_name": "wojtekwalczak/kaggle_titanic",
"id": "53d30541d3b9e18a12cefe5c8ccacbde02c27f62",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "titanic/transformers/NameTransformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24286"
}
],
"symlink_target": ""
}
|
import numpy as np
from .basis import BasisFamily
from scipy.interpolate import BSpline, splev
class BSplineFamily(BasisFamily):
"""B-spline basis functions.
This class represents a B-spline basis for piecewise polynomials defined
across a set of breakpoints with given degree and smoothness. On each
interval between two breakpoints, we have a polynomial of a given degree
and the spline is continuous up to a given smoothness at interior
breakpoints.
Parameters
----------
breakpoints : 1D array or 2D array of float
The breakpoints for the spline(s).
degree : int or list of ints
For each spline variable, the degree of the polynomial between
breakpoints. If a single number is given and more than one spline
variable is specified, the same degree is used for each spline
variable.
smoothness : int or list of ints
For each spline variable, the smoothness at breakpoints (number of
derivatives that should match).
vars : None or int, optional
The number of spline variables. If specified as None (default),
then the spline basis describes a single variable, with no indexing.
        If the number of spline variables is > 0, then the spline basis is
        indexed using the `var` keyword.
"""
def __init__(self, breakpoints, degree, smoothness=None, vars=None):
"""Create a B-spline basis for piecewise smooth polynomials."""
        # Process the breakpoints for the spline
breakpoints = np.array(breakpoints, dtype=float)
if breakpoints.ndim == 2:
raise NotImplementedError(
"breakpoints for each spline variable not yet supported")
elif breakpoints.ndim != 1:
raise ValueError("breakpoints must be convertable to a 1D array")
elif breakpoints.size < 2:
raise ValueError("break point vector must have at least 2 values")
elif np.any(np.diff(breakpoints) <= 0):
raise ValueError("break points must be strictly increasing values")
# Decide on the number of spline variables
if vars is None:
nvars = 1
self.nvars = None # track as single variable
elif not isinstance(vars, int):
raise TypeError("vars must be an integer")
else:
nvars = vars
self.nvars = nvars
#
# Process B-spline parameters (degree, smoothness)
#
# B-splines are defined on a set of intervals separated by
# breakpoints. On each interval we have a polynomial of a certain
# degree and the spline is continuous up to a given smoothness at
# breakpoints. The code in this section allows some flexibility in
# the way that all of this information is supplied, including using
# scalar values for parameters (which are then broadcast to each
# output) and inferring values and dimensions from other
# information, when possible.
#
# Utility function for broadcasting spline params (degree, smoothness)
def process_spline_parameters(
values, length, allowed_types, minimum=0,
default=None, name='unknown'):
# Preprocessing
if values is None and default is None:
return None
elif values is None:
values = default
elif isinstance(values, np.ndarray):
# Convert ndarray to list
values = values.tolist()
# Figure out what type of object we were passed
if isinstance(values, allowed_types):
# Single number of an allowed type => broadcast to list
values = [values for i in range(length)]
elif all([isinstance(v, allowed_types) for v in values]):
# List of values => make sure it is the right size
if len(values) != length:
raise ValueError(f"length of '{name}' does not match"
f" number of variables")
else:
raise ValueError(f"could not parse '{name}' keyword")
# Check to make sure the values are OK
if values is not None and any([val < minimum for val in values]):
raise ValueError(
f"invalid value for '{name}'; must be at least {minimum}")
return values
# Degree of polynomial
degree = process_spline_parameters(
degree, nvars, (int), name='degree', minimum=1)
# Smoothness at breakpoints; set default to degree - 1 (max possible)
smoothness = process_spline_parameters(
smoothness, nvars, (int), name='smoothness', minimum=0,
default=[d - 1 for d in degree])
        # Make sure degree is sufficient for the level of smoothness
if any([degree[i] - smoothness[i] < 1 for i in range(nvars)]):
raise ValueError("degree must be greater than smoothness")
# Store the parameters for the spline (self.nvars already stored)
self.breakpoints = breakpoints
self.degree = degree
self.smoothness = smoothness
#
# Compute parameters for a SciPy BSpline object
#
# To create a B-spline, we need to compute the knotpoints, keeping
# track of the use of repeated knotpoints at the initial knot and
# final knot as well as repeated knots at intermediate points
# depending on the desired smoothness.
#
# Store the coefficients for each output (useful later)
self.coef_offset, self.coef_length, offset = [], [], 0
for i in range(nvars):
# Compute number of coefficients for the piecewise polynomial
ncoefs = (self.degree[i] + 1) * (len(self.breakpoints) - 1) - \
(self.smoothness[i] + 1) * (len(self.breakpoints) - 2)
self.coef_offset.append(offset)
self.coef_length.append(ncoefs)
offset += ncoefs
self.N = offset # save the total number of coefficients
# Create knotpoints for each spline variable
# TODO: extend to multi-dimensional breakpoints
self.knotpoints = []
for i in range(nvars):
# Allocate space for the knotpoints
self.knotpoints.append(np.empty(
(self.degree[i] + 1) + (len(self.breakpoints) - 2) * \
(self.degree[i] - self.smoothness[i]) + (self.degree[i] + 1)))
# Initial knotpoints (multiplicity = order)
self.knotpoints[i][0:self.degree[i] + 1] = self.breakpoints[0]
offset = self.degree[i] + 1
# Interior knotpoints (multiplicity = degree - smoothness)
nknots = self.degree[i] - self.smoothness[i]
assert nknots > 0 # just in case
for j in range(1, self.breakpoints.size - 1):
self.knotpoints[i][offset:offset+nknots] = self.breakpoints[j]
offset += nknots
# Final knotpoint (multiplicity = order)
self.knotpoints[i][offset:offset + self.degree[i] + 1] = \
self.breakpoints[-1]
def __repr__(self):
return f'<{self.__class__.__name__}: nvars={self.nvars}, ' + \
f'degree={self.degree}, smoothness={self.smoothness}>'
# Compute the kth derivative of the ith basis function at time t
def eval_deriv(self, i, k, t, var=None):
"""Evaluate the kth derivative of the ith basis function at time t."""
if self.nvars is None or (self.nvars == 1 and var is None):
# Use same variable for all requests
var = 0
elif self.nvars > 1 and var is None:
raise SystemError(
"scalar variable call to multi-variable splines")
# Create a coefficient vector for this spline
coefs = np.zeros(self.coef_length[var]); coefs[i] = 1
# Evaluate the derivative of the spline at the desired point in time
return BSpline(self.knotpoints[var], coefs,
self.degree[var]).derivative(k)(t)
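# A minimal usage sketch (added, values chosen for illustration): a single
# spline variable on [0, 5] with interior breakpoints at 1 and 3, cubic pieces
# and C^1 continuity at the breakpoints.
#
#     basis = BSplineFamily([0, 1, 3, 5], degree=3, smoothness=1)
#     basis.N                      # number of free coefficients (here 8)
#     basis.eval_deriv(0, 0, 2.0)  # value of the first basis function at t = 2
#     basis.eval_deriv(0, 1, 2.0)  # its first derivative at t = 2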
|
{
"content_hash": "23decba813c6c4f20c07a94fd07ab2d5",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 43.71808510638298,
"alnum_prop": 0.6049397736950968,
"repo_name": "python-control/python-control",
"id": "c771beb59f2436297af6d4927879290b53cef9b8",
"size": "8452",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "control/flatsys/bspline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "126"
},
{
"name": "Python",
"bytes": "1630504"
}
],
"symlink_target": ""
}
|
from api import api
def init_app(app, config):
app.config.from_pyfile(config)
app.add_url_rule('/', 'root', lambda: app.send_static_file('index.html'))
app.register_blueprint(api, url_prefix='/api')
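# A minimal usage sketch (added; the config filename is hypothetical, and the
# application is assumed to be a Flask app, as the API used above suggests):
# init_app wires the static index page and the API blueprint onto it.
#
#     from flask import Flask
#     app = Flask(__name__)
#     init_app(app, 'config.py')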
|
{
"content_hash": "acce187308e8aa3eb104c8b4df1e8249",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 74,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.7093596059113301,
"repo_name": "UoMCS/syllabus-visualisation",
"id": "734909fa59c91ca656c14f6a583a99e11733bd10",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/init_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1472"
},
{
"name": "HTML",
"bytes": "10814"
},
{
"name": "JavaScript",
"bytes": "12702"
},
{
"name": "Python",
"bytes": "23702"
}
],
"symlink_target": ""
}
|
"""
Runs MultiNet on a whole bunch of input images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import sys
# configure logging
if 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070
import scipy as scp
import scipy.misc
import numpy as np
import tensorflow as tf
import time
flags = tf.app.flags
FLAGS = flags.FLAGS
sys.path.insert(1, os.path.realpath('incl'))
import train as united_train
import tensorvision.train as train
import tensorvision.utils as utils
import tensorvision.core as core
from PIL import Image, ImageDraw, ImageFont
flags.DEFINE_string('data',
"data_road/testing.txt",
'Text file containing images.')
flags.DEFINE_bool('speed_test',
False,
'Only measure inference speed.')
res_folder = 'results'
def _output_generator(sess, tensor_list, image_pl, data_file,
process_image=lambda x: x):
image_dir = os.path.dirname(data_file)
with open(data_file) as file:
for datum in file:
datum = datum.rstrip()
image_file = datum.split(" ")[0]
image_file = os.path.join(image_dir, image_file)
image = scp.misc.imread(image_file)
image = process_image(image)
feed_dict = {image_pl: image}
start_time = time.time()
output = sess.run(tensor_list, feed_dict=feed_dict)
yield image_file, output
def eval_runtime(sess, subhypes, image_pl, eval_list, data_file):
logging.info(' ')
logging.info('Evaluation complete. Measuring runtime.')
image_dir = os.path.dirname(data_file)
with open(data_file) as file:
for datum in file:
datum = datum.rstrip()
image_file = datum.split(" ")[0]
image_file = os.path.join(image_dir, image_file)
image = scp.misc.imread(image_file)
image = process_image(subhypes, image)
feed = {image_pl: image}
sess.run(eval_list, feed_dict=feed)
sess.run(eval_list, feed_dict=feed)
sess.run(eval_list, feed_dict=feed)
for i in xrange(100):
_ = sess.run(eval_list, feed_dict=feed)
start_time = time.time()
for i in xrange(100):
_ = sess.run(eval_list, feed_dict=feed)
dt = (time.time() - start_time)/100
logging.info('Joined inference can be conducted at the following rates on'
' your machine:')
logging.info('Speed (msec): %f ', 1000*dt)
logging.info('Speed (fps): %f ', 1/dt)
return dt
def test_constant_input(subhypes):
road_input_conf = subhypes['road']['jitter']
seg_input_conf = subhypes['segmentation']['jitter']
car_input_conf = subhypes['detection']
gesund = True \
and road_input_conf['image_width'] == seg_input_conf['image_width'] \
and road_input_conf['image_height'] == seg_input_conf['image_height'] \
and car_input_conf['image_width'] == seg_input_conf['image_width'] \
        and car_input_conf['image_height'] == seg_input_conf['image_height']
if not gesund:
logging.error("The different tasks are training"
"using different resolutions. Please retrain all tasks,"
"using the same resolution.")
exit(1)
return
def test_segmentation_input(subhypes):
if not subhypes['segmentation']['jitter']['reseize_image']:
logging.error('')
logging.error("Issue with Segmentation input handling.")
logging.error("Segmentation input will be resized during this"
"evaluation, but was not resized during training.")
logging.error("This will lead to bad results.")
logging.error("To use this script please train segmentation using"
"the configuration:.")
logging.error("""
{
"jitter": {
"reseize_image": true,
"image_height" : 384,
"image_width" : 1248,
},
}""")
logging.error("Alternatively implement evaluation using non-resized"
" input.")
exit(1)
return
def road_draw(image, highway):
im = Image.fromarray(image.astype('uint8'))
draw = ImageDraw.Draw(im)
fnt = ImageFont.truetype('FreeMono/FreeMonoBold.ttf', 40)
shape = image.shape
if highway:
draw.text((65, 10), "Highway",
font=fnt, fill=(255, 255, 0, 255))
draw.ellipse([10, 10, 55, 55], fill=(255, 255, 0, 255),
outline=(255, 255, 0, 255))
else:
draw.text((65, 10), "minor road",
font=fnt, fill=(255, 0, 0, 255))
draw.ellipse([10, 10, 55, 55], fill=(255, 0, 0, 255),
outline=(255, 0, 0, 255))
return np.array(im).astype('float32')
def run_eval(load_out, output_folder, data_file):
meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl = load_out
assert(len(meta_hypes['model_list']) == 3)
# inf_out['pred_boxes_new'], inf_out['pred_confidences']
seg_softmax = decoded_logits['segmentation']['softmax']
pred_boxes_new = decoded_logits['detection']['pred_boxes_new']
pred_confidences = decoded_logits['detection']['pred_confidences']
road_softmax = decoded_logits['road']['softmax'][0]
eval_list = [seg_softmax, pred_boxes_new, pred_confidences, road_softmax]
def my_process(image):
return process_image(subhypes, image)
if FLAGS.speed_test:
eval_runtime(sess, subhypes, image_pl, eval_list, data_file)
exit(0)
test_constant_input(subhypes)
test_segmentation_input(subhypes)
import utils.train_utils as dec_utils
gen = _output_generator(sess, eval_list, image_pl, data_file, my_process)
for image_file, output in gen:
image = scp.misc.imread(image_file)
image = process_image(subhypes, image)
shape = image.shape
seg_softmax, pred_boxes_new, pred_confidences, road_softmax = output
# Create Segmentation Overlay
shape = image.shape
seg_softmax = seg_softmax[:, 1].reshape(shape[0], shape[1])
hard = seg_softmax > 0.5
overlay_image = utils.fast_overlay(image, hard)
# Draw Detection Boxes
new_img, rects = dec_utils.add_rectangles(
subhypes['detection'], [overlay_image], pred_confidences,
pred_boxes_new, show_removed=False,
use_stitching=True, rnn_len=subhypes['detection']['rnn_len'],
min_conf=0.50, tau=subhypes['detection']['tau'])
# Draw road classification
highway = (np.argmax(road_softmax) == 1)
new_img = road_draw(new_img, highway)
# Save image file
im_name = os.path.basename(image_file)
new_im_file = os.path.join(output_folder, im_name)
scp.misc.imsave(new_im_file, new_img)
logging.info("Plotting file: {}".format(new_im_file))
eval_runtime(sess, subhypes, image_pl, eval_list, data_file)
exit(0)
def process_image(subhypes, image):
hypes = subhypes['road']
shape = image.shape
image_height = hypes['jitter']['image_height']
image_width = hypes['jitter']['image_width']
assert(image_height >= shape[0])
assert(image_width >= shape[1])
image = scp.misc.imresize(image, (image_height,
image_width, 3),
interp='cubic')
return image
def load_united_model(logdir):
subhypes = {}
subgraph = {}
submodules = {}
subqueues = {}
first_iter = True
meta_hypes = utils.load_hypes_from_logdir(logdir, subdir="",
base_path='hypes')
for model in meta_hypes['models']:
subhypes[model] = utils.load_hypes_from_logdir(logdir, subdir=model)
hypes = subhypes[model]
hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
hypes['dirs']['image_dir'] = meta_hypes['dirs']['image_dir']
submodules[model] = utils.load_modules_from_logdir(logdir,
dirname=model,
postfix=model)
modules = submodules[model]
image_pl = tf.placeholder(tf.float32)
image = tf.expand_dims(image_pl, 0)
image.set_shape([1, 384, 1248, 3])
decoded_logits = {}
hypes = subhypes['segmentation']
modules = submodules['segmentation']
logits = modules['arch'].inference(hypes, image, train=False)
for model in meta_hypes['models']:
hypes = subhypes[model]
modules = submodules[model]
optimizer = modules['solver']
with tf.name_scope('Validation_%s' % model):
reuse = {True: False, False: True}[first_iter]
scope = tf.get_variable_scope()
decoded_logits[model] = modules['objective'].decoder(hypes, logits,
train=False)
first_iter = False
sess = tf.Session()
saver = tf.train.Saver()
cur_step = core.load_weights(logdir, sess, saver)
return meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl
def main(_):
utils.set_gpus_to_use()
logdir = FLAGS.logdir
data_file = FLAGS.data
if logdir is None:
logging.error('Usage python predict_joint --logdir /path/to/logdir'
'--data /path/to/data/txt')
exit(1)
output_folder = os.path.join(logdir, res_folder)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
logdir = logdir
utils.load_plugins()
if 'TV_DIR_DATA' in os.environ:
data_file = os.path.join(os.environ['TV_DIR_DATA'], data_file)
else:
data_file = os.path.join('DATA', data_file)
if not os.path.exists(data_file):
logging.error('Please provide a valid data_file.')
logging.error('Use --data_file')
exit(1)
if 'TV_DIR_RUNS' in os.environ:
os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
'UnitedVision2')
logging_file = os.path.join(output_folder, "analysis.log")
utils.create_filewrite_handler(logging_file, mode='a')
load_out = load_united_model(logdir)
run_eval(load_out, output_folder, data_file)
# stopping input Threads
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "0760f03210cc77b302a20e9647769abe",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 79,
"avg_line_length": 32.595238095238095,
"alnum_prop": 0.5962381300219138,
"repo_name": "MarvinTeichmann/MultiNet",
"id": "7134b6a2b2b17eba6f5db9ee185667dd978b4b26",
"size": "10999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "predict_joint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53509"
}
],
"symlink_target": ""
}
|
from horizon.test import helpers as test
class BrowserTests(test.TestCase):
# Unit tests for browser.
def test_me(self):
self.assertTrue(1 + 1 == 2)
|
{
"content_hash": "68f814a188630f8668a4f1205a71e4e7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.6706586826347305,
"repo_name": "PaulMcMillan/shmoocon_2014_talk",
"id": "d470715e665749085c83f23e9dc5948b2e457d05",
"size": "167",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "caravan/caravan/dashboards/tasks/browser/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "JavaScript",
"bytes": "56732"
},
{
"name": "Python",
"bytes": "50875"
}
],
"symlink_target": ""
}
|
"""
py3compat, based on jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team.
:license: BSD, see LICENSE for details.
"""
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import sys
PY2 = str is bytes
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
# avoid flake8 F821 undefined name 'unicode'
try:
text_type = unicode # Python 2
string_types = (str, unicode)
except NameError:
text_type = str # Python 3
string_types = (str, )
# avoid flake8 F821 undefined name 'xrange'
try:
range_type = xrange # Python 2
except NameError:
range_type = range # Python 3
if not PY2:
unichr = chr
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle # noqa
from cStringIO import StringIO as BytesIO, StringIO # noqa
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
# avoid flake8 F821 undefined name 'unicode'
try: # Python 2
if isinstance(filename, unicode):
return filename.encode('utf-8')
except NameError: # Python 3
pass
return filename
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
    # __init__ come back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
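# Hedged usage sketch (added for illustration; not part of the original module).
# with_metaclass() lets one class statement apply a real metaclass under both
# Python 2 and Python 3; the helper below uses only names defined in this file.
def _with_metaclass_example():
    class Registry(type):
        # Toy metaclass that records every class it creates.
        created = []

        def __new__(mcs, name, bases, namespace):
            cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
            Registry.created.append(cls)
            return cls

    class Plugin(with_metaclass(Registry, object)):
        pass

    assert type(Plugin) is Registry            # the real metaclass is applied
    assert Plugin.__mro__ == (Plugin, object)  # no temporary class in the MRO
    assert Plugin in Registry.created
    return Plugin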
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
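# Hedged usage sketch (added for illustration; not part of the original module).
# Call sites typically touch only the small shim surface defined above.
def _compat_example():
    payload = {'name': text_type('py3compat'), 'major': 3}
    assert isinstance(payload['name'], string_types)
    # iteritems() behaves as a plain iterator on both interpreter lines.
    keys = sorted(k for k, _ in iteritems(payload))
    assert keys == ['major', 'name']
    assert list(range_type(3)) == [0, 1, 2]
    return keys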
|
{
"content_hash": "eed9379e2fe777a79534537b23421f8d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 72,
"avg_line_length": 28.55,
"alnum_prop": 0.6293053123175715,
"repo_name": "yeleman/py3compat",
"id": "ed6a4012df48c7fea7ddc6ae1b60a8dbae94ed2b",
"size": "3505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3compat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4610"
}
],
"symlink_target": ""
}
|
from weight.weight_vector import WeightVector
from data.data_pool import DataPool
import os
from learner import logger
__version__ = '1.0.0'
class PerceptronLearnerBase:
name = "PerceptronLearnerBase"
def __init__(self, w_vector=None):
self.w_vector = {}
if w_vector is None:
return
if not isinstance(w_vector, WeightVector):
raise ValueError(
"LEARNER [ERROR]: w_vector is not an instance of WeightVector")
for key in w_vector.keys():
self.w_vector[key] = w_vector[key]
return
def sequential_learn(self,
f_argmax,
data_pool,
iterations=1,
d_filename=None,
dump_freq=1):
# check values
if not isinstance(data_pool, DataPool):
raise ValueError("LEARNER [ERROR]: data_pool not of DataPool type")
if not isinstance(iterations, int):
raise ValueError("LEARNER [ERROR]: iterations not of int type")
if iterations < 1:
raise ValueError("LEARNER [ERROR]: iterations needs to be positive integer")
if d_filename is not None and not isinstance(d_filename, str):
raise ValueError("LEARNER [ERROR]: d_filename needs to be str or None")
if not isinstance(dump_freq, int):
raise ValueError("LEARNER [ERROR]: dump_freq needs to be int")
logger.info("Starting sequential train")
logger.info("Using Learner: " + self.name)
self.w_vector = {}
# for t = 1 ... T
for t in range(iterations):
logger.info("Starting Iteration %d" % t)
logger.info("Initial Number of Keys: %d" % len(self.w_vector.keys()))
vector_list = self._iteration_learn(data_pool=data_pool,
init_w_vector=self.w_vector,
f_argmax=f_argmax,
log=True,
info="Iteration %d, " % t)
self.w_vector = self._iteration_proc(vector_list)
logger.info("Iteration complete, total number of keys: %d" % len(self.w_vector.keys()))
if d_filename is not None:
if t % dump_freq == 0 or t == iterations - 1:
tmp = self.export()
tmp.dump(d_filename + "_Iter_%d.db" % (t + 1))
return self.export()
def parallel_learn(self,
f_argmax,
data_pool,
iterations=1,
d_filename=None,
dump_freq=1,
sparkContext=None,
hadoop=False):
def create_dp(textString, fgen, data_format):
dp = DataPool(fgen = fgen,
data_format = data_format,
textString = textString[1])
return dp
def get_sent_num(dp):
return dp.get_sent_num()
# check values
from pyspark import SparkContext
sc = sparkContext
if sparkContext is None:
raise ValueError("LEARNER [ERROR]: sparkContext not specified")
if not isinstance(sc, SparkContext):
raise ValueError("LEARNER [ERROR]: sparkContext not of pyspark.context.SparkContext type")
if not isinstance(data_pool, DataPool):
raise ValueError("LEARNER [ERROR]: data_pool not of DataPool type")
if not isinstance(iterations, int):
raise ValueError("LEARNER [ERROR]: iterations not of int type")
if iterations < 1:
raise ValueError("LEARNER [ERROR]: iterations needs to be positive integer")
if d_filename is not None and not isinstance(d_filename, str):
raise ValueError("LEARNER [ERROR]: d_filename needs to be str or None")
if not isinstance(dump_freq, int):
raise ValueError("LEARNER [ERROR]: dump_freq needs to be int")
logger.info("Starting parallel train")
logger.info("Using Learner: " + self.name)
dir_name = data_pool.loadedPath()
data_format = data_pool.data_format
fgen = data_pool.fgen
        # By default, when HDFS is configured for Spark, even in local mode it will
        # still try to load from HDFS. The following code works around that behaviour.
if hadoop is True:
train_files = sc.wholeTextFiles(dir_name, minPartitions=10).cache()
else:
dir_name = os.path.abspath(os.path.expanduser(dir_name))
train_files = sc.wholeTextFiles("file://" + dir_name, minPartitions=10).cache()
dp = train_files.map(lambda t: create_dp(textString = t,
fgen = fgen,
data_format = data_format)).cache()
self.w_vector = {}
tmp = dp.map(get_sent_num).sum()
logger.info("Totel number of sentences: %d" % tmp)
for t in range(iterations):
logger.info("Starting Iteration %d" % t)
logger.info("Initial Number of Keys: %d" % len(self.w_vector.keys()))
w_vector_list = dp.flatMap(
lambda t: self._iteration_learn(data_pool=t,
init_w_vector=self.w_vector,
f_argmax=f_argmax))
w_vector_list = w_vector_list.combineByKey(
lambda value: value,
lambda x, value: tuple(map(sum, zip(x, value))),
lambda x, y: tuple(map(sum, zip(x, y)))).collect()
self.w_vector = self._iteration_proc(w_vector_list)
logger.info("Iteration complete, total number of keys: %d" % len(self.w_vector.keys()))
if d_filename is not None:
if t % dump_freq == 0 or t == iterations - 1:
tmp = self.export()
tmp.dump(d_filename + "_Iter_%d.db" % (t + 1), sparkContext)
return self.export()
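# Hedged usage sketch (added for illustration; not part of the original module).
# PerceptronLearnerBase is not instantiated directly: a concrete learner
# supplies _iteration_learn(), _iteration_proc() and export().  A sequential
# run then looks roughly like the following (the learner name, argmax callback
# and dump prefix are placeholders, not taken from this repository):
#
#     learner = SomeConcretePerceptronLearner()
#     data_pool = DataPool(...)  # built elsewhere with an fgen and data_format
#     weights = learner.sequential_learn(f_argmax=my_argmax,
#                                        data_pool=data_pool,
#                                        iterations=5,
#                                        d_filename='weights',  # writes weights_Iter_N.db
#                                        dump_freq=1)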
|
{
"content_hash": "fc28d95aa8c9c85e04429f6c6adfb081",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 102,
"avg_line_length": 41.17880794701987,
"alnum_prop": 0.5276616275329687,
"repo_name": "sfu-natlang/glm-parser",
"id": "d88673293fcf20f2264c554e4844bdd4256ae75f",
"size": "6218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/learner/perceptron_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32989"
},
{
"name": "Perl",
"bytes": "12897"
},
{
"name": "Python",
"bytes": "2217716"
},
{
"name": "Shell",
"bytes": "26389"
}
],
"symlink_target": ""
}
|
import os
import util
import glob
import re
import shutil
import tarfile
import types
import actions
import source
def make_conf_source(**keywds):
ex_vars = {}
src = None
make_conf = {}
if 'ex_vars' in keywds:
ex_vars = keywds['ex_vars']
if 'source' in keywds:
src = keywds['source']
if src:
make_conf = util.make_conf_dict(src)
for k in ex_vars:
if not k in make_conf.keys():
make_conf[k] = ex_vars[k]
else:
for v in ex_vars[k].split(' '):
if not v in make_conf[k]:
make_conf[k] += ' ' + v
ret = ""
    # Serialise the merged make.conf variables.
    for k, v in make_conf.items():
        ret += '%s="%s"\n' % (k, v)
    return ret
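# Hedged illustration (added; not part of the original module).  This function
# is handed to source.create_source() further down as a content generator; with
# only ex_vars it simply serialises the dict, e.g.
#
#     make_conf_source(ex_vars={'USE': 'bindist symlink'})
#     # -> 'USE="bindist symlink"\n'
#
# When 'source' names an existing make.conf, its variables are read first and
# any extra words from ex_vars are appended to the matching variable.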
class BaseStage(actions.InhibitorAction):
"""
Basic stage building action. Handles fetching sources and setting up the chroot
to be able to merge packages. Also cleans everything up afterwards.
@param stage_conf - Stage configuration, see below.
@param build_name - Unique string to identify the stage.
@param stage_name - Type of stage being built. Default is base_stage.
Stage Configuration:
@param name -
@param seed - Name of the seed stage to use for building. Stage
needs to be located in inhibitor's stagedir.
@param fs_sources - List of InhibitorSources to be added to the stage. The
sources are added during install_sources(). If
source.keep is set, the source will persist in the file
stage image, otherwise it is removed during
remove_sources().
"""
def __init__(self, stage_conf, build_name, stage_name='base_stage', **keywds):
self.build_name = '%s-%s' % (stage_name, build_name)
self.conf = stage_conf
self.sources = []
self.istate = None
self.target_root = None
self.tarpath = None
self.seed = None
self.fs_sources = None
self.aux_mounts = {}
self.aux_sources = {}
self.root = util.Path('/')
self.env = {
'INHIBITOR_SCRIPT_ROOT': '/tmp/inhibitor/sh',
'ROOT': self.root,
}
if self.conf.has('seed'):
self.seed = self.conf.seed
else:
raise util.InhibitorError('No seed stage specified')
super(BaseStage, self).__init__(self.build_name, **keywds)
def update_root(self, new_root):
self.root = new_root
self.env['ROOT'] = new_root
def chroot_failure(self):
util.umount_all(self.istate.mount_points)
def post_conf_begin(self, inhibitor_state):
super(BaseStage, self).post_conf(inhibitor_state)
self.target_root = self.istate.paths.build.pjoin(self.build_name)
self.tarpath = self.istate.paths.stages.pjoin(self.build_name + '.tar.bz2')
util.mkdir(self.target_root)
if self.seed:
self.seed = self.istate.paths.stages.pjoin(self.seed)
self.aux_mounts = {
'proc': util.Mount('/proc', '/proc', self.target_root),
'sys': util.Mount('/sys', '/sys', self.target_root),
'dev': util.Mount('/dev', '/dev', self.target_root),
'devpts': util.Mount('/dev/pts', '/dev/pts', self.target_root),
}
self.aux_sources = {
'resolv.conf': source.create_source(
'file://etc/resolv.conf', keep = True, dest = '/etc/resolv.conf'),
'hosts': source.create_source(
'file://etc/hosts', keep = True, dest = '/etc/hosts'),
}
for i in glob.iglob( self.istate.paths.share.pjoin('*.sh') ):
j = source.create_source(
"file://%s" % i,
keep = False,
dest = self.env['INHIBITOR_SCRIPT_ROOT'] + '/' + os.path.basename(i)
)
self.sources.append(j)
if self.conf.has('fs_sources'):
if not type(self.conf.fs_sources) in (types.ListType, types.TupleType):
self.conf.fs_sources = [self.conf.fs_sources]
for src in self.conf.fs_sources:
if not src.dest:
util.warn("Setting dest for %s to '/'" % (src.src,))
src.dest = util.Path('/')
if src.mountable and src.dest == util.Path('/'):
util.warn("Setting mountable=False on %s" % (src.src,))
src.mountable = False
self.sources.append(src)
def post_conf_finish(self):
for src in self.sources:
src.post_conf( self.istate )
src.init()
for _, src in self.aux_sources.items():
src.post_conf( self.istate )
src.init()
def post_conf(self, inhibitor_state, run_finish=True):
self.post_conf_begin(inhibitor_state)
if run_finish:
self.post_conf_finish()
def unpack_seed(self):
if not os.path.isdir(self.seed):
if os.path.exists(self.seed):
os.unlink(self.seed)
seedfile = self.seed + '.tar.bz2'
util.info("Unpacking %s" % seedfile)
os.makedirs(self.seed)
try:
util.cmd('tar -xjpf %s -C %s/' % (seedfile, self.seed))
except:
shutil.rmtree(self.seed)
raise
util.info("Syncing %s to %s" % (self.seed.dname(), self.target_root.dname()) )
util.cmd('rsync -a --delete %s %s' %
(self.seed.dname(), self.target_root.dname()) )
def install_sources(self):
for src in self.sources:
src.install( root = self.target_root )
for m in ('proc', 'sys', 'dev', 'devpts'):
util.mount( self.aux_mounts[m], self.istate.mount_points )
for m in ('resolv.conf', 'hosts'):
self.aux_sources[m].install( root = self.target_root )
def remove_sources(self):
for src in [x for x in self.sources if (x.keep and not x.mountable)]:
# Previous actions may have overwritten the source, so
# it needs to be reinstalled one last time.
src.install( root = self.target_root )
for src in self.sources:
src.remove()
for m in ('proc', 'sys', 'devpts', 'dev'):
util.umount( self.aux_mounts[m], self.istate.mount_points )
for m in ('resolv.conf', 'hosts'):
self.aux_sources[m].remove()
def finish_sources(self):
for src in self.sources:
src.finish()
for _, src in self.aux_sources.items():
if not src in self.sources:
src.finish()
def clean_tmp(self):
shutil.rmtree(self.target_root.pjoin('/tmp/inhibitor'))
def get_action_sequence(self):
return [
util.Step(self.install_sources, always=True),
util.Step(self.remove_sources, always=True),
util.Step(self.finish_sources, always=True),
util.Step(self.clean_tmp, always=True),
]
def pack(self):
archive = tarfile.open(self.tarpath, 'w:bz2')
archive.add(self.target_root,
arcname = '/',
recursive = True,
)
archive.close()
util.info("Created %s" % (self.tarpath,))
class BaseGentooStage(BaseStage):
"""
Basic stage building action. Handles fetching sources and setting up the chroot
to be able to merge packages. Also cleans everything up afterwards.
@param stage_conf - Stage configuration, see below.
@param build_name - Unique string to identify the stage.
@param stage_name - Type of stage being built. Default is base_stage.
Stage Configuration:
@param name -
@param snapshot - InhibitorSource representing the portage tree.
@param overlays - List of InhibitorSources' to use as portage overlays.
@param kernel - Container for kernel configuration.
kernel_pkg - String passed to emerge to get kernel package.
kconfig - InhibitorSource that contains the kernel config.
genkernel - Arguments to pass to genkernel when building the
initramfs.
packages - List of packages that should be installed after the
kernel has been configured.
@param profile - Portage profile to use.
@param seed - Name of the seed stage to use for building. Stage
needs to be located in inhibitor's stagedir.
@param make_conf - InhibitorSource for make.conf.
@param portage_conf - InhibitorSource with the contents for /etc/portage.
"""
def __init__(self, stage_conf, build_name, stage_name='base_stage', **keywds):
super(BaseGentooStage, self).__init__(stage_conf, build_name, stage_name=stage_name, **keywds)
self.profile = None
self.kernel = None
self.pkgcache = None
self.portage_cr = util.Path('/tmp/inhibitor/portage_configroot')
self.env.update({
'PKGDIR': '/tmp/inhibitor/pkgs',
'DISTDIR': '/tmp/inhibitor/dist',
'PORTAGE_CONFIGROOT': self.portage_cr,
'PORTDIR': '/tmp/inhibitor/portdir'
})
if self.conf.has('overlays'):
self.env['PORTDIR_OVERLAY'] = ''
if self.conf.has('pkgcache'):
self.pkgcache = self.conf.pkgcache
def update_portage_cr(self, new_portage_cr):
self.portage_cr = new_portage_cr
self.env['PORTAGE_CONFIGROOT'] = new_portage_cr
def post_conf(self, inhibitor_state):
super(BaseGentooStage, self).post_conf(inhibitor_state, run_finish=False)
if not self.pkgcache:
self.pkgcache = source.create_source(
"file://%s" % util.mkdir(self.istate.paths.pkgs.pjoin(self.build_name)) )
self.pkgcache.keep = False
self.pkgcache.dest = self.env['PKGDIR']
self.sources.append(self.pkgcache)
distcache = source.create_source(
"file://%s" % util.mkdir(self.istate.paths.dist),
keep = False,
dest = self.env['DISTDIR']
)
self.sources.append(distcache)
if self.conf.has('kernel'):
self.kernel = self.conf.kernel
if not self.kernel.has('kconfig'):
raise util.InhibitorError('No kernel config (kconfig) specified.')
else:
self.kernel.kconfig.keep = False
self.kernel.kconfig.dest = util.Path('/tmp/inhibitor/kconfig')
self.kernel.kconfig.post_conf(inhibitor_state)
self.sources.append(self.kernel.kconfig)
if not self.kernel.has('kernel_pkg'):
raise util.InhibitorError('No kernel package (kernel_pkg) specified.')
kerncache = source.create_source(
"file://%s" % util.mkdir(inhibitor_state.paths.kernel.pjoin(self.build_name)),
keep = False,
dest = '/tmp/inhibitor/kerncache'
)
self.sources.append(kerncache)
if self.conf.has('snapshot'):
self.conf.snapshot.keep = False
self.conf.snapshot.dest = util.Path( self.env['PORTDIR'] )
self.sources.append(self.conf.snapshot)
else:
_, portdir = util.cmd_out('portageq portdir', raise_exception=True)
self.sources.append(
source.create_source( 'file://' + portdir,
keep = False,
dest = util.Path( self.env['PORTDIR'] ))
)
if self.conf.has('overlays'):
i = 0
for overlay in self.conf.overlays:
overlay.keep = False
overlay.dest = util.Path('/tmp/inhibitor/overlays/%d' % i)
self.sources.append(overlay)
self.env['PORTDIR_OVERLAY'] += ' /tmp/inhibitor/overlays/%d' % i
i += 1
if self.conf.has('profile'):
self.profile = self.conf.profile
else:
self.profile = os.readlink('/etc/make.profile')
self.profile = re.sub('.*/profiles/', '', self.profile)
if self.conf.has('make_conf'):
mc = self.conf.make_conf
else:
mc = source.create_source(make_conf_source, source='/etc/make.conf')
mc.dest = self.portage_cr.pjoin('etc/make.conf')
mc.keep = True
self.sources.append(mc)
if self.conf.has('portage_conf'):
self.conf.portage_conf.dest = self.portage_cr.pjoin('etc/portage')
self.conf.portage_conf.keep = True
self.sources.append(self.conf.portage_conf)
self.post_conf_finish()
def make_profile_link(self):
# XXX: We also need to make the root profile link, Gentoo Bug 324179.
for d in (self.target_root, self.target_root.pjoin(self.portage_cr)):
targ = d.pjoin('/etc/make.profile')
util.mkdir( os.path.dirname(targ) )
if os.path.lexists(targ):
os.unlink(targ)
os.symlink(self.env['PORTDIR'] + '/profiles/%s' % self.profile, targ)
def restore_profile_link(self):
# XXX: See make_profile_link.
targ = self.target_root.pjoin('/etc/make.profile')
if os.path.lexists(targ):
os.unlink(targ)
os.symlink('../usr/portage/profiles/%s' % self.profile, targ)
def get_action_sequence(self):
return [
util.Step(self.install_sources, always=True),
util.Step(self.make_profile_link, always=True),
util.Step(self.remove_sources, always=True),
util.Step(self.finish_sources, always=True),
            util.Step(self.restore_profile_link, always=True),
util.Step(self.clean_tmp, always=True),
]
class Stage4(BaseGentooStage):
"""
Stage 4 building action. Handles fetching sources, setting up the chroot,
merging packages, configuring a kernel and running any specified scripts
inside the completed build.
@param stage_conf - Stage configuration, see below.
@param build_name - Unique string to identify the stage.
@param stage_name - Type of stage being built. Default is base_stage.
Stage Configuration:
@param name -
@param snapshot - InhibitorSource representing the portage tree.
@param overlays - List of InhibitorSources' to use as portage overlays.
@param kernel - Container for kernel configuration.
kernel_pkg - String passed to emerge to get kernel package.
kconfig - InhibitorSource that contains the kernel config.
genkernel - Arguments to pass to genkernel when building the
initramfs.
packages - List of packages that should be installed after the
kernel has been configured.
@param profile - Portage profile to use.
@param seed - Name of the seed stage to use for building. Stage
needs to be located in inhibitor's stagedir.
@param make_conf - InhibitorSource for make.conf.
@param portage_conf - InhibitorSource with the contents for /etc/portage.
@param scripts - List of Scripts to run after merging all packages and
building the kernel. They run in order inside of the
completed chroot from '/tmp/inhibitor/sh/'
@param packages - List or String of packages to merge.
"""
def __init__(self, stage_conf, build_name, **keywds):
self.package_list = []
self.scripts = []
super(Stage4, self).__init__(stage_conf, build_name, 'stage4', **keywds)
self.emerge_cmd = '%s/inhibitor-run.sh run_emerge ' % (self.env['INHIBITOR_SCRIPT_ROOT'],)
def post_conf(self, inhibitor_state):
super(Stage4, self).post_conf(inhibitor_state)
if self.conf.has('scripts'):
self.scripts = self.conf.scripts
for script in self.conf.scripts:
script.post_conf(inhibitor_state)
if self.conf.has('packages'):
self.package_list = util.strlist_to_list(self.conf.packages)
else:
raise util.InhibitorError('No packages specified')
def _emerge(self, packages, flags=''):
util.chroot(
path = self.target_root,
function = util.cmd,
fargs = {
'cmdline': '%s %s %s' % (
self.emerge_cmd,
flags,
packages
),
'env': self.env
},
failuref = self.chroot_failure
)
def get_action_sequence(self):
ret = []
ret.append( util.Step(self.unpack_seed, always=False) )
ret.append( util.Step(self.install_sources, always=True) )
ret.append( util.Step(self.make_profile_link, always=False) )
ret.append( util.Step(self.merge_portage, always=False) )
ret.append( util.Step(self.setup_extras, always=False) )
ret.append( util.Step(self.merge_system, always=False) )
ret.append( util.Step(self.merge_packages, always=False) )
if self.kernel:
ret.append( util.Step(self.merge_kernel, always=False) )
ret.append( util.Step(self.run_scripts, always=False) )
ret.append( util.Step(self.remove_sources, always=True) )
ret.append( util.Step(self.finish_sources, always=True) )
ret.append( util.Step(self.install_portage_conf, always=False) )
ret.append( util.Step(self.restore_profile_link, always=True) )
ret.append( util.Step(self.clean_tmp, always=True) )
ret.append( util.Step(self.pack, always=False) )
return ret
def merge_portage(self):
self._emerge('sys-apps/portage', flags='--oneshot --newuse')
def setup_extras(self):
util.chroot(
path = self.target_root,
function = util.cmd,
fargs = {
'cmdline': '%s/inhibitor-run.sh setup_extras' % ( self.env['INHIBITOR_SCRIPT_ROOT'], ),
'env': self.env,
},
failuref = self.chroot_failure,
)
def merge_system(self):
self._emerge('system', flags='--deep --newuse --update')
def merge_packages(self):
package_str = ' '.join(self.package_list)
package_str = package_str.replace('\n', ' ')
self._emerge(package_str, flags='--deep --newuse --update')
def merge_kernel(self):
args = ['--build_name', self.build_name,
'--kernel_pkg', '\'%s\'' % (self.kernel.kernel_pkg,)]
if self.kernel.has('genkernel'):
args.extend(['--genkernel', self.kernel.genkernel])
if self.kernel.has('packages'):
args.extend(['--packages', self.kernel.packages])
util.chroot(
path = self.target_root,
function = util.cmd,
fargs = {
'cmdline': '%s/kernel.sh %s' % ( self.env['INHIBITOR_SCRIPT_ROOT'], ' '.join(args),),
'env': self.env
},
failuref = self.chroot_failure,
)
def run_scripts(self):
for script in self.scripts:
script.install( root = self.target_root )
util.chroot(
path = self.target_root,
function = util.cmd,
fargs = {'cmdline': script.cmdline(), 'env':self.env},
failuref = self.chroot_failure
)
def remove_sources(self):
super(Stage4, self).remove_sources()
for script in self.scripts:
script.remove()
def finish_sources(self):
super(Stage4, self).finish_sources()
for script in self.scripts:
script.finish()
def install_portage_conf(self):
portage_cr = self.target_root.pjoin( self.env['PORTAGE_CONFIGROOT'] + '/etc/' )
dest = self.target_root.pjoin('/etc/')
util.path_sync(portage_cr, dest)
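# Hedged illustration (added; not part of the original module) of the fields a
# Stage4 configuration is expected to carry, per the docstring above.  The
# concrete container type and the values below are placeholders; the container
# must answer conf.has('<field>') as used throughout this module:
#
#     conf.seed      = 'stage3-amd64'                 # seed stage in stagedir
#     conf.snapshot  = source.create_source('file:///usr/portage')
#     conf.profile   = 'default/linux/amd64/10.0'
#     conf.packages  = 'app-editors/vim app-admin/syslog-ng'
#     conf.scripts   = []                             # optional post-merge scripts
#     conf.kernel    = ...                            # optional; needs kernel_pkg and kconfig
#
#     stage = Stage4(conf, build_name='example')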
|
{
"content_hash": "064610e85decd4aa737d8b3f830a2625",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 103,
"avg_line_length": 39.879541108986615,
"alnum_prop": 0.5464831950903773,
"repo_name": "jsbronder/inhibitor",
"id": "48c9afc0aa805287fda56e11b6a17ead7e8530cb",
"size": "20857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inhibitor/stage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Elixir",
"bytes": "78"
},
{
"name": "Python",
"bytes": "80837"
},
{
"name": "Shell",
"bytes": "15675"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pytz
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from tests.base import ApiTestCase, capture_signals
from tests.factories import (
ProjectFactory,
UserFactory,
AuthUserFactory,
CommentFactory
)
from website.addons.osfstorage import settings as osfstorage_settings
from website.project.signals import contributor_removed
from website.project.model import NodeLog
# stolen from^W^Winspired by DRF rest_framework.fields.DateTimeField.to_representation
def _dt_to_iso8601(value):
iso8601 = value.isoformat()
if iso8601.endswith('+00:00'):
iso8601 = iso8601[:-9] + 'Z' # offset upped to 9 to get rid of 3 ms decimal points
return iso8601
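# Hedged illustration (added; not part of the original test module): the slice
# above drops the '+00:00' offset together with the last three microsecond
# digits, so a UTC datetime serialises with millisecond precision and a 'Z':
#
#     _dt_to_iso8601(datetime.datetime(2012, 7, 4, 12, 0, 0, 123456,
#                                      tzinfo=pytz.utc))
#     # -> '2012-07-04T12:00:00.123Z'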
class TestFileView(ApiTestCase):
def setUp(self):
super(TestFileView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user, comment_level='public')
self.file = api_utils.create_test_file(self.node, self.user, create_guid=False)
self.file_url = '/{}files/{}/'.format(API_BASE, self.file._id)
def test_must_have_auth(self):
res = self.app.get(self.file_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_must_be_contributor(self):
user = AuthUserFactory()
res = self.app.get(self.file_url, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unvisited_file_has_no_guid(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['guid'], None)
def test_visited_file_has_guid(self):
guid = self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_is_not_none(guid)
assert_equal(res.json['data']['attributes']['guid'], guid._id)
def test_get_file(self):
res = self.app.get(self.file_url, auth=self.user.auth)
self.file.versions[-1]._clear_caches()
self.file.versions[-1].reload()
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
attributes = res.json['data']['attributes']
assert_equal(attributes['path'], self.file.path)
assert_equal(attributes['kind'], self.file.kind)
assert_equal(attributes['name'], self.file.name)
assert_equal(attributes['materialized_path'], self.file.materialized_path)
assert_equal(attributes['last_touched'], None)
assert_equal(attributes['provider'], self.file.provider)
assert_equal(attributes['size'], self.file.versions[-1].size)
assert_equal(attributes['date_modified'], _dt_to_iso8601(self.file.versions[-1].date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['date_created'], _dt_to_iso8601(self.file.versions[0].date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['extra']['hashes']['md5'], None)
assert_equal(attributes['extra']['hashes']['sha256'], None)
assert_equal(attributes['tags'], [])
def test_file_has_rel_link_to_owning_project(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('node', res.json['data']['relationships'].keys())
expected_url = self.node.api_v2_url
actual_url = res.json['data']['relationships']['node']['links']['related']['href']
assert_in(expected_url, actual_url)
def test_file_has_comments_link(self):
guid = self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('comments', res.json['data']['relationships'].keys())
expected_url = '/{}nodes/{}/comments/?filter[target]={}'.format(API_BASE, self.node._id, guid._id)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert_in(expected_url, url)
def test_file_has_correct_unread_comments_count(self):
contributor = AuthUserFactory()
self.node.add_contributor(contributor, auth=Auth(self.user), save=True)
comment = CommentFactory(node=self.node, target=self.file.get_guid(create=True), user=contributor, page='files')
res = self.app.get('/{}files/{}/?related_counts=True'.format(API_BASE, self.file._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
assert_equal(unread_comments, 1)
def test_only_project_contrib_can_comment_on_closed_project(self):
self.node.comment_level = 'private'
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url, auth=self.user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_any_loggedin_user_can_comment_on_open_project(self):
self.node.is_public = True
self.node.save()
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_checkout(self):
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth
)
self.file.reload()
self.file.save()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
res = self.app.get(
self.file_url,
auth=self.user.auth
)
        assert_equal(len(self.node.logs), 2)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs[-1].user, self.user)
assert_equal(
self.user._id,
res.json['data']['relationships']['checkout']['links']['related']['meta']['id']
)
assert_in(
'/{}users/{}/'.format(API_BASE, self.user._id),
res.json['data']['relationships']['checkout']['links']['related']['href']
)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth
)
self.file.reload()
assert_equal(self.file.checkout, None)
assert_equal(res.status_code, 200)
def test_checkout_file_no_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_no_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_incorrect_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'Wrong type.', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_incorrect_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': '12345', 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_no_attributes(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files'}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_must_set_self(self):
user = UserFactory()
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 400)
assert_equal(self.file.checkout, None)
def test_must_be_self(self):
user = AuthUserFactory()
self.file.checkout = self.user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, self.user)
def test_admin_can_checkin(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_IN)
assert_equal(self.node.logs[-1].user, self.user)
def test_admin_can_checkout(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs[-1].user, self.user)
def test_cannot_checkin_when_already_checked_in(self):
count = len(self.node.logs)
assert_false(self.file.is_checked_out)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(len(self.node.logs), count)
assert_equal(self.file.checkout, None)
def test_cannot_checkout_when_checked_out(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
count = len(self.node.logs)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, user)
assert_equal(len(self.node.logs), count)
def test_noncontrib_cannot_checkout(self):
user = AuthUserFactory()
assert_equal(self.file.checkout, None)
assert user._id not in self.node.permissions.keys()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs[-1].action != NodeLog.CHECKED_OUT
def test_read_contrib_cannot_checkout(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read'])
self.node.save()
assert_false(self.node.can_edit(user=user))
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
expect_errors=True
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs[-1].action != NodeLog.CHECKED_OUT
def test_user_can_checkin(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
)
self.file.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
def test_removed_contrib_files_checked_in(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
assert_true(self.file.is_checked_out)
with capture_signals() as mock_signals:
self.node.remove_contributor(user, auth=Auth(user))
assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
self.file.reload()
assert_false(self.file.is_checked_out)
def test_must_be_osfstorage(self):
self.file.provider = 'github'
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, 403)
def test_get_file_resolves_guids(self):
guid = self.file.get_guid(create=True)
url = '/{}files/{}/'.format(API_BASE, guid._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
assert_equal(res.json['data']['attributes']['path'], self.file.path)
def test_get_file_invalid_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, 'asdasasd')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_file_non_file_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, self.node._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
class TestFileVersionView(ApiTestCase):
def setUp(self):
super(TestFileVersionView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.osfstorage = self.node.get_addon('osfstorage')
self.root_node = self.osfstorage.get_root()
self.file = self.root_node.append_file('test_file')
self.file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
def test_listing(self):
self.file.create_version(self.user, {
'object': '0683m38e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1347,
'contentType': 'img/png'
}).save()
res = self.app.get(
'/{}files/{}/versions/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], '1')
assert_equal(res.json['data'][1]['id'], '2')
def test_by_id(self):
res = self.app.get(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '1')
def test_read_only(self):
assert_equal(self.app.put(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
assert_equal(self.app.post(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
assert_equal(self.app.delete(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
class TestFileTagging(ApiTestCase):
def setUp(self):
super(TestFileTagging, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.file1 = api_utils.create_test_file(
self.node, self.user, filename='file1')
self.payload = {
"data": {
"type": "files",
"id": self.file1._id,
"attributes": {
"checkout": None,
"tags": ["goofy"]
}
}
}
self.url = '/{}files/{}/'.format(API_BASE, self.file1._id)
def test_tags_add_properly(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure adding tag data is correct from the PUT response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofy')
def test_tags_update_properly(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
# Ensure removing and adding tag data is correct from the PUT response
self.payload['data']['attributes']['tags'] = ['goofier']
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofier')
def test_tags_add_and_remove_properly(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes']['tags'] = []
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 0)
def test_put_wo_tags_doesnt_remove_tags(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes'] = {'checkout': None}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure adding tag data is correct from the PUT response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofy')
def test_add_tag_adds_log(self):
count = len(self.node.logs)
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(len(self.node.logs), count + 1)
assert_equal(NodeLog.FILE_TAG_ADDED, self.node.logs[-1].action)
def test_remove_tag_adds_log(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes']['tags'] = []
count = len(self.node.logs)
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(len(self.node.logs), count + 1)
assert_equal(NodeLog.FILE_TAG_REMOVED, self.node.logs[-1].action)
|
{
"content_hash": "849fe45e820047a5a2c5dad8da67958c",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 127,
"avg_line_length": 40.36842105263158,
"alnum_prop": 0.5909852859005401,
"repo_name": "amyshi188/osf.io",
"id": "7bc469bab1daf529314c3a6d9c1cd35aea581090",
"size": "21476",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "api_tests/files/views/test_file_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159639"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1649322"
},
{
"name": "Mako",
"bytes": "645108"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5839557"
}
],
"symlink_target": ""
}
|
import logging
from bson.json_util import loads, dumps
import datetime
import mock
import webapps.server
from webapps.server.views import ratelimit
from webapps.lib.db import get_db
from webapps.lib.util import to_coll_name, get_collection_names
from flask import session
from pymongo.errors import OperationFailure
from webapps.lib.MWSServerError import MWSServerError
from tests import MongoWSTestCase
from webapps.lib import CLIENTS_COLLECTION
class ViewsSetUpUnitTestCase(MongoWSTestCase):
def test_create_mws_resource(self):
url = '/mws/'
rv = self.app.post(url)
new_response_dict = loads(rv.data)
self.assertIn('res_id', new_response_dict)
res_id = new_response_dict['res_id']
is_new = new_response_dict['is_new']
self.assertIsNotNone(res_id)
self.assertTrue(is_new)
# check if res_id is unchanged
rv = self.app.post(url)
new_response_dict = loads(rv.data)
new_res_id = new_response_dict['res_id']
new_is_new = new_response_dict['is_new']
self.assertIsNotNone(new_res_id)
self.assertEqual(res_id, new_res_id)
self.assertFalse(new_is_new)
def test_create_mws_resource_new_session(self):
url = '/mws/'
rv = self.app.post(url)
response_dict = loads(rv.data)
self.assertIn('res_id', response_dict)
res_id = response_dict['res_id']
self.assertIsNotNone(res_id)
with self.app.session_transaction() as sess:
del sess['session_id']
# check if res_id is unique
rv = self.app.post(url)
new_res_id = loads(rv.data)['res_id']
self.assertIsNotNone(new_res_id)
self.assertNotEqual(res_id, new_res_id)
@mock.patch('webapps.server.views.datetime')
def test_keep_mws_alive(self, datetime_mock):
first = datetime.datetime(2012, 7, 4)
second = first + datetime.timedelta(days=1)
datetime_mock.now.return_value = first
db = get_db()
# get a session to keep alive
rv = self.app.post('/mws/')
res_id = loads(rv.data)['res_id']
with self.app.session_transaction() as sess:
session_id = sess['session_id']
res = db.clients.find({'res_id': res_id, 'session_id': session_id},
{'timestamp': 1})
_id = res[0]['_id']
old_ts = res[0]['timestamp']
self.assertEqual(old_ts, first)
datetime_mock.now.return_value = second
url = '/mws/' + res_id + '/keep-alive'
rv = self.app.post(url)
self.assertEqual(rv.status_code, 204)
newres = db.clients.find({'_id': _id}, {'timestamp': 1})
self.assertEqual(newres[0]['timestamp'], second)
def test_ratelimit(self):
rv = self.app.post('/mws/')
self.res_id = loads(rv.data)['res_id']
limit = self.real_app.config['RATELIMIT_QUOTA'] = 3
def dummy():
return ('', 204)
with self.app.session_transaction() as client_sess:
session_id = client_sess['session_id']
with self.real_app.test_request_context():
session['session_id'] = session_id
for i in range(limit):
self.assertEqual(ratelimit(dummy)(), ('', 204))
with self.assertRaises(MWSServerError) as cm:
ratelimit(dummy)()
self.assertEqual(cm.exception.error, 429)
def test_ratelimit_no_session(self):
def dummy():
return ('', 204)
with self.real_app.test_request_context():
with self.assertRaises(MWSServerError) as cm:
ratelimit(dummy)()
self.assertEqual(cm.exception.error, 401)
def test_nocache(self):
res = self.app.post('/mws/')
self.assertEqual(res.headers['cache-control'], 'no-cache')
self.assertEqual(res.headers['expires'], '0')
res_id = loads(res.data)['res_id']
res = self.app.get('/mws/%s/db/coll/find?{}' % res_id)
self.assertEqual(res.headers['cache-control'], 'no-cache')
self.assertEqual(res.headers['expires'], '0')
class DBTestCase(MongoWSTestCase):
def setUp(self):
super(DBTestCase, self).setUp()
# Todo: For stuff that isn't checking authentication,
# we probably don't want to rely on/use the authentication code
rv = self.app.post('/mws/')
response_dict = loads(rv.data)
self.assertIn('res_id', response_dict)
self.res_id = response_dict['res_id']
self.assertIsNotNone(self.res_id)
self.db = get_db()
self.make_request_url = '/mws/%s/db/%%s' % (self.res_id)
def _make_request(self, endpoint, data, method, expected_status):
url = self.make_request_url % (endpoint)
if data is not None:
if isinstance(data, dict):
data = dumps(
dict((k, v) for (k, v) in data.iteritems() if v is not None)
)
else:
data = dumps(data)
if method == self.app.get:
url = '%s?data=%s' % (url, data)
data = None
result = method(url, data=data, content_type='application/json')
actual_status = result.status_code
self.assertEqual(
actual_status, expected_status,
("Expected request status to be %s, got %s instead."
" Full result: %s") %
(expected_status, actual_status, result.data))
result_dict = loads(result.data) if result.data else {}
return result_dict
def make_get_collection_names_request(self, expected_status=200):
return self._make_request('getCollectionNames', None, self.app.get,
expected_status)
def make_db_drop_request(self, expected_status=204):
self.make_request_url = '/mws/%s/db%%s' % (self.res_id)
return self._make_request('', None, self.app.delete, expected_status)
class DBCollectionTestCase(DBTestCase):
def setUp(self):
super(DBCollectionTestCase, self).setUp()
self.coll_name = 'test_collection'
self.internal_coll_name = to_coll_name(self.res_id,
self.coll_name)
self.db = get_db()
self.db_collection = self.db[self.internal_coll_name]
self.make_request_url = '/mws/%s/db/%s/%%s' % \
(self.res_id, self.coll_name)
def tearDown(self):
super(DBCollectionTestCase, self).setUp()
self.db_collection.drop()
def make_find_request(self, query=None, projection=None, skip=None,
limit=None, expected_status=200):
data = {
'query': query,
'projection': projection,
'skip': skip,
'limit': limit,
}
return self._make_request('find', data, self.app.get,
expected_status)
def make_insert_request(self, document, expected_status=200):
data = {'document': document}
return self._make_request('insert', data, self.app.post,
expected_status)
def make_remove_request(self, constraint, just_one=False,
expected_status=204):
data = {'constraint': constraint, 'just_one': just_one}
return self._make_request('remove', data, self.app.delete,
expected_status)
def make_update_request(self, query, update, upsert=False, multi=False,
expected_status=204):
data = {
'query': query,
'update': update,
'upsert': upsert,
'multi': multi,
}
return self._make_request('update', data, self.app.put,
expected_status)
def make_aggregate_request(self, query=None, expected_status=200):
return self._make_request('aggregate', query, self.app.get,
expected_status)
def make_drop_request(self, expected_status=204):
return self._make_request('drop', None, self.app.delete,
expected_status)
def make_count_request(self, query=None, skip=None, limit=None,
expected_status=200):
data = {'query': query, 'skip': skip, 'limit': limit}
return self._make_request('count', data, self.app.get, expected_status)
def set_session_id(self, new_id):
with self.app.session_transaction() as sess:
sess['session_id'] = new_id
class FindUnitTestCase(DBCollectionTestCase):
def test_find(self):
query = {'name': 'mongo'}
self.db_collection.insert(query)
result = self.make_find_request(query)
self.assertEqual(len(result), 1)
self.assertEqual(result['result'][0]['name'], 'mongo')
def test_skipping_results(self):
self.db_collection.insert([{'val': i} for i in xrange(10)])
response = self.make_find_request(query={}, skip=4)
result = response['result']
self.assertEqual(len(result), 6)
values = [r['val'] for r in result]
self.assertItemsEqual(values, range(4, 10))
def test_limiting_results(self):
self.db_collection.insert([{'val': i} for i in xrange(10)])
response = self.make_find_request(query={}, limit=4)
result = response['result']
self.assertEqual(len(result), 4)
values = [r['val'] for r in result]
self.assertItemsEqual(values, range(4))
def test_invalid_find_session(self):
self.set_session_id('invalid_id')
document = {'name': 'mongo'}
result = self.make_find_request(document, expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
class InsertUnitTestCase(DBCollectionTestCase):
def test_simple_insert(self):
document = {'name': 'Mongo'}
self.make_insert_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
def test_multiple_document_insert(self):
document = [{'name': 'Mongo'}, {'name': '10gen'}]
self.make_insert_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 2)
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', '10gen'])
def test_invalid_insert_session(self):
self.set_session_id('invalid_session')
document = {'name': 'mongo'}
self.make_insert_request(document, expected_status=403)
def test_insert_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 150
self.make_insert_request([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
], expected_status=200)
result = self.make_insert_request([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
], expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
class RemoveUnitTestCase(DBCollectionTestCase):
def test_remove(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
document = {'name': 'Mongo'}
self.make_remove_request(document)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'NotMongo')
def test_remove_one(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
document = {'name': 'Mongo'}
self.make_remove_request(document, just_one=True)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', 'NotMongo'])
def test_remove_requires_valid_res_id(self):
self.set_session_id('invalid_session')
self.make_remove_request({}, expected_status=403)
class UpdateUnitTestCase(DBCollectionTestCase):
def test_upsert(self):
result = self.db_collection.find({'name': 'Mongo'})
self.assertEqual(result.count(), 0)
self.make_update_request({}, {'name': 'Mongo'}, True)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
def test_update_one(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({'name': 'Mongo'}, {'name': 'Mongo2'}, True)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo', 'Mongo2', 'NotMongo'])
def test_update_multi(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request(
{'name': 'Mongo'},
{'$set': {'name': 'Mongo2'}},
False, True
)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_multi_upsert(self):
# Does not exist - upsert
self.make_update_request({}, {'$set': {'name': 'Mongo'}}, True, True)
result = self.db_collection.find()
self.assertEqual(result.count(), 1)
self.assertEqual(result[0]['name'], 'Mongo')
# Exists - multi-update
self.db_collection.insert([{'name': 'Mongo'}, {'name': 'NotMongo'}])
self.make_update_request(
{'name': 'Mongo'},
{'$set': {'name': 'Mongo2'}},
True, True
)
result = self.db_collection.find()
names = [r['name'] for r in result]
self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_update_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 500
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({'name': 'Mongo'}, {'name': 'Mongo2'},
expected_status=204)
result = self.make_update_request({'name': 'Mongo'},
{'$set': {'a': list(range(50))}},
expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
def test_multi_update_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 500
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
self.make_update_request({},
{'$set': {'a': list(range(12))}},
multi=False,
expected_status=204)
result = self.make_update_request({},
{'$set': {'a': list(range(12))}},
multi=True,
expected_status=403)
error = {
'error': 403,
'reason': 'Collection size exceeded',
'detail': ''
}
self.assertEqual(result, error)
class AggregateUnitTestCase(DBCollectionTestCase):
def test_aggregate(self):
for i in range(6):
self.db_collection.insert({'val': i})
query = [
{'$match': {'val': {'$lt': 5}}},
{'$sort': {'val': -1}},
{'$skip': 1},
{'$limit': 2}
]
self.db_collection.aggregate(query)
result = self.make_aggregate_request(query)
self.assertEqual(result['ok'], 1)
result = result['result']
self.assertEqual(len(result), 2)
self.assertEqual([x['val'] for x in result], [3, 2])
def test_invalid_query(self):
result = self.make_aggregate_request({}, expected_status=400)
self.assertEqual(result['error'], 400)
with self.assertRaises(OperationFailure) as cm:
self.db_collection.aggregate({})
self.assertEqual(cm.exception.message, result['reason'])
def test_invalid_find_session(self):
self.set_session_id('invalid_id')
query = [{'$match': {'val': {'$lt': 5}}}]
result = self.make_aggregate_request(query, expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
class CountTestCase(DBCollectionTestCase):
def test_get_query_count(self):
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({'n': {'$gt': 5}})
self.assertEqual(response['count'], 4)
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({'n': {'$gt': 4}})
self.assertEqual(response['count'], 10)
def test_uses_skip_and_limit_info(self):
self.db_collection.insert([{'n': i} for i in xrange(10)])
response = self.make_count_request({}, skip=0, limit=1)
self.assertEqual(response['count'], 1)
response = self.make_count_request({}, skip=8, limit=0)
self.assertEqual(response['count'], 2)
class DropUnitTestCase(DBCollectionTestCase):
def test_drop(self):
self.db_collection.insert([
{'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
])
result = self.db_collection.find()
self.assertEqual(result.count(), 3)
self.make_drop_request()
result = self.db_collection.find()
self.assertEqual(result.count(), 0)
self.assertNotIn(self.internal_coll_name, self.db.collection_names())
class GetCollectionNamesUnitTestCase(DBTestCase):
def test_get_collection_names(self):
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, [])
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id},
{'$push': {'collections': 'test'}})
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test'])
def test_invalid_session(self):
with self.app.session_transaction() as sess:
sess['session_id'] = 'invalid session'
result = self.make_get_collection_names_request(expected_status=403)
error = {
'error': 403,
'reason': 'Session error. User does not have access to res_id',
'detail': '',
}
self.assertEqual(result, error)
def test_resid_isolation(self):
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id},
{'$push': {'collections': 'test'}})
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test'])
with self.app.session_transaction() as sess:
del sess['session_id']
new_resid = loads(self.app.post('/mws/').data)['res_id']
self.assertNotEqual(self.res_id, new_resid)
self.db[CLIENTS_COLLECTION].update({'res_id': new_resid},
{'$push': {'collections': 'test2'}})
self.make_request_url = '/mws/%s/db/%%s' % (new_resid)
result = self.make_get_collection_names_request()['result']
self.assertEqual(result, ['test2'])
class DropDBUnitTestCase(DBTestCase):
def test_drop_db(self):
testdoc = {'name': 'Mongo'}
colls = ['a', 'b', 'c']
update = {'$addToSet': {'collections': {'$each': colls}}}
self.db[CLIENTS_COLLECTION].update({'res_id': self.res_id}, update)
colls = [to_coll_name(self.res_id, c) for c in colls]
for col in colls:
self.db[col].insert(testdoc)
actual_colls = self.db.collection_names()
for col in colls:
self.assertIn(col, actual_colls)
self.make_db_drop_request()
actual_colls = self.db.collection_names()
for col in colls:
self.assertNotIn(col, actual_colls)
self.assertItemsEqual(get_collection_names(self.res_id), [])
class IntegrationTestCase(DBCollectionTestCase):
def test_insert_find(self):
document = {'name': 'mongo'}
self.make_insert_request(document)
result = self.make_find_request(document)
self.assertDictContainsSubset(document, result['result'][0])
|
{
"content_hash": "4632e2e1750e57f4a2d624ecae1d7f36",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 80,
"avg_line_length": 35.945762711864404,
"alnum_prop": 0.559364390795926,
"repo_name": "vicjwang/mongo-web-shell",
"id": "221c7bc2adf6d7320448fc4df6a022c859a6443f",
"size": "21809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_mws_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import eventlet
from eventlet import semaphore
import netaddr
from oslo.config import cfg
from quantum.agent.common import config
from quantum.agent.linux import external_process
from quantum.agent.linux import interface
from quantum.agent.linux import ip_lib
from quantum.agent.linux import iptables_manager
from quantum.agent.linux import utils
from quantum.agent import rpc as agent_rpc
from quantum.common import constants as l3_constants
from quantum.common import topics
from quantum.common import utils as common_utils
from quantum import context
from quantum import manager
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import loopingcall
from quantum.openstack.common import periodic_task
from quantum.openstack.common.rpc import common as rpc_common
from quantum.openstack.common.rpc import proxy
from quantum.openstack.common import service
from quantum import service as quantum_service
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
class L3PluginApi(proxy.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, fullsync=True, router_id=None):
"""Make a remote process call to retrieve the sync data for routers."""
router_ids = [router_id] if router_id else None
return self.call(context,
self.make_msg('sync_routers', host=self.host,
fullsync=fullsync,
router_ids=router_ids),
topic=self.topic)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
        @raise common.RemoteError: with TooManyExternalNetworks
                                   as exc_type if there is
                                   more than one external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host),
topic=self.topic)
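# Illustrative note (hypothetical values, not part of the original module):
# from within the agent, a sync for a single router looks roughly like
#
#     api = L3PluginApi(topics.PLUGIN, host='network-node-1')
#     routers = api.get_routers(ctx, router_id='3f1b...')
#
# The returned payload is the plugin's per-router sync data (gateway port,
# internal interfaces, floating IPs) that process_router() below consumes.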
class RouterInfo(object):
def __init__(self, router_id, root_helper, use_namespaces, router):
self.router_id = router_id
self.ex_gw_port = None
self.internal_ports = []
self.floating_ips = []
self.root_helper = root_helper
self.use_namespaces = use_namespaces
self.router = router
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=root_helper,
#FIXME(danwent): use_ipv6=True,
namespace=self.ns_name())
self.routes = []
def ns_name(self):
if self.use_namespaces:
return NS_PREFIX + self.router_id
class L3NATAgent(manager.Manager):
OPTS = [
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Quantum metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=3,
help=_("Send this many gratuitous ARPs for HA setup, "
"set it below or equal to 0 to disable this "
"feature.")),
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" confgure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
if not self.conf.interface_driver:
raise SystemExit(_('An interface driver must be specified'))
try:
self.driver = importutils.import_object(self.conf.interface_driver,
self.conf)
except Exception:
msg = _("Error importing interface driver "
"'%s'") % self.conf.interface_driver
raise SystemExit(msg)
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.PLUGIN, host)
self.fullsync = True
self.sync_sem = semaphore.Semaphore(1)
if self.conf.use_namespaces:
self._destroy_router_namespaces(self.conf.router_id)
super(L3NATAgent, self).__init__(host=self.conf.host)
def _destroy_router_namespaces(self, only_router_id=None):
"""Destroy router namespaces on the host to eliminate all stale
linux devices, iptables rules, and namespaces.
If only_router_id is passed, only destroy single namespace, to allow
for multiple l3 agents on the same host, without stepping on each
other's toes on init. This only makes sense if router_id is set.
"""
root_ip = ip_lib.IPWrapper(self.root_helper)
for ns in root_ip.get_namespaces(self.root_helper):
if ns.startswith(NS_PREFIX):
if only_router_id and not ns.endswith(only_router_id):
continue
try:
self._destroy_router_namespace(ns)
except Exception:
LOG.exception(_("Failed deleting namespace '%s'"), ns)
def _destroy_router_namespace(self, namespace):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=namespace,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=namespace,
prefix=EXTERNAL_DEV_PREFIX)
#TODO(garyk) Address the failure for the deletion of the namespace
def _create_router_namespace(self, ri):
ip_wrapper_root = ip_lib.IPWrapper(self.root_helper)
ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name())
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
def _fetch_external_net_id(self):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
try:
return self.plugin_rpc.get_external_network_id(self.context)
except rpc_common.RemoteError as e:
if e.exc_type == 'TooManyExternalNetworks':
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Quantum has more than "
"one external network.")
raise Exception(msg)
else:
raise
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router)
self.router_info[router_id] = ri
if self.conf.use_namespaces:
self._create_router_namespace(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
if self.conf.enable_metadata_proxy:
self._spawn_metadata_proxy(ri)
def _router_removed(self, router_id):
ri = self.router_info[router_id]
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ri)
del self.router_info[router_id]
self._destroy_router_namespace(ri.ns_name())
def _spawn_metadata_proxy(self, router_info):
def callback(pid_file):
proxy_cmd = ['quantum-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--router_id=%s' % router_info.router_id,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%s' % self.conf.metadata_port]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'quantum-ns-metadata-proxy-%s.log' %
router_info.router_id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
router_info.router_id,
self.root_helper,
router_info.ns_name())
pm.enable(callback)
def _destroy_metadata_proxy(self, router_info):
pm = external_process.ProcessManager(
self.conf,
router_info.router_id,
self.root_helper,
router_info.ns_name())
pm.disable()
def _set_subnet_info(self, port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
def process_router(self, ri):
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
ri.internal_ports.append(p)
self.internal_network_added(ri, ex_gw_port,
p['network_id'], p['id'],
p['ip_cidr'], p['mac_address'])
for p in old_ports:
ri.internal_ports.remove(p)
self.internal_network_removed(ri, ex_gw_port, p['id'],
p['ip_cidr'])
internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports]
if ex_gw_port and not ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self.external_gateway_added(ri, ex_gw_port, internal_cidrs)
elif not ex_gw_port and ri.ex_gw_port:
self.external_gateway_removed(ri, ri.ex_gw_port,
internal_cidrs)
if ri.ex_gw_port or ex_gw_port:
self.process_router_floating_ips(ri, ex_gw_port)
ri.ex_gw_port = ex_gw_port
self.routes_updated(ri)
def process_router_floating_ips(self, ri, ex_gw_port):
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
existing_floating_ip_ids = set([fip['id'] for fip in ri.floating_ips])
cur_floating_ip_ids = set([fip['id'] for fip in floating_ips])
id_to_fip_map = {}
for fip in floating_ips:
if fip['port_id']:
if fip['id'] not in existing_floating_ip_ids:
ri.floating_ips.append(fip)
self.floating_ip_added(ri, ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
# store to see if floatingip was remapped
id_to_fip_map[fip['id']] = fip
floating_ip_ids_to_remove = (existing_floating_ip_ids -
cur_floating_ip_ids)
for fip in ri.floating_ips:
if fip['id'] in floating_ip_ids_to_remove:
ri.floating_ips.remove(fip)
self.floating_ip_removed(ri, ri.ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
else:
# handle remapping of a floating IP
new_fip = id_to_fip_map[fip['id']]
new_fixed_ip = new_fip['fixed_ip_address']
existing_fixed_ip = fip['fixed_ip_address']
if (new_fixed_ip and existing_fixed_ip and
new_fixed_ip != existing_fixed_ip):
floating_ip = fip['floating_ip_address']
self.floating_ip_removed(ri, ri.ex_gw_port,
floating_ip, existing_fixed_ip)
self.floating_ip_added(ri, ri.ex_gw_port,
floating_ip, new_fixed_ip)
ri.floating_ips.remove(fip)
ri.floating_ips.append(new_fip)
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
if self.conf.send_arp_for_ha > 0:
arping_cmd = ['arping', '-A', '-U',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
ip_address]
try:
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
else:
utils.execute(arping_cmd, check_exit_code=True,
root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e))
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def external_gateway_added(self, ri, ex_gw_port, internal_cidrs):
interface_name = self.get_external_device_name(ex_gw_port['id'])
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
namespace=ri.ns_name())
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
gw_ip = ex_gw_port['subnet']['gateway_ip']
if ex_gw_port['subnet']['gateway_ip']:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
root_helper=self.root_helper)
for (c, r) in self.external_gateway_nat_rules(ex_gw_ip,
internal_cidrs,
interface_name):
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
def external_gateway_removed(self, ri, ex_gw_port, internal_cidrs):
interface_name = self.get_external_device_name(ex_gw_port['id'])
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.external_gateway_nat_rules(ex_gw_ip, internal_cidrs,
interface_name):
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
def metadata_filter_rules(self):
rules = []
rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' % self.conf.metadata_port))
return rules
def metadata_nat_rules(self):
rules = []
rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % self.conf.metadata_port))
return rules
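    # For illustration, with the default metadata_port of 9697 the two helpers
    # above produce rules equivalent to:
    #   filter: -A INPUT -s 0.0.0.0/0 -d 127.0.0.1 -p tcp -m tcp
    #           --dport 9697 -j ACCEPT
    #   nat:    -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp
    #           --dport 80 -j REDIRECT --to-port 9697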
def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs,
interface_name):
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})]
for cidr in internal_cidrs:
rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
return rules
def internal_network_added(self, ri, ex_gw_port, network_id, port_id,
internal_cidr, mac_address):
interface_name = self.get_internal_device_name(port_id)
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [internal_cidr],
namespace=ri.ns_name())
ip_address = internal_cidr.split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
if ex_gw_port:
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.internal_network_nat_rules(ex_gw_ip,
internal_cidr):
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
def internal_network_removed(self, ri, ex_gw_port, port_id, internal_cidr):
interface_name = self.get_internal_device_name(port_id)
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name, namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
if ex_gw_port:
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.internal_network_nat_rules(ex_gw_ip,
internal_cidr):
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
def internal_network_nat_rules(self, ex_gw_ip, internal_cidr):
rules = [('snat', '-s %s -j SNAT --to-source %s' %
(internal_cidr, ex_gw_ip))]
return rules
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
ip_cidr = str(floating_ip) + '/32'
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name())
if ip_cidr not in [addr['cidr'] for addr in device.addr.list()]:
net = netaddr.IPNetwork(ip_cidr)
device.addr.add(net.version, ip_cidr, str(net.broadcast))
self._send_gratuitous_arp_packet(ri, interface_name, floating_ip)
for chain, rule in self.floating_forward_rules(floating_ip, fixed_ip):
ri.iptables_manager.ipv4['nat'].add_rule(chain, rule)
ri.iptables_manager.apply()
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
ip_cidr = str(floating_ip) + '/32'
net = netaddr.IPNetwork(ip_cidr)
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name())
device.addr.delete(net.version, ip_cidr)
for chain, rule in self.floating_forward_rules(floating_ip, fixed_ip):
ri.iptables_manager.ipv4['nat'].remove_rule(chain, rule)
ri.iptables_manager.apply()
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
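    # For illustration, with a (hypothetical) floating IP of 203.0.113.10
    # mapped to fixed IP 10.0.0.5, floating_forward_rules() yields:
    #   PREROUTING: -d 203.0.113.10 -j DNAT --to 10.0.0.5
    #   OUTPUT:     -d 203.0.113.10 -j DNAT --to 10.0.0.5
    #   float-snat: -s 10.0.0.5 -j SNAT --to 203.0.113.10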
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
with self.sync_sem:
if router_id in self.router_info:
try:
self._router_removed(router_id)
except Exception:
msg = _("Failed dealing with router "
"'%s' deletion RPC message")
LOG.debug(msg, router_id)
self.fullsync = True
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
if not routers:
return
with self.sync_sem:
try:
self._process_routers(routers)
except Exception:
msg = _("Failed dealing with routers update RPC message")
LOG.debug(msg)
self.fullsync = True
def router_removed_from_agent(self, context, payload):
self.router_deleted(context, payload['router_id'])
def router_added_to_agent(self, context, payload):
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
        # If 'routers' is the full set we know about (it came from a router
        # sync at startup or after an error), look for stale routers across
        # the whole of self.router_info.
        # If 'routers' came from a server-side notification, only consider
        # the intersection of the incoming routers and the ones we have now.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
if not r['admin_state_up']:
continue
            # If namespaces are disabled, only process the router whose ID
            # matches the configured router_id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if ex_net_id and ex_net_id != target_ex_net_id:
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
self.process_router(ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
self._router_removed(router_id)
@periodic_task.periodic_task
def _sync_routers_task(self, context):
        # serialize with the router deletion RPC message handler
with self.sync_sem:
if self.fullsync:
try:
if not self.conf.use_namespaces:
router_id = self.conf.router_id
else:
router_id = None
routers = self.plugin_rpc.get_routers(
                        context, router_id=router_id)
self._process_routers(routers, all_routers=True)
self.fullsync = False
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def after_start(self):
LOG.info(_("L3 agent started"))
def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
#TODO(nati) move this code to iplib
if self.conf.use_namespaces:
            ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
                          root_helper=self.root_helper)
def routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug(_("Added route entry is '%s'"), route)
            # drop any route this one replaces from the removal list
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
            # 'replace' succeeds even if there is no existing route
self._update_routing_table(ri, 'replace', route)
for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route)
self._update_routing_table(ri, 'delete', route)
ri.routes = new_routes
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'quantum-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Quantum server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def main():
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(L3NATAgent.OPTS)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
conf(project='quantum')
config.setup_logging(conf)
server = quantum_service.Service.create(
binary='quantum-l3-agent',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='quantum.agent.l3_agent.L3NATAgentWithStateReport')
service.launch(server).wait()
|
{
"content_hash": "4326e6829537dcd63402930695859365",
"timestamp": "",
"source": "github",
"line_count": 741,
"max_line_length": 79,
"avg_line_length": 43.497975708502025,
"alnum_prop": 0.5423802432365351,
"repo_name": "ykaneko/quantum",
"id": "55bfe2908c80bc6a2141d3f622c86f77641ce090",
"size": "32956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/agent/l3_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4207649"
},
{
"name": "Shell",
"bytes": "9722"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.media_file_list_response import MediaFileListResponse
class TestMediaFileListResponse(unittest.TestCase):
""" MediaFileListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testMediaFileListResponse(self):
"""
Test MediaFileListResponse
"""
model = kinow_client.models.media_file_list_response.MediaFileListResponse()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4368b899019c5e28f33e479c931b895f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 20.175,
"alnum_prop": 0.6864931846344485,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "3cdf904205b1dd4052d14a3ccfef6b43a4b39dbb",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_media_file_list_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
}
|
from zope.interface import Interface
class IAPIFactory(Interface):
def __call__(environ):
""" environ -> IRepozeWhoAPI
"""
class IAPI(Interface):
""" Facade for stateful invocation of underlying plugins.
"""
def authenticate():
""" -> {identity}
o Return an authenticated identity mapping, extracted from the
request environment.
o If no identity can be authenticated, return None.
o Identity will include at least a 'repoze.who.userid' key,
as well as any keys added by metadata plugins.
"""
def challenge(status='403 Forbidden', app_headers=()):
""" -> wsgi application
o Return a WSGI application which represents a "challenge"
(request for credentials) in response to the current request.
"""
def remember(identity=None):
""" -> [headers]
        o Return a sequence of response headers which suffice to remember
the given identity.
o If 'identity' is not passed, use the identity in the environment.
"""
def forget(identity=None):
""" -> [headers]
        o Return a sequence of response headers which suffice to destroy
any credentials used to establish an identity.
o If 'identity' is not passed, use the identity in the environment.
"""
def login(credentials, identifier_name=None):
""" -> (identity, headers)
o This is an API for browser-based application login forms.
o If 'identifier_name' is passed, use it to look up the identifier;
          otherwise, use the first configured identifier.
o Attempt to authenticate 'credentials' as though the identifier
had extracted them.
        o On success, 'identity' will be an authenticated mapping, and 'headers'
will be "remember" headers.
o On failure, 'identity' will be None, and response_headers will be
"forget" headers.
"""
def logout(identifier_name=None):
""" -> (headers)
o This is an API for browser-based application logout.
o If 'identifier_name' is passed, use it to look up the identifier;
          otherwise, use the first configured identifier.
o Returned headers will be "forget" headers.
"""
class IPlugin(Interface):
pass
class IRequestClassifier(IPlugin):
""" On ingress: classify a request.
"""
def __call__(environ):
""" environ -> request classifier string
This interface is responsible for returning a string
value representing a request classification.
o 'environ' is the WSGI environment.
"""
class IChallengeDecider(IPlugin):
""" On egress: decide whether a challenge needs to be presented
to the user.
"""
def __call__(environ, status, headers):
""" args -> True | False
o 'environ' is the WSGI environment.
o 'status' is the HTTP status as returned by the downstream
WSGI application.
o 'headers' are the headers returned by the downstream WSGI
application.
This interface is responsible for returning True if
a challenge needs to be presented to the user, False otherwise.
"""
class IIdentifier(IPlugin):
"""
On ingress: Extract credentials from the WSGI environment and
turn them into an identity.
On egress (remember): Conditionally set information in the response headers
allowing the remote system to remember this identity.
On egress (forget): Conditionally set information in the response
headers allowing the remote system to forget this identity (during
a challenge).
"""
def identify(environ):
""" On ingress:
environ -> { k1 : v1
, ...
, kN : vN
} | None
o 'environ' is the WSGI environment.
o If credentials are found, the returned identity mapping will
contain an arbitrary set of key/value pairs. If the
identity is based on a login and password, the environment
is recommended to contain at least 'login' and 'password'
keys as this provides compatibility between the plugin and
existing authenticator plugins. If the identity can be
'preauthenticated' (e.g. if the userid is embedded in the
identity, such as when we're using ticket-based
authentication), the plugin should set the userid in the
special 'repoze.who.userid' key; no authenticators will be
          asked to authenticate the identity thereafter.
o Return None to indicate that the plugin found no appropriate
credentials.
        o Only IIdentifier plugins which match one of the current
request's classifications will be asked to perform
identification.
o An identifier plugin is permitted to add a key to the
environment named 'repoze.who.application', which should be
an arbitrary WSGI application. If an identifier plugin does
so, this application is used instead of the downstream
application set up within the middleware. This feature is
useful for identifier plugins which need to perform
redirection to obtain credentials. If two identifier
plugins add a 'repoze.who.application' WSGI application to
          the environment, the last one consulted will "win".
"""
def remember(environ, identity):
""" On egress (no challenge required):
args -> [ (header-name, header-value), ...] | None
Return a list of headers suitable for allowing the requesting
system to remember the identification information (e.g. a
Set-Cookie header). Return None if no headers need to be set.
These headers will be appended to any headers returned by the
downstream application.
"""
def forget(environ, identity):
""" On egress (challenge required):
args -> [ (header-name, header-value), ...] | None
Return a list of headers suitable for allowing the requesting
system to forget the identification information (e.g. a
Set-Cookie header with an expires date in the past). Return
None if no headers need to be set. These headers will be
included in the response provided by the challenge app.
"""
class IAuthenticator(IPlugin):
""" On ingress: validate the identity and return a user id or None.
"""
def authenticate(environ, identity):
""" identity -> 'userid' | None
o 'environ' is the WSGI environment.
o 'identity' will be a dictionary (with arbitrary keys and
values).
o The IAuthenticator should return a single user id (optimally
a string) if the identity can be authenticated. If the
          identity cannot be authenticated, the IAuthenticator should
return None.
Each instance of a registered IAuthenticator plugin that
matches the request classifier will be called N times during a
single request, where N is the number of identities found by
any IIdentifierPlugin instances.
An authenticator must not raise an exception if it is provided
an identity dictionary that it does not understand (e.g. if it
presumes that 'login' and 'password' are keys in the
dictionary, it should check for the existence of these keys
before attempting to do anything; if they don't exist, it
should return None).
An authenticator is permitted to add extra keys to the 'identity'
dictionary (e.g., to save metadata from a database query, rather
than requiring a separate query from an IMetadataProvider plugin).
"""
class IChallenger(IPlugin):
""" On egress: Conditionally initiate a challenge to the user to
provide credentials.
        Only challenge plugins which match one of the current
response's classifications will be asked to perform a
challenge.
"""
def challenge(environ, status, app_headers, forget_headers):
""" args -> WSGI application or None
o 'environ' is the WSGI environment.
o 'status' is the status written into start_response by the
downstream application.
o 'app_headers' is the headers list written into start_response by the
downstream application.
o 'forget_headers' is a list of headers which must be passed
back in the response in order to perform credentials reset
(logout). These come from the 'forget' method of
IIdentifier plugin used to do the request's identification.
Examine the values passed in and return a WSGI application
(a callable which accepts environ and start_response as its
two positional arguments, ala PEP 333) which causes a
challenge to be performed. Return None to forego performing a
challenge.
"""
class IMetadataProvider(IPlugin):
"""On ingress: When an identity is authenticated, metadata
providers may scribble on the identity dictionary arbitrarily.
Return values from metadata providers are ignored.
"""
def add_metadata(environ, identity):
"""
Add metadata to the identity (which is a dictionary). One
value is always guaranteed to be in the dictionary when
add_metadata is called: 'repoze.who.userid', representing the
user id of the identity. Availability and composition of
other keys will depend on the identifier plugin which created
the identity.
"""
|
{
"content_hash": "c4f2fefb860060fcabe9c13956aa5cb2",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 35.17142857142857,
"alnum_prop": 0.6474411047928513,
"repo_name": "kawamon/hue",
"id": "c0cbe143702dbecdcc0521c3ad984a5f782b8ebf",
"size": "9848",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/repoze.who-2.3/repoze/who/interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
json_data = {
"cpu": {
"cpu_util": "container_cpu_system_seconds_total",
"gauge": "%"
},
"memory": {
"memory.usage": "container_memory_usage_bytes",
"guage": "MB"}
}
def get_data(metric):
return json_data[metric]
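# Example usage (illustrative): callers look up one of the metric groups
# defined above by name.
#
#     get_data("cpu")
#     # -> {"cpu_util": "container_cpu_system_seconds_total", "gauge": "%"}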
|
{
"content_hash": "3a41ef0d1fcbffb0bc966c172dd5c843",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 20.23076923076923,
"alnum_prop": 0.5247148288973384,
"repo_name": "WiproOpenSourcePractice/galaxia",
"id": "eee756f51603d256680952a269cf2ee925dbdb44",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galaxia/gexporter/handler/docker/prometheus_ceilometer_container_metrics_mapping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "163572"
}
],
"symlink_target": ""
}
|
NSNAM_CODE_BASE_URL = "http://code.nsnam.org/"
PYBINDGEN_BRANCH = 'https://github.com/gjcarneiro/pybindgen.git'
LOCAL_PYBINDGEN_PATH = 'pybindgen'
#
# The last part of the path name to use to find the regression traces tarball.
# path will be APPNAME + '-' + VERSION + REGRESSION_SUFFIX + TRACEBALL_SUFFIX,
# e.g., ns-3-dev-ref-traces.tar.bz2
#
TRACEBALL_SUFFIX = ".tar.bz2"
# NetAnim
NETANIM_REPO = "http://code.nsnam.org/netanim"
NETANIM_RELEASE_URL = "http://www.nsnam.org/tools/netanim"
LOCAL_NETANIM_PATH = "netanim"
# bake
BAKE_REPO = "http://code.nsnam.org/bake"
|
{
"content_hash": "88354115ef7648c18e2a665ef2981e9b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 30.210526315789473,
"alnum_prop": 0.7142857142857143,
"repo_name": "softDi/clusim",
"id": "2b3666c49e3886e4ecd830064ea3e74ff7101ada",
"size": "575",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ns3/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3021"
},
{
"name": "C",
"bytes": "365226"
},
{
"name": "C++",
"bytes": "24340132"
},
{
"name": "CSS",
"bytes": "3775"
},
{
"name": "Click",
"bytes": "19348"
},
{
"name": "Gnuplot",
"bytes": "9919"
},
{
"name": "HTML",
"bytes": "7942"
},
{
"name": "JavaScript",
"bytes": "7698"
},
{
"name": "Makefile",
"bytes": "92131"
},
{
"name": "Matlab",
"bytes": "39069"
},
{
"name": "Perl",
"bytes": "302716"
},
{
"name": "Perl 6",
"bytes": "151"
},
{
"name": "Python",
"bytes": "44191047"
},
{
"name": "QMake",
"bytes": "6602"
},
{
"name": "Shell",
"bytes": "146434"
}
],
"symlink_target": ""
}
|
import json
from .oauth import OAuth2Test
class SoundcloudOAuth2Test(OAuth2Test):
backend_path = "social_core.backends.soundcloud.SoundcloudOAuth2"
user_data_url = "https://api.soundcloud.com/me.json"
expected_username = "foobar"
access_token_body = json.dumps({"access_token": "foobar", "token_type": "bearer"})
user_data_body = json.dumps(
{
"website": None,
"myspace_name": None,
"public_favorites_count": 0,
"followings_count": 0,
"full_name": "Foo Bar",
"id": 10101010,
"city": None,
"track_count": 0,
"playlist_count": 0,
"discogs_name": None,
"private_tracks_count": 0,
"followers_count": 0,
"online": True,
"username": "foobar",
"description": None,
"subscriptions": [],
"kind": "user",
"quota": {
"unlimited_upload_quota": False,
"upload_seconds_left": 7200,
"upload_seconds_used": 0,
},
"website_title": None,
"primary_email_confirmed": False,
"permalink_url": "http://soundcloud.com/foobar",
"private_playlists_count": 0,
"permalink": "foobar",
"upload_seconds_left": 7200,
"country": None,
"uri": "https://api.soundcloud.com/users/10101010",
"avatar_url": "https://a1.sndcdn.com/images/"
"default_avatar_large.png?ca77017",
"plan": "Free",
}
)
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
{
"content_hash": "617a62ea40f1cd25a751e74bb2328add",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 86,
"avg_line_length": 32.660377358490564,
"alnum_prop": 0.510687463893703,
"repo_name": "python-social-auth/social-core",
"id": "974bbc9beec0d78636ca9c142e4b3dd081395b24",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_core/tests/backends/test_soundcloud.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "Makefile",
"bytes": "316"
},
{
"name": "Python",
"bytes": "807862"
},
{
"name": "Shell",
"bytes": "1923"
}
],
"symlink_target": ""
}
|
from lxml import etree
import mock
from oslo_serialization import jsonutils
import webob
from cinder.api.contrib import extended_snapshot_attributes
from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import fake_snapshot
from cinder.tests import fake_volume
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {'id': UUID1,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake',
'progress': '0%',
'expected_attrs': ['metadata']}
def fake_snapshot_get(self, context, snapshot_id):
param = _get_default_snapshot_param()
return param
def fake_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
class ExtendedSnapshotAttributesTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-snapshot-attributes:'
def setUp(self):
super(ExtendedSnapshotAttributesTest, self).setUp()
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app())
return res
def _get_snapshot(self, body):
return jsonutils.loads(body).get('snapshot')
def _get_snapshots(self, body):
return jsonutils.loads(body).get('snapshots')
def assertSnapshotAttributes(self, snapshot, project_id, progress):
self.assertEqual(snapshot.get('%sproject_id' % self.prefix),
project_id)
self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_show(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get):
ctx = context.RequestContext('fake', 'fake', auth_token=True)
snapshot = _get_default_snapshot_param()
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
url = '/v2/fake/snapshots/%s' % UUID1
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertSnapshotAttributes(self._get_snapshot(res.body),
project_id='fake',
progress='0%')
def test_detail(self):
url = '/v2/fake/snapshots/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for snapshot in self._get_snapshots(res.body):
self.assertSnapshotAttributes(snapshot,
project_id='fake',
progress='0%')
class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest):
content_type = 'application/xml'
ext = extended_snapshot_attributes
prefix = '{%s}' % ext.Extended_snapshot_attributes.namespace
def _get_snapshot(self, body):
return etree.XML(body)
def _get_snapshots(self, body):
return etree.XML(body).getchildren()
|
{
"content_hash": "f9a9d17c02c077612051b5e037eef36e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 34.2,
"alnum_prop": 0.6301865775549986,
"repo_name": "tmenjo/cinder-2015.1.1",
"id": "b0daf1989ad4c8a902578dc0db4828ba2c85cab3",
"size": "4228",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/api/contrib/test_extended_snapshot_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10804398"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
{
"content_hash": "ad258d27189aab1962062637e0c215f4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.7267441860465116,
"repo_name": "bgaifullin/python-jiractl",
"id": "b3beb7d612e15eab10bdcc8c39e77e504ec2614e",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43054"
}
],
"symlink_target": ""
}
|
from .model import PaddleModel # noqa # pylint: disable=unused-import
|
{
"content_hash": "dc5437809e8e829194f7a1c0a6937cfe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 70,
"avg_line_length": 71,
"alnum_prop": 0.7746478873239436,
"repo_name": "kubeflow/kfserving-lts",
"id": "05ba18c483207e18fb226d665886e1f3035fc5dc",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-0.6",
"path": "python/paddleserver/paddleserver/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
}
|
from SpatialIndexer import SpatialIndexer
from operator import sub as _sub
from operator import add as _add
from operator import mul as _mul
class ParticleSystem(object):
"""System of particles.
    Maintains the set of particles and runs the physics simulation over them
    according to the specified laws.
"""
def __init__(self, laws, initialParticles = [], initialTick = 0):
"""Initialise the particle system.
laws = laws object
initialParticles = list of particles
initialTick = start value for tick counter
"""
self.indexer = SpatialIndexer(laws.maxInteractRadius)
self.laws = laws
self.particles = []
self.tick = initialTick
self.particleDict = {}
self.add(*initialParticles)
def add(self, *newParticles):
"""Add the specified particle(s) into the system"""
self.particles.extend(newParticles)
for p in newParticles:
self.particleDict[p.ID] = p
self.indexer.updateLoc(*newParticles)
def remove(self, *oldParticles):
"""Remove the specified particle(s) from the system.
Note that this method does not destroy bonds from other particles to these ones.
"""
for particle in oldParticles:
self.particles.remove(particle)
del self.particleDict[particle.ID]
self.indexer.remove(*oldParticles)
def removeByID(self, *ids):
"""Remove particle(s) as specified by id(s) from the system.
Note that this method does not destroy bonds from other particles to these ones.
"""
particles = [self.particleDict[id] for id in ids]
self.remove( *particles )
def updateLoc(self, *particles):
"""Notify this physics system that the specified particle(s)
have changed position.
Must be called if you change a particle's position,
before calling run().
"""
self.indexer.updateLoc(*particles)
def withinRadius(self, centre, radius, filter=(lambda particle:True)):
"""Returns a list of zero or more (particle, distSquared) tuples,
representing those particles within radius distance of the
specified centre coords.
        The distance-squared from the centre coords is returned too, saving
        you from having to calculate it again yourself.
You can specify a filter function that takes a candidate particle
as an argument and should return True if it is to be included
(if it is within the radius, of course). This is to allow efficient
pre-filtering of the particles before the distance test is done.
"""
return self.indexer.withinRadius(centre, radius, filter)
def run(self, cycles = 1):
"""Run the simulation for a given number of cycles"""
_indexer = self.indexer
_laws = self.laws
while cycles > 0:
cycles -= 1
self.tick += 1
_tick = self.tick
for p in self.particles:
p.doInteractions(_indexer, _laws, _tick)
for p in self.particles:
p.update(_laws)
_indexer.updateAll()
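# Illustrative usage sketch.  The laws object and the particles come from
# elsewhere in the Physics package; the names used here (SimpleLaws, p1..p3)
# are assumptions for the example, not defined in this module.
#
#     laws = SimpleLaws(bondLength=100)
#     sim = ParticleSystem(laws, initialParticles=[p1, p2, p3])
#     sim.run(cycles=100)                      # advance the simulation 100 ticks
#     nearby = sim.withinRadius((0, 0), 50.0)  # [(particle, distSquared), ...]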
|
{
"content_hash": "1fc75569a2fbd7df4569aeb389a3cbdd",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 91,
"avg_line_length": 37.550561797752806,
"alnum_prop": 0.6071214841412328,
"repo_name": "bbc/kamaelia",
"id": "65986c3549eee11249c73e3aef39e07f07785c81",
"size": "4394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MH/Layout/Physics/ParticleSystem.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
import webbrowser
import click
from ghutil.types import Repository
@click.command()
@Repository.argument("repo")
def cli(repo):
"""Open a repository in a web browser"""
webbrowser.open_new(repo.data["html_url"])
|
{
"content_hash": "eddf93faf02066f1dab069d2b9a334d8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 22.2,
"alnum_prop": 0.7297297297297297,
"repo_name": "jwodder/ghutil",
"id": "2621fbadb0a1e2332c89fea6bfbe639f777fd0b4",
"size": "222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ghutil/cli/repo/web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "533902"
}
],
"symlink_target": ""
}
|
import sys
import logging
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.tour_model_queries as esdtmq
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.analysis.intake.segmentation.section_segmentation as eaiss
import emission.analysis.intake.cleaning.location_smoothing as eaicl
import emission.analysis.intake.cleaning.clean_and_resample as eaicr
import emission.net.ext_service.habitica.sync_habitica as autocheck
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.DEBUG)
cache_uuid_list = enua.UserCache.get_uuid_list()
logging.info("cache UUID list = %s" % cache_uuid_list)
for uuid in cache_uuid_list:
logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
uh.moveToLongTerm()
long_term_uuid_list = esta.TimeSeries.get_uuid_list()
logging.info("*" * 10 + "long term UUID list = %s" % long_term_uuid_list)
for uuid in long_term_uuid_list:
if uuid is None:
continue
logging.info("*" * 10 + "UUID %s: filter accuracy if needed" % uuid + "*" * 10)
eaicf.filter_accuracy(uuid)
logging.info("*" * 10 + "UUID %s: segmenting into trips" % uuid + "*" * 10)
eaist.segment_current_trips(uuid)
logging.info("*" * 10 + "UUID %s: segmenting into sections" % uuid + "*" * 10)
eaiss.segment_current_sections(uuid)
logging.info("*" * 10 + "UUID %s: smoothing sections" % uuid + "*" * 10)
eaicl.filter_current_sections(uuid)
logging.info("*" * 10 + "UUID %s: cleaning and resampling timeline" % uuid + "*" * 10)
eaicr.clean_and_resample(uuid)
logging.info("*" * 10 + "UUID %s: finding common trips" % uuid + "*" * 10)
esdtmq.make_tour_model_from_raw_user_data(uuid)
logging.info("*" * 10 + "UUID %s: checking active mode trips to autocheck habits" % uuid + "*" * 10)
autocheck.reward_active_transportation(uuid)
logging.info("*" * 10 + "UUID %s: storing views to cache" % uuid + "*" * 10)
uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
uh.storeViewsToCache()
|
{
"content_hash": "07f0a7a98294126af70538c41b7e4bb2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 108,
"avg_line_length": 44.12280701754386,
"alnum_prop": 0.6616302186878728,
"repo_name": "yw374cornell/e-mission-server",
"id": "14215c93d7f6cbb164b035071280e72c5646fade",
"size": "2515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/intake_stage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "717871"
},
{
"name": "HTML",
"bytes": "114875"
},
{
"name": "JavaScript",
"bytes": "7620696"
},
{
"name": "Jupyter Notebook",
"bytes": "97095629"
},
{
"name": "Python",
"bytes": "1584848"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
import simplejson as json
from django.shortcuts import render
from django.http import HttpResponse
from django.http import Http404
from django.core.exceptions import ValidationError
from django.db.utils import DatabaseError
from mozdns.domain.models import Domain
from core.search.compiler.django_compile import search_type
from core.utils import locked_function
from mozdns.record.utils import get_obj_meta
def record_search(request, record_type=None):
if not record_type:
record_type = 'A'
return render(request, 'record/record_search.html', {
'record_type': record_type
})
def record(request, record_type='', record_pk=''):
domains = Domain.objects.filter(is_reverse=False)
if not record_type:
record_type = 'A'
return render(request, 'record/record.html', {
'record_type': record_type,
'record_pk': record_pk,
'domains': json.dumps([domain.name for domain in domains]),
})
def record_delete(request, record_type='', record_pk=''):
if request.method != 'POST':
raise Http404
obj_meta = get_obj_meta(record_type)
try:
object_ = obj_meta.Klass.objects.get(pk=record_pk)
except obj_meta.Klass.DoesNotExist:
error = "Could not find that object."
return HttpResponse(json.dumps({'success': False, 'error': error}))
try:
object_.delete()
    except ValidationError as e:
error = e.messages[0]
return HttpResponse(json.dumps({'success': False, 'error': error}))
return HttpResponse(json.dumps({'success': True}))
def record_search_ajax(request):
"""
This function will return a list of records matching the 'query' of type
'record_type'. It's used for ajaxy stuff.
"""
query = request.GET.get('query', '')
record_type = request.GET.get('record_type', '')
obj_meta = get_obj_meta(record_type)
if not record_type:
raise Http404
if not query and record_type:
return render(request, 'record/record_search_results.html', {
'objs': [],
'record_type': record_type,
})
if not obj_meta.Klass:
raise Http404
records, error = search_type(query, record_type)
if error:
total_obj_count = 0
records = []
else:
try:
total_obj_count = records.count()
records = records[:50]
        except DatabaseError as e:
if "Got error " in str(e) and " from regexp" in str(e):
                # This is nasty. If the user is using an invalid regex
                # pattern, the db might shit a brick
total_obj_count = 0
records = []
else:
raise
return render(request, 'record/record_search_results.html', {
'query': "{0} AND type=:{1}".format(query, record_type),
'objs': records,
'record_type': record_type,
'total_obj_count': total_obj_count
})
def record_ajax(request):
# This function is pretty much a router
if request.method == 'POST':
return _record_post(request)
else:
record_type = request.GET.get('record_type', '')
record_pk = request.GET.get('record_pk', '')
obj_meta = get_obj_meta(record_type)()
return obj_meta.get(request, record_type, record_pk)
@locked_function('inventory.record_lock', 10)
def _record_post(request):
record_type = request.POST.get('record_type', '')
record_pk = request.POST.get('record_pk', '')
obj_meta = get_obj_meta(record_type)()
return obj_meta.post(request, record_type, record_pk)
|
{
"content_hash": "ed339398ba6b62acecec8112d3ce7530",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 31.321739130434782,
"alnum_prop": 0.6213214880621877,
"repo_name": "mozilla/inventory",
"id": "490c1eaf8583cea1997aeade51da78a7bd427145",
"size": "3602",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mozdns/record/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1300342"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642733"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.db import models
class Nonce(models.Model):
server_url = models.CharField(max_length=2047)
timestamp = models.IntegerField()
salt = models.CharField(max_length=40)
def __unicode__(self):
return u"Nonce: %s, %s" % (self.server_url, self.salt)
class Association(models.Model):
server_url = models.TextField(max_length=2047)
handle = models.CharField(max_length=255)
secret = models.TextField(max_length=255) # Stored base64 encoded
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField(max_length=64)
def __unicode__(self):
return u"Association: %s, %s" % (self.server_url, self.handle)
class UserOpenID(models.Model):
user = models.ForeignKey(User)
claimed_id = models.TextField(max_length=2047)
display_id = models.TextField(max_length=2047)
def clean_fields(self):
"""
Validate uniqueness of claimed_id here because MySQL
doesn't like using unique on TextFields without specifying
a key length, which Django doesn't allow you to do. This can
        be removed if bug # 524796 [1] gets fixed.
[1] https://bugs.launchpad.net/django-openid-auth/+bug/524796
"""
        # Managers aren't accessible from model instances, so query the model
        # class and exclude this row when checking for duplicate claimed_ids.
        claims = UserOpenID.objects.filter(
            claimed_id=self.claimed_id).exclude(pk=self.pk)
if claims:
raise ValidationError('Claimed ID must be unique')
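# Hedged illustration of the uniqueness check above. The users and URLs are
# made up, and a configured database is assumed:
#
#     UserOpenID.objects.create(user=alice,
#                               claimed_id='https://example.org/id/alice',
#                               display_id='https://example.org/id/alice')
#     dup = UserOpenID(user=bob,
#                      claimed_id='https://example.org/id/alice',
#                      display_id='https://example.org/id/alice')
#     dup.clean_fields()  # raises ValidationError('Claimed ID must be unique')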
|
{
"content_hash": "3704bbe29c389aa0faaeeeee9f312fc8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 34.627906976744185,
"alnum_prop": 0.6816655473472129,
"repo_name": "paulosman/django-openid-auth",
"id": "193eb614107b20969f1a1a9c3e406cf42a399d70",
"size": "2914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_openid_auth/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "113554"
}
],
"symlink_target": ""
}
|
"""
Remove all your old unwanted django models automatically!
Same feeling as blowing up Megaton guaranteed!
"""
__version__ = "0.0.0"
from .cleaner import AbraxoCleaner
|
{
"content_hash": "5c3b53349df93536a6c4d693dfd1f135",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 21.75,
"alnum_prop": 0.735632183908046,
"repo_name": "PhilipGarnero/django-abraxo-cleaner",
"id": "a2b081e4411986617249cc359c4ae1a3807ab96f",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abraxo_cleaner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2524"
}
],
"symlink_target": ""
}
|
import datetime
import flask
from flask_mongoengine import MongoEngine
from tests import FlaskMongoEngineTestCase
class BasicAppTestCase(FlaskMongoEngineTestCase):
def setUp(self):
super(BasicAppTestCase, self).setUp()
db = MongoEngine()
class Todo(db.Document):
title = db.StringField(max_length=60)
text = db.StringField()
done = db.BooleanField(default=False)
pub_date = db.DateTimeField(default=datetime.datetime.now)
db.init_app(self.app)
Todo.drop_collection()
self.Todo = Todo
@self.app.route('/')
def index():
return '\n'.join(x.title for x in self.Todo.objects)
@self.app.route('/add', methods=['POST'])
def add():
form = flask.request.form
todo = self.Todo(title=form['title'],
text=form['text'])
todo.save()
return 'added'
@self.app.route('/show/<id>/')
def show(id):
todo = self.Todo.objects.get_or_404(id=id)
return '\n'.join([todo.title, todo.text])
self.db = db
def test_connection_default(self):
self.app.config['MONGODB_SETTINGS'] = {}
self.app.config['TESTING'] = True
db = MongoEngine()
db.init_app(self.app)
self.app.config['TESTING'] = True
db = MongoEngine()
db.init_app(self.app)
def test_with_id(self):
c = self.app.test_client()
resp = c.get('/show/38783728378090/')
self.assertEqual(resp.status_code, 404)
c.post('/add', data={'title': 'First Item', 'text': 'The text'})
resp = c.get('/show/%s/' % self.Todo.objects.first_or_404().id)
self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data.decode('utf-8'), 'First Item\nThe text')
def test_basic_insert(self):
c = self.app.test_client()
c.post('/add', data={'title': 'First Item', 'text': 'The text'})
c.post('/add', data={'title': '2nd Item', 'text': 'The text'})
rv = c.get('/')
        self.assertEqual(rv.data.decode('utf-8'), 'First Item\n2nd Item')
def test_request_context(self):
with self.app.test_request_context():
todo = self.Todo(title='Test', text='test')
todo.save()
self.assertEqual(self.Todo.objects.count(), 1)
|
{
"content_hash": "af7f984dbbbb41332d505b36cd19cfa5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 31.69736842105263,
"alnum_prop": 0.564549605645496,
"repo_name": "losintikfos/flask-mongoengine",
"id": "474a28a36f2997513bfff90144e3af450a5da777",
"size": "2409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_basic_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6502"
},
{
"name": "Python",
"bytes": "79171"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import csv
import plistlib
import sys
import os
import io
import pprint
import glob
CSV_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'names.csv')
MANIFESTS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'manifests')
pp = pprint.PrettyPrinter(indent=2)
def update_manifest_fields(manifest, row=None):
"""Takes a manifest dictionary and updates the fields. Creating blank
entires so everything is uniform or addding data from the CSV file like
asset tag."""
# pp.pprint(manifest)
installs = ['managed_installs',
'managed_uninstalls',
'managed_updates',
'optional_installs']
# Make our empty arrays if they don't exist so it's uniform
for array in installs:
if manifest.get(array) is None:
manifest[array] = []
    # Birdville Special keys
special = ['asset', 'display_name', 'user']
for arr in special:
if manifest.get(arr) is None:
try:
if row.get(arr) is not None:
manifest[arr] = row[arr]
except(AttributeError):
manifest[arr] = ''
return manifest
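# Illustrative sketch of update_manifest_fields (the CSV row values below are
# hypothetical):
#
#     row = {'asset': 'A-1234', 'display_name': 'Lab-01', 'user': 'jdoe'}
#     manifest = update_manifest_fields({}, row)
#     # manifest now has empty 'managed_installs', 'managed_uninstalls',
#     # 'managed_updates' and 'optional_installs' lists, plus 'asset',
#     # 'display_name' and 'user' copied from the row.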
def eprint(*args, **kwargs):
"""Print to stderr http://stackoverflow.com/a/14981125/4811765"""
print(*args, file=sys.stderr, **kwargs)
def main():
"""For all serials in CSV_FILE we will create a manifest with the proper
munki fields and add missing data to the manifest if not present."""
csvfile = io.open(CSV_FILE, 'rbU')
csv_data = csv.DictReader(csvfile, delimiter=',')
for row in csv_data:
manifest = {}
serial_manifest = os.path.join(MANIFESTS_DIR, row['serial'])
if os.path.isfile(serial_manifest):
# The manifest file does exist. We will update fields.
manifest = plistlib.readPlist(serial_manifest)
# Update all the fields and add meta data
manifest = update_manifest_fields(manifest, row)
# Write the updated manifest to disk
plistlib.writePlist(manifest, serial_manifest)
else:
# The manifest file doesn't exist. We will create a blank
# manifest and add the data from the csv file. We also want to
# try and delete the named manifest if it exists.
serial_manifest = os.path.join(MANIFESTS_DIR, row['serial'])
named_manifest = os.path.join(MANIFESTS_DIR, row['name'])
if os.path.isfile(named_manifest):
# We have a manifest on disk as the computer name
manifest = plistlib.readPlist(named_manifest)
# Update all the fields and add meta data
manifest = update_manifest_fields(manifest, row)
# Write the updated manifest to disk
plistlib.writePlist(manifest, serial_manifest)
# Remove the old named manifest file from disk DANGERZONE
os.remove(named_manifest)
print("File '{0}' with serial '{1}' has been removed"
.format(
os.path.basename(named_manifest),
row['serial'])
)
else:
# We are creating a blank manifest for this machine.
                # Potentially scary town.
manifest = {'included_manifests': []}
# Update all the fields and add meta data
manifest = update_manifest_fields(manifest, row)
# Write the updated manifest to disk
plistlib.writePlist(manifest, serial_manifest)
eprint("WARN: This manifest was created from scratch for "
"serial '{0}'".format(row['serial']))
csvfile.close()
    # Update the remaining manifest files to have the same uniform items
for manifest_file in os.listdir(MANIFESTS_DIR):
if not manifest_file.startswith('.'):
try:
manifest = plistlib.readPlist(os.path.join(MANIFESTS_DIR,
manifest_file))
# Update all the fields
manifest = update_manifest_fields(manifest)
# Write the updated manifest to disk
plistlib.writePlist(manifest, os.path.join(MANIFESTS_DIR,
manifest_file))
except(IOError):
pass
if __name__ == '__main__':
main()
|
{
"content_hash": "5c881506c815e7d962ee3172ea9f669b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 40.68141592920354,
"alnum_prop": 0.5708070480748314,
"repo_name": "clburlison/scripts",
"id": "f832da6e6880432733e47fa40eccb57aa26293f6",
"size": "4616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clburlison_scripts/add_asset_and_name_to_manifest/add_asset_and_name_to_manifest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "823"
},
{
"name": "Makefile",
"bytes": "10552"
},
{
"name": "Python",
"bytes": "33727"
},
{
"name": "Shell",
"bytes": "54723"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import os
from music21 import common
from music21 import converter
from music21 import corpus
from music21 import exceptions21
from music21 import stream
from music21 import text
from music21 import environment
_MOD = 'features/base.py'
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class FeatureException(exceptions21.Music21Exception):
pass
class Feature(object):
'''
An object representation of a feature, capable of presentation in a variety of formats,
and returned from FeatureExtractor objects.
Feature objects are simple. It is FeatureExtractors that store all metadata and processing
routines for creating Feature objects.
'''
def __init__(self):
# these values will be filled by the extractor
self.dimensions = None # number of dimensions
# data storage; possibly use numpy array
self.vector = None
# consider not storing this values, as may not be necessary
self.name = None # string name representation
self.description = None # string description
self.isSequential = None # True or False
self.discrete = None # is discrete or continuous
def _getVectors(self):
'''Prepare a vector of appropriate size and return
'''
return [0] * self.dimensions
def prepareVectors(self):
'''Prepare the vector stored in this feature.
'''
self.vector = self._getVectors()
def normalize(self):
'''Normalize the vector between 0 and 1, assuming there is more than one value.
'''
if self.dimensions == 1:
return # do nothing
m = max(self.vector)
if m == 0:
return # do nothing
scalar = 1. / m # get floating point scalar for speed
temp = self._getVectors()
for i, v in enumerate(self.vector):
temp[i] = v * scalar
self.vector = temp
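# Worked sketch of normalize(): every value is scaled by 1.0 / max(vector),
# so a three-dimensional feature [1, 2, 4] becomes [0.25, 0.5, 1.0]
# (values computed from the method above, not taken from the test suite):
#
#     f = Feature()
#     f.dimensions = 3
#     f.vector = [1, 2, 4]
#     f.normalize()
#     assert f.vector == [0.25, 0.5, 1.0]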
#-------------------------------------------------------------------------------
class FeatureExtractorException(exceptions21.Music21Exception):
pass
class FeatureExtractor(object):
    '''A model of a process that extracts a feature from a Music21 Stream. The main public interface is the extract() method.
    The extractor can be passed a Stream or a reference to a DataInstance. All Streams are internally converted to a DataInstance if necessary. Usage of a DataInstance offers significant performance advantages, as common forms of the Stream are cached for easy processing.
'''
def __init__(self, dataOrStream=None, *arguments, **keywords):
self.stream = None # the original Stream, or None
self.data = None # a DataInstance object: use to get data
self.setData(dataOrStream)
self._feature = None # Feature object that results from processing
if not hasattr(self, "name"):
self.name = None # string name representation
if not hasattr(self, "description"):
self.description = None # string description
if not hasattr(self, "isSequential"):
self.isSequential = None # True or False
if not hasattr(self, "dimensions"):
self.dimensions = None # number of dimensions
if not hasattr(self, "discrete"):
self.discrete = True # default
if not hasattr(self, "normalize"):
self.normalize = False # default is no
def setData(self, dataOrStream):
'''Set the data that this FeatureExtractor will process. Either a Stream or a DataInstance object can be provided.
'''
if dataOrStream is not None:
if (hasattr(dataOrStream, 'classes') and 'Stream' in
dataOrStream.classes):
#environLocal.printDebug(['creating new DataInstance: this should be a Stream:', dataOrStream])
                # if we are passed a stream, create a DataInstance to
                # manage its data; this is less efficient but is good for testing
self.stream = dataOrStream
self.data = DataInstance(self.stream)
# if a DataInstance, do nothing
else:
self.stream = None
self.data = dataOrStream
def getAttributeLabels(self):
        '''Return a list of strings in a form that is appropriate for data storage.
>>> fe = features.jSymbolic.AmountOfArpeggiationFeature()
>>> fe.getAttributeLabels()
['Amount_of_Arpeggiation']
>>> fe = features.jSymbolic.FifthsPitchHistogramFeature()
>>> fe.getAttributeLabels()
['Fifths_Pitch_Histogram_0', 'Fifths_Pitch_Histogram_1', 'Fifths_Pitch_Histogram_2', 'Fifths_Pitch_Histogram_3', 'Fifths_Pitch_Histogram_4', 'Fifths_Pitch_Histogram_5', 'Fifths_Pitch_Histogram_6', 'Fifths_Pitch_Histogram_7', 'Fifths_Pitch_Histogram_8', 'Fifths_Pitch_Histogram_9', 'Fifths_Pitch_Histogram_10', 'Fifths_Pitch_Histogram_11']
'''
post = []
if self.dimensions == 1:
post.append(self.name.replace(' ', '_'))
else:
for i in range(self.dimensions):
post.append('%s_%s' % (self.name.replace(' ', '_'), i))
return post
def _fillFeatureAttributes(self, feature=None):
'''Fill the attributes of a Feature with the descriptors in the FeatureExtractor.
'''
# operate on passed-in feature or self._feature
if feature is None:
feature = self._feature
feature.name = self.name
feature.description = self.description
feature.isSequential = self.isSequential
feature.dimensions = self.dimensions
feature.discrete = self.discrete
return feature
def _prepareFeature(self):
'''Prepare a new Feature object for data acquisition.
>>> s = stream.Stream()
>>> fe = features.jSymbolic.InitialTimeSignatureFeature(s)
>>> fe._prepareFeature()
>>> fe._feature.name
'Initial Time Signature'
>>> fe._feature.dimensions
2
>>> fe._feature.vector
[0, 0]
'''
self._feature = Feature()
self._fillFeatureAttributes() # will fill self._feature
        self._feature.prepareVectors() # will fill the vector with necessary zeros
def _process(self):
'''Do processing necessary, storing result in _feature.
'''
# do work in subclass, calling on self.data
pass
def extract(self, source=None):
'''Extract the feature and return the result.
'''
if source is not None:
self.stream = source
# preparing the feature always sets self._feature to a new instance
self._prepareFeature()
self._process() # will set Feature object to _feature
# assume we always want to normalize?
if self.normalize:
self._feature.normalize()
return self._feature
def getBlankFeature(self):
        '''Return a properly configured plain feature as a placeholder
>>> from music21 import features
>>> fe = features.jSymbolic.InitialTimeSignatureFeature()
>>> fe.getBlankFeature().vector
[0, 0]
'''
f = Feature()
self._fillFeatureAttributes(f)
        f.prepareVectors() # will fill the vector with necessary zeros
return f
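# Typical extract() flow, as a sketch (the specific extractor and the shapes
# of its output are assumptions, shown for orientation only):
#
#     s = corpus.parse('bwv66.6')
#     fe = features.jSymbolic.AmountOfArpeggiationFeature(s)
#     f = fe.extract()       # prepares a Feature, runs _process(), returns it
#     f.name                 # 'Amount of Arpeggiation'
#     f.vector               # a list of length f.dimensions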
#-------------------------------------------------------------------------------
class StreamForms(object):
'''A dictionary-like wrapper of a Stream, providing
numerous representations, generated on-demand, and cached.
A single StreamForms object can be created for an
entire Score, as well as one for each Part and/or Voice.
A DataSet object manages one or more StreamForms
objects, and exposes them to FeatureExtractors for usage.
'''
def __init__(self, streamObj, prepareStream=True):
self.stream = streamObj
if self.stream is not None:
if prepareStream:
self._base = self._prepareStream(self.stream)
else: # possibly make a copy?
self._base = self.stream
else:
self._base = None
# basic data storage is a dictionary
self._forms = {}
def keys(self):
# will only return forms that are established
return self._forms.keys()
def _prepareStream(self, streamObj):
'''
Common routines done on Streams prior to processing. Return a new Stream
'''
# this causes lots of deepcopys, but an inPlace operation loses
# accuracy on feature extractors
streamObj = streamObj.stripTies(retainContainers=True)
return streamObj
def __getitem__(self, key):
'''Get a form of this Stream, using a cached version if available.
'''
# first, check for cached version
if key in self._forms:
return self._forms[key]
# else, process, store, and return
elif key in ['flat']:
self._forms['flat'] = self._base.flat
return self._forms['flat']
elif key in ['flat.pitches']:
self._forms['flat.pitches'] = self._base.flat.pitches
return self._forms['flat.pitches']
elif key in ['flat.notes']:
self._forms['flat.notes'] = self._base.flat.notes
return self._forms['flat.notes']
elif key in ['getElementsByClass.Measure']:
# need to determine if should concatenate
# measure for all parts if a score?
if 'Score' in self._base.classes:
post = stream.Stream()
for p in self._base.parts:
# insert in overlapping offset positions
for m in p.getElementsByClass('Measure'):
post.insert(m.getOffsetBySite(p), m)
else:
post = self._base.getElementsByClass('Measure')
self._forms['getElementsByClass.Measure'] = post
return self._forms['getElementsByClass.Measure']
elif key in ['flat.getElementsByClass.TimeSignature']:
self._forms['flat.getElementsByClass.TimeSignature'] = self._base.flat.getElementsByClass('TimeSignature')
return self._forms['flat.getElementsByClass.TimeSignature']
elif key in ['flat.getElementsByClass.KeySignature']:
self._forms['flat.getElementsByClass.KeySignature'] = self._base.flat.getElementsByClass('KeySignature')
return self._forms['flat.getElementsByClass.KeySignature']
elif key in ['flat.getElementsByClass.Harmony']:
self._forms['flat.getElementsByClass.Harmony'] = self._base.flat.getElementsByClass('Harmony')
return self._forms['flat.getElementsByClass.Harmony']
elif key in ['metronomeMarkBoundaries']: # already flat
self._forms['metronomeMarkBoundaries'] = self._base.metronomeMarkBoundaries()
return self._forms['metronomeMarkBoundaries']
# some methods that return new streams
elif key in ['chordify']:
if 'Score' in self._base.classes:
# options here permit getting part information out
# of chordified representation
self._forms['chordify'] = self._base.chordify(
addPartIdAsGroup=True, removeRedundantPitches=False)
else: # for now, just return a normal Part or Stream
self._forms['chordify'] = self._base
return self._forms['chordify']
elif key in ['chordify.getElementsByClass.Chord']:
# need flat here, as chordify might return Measures
x = self.__getitem__('chordify').flat.getElementsByClass('Chord')
self._forms['chordify.getElementsByClass.Chord'] = x
return self._forms['chordify.getElementsByClass.Chord']
# create a Part in a Score for each Instrument
elif key in ['partitionByInstrument']:
from music21 import instrument
x = instrument.partitionByInstrument(self._base)
self._forms['partitionByInstrument'] = x
return self._forms['partitionByInstrument']
# create a dictionary of encountered set classes and a count
elif key in ['chordifySetClassHistogram']:
histo = {}
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
key = c.forteClassTnI
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['chordifySetClassHistogram'] = histo
return self._forms['chordifySetClassHistogram']
# a dictionary of pitch class sets
elif key in ['chordifyPitchClassSetHistogram']:
histo = {}
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
key = c.orderedPitchClassesString
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['chordifyPitchClassSetHistogram'] = histo
return self._forms['chordifyPitchClassSetHistogram']
# dictionary of common chord types
elif key in ['chordifyTypesHistogram']:
histo = {}
# keys are methods on Chord
keys = ['isTriad', 'isSeventh', 'isMajorTriad', 'isMinorTriad', 'isIncompleteMajorTriad', 'isIncompleteMinorTriad', 'isDiminishedTriad', 'isAugmentedTriad', 'isDominantSeventh', 'isDiminishedSeventh', 'isHalfDiminishedSeventh']
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
for key in keys:
if key not in histo:
histo[key] = 0
# get the function attr, call it, check bool
if getattr(c, key)():
histo[key] += 1
# not breaking here means that we may get multiple
# hits for the same chord
self._forms['chordifyTypesHistogram'] = histo
return self._forms['chordifyTypesHistogram']
# a dictionary of intervals
#self.flat.melodicIntervals(skipRests=True, skipChords=False, skipGaps=True)
# a dictionary of quarter length values
elif key in ['noteQuarterLengthHistogram']:
histo = {}
for n in self.__getitem__('flat.notes'):
key = n.quarterLength
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['noteQuarterLengthHistogram'] = histo
return self._forms['noteQuarterLengthHistogram']
# data lists / histograms
elif key in ['pitchClassHistogram']:
histo = [0] * 12
for p in self.__getitem__('flat.pitches'): # recursive call
histo[p.pitchClass] += 1
self._forms['pitchClassHistogram'] = histo
return self._forms['pitchClassHistogram']
elif key in ['midiPitchHistogram']:
histo = [0] * 128
for p in self.__getitem__('flat.pitches'): # recursive call
histo[p.midi] += 1
self._forms['midiPitchHistogram'] = histo
return self._forms['midiPitchHistogram']
# bins for all abs spans between adjacent melodic notes
elif key in ['midiIntervalHistogram']:
# note that this does not optimize and cache part presentations
histo = [0] * 128
# if we have parts, must add one at a time
if self._base.hasPartLikeStreams():
parts = self._base.parts
else:
parts = [self._base] # emulate a list
for p in parts:
# will be flat
# edit June 2012:
# was causing millions of deepcopy calls
# so I made it inPlace, but for some reason
# code errored with 'p =' not present
                # also, this part has measures...so should retainContainers be True?
p = p.stripTies(retainContainers=False, inPlace=True)
# noNone means that we will see all connections, even w/ a gap
post = p.findConsecutiveNotes(skipRests=True,
skipChords=True, skipGaps=True, noNone=True)
for i, n in enumerate(post):
if i < len(post) - 1: # if not last
iNext = i + 1
nNext = post[iNext]
try:
histo[abs(n.pitch.midi - nNext.pitch.midi)] += 1
except AttributeError:
pass # problem with not having midi
self._forms['midiIntervalHistogram'] = histo
return self._forms['midiIntervalHistogram']
elif key in ['contourList']:
# list of all directed half steps
cList = []
# if we have parts, must add one at a time
if self._base.hasPartLikeStreams():
parts = self._base.parts
else:
parts = [self._base] # emulate a list
for p in parts:
                # this may be unnecessary but we cannot access cached part data
# edit June 2012:
# was causing lots of deepcopy calls, so I made
                # it inPlace=True, but errors when 'p =' not present
                # also, this part has measures...so should retainContainers be True?
p = p.stripTies(retainContainers=False, inPlace=True) # will be flat
# noNone means that we will see all connections, even w/ a gap
post = p.findConsecutiveNotes(skipRests=True,
skipChords=False, skipGaps=True, noNone=True)
for i, n in enumerate(post):
if i < (len(post) - 1): # if not last
iNext = i + 1
nNext = post[iNext]
if n.isChord:
ps = n.sortDiatonicAscending().pitches[-1].midi
else: # normal note
ps = n.pitch.midi
if nNext.isChord:
psNext = nNext.sortDiatonicAscending().pitches[-1].midi
else: # normal note
psNext = nNext.pitch.midi
cList.append(psNext - ps)
#environLocal.printDebug(['contourList', cList])
self._forms['contourList'] = cList
return self._forms['contourList']
elif key in ['flat.analyzedKey']:
# this will use default weightings
            self._forms['flat.analyzedKey'] = self.__getitem__('flat').analyze(
                method='key')
            return self._forms['flat.analyzedKey']
elif key in ['flat.tonalCertainty']:
# this will use default weightings
foundKey = self.__getitem__('flat.analyzedKey')
self._forms['flat.tonalCertainty'] = foundKey.tonalCertainty()
return self._forms['flat.tonalCertainty']
elif key in ['metadata']:
self._forms['metadata'] = self._base.metadata
return self._forms['metadata']
elif key in ['secondsMap']:
secondsMap = self.__getitem__('flat').secondsMap
post = []
# filter only notes; all elements would otherwise be gathered
for bundle in secondsMap:
if 'GeneralNote' in bundle['element'].classes:
post.append(bundle)
self._forms['secondsMap'] = post
return self._forms['secondsMap']
elif key in ['assembledLyrics']:
self._forms['assembledLyrics'] = text.assembleLyrics(self._base)
return self._forms['assembledLyrics']
else:
raise AttributeError('no such attribute: %s' % key)
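# Minimal sketch of the on-demand caching above: the first lookup of a form
# computes it and stores it in self._forms, later lookups return the cached
# object (the work used here is illustrative):
#
#     sf = StreamForms(corpus.parse('bwv66.6'))
#     histo = sf['pitchClassHistogram']          # computed and cached
#     assert 'pitchClassHistogram' in sf.keys()
#     assert sf['pitchClassHistogram'] is histo  # second lookup hits the cache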
#-------------------------------------------------------------------------------
class DataInstance(object):
'''
A data instance for analysis. This object prepares a Stream
(by stripping ties, etc.) and stores
multiple commonly-used stream representations once, providing rapid processing.
'''
def __init__(self, streamObj=None, id=None): #@ReservedAssignment
self.stream = streamObj
# perform basic operations that are performed on all
# streams
# store an id for the source stream: file path url, corpus url
# or metadata title
if id is not None:
self._id = id
else:
if hasattr(self.stream, 'metadata'):
self._id = self.stream.metadata # may be None
# the attribute name in the data set for this label
self._classLabel = None
# store the class value for this data instance
self._classValue = None
# store a dictionary of StreamForms
self._forms = StreamForms(self.stream)
# if parts exist, store a forms for each
self._formsByPart = []
if hasattr(self.stream, 'parts'):
self.partsCount = len(self.stream.parts)
for p in self.stream.parts:
# note that this will join ties and expand rests again
self._formsByPart.append(StreamForms(p))
else:
self.partsCount = 0
# TODO: store a list of voices, extracted from each part,
# presently this will only work on a measure stream
self._formsByVoice = []
if hasattr(self.stream, 'voices'):
for v in self.stream.voices:
self._formsByPart.append(StreamForms(v))
def setClassLabel(self, classLabel, classValue=None):
'''Set the class label, as well as the class value if known. The class label is the attribute name used to define the class of this data instance.
>>> #_DOCS_SHOW s = corpus.parse('bwv66.6')
>>> s = stream.Stream() #_DOCS_HIDE
>>> di = features.DataInstance(s)
>>> di.setClassLabel('Composer', 'Bach')
'''
self._classLabel = classLabel
self._classValue = classValue
def getClassValue(self):
if self._classValue is None:
return ''
else:
return self._classValue
def getId(self):
if self._id is None:
return ''
else:
# make sure there are no spaces
return self._id.replace(' ', '_')
def __getitem__(self, key):
'''Get a form of this Stream, using a cached version if available.
>>> s = corpus.parse('bwv66.6')
>>> di = features.DataInstance(s)
>>> len(di['flat'])
193
>>> len(di['flat.pitches'])
163
>>> len(di['flat.notes'])
163
>>> len(di['getElementsByClass.Measure'])
40
>>> len(di['getElementsByClass.Measure'])
40
>>> len(di['flat.getElementsByClass.TimeSignature'])
4
'''
if key in ['parts']:
# return a list of Forms for each part
return self._formsByPart
elif key in ['voices']:
# return a list of Forms for voices
            return self._formsByVoice
# try to create by calling the attribute
# will raise an attribute error if there is a problem
return self._forms[key]
#-------------------------------------------------------------------------------
class OutputFormatException(exceptions21.Music21Exception):
pass
class OutputFormat(object):
'''Provide output for a DataSet, passed as an initial argument.
'''
def __init__(self, dataSet=None):
# assume a two dimensional array
self._ext = None # store a file extension if necessary
# pass a data set object
self._dataSet = dataSet
def getHeaderLines(self):
'''Get the header as a list of lines.
'''
pass # define in subclass
def write(self, fp=None, includeClassLabel=True, includeId=True):
        '''Write the file. If no file path is given, a temporary file will be written.
'''
if fp is None:
fp = environLocal.getTempFile(suffix=self._ext)
if not fp.endswith(self._ext):
            raise OutputFormatException(
                'file path %s does not end with %s' % (fp, self._ext))
f = open(fp, 'w')
f.write(self.getString(includeClassLabel=includeClassLabel,
includeId=includeId))
f.close()
return fp
class OutputTabOrange(OutputFormat):
'''Tab delimited file format used with Orange.
http://orange.biolab.si/doc/reference/Orange.data.formats/
'''
def __init__(self, dataSet=None):
OutputFormat.__init__(self, dataSet=dataSet)
self._ext = '.tab'
def getHeaderLines(self, includeClassLabel=True, includeId=True):
'''Get the header as a list of lines.
>>> f = [features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet()
>>> ds.addFeatureExtractors(f)
>>> of = features.OutputTabOrange(ds)
>>> for x in of.getHeaderLines(): print(x)
['Identifier', 'Changes_of_Meter']
['string', 'discrete']
['meta', '']
>>> ds = features.DataSet(classLabel='Composer')
>>> ds.addFeatureExtractors(f)
>>> of = features.OutputTabOrange(ds)
>>> for x in of.getHeaderLines(): print(x)
['Identifier', 'Changes_of_Meter', 'Composer']
['string', 'discrete', 'discrete']
['meta', '', 'class']
'''
post = []
post.append(self._dataSet.getAttributeLabels(
includeClassLabel=includeClassLabel, includeId=includeId))
# second row meta data
row = []
for x in self._dataSet.getDiscreteLabels(
includeClassLabel=includeClassLabel, includeId=includeId):
if x is None: # this is a string entry
row.append('string')
elif x is True: # if True, it is discrete
row.append('discrete')
else:
row.append('continuous')
post.append(row)
# third row metadata
row = []
for x in self._dataSet.getClassPositionLabels(includeId=includeId):
if x is None: # the id value
row.append('meta')
elif x is True: # if True, it is the class column
row.append('class')
else:
row.append('')
post.append(row)
return post
def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
'''Get the complete DataSet as a string with the appropriate headers.
'''
if lineBreak is None:
lineBreak = '\n'
msg = []
header = self.getHeaderLines(includeClassLabel=includeClassLabel,
includeId=includeId)
data = header + self._dataSet.getFeaturesAsList(
includeClassLabel=includeClassLabel)
for row in data:
sub = []
for e in row:
sub.append(str(e))
msg.append('\t'.join(sub))
return lineBreak.join(msg)
class OutputCSV(OutputFormat):
'''Comma-separated value list.
'''
def __init__(self, dataSet=None):
OutputFormat.__init__(self, dataSet=dataSet)
self._ext = '.csv'
def getHeaderLines(self, includeClassLabel=True, includeId=True):
'''Get the header as a list of lines.
>>> f = [features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet(classLabel='Composer')
>>> ds.addFeatureExtractors(f)
>>> of = features.OutputCSV(ds)
>>> of.getHeaderLines()[0]
['Identifier', 'Changes_of_Meter', 'Composer']
'''
post = []
post.append(self._dataSet.getAttributeLabels(
includeClassLabel=includeClassLabel, includeId=includeId))
return post
def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
if lineBreak is None:
lineBreak = '\n'
msg = []
header = self.getHeaderLines(includeClassLabel=includeClassLabel,
includeId=includeId)
data = header + self._dataSet.getFeaturesAsList(
includeClassLabel=includeClassLabel, includeId=includeId)
for row in data:
sub = []
for e in row:
sub.append(str(e))
msg.append(','.join(sub))
return lineBreak.join(msg)
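# Hedged usage sketch for the OutputFormat subclasses (the feature id and
# class value mirror the doctests elsewhere in this module; the written file
# path depends on the environment's temp directory):
#
#     ds = DataSet(classLabel='Composer')
#     ds.addFeatureExtractors([features.jSymbolic.ChangesOfMeterFeature])
#     ds.addData('bwv66.6', classValue='Bach')
#     ds.process()
#     path = OutputCSV(ds).write()   # writes a '.csv' temp file, returns its path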
class OutputARFF(OutputFormat):
'''An ARFF (Attribute-Relation File Format) file.
See http://weka.wikispaces.com/ARFF+%28stable+version%29 for more details
>>> oa = features.OutputARFF()
>>> oa._ext
'.arff'
'''
def __init__(self, dataSet=None):
OutputFormat.__init__(self, dataSet=dataSet)
self._ext = '.arff'
def getHeaderLines(self, includeClassLabel=True, includeId=True):
'''Get the header as a list of lines.
>>> f = [features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet(classLabel='Composer')
>>> ds.addFeatureExtractors(f)
>>> of = features.OutputARFF(ds)
>>> for x in of.getHeaderLines(): print(x)
@RELATION Composer
@ATTRIBUTE Identifier STRING
@ATTRIBUTE Changes_of_Meter NUMERIC
@ATTRIBUTE class {}
@DATA
'''
post = []
# get three parallel lists
attrs = self._dataSet.getAttributeLabels(
includeClassLabel=includeClassLabel, includeId=includeId)
discreteLabels = self._dataSet.getDiscreteLabels(
includeClassLabel=includeClassLabel, includeId=includeId)
classLabels = self._dataSet.getClassPositionLabels(includeId=includeId)
post.append('@RELATION %s' % self._dataSet.getClassLabel())
for i, attrLabel in enumerate(attrs):
discrete = discreteLabels[i]
classLabel = classLabels[i]
if not classLabel: # a normal attribute
if discrete is None: # this is an identifier
post.append('@ATTRIBUTE %s STRING' % attrLabel)
elif discrete is True:
post.append('@ATTRIBUTE %s NUMERIC' % attrLabel)
else: # this needs to be a NOMINAL type
post.append('@ATTRIBUTE %s NUMERIC' % attrLabel)
else:
values = self._dataSet.getUniqueClassValues()
post.append('@ATTRIBUTE class {%s}' % ','.join(values))
# include start of data declaration
post.append('@DATA')
return post
def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
if lineBreak is None:
lineBreak = '\n'
msg = []
header = self.getHeaderLines(includeClassLabel=includeClassLabel,
includeId=includeId)
for row in header:
msg.append(row)
data = self._dataSet.getFeaturesAsList(
includeClassLabel=includeClassLabel)
# data is separated by commas
for row in data:
sub = []
for e in row:
sub.append(str(e))
msg.append(','.join(sub))
return lineBreak.join(msg)
#-------------------------------------------------------------------------------
class DataSetException(exceptions21.Music21Exception):
pass
class DataSet(object):
'''
    A set of features, as well as a collection of data to operate on:
    multiple DataInstance objects, a FeatureSet, and an OutputFormat.
>>> ds = features.DataSet(classLabel='Composer')
>>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature, features.jSymbolic.InitialTimeSignatureFeature]
>>> ds.addFeatureExtractors(f)
>>> ds.addData('bwv66.6', classValue='Bach')
>>> ds.addData('bach/bwv324.xml', classValue='Bach')
>>> ds.process()
>>> ds.getFeaturesAsList()[0]
['bwv66.6', 0.0, 1.0, 0.375, 0.03125, 0.5, 0.1875, 0.90625, 0.0, 0.4375, 0.6875, 0.09375, 0.875, 0, 4, 4, 'Bach']
>>> ds.getFeaturesAsList()[1]
['bach/bwv324.xml', 0.12, 0.0, 1.0, 0.12, 0.56..., 0.0, ..., 0.52..., 0.0, 0.68..., 0.0, 0.56..., 0, 4, 4, 'Bach']
>>> ds = ds.getString()
By default, all exceptions are caught and printed if debug mode is on.
Set ds.failFast = True to not catch them.
Set ds.quiet = False to print them regardless of debug mode.
'''
def __init__(self, classLabel=None, featureExtractors=()):
# assume a two dimensional array
self.dataInstances = []
self.streams = []
# order of feature extractors is the order used in the presentations
self._featureExtractors = []
# the label of the class
self._classLabel = classLabel
# store a multidimensional storage of all features
self._features = []
self.failFast = False
self.quiet = True
# set extractors
self.addFeatureExtractors(featureExtractors)
def getClassLabel(self):
return self._classLabel
def addFeatureExtractors(self, values):
'''Add one or more FeatureExtractor objects, either as a list or as an individual object.
'''
# features are instantiated here
# however, they do not have a data assignment
if not common.isIterable(values):
values = [values]
# need to create instances
for sub in values:
self._featureExtractors.append(sub())
def getAttributeLabels(self, includeClassLabel=True,
includeId=True):
'''Return a list of all attribute labels. Optionally add a class label field and/or an id field.
>>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
>>> ds.getAttributeLabels(includeId=False)
['Pitch_Class_Distribution_0', 'Pitch_Class_Distribution_1', 'Pitch_Class_Distribution_2', 'Pitch_Class_Distribution_3', 'Pitch_Class_Distribution_4', 'Pitch_Class_Distribution_5', 'Pitch_Class_Distribution_6', 'Pitch_Class_Distribution_7', 'Pitch_Class_Distribution_8', 'Pitch_Class_Distribution_9', 'Pitch_Class_Distribution_10', 'Pitch_Class_Distribution_11', 'Changes_of_Meter', 'Composer']
'''
post = []
# place ids first
if includeId:
post.append('Identifier')
for fe in self._featureExtractors:
post += fe.getAttributeLabels()
if self._classLabel is not None and includeClassLabel:
post.append(self._classLabel.replace(' ', '_'))
return post
def getDiscreteLabels(self, includeClassLabel=True, includeId=True):
'''Return column labels for discrete status.
>>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
>>> ds.getDiscreteLabels()
[None, False, False, False, False, False, False, False, False, False, False, False, False, True, True]
'''
post = []
if includeId:
post.append(None) # just a spacer
for fe in self._featureExtractors:
# need as many statements of discrete as there are dimensions
post += [fe.discrete] * fe.dimensions
# class label is assumed always discrete
if self._classLabel is not None and includeClassLabel:
post.append(True)
return post
def getClassPositionLabels(self, includeId=True):
'''Return column labels for the presence of a class definition
>>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
>>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
>>> ds.getClassPositionLabels()
[None, False, False, False, False, False, False, False, False, False, False, False, False, False, True]
'''
post = []
if includeId:
post.append(None) # just a spacer
for fe in self._featureExtractors:
# need as many statements of discrete as there are dimensions
post += [False] * fe.dimensions
# class label is assumed always discrete
if self._classLabel is not None:
post.append(True)
return post
def addData(self, dataOrStreamOrPath, classValue=None, id=None): #@ReservedAssignment
'''Add a Stream, DataInstance, or path to a corpus or local file to this data set.
        The class value passed here is assumed to correspond to the classLabel assigned at startup.
'''
if self._classLabel is None:
raise DataSetException('cannot add data unless a class label for this DataSet has been set.')
s = None
if isinstance(dataOrStreamOrPath, DataInstance):
di = dataOrStreamOrPath
s = di.stream
elif common.isStr(dataOrStreamOrPath):
# could be corpus or file path
if os.path.exists(dataOrStreamOrPath) or dataOrStreamOrPath.startswith('http'):
s = converter.parse(dataOrStreamOrPath)
else: # assume corpus
s = corpus.parse(dataOrStreamOrPath)
# assume we can use this string as an id
di = DataInstance(s, id=dataOrStreamOrPath)
else:
# for now, assume all else are streams
s = dataOrStreamOrPath
di = DataInstance(dataOrStreamOrPath, id=id)
di.setClassLabel(self._classLabel, classValue)
self.dataInstances.append(di)
self.streams.append(s)
def process(self):
'''Process all Data with all FeatureExtractors. Processed data is stored internally as numerous Feature objects.
'''
# clear features
self._features = []
for data in self.dataInstances:
row = []
for fe in self._featureExtractors:
fe.setData(data)
                # in some cases there might be problems; do not fail
try:
fReturned = fe.extract()
except Exception as e: # for now take any error # pylint: disable=broad-except
                    fList = ['failed feature extractor:', fe, str(e)]
if self.quiet is True:
environLocal.printDebug(fList)
else:
environLocal.warn(fList)
if self.failFast is True:
raise e
                    # provide a blank feature instead
fReturned = fe.getBlankFeature()
row.append(fReturned) # get feature and store
            # rows will align with the order of DataInstances
self._features.append(row)
def getFeaturesAsList(self, includeClassLabel=True, includeId=True, concatenateLists=True):
'''Get processed data as a list of lists, merging any sub-lists in multi-dimensional features.
'''
post = []
for i, row in enumerate(self._features):
v = []
di = self.dataInstances[i]
if includeId:
v.append(di.getId())
for f in row:
if concatenateLists:
v += f.vector
else:
v.append(f.vector)
if includeClassLabel:
v.append(di.getClassValue())
post.append(v)
if not includeClassLabel and not includeId:
return post[0]
else:
return post
def getUniqueClassValues(self):
'''Return a list of unique class values.
'''
post = []
for di in self.dataInstances:
v = di.getClassValue()
if v not in post:
post.append(v)
return post
def _getOutputFormat(self, featureFormat):
if featureFormat.lower() in ['tab', 'orange', 'taborange', None]:
outputFormat = OutputTabOrange(dataSet=self)
elif featureFormat.lower() in ['csv', 'comma']:
outputFormat = OutputCSV(dataSet=self)
elif featureFormat.lower() in ['arff', 'attribute']:
outputFormat = OutputARFF(dataSet=self)
else:
return None
return outputFormat
def _getOutputFormatFromFilePath(self, fp):
'''Get an output format from a file path if possible, otherwise return None.
>>> ds = features.DataSet()
>>> ds._getOutputFormatFromFilePath('test.tab')
<music21.features.base.OutputTabOrange object at ...>
>>> ds._getOutputFormatFromFilePath('test.csv')
<music21.features.base.OutputCSV object at ...>
>>> ds._getOutputFormatFromFilePath('junk') is None
True
'''
# get format from fp if possible
of = None
if '.' in fp:
if self._getOutputFormat(fp.split('.')[-1]) is not None:
of = self._getOutputFormat(fp.split('.')[-1])
return of
def getString(self, outputFmt='tab'):
'''Get a string representation of the data set in a specific format.
'''
# pass reference to self to output
outputFormat = self._getOutputFormat(outputFmt)
return outputFormat.getString()
def write(self, fp=None, format=None, includeClassLabel=True): #@ReservedAssignment
'''
        Write the data set to a file, determining the output format from the format argument or, if not given, from the file path.
'''
if format is None and fp is not None:
outputFormat = self._getOutputFormatFromFilePath(fp)
else:
outputFormat = self._getOutputFormat(format)
        if outputFormat is None:
raise DataSetException('no output format could be defined from file path %s or format %s' % (fp, format))
outputFormat.write(fp=fp, includeClassLabel=includeClassLabel)
def allFeaturesAsList(streamInput):
'''
    returns a tuple containing ALL currently implemented feature extractors. The first
    element of the tuple holds the jSymbolic vectors, and the second the native vectors. Vectors are NOT nested.
    streamInput can be a Stream, DataInstance, or path to a corpus or local file.
>>> #_DOCS_SHOW s = corpus.parse('bwv66.6')
>>> s = converter.parse('tinynotation: 4/4 c4 d e2') #_DOCS_HIDE
>>> f = features.allFeaturesAsList(s)
>>> f[1][0:3]
[[1], [0.6899992497638124], [2]]
>>> len(f[0]) > 65
True
>>> len(f[1]) > 20
True
'''
from music21.features import jSymbolic, native
ds = DataSet(classLabel='')
f = [f for f in jSymbolic.featureExtractors]
ds.addFeatureExtractors(f)
ds.addData(streamInput)
ds.process()
jsymb = ds.getFeaturesAsList( includeClassLabel=False, includeId=False, concatenateLists=False)
ds._featureExtractors = []
ds._features = []
n = [f for f in native.featureExtractors]
ds.addFeatureExtractors(n)
ds.process()
nat = ds.getFeaturesAsList(includeClassLabel=False, includeId=False, concatenateLists=False)
return (jsymb, nat)
#-------------------------------------------------------------------------------
def extractorsById(idOrList, library=('jSymbolic', 'native')):
'''Given one or more :class:`~music21.features.FeatureExtractor` ids, return the appropriate subclass. An optional `library` argument can be added to define which module is used. Current options are jSymbolic and native.
>>> [x.id for x in features.extractorsById('p20')]
['P20']
>>> [x.id for x in features.extractorsById(['p19', 'p20'])]
['P19', 'P20']
>>> [x.id for x in features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'])]
['R31', 'R32', 'R33', 'R34', 'R35', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14', 'P15', 'P16', 'P19', 'P20', 'P21']
Get all feature extractors from all libraries
>>> y = [x.id for x in features.extractorsById('all')]
>>> y[0:3], y[-3:-1]
(['M1', 'M2', 'M3'], ['MD1', 'MC1'])
'''
from music21.features import jSymbolic
from music21.features import native
if not common.isIterable(library):
library = [library]
featureExtractors = []
for l in library:
if l.lower() in ['jsymbolic', 'all']:
featureExtractors += jSymbolic.featureExtractors
elif l.lower() in ['native', 'all']:
featureExtractors += native.featureExtractors
if not common.isIterable(idOrList):
idOrList = [idOrList]
flatIds = []
for featureId in idOrList:
featureId = featureId.strip().lower()
        featureId = featureId.replace('-', '')
        featureId = featureId.replace(' ', '')
flatIds.append(featureId)
post = []
if len(flatIds) == 0:
return post
for fe in featureExtractors:
if fe.id.lower() in flatIds or flatIds[0].lower() == 'all':
post.append(fe)
return post
def extractorById(idOrList, library=('jSymbolic', 'native')):
'''Get the first feature matched by extractorsById().
>>> s = stream.Stream()
>>> s.append(note.Note('A4'))
>>> fe = features.extractorById('p20')(s) # call class
>>> fe.extract().vector
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
'''
ebi = extractorsById(idOrList=idOrList, library=library)
if ebi:
return ebi[0]
return None # no match
def vectorById(streamObj, vectorId, library=('jSymbolic', 'native')):
'''Utility function to get a vector from an extractor
>>> s = stream.Stream()
>>> s.append(note.Note('A4'))
>>> features.vectorById(s, 'p20')
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
'''
fe = extractorById(vectorId)(streamObj) # call class with stream
if fe is None:
return None # could raise exception
return fe.extract().vector
def getIndex(featureString, extractorType=None):
'''
returns the list index of the given feature extractor and the feature extractor
category (jsymbolic or native). If feature extractor string is not in either
jsymbolic or native feature extractors, returns None
    optionally include the extractorType ('jsymbolic' or 'native') if known,
    and searching will be made more efficient
>>> features.getIndex('Range')
(59, 'jsymbolic')
>>> features.getIndex('Ends With Landini Melodic Contour')
(19, 'native')
>>> features.getIndex('abrandnewfeature!')
>>> features.getIndex('Fifths Pitch Histogram','jsymbolic')
(68, 'jsymbolic')
>>> features.getIndex('Tonal Certainty','native')
(1, 'native')
'''
from music21.features import jSymbolic, native
    if extractorType is None or extractorType == 'jsymbolic':
        indexcnt = 0
        for feature in jSymbolic.featureExtractors:
            if feature().name == featureString:
                return indexcnt, 'jsymbolic'
            indexcnt += 1
    if extractorType is None or extractorType == 'native':
        indexcnt = 0
        for feature in native.featureExtractors:
            if feature().name == featureString:
                return indexcnt, 'native'
            indexcnt += 1
return None
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
# def testGetAllExtractorsMethods(self):
# '''
    # ahh... this test takes a really long time....
# '''
# from music21 import stream, features, pitch
# s = corpus.parse('bwv66.6').measures(1,5)
# self.assertEqual( len(features.alljSymbolicFeatures(s)), 70)
# self.assertEqual(len (features.allNativeFeatures(s)),21)
# self.assertEqual(str(features.alljSymbolicVectors(s)[1:5]),
#'[[2.6630434782608696], [2], [2], [0.391304347826087]]')
# self.assertEqual(str(features.allNativeVectors(s)[0:4]),
#'[[1], [1.0328322202181006], [2], [1.0]]')
def testStreamFormsA(self):
from music21 import features
s = corpus.parse('corelli/opus3no1/1grave')
di = features.DataInstance(s)
self.assertEqual(len(di['flat']), 291)
self.assertEqual(len(di['flat.notes']), 238)
#di['chordify'].show('t')
self.assertEqual(len(di['chordify']), 20)
self.assertEqual(len(di['chordify.getElementsByClass.Chord']), 144)
self.assertEqual(di['chordifySetClassHistogram'], {'2-2': 6, '2-3': 12, '2-4': 21, '2-5': 5,
'3-10': 4, '3-11': 33, '3-2': 3, '3-4': 7,
'3-6': 7, '3-7': 9, '3-8': 6, '3-9': 16,
'1-1': 15})
self.maxDiff = None
self.assertEqual(di['chordifyTypesHistogram'], {'isMinorTriad': 8, 'isAugmentedTriad': 0,
'isTriad': 37, 'isSeventh': 0, 'isDiminishedTriad': 4,
'isDiminishedSeventh': 0, 'isIncompleteMajorTriad': 21,
'isHalfDiminishedSeventh': 0, 'isMajorTriad': 25,
'isDominantSeventh': 0, 'isIncompleteMinorTriad': 12})
self.assertEqual(di['noteQuarterLengthHistogram'], {0.5: 116, 1.0: 39, 1.5: 27, 2.0: 31, 3.0: 2, 4.0: 3,
0.75: 4, 0.25: 16})
# can access parts by index
self.assertEqual(len(di['parts']), 3)
# stored in parts are StreamForms instances, caching their results
self.assertEqual(len(di['parts'][0]['flat.notes']), 71)
self.assertEqual(len(di['parts'][1]['flat.notes']), 66)
# getting a measure by part
self.assertEqual(len(di['parts'][0]['getElementsByClass.Measure']), 19)
self.assertEqual(len(di['parts'][1]['getElementsByClass.Measure']), 19)
self.assertEqual(di['parts'][0]['pitchClassHistogram'], [9, 1, 11, 0, 9, 13, 0, 11, 0, 12, 5, 0])
# the sum of the two arrays is the pitch class histogram of the complete
# work
self.assertEqual(di['pitchClassHistogram'], [47, 2, 25, 0, 25, 42, 0, 33, 0, 38, 22, 4])
def testStreamFormsB(self):
from music21 import features, note
s = stream.Stream()
for p in ['c4', 'c4', 'd-4', 'd#4', 'f#4', 'a#4', 'd#5', 'a5']:
s.append(note.Note(p))
di = features.DataInstance(s)
self.assertEqual(di['midiIntervalHistogram'], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# # in most cases will want to get a vector for each part
# s = corpus.parse('corelli/opus3no1/1grave')
# di = features.DataInstance(s)
# self.assertEqual(di['parts'][0]['midiIntervalHistogram'], [9, 1, 4, 3, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
#
# self.assertEqual(di['parts'][1]['midiIntervalHistogram'], [0, 1, 3, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def testStreamFormsC(self):
from pprint import pformat
from music21 import features, note
s = stream.Stream()
for p in ['c4', 'c4', 'd-4', 'd#4', 'f#4', 'a#4', 'd#5', 'a5']:
s.append(note.Note(p))
di = features.DataInstance(s)
self.assertEqual(pformat(di['secondsMap']), """[{'durationSeconds': 0.5,
'element': <music21.note.Note C>,
'endTimeSeconds': 0.5,
'offsetSeconds': 0.0,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note C>,
'endTimeSeconds': 1.0,
'offsetSeconds': 0.5,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note D->,
'endTimeSeconds': 1.5,
'offsetSeconds': 1.0,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note D#>,
'endTimeSeconds': 2.0,
'offsetSeconds': 1.5,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note F#>,
'endTimeSeconds': 2.5,
'offsetSeconds': 2.0,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note A#>,
'endTimeSeconds': 3.0,
'offsetSeconds': 2.5,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note D#>,
'endTimeSeconds': 3.5,
'offsetSeconds': 3.0,
'voiceIndex': None},
{'durationSeconds': 0.5,
'element': <music21.note.Note A>,
'endTimeSeconds': 4.0,
'offsetSeconds': 3.5,
'voiceIndex': None}]""", pformat(di['secondsMap']))
def testDataSetOutput(self):
from music21 import features
# test just a few features
featureExtractors = features.extractorsById(['ql1', 'ql2', 'ql4'], 'native')
# need to define what the class label will be
ds = features.DataSet(classLabel='Composer')
ds.addFeatureExtractors(featureExtractors)
# add works, defining the class value
ds.addData('bwv66.6', classValue='Bach')
ds.addData('corelli/opus3no1/1grave', classValue='Corelli')
ds.process()
# manually create an output format and get output
of = OutputCSV(ds)
post = of.getString(lineBreak='//')
self.assertEqual(post, 'Identifier,Unique_Note_Quarter_Lengths,Most_Common_Note_Quarter_Length,Range_of_Note_Quarter_Lengths,Composer//bwv66.6,3,1.0,1.5,Bach//corelli/opus3no1/1grave,8,0.5,3.75,Corelli')
# without id
post = of.getString(lineBreak='//', includeId=False)
self.assertEqual(post, 'Unique_Note_Quarter_Lengths,Most_Common_Note_Quarter_Length,Range_of_Note_Quarter_Lengths,Composer//3,1.0,1.5,Bach//8,0.5,3.75,Corelli')
ds.write(format='tab')
ds.write(format='csv')
ds.write(format='arff')
def testFeatureFail(self):
from music21 import features
from music21 import base
featureExtractors = ['p10', 'p11', 'p12', 'p13']
featureExtractors = features.extractorsById(featureExtractors,
'jSymbolic')
ds = features.DataSet(classLabel='Composer')
ds.addFeatureExtractors(featureExtractors)
# create problematic streams
s = stream.Stream()
#s.append(None) # will create a wrapper -- NOT ANYMORE
s.append(base.ElementWrapper(None))
ds.addData(s, classValue='Monteverdi')
ds.addData(s, classValue='Handel')
# process with all feature extractors, store all features
ds.process()
#---------------------------------------------------------------------------
# silent tests
def xtestComposerClassificationJSymbolic(self):
'''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
'''
from music21 import features
featureExtractors = ['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21']
# will return a list
featureExtractors = features.extractorsById(featureExtractors,
'jSymbolic')
#worksBach = corpus.getBachChorales()[100:143] # a middle range
worksMonteverdi = corpus.getMonteverdiMadrigals()[:43]
worksBach = corpus.getBachChorales()[:5]
# worksMonteverdi = corpus.getMonteverdiMadrigals()[:5]
# need to define what the class label will be
ds = features.DataSet(classLabel='Composer')
ds.addFeatureExtractors(featureExtractors)
# add works, defining the class value
# for w in worksBach:
# ds.addData(w, classValue='Bach')
for w in worksMonteverdi:
ds.addData(w, classValue='Monteverdi')
for w in worksBach:
ds.addData(w, classValue='Bach')
# process with all feature extractors, store all features
ds.process()
ds.write(format='tab')
ds.write(format='csv')
ds.write(format='arff')
def xtestRegionClassificationJSymbolicA(self):
'''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
'''
from music21 import features
featureExtractors = features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'],
'jSymbolic')
oChina1 = corpus.parse('essenFolksong/han1')
oChina2 = corpus.parse('essenFolksong/han2')
oMitteleuropa1 = corpus.parse('essenFolksong/boehme10')
oMitteleuropa2 = corpus.parse('essenFolksong/boehme20')
ds = features.DataSet(classLabel='Region')
ds.addFeatureExtractors(featureExtractors)
# add works, defining the class value
for o, name in [(oChina1, 'han1'),
(oChina2, 'han2')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='China', id=songId)
for o, name in [(oMitteleuropa1, 'boehme10'),
(oMitteleuropa2, 'boehme20')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='Mitteleuropa', id=songId)
# process with all feature extractors, store all features
ds.process()
ds.getString(format='tab') # pylint: disable=unexpected-keyword-arg
ds.getString(format='csv') # pylint: disable=unexpected-keyword-arg
ds.getString(format='arff') # pylint: disable=unexpected-keyword-arg
def xtestRegionClassificationJSymbolicB(self):
'''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
'''
from music21 import features
# features common to both collections
featureExtractors = features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'],
'jSymbolic')
# first bundle
ds = features.DataSet(classLabel='Region')
ds.addFeatureExtractors(featureExtractors)
oChina1 = corpus.parse('essenFolksong/han1')
oMitteleuropa1 = corpus.parse('essenFolksong/boehme10')
# add works, defining the class value
for o, name in [(oChina1, 'han1')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='China', id=songId)
for o, name in [(oMitteleuropa1, 'boehme10')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='Mitteleuropa', id=songId)
# process with all feature extractors, store all features
ds.process()
ds.write('/_scratch/chinaMitteleuropaSplit-a.tab')
ds.write('/_scratch/chinaMitteleuropaSplit-a.csv')
ds.write('/_scratch/chinaMitteleuropaSplit-a.arff')
# create second data set from alternate collections
ds = features.DataSet(classLabel='Region')
ds.addFeatureExtractors(featureExtractors)
oChina2 = corpus.parse('essenFolksong/han2')
oMitteleuropa2 = corpus.parse('essenFolksong/boehme20')
# add works, defining the class value
for o, name in [(oChina2, 'han2')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='China', id=songId)
for o, name in [(oMitteleuropa2, 'boehme20')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='Mitteleuropa', id=songId)
# process with all feature extractors, store all features
ds.process()
ds.write('/_scratch/chinaMitteleuropaSplit-b.tab')
ds.write('/_scratch/chinaMitteleuropaSplit-b.csv')
ds.write('/_scratch/chinaMitteleuropaSplit-b.arff')
def xtestOrangeBayesA(self):
'''Using an already created test file with a BayesLearner.
'''
import orange # @UnresolvedImport # pylint: disable=import-error
data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
classifier = orange.BayesLearner(data)
for i in range(len(data)):
c = classifier(data[i])
print("original", data[i].getclass(), "BayesLearner:", c)
def xtestClassifiersA(self):
'''Using an already created test file with a BayesLearner.
'''
import orange, orngTree # @UnresolvedImport # pylint: disable=import-error
data1 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b1.tab')
data2 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b2.tab')
majority = orange.MajorityLearner
bayes = orange.BayesLearner
tree = orngTree.TreeLearner
knn = orange.kNNLearner
for classifierType in [majority, bayes, tree, knn]:
print('')
for classifierData, classifierStr, matchData, matchStr in [
(data1, 'data1', data1, 'data1'),
(data1, 'data1', data2, 'data2'),
(data2, 'data2', data2, 'data2'),
(data2, 'data2', data1, 'data1'),
]:
# train with data1
classifier = classifierType(classifierData)
mismatch = 0
for i in range(len(matchData)):
c = classifier(matchData[i])
if c != matchData[i].getclass():
mismatch += 1
print('%s %s: misclassified %s/%s of %s' % (classifierStr, classifierType, mismatch, len(matchData), matchStr))
# if classifierType == orngTree.TreeLearner:
# orngTree.printTxt(classifier)
def xtestClassifiersB(self):
'''Using an already created test file with a BayesLearner.
'''
import orange, orngTree # @UnresolvedImport # pylint: disable=import-error
data1 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b1.tab')
data2 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b2.tab', use = data1.domain)
data1.extend(data2)
data = data1
majority = orange.MajorityLearner
bayes = orange.BayesLearner
tree = orngTree.TreeLearner
knn = orange.kNNLearner
folds = 10
for classifierType in [majority, bayes, tree, knn]:
print('')
cvIndices = orange.MakeRandomIndicesCV(data, folds)
for fold in range(folds):
train = data.select(cvIndices, fold, negate=1)
test = data.select(cvIndices, fold)
for classifierData, classifierStr, matchData, matchStr in [
(train, 'train', test, 'test'),
]:
# train with data1
classifier = classifierType(classifierData)
mismatch = 0
for i in range(len(matchData)):
c = classifier(matchData[i])
if c != matchData[i].getclass():
mismatch += 1
print('%s %s: misclassified %s/%s of %s' % (classifierStr, classifierType, mismatch, len(matchData), matchStr))
def xtestOrangeClassifiers(self):
'''This test shows how to compare four classifiers; replace the file path with a path to the .tab data file.
'''
import orange, orngTree # @UnresolvedImport # pylint: disable=import-error
data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
# setting up the classifiers
majority = orange.MajorityLearner(data)
bayes = orange.BayesLearner(data)
tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
knn = orange.kNNLearner(data, k=21)
majority.name="Majority"
bayes.name="Naive Bayes"
tree.name="Tree"
knn.name="kNN"
classifiers = [majority, bayes, tree, knn]
# print the head
print("Possible classes:", data.domain.classVar.values)
print("Original Class", end=' ')
for l in classifiers:
print("%-13s" % (l.name), end=' ')
print()
for example in data:
print("(%-10s) " % (example.getclass()), end=' ')
for c in classifiers:
p = c([example, orange.GetProbabilities])
print("%5.3f " % (p[0]), end=' ')
print("")
def xtestOrangeClassifierTreeLearner(self):
import orange, orngTree # @UnresolvedImport # pylint: disable=import-error
data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
#tree = orngTree.TreeLearner(data)
for i in range(len(data)):
p = tree(data[i], orange.GetProbabilities)
print("%d: %5.3f (originally %s)" % (i+1, p[1], data[i].getclass()))
orngTree.printTxt(tree)
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = [FeatureExtractor]
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
|
{
"content_hash": "3478dfcf561d25d193d34ff86cf90305",
"timestamp": "",
"source": "github",
"line_count": 1731,
"max_line_length": 453,
"avg_line_length": 39.552281917966496,
"alnum_prop": 0.5717958080771197,
"repo_name": "arnavd96/Cinemiezer",
"id": "63f2b55a204fba9e383381fcf70053d1e91276b1",
"size": "68941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myvenv/lib/python3.4/site-packages/music21/features/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "300501"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "105126"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "290903"
},
{
"name": "JavaScript",
"bytes": "154747"
},
{
"name": "Jupyter Notebook",
"bytes": "558334"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37092739"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import copy
from panoptes_aggregation import reducers
from panoptes_aggregation.csv_utils import flatten_data, unflatten_data, order_columns
import json
import math
import io
import os
import pandas
import progressbar
import warnings
def first_filter(data):
first_time = data.created_at.min()
fdx = data.created_at == first_time
return data[fdx]
def last_filter(data):
last_time = data.created_at.max()
ldx = data.created_at == last_time
return data[ldx]
def reduce_csv(extracted_csv, filter='first', keywords={}, output='reductions', order=False):
if not isinstance(extracted_csv, io.IOBase):
extracted_csv = open(extracted_csv, 'r')
with extracted_csv as extracted_csv_in:
extracted = pandas.read_csv(extracted_csv_in, infer_datetime_format=True, parse_dates=['created_at'])
extracted.sort_values(['subject_id', 'created_at'], inplace=True)
subjects = extracted.subject_id.unique()
tasks = extracted.task.unique()
workflow_id = extracted.workflow_id.iloc[0]
extractor_name = extracted.extractor.iloc[0]
reducer_name = extractor_name.replace('extractor', 'reducer')
reduced_data = OrderedDict([
('subject_id', []),
('workflow_id', []),
('task', []),
('reducer', []),
('data', [])
])
widgets = [
'Reducing: ',
progressbar.Percentage(),
' ', progressbar.Bar(),
' ', progressbar.ETA()
]
pbar = progressbar.ProgressBar(widgets=widgets, max_value=len(subjects))
pbar.start()
for sdx, subject in enumerate(subjects):
idx = extracted.subject_id == subject
for task in tasks:
jdx = extracted.task == task
classifications = extracted[idx & jdx]
classifications = classifications.drop_duplicates()
if filter == 'first':
classifications = classifications.groupby(['user_name'], group_keys=False).apply(first_filter)
elif filter == 'last':
classifications = classifications.groupby(['user_name'], group_keys=False).apply(last_filter)
data = [unflatten_data(c) for cdx, c in classifications.iterrows()]
reduction = reducers.reducer[reducer_name](data, **keywords)
if isinstance(reduction, list):
for r in reduction:
reduced_data['subject_id'].append(subject)
reduced_data['workflow_id'].append(workflow_id)
reduced_data['task'].append(task)
reduced_data['reducer'].append(reducer_name)
reduced_data['data'].append(r)
else:
reduced_data['subject_id'].append(subject)
reduced_data['workflow_id'].append(workflow_id)
reduced_data['task'].append(task)
reduced_data['reducer'].append(reducer_name)
reduced_data['data'].append(reduction)
pbar.update(sdx + 1)
pbar.finish()
if len(reduced_data['data']) == 0:
warnings.warn('No data reduced with {0}'.format(reducer_name))
output_path, output_base = os.path.split(output)
output_base_name, output_ext = os.path.splitext(output_base)
output_name = os.path.join(output_path, '{0}_{1}.csv'.format(reducer_name, output_base_name))
flat_reduced_data = flatten_data(reduced_data)
if order:
flat_reduced_data = order_columns(flat_reduced_data, front=['choice', 'total_vote_count', 'choice_count'])
flat_reduced_data.to_csv(output_name, index=False)
return output_name
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="reduce data from panoptes classifications based on the extracted data (see extract_panoptes_csv)")
parser.add_argument("extracted_csv", help="the extracted csv file output from extract_panoptes_csv", type=argparse.FileType('r'))
parser.add_argument("-F", "--filter", help="how to filter a user makeing multiple classifications for one subject", type=str, choices=['first', 'last', 'all'], default='fisrt')
parser.add_argument("-k", "--keywords", help="keywords to be passed into the reducer in the form of a json string, e.g. \'{\"eps\": 5.5, \"min_samples\": 3}\' (note: double quotes must be used inside the brackets)", type=json.loads, default={})
parser.add_argument("-O", "--order", help="arrange the data columns in alphabetical order before saving", action="store_true")
parser.add_argument("-o", "--output", help="the base name for output csv file to store the reductions", type=str, default="reductions")
args = parser.parse_args()
reduce_csv(args.extracted_csv, filter=args.filter, keywords=args.keywords, output=args.output, order=args.order)
|
{
"content_hash": "de12e93b4195e5102af1f69851469388",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 249,
"avg_line_length": 44.50925925925926,
"alnum_prop": 0.6442687747035574,
"repo_name": "CKrawczyk/python-reducers-for-caesar",
"id": "24613f449dafcd481d73e40f021e61a11cf73259",
"size": "4830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/reduce_panoptes_csv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77144"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class ExpressRouteCircuit(Resource):
"""ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState state of
the resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState state of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2016_09_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuit, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.allow_classic_operations = kwargs.get('allow_classic_operations', None)
self.circuit_provisioning_state = kwargs.get('circuit_provisioning_state', None)
self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None)
self.authorizations = kwargs.get('authorizations', None)
self.peerings = kwargs.get('peerings', None)
self.service_key = kwargs.get('service_key', None)
self.service_provider_notes = kwargs.get('service_provider_notes', None)
self.service_provider_properties = kwargs.get('service_provider_properties', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None)
self.etag = kwargs.get('etag', None)
|
{
"content_hash": "2b796a404d47e5826b73f3e0023eecf8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 143,
"avg_line_length": 50.51578947368421,
"alnum_prop": 0.6780579287351531,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "59b5d8c04ac92d3739b515398f38d2984a6b91d5",
"size": "5273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/express_route_circuit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Tests for QuantizationDebugger."""
import csv
import io
from unittest import mock
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.lite.experimental.quantization_debugger import debugger
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import tracking
# pylint: disable=g-import-not-at-top
try:
from tensorflow.lite.python import metrics_portable as metrics
except ImportError:
from tensorflow.lite.python import metrics_nonportable as metrics
# pylint: enable=g-import-not-at-top
def _get_model():
"""Returns somple model with Conv2D and representative dataset gen."""
root = tracking.AutoTrackable()
kernel_in = np.array([-2, -1, 1, 2], dtype=np.float32).reshape((2, 2, 1, 1))
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 3, 3, 1], dtype=tf.float32)])
def func(inp):
kernel = tf.constant(kernel_in, dtype=tf.float32)
conv = tf.nn.conv2d(inp, kernel, strides=1, padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
root.f = func
to_save = root.f.get_concrete_function()
return to_save
def _calibration_gen():
for i in range(5):
yield [np.arange(9).reshape((1, 3, 3, 1)).astype(np.float32) * i]
def _convert_model(func):
"""Converts TF model to TFLite float model."""
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
return converter.convert()
def _quantize_model(func, calibration_gen, quantized_io=False, debug=True):
"""Quantizes model, in debug or normal mode."""
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.representative_dataset = calibration_gen
# Create a TFLite model with new quantizer and numeric verify ops.
converter.optimizations = [lite.Optimize.DEFAULT]
converter.experimental_new_quantizer = True
if debug:
converter._experimental_calibrate_only = True
calibrated = converter.convert()
return convert.mlir_quantize(
calibrated, enable_numeric_verify=True, fully_quantize=quantized_io)
else:
return converter.convert()
class QuantizationDebuggerTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.tf_model = _get_model()
cls.float_model = _convert_model(cls.tf_model)
cls.debug_model_float = _quantize_model(
cls.tf_model, _calibration_gen, quantized_io=False)
cls.debug_model_int8 = _quantize_model(
cls.tf_model, _calibration_gen, quantized_io=True)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_quantization_debugger_layer_metrics(self, quantized_io):
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
layer_debug_metrics={'l1_norm': lambda diffs: np.mean(np.abs(diffs))})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {
'num_elements': 9,
'stddev': 0.03850026,
'mean_error': 0.01673192,
'max_abs_error': 0.10039272,
'mean_square_error': 0.0027558778,
'l1_norm': 0.023704167,
}
self.assertLen(quant_debugger.layer_statistics, 1)
actual_metrics = next(iter(quant_debugger.layer_statistics.values()))
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
buffer = io.StringIO()
quant_debugger.layer_statistics_dump(buffer)
reader = csv.DictReader(buffer.getvalue().split())
actual_values = next(iter(reader))
expected_values = expected_metrics.copy()
expected_values.update({
'op_name': 'CONV_2D',
'tensor_idx': 7 if quantized_io else 8,
'scales': [0.15686275],
'zero_points': [-128],
'tensor_name': 'Identity' if quantized_io else 'Identity4'
})
for key, value in expected_values.items():
if isinstance(value, str):
self.assertEqual(value, actual_values[key])
elif isinstance(value, list):
self.assertAlmostEqual(
value[0], float(actual_values[key][1:-1]), places=5)
else:
self.assertAlmostEqual(value, float(actual_values[key]), places=5)
@parameterized.named_parameters(
('float_io', False),
('quantized_io', True),
)
@test_util.run_v2_only
def test_quantization_debugger_model_metrics(self, quantized_io):
if quantized_io:
debug_model = QuantizationDebuggerTest.debug_model_int8
else:
debug_model = QuantizationDebuggerTest.debug_model_float
options = debugger.QuantizationDebugOptions(
model_debug_metrics={'stdev': lambda x, y: np.std(x[0] - y[0])})
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
float_model_content=QuantizationDebuggerTest.float_model,
debug_dataset=_calibration_gen,
debug_options=options)
quant_debugger.run()
expected_metrics = {'stdev': 0.050998904}
actual_metrics = quant_debugger.model_statistics
self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
for key, value in expected_metrics.items():
self.assertAlmostEqual(value, actual_metrics[key], places=5)
@test_util.run_v2_only
def test_quantization_debugger_wrong_input_raises_ValueError(self):
def wrong_calibration_gen():
for _ in range(5):
yield [
np.ones((1, 3, 3, 1), dtype=np.float32),
np.ones((1, 3, 3, 1), dtype=np.float32)
]
quant_debugger = debugger.QuantizationDebugger(
quant_debug_model_content=QuantizationDebuggerTest.debug_model_float,
debug_dataset=wrong_calibration_gen)
with self.assertRaisesRegex(
ValueError, r'inputs provided \(2\).+inputs to the model \(1\)'):
quant_debugger.run()
@test_util.run_v2_only
def test_quantization_debugger_non_debug_model_raises_ValueError(self):
normal_quant_model = _quantize_model(
QuantizationDebuggerTest.tf_model, _calibration_gen, debug=False)
with self.assertRaisesRegex(
ValueError, 'Please check if the quantized model is in debug mode'):
debugger.QuantizationDebugger(
quant_debug_model_content=normal_quant_model,
debug_dataset=_calibration_gen)
@parameterized.named_parameters(
('empty quantization parameter', {
'quantization_parameters': {}
}, None),
('empty scales/zero points', {
'quantization_parameters': {
'scales': [],
'zero_points': []
}
}, None),
('invalid scales/zero points', {
'quantization_parameters': {
'scales': [1.0],
'zero_points': []
}
}, None),
('correct case', {
'quantization_parameters': {
'scales': [0.5, 1.0],
'zero_points': [42, 7]
}
}, (0.5, 42)),
)
def test_get_quant_params(self, tensor_detail, expected_value):
self.assertEqual(debugger._get_quant_params(tensor_detail), expected_value)
@mock.patch.object(metrics.TFLiteMetrics,
'increase_counter_debugger_creation')
def test_quantization_debugger_creation_counter(self, increase_call):
debug_model = QuantizationDebuggerTest.debug_model_float
debugger.QuantizationDebugger(
quant_debug_model_content=debug_model,
debug_dataset=_calibration_gen)
increase_call.assert_called_once()
if __name__ == '__main__':
test.main()
|
{
"content_hash": "a7151562b6613e35c2f348e1ca4ede84",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 34.76923076923077,
"alnum_prop": 0.6718289085545722,
"repo_name": "petewarden/tensorflow",
"id": "fcbcbb49be05bd48e6c1920d4bdfdcaa3ac0ed1a",
"size": "8825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/lite/experimental/quantization_debugger/debugger_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Load data
data_dir = 'temp'
mnist = read_data_sets(data_dir)
# Convert images into 28x28 (they are downloaded as 1x784)
train_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.train.images])
test_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.test.images])
# Convert labels into one-hot encoded vectors
train_labels = mnist.train.labels
test_labels = mnist.test.labels
# Set model parameters
batch_size = 100
learning_rate = 0.005
evaluation_size = 500
image_width = train_xdata[0].shape[0]
image_height = train_xdata[0].shape[1]
target_size = max(train_labels) + 1
num_channels = 1 # greyscale = 1 channel
generations = 500
eval_every = 5
conv1_features = 25
conv2_features = 50
max_pool_size1 = 2 # NxN window for 1st max pool layer
max_pool_size2 = 2 # NxN window for 2nd max pool layer
fully_connected_size1 = 100
x_input_shape = (batch_size, image_width, image_height, num_channels)
x_input = tf.placeholder(tf.float32, shape=x_input_shape)
y_target = tf.placeholder(tf.int32, shape=(batch_size))
eval_input_shape = (evaluation_size, image_width, image_height, num_channels)
eval_input = tf.placeholder(tf.float32, shape=eval_input_shape)
eval_target = tf.placeholder(tf.int32, shape=(evaluation_size))
# Convolutional layer variables
conv1_weight = tf.Variable(tf.truncated_normal([4, 4, num_channels, conv1_features],
stddev=0.1, dtype=tf.float32))
conv1_bias = tf.Variable(tf.zeros([conv1_features], dtype=tf.float32))
conv2_weight = tf.Variable(tf.truncated_normal([4, 4, conv1_features, conv2_features],
stddev=0.1, dtype=tf.float32))
conv2_bias = tf.Variable(tf.zeros([conv2_features], dtype=tf.float32))
# fully connected variables
resulting_width = image_width // (max_pool_size1 * max_pool_size2)
resulting_height = image_height // (max_pool_size1 * max_pool_size2)
full1_input_size = resulting_width * resulting_height * conv2_features
full1_weight = tf.Variable(tf.truncated_normal([full1_input_size, fully_connected_size1],
stddev=0.1, dtype=tf.float32))
full1_bias = tf.Variable(tf.truncated_normal([fully_connected_size1], stddev=0.1, dtype=tf.float32))
full2_weight = tf.Variable(tf.truncated_normal([fully_connected_size1, target_size],
stddev=0.1, dtype=tf.float32))
full2_bias = tf.Variable(tf.truncated_normal([target_size], stddev=0.1, dtype=tf.float32))
# Initialize Model Operations
def my_conv_net(input_data):
# First Conv-ReLU-MaxPool Layer
conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))
max_pool1 = tf.nn.max_pool(relu1, ksize=[1, max_pool_size1, max_pool_size1, 1],
strides=[1, max_pool_size1, max_pool_size1, 1], padding='SAME')
# Second Conv-ReLU-MaxPool Layer
conv2 = tf.nn.conv2d(max_pool1, conv2_weight, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
max_pool2 = tf.nn.max_pool(relu2, ksize=[1, max_pool_size2, max_pool_size2, 1],
strides=[1, max_pool_size2, max_pool_size2, 1], padding='SAME')
# Transform Output into a 1xN layer for next fully connected layer
final_conv_shape = max_pool2.get_shape().as_list()
final_shape = final_conv_shape[1] * final_conv_shape[2] * final_conv_shape[3]
flat_output = tf.reshape(max_pool2, [final_conv_shape[0], final_shape])
# First Fully Connected Layer
fully_connected1 = tf.nn.relu(tf.add(tf.matmul(flat_output, full1_weight), full1_bias))
# Second Fully Connected Layer
final_model_output = tf.add(tf.matmul(fully_connected1, full2_weight), full2_bias)
return(final_model_output)
model_output = my_conv_net(x_input)
test_model_output = my_conv_net(eval_input)
# Declare Loss Function (softmax cross entropy)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Create a prediction function
prediction = tf.nn.softmax(model_output)
test_prediction = tf.nn.softmax(test_model_output)
# Create accuracy function
def get_accuracy(logits, targets):
batch_predictions = np.argmax(logits, axis=1)
num_correct = np.sum(np.equal(batch_predictions, targets))
return(100. * num_correct/batch_predictions.shape[0])
# Create an optimizer
my_optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = my_optimizer.minimize(loss)
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start training loop
train_loss = []
train_acc = []
test_acc = []
for i in range(generations):
rand_index = np.random.choice(len(train_xdata), size=batch_size)
rand_x = train_xdata[rand_index]
rand_x = np.expand_dims(rand_x, 3)
rand_y = train_labels[rand_index]
train_dict = {x_input: rand_x, y_target: rand_y}
sess.run(train_step, feed_dict=train_dict)
temp_train_loss, temp_train_preds = sess.run([loss, prediction], feed_dict=train_dict)
temp_train_acc = get_accuracy(temp_train_preds, rand_y)
if (i+1) % eval_every == 0:
eval_index = np.random.choice(len(test_xdata), size=evaluation_size)
eval_x = test_xdata[eval_index]
eval_x = np.expand_dims(eval_x, 3)
eval_y = test_labels[eval_index]
test_dict = {eval_input: eval_x, eval_target: eval_y}
test_preds = sess.run(test_prediction, feed_dict=test_dict)
temp_test_acc = get_accuracy(test_preds, eval_y)
# Record and print results
train_loss.append(temp_train_loss)
train_acc.append(temp_train_acc)
test_acc.append(temp_test_acc)
acc_and_loss = [(i+1), temp_train_loss, temp_train_acc, temp_test_acc]
acc_and_loss = [np.round(x,2) for x in acc_and_loss]
print('Generation # {}. Train Loss: {:.2f}. Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
|
{
"content_hash": "65ce59b22dbf75128fbfd946023142f8",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 113,
"avg_line_length": 41.906666666666666,
"alnum_prop": 0.68119630925867,
"repo_name": "shanaka-desoysa/tensorflow",
"id": "bd7db895008bc568f6e9097052fe9df2032ddfb1",
"size": "6286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw11/cnn_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "523210"
},
{
"name": "Python",
"bytes": "154001"
}
],
"symlink_target": ""
}
|
"""Sensor for the Open Sky Network."""
from __future__ import annotations
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
LENGTH_KILOMETERS,
LENGTH_METERS,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import distance as util_distance, location as util_location
CONF_ALTITUDE = "altitude"
ATTR_ICAO24 = "icao24"
ATTR_CALLSIGN = "callsign"
ATTR_ALTITUDE = "altitude"
ATTR_ON_GROUND = "on_ground"
ATTR_SENSOR = "sensor"
ATTR_STATES = "states"
DOMAIN = "opensky"
DEFAULT_ALTITUDE = 0
EVENT_OPENSKY_ENTRY = f"{DOMAIN}_entry"
EVENT_OPENSKY_EXIT = f"{DOMAIN}_exit"
SCAN_INTERVAL = timedelta(seconds=12) # opensky public limit is 10 seconds
OPENSKY_ATTRIBUTION = (
"Information provided by the OpenSky Network (https://opensky-network.org)"
)
OPENSKY_API_URL = "https://opensky-network.org/api/states/all"
OPENSKY_API_FIELDS = [
ATTR_ICAO24,
ATTR_CALLSIGN,
"origin_country",
"time_position",
"time_velocity",
ATTR_LONGITUDE,
ATTR_LATITUDE,
ATTR_ALTITUDE,
ATTR_ON_GROUND,
"velocity",
"heading",
"vertical_rate",
"sensors",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RADIUS): vol.Coerce(float),
vol.Optional(CONF_NAME): cv.string,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_ALTITUDE, default=DEFAULT_ALTITUDE): vol.Coerce(float),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Open Sky platform."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
add_entities(
[
OpenSkySensor(
hass,
config.get(CONF_NAME, DOMAIN),
latitude,
longitude,
config.get(CONF_RADIUS),
config.get(CONF_ALTITUDE),
)
],
True,
)
class OpenSkySensor(SensorEntity):
"""Open Sky Network Sensor."""
def __init__(self, hass, name, latitude, longitude, radius, altitude):
"""Initialize the sensor."""
self._session = requests.Session()
self._latitude = latitude
self._longitude = longitude
self._radius = util_distance.convert(radius, LENGTH_KILOMETERS, LENGTH_METERS)
self._altitude = altitude
self._state = 0
self._hass = hass
self._name = name
self._previously_tracked = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
def _handle_boundary(self, flights, event, metadata):
"""Handle flights crossing region boundary."""
for flight in flights:
if flight in metadata:
altitude = metadata[flight].get(ATTR_ALTITUDE)
longitude = metadata[flight].get(ATTR_LONGITUDE)
latitude = metadata[flight].get(ATTR_LATITUDE)
icao24 = metadata[flight].get(ATTR_ICAO24)
else:
# Assume Flight has landed if missing.
altitude = 0
longitude = None
latitude = None
icao24 = None
data = {
ATTR_CALLSIGN: flight,
ATTR_ALTITUDE: altitude,
ATTR_SENSOR: self._name,
ATTR_LONGITUDE: longitude,
ATTR_LATITUDE: latitude,
ATTR_ICAO24: icao24,
}
self._hass.bus.fire(event, data)
def update(self):
"""Update device state."""
currently_tracked = set()
flight_metadata = {}
states = self._session.get(OPENSKY_API_URL).json().get(ATTR_STATES)
for state in states:
flight = dict(zip(OPENSKY_API_FIELDS, state))
callsign = flight[ATTR_CALLSIGN].strip()
if callsign != "":
flight_metadata[callsign] = flight
else:
continue
missing_location = (
flight.get(ATTR_LONGITUDE) is None or flight.get(ATTR_LATITUDE) is None
)
if missing_location:
continue
if flight.get(ATTR_ON_GROUND):
continue
distance = util_location.distance(
self._latitude,
self._longitude,
flight.get(ATTR_LATITUDE),
flight.get(ATTR_LONGITUDE),
)
if distance is None or distance > self._radius:
continue
altitude = flight.get(ATTR_ALTITUDE)
if altitude > self._altitude and self._altitude != 0:
continue
currently_tracked.add(callsign)
if self._previously_tracked is not None:
entries = currently_tracked - self._previously_tracked
exits = self._previously_tracked - currently_tracked
self._handle_boundary(entries, EVENT_OPENSKY_ENTRY, flight_metadata)
self._handle_boundary(exits, EVENT_OPENSKY_EXIT, flight_metadata)
self._state = len(currently_tracked)
self._previously_tracked = currently_tracked
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: OPENSKY_ATTRIBUTION}
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return "flights"
@property
def icon(self):
"""Return the icon."""
return "mdi:airplane"
|
{
"content_hash": "8dbc15811945112a34f6b1e1e3b30541",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 87,
"avg_line_length": 31.287128712871286,
"alnum_prop": 0.6039556962025316,
"repo_name": "toddeye/home-assistant",
"id": "b4278bcce3632cdafd02559930b1302b814f6758",
"size": "6320",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/opensky/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
Clams Git
=========
This is the executable source-code version of git.rst.
"""
from __future__ import unicode_literals
from clams import arg, Command
# Let's get some imports out of the way first. Since we're not going to actually
# implement any of git, we need to have a method of calling the system's git
# commands to perform the work for us. Subprocess is a great way to do that.
import subprocess
# First we will create our `git` subcommand. If you're paying attention,
# you'll notice that we have not mentioned the root command at all (but we'll
# see what this means a bit later).
git = Command('git')
# Let's start by implementing a very simple version of `git commit`.
#
# One of the standout features of Clams is that the argparse interface is
# exposed directly, for... you know... parsing args.
#
# The `arg` decorator has exactly the same interface as
# `argparse.ArgumentParser().add_argument`. In fact, in its most basic form,
# all the `arg` decorator does is pass its arguments directly to
# `add_argument` as `some_parser.add_argument(*args, **kwargs)`.
@git.register('commit')
@arg('-m', '--message')
@arg('-a', '--all', action='store_true', default=False)
def handler(all, message):
git_command = ['git', 'commit']
if all:
git_command.append('-a')
if message:
git_command.append('-m')
git_command.append(message)
return subprocess.call(git_command)
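# Purely as an illustration (a hedged sketch, not part of the original demo):
# a second subcommand can be registered with exactly the same pattern used for
# `commit` above. Because `arg` hands its arguments straight through to
# argparse's `add_argument`, ordinary argparse features such as positional
# arguments and `nargs` work here as well; the `paths` name below is our own
# choice, not something defined by clams or git.rst.
@git.register('add')
@arg('paths', nargs='*', default=['.'])
def handler(paths):
    git_command = ['git', 'add'] + list(paths)
    return subprocess.call(git_command)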
# TODO(nick): Finish converting this!
# ===================================
# And that's it! We can now run commands like:
#
# unb git add .
# unb git commit -m "My awesome commit message"
# unb git remote add my_remote_name https://.../myrepo.git
#
# Well... not quite yet.
#
# Until this point, all of the `add_command`s and `register`s we've been
# calling haven't actually built any argparse parsers!
#
# Instead, we've just been storing all the information to build these parsers
# in simple lists, tuples and dicts within each Command instance. To build the
# actual parsers (essential if we're going to actually *use* this), we need to
# call the root command's `init` function (with no arguments).
#
# This will create the main argparse entrypoint (`argparse.ArgumentParser`) and
# recursively build all of its subcommands by calling their `init` methods
# with itself as the parent/`command` argument. Each subcommand will,
# in-turn, call its own `init` method with itself as the parent/`command`
# argument. This allows us to create arbitrarily deep subcommands (until we
# hit Python's maximum recursion depth, anyway).
git.init()
if __name__ == '__main__':
# Just like argparse, we call the root command's `parse_args` method to
# retrieve the arguments from `sys.argv` and parse them.
#
# When a subcommand is found, the remainder of the arguments will be
# passed to its handler function, which will perform its action and
# (optionally) return some value. `parse_args` does return this value, but
# it isn't beneficial to us, so we'll just ignore it.
git.parse_args()
|
{
"content_hash": "276cbfbcc3583200796089affee14863",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 81,
"avg_line_length": 34.90909090909091,
"alnum_prop": 0.6966145833333334,
"repo_name": "unbservices/clams",
"id": "6e9b4e1b5f18c67756b8400962a77008691cfbe5",
"size": "3072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15547"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import mbuild as mb
from mbuild.utils.io import get_fn
class BaseTest:
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
tmpdir.chdir()
@pytest.fixture
def ethane(self):
from mbuild.examples import Ethane
return Ethane()
@pytest.fixture
def methane(self):
from mbuild.examples import Methane
return Methane()
@pytest.fixture
def h2o(self):
from mbuild.lib.moieties import H2O
return H2O()
@pytest.fixture
def ch2(self):
from mbuild.lib.moieties import CH2
return CH2()
@pytest.fixture
def ester(self):
from mbuild.lib.moieties import Ester
return Ester()
@pytest.fixture
def ch3(self):
from mbuild.lib.moieties import CH3
return CH3()
@pytest.fixture
def c3(self):
from mbuild.lib.atoms import C3
return C3()
@pytest.fixture
def n4(self):
from mbuild.lib.atoms import N4
return N4()
@pytest.fixture
def betacristobalite(self):
from mbuild.lib.surfaces import Betacristobalite
return Betacristobalite()
@pytest.fixture
def propyl(self):
from mbuild.examples import Alkane
return Alkane(3, cap_front=True, cap_end=False)
@pytest.fixture
def hexane(self, propyl):
class Hexane(mb.Compound):
def __init__(self):
super(Hexane, self).__init__()
self.add(propyl, 'propyl1')
self.add(mb.clone(propyl), 'propyl2')
mb.force_overlap(self['propyl1'],
self['propyl1']['down'],
self['propyl2']['down'])
return Hexane()
@pytest.fixture
def octane(self):
from mbuild.examples import Alkane
return Alkane(8, cap_front=True, cap_end=True)
@pytest.fixture
def sixpoints(self):
molecule = mb.Compound()
molecule.add(mb.Particle(name='C', pos=[5, 5, 5]), label='middle')
molecule.add(mb.Particle(name='C', pos=[6, 5, 5]), label='right')
molecule.add(mb.Particle(name='C', pos=[4, 5, 5]), label='left')
molecule.add(mb.Port(anchor=molecule[0]), label='up')
molecule['up'].translate([0, 1, 0])
molecule.add(mb.Port(anchor=molecule[0]), label='down')
molecule['down'].translate([0, -1, 0])
molecule.add(mb.Particle(name='C', pos=[5, 5, 6]), label='front')
molecule.add(mb.Particle(name='C', pos=[5, 5, 4]), label='back')
molecule.generate_bonds('C', 'C', 0.9, 1.1)
return molecule
@pytest.fixture
def benzene(self):
compound = mb.load(get_fn('benzene.mol2'))
compound.name = 'Benzene'
return compound
@pytest.fixture
def rigid_benzene(self):
compound = mb.load(get_fn('benzene.mol2'))
compound.name = 'Benzene'
compound.label_rigid_bodies()
return compound
@pytest.fixture
def benzene_from_parts(self):
ch = mb.load(get_fn('ch.mol2'))
ch.name = 'CH'
mb.translate(ch, -ch[0].pos)
ch.add(mb.Port(anchor=ch[0]), 'a')
mb.translate(ch['a'], [0, 0.07, 0])
mb.rotate_around_z(ch['a'], 120.0 * (np.pi/180.0))
ch.add(mb.Port(anchor=ch[0]), 'b')
mb.translate(ch['b'], [0, 0.07, 0])
mb.rotate_around_z(ch['b'], -120.0 * (np.pi/180.0))
benzene = mb.Compound(name='Benzene')
benzene.add(ch)
current = ch
for _ in range(5):
ch_new = mb.clone(ch)
mb.force_overlap(move_this=ch_new,
from_positions=ch_new['a'],
to_positions=current['b'])
current = ch_new
benzene.add(ch_new)
carbons = [p for p in benzene.particles_by_name('C')]
benzene.add_bond((carbons[0],carbons[-1]))
return benzene
@pytest.fixture
def box_of_benzenes(self, benzene):
n_benzenes = 10
benzene.name = 'Benzene'
filled = mb.fill_box(benzene,
n_compounds=n_benzenes,
box=[0, 0, 0, 4, 4, 4])
filled.label_rigid_bodies(discrete_bodies='Benzene', rigid_particles='C')
return filled
@pytest.fixture
def rigid_ch(self):
ch = mb.load(get_fn('ch.mol2'))
ch.name = 'CH'
ch.label_rigid_bodies()
mb.translate(ch, -ch[0].pos)
ch.add(mb.Port(anchor=ch[0]), 'a')
mb.translate(ch['a'], [0, 0.07, 0])
mb.rotate_around_z(ch['a'], 120.0 * (np.pi/180.0))
ch.add(mb.Port(anchor=ch[0]), 'b')
mb.translate(ch['b'], [0, 0.07, 0])
mb.rotate_around_z(ch['b'], -120.0 * (np.pi/180.0))
return ch
@pytest.fixture
def silane(self):
from mbuild.lib.moieties import Silane
return Silane()
|
{
"content_hash": "d6b7f4b44a85d798c713354cb7678c0a",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 81,
"avg_line_length": 29.366863905325445,
"alnum_prop": 0.5488615756598831,
"repo_name": "ctk3b/mbuild",
"id": "8bf6b4d242c3384a799229f7d7cd4383f635e425",
"size": "4963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mbuild/tests/base_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "45739"
},
{
"name": "Python",
"bytes": "380869"
},
{
"name": "Shell",
"bytes": "4281"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append("C:\\PyEmu")
sys.path.append("C:\\PyEmu\\lib")
from PyEmu import *
def ret_handler(emu, address):
num1 = emu.get_stack_argument("arg_0")
num2 = emu.get_stack_argument("arg_4")
sum = emu.get_register("EAX")
print "[*] Function took: %d, %d and the result is %d" % ( num1, num2, sum)
return True
emu = IDAPyEmu()
# Load the binary's code segment
code_start = SegByName(".text")
code_end = SegEnd( code_start )
while code_start <= code_end:
emu.set_memory( code_start, GetOriginalByte(code_start), size=1 )
code_start += 1
print "[*] Finished loading code section into memory."
# Load the binary's data segment
data_start = SegByName(".data")
data_end = SegEnd( data_start )
while data_start <= data_end:
emu.set_memory( data_start, GetOriginalByte(data_start), size=1)
data_start += 1
print "[*] Finished loading data section into memory."
# Set EIP to start executing at the function head
emu.set_register("EIP", 0x00401000)
# Set up the ret handler
emu.set_mnemonic_handler("ret", ret_handler)
# Set the function parameters for the call
emu.set_stack_argument(0x8, 0x00000001, name="arg_0")
emu.set_stack_argument(0xc, 0x00000002, name="arg_4")
# There are 10 instructions in this function
emu.execute( steps = 10 )
print "[*] Finished function emulation run."
|
{
"content_hash": "8effecebb0681f890fb6fc018eb6aa80",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 24.945454545454545,
"alnum_prop": 0.6763848396501457,
"repo_name": "psb-seclab/CTFStuff",
"id": "e72234efaf9eb447de28e7b2d27570c5ff765365",
"size": "1372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/gray_hat_python_code/src/addnum_function_call.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1004"
},
{
"name": "Python",
"bytes": "210746"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "posts_manager_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "57f0038f1d56b078e17038c1af7f87b3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 84,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.7178423236514523,
"repo_name": "KKorvin/posts_manager_django",
"id": "fa8e030cdfb938e4894e6a406b593efecfc36ce6",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "900"
},
{
"name": "HTML",
"bytes": "2315"
},
{
"name": "JavaScript",
"bytes": "2245"
},
{
"name": "Python",
"bytes": "5571"
}
],
"symlink_target": ""
}
|
"""
exception classes and constants handling test outcomes
as well as functions creating them
"""
import sys
from typing import Any
from typing import Callable
from typing import cast
from typing import Optional
from typing import TypeVar
TYPE_CHECKING = False # avoid circular import through compat
if TYPE_CHECKING:
from typing import NoReturn
from typing import Type # noqa: F401 (used in type string)
from typing_extensions import Protocol
else:
# typing.Protocol is only available starting from Python 3.8. It is also
# available from typing_extensions, but we don't want a runtime dependency
# on that. So use a dummy runtime implementation.
from typing import Generic
Protocol = Generic
class OutcomeException(BaseException):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None:
if msg is not None and not isinstance(msg, str):
error_msg = (
"{} expected string as 'msg' parameter, got '{}' instead.\n"
"Perhaps you meant to use a mark?"
)
raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__))
BaseException.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self) -> str:
if self.msg:
return self.msg
return "<{} instance>".format(self.__class__.__name__)
__str__ = __repr__
TEST_OUTCOME = (OutcomeException, Exception)
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = "builtins"
def __init__(
self,
msg: Optional[str] = None,
pytrace: bool = True,
allow_module_level: bool = False,
) -> None:
OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
self.allow_module_level = allow_module_level
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = "builtins"
class Exit(Exception):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(
self, msg: str = "unknown reason", returncode: Optional[int] = None
) -> None:
self.msg = msg
self.returncode = returncode
super().__init__(msg)
# Elaborate hack to work around https://github.com/python/mypy/issues/2087.
# Ideally would just be `exit.Exception = Exit` etc.
_F = TypeVar("_F", bound=Callable)
_ET = TypeVar("_ET", bound="Type[BaseException]")
class _WithException(Protocol[_F, _ET]):
Exception = None # type: _ET
__call__ = None # type: _F
def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]:
def decorate(func: _F) -> _WithException[_F, _ET]:
func_with_exception = cast(_WithException[_F, _ET], func)
func_with_exception.Exception = exception_type
return func_with_exception
return decorate
# exposed helper methods
@_with_exception(Exit)
def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn":
"""
Exit testing process.
:param str msg: message to display upon exit.
:param int returncode: return code to be used when exiting pytest.
"""
__tracebackhide__ = True
raise Exit(msg, returncode)
@_with_exception(Skipped)
def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn":
"""
Skip an executing test with the given message.
This function should be called only during testing (setup, call or teardown) or
during collection by using the ``allow_module_level`` flag. This function can
be called in doctests as well.
:kwarg bool allow_module_level: allows this function to be called at
module level, skipping the rest of the module. Default to False.
.. note::
It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies.
Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP
<https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_)
to skip a doctest statically.
"""
__tracebackhide__ = True
raise Skipped(msg=msg, allow_module_level=allow_module_level)
@_with_exception(Failed)
def fail(msg: str = "", pytrace: bool = True) -> "NoReturn":
"""
Explicitly fail an executing test with the given message.
:param str msg: the message to show the user as reason for the failure.
:param bool pytrace: if false the msg represents the full failure information and no
python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
class XFailed(Failed):
""" raised from an explicit call to pytest.xfail() """
@_with_exception(XFailed)
def xfail(reason: str = "") -> "NoReturn":
"""
Imperatively xfail an executing test or setup functions with the given reason.
This function should be called only during testing (setup, call or teardown).
.. note::
It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be
xfailed under certain conditions like known bugs or missing features.
"""
__tracebackhide__ = True
raise XFailed(reason)
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
"""Imports and returns the requested module ``modname``, or skip the
current test if the module cannot be imported.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module's ``__version__``
attribute must be at least this minimal version, otherwise the test is
still skipped.
:param str reason: if given, this reason is shown as the message when the
module cannot be imported.
:returns: The imported module. This should be assigned to its canonical
name.
Example::
docutils = pytest.importorskip("docutils")
"""
import warnings
__tracebackhide__ = True
    compile(modname, "", "eval")  # to catch syntax errors
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
try:
__import__(modname)
except ImportError as exc:
if reason is None:
reason = "could not import {!r}: {}".format(modname, exc)
raise Skipped(reason, allow_module_level=True) from None
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
# Imported lazily to improve start-up time.
from packaging.version import Version
if verattr is None or Version(verattr) < Version(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
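# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of how these helpers are typically used from a test module;
# the test and module names below are hypothetical.
#
#     import pytest
#
#     docutils = pytest.importorskip("docutils", minversion="0.12")
#
#     def test_publish():
#         if not hasattr(docutils, "core"):
#             pytest.skip("docutils.core unavailable in this build")
#         if docutils.core.publish_string("hello") is None:
#             pytest.fail("publish_string returned None", pytrace=False)
#
# Note that, thanks to _with_exception above, ``skip.Exception is Skipped``
# and ``fail.Exception is Failed``, so callers can catch the outcome
# exceptions through the public helper objects.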
|
{
"content_hash": "b8a04c9cf100826a1afb0483616322d5",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 106,
"avg_line_length": 32.779735682819386,
"alnum_prop": 0.6484343502217443,
"repo_name": "JoelMarcey/buck",
"id": "751cf9474fb3a845c18902c7ed92204b2c1a48f1",
"size": "7441",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "third-party/py/pytest/src/_pytest/outcomes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
}
|
__author__ = 'parsons'
from datetime import datetime
import requests
import json
import logging
from time import sleep
import os
import smtplib
SENT_ALERTS = []
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
with open('{0}/config.json'.format(CUR_DIR), 'r') as f:
config = json.load(f)
for name in config.keys():
DEFAULT_LIST = ['Dragonite', 'Snorlax', 'Aerodactyl', 'Venusaur', 'Charizard', 'Omastar', 'Ditto', 'Articuno', 'Zapdos', 'Moltres', 'Mewtwo', 'Mew', 'Blastoise', 'Kabutops', 'Grimer', 'Muk', 'Weezing']
logging.info('Running notify check for {0}'.format(name))
url = config[name]['url']
email = config[name]['email']
user_list = config[name]['list']
fromemail = config[name]['from_email']
poke_list = DEFAULT_LIST
if len(user_list) > 0:
for poke in user_list:
poke_list.append(poke)
response = requests.get(url)
try:
raw_data = json.loads(response.content)
        except ValueError:
            # Fall back to an empty result set so a bad or non-JSON response
            # does not abort the whole notify run.
            raw_data = {'pokemons': []}
for pokemon in raw_data['pokemons']:
pokemon['trainer'] = name
if check_for_pokemon(pokemon, poke_list):
if email != '':
if not quiet_time(config[name]):
send_notification(pokemon, email, fromemail)
def check_for_pokemon(pokemon, poke_list):
if pokemon['pokemon_name'] in poke_list:
return pokemon not in SENT_ALERTS
else:
return False
def quiet_time(config):
start = config['quiet_start']
end = config['quiet_end']
nowhour = datetime.now().hour
return nowhour >= start and nowhour < end
def send_notification(pokemon, email, fromaddr):
datestr = datetime.fromtimestamp(pokemon['disappear_time'] / 1000).strftime("%H:%M:%S")
with open('{0}/gmailcreds.json'.format(CUR_DIR), 'r') as f:
gmailcreds = json.load(f)
logging.info('Sending alert to {0}'.format(email))
msg = "\r\n".join([
"From: {0}".format(fromaddr),
"To: {0}".format(email),
"Subject: A {0} has been spotted!".format(pokemon['pokemon_name']),
"",
"{0} Disapears at {1}. https://www.google.com/maps/dir/Current+Location/{2},{3}".format(pokemon['pokemon_name'], datestr, pokemon['latitude'], pokemon['longitude'])
])
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login(fromaddr, gmailcreds['password'])
server.sendmail(fromaddr, email, msg)
server.quit()
SENT_ALERTS.append(pokemon)
if __name__ == '__main__':
logging.info('Starting PokemonGo-Map-Notify')
while 1:
try:
main()
sleep(15)
except:
logging.error('Error running app.', exc_info=True)
sleep(45)
pass
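# --- Editor's illustration (not part of the original script) ---
# main() expects a config.json keyed by trainer name; the keys read above are
# url, email, from_email, list, quiet_start and quiet_end. A hypothetical
# entry (placeholder values, not a real endpoint) could look like:
#
#     {
#         "ash": {
#             "url": "http://localhost:5000/raw_data",
#             "email": "alerts@example.com",
#             "from_email": "bot@example.com",
#             "list": ["Lapras"],
#             "quiet_start": 0,
#             "quiet_end": 7
#         }
#     }
#
# gmailcreds.json is assumed to hold at least {"password": "..."} for the
# SMTP login performed in send_notification().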
|
{
"content_hash": "c47b1ff0ac2357587bbda6598cad1554",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 209,
"avg_line_length": 32.80434782608695,
"alnum_prop": 0.5950960901259112,
"repo_name": "devinjparsons/PokemonGo-Map-Notify",
"id": "7b76551f57616bfa4b9da344c9afe0d0ddfe5867",
"size": "3018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pokemon_notifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3018"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
def read(fname):
"""
Returns the contents of the file in the top level directory with the name
``fname``.
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_files(path):
relative_to = os.path.dirname(path)
result = []
for dirpath, dirnames, filenames in os.walk(path):
result += [os.path.relpath(os.path.join(dirpath, i), relative_to)
for i in filenames]
return result
setup(
name = "superzippy",
version = read("VERSION").strip(),
author = "John Sullivan and other contributers",
author_email = "john@galahgroup.com",
description = (
"A Python utility for packaging up multi-file Python scripts into a "
"single file, dependencies and all."
),
license = "Apache v2.0",
keywords = "python packaging",
url = "https://www.github.com/brownhead/superzippy",
long_description = read("README.rst"),
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License"
],
packages = find_packages(),
entry_points = {
"console_scripts": [
"superzippy = superzippy.packaging:run"
]
},
# This ensures that the MANIFEST.IN file is used for both binary and source
# distributions.
include_package_data = True,
zip_safe = True,
data_files = [
(".", ["LICENSE", "README.rst", "VERSION"])
]
)
|
{
"content_hash": "14e477becd744714748a68052f83026f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.6146907216494846,
"repo_name": "pombredanne/superzippy",
"id": "30990f3d264cdb6c028538c7a33faac58fa1ed01",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43608"
},
{
"name": "Shell",
"bytes": "3024"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time_100', models.TimeField(verbose_name='100 m')),
('time_200', models.TimeField(verbose_name='200 m')),
('time_300', models.TimeField(verbose_name='300 m')),
('time_400', models.TimeField(verbose_name='400 m')),
('time_500', models.TimeField(verbose_name='500 m')),
('split_100', models.TimeField(null=True, verbose_name='100 split', blank=True)),
('split_200', models.TimeField(null=True, verbose_name='200 split', blank=True)),
('split_300', models.TimeField(null=True, verbose_name='300 split', blank=True)),
('split_400', models.TimeField(null=True, verbose_name='400 split', blank=True)),
('split_500', models.TimeField(null=True, verbose_name='500 split', blank=True)),
('pace_time', models.TimeField(verbose_name='Pacetime', blank=True)),
('remarks', models.CharField(max_length=200, verbose_name=b'Opmerkingen', blank=True)),
],
options={
'ordering': ['pace_time'],
'verbose_name': 'Testresultaat',
'verbose_name_plural': 'Testresultaten',
},
),
migrations.CreateModel(
name='SwimTest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField()),
],
options={
'ordering': ['-date'],
'verbose_name': 'zwemtest',
'verbose_name_plural': 'zwemtesten',
},
),
migrations.AddField(
model_name='record',
name='swim_test',
field=models.ForeignKey(to='swimtest.SwimTest'),
),
migrations.AddField(
model_name='record',
name='user',
field=models.ForeignKey(verbose_name=b'Atleet', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='record',
unique_together=set([('swim_test', 'user')]),
),
]
|
{
"content_hash": "401afae6f64c05bec018cac0e9d94b93",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 114,
"avg_line_length": 41.96825396825397,
"alnum_prop": 0.5423600605143721,
"repo_name": "allcaps/tvdordrecht.nl",
"id": "bf000aad715ff1656fdabcfea4209f912b0bf13b",
"size": "2668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvdordrecht/swimtest/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25879"
},
{
"name": "HTML",
"bytes": "267591"
},
{
"name": "JavaScript",
"bytes": "1003099"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "143335"
},
{
"name": "SCSS",
"bytes": "2755"
},
{
"name": "Shell",
"bytes": "2234"
}
],
"symlink_target": ""
}
|
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.formula.api as smf
df = pd.read_csv('train.csv')
dfTest = pd.read_csv('test.csv')
X = df[['OverallQual', 'GarageArea', 'GarageCars','TotalBsmtSF','TotRmsAbvGrd','FullBath','GrLivArea']]
lm = smf.ols(formula='SalePrice ~ GarageArea+GarageCars+OverallQual+TotalBsmtSF+TotRmsAbvGrd+GrLivArea', data=df).fit()
print lm.summary()
pred= lm.predict(dfTest)
print pred
np.savetxt("foo.csv", np.dstack((np.arange(1, pred.size+1),pred))[0],"%d,%s",header="Id,SalePrice")
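# Editor's note (illustrative): np.savetxt prefixes the header with '# ' by
# default, so the file starts with "# Id,SalePrice"; passing comments='' to
# np.savetxt would emit the bare "Id,SalePrice" header a Kaggle submission
# normally expects.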
|
{
"content_hash": "0d7ff039fcca1628dda01ad695296dec",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 119,
"avg_line_length": 26.772727272727273,
"alnum_prop": 0.733446519524618,
"repo_name": "talbarda/kaggle_predict_house_prices",
"id": "524d002725ab4ad84d318dbe985b3a71e67f2386",
"size": "589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "30596"
},
{
"name": "Python",
"bytes": "11679"
}
],
"symlink_target": ""
}
|
"""Collection of tests around log handling."""
import logging
import pytest
from cookiecutter.log import configure_logger
def create_log_records():
cookiecutter_logger = logging.getLogger('cookiecutter')
foo_logger = logging.getLogger('cookiecutter.foo')
foobar_logger = logging.getLogger('cookiecutter.foo.bar')
cookiecutter_logger.info('Welcome to Cookiecutter')
cookiecutter_logger.debug('Generating project from pytest-plugin')
foo_logger.info('Loading user config from home dir')
foobar_logger.debug("I don't know.")
foobar_logger.debug('I wanted to save the world.')
foo_logger.error('Aw, snap! Something went wrong')
cookiecutter_logger.debug('Successfully generated project')
@pytest.fixture
def info_messages():
return [
'INFO: Welcome to Cookiecutter',
'INFO: Loading user config from home dir',
'ERROR: Aw, snap! Something went wrong',
]
@pytest.fixture
def debug_messages():
return [
'INFO cookiecutter: '
'Welcome to Cookiecutter',
'DEBUG cookiecutter: '
'Generating project from pytest-plugin',
'INFO cookiecutter.foo: '
'Loading user config from home dir',
"DEBUG cookiecutter.foo.bar: "
"I don't know.",
'DEBUG cookiecutter.foo.bar: '
'I wanted to save the world.',
'ERROR cookiecutter.foo: '
'Aw, snap! Something went wrong',
'DEBUG cookiecutter: '
'Successfully generated project',
]
@pytest.fixture
def info_logger():
return configure_logger(stream_level='INFO')
@pytest.fixture
def debug_logger():
return configure_logger(stream_level='DEBUG')
@pytest.fixture
def debug_file(tmpdir):
return tmpdir / 'pytest-plugin.log'
@pytest.fixture
def info_logger_with_file(debug_file):
return configure_logger(
stream_level='INFO',
debug_file=str(debug_file),
)
def test_info_stdout_logging(caplog, info_logger, info_messages):
"""Test that stdout logs use info format and level."""
[stream_handler] = info_logger.handlers
assert isinstance(stream_handler, logging.StreamHandler)
assert stream_handler.level == logging.INFO
create_log_records()
stream_messages = [
stream_handler.format(r)
for r in caplog.records
if r.levelno >= stream_handler.level
]
assert stream_messages == info_messages
def test_debug_stdout_logging(caplog, debug_logger, debug_messages):
"""Test that stdout logs use debug format and level."""
[stream_handler] = debug_logger.handlers
assert isinstance(stream_handler, logging.StreamHandler)
assert stream_handler.level == logging.DEBUG
create_log_records()
stream_messages = [
stream_handler.format(r)
for r in caplog.records
if r.levelno >= stream_handler.level
]
assert stream_messages == debug_messages
def test_debug_file_logging(
caplog, info_logger_with_file, debug_file, debug_messages):
"""Test that logging to stdout uses a different format and level than \
    the file handler."""
[file_handler, stream_handler] = info_logger_with_file.handlers
assert isinstance(file_handler, logging.FileHandler)
assert isinstance(stream_handler, logging.StreamHandler)
assert stream_handler.level == logging.INFO
assert file_handler.level == logging.DEBUG
create_log_records()
assert debug_file.exists()
# Last line in the log file is an empty line
assert debug_file.readlines(cr=False) == debug_messages + ['']
|
{
"content_hash": "7018a9fb15c8b6ac8f22cba212abbd2f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 75,
"avg_line_length": 27.282442748091604,
"alnum_prop": 0.6787912702853945,
"repo_name": "luzfcb/cookiecutter",
"id": "a4d71102ee8d3c346bd1f4184b36deb1816d25c4",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3206"
},
{
"name": "Python",
"bytes": "215934"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostVnicConnectedToCustomizedDVPortEvent(vim, *args, **kwargs):
'''This event records when some host Virtual NICs were reconfigured to use DVPorts
with port level configuration, which might be different from the DVportgroup.'''
obj = vim.client.factory.create('ns0:HostVnicConnectedToCustomizedDVPortEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'vnic', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
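# --- Editor's illustration (not part of the generated module) ---
# The factory takes the five required properties positionally or by keyword;
# ``vim`` is whatever connection object exposes ``client.factory``. All values
# below are placeholders.
#
#     event = HostVnicConnectedToCustomizedDVPortEvent(
#         vim,
#         vnic=vnic_spec,
#         chainId=1,
#         createdTime=created,
#         key=42,
#         userName='administrator',
#         fullFormattedMessage='vnic reconnected to a customized DVPort')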
|
{
"content_hash": "81b4e5801b0dbec59356a05e83b93598",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 124,
"avg_line_length": 38.411764705882355,
"alnum_prop": 0.6271056661562021,
"repo_name": "xuru/pyvisdk",
"id": "960c70a37162ee40fde4d3b79966f030dfc35625",
"size": "1307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/host_vnic_connected_to_customized_dv_port_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
from oslo.config import cfg
from ironic.openstack.common import importutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().call(CONF, context, topic, msg, timeout)
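# Editor's illustration (hypothetical topic/method names): a typical blocking
# call using the msg format documented above would look like
#
#     result = call(ctxt, 'conductor',
#                   {'method': 'get_power_state',
#                    'args': {'node_id': node_id}},
#                   timeout=30)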
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resources in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
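# Editor's illustration (not part of the original module): with the naming
# convention described above, queue_get_for(ctxt, 'conductor', 'host-1')
# returns 'conductor.host-1', while queue_get_for(ctxt, 'conductor', None)
# falls back to the bare 'conductor' topic.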
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
|
{
"content_hash": "7f01115f18c90f4eb3bb0f20d28cccc1",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 38.02713178294574,
"alnum_prop": 0.6146162470696157,
"repo_name": "JioCloud/ironic",
"id": "c334015111687d4bcb53feb3f3f8b64e32fb0adf",
"size": "10574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/openstack/common/rpc/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
}
|
def greet(name):
print 'Hello', name
greet('Jack')
greet('Jill')
greet('Bob')
|
{
"content_hash": "35ad0c1aca1598c250cbd6aedc22fc6a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 23,
"avg_line_length": 16.4,
"alnum_prop": 0.6341463414634146,
"repo_name": "csvoss/onelinerizer",
"id": "3b5fbeac6d6eaa30e10a150143d9811c9cb57966",
"size": "141",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/5lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106646"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='scscp',
version='0.2.0',
description='Implementation of the SCSCP protocol',
url='https://github.com/OpenMath/py-scscp',
author='Luca De Feo',
license='MIT',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='openmath scscp',
packages=find_packages(),
install_requires=['openmath>=0.3.0', 'pexpect', 'six'],
)
|
{
"content_hash": "be4f62d03cfa3a1f22e3ddba734922b6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 30.57894736842105,
"alnum_prop": 0.6196213425129088,
"repo_name": "OpenMath/py-scscp",
"id": "8ce2809fd1ec7f3fb06007273d73a060f80636f9",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34194"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 2c579199005
Revises: 1f9c61031fa
Create Date: 2016-01-26 17:21:29.659591
"""
# revision identifiers, used by Alembic.
revision = '2c579199005'
down_revision = '1f9c61031fa'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'name')
### end Alembic commands ###
|
{
"content_hash": "b77376c9edd43724896b2b591e3df3af",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 23.192307692307693,
"alnum_prop": 0.6849087893864013,
"repo_name": "Encrylize/flask-blogger",
"id": "86bd7285dc763cb63f1ab8d432884a3e05c18d8d",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/2c579199005_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "478"
},
{
"name": "HTML",
"bytes": "14166"
},
{
"name": "JavaScript",
"bytes": "53574"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "55355"
}
],
"symlink_target": ""
}
|
import maya.cmds as mc
import maya.mel as mel
# appleseedMaya imports.
from logger import logger
def hyperShadePanelBuildCreateMenuCallback():
mc.menuItem(label="appleseed")
mc.menuItem(divider=True)
def hyperShadePanelBuildCreateSubMenuCallback():
return "rendernode/appleseed/surface"
def hyperShadePanelPluginChangeCallback(classification, changeType):
if 'rendernode/appleseed' in classification:
return 1
return 0
def createRenderNodeSelectNodeCategoriesCallback(flag, treeLister):
if flag == "allWithAppleseedUp":
mc.treeLister(treeLister, edit=True, selectPath="appleseed")
def createRenderNodePluginChangeCallback(classification):
if 'rendernode/appleseed' in classification:
return 1
return 0
def renderNodeClassificationCallback():
return "rendernode/appleseed"
def createAsRenderNode(nodeType=None, postCommand=None):
classification = mc.getClassification(nodeType)
logger.debug(
"CreateAsRenderNode called: nodeType = {0}, class = {1}, pcmd = {2}".format(
nodeType,
classification,
postCommand
)
)
for cl in classification:
if "rendernode/appleseed/surface" in cl.lower():
mat = mc.shadingNode(nodeType, asShader=True)
shadingGroup = mc.sets(
renderable=True,
noSurfaceShader=True,
empty=True,
name=mat + "SG"
)
mc.connectAttr(mat + ".outColor", shadingGroup + ".surfaceShader")
logger.debug("Created shading node {0} asShader".format(mat))
elif "rendernode/appleseed/texture/2d" in cl.lower():
mat = mc.shadingNode(nodeType, asTexture=True)
placeTex = mc.shadingNode("place2dTexture", asUtility=True)
mc.connectAttr(placeTex + ".outUV", mat + ".uv")
mc.connectAttr(placeTex + ".outUvFilterSize", mat + ".uvFilterSize")
logger.debug("Created shading node {0} asTexture2D".format(mat))
elif "rendernode/appleseed/texture/3d" in cl.lower():
mat = mc.shadingNode(nodeType, asTexture=True)
placeTex = mc.shadingNode("place3dTexture", asUtility=True)
mc.connectAttr(placeTex + ".wim[0]", mat + ".placementMatrix")
logger.debug("Created shading node {0} asTexture3D".format(mat))
else:
mat = mc.shadingNode(nodeType, asUtility=True)
logger.debug("Created shading node {0} asUtility".format(mat))
if postCommand is not None:
postCommand = postCommand.replace("%node", mat)
postCommand = postCommand.replace("%type", '\"\"')
mel.eval(postCommand)
return ""
def createRenderNodeCallback(postCommand, nodeType):
#logger.debug("createRenderNodeCallback called!")
for c in mc.getClassification(nodeType):
if 'rendernode/appleseed' in c.lower():
buildNodeCmd = (
"import appleseedMaya.hypershadeCallbacks;"
"appleseedMaya.hypershadeCallbacks.createAsRenderNode"
"(nodeType=\\\"{0}\\\", postCommand='{1}')").format(nodeType, postCommand)
return "string $cmd = \"{0}\"; python($cmd);".format(buildNodeCmd)
def buildRenderNodeTreeListerContentCallback(tl, postCommand, filterString):
melCmd = 'addToRenderNodeTreeLister("{0}", "{1}", "{2}", "{3}", "{4}", "{5}");'.format(
tl,
postCommand,
"appleseed/Surface",
"rendernode/appleseed/surface",
"-asShader",
""
)
logger.debug("buildRenderNodeTreeListerContentCallback: mel = %s" % melCmd)
mel.eval(melCmd)
melCmd = 'addToRenderNodeTreeLister("{0}", "{1}", "{2}", "{3}", "{4}", "{5}");'.format(
tl,
postCommand,
"appleseed/2D Textures",
"rendernode/appleseed/texture/2d",
"-asTexture",
""
)
logger.debug("buildRenderNodeTreeListerContentCallback: mel = %s" % melCmd)
mel.eval(melCmd)
melCmd = 'addToRenderNodeTreeLister("{0}", "{1}", "{2}", "{3}", "{4}", "{5}");'.format(
tl,
postCommand,
"appleseed/3D Textures",
"rendernode/appleseed/texture/3d",
"-asTexture",
""
)
logger.debug("buildRenderNodeTreeListerContentCallback: mel = %s" % melCmd)
mel.eval(melCmd)
melCmd = 'addToRenderNodeTreeLister("{0}", "{1}", "{2}", "{3}", "{4}", "{5}");'.format(
tl,
postCommand,
"appleseed/Utilities",
"rendernode/appleseed/utility",
"-asUtility",
""
)
logger.debug("buildRenderNodeTreeListerContentCallback: mel = %s" % melCmd)
mel.eval(melCmd)
def nodeCanBeUsedAsMaterialCallback(nodeId, nodeOwner):
logger.debug((
"nodeCanBeUsedAsMaterialCallback called: "
"nodeId = {0}, nodeOwner = {1}").format(nodeId, nodeOwner)
)
if nodeOwner == 'appleseedMaya':
return 1
return 0
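# --- Editor's registration sketch (not in the original file) ---
# These functions are intended to be wired into Maya's "callbacks" hooks when
# the plugin loads. A plausible registration, assuming hook names that match
# the callback names (check the plugin's own registration code before relying
# on this):
#
#     mc.callbacks(
#         addCallback=hyperShadePanelBuildCreateMenuCallback,
#         hook='hyperShadePanelBuildCreateMenu',
#         owner='appleseedMaya')
#     mc.callbacks(
#         addCallback=createRenderNodeCallback,
#         hook='createRenderNodeCommand',
#         owner='appleseedMaya')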
|
{
"content_hash": "cdbbed7d8a2eacb80bfdd0b0737aa5a3",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 91,
"avg_line_length": 32.671052631578945,
"alnum_prop": 0.621828433346758,
"repo_name": "est77/appleseed-maya",
"id": "70319589ea6842d48f57e2a604401f040cfb4473",
"size": "6291",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scripts/appleseedMaya/hypershadeCallbacks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4799"
},
{
"name": "C++",
"bytes": "208703"
},
{
"name": "CMake",
"bytes": "11033"
},
{
"name": "Objective-C",
"bytes": "1718"
},
{
"name": "Python",
"bytes": "37062"
}
],
"symlink_target": ""
}
|
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import extraroute_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.linuxbridge.common import constants
from neutron.plugins.linuxbridge.db import l2network_db_v2 as db
LOG = logging.getLogger(__name__)
class LinuxBridgeRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin
):
# history
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
# Device names start with "tap"
TAP_PREFIX_LEN = 3
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def get_port_from_device(cls, device):
port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:])
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
binding = db.get_network_binding(db_api.get_session(),
port['network_id'])
(network_type,
segmentation_id) = constants.interpret_vlan_id(binding.vlan_id)
entry = {'device': device,
'network_type': network_type,
'physical_network': binding.physical_network,
'segmentation_id': segmentation_id,
'network_id': port['network_id'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up']}
if cfg.CONF.AGENT.rpc_support_old_agents:
entry['vlan_id'] = binding.vlan_id
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
db.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
entry = {'device': device,
'exists': True}
if port['status'] != q_const.PORT_STATUS_DOWN:
# Set port status to DOWN
db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s up %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
if port['status'] != q_const.PORT_STATUS_ACTIVE:
# Set port status to ACTIVE
db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
'''Agent side of the linux bridge rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
'''
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic = topic
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, physical_network, vlan_id):
network_type, segmentation_id = constants.interpret_vlan_id(vlan_id)
kwargs = {'port': port,
'network_type': network_type,
'physical_network': physical_network,
'segmentation_id': segmentation_id}
if cfg.CONF.AGENT.rpc_support_old_agents:
kwargs['vlan_id'] = vlan_id
msg = self.make_msg('port_update', **kwargs)
self.fanout_cast(context, msg,
topic=self.topic_port_update)
class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin):
"""Implement the Neutron abstractions using Linux bridging.
A new VLAN is created for each network. An agent is relied upon
to perform the actual Linux bridge configuration on each host.
The provider extension is also supported. As discussed in
https://bugs.launchpad.net/neutron/+bug/1023156, this class could
be simplified, and filtering on extended attributes could be
handled, by adding support for extended attributes to the
NeutronDbPluginV2 base class. When that occurs, this class should
be updated to take advantage of it.
The port binding extension enables an external application relay
information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "router", "ext-gw-mode",
"binding", "quotas", "security-group",
"agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
self.extra_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE,
portbindings.CAPABILITIES: {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
db.initialize()
self._parse_network_vlan_ranges()
db.sync_network_states(self.network_vlan_ranges)
self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type
if self.tenant_network_type not in [constants.TYPE_LOCAL,
constants.TYPE_VLAN,
constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Service terminated!"),
self.tenant_network_type)
sys.exit(1)
self._setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
LOG.debug(_("Linux Bridge Plugin initialization complete"))
def _setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.callbacks = LinuxBridgeRpcCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.notifier = AgentNotifierApi(topics.AGENT)
self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.VLANS.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Agent terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max):
self._add_network(physical_network)
self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max))
def _add_network(self, physical_network):
if physical_network not in self.network_vlan_ranges:
self.network_vlan_ranges[physical_network] = []
def _extend_network_dict_provider(self, context, network):
binding = db.get_network_binding(context.session, network['id'])
if binding.vlan_id == constants.FLAT_VLAN_ID:
network[provider.NETWORK_TYPE] = constants.TYPE_FLAT
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.vlan_id == constants.LOCAL_VLAN_ID:
network[provider.NETWORK_TYPE] = constants.TYPE_LOCAL
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
else:
network[provider.NETWORK_TYPE] = constants.TYPE_VLAN
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.vlan_id
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == constants.TYPE_FLAT:
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.FLAT_VLAN_ID
elif network_type == constants.TYPE_VLAN:
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise q_exc.InvalidInput(error_message=msg)
elif network_type == constants.TYPE_LOCAL:
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.LOCAL_VLAN_ID
else:
msg = _("provider:network_type %s not supported") % network_type
raise q_exc.InvalidInput(error_message=msg)
if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = (_("Unknown provider:physical_network %s") %
physical_network)
raise q_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise q_exc.InvalidInput(error_message=msg)
return (network_type, physical_network, segmentation_id)
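    # Editor's illustration (not in the original file): a provider network
    # request that passes the checks above could carry, for example,
    #     {'provider:network_type': 'vlan',
    #      'provider:physical_network': 'physnet1',
    #      'provider:segmentation_id': 1005}
    # which this method returns as ('vlan', 'physnet1', 1005).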
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
session = context.session
with session.begin(subtransactions=True):
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == constants.TYPE_NONE:
raise q_exc.TenantNetworksDisabled()
elif network_type == constants.TYPE_VLAN:
physical_network, vlan_id = db.reserve_network(session)
else: # TYPE_LOCAL
vlan_id = constants.LOCAL_VLAN_ID
else:
# provider network
if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
db.reserve_specific_network(session, physical_network,
vlan_id)
# no reservation needed for TYPE_LOCAL
net = super(LinuxBridgePluginV2, self).create_network(context,
network)
db.add_network_binding(session, net['id'],
physical_network, vlan_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(LinuxBridgePluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, id):
session = context.session
with session.begin(subtransactions=True):
binding = db.get_network_binding(session, id)
super(LinuxBridgePluginV2, self).delete_network(context, id)
if binding.vlan_id != constants.LOCAL_VLAN_ID:
db.release_network(session, binding.physical_network,
binding.vlan_id, self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(LinuxBridgePluginV2, self).get_network(context,
id, None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(LinuxBridgePluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
session = context.session
port_data = port['port']
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
port = super(LinuxBridgePluginV2,
self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data,
port)
self._process_port_create_security_group(
context, port, sgids)
self.notify_security_groups_member_updated(context, port)
return port
def update_port(self, context, id, port):
original_port = self.get_port(context, id)
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
updated_port = super(LinuxBridgePluginV2, self).update_port(
context, id, port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify = self.update_security_group_on_port(
context, id, port, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
self._notify_port_updated(context, updated_port)
return updated_port
def delete_port(self, context, id, l3_port_check=True):
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(LinuxBridgePluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
def _notify_port_updated(self, context, port):
binding = db.get_network_binding(context.session,
port['network_id'])
self.notifier.port_update(context, port,
binding.physical_network,
binding.vlan_id)
|
{
"content_hash": "e33fbc70f44cb4b289d01053098e985a",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 79,
"avg_line_length": 45.12877263581489,
"alnum_prop": 0.5812118239778857,
"repo_name": "Brocade-OpenSource/OpenStack-DNRM-Neutron",
"id": "1f6383fc33edf807d685d959b0b6ac7f1fc5108f",
"size": "23020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/linuxbridge/lb_neutron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "4805111"
},
{
"name": "Shell",
"bytes": "9112"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from beanstream.gateway import Beanstream
from beanstream.billing import CreditCard
from beanstream.process_transaction import Adjustment
from django.conf import settings
from billing import Gateway, GatewayNotConfigured
from billing.gateway import CardNotSupported
from billing.signals import transaction_was_successful, \
transaction_was_unsuccessful
from billing.utils.credit_card import InvalidCard, Visa, \
MasterCard, Discover, AmericanExpress
class BeanstreamGateway(Gateway):
txnurl = "https://www.beanstream.com/scripts/process_transaction.asp"
profileurl = "https://www.beanstream.com/scripts/payment_profile.asp"
display_name = "Beanstream"
# A list of all the valid parameters, and which ones are required.
params = [
("requestType", True), # BACKEND Enter requestType=BACKEND for the recommended server to server integration method. Note that server to server typically cannot be used when hosting forms in the Beanstream Secure Webspace.
("merchant_id", True), # 9-digits Beanstream assigns one merchant ID number for each processing currency. Include the 9-digit Beanstream ID number here. Additional accounts may also have been issued for special services. Complete one full integration for each of the merchant IDs issued.
("trnOrderNumber", False), # but Recommended 30 alphanumeric (a/n) characters Include a unique order reference number if desired. If no number is passed, Beanstream will place the default transaction identification number (trnId) in this field. Custom order numbers will be used in duplicate transaction error checking. Order numbers are also required for Server to Server transaction queries. Integrators that wish to use the query function should pass custom values.
("trnAmount", True), # In the format 0.00. Max 2 decimal places. Max 9 digits total. This is the total dollar value of the purchase. This should represent the total of all taxes, shipping charges and other product/service costs as applicable.
("errorPage", True), # URL (encoded). Max 128 a/n characters. Not for use with server to server integrations. If a standard transaction request contains errors in billing or credit card information, the customer's browser will be re-directed to this page. Error messages will prompt the user to correct their data.
("approvedPage", False), # URL (encoded). Unlimited a/n characters. Beanstream provides default approved or declined transaction pages. For a seamless transaction flow, design unique pages and specify the approved transaction redirection URL here.
("declinedPage", False), # URL (encoded). Unlimited a/n characters. Specify the URL for your custom declined transaction notification page here.
("trnCardOwner", True), #* Max 64 a/n characters This field must contain the full name of the card holder exactly as it appears on their credit card.
("trnCardNumber", True), # Max 20 digits Capture the customer's credit card number.
("trnExpMonth", True), # 2 digits (January = 01) The card expiry month with January as 01 and December as 12.
("trnExpYear", True), # 2 digits (2011=11) Card expiry years must be entered as a number less than 50. In combination, trnExpYear and trnExpMonth must reflect a date in the future.
("trnCardCvd", False), # 4 digits Amex, 3 digits all other cards. Include the three or four-digit CVD number from the back of the customer's credit card. This information may be made mandatory using the "Require CVD" option in the Beanstream Order Settings module.
("ordName", True), #* Max 64 a/n characters. Capture the first and last name of the customer placing the order. This may be different from trnCardOwner.
("ordEmailAddress", True), # Max 64 a/n characters in the format a@b.com. The email address specified here will be used for sending automated email receipts.
("ordPhoneNumber", True), #* Min 7 a/n characters Max 32 a/n characters Collect a customer phone number for order follow-up.
("ordAddress1", True), #* Max 64 a/n characters Collect a unique street address for billing purposes.
("ordAddress2", False), # Max 64 a/n characters An optional variable is available for longer addresses.
("ordCity", True), #* Max 32 a/n characters The customer's billing city.
("ordProvince", True), #* 2 characters Province and state ID codes in this variable must match one of the available province and state codes.
("ordPostalCode", True), #* 16 a/n characters Indicates the customer's postal code for billing purposes.
("ordCountry", True), #* 2 characters Country codes must match one of the available ISO country codes.
("termURL", True), # URL (encoded) Specify the URL where the bank response codes will be collected after enters their VBV or SecureCode pin on the banking portal.
("vbvEnabled", False), # 1 digit When VBV service has been activated, Beanstream will attempt VBV authentication on all transactions. Use this variable to override our default settings and process VBV on selected transactions only. Pass vbvEnabled=1 to enable VBV authentication with an order. Pass vbvEnabled=0 to bypass VBV authentication on specific orders.
("scEnabled", False), # 1 digit When SecureCode service has been activated, Beanstream will attempt SC authentication on all transactions. Use this variable to override our default settings and process SC on selected transactions only. Pass scEnabled=1 to enable SC authentication with an order. Pass scEnabled=0 to bypass SC authentication on specific orders.
("SecureXID", True), # 20 digits Include the 3D secure transaction identifier as issued by the bank following VBV or SecureCode authentication.
("SecureECI", True), # 1 digit Provide the ECI status. 5=transaction authenticated. 6= authentication attempted but not completed.
("SecireCAVV", True), # 40 a/n characters Include the cardholder authentication verification value as issued by the bank.
]
def __init__(self, *args, **kwargs):
merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
if not merchant_settings or not merchant_settings.get("beanstream"):
raise GatewayNotConfigured("The '%s' gateway is not correctly "
"configured." % self.display_name)
beanstream_settings = merchant_settings["beanstream"]
self.supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
hash_validation = False
if kwargs.get("hash_algorithm", beanstream_settings.get("HASH_ALGORITHM", None)):
hash_validation = True
self.beangw = Beanstream(
hash_validation=hash_validation,
require_billing_address=kwargs.get("require_billing_address", False),
require_cvd=kwargs.get("require_cvd", False))
merchant_id = kwargs.pop("merchant_id", beanstream_settings["MERCHANT_ID"])
login_company = kwargs.pop("login_company", beanstream_settings["LOGIN_COMPANY"])
login_user = kwargs.pop("login_user", beanstream_settings["LOGIN_USER"])
login_password = kwargs.pop("login_password", beanstream_settings["LOGIN_PASSWORD"])
kwargs["payment_profile_passcode"] = beanstream_settings.get("PAYMENT_PROFILE_PASSCODE", None)
if hash_validation:
if not kwargs.get("hash_algorithm"):
kwargs["hash_algorithm"] = beanstream_settings["HASH_ALGORITHM"]
if not kwargs.get("hashcode"):
kwargs["hashcode"] = beanstream_settings["HASHCODE"]
self.beangw.configure(
merchant_id,
login_company,
login_user,
login_password,
**kwargs)
def convert_cc(self, credit_card, validate=True):
"""Convert merchant.billing.utils.CreditCard to beanstream.billing.CreditCard"""
card = CreditCard(
credit_card.first_name + " " + credit_card.last_name,
credit_card.number,
credit_card.month, credit_card.year,
credit_card.verification_value)
if validate:
self.validate_card(card)
return card
def _parse_resp(self, resp):
status = "FAILURE"
response = resp
if resp.approved():
status = "SUCCESS"
return {"status": status, "response": response}
def purchase(self, money, credit_card, options=None):
"""One go authorize and capture transaction"""
options = options or {}
txn = None
order_number = options.get("order_number") if options else None
if credit_card:
card = self.convert_cc(credit_card)
txn = self.beangw.purchase(money, card, None, order_number)
billing_address = options.get("billing_address")
if billing_address:
txn.params.update({"ordName": billing_address["name"],
"ordEmailAddress": billing_address["email"],
"ordPhoneNumber": billing_address["phone"],
"ordAddress1": billing_address["address1"],
"ordAddress2": billing_address.get("address2", ""),
"ordCity": billing_address["city"],
"ordProvince": billing_address["state"],
"ordCountry": billing_address["country"]})
elif options.get("customer_code"):
customer_code = options.get("customer_code", None)
txn = self.beangw.purchase_with_payment_profile(money, customer_code, order_number)
txn.validate()
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="purchase",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="purchase",
response=resp["response"])
return resp
def authorize(self, money, credit_card, options=None):
"""Authorization for a future capture transaction"""
# TODO: Need to add check for trnAmount
# For Beanstream Canada and TD Visa & MasterCard merchant accounts this value may be $0 or $1 or more.
# For all other scenarios, this value must be $0.50 or greater.
options = options or {}
order_number = options.get("order_number") if options else None
card = self.convert_cc(credit_card)
txn = self.beangw.preauth(money, card, None, order_number)
billing_address = options.get("billing_address")
if billing_address:
txn.params.update({"ordName": billing_address["name"],
"ordEmailAddress": billing_address["email"],
"ordPhoneNumber": billing_address["phone"],
"ordAddress1": billing_address["address1"],
"ordAddress2": billing_address.get("address2", ""),
"ordCity": billing_address["city"],
"ordProvince": billing_address["state"],
"ordCountry": billing_address["country"]})
if options and "order_number" in options:
            txn.order_number = options.get("order_number")
txn.validate()
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="authorize",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="authorize",
response=resp["response"])
return resp
def unauthorize(self, money, authorization, options=None):
"""Cancel a previously authorized transaction"""
txn = Adjustment(self.beangw, Adjustment.PREAUTH_COMPLETION, authorization, money)
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="unauthorize",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="unauthorize",
response=resp["response"])
return resp
def capture(self, money, authorization, options=None):
"""Capture funds from a previously authorized transaction"""
order_number = options.get("order_number") if options else None
txn = self.beangw.preauth_completion(authorization, money, order_number)
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="capture",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="capture",
response=resp["response"])
return resp
def void(self, identification, options=None):
"""Null/Blank/Delete a previous transaction"""
"""Right now this only handles VOID_PURCHASE"""
txn = self.beangw.void_purchase(identification["txnid"], identification["amount"])
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="void",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="void",
response=resp["response"])
return resp
def credit(self, money, identification, options=None):
"""Refund a previously 'settled' transaction"""
order_number = options.get("order_number") if options else None
txn = self.beangw.return_purchase(identification, money, order_number)
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="credit",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="credit",
response=resp["response"])
return resp
def recurring(self, money, creditcard, options=None):
"""Setup a recurring transaction"""
card = self.convert_cc(creditcard)
frequency_period = options['frequency_period']
frequency_increment = options['frequency_increment']
billing_address = options.get('billing_address', None) # must be a beanstream.billing.Address instance
txn = self.beangw.create_recurring_billing_account(
money, card, frequency_period, frequency_increment, billing_address)
resp = self._parse_resp(txn.commit())
if resp["status"] == "SUCCESS":
transaction_was_successful.send(sender=self,
type="recurring",
response=resp["response"])
else:
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=resp["response"])
return resp
def store(self, credit_card, options=None):
"""Store the credit card and user profile information
on the gateway for future use"""
card = self.convert_cc(credit_card)
billing_address = options.get("billing_address")
txn = self.beangw.create_payment_profile(card, billing_address)
resp = txn.commit()
status = "FAILURE"
response = None
if resp.approved() or resp.resp["responseCode"] == ["17"]:
status = "SUCCESS"
else:
response = resp
if status == "SUCCESS":
transaction_was_successful.send(sender=self,
type="recurring",
response=response)
else:
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=response)
return {"status": status, "response": response}
def unstore(self, identification, options=None):
"""Delete the previously stored credit card and user
profile information on the gateway"""
raise NotImplementedError
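# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes the
# project's usual `get_gateway` helper and `CreditCard` class from the
# `billing` package; card details, amounts and option values are placeholders.
#
#     from billing import get_gateway, CreditCard
#     gateway = get_gateway("beanstream")
#     card = CreditCard(first_name="Jane", last_name="Doe",
#                       number="4030000010001234", month=12, year=2030,
#                       verification_value="123")
#     result = gateway.purchase(10.00, card, options={
#         "order_number": "ORD-0001",
#         "billing_address": {"name": "Jane Doe", "email": "jane@example.com",
#                             "phone": "5555551234", "address1": "1 Main St",
#                             "city": "Victoria", "state": "BC", "country": "CA"}})
#     if result["status"] == "SUCCESS":
#         print(result["response"])
# ---------------------------------------------------------------------------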
|
{
"content_hash": "ea5d4782851e1f3e50d55ac92cfafa3e",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 476,
"avg_line_length": 60.21107266435986,
"alnum_prop": 0.6094477328889144,
"repo_name": "agiliq/merchant",
"id": "95f2ded52653704fd443300f28449d5184e060be",
"size": "17401",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "billing/gateways/beanstream_gateway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "22046"
},
{
"name": "Makefile",
"bytes": "409"
},
{
"name": "Python",
"bytes": "419711"
}
],
"symlink_target": ""
}
|
"""
Django settings for ob_census project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w58)8zhyk$h8$f!tau@b3k0utr4az1-1n(q_tnl%4f%cow$ofy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'census',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ob_census_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ob_census_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
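# Hedged production-hardening sketch (not part of the generated settings):
# the secret key and debug flag are commonly read from the environment so
# they never live in source control. The variable names are illustrative.
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#     DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#     ALLOWED_HOSTS = [h for h in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]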
|
{
"content_hash": "c6c8018904e182a35a93834c9923c7e4",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 71,
"avg_line_length": 26.33009708737864,
"alnum_prop": 0.6924778761061947,
"repo_name": "escobar022/cens_django",
"id": "42ad3618a4d589bd5f1327f8ce1f6b55a183840a",
"size": "2712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ob_census_project/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1237"
},
{
"name": "HTML",
"bytes": "7036"
},
{
"name": "JavaScript",
"bytes": "2182"
},
{
"name": "Python",
"bytes": "14384"
}
],
"symlink_target": ""
}
|
from twilio.rest import Client
# required for all twilio access tokens
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
ratePlans = client.wireless.rate_plans.list()
print(ratePlans)
|
{
"content_hash": "c38bb82ae2366eb7c0442f4435baf080",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.7854406130268199,
"repo_name": "teoreteetik/api-snippets",
"id": "fdde021df2e6fa4612403eaf26be13bd179a37de",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wireless/rateplans/list-example-1/list-example-1.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
}
|
from djblets.util.decorators import augment_method_from
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_watched_object import \
BaseWatchedObjectResource
class WatchedReviewGroupResource(BaseWatchedObjectResource):
"""Lists and manipulates entries for review groups watched by the user.
These are groups that the user has starred in their Dashboard.
This resource can be used for listing existing review groups and adding
new review groups to watch.
Each item in the resource is an association between the user and the
review group. The entries in the list are not the review groups themselves,
but rather an entry that represents this association by listing the
association's ID (which can be used for removing the association) and
linking to the review group.
"""
name = 'watched_review_group'
uri_name = 'review-groups'
profile_field = 'starred_groups'
star_function = 'star_review_group'
unstar_function = 'unstar_review_group'
@property
def watched_resource(self):
"""Return the watched resource.
This is implemented as a property in order to work around
a circular reference issue.
"""
return resources.review_group
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get(self, *args, **kwargs):
"""Redirects to the review group being watched.
Rather than returning a body with the entry, performing an HTTP GET
on this resource will redirect the client to the actual review group
being watched.
Clients must properly handle :http:`302` and expect this redirect
to happen.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of watched review groups.
Each entry in the list consists of a numeric ID that represents the
entry for the watched review group. This is not necessarily the ID
of the review group itself. It's used for looking up the resource
of the watched item so that it can be removed.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def create(self, *args, **kwargs):
"""Marks a review group as being watched.
The ID of the review group must be passed as ``object_id``, and will
store that review group in the list.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def delete(self, *args, **kwargs):
"""Deletes a watched review group entry.
This is the same effect as unstarring a review group. It does
not actually delete the review group, just the entry in the list.
"""
pass
watched_review_group_resource = WatchedReviewGroupResource()
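# Hedged quick reference (derived only from the docstrings and attributes
# above; the full URL prefix comes from Review Board's API routing and is
# intentionally not spelled out here):
#
#     GET    <list URL>                  -> association entries for the user
#     POST   <list URL>, object_id=<id>  -> stars the review group
#     GET    <entry URL>                 -> HTTP 302 redirect to the group
#     DELETE <entry URL>                 -> unstars (the group itself is kept)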
|
{
"content_hash": "d78aabce56d835eeba33bb299d264ab8",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 36.023809523809526,
"alnum_prop": 0.6956378056840714,
"repo_name": "reviewboard/reviewboard",
"id": "c0deb14a2de8e72e324429bce985b8634bd3e05f",
"size": "3026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/resources/watched_review_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from fabric.api import *
from fabric.contrib import *
|
{
"content_hash": "7b3741758da7135d5b29975cc44dbcd7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 28,
"avg_line_length": 26.5,
"alnum_prop": 0.7924528301886793,
"repo_name": "BukGet/devfiles",
"id": "1e3dc798bdb1698c4643830b17620b44d1e08c17",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/networking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28613"
}
],
"symlink_target": ""
}
|
"""
Implements test cases for the file system factory.
"""
import decimal
import re
import unittest
from datafinder.persistence.common.base_factory import BaseFileSystem
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.factory import FileSystem
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
_UNSUPPORTED_URI_SCHEME = "unknown"
_VALID_URI_SCHEME = "valid"
_VALID_PRINCIPAL_SEARCH_SCHEME = "ldap"
_VALID_SEARCH_SCHEME = "lucene+http"
class _ConcreteFactoryMock(BaseFileSystem):
def __init__(self, _):
BaseFileSystem.__init__(self)
def createDataStorer(self, _):
mock = SimpleMock(True)
mock.identifier = "id"
return mock
def createMetadataStorer(self, _):
mock = SimpleMock()
mock.identifier = "id"
return mock
def createPrincipalSearcher(self):
class PrincipalSearcherMock(object):
def searchPrincipal(self, _, __):
return list()
return PrincipalSearcherMock()
def createPrivilegeStorer(self, _):
mock = SimpleMock()
mock.identifier = "id"
return mock
@property
def hasCustomMetadataSupport(self):
return True
@property
def hasMetadataSearchSupport(self):
return True
@property
def hasPrivilegeSupport(self):
return True
def determineFreeDiskSpace(self):
return decimal.Decimal("200")
@property
def metadataIdentifierPattern(self):
return re.compile(".")
@property
def identifierPattern(self):
return re.compile(".")
class _ConcretePrincipalSearcherFactoryMock(BaseFileSystem):
def __init__(self, _):
BaseFileSystem.__init__(self)
def createPrincipalSearcher(self):
class PrincipalSearcherMock(object):
def searchPrincipal(self, _, __):
return ["", ""]
return PrincipalSearcherMock()
class _ConcreteSearcherFactoryMock(BaseFileSystem):
def __init__(self, _):
BaseFileSystem.__init__(self)
def createSearcher(self):
class SearcherMock(object):
def search(self, _, __):
return ["", ""]
return SearcherMock()
def _createFactoryMock(_, uriScheme, configuration):
if uriScheme == _UNSUPPORTED_URI_SCHEME:
raise PersistenceError("")
elif uriScheme == _VALID_PRINCIPAL_SEARCH_SCHEME:
return _ConcretePrincipalSearcherFactoryMock(configuration)
elif uriScheme == _VALID_SEARCH_SCHEME:
return _ConcreteSearcherFactoryMock(configuration)
else:
return _ConcreteFactoryMock(configuration)
class FileSystemTestCase(unittest.TestCase):
""" Test cases for the file system factory. """
def setUp(self):
FileSystem._createFactory = _createFactoryMock
def testNullFactory(self):
nullFileSystem = FileSystem()
self.assertEquals(nullFileSystem.searchPrincipal("pattern", "searchMode"), list())
self.assertTrue(not nullFileSystem.createFileStorer("identifier") is None)
self.assertFalse(nullFileSystem.hasCustomMetadataSupport)
self.assertFalse(nullFileSystem.hasMetadataSearchSupport)
self.assertFalse(nullFileSystem.hasPrivilegeSupport)
self.assertTrue(nullFileSystem.determineFreeDiskSpace() > 1)
self.assertEquals(nullFileSystem.baseUri, None)
self.assertEquals(nullFileSystem.baseConfiguration, None)
self.assertEquals(nullFileSystem.isAccessible, False)
nullFileSystem.updateCredentials(dict())
nullFileSystem.updatePrincipalSearchCredentials(dict())
nullFileSystem.release()
def testInvalidInterfaceType(self):
baseConf = SimpleMock()
baseConf.uriScheme = _UNSUPPORTED_URI_SCHEME
self.assertRaises(PersistenceError, FileSystem, baseConf)
def testValidInterfaceType(self):
baseConf = SimpleMock()
baseConf.uriScheme = _VALID_URI_SCHEME
fileSystem = FileSystem(baseConf)
fileStorer = fileSystem.createFileStorer("identifier")
self.assertFalse(fileStorer is None)
self.assertFalse(fileStorer.dataStorer is None)
self.assertFalse(fileStorer.metadataStorer is None)
self.assertFalse(fileStorer.privilegeStorer is None)
self.assertTrue(fileSystem.hasCustomMetadataSupport)
self.assertTrue(fileSystem.hasMetadataSearchSupport)
self.assertTrue(fileSystem.hasPrivilegeSupport)
self.assertEquals(fileSystem.determineFreeDiskSpace(), 200)
self.assertNotEquals(fileSystem.baseUri, None)
self.assertNotEquals(fileSystem.baseConfiguration, None)
self.assertEquals(fileSystem.isAccessible, True)
self.assertEquals(len(fileSystem.searchPrincipal("pattern", "searchMode")), 0)
fileSystem.updateCredentials(dict())
fileSystem.updatePrincipalSearchCredentials(dict())
fileSystem.release()
def testDifferentPrincipalSearch(self):
baseConf = SimpleMock()
baseConf.uriScheme = _VALID_URI_SCHEME
principalSearchBaseConf = SimpleMock()
principalSearchBaseConf.uriScheme = _VALID_PRINCIPAL_SEARCH_SCHEME
fileSystem = FileSystem(baseConf, principalSearchBaseConf)
self.assertEquals(len(fileSystem.searchPrincipal("pattern", "searchMode")), 2)
def testDifferentPrincipalSearchFallback(self):
baseConf = SimpleMock()
baseConf.uriScheme = _VALID_URI_SCHEME
principalSearchBaseConf = SimpleMock()
principalSearchBaseConf.uriScheme = "invalid_principal_scheme"
fileSystem = FileSystem(baseConf, principalSearchBaseConf)
self.assertEquals(len(fileSystem.searchPrincipal("pattern", "searchMode")), 0)
def testDifferentSearch(self):
baseConf = SimpleMock()
baseConf.uriScheme = _VALID_URI_SCHEME
searchBaseConf = SimpleMock()
searchBaseConf.uriScheme = _VALID_SEARCH_SCHEME
fileSystem = FileSystem(baseConf, baseSearchConfiguration=searchBaseConf)
self.assertEquals(len(fileSystem.search("*", "/")), 2)
def testDifferentSearchFallback(self):
baseConf = SimpleMock()
baseConf.uriScheme = _VALID_URI_SCHEME
searchBaseConf = SimpleMock()
searchBaseConf.uriScheme = "invalid_search_interface"
fileSystem = FileSystem(baseConf, baseSearchConfiguration=searchBaseConf)
self.assertEquals(len(fileSystem.search("*", "/")), 0)
|
{
"content_hash": "22d3b8034ade846a4228ff9027032985",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 90,
"avg_line_length": 35.14358974358974,
"alnum_prop": 0.6579600175105793,
"repo_name": "DLR-SC/DataFinder",
"id": "9d471fa9fd526edb7e56269c671d865324d1328e",
"size": "8593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/persistence/factory_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from .. import util
from . import attributes, exc as orm_exc, state as statelib
from .interfaces import EXT_CONTINUE
from ..sql import util as sql_util
from .util import _none_set, state_str
from .. import exc as sa_exc
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
session = query.session
context.runid = _new_runid()
filter_fns = [ent.filter_fn
for ent in query._entities]
filtered = id in filter_fns
single_entity = len(query._entities) == 1 and \
query._entities[0].supports_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(fn(x) for x, fn in zip(row, filter_fns))
custom_rows = single_entity and \
query._entities[0].custom_rows
(process, labels) = \
list(zip(*[
query_entity.row_processor(query,
context, custom_rows)
for query_entity in query._entities
]))
while True:
context.progress = {}
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if custom_rows:
rows = []
for row in fetch:
process[0](row, rows)
elif single_entity:
rows = [process[0](row, None) for row in fetch]
else:
rows = [util.KeyedTuple([proc(row, None) for proc in process],
labels) for row in fetch]
if filtered:
rows = util.unique_list(rows, filter_fn)
if context.refresh_state and query._only_load_props \
and context.refresh_state in context.progress:
context.refresh_state._commit(
context.refresh_state.dict, query._only_load_props)
context.progress.pop(context.refresh_state)
statelib.InstanceState._commit_all_states(
list(context.progress.items()),
session.identity_map
)
for state, (dict_, attrs) in context.partials.items():
state._commit(dict_, attrs)
for row in rows:
yield row
if not query._yield_per:
break
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive={})
for instance in iterator]
else:
result = list(iterator)
else:
mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
keys = [ent._label_name for ent in query._entities]
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load, _recursive={})
result.append(util.KeyedTuple(newrow, keys))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(query, key,
refresh_state=None, lockmode=None,
only_load_props=None):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
else:
ident = None
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if ident is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_get_clause = sql_util.adapt_criterion_to_null(
_get_clause, nones)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
q._params = params
if lockmode is not None:
version_check = True
q = q.with_lockmode(lockmode)
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def instance_processor(mapper, context, path, adapter,
polymorphic_from=None,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if polymorphic_from or refresh_state:
polymorphic_on = None
else:
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
polymorphic_instances = util.PopulateDict(
_configure_subclass_mapper(
mapper,
context, path, adapter)
)
version_id_col = mapper.version_id_col
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
if polymorphic_on is not None:
polymorphic_on = adapter.columns[polymorphic_on]
if version_id_col is not None:
version_id_col = adapter.columns[version_id_col]
identity_class = mapper._identity_class
new_populators = []
existing_populators = []
eager_populators = []
load_path = context.query._current_path + path \
if context.query._current_path.path \
else path
def populate_state(state, dict_, row, isnew, only_load_props):
if isnew:
if context.propagate_options:
state.load_options = context.propagate_options
if state.load_options:
state.load_path = load_path
if not new_populators:
_populators(mapper, context, path, row, adapter,
new_populators,
existing_populators,
eager_populators
)
if isnew:
populators = new_populators
else:
populators = existing_populators
if only_load_props is None:
for key, populator in populators:
populator(state, dict_, row)
elif only_load_props:
for key, populator in populators:
if key in only_load_props:
populator(state, dict_, row)
session_identity_map = context.session.identity_map
listeners = mapper.dispatch
translate_row = listeners.translate_row or None
create_instance = listeners.create_instance or None
populate_instance = listeners.populate_instance or None
append_result = listeners.append_result or None
populate_existing = context.populate_existing or mapper.always_refresh
invoke_all_eagers = context.invoke_all_eagers
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.issubset
def _instance(row, result):
if not new_populators and invoke_all_eagers:
_populators(mapper, context, path, row, adapter,
new_populators,
existing_populators,
eager_populators
)
if translate_row:
for fn in translate_row:
ret = fn(mapper, context, row)
if ret is not EXT_CONTINUE:
row = ret
break
if polymorphic_on is not None:
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row, result)
# determine identity key
if refresh_state:
identitykey = refresh_state.key
if identitykey is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
identitykey = mapper._identity_key_from_state(refresh_state)
else:
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols])
)
instance = session_identity_map.get(identitykey)
if instance is not None:
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
isnew = state.runid != context.runid
currentload = not isnew
loaded_instance = False
if not currentload and \
version_id_col is not None and \
context.version_check and \
mapper._get_state_attr_by_column(
state,
dict_,
mapper.version_id_col) != \
row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (state_str(state),
mapper._get_state_attr_by_column(
state, dict_,
mapper.version_id_col),
row[version_id_col]))
elif refresh_state:
            # out of band refresh_state detected (i.e. it's not in the
            # session.identity_map); honor it anyway. this can happen
# if a _get() occurs within save_obj(), such as
# when eager_defaults is True.
state = refresh_state
instance = state.obj()
dict_ = attributes.instance_dict(instance)
isnew = state.runid != context.runid
currentload = True
loaded_instance = False
else:
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
if create_instance:
for fn in create_instance:
instance = fn(mapper, context,
row, mapper.class_)
if instance is not EXT_CONTINUE:
manager = attributes.manager_of_class(
instance.__class__)
# TODO: if manager is None, raise a friendly error
# about returning instances of unmapped types
manager.setup_instance(instance)
break
else:
instance = mapper.class_manager.new_instance()
else:
instance = mapper.class_manager.new_instance()
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
state.key = identitykey
# attach instance to session.
state.session_id = context.session.hash_key
session_identity_map.add(state)
if currentload or populate_existing:
# state is being fully loaded, so populate.
# add to the "context.progress" collection.
if isnew:
state.runid = context.runid
context.progress[state] = dict_
if populate_instance:
for fn in populate_instance:
ret = fn(mapper, context, row, state,
only_load_props=only_load_props,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
else:
populate_state(state, dict_, row, isnew, only_load_props)
else:
populate_state(state, dict_, row, isnew, only_load_props)
if loaded_instance:
state.manager.dispatch.load(state, context)
elif isnew:
state.manager.dispatch.refresh(state, context, only_load_props)
elif state in context.partials or state.unloaded or eager_populators:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
if state in context.partials:
isnew = False
(d_, attrs) = context.partials[state]
else:
isnew = True
attrs = state.unloaded
context.partials[state] = (dict_, attrs)
if populate_instance:
for fn in populate_instance:
ret = fn(mapper, context, row, state,
only_load_props=attrs,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
else:
populate_state(state, dict_, row, isnew, attrs)
else:
populate_state(state, dict_, row, isnew, attrs)
for key, pop in eager_populators:
if key not in state.unloaded:
pop(state, dict_, row)
if isnew:
state.manager.dispatch.refresh(state, context, attrs)
if result is not None:
if append_result:
for fn in append_result:
if fn(mapper, context, row, state,
result, instancekey=identitykey,
isnew=isnew) is not EXT_CONTINUE:
break
else:
result.append(instance)
else:
result.append(instance)
return instance
return _instance
def _populators(mapper, context, path, row, adapter,
new_populators, existing_populators, eager_populators):
"""Produce a collection of attribute level row processor
callables."""
delayed_populators = []
pops = (new_populators, existing_populators, delayed_populators,
eager_populators)
for prop in mapper._props.values():
for i, pop in enumerate(prop.create_row_processor(
context,
path,
mapper, row, adapter)):
if pop is not None:
pops[i].append((prop.key, pop))
if delayed_populators:
new_populators.extend(delayed_populators)
def _configure_subclass_mapper(mapper, context, path, adapter):
"""Produce a mapper level row processor callable factory for mappers
inheriting this one."""
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" %
discriminator)
if sub_mapper is mapper:
return None
return instance_processor(
sub_mapper,
context,
path,
adapter,
polymorphic_from=mapper)
return configure_subclass_mapper
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
#assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" %
(state_str(state)))
has_key = bool(state.key)
result = False
if mapper.inherits and not mapper.concrete:
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper).from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [mapper._columntoproperty[col].key
for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state))
identity_key = mapper._identity_key_from_state(state)
if (_none_set.issubset(identity_key) and \
not mapper.allow_partial_pks) or \
_none_set.issuperset(identity_key):
util.warn("Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either)."
% state_str(state))
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
|
{
"content_hash": "85d86fe2eed58543b0073b2533461c75",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 79,
"avg_line_length": 35.908940397350996,
"alnum_prop": 0.5275946332242151,
"repo_name": "FRC-Team-3140/north-american-happiness",
"id": "af77fe3e0598371af4fa90ac7cd945ad179eacfc",
"size": "21922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sqlalchemy/orm/loading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6472"
},
{
"name": "JavaScript",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "6901716"
},
{
"name": "Shell",
"bytes": "3801"
}
],
"symlink_target": ""
}
|
"""The module contains the classes and methods that are used to
execute jobs and handle streams of data between jobs.
.. note::
Usually you do not have to interact with this module directly. The
:py:func:`jip.jobs.run` method deals with the construction of the pipe
graphs for you.
A job group is a set of jobs that have to be executed together because data is
piped between the jobs. We call a set of jobs and their dependencies a `dispatch
graph`. These graphs are created using this module. With such a graph the
following scenarios can be resolved.
**Single jobs:**
    A dispatch graph can consist of a single node that wraps a single job
    without any dependencies. In such a case no pipelining and no redirection
    will happen.
**Direct pipes**:
    Given two jobs *A* and *B*, a direct pipe is used between the process for
    *A* and the process for *B*. If *A* also writes an output file in
    addition to the direct pipe to *B*, this is handled by the dispatcher.
**Fan out**:
    Given three jobs *A*, *B*, and *C*, *A's* output is piped to both *B*
    and *C* in parallel.
The pipes are resolved using a `dispatcher graph`, which
can be created using the :py:func:`~jip.executils.create_dispatcher_graph`
function. The function returns a sorted list of
:py:class:`jip.executils.DispatcherNode` instances. The dispatcher nodes are
executable units that can be started with their `run` methods. They will
run asynchronously and you have to use the nodes' `wait` method to wait
for termination.
"""
import sys
import jip.db
from jip.logger import getLogger
import jip.cluster
import jip.jobs
import jip.profiler
import jip.utils
log = getLogger('jip.executils')
def create_dispatcher_graph(job, _nodes=None):
"""Create a dispatcher graph for a given job. If the job does not
have any pipe targets, a list with a single dispatcher node is returned,
    otherwise the dispatching graph is created from all of the pipe target jobs.
:param job: the job
    :type job: `jip.db.Job`
:returns: list of dispatcher nodes
:rtype: list of `jip.executils.DispatcherNode` instances
"""
# collect all jobs that are part
# of this graph
if len(job.pipe_to) == 0 and _nodes is None:
return [DispatcherNode(job)]
# do not operate on jobs that take pipes as long as this
# is not a recursive call, in which case the _nodes dict
# will be initialized
if len(job.pipe_from) > 0 and _nodes is None:
return []
# _initialized marks the recursion start
_initialized = False
if _nodes is None:
_initialized = True
_nodes = {}
# check if there is a node for the jobs
node = _nodes.get(job, None)
if node is not None:
# node exists, skip it
return None
    # search for a node with the same target
for n in _nodes.itervalues():
if set(job.pipe_to) == n.targets:
node = n
break
else:
# create a new node
node = DispatcherNode()
_nodes[job] = node
node.sources.add(job)
# add the target
for pipe_to in job.pipe_to:
node.targets.add(pipe_to)
# recursive call
for pipe_to in job.pipe_to:
create_dispatcher_graph(pipe_to, _nodes)
if _initialized:
# I am the first iteration
# and we create edges between the nodes based on source/target
for k, node in _nodes.iteritems():
for target in node.targets:
for k, other in _nodes.iteritems():
if target in other.sources:
other.depends_on.append(node)
node.children.append(other)
return _sort_dispatcher_nodes(set(_nodes.itervalues()))
return None
def _sort_dispatcher_nodes(nodes):
count = {}
for node in nodes:
count[node] = 0
for node in nodes:
for successor in node.children:
count[successor] += 1
ready = [node for node in nodes if count[node] == 0]
result = []
while ready:
node = ready.pop(-1)
result.append(node)
for successor in node.children:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
return result
class DispatcherNode(object):
"""Node element of a dispatcher graph that handles pipes between jobs.
A dispatcher node wraps around a single job in a dispatcher graph and
is able to execute the job and wait for its termination.
"""
def __init__(self, job=None):
"""Create a new dispatcher node
:param job: the job
:type job: :class:`jip.db.Job`
"""
self.sources = set([])
self.targets = set([])
self.depends_on = []
self.children = []
self.processes = []
if job is not None:
self.sources.add(job)
def __repr__(self):
return "[%s->%s]" % (",".join([str(j) for j in self.sources]),
(",".join([str(j) for j in self.targets])))
def run(self, profiler=False):
"""Run the job wrapped but this node.
:param profiler: enable job profiling
"""
from jip.db import STATE_RUNNING
num_sources = len(self.sources)
num_targets = len(self.targets)
has_groups = len(filter(lambda x: len(x.group_to) > 0,
self.sources)) > 0
if has_groups:
for job in self.sources:
default_in = None
try:
default_in = job.configuration.get_default_input()
except LookupError:
pass
# open default input stream, just in case
if job.stream_in == sys.stdin and \
default_in and \
default_in.streamable and \
default_in.get():
job.stream_in = open(default_in.get())
log.info("Open jobs input stream on %s", default_in.get())
jip.jobs.set_state(job, STATE_RUNNING, update_children=False)
p = job.run()
self.processes.append(p)
if profiler:
jip.profiler.Profiler(p, job).start()
log.info("Waiting for job group process: %s", job)
p.wait()
return
if num_targets == 0:
# no targets, just run the source jobs
# as they are
for job in self.sources:
default_in = None
try:
default_in = job.configuration.get_default_input()
except LookupError:
pass
# open default input stream, just in case
if job.stream_in == sys.stdin and \
default_in and \
default_in.streamable and \
default_in.get() and len(default_in._value) == 1:
job.stream_in = open(default_in.get())
log.info("Open jobs input stream on %s", default_in.get())
jip.jobs.set_state(job, STATE_RUNNING, update_children=False)
p = job.run()
self.processes.append(p)
if profiler:
jip.profiler.Profiler(p, job).start()
return
if num_sources == num_targets:
self.processes.extend(_FanDirect(self.sources,
self.targets).run(
profiler=profiler
))
return
if num_sources == 1:
self.processes.extend(_FanOut(self.sources,
self.targets).run(
profiler=profiler
))
return
if num_targets == 1:
self.processes.extend(_FanIn(self.sources,
self.targets).run(
profiler=profiler
))
return
raise ValueError("Unsupported fan operation "
"for %d sources and %d targets"
% (num_sources, num_targets))
def wait(self):
"""Blocks until this nodes process is terminated and returns
True if the process terminated with 0.
:returns: True if the job finished successfully
"""
# check the processes
success = True
for process, job in zip(self.processes, self.sources):
try:
log.debug("%s | waiting for process to finish", job)
ret_state = process.wait()
if ret_state != 0:
success = False
log.info("%s | finished with %d", job, ret_state)
except OSError as err:
if err.errno != 10:
raise
success = False
return success
class _FanDirect(object):
def __init__(self, sources, targets):
self.sources = list(sources)
self.targets = list(targets)
def run(self, profiler=False):
import os
from subprocess import PIPE
from jip.dispatcher import dispatch
from jip.db import STATE_RUNNING
if len(self.sources) != len(self.targets):
raise ValueError("Number of sources != targets!")
processes = []
direct_outs = jip.utils.flat_list([job.get_pipe_targets()
for job in self.sources])
if len(filter(lambda x: x is not None, direct_outs)) == 0:
# no extra output file dispatching is needed,
# we can just create the pipes directly
for source, target in zip(self.sources, self.targets):
source.stream_out = PIPE
jip.jobs.set_state(source, STATE_RUNNING,
update_children=False)
process = source.run()
target.stream_in = process.stdout
processes.append(process)
if profiler:
jip.profiler.Profiler(process, source).start()
return processes
inputs = []
outputs = []
for source, target in zip(self.sources, self.targets):
i, o = os.pipe()
i = os.fdopen(i, 'r')
o = os.fdopen(o, 'w')
source.stream_out = PIPE
target.stream_in = i
outputs.append(o)
for source, target in zip(self.sources, self.targets):
jip.jobs.set_state(source, STATE_RUNNING, update_children=False)
process = source.run()
inputs.append(process.stdout)
processes.append(process)
if profiler:
jip.profiler.Profiler(process, source).start()
# start the dispatcher
direct_outs = [open(f, 'wb') for f in direct_outs]
dispatch(inputs, outputs, direct_outs)
return processes
class _FanOut(_FanDirect):
def run(self, profiler=False):
import os
from subprocess import PIPE
from jip.dispatcher import dispatch_fanout
from jip.db import STATE_RUNNING
if len(self.sources) != 1 or len(self.targets) == 0:
raise ValueError("Number of sources != 1 or targets == 0!")
processes = []
direct_outs = jip.utils.flat_list([job.get_pipe_targets()
for job in self.sources])
inputs = []
outputs = []
source = self.sources[0]
source.stream_out = PIPE
num_targets = len(self.targets)
for target in self.targets:
i, o = os.pipe()
i = os.fdopen(i, 'r')
o = os.fdopen(o, 'w')
log.debug("%s | set stream_in to dispatcher pipe :: %s", target, i)
target.stream_in = i
outputs.append(o)
jip.jobs.set_state(source, STATE_RUNNING, update_children=False)
process = source.run()
inputs.append(process.stdout)
processes.append(process)
if profiler:
jip.profiler.Profiler(process, source).start()
empty = [None] * (num_targets - 1)
# start the dispatcher
direct_outs = [open(f, 'wb') for f in direct_outs]
log.debug("%s | fanout: %d targets", source, len(outputs))
ins = inputs + empty
douts = direct_outs + empty
while len(ins) < len(outputs):
ins.append(None)
while len(douts) < len(outputs):
douts.append(None)
dispatch_fanout(ins, outputs, douts)
return processes
class _FanIn(_FanDirect):
def run(self, profiler=False):
import os
from subprocess import PIPE
from jip.dispatcher import dispatch_fanin
from jip.db import STATE_RUNNING
if len(self.sources) == 0 or len(self.targets) != 1:
raise ValueError("Number of sources == 0 or targets != 1!")
processes = []
direct_outs = jip.utils.flat_list([job.get_pipe_targets()
for job in self.sources])
inputs = []
target = self.targets[0]
outputs = []
i, o = os.pipe()
i = os.fdopen(i, 'r')
o = os.fdopen(o, 'w')
outputs.append(o)
target.stream_in = i
num_sources = len(self.sources)
empty = [None] * (num_sources - 1)
for source in self.sources:
            source.stream_out = PIPE
for source in self.sources:
jip.jobs.set_state(source, STATE_RUNNING, update_children=False)
process = source.run()
inputs.append(process.stdout)
processes.append(process)
if profiler:
jip.profiler.Profiler(process, source).start()
# start the dispatcher
direct_outs = [open(f, 'wb') for f in direct_outs]
log.debug("%s | fanin: %d sources", source, len(inputs))
outs = outputs + empty
while len(outs) < len(inputs):
outs.append(None)
while len(direct_outs) < len(inputs):
direct_outs.append(None)
dispatch_fanin(inputs, outs, direct_outs)
return processes
|
{
"content_hash": "d74bd10bd7ce3c0caf464b38f7d6f57c",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 79,
"avg_line_length": 35.75990099009901,
"alnum_prop": 0.5486952308437738,
"repo_name": "thasso/pyjip",
"id": "be150d5150cdc89715f63c75785c6a99e1c2dde1",
"size": "14469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jip/executils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8031"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "714627"
},
{
"name": "Shell",
"bytes": "800"
}
],
"symlink_target": ""
}
|
#-------------------------------------------------------------------------------
# IMPORTS
#-------------------------------------------------------------------------------
import Queue
import SocketServer
import threading
#-------------------------------------------------------------------------------
# VARIABLES
#-------------------------------------------------------------------------------
data_queue = None
#-------------------------------------------------------------------------------
# CLASSES
#-------------------------------------------------------------------------------
class GoalTCPHandler(SocketServer.BaseRequestHandler):
"""
    The client only sends a connect request and after that it
    just listens for us to send output.
"""
def handle( self ):
"""Callback for TCP server"""
global data_queue
socket = self.request
if( not data_queue.empty() ):
#=========================================================
            # Drain the queue and send every pending item to the client
#=========================================================
while( not data_queue.empty() ):
next_data = data_queue.get()
socket.sendto( next_data, self.client_address )
#-------------------------------------------------------------------------------
# PROCEDURES
#-------------------------------------------------------------------------------
#=====================================================================
# IMPORTANT: Don't make part of class def!!
#=====================================================================
def GoalTCPStartHandler( que ):
"""
TCP Start Handler Function. Cannot be in a class
"""
global data_queue
data_queue = que
#HOST, PORT = "192.168.1.250", 4545
HOST, PORT = "localhost", 4545
server = SocketServer.TCPServer((HOST, PORT), GoalTCPHandler )
print "Create Socket Thread"
server_thread = threading.Thread( target=server.serve_forever )
server_thread.daemon = True
print "Start Socket Thread"
server_thread.start()
print "Socket Thread Started"
|
{
"content_hash": "fb9047ab42476d15e1764a4de7361050",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 40.016949152542374,
"alnum_prop": 0.33587462939432444,
"repo_name": "SMS-Raiders/First2016",
"id": "9a767ac9d4193dc4d1fdfbb8e9c8d8fb7d17c62b",
"size": "2391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vision_proc/GoalTCPServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "41318"
},
{
"name": "Python",
"bytes": "15642"
}
],
"symlink_target": ""
}
|
"""elastic-companion cli runner. To see the available options, run:
>>> companion -h
"""
import logging
import argparse
from . import setup, health, reindex, backup, deletebulk
# Create main parser
parser = argparse.ArgumentParser(description='CLI tool for Elastic search.')
parser.add_argument('-u', '--url', help='The host url to connect to',
default='http://localhost:9200')
parser.add_argument('--log-level', help='The log level', default='INFO')
command_parser = parser.add_subparsers(help='Command options', dest='command')
# http://stackoverflow.com/a/23354355/2021517
command_parser.required = True
# Create parser for status command
health_parser = command_parser.add_parser('health',
help='Shows health of the cluster')
health_parser.add_argument('-l', '--level', help='The status level',
choices=['cluster', 'indices', 'shards'],
default='cluster')
health_parser.set_defaults(func=health.run)
# Create parser for setup command
setup_parser = command_parser.add_parser('setup', help='Perform index setup')
setup_parser.add_argument('-r', '--reset', action='store_true',
help="""Reset indexes before updating, BE CAREFUL,
THIS WILL DELETE ALL DATA""")
setup_parser.add_argument('-p', '--data-path',
help='Directory containing the setup data files',
default='./data')
setup_parser.set_defaults(func=setup.run)
# Create parser for reindex command
reindex_parser = command_parser.add_parser('reindex', help='Re-index an index')
reindex_parser.add_argument('source_index_name',
help='The name of the index to re-index')
reindex_parser.add_argument('target_index_name',
help='''The target index name. The name can be
specific such as "myindex" or use a date pattern
such as "myindex-{:%%Y-%%m-%%d}". The date for a
date pattern is read from the field specified by the
DATEFIELD parameter''')
reindex_parser.add_argument('-d', '--datefield',
help='The field to base the date on')
reindex_parser.add_argument('--deletedoc', help='Delete the source document',
action='store_true')
reindex_parser.set_defaults(func=reindex.run)
# Create parser for backup command
backup_parser = command_parser.add_parser('backup',
help='Backup an index')
backup_type_parser = backup_parser.add_subparsers(help='Storage type',
dest='storagetype')
s3_parser = backup_type_parser.add_parser('s3', help='Backup to AWS S3')
s3_parser.add_argument('index_name', help='The name of the index to back up')
s3_parser.add_argument('bucket_name', help='The name of the bucket to back up to')
s3_parser.add_argument('-r', '--region', help='The name of aws region',
default='eu-west-1')
s3_parser.add_argument('-u', '--user', help='User key for s3')
s3_parser.add_argument('-s', '--secret', help='Secret key for s3')
s3_parser.set_defaults(func=backup.s3_run)
# Create parser for delete command
delete_parser = command_parser.add_parser('delete', help='Delete documents')
delete_parser.add_argument('index_name',
help='The name of the index to delete in')
delete_parser.add_argument('doc_type',
help='The name of the document type to delete from')
delete_parser.add_argument('-q', '--query',
help='Optional query object')
delete_parser.set_defaults(func=deletebulk.run)
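# Illustrative invocations (not part of the original module) showing how the
# sub-commands defined above are expected to be called; the index, bucket and
# document-type names below are hypothetical:
#   companion -u http://localhost:9200 health -l indices
#   companion setup -p ./data
#   companion reindex myindex "myindex-{:%Y-%m-%d}" -d created_at
#   companion backup s3 myindex mybucket -r eu-west-1
#   companion delete myindex mydoctype -q '{"query": {"match_all": {}}}'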
def main():
args = parser.parse_args()
logging.basicConfig(level=args.log_level)
args.func(args)
if __name__ == '__main__':
"""When run from the command-line, set a standard handler and formatter."""
main()
|
{
"content_hash": "02a2fe2b70eb75fb82d829805c00e657",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 45.168539325842694,
"alnum_prop": 0.613681592039801,
"repo_name": "Receiptful/elastic-companion",
"id": "d49d2146f77e2f61574e0c96489e50533937abf2",
"size": "4020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "companion/cli/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46056"
}
],
"symlink_target": ""
}
|
import os
from skimage import io
import optparse
import circle_detector
#import detector_watershed
def save_data(filename, data):
    import pickle
    print("Saving data")
    # pickle output is binary, so open the file in binary mode
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
if __name__ == '__main__' :
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
# get the number of the frame to process
task_id = int(os.environ['SGE_TASK_ID']) - 1
# make the filename
input_filename = args[0] % task_id
output_filename = args[1] % task_id
label_filename = args[2]
min_thresh = float(args[3])
max_thresh = float(args[4])
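    # Illustrative invocation (not part of the original script), assuming an SGE
    # array job where SGE_TASK_ID selects the frame and the first two arguments
    # are %-format patterns (file names below are hypothetical):
    #   python detector.py 'frame_%04d.tif' 'result_%04d.dat' labels.txt 0.1 0.9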
# load image
print("Loading image %s" % input_filename)
image = io.imread(input_filename)
# process image
print("Processing data")
result = circle_detector.watershed_segmentation(image, 3, label_filename, task_id, min_thresh, max_thresh)
# TODO: TRY WATERSHED SLICING INSTEAD OF HOUGH
#result = detector_watershed.watershed_segmentation(image, 3, label_filename, task_id)
# save image
print("Saving image %s" % output_filename)
save_data(output_filename, result)
|
{
"content_hash": "15511946f57560e618bddaa677b61298",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 110,
"avg_line_length": 28.29268292682927,
"alnum_prop": 0.6594827586206896,
"repo_name": "DiamondLightSource/auto_tomo_calibration-experimental",
"id": "63e4880e6919c4b2ef202d552bbd2145cecf1b12",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_code_scripts/measure_resolution/detector.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1956"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "9816"
},
{
"name": "Jupyter Notebook",
"bytes": "177680"
},
{
"name": "Makefile",
"bytes": "14912"
},
{
"name": "Python",
"bytes": "3808110"
},
{
"name": "Shell",
"bytes": "27616"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_oam_mep_ref import TapiOamMepRef # noqa: F401,E501
from tapi_server.models.tapi_oam_mip_ref import TapiOamMipRef # noqa: F401,E501
from tapi_server import util
class TapiOamMepMipList(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, mip=None, mep=None): # noqa: E501
"""TapiOamMepMipList - a model defined in OpenAPI
:param mip: The mip of this TapiOamMepMipList. # noqa: E501
:type mip: List[TapiOamMipRef]
:param mep: The mep of this TapiOamMepMipList. # noqa: E501
:type mep: List[TapiOamMepRef]
"""
self.openapi_types = {
'mip': List[TapiOamMipRef],
'mep': List[TapiOamMepRef]
}
self.attribute_map = {
'mip': 'mip',
'mep': 'mep'
}
self._mip = mip
self._mep = mep
@classmethod
def from_dict(cls, dikt) -> 'TapiOamMepMipList':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.MepMipList of this TapiOamMepMipList. # noqa: E501
:rtype: TapiOamMepMipList
"""
return util.deserialize_model(dikt, cls)
@property
def mip(self):
"""Gets the mip of this TapiOamMepMipList.
none # noqa: E501
:return: The mip of this TapiOamMepMipList.
:rtype: List[TapiOamMipRef]
"""
return self._mip
@mip.setter
def mip(self, mip):
"""Sets the mip of this TapiOamMepMipList.
none # noqa: E501
:param mip: The mip of this TapiOamMepMipList.
:type mip: List[TapiOamMipRef]
"""
self._mip = mip
@property
def mep(self):
"""Gets the mep of this TapiOamMepMipList.
none # noqa: E501
:return: The mep of this TapiOamMepMipList.
:rtype: List[TapiOamMepRef]
"""
return self._mep
@mep.setter
def mep(self, mep):
"""Sets the mep of this TapiOamMepMipList.
none # noqa: E501
:param mep: The mep of this TapiOamMepMipList.
:type mep: List[TapiOamMepRef]
"""
self._mep = mep
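# Illustrative sketch (not part of the generated module): building the model
# from a plain payload dict via the generated from_dict helper; the empty
# lists below are hypothetical placeholder values.
def _example_from_dict():
    return TapiOamMepMipList.from_dict({'mip': [], 'mep': []})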
|
{
"content_hash": "a9d0e2c7d37d64b81c0ddf36582c996e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 96,
"avg_line_length": 26.361702127659573,
"alnum_prop": 0.5944309927360775,
"repo_name": "karthik-sethuraman/Snowmass-ONFOpenTransport",
"id": "2f5c7adf73a1aaa512e6a9cc4c08309320745cee",
"size": "2495",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_oam_mep_mip_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "653"
},
{
"name": "D",
"bytes": "2405"
},
{
"name": "HTML",
"bytes": "137234"
},
{
"name": "Python",
"bytes": "937060"
},
{
"name": "Shell",
"bytes": "4361"
}
],
"symlink_target": ""
}
|
import functools
import warnings
import sqlalchemy
from common.config import config
def new_engine_and_metadata():
"""
Create new SQLAlchemy engine and metadata.
NOTE: Every process should have AT MOST one engine.
"""
engine = sqlalchemy.create_engine(config["postgres"], echo=config["debugsql"], execution_options={"autocommit": False})
metadata = sqlalchemy.MetaData(bind=engine)
with warnings.catch_warnings():
# Yes, I know you can't understand FTS indexes.
warnings.simplefilter("ignore", category=sqlalchemy.exc.SAWarning)
metadata.reflect()
sqlalchemy.event.listen(engine, "engine_connect", ping_connection)
return engine, metadata
def ping_connection(connection, branch):
if branch:
# "branch" refers to a sub-connection of a connection, don't ping those
return
# Check if connection is valid
try:
connection.scalar(sqlalchemy.select([1]))
except sqlalchemy.exc.DBAPIError as err:
if err.connection_invalidated:
# connection not valid, force reconnect.
connection.scalar(sqlalchemy.select([1]))
else:
raise
_engine_and_metadata = None
def get_engine_and_metadata():
"""
Return the SQLAlchemy engine and metadata for this process, creating one if
there isn't one already.
"""
if _engine_and_metadata is None:
set_engine_and_metadata(*new_engine_and_metadata())
return _engine_and_metadata
def set_engine_and_metadata(engine, metadata):
"""
Set the SQLAlchemy engine and metadata for this process, for
get_engine_and_metadata to return, if they are created by another source
(eg flask_sqlalchemy).
"""
global _engine_and_metadata
_engine_and_metadata = engine, metadata
def escape_like(s):
return s.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
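# Illustrative sketch (not part of the original module): escape_like is meant
# for user-supplied fragments that end up inside a LIKE pattern. The table,
# column and connection below are hypothetical; the escape character matches
# the backslash used by escape_like.
def _example_escape_like(connection, users_table, fragment):
    pattern = '%' + escape_like(fragment) + '%'
    query = users_table.select().where(users_table.c.name.like(pattern, escape='\\'))
    return connection.execute(query).fetchall()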
|
{
"content_hash": "f319c95297716f2b1aa294be1f0f8771",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 120,
"avg_line_length": 29.82758620689655,
"alnum_prop": 0.7358381502890173,
"repo_name": "andreasots/lrrbot",
"id": "5fa206bb1575b7e5952d2bba439650290b8af230",
"size": "1730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/postgres.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15924"
},
{
"name": "HTML",
"bytes": "65230"
},
{
"name": "JavaScript",
"bytes": "39616"
},
{
"name": "Mako",
"bytes": "318"
},
{
"name": "Python",
"bytes": "381399"
}
],
"symlink_target": ""
}
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from types import StringType
class FeMergeNodeNode(BasicSvgNode):
svgNodeType = BasicSvgNode.SVG_FE_MERGE_NODE_NODE
ATTRIBUTE_IN = 'in'
def __init__(self, ownerDoc):
BasicSvgNode.__init__(self, ownerDoc, 'feMergeNode')
self._allowedSvgChildNodes.update({self.SVG_ANIMATE_NODE, self.SVG_SET_NODE})
def setIn(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_IN, data)
def getIn(self):
node = self._getNodeAttribute(self.ATTRIBUTE_IN)
if node is not None:
return node.nodeValue
return None
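# Illustrative sketch (not part of the original module): how the 'in'
# attribute accessors are expected to be used; owner_doc stands for the SVG
# document owning the node and is hypothetical here.
def _example_fe_merge_node(owner_doc):
    node = FeMergeNodeNode(owner_doc)
    node.setIn('SourceGraphic')
    return node.getIn()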
|
{
"content_hash": "f002ec8d4badb9fd518880be578f8f35",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 31.583333333333332,
"alnum_prop": 0.6266490765171504,
"repo_name": "danrg/RGT-tool",
"id": "fac102827a8ed51f5d287ec30aac5de55e5536aa",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/Filters/feMergeNodeNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
}
|
"""
$ python simple_cli.py --help
simple_cli.py v1.0
Usage: simple_cli.py [SWITCHES] srcfiles...
Meta-switches:
-h, --help Prints this help message and quits
--version Prints the program's version and quits
Switches:
-I VALUE:str Specify include directories; may be given
multiple times
--loglevel LEVEL:int Sets the log-level of the logger
-v, --verbose Enable verbose mode
$ python simple_cli.py x.cpp y.cpp z.cpp
Verbose: False
Include dirs: []
Compiling: ('x.cpp', 'y.cpp', 'z.cpp')
$ python simple_cli.py -v
Verbose: True
Include dirs: []
Compiling: ()
$ python simple_cli.py -v -Ifoo/bar -Ispam/eggs
Verbose: True
Include dirs: ['foo/bar', 'spam/eggs']
Compiling: ()
$ python simple_cli.py -v -I foo/bar -Ispam/eggs x.cpp y.cpp z.cpp
Verbose: True
Include dirs: ['foo/bar', 'spam/eggs']
Compiling: ('x.cpp', 'y.cpp', 'z.cpp')
"""
from __future__ import print_function
import logging
from plumbum import cli
class MyCompiler(cli.Application):
verbose = cli.Flag(["-v", "--verbose"], help = "Enable verbose mode")
include_dirs = cli.SwitchAttr("-I", list = True, help = "Specify include directories")
@cli.switch("-loglevel", int)
def set_log_level(self, level):
"""Sets the log-level of the logger"""
logging.root.setLevel(level)
def main(self, *srcfiles):
print("Verbose:", self.verbose)
print("Include dirs:", self.include_dirs)
print("Compiling:", srcfiles)
if __name__ == "__main__":
MyCompiler()
|
{
"content_hash": "c471ca71ebb8560e10abbbc6134a3cf5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 90,
"avg_line_length": 27.586206896551722,
"alnum_prop": 0.62,
"repo_name": "AndydeCleyre/plumbum",
"id": "47178cc9e8d4fadd059e99644bd1e598e02f1374",
"size": "1622",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/simple_cli.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "52"
},
{
"name": "Python",
"bytes": "440514"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
}
|
from securitylib.advanced_crypto import safe_compare, validate_authenticator_key, hmac
from securitylib.random import get_random_bytes
from securitylib.utils import randomize, get_random_element
import string
import math
import os
import re
__all__ = ['prepare_password_for_storage', 'compare_stored_password',
'generate_password', 'validate_password', 'get_password_strength',
'get_entropy_bits']
KEYBOARD_SEQUENCES = [
"1234567890qwertyuiopasdfghjklzxcvbnm",
"1qaz2wsx3edc4rfv5tgb6yhn7ujm8ik9ol0p",
"qazwsxedcrfvtgbyhnujmikolp",
"0147258369",
"1470258369",
"7894561230",
"abcdefgh",
"13579",
"02468",
"a1b2c3d4e5f6g7h8i9j0",
"1q2w3e4r5t6y7u8i9o0p",
]
LICENCE_PLATE_REGEX = re.compile(r'([0-9]{2}|[a-zA-Z]{2})[.\-_]([0-9]{2}|[a-zA-Z]{2})[.\-_]([0-9]{2}|[a-zA-Z]{2})')
DATE_REGEX = re.compile(r'(?<![0-9])(19|20)\d\d(?![0-9])')
FULL_DATE_REGEXS = [
re.compile(r'(?<![0-9])(19|20)?\d\d(?P<sep>[- /._\\])(1[012]|0?[1-9])(?P=sep)([12][0-9]|3[01]|0?[1-9])(?![0-9])'),
re.compile(r'(?<![0-9])(19|20)?\d\d(?P<sep>[- /._\\])([12][0-9]|3[01]|0?[1-9])(?P=sep)(1[012]|0?[1-9])(?![0-9])'),
re.compile(r'(?<![0-9])(1[012]|0?[1-9])(?P<sep>[- /._\\])([12][0-9]|3[01]|0?[1-9])(?P=sep)(19|20)?\d\d(?![0-9])'),
re.compile(r'(?<![0-9])([12][0-9]|3[01]|0?[1-9])(?P<sep>[- /._\\])(1[012]|0?[1-9])(?P=sep)(19|20)?\d\d(?![0-9])'),
re.compile(r'(?<![0-9])(19|20)?\d\d(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])(?![0-9])'),
re.compile(r'(?<![0-9])(19|20)?\d\d(0[1-9]|[12][0-9]|3[01])(0[1-9]|1[012])(?![0-9])'),
re.compile(r'(?<![0-9])(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])(19|20)?\d\d(?![0-9])'),
re.compile(r'(?<![0-9])(0[1-9]|[12][0-9]|3[01])(0[1-9]|1[012])(19|20)?\d\d(?![0-9])'),
re.compile(r'(?<![0-9])(0[1-9]|1[012])[- /._\\](0[1-9]|[12][0-9]|3[01])(?![0-9])'),
re.compile(r'(?<![0-9])(0[1-9]|[12][0-9]|3[01])[- /._\\](0[1-9]|1[012])(?![0-9])'),
]
DICT_WORDS = None
def prepare_password_for_storage(password, authenticator_key):
"""
Use this function if you want to store a password.
This function returns a hex representation of the password that is safe to be stored.
It uses a one-way algorithm which means you need to provide the password
you are trying to verify in :func:`~securitylib.passwords.compare_stored_password` as one of the parameters.
:param password: The password to be prepared for storage.
:type password: :class:`str`
:param authenticator_key: This key is used to make it harder for an attacker to find the users passwords,
even if he compromises the database.
This is done by making the transformation of the password be unique for the given key
(using the given authenticator_key),
so even if an attacker gets hold of the stored password,
he has no way to verify whether a password matches it without knowing the key.
This also means that this key MUST be stored separate from the stored passwords,
else an attacker that compromises the database will also get hold of this key.
                              Other recommendations include storing it outside the webserver tree and
with read permissions only for the application that must read it.
You can use :func:`~securitylib.crypto.generate_authenticator_key` to generate it.
:type authenticator_key: :class:`str`
:returns: :class:`str` -- Returns the password prepared for storage.
"""
validate_authenticator_key(authenticator_key)
version = 1
salt = get_random_bytes(8)
return prepare_password_for_storage_all_params(password, authenticator_key, salt, version)
def compare_stored_password(password, authenticator_key, stored_password):
"""
Use this function to verify a password given by a user
against a password stored with :func:`~securitylib.passwords.prepare_password_for_storage`.
:param password: The password to be compared to the stored one.
:type password: :class:`str`
:param authenticator_key: The key that was used when storing the password, in byte string.
:type authenticator_key: :class:`str`
:param stored_password: Stored password against which the given password is to be compared.
:type stored_password: :class:`str`
:returns: :class:`bool` -- True if the given password matches the stored one.
"""
validate_authenticator_key(authenticator_key)
# Tests whether stored_password is correct hex but does not replace it
version = ord(stored_password[:2].decode('hex'))
salt = stored_password[2:18].decode('hex')
return safe_compare(prepare_password_for_storage_all_params(password, authenticator_key, salt, version),
stored_password.lower())
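# Illustrative sketch (not part of the original module): storing a password and
# verifying it later. The authenticator key is assumed to come from
# securitylib.crypto.generate_authenticator_key() and is not shown here.
def _example_storage_roundtrip(authenticator_key):
    stored = prepare_password_for_storage('correct horse battery staple', authenticator_key)
    return compare_stored_password('correct horse battery staple', authenticator_key, stored)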
def prepare_password_for_storage_all_params(password, authenticator_key, salt, version):
"""
Use this function if you want to store a password.
This function returns a hex representation of the password that is safe to be stored.
It uses a one-way algorithm which means you need to provide the password
you are trying to verify in :func:`~securitylib.passwords.compare_stored_password` as one of the parameters.
:param password: The password to be prepared for storage.
:type password: :class:`str`
:param authenticator_key: Secret to be used in the one-way algorithm, in hex.
:type authenticator_key: :class:`str`
:param salt: Salt for the password.
:type salt: :class:`str`
:param version: Version of the function to use.
It is used to guarantee backward compatibility in case
a new version of this function is released.
:type version: :class:`int`
:returns: :class:`str` -- Returns the password prepared for storage.
"""
# Tests whether authenticator_key is correct hex but does not replace it
if version == 1:
version_hex = chr(version).encode('hex')
hpass = hmac(salt + password, authenticator_key, 32, 10).encode('hex')
return version_hex + salt.encode('hex') + hpass
else:
raise NotImplementedError('Version {0} not supported'.format(version))
def generate_password(length=12, lower=True, upper=True, digits=True, special=True, ambig=True):
"""
Generates a password according to the given parameters.
It is guaranteed that if a type of characters (lower, upper, etc.) is allowed in the password,
then the generated password will always contain at least one character of that type,
e.g. if the parameter special is True, then the generated password will have at least a special character.
:param length: Length of the generated password. Must be at least 8.
:type length: :class:`int`
:param lower: Whether the password should contain lower case characters.
:type lower: :class:`bool`
:param upper: Whether the password should contain upper case characters.
:type upper: :class:`bool`
:param digits: Whether the password should contain digits.
:type digits: :class:`bool`
:param special: Whether the password should contain special characters (!\@#$%^&*).
:type special: :class:`bool`
:param ambig: Whether the password should contain ambiguous characters (iloILO10).
:type ambig: :class:`bool`
:returns: :class:`str` -- The generated password.
"""
if length < 8:
raise ValueError('Parameter length must be at least 8.')
if not any([upper, lower, digits, special]):
raise ValueError('At least one of upper, lower, digits or special must be True.')
s_all = ''
s_lower = 'abcdefghjkmnpqrstuvwxyz'
s_upper = 'ABCDEFGHJKMNPQRSTUVWXYZ'
s_digits = '23456789'
s_special = '!@#$%^&*'
if ambig:
s_lower += 'ilo'
s_upper += 'ILO'
s_digits += '10'
password = []
if lower:
s_all += s_lower
password.append(get_random_element(s_lower))
if upper:
s_all += s_upper
password.append(get_random_element(s_upper))
if digits:
s_all += s_digits
password.append(get_random_element(s_digits))
if special:
s_all += s_special
password.append(get_random_element(s_special))
for _ in xrange(length - len(password)):
password.append(get_random_element(s_all))
randomize(password)
return ''.join(password)
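# Illustrative sketch (not part of the original module): generating a password
# without ambiguous characters and checking it against the default rules of
# validate_password (defined below); an empty list means all checks passed.
def _example_generate_and_check():
    candidate = generate_password(length=16, ambig=False)
    return candidate, validate_password(candidate)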
def validate_password(password, min_length=12, min_lower=1, min_upper=1, min_digits=1, min_special=1, min_strength=50):
"""
Validates a given password against some basic rules.
:param password: Password to validate.
:type password: :class:`str`
:param min_length: Minimum length that the password must have.
:type min_length: :class:`int`
:param min_lower: Minimum number of lower case characters that the password must contain.
:type min_lower: :class:`int`
:param min_upper: Minimum number of upper case characters that the password must contain.
:type min_upper: :class:`int`
:param min_digits: Minimum number of digits that the password must contain.
:type min_digits: :class:`int`
:param min_special: Minimum number of special characters (!\@#$%^&*) that the password must contain.
:type min_special: :class:`int`
:param min_strength: Minimum strength that the password must have according to function :func:`~securitylib.passwords.get_password_strength`.
    :type min_strength: :class:`int`
:returns: :class:`list` -- A list with the name of the parameters whose validations have failed.
This means a password is valid only if this function returns an empty list.
"""
s_lower = set('abcdefghijklmnopqrstuvwxyz')
s_upper = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
s_digits = set('0123456789')
s_special = set('!@#$%^&*')
problems = []
if len(password) < min_length:
problems.append('min_length')
if count_occurrences_in_set(password, s_lower) < min_lower:
problems.append('min_lower')
if count_occurrences_in_set(password, s_upper) < min_upper:
problems.append('min_upper')
if count_occurrences_in_set(password, s_digits) < min_digits:
problems.append('min_digits')
if count_occurrences_in_set(password, s_special) < min_special:
problems.append('min_special')
if min_strength and get_password_strength(password) < min_strength:
problems.append('min_strength')
return problems
def count_occurrences_in_set(seq, target_set):
count = 0
for element in seq:
if element in target_set:
count += 1
return count
def get_password_strength(password, username=None):
"""
Evaluate a password's strength according to some heuristics.
:param password: Password to evaluate.
:type password: :class:`str`
:param username: Username of the password's owner. When provided, the password strength will
be lower if it contains the given username. If the username is an email,
both the whole email and its left part will be used.
:type username: :class:`str`
:returns: :class:`int` -- Strength of the password as an int between 0 and 100.
"""
return min(int(get_entropy_bits(password, username) * 100 / 52), 100)
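# Illustrative sketch (not part of the original module): the strength score is
# the entropy estimate rescaled to the 0-100 range, so both values below come
# from the same heuristics; passing the (hypothetical) username lowers the
# score when the password contains it.
def _example_strength(username):
    return (get_password_strength('Tr0ub4dor&3'),
            get_password_strength('Tr0ub4dor&3', username=username),
            get_entropy_bits('Tr0ub4dor&3'))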
class PassVariant:
def __init__(self, password, entropy=0):
self.password = password
self.entropy = entropy
def __hash__(self):
return hash(self.password)
def __eq__(self, other):
return self.password == other.password
def __cmp__(self, other):
return cmp(self.entropy, other.entropy)
def __repr__(self):
return '{0} {1}'.format(self.password, self.entropy)
class KeepMinDict(dict):
def __init__(self, *args, **kwargs):
super(KeepMinDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key not in self or value < self[key]:
super(KeepMinDict, self).__setitem__(key, value)
def load_dict_words():
global DICT_WORDS
# If the list of dictionary words is not yet created
# load the dictionary file and load it into the list.
if not DICT_WORDS:
dictionary_dir = os.path.dirname(os.path.abspath(__file__))
dictionary_path = os.path.join(dictionary_dir, 'dictionary.txt')
with open(dictionary_path) as f:
DICT_WORDS = f.read().splitlines()
return DICT_WORDS
def get_NIST_num_bits(password, repeatcalc=False):
passlen = len(password)
result = 0
if repeatcalc:
# Variant on NIST rules to reduce long sequences of repeated characters.
charmult = [1] * 256
for i in xrange(passlen):
tempchr = ord(password[i])
if i >= 19:
result += charmult[tempchr]
elif i >= 8:
result += charmult[tempchr] * 1.5
elif i >= 1:
result += charmult[tempchr] * 2
else:
result += 4
            # Each time a character appears, its value is reduced by a factor of 0.75, never going below 0.4
charmult[tempchr] = max(charmult[tempchr] * 0.75, 0.4)
elif passlen > 20:
result = 4 + (7 * 2) + (12 * 1.5) + passlen - 20
elif passlen > 8:
result = 4 + (7 * 2) + ((passlen - 8) * 1.5)
elif passlen > 1:
result = 4 + ((passlen - 1) * 2)
elif passlen == 1:
result = 4
else:
result = 0
return result
def get_entropy_bits(password, username=None):
"""
Evaluate a password's strength according to some heuristics.
Returns the entropy of the given password in bits.
E.g. a password with 8 characters, lowercase + digits,
without dictionary words and without keyboard sequences, will have entropy about 26.
If it had also uppercase characters the entropy would be about 30.
:param password: Password to evaluate.
:type password: :class:`str`
:param username: Username of the password's owner. When provided, the password strength will
be lower if it contains the given username. If the username is an email,
both the whole email and its left part will be used.
:type username: :class:`str`
    :returns: :class:`int` -- Number of bits of entropy that the password has.
"""
orig_pass = password
if not orig_pass:
return 0
# Hardcoded parameters
find_keyboard_sequences = True
find_dict_words = True
minwordlen = 3
minword_accept_len = 6
# If all the characters in the password are the same return early.
n_different_characters = len(set(orig_pass))
if n_different_characters == 1:
return math.log(len(orig_pass) * 40) / math.log(2)
# Tests which types of character the password has.
upper = False
lower = False
digits = False
common_separators = False
other = False
for char in orig_pass:
ord_chr = ord(char)
if ord('A') <= ord_chr <= ord('Z'):
upper = True
elif ord('a') <= ord_chr <= ord('z'):
lower = True
elif ord('0') <= ord_chr <= ord('9'):
digits = True
elif char in ' ._-':
common_separators = True
else:
other = True
# Sets the keyspace_multiplier to the sum of the keyspace sizes
# for each type of character in the password.
keyspace_multiplier = 0
if lower:
keyspace_multiplier += 26
if upper:
keyspace_multiplier += 26
if digits:
keyspace_multiplier += 10
if common_separators:
keyspace_multiplier += 4
if other:
keyspace_multiplier += 28
# Converts the keyspace_multiplier to multiply bits of entropy
# and uses 26 (lowercase keyspace size) as the baseline,
# i.e. if the password keyspace size is 26, the multiplier will be
# equal to 1.
keyspace_multiplier = math.log(keyspace_multiplier) / math.log(26)
orig_pass = handle_license_plates(orig_pass)
orig_pass = handle_dates(orig_pass)
if username:
# Remove whole username
orig_pass = handle_username(orig_pass, username)
if '@' in username:
# If username is an email, also remove the first part of the email
username = username.partition('@')[0]
orig_pass = handle_username(orig_pass, username)
    ### Creates many variants of the original password ###
# Lowercase variant
lower_pass = PassVariant(orig_pass.lower())
# Reversed lowercase variant
rev_pass = reverse_password(lower_pass)
passwords_variants = KeepMinDict()
passwords_variants[lower_pass.password] = lower_pass
passwords_variants[rev_pass.password] = rev_pass
# Leet-speak substitutions variants
leetspeakmap = string.maketrans('@!$1234567890', 'aisizeasgtbgo')
leetspeak_pass = PassVariant(lower_pass.password.translate(leetspeakmap), 1)
passwords_variants[leetspeak_pass.password] = leetspeak_pass
leetspeakmap2 = string.maketrans('@!$1234567890', 'aislzeasgtbgo')
leetspeak_pass2 = PassVariant(lower_pass.password.translate(leetspeakmap2), 1)
passwords_variants[leetspeak_pass2.password] = leetspeak_pass2
if find_keyboard_sequences:
# Tries to find sequences from keyboard
# in the variants of the password and removes them.
for cur_pass in passwords_variants.values():
tmp_pass = cur_pass.password
for keyboard_seq in KEYBOARD_SEQUENCES:
tmp_pass = remove_sequence(tmp_pass, keyboard_seq)
if cur_pass.password != tmp_pass:
# Since keyboard sequences were found in the password and removed,
# we add the new shortened password to the password variants.
# We don't replace the original (non shortened) variant since it
# might contain a dictionary word that was hidden by the shortening.
shortened_pass = PassVariant(tmp_pass, cur_pass.entropy)
passwords_variants[shortened_pass.password] = shortened_pass
if find_dict_words:
# Looks for dictionary words in the password variants.
dict_words = load_dict_words()
for cur_pass in passwords_variants.values():
clean_pass = ''.join(char for char in cur_pass.password if char != '\x00')
n_alpha_chars = len([char for char in clean_pass if char_is_lower(char)])
if len(clean_pass) >= minwordlen:
# Creates a set with all the substrings of the password in it.
substr_set = get_substrings_set(clean_pass, minwordlen)
for dict_word in dict_words:
# If a dictionary word is found in the substr_set then
# it means that word was part of the original password.
if dict_word in substr_set and dict_word in clean_pass:
if len(dict_word) >= minword_accept_len:
break
if len(dict_word) * 2 < n_alpha_chars:
continue
start_match = clean_pass.index(dict_word)
if start_match == 0:
break
if not char_is_lower(clean_pass[start_match - 1]):
break
else:
                    # If no word is found in the password, give its entropy a bonus.
cur_pass.entropy += 6
for pwd in passwords_variants.values():
pwd.entropy += get_NIST_num_bits(pwd.password)
# Find the minimum entropy among all password variants.
min_entropy = min(pass_variant.entropy for pass_variant in passwords_variants.values())
# Also consider the entropy of running the get_NIST_num_bits variant
# for repeated chars against the original password.
# We add 6 bits to the result simulating that no word was found
# in the original password. This way, the only way this will result
# in less entropy than the variants is if there is a great number of
# repetitions in the original password.
orig_pass_entropy = get_NIST_num_bits(orig_pass, True) + 6
if orig_pass_entropy < min_entropy:
min_entropy = orig_pass_entropy
return min_entropy * keyspace_multiplier
def handle_license_plates(pwd):
m = LICENCE_PLATE_REGEX.search(pwd)
if m:
filtered_license = ''.join(filter(None, m.groups()))
count_letters = sum(1 for c in filtered_license if c.isalpha())
if count_letters == 2:
# is valid license plate
pwd = replace_at_span(pwd, filtered_license, m.start(), m.end())
return pwd
def handle_dates(pwd):
all_full_date_matches = (regex.search(pwd) for regex in FULL_DATE_REGEXS)
all_full_date_matches = filter(None, all_full_date_matches)
if all_full_date_matches:
maximum_match = max(all_full_date_matches, key=lambda m: m.end() - m.start())
pwd = replace_at_span(pwd, '\x00' * 4, maximum_match.start(), maximum_match.end())
else:
m = DATE_REGEX.search(pwd)
if m:
pwd = replace_at_span(pwd, '\x00' * 2, m.start(), m.end())
return pwd
def handle_username(pwd, username):
def remove_username(pwd, username_lower):
index = pwd.lower().find(username_lower)
while index != -1:
pwd = replace_at_span(pwd, '\x00' * 2, index, index + len(username_lower))
index = pwd.lower().find(username_lower)
return pwd
if len(username) > 2:
username_lower = username.lower()
pwd = remove_username(pwd, username_lower)
pwd = reverse_string(pwd)
pwd = remove_username(pwd, username_lower)
pwd = reverse_string(pwd)
return pwd
def replace_at_span(orig_str, replacer, start, end):
return orig_str[:start] + replacer + orig_str[end:]
def char_is_lower(char):
return ord('a') <= ord(char) <= ord('z')
def remove_sequence(string, keyboard_seq):
"""
Finds the longest common substring between the string and keyboard sequence given
and if it is big enough (> 2) replaces it in the string with two null bytes
and repeats the process with the rest of the string.
    Returns the result of removing the keyboard sequences from the string.
"""
start, lcs = longest_common_substring(string, keyboard_seq)
if len(lcs) > 2:
return remove_sequence(string[:start], keyboard_seq) + '\x00\x00' + remove_sequence(string[start + len(lcs):], keyboard_seq)
else:
return string
def get_substrings_set(string, min_length):
"""
Creates a set with all the substring of string that have at least min_length.
"""
substr_set = set()
slen = len(string)
for substr_len in xrange(min_length, slen + 1):
for substr_start in xrange(slen - substr_len + 1):
substr_set.add(string[substr_start:substr_start + substr_len])
return substr_set
def reverse_password(password):
"""
Creates a PassVariant whose password field is the reverse of the original.
Also adds 1 to the entropy of the returned PassVariant.
"""
return PassVariant(reverse_string(password.password), password.entropy + 1)
def reverse_string(string):
return ''.join(reversed(string))
def longest_common_substring(s1, s2):
l2 = 1 + len(s2)
m1 = [0] * l2
m2 = [0] * l2
longest, x_longest = 0, 0
enumerate1 = list(enumerate(s1, 1))
enumerate2 = list(enumerate(s2, 1))
for x, s1_char in enumerate1:
for y, s2_char in enumerate2:
if s1_char == s2_char:
m2[y] = m1[y - 1] + 1
if m2[y] > longest:
longest = m2[y]
x_longest = x
else:
m2[y] = 0
m1, m2 = m2, m1
start = x_longest - longest
return (start, s1[start:x_longest])
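# Illustrative sketch (not part of the original module): longest_common_substring
# returns the start offset of the match in the first string together with the
# matched substring, and remove_sequence uses it to blank out keyboard runs
# longer than two characters with null bytes.
def _example_sequence_removal():
    start, lcs = longest_common_substring('xqwerty7', 'qwertyuiop')
    blanked = remove_sequence('xqwerty7', 'qwertyuiop')
    return start, lcs, blanked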
|
{
"content_hash": "14c6ce98a3a66fc42e368f31741d1d84",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 145,
"avg_line_length": 39.472131147540985,
"alnum_prop": 0.6257579533183819,
"repo_name": "sapo/securitylib-python",
"id": "519bc5ddd1c8eb6688bb41b4f0402282ce66638c",
"size": "24078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "securitylib/passwords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "178391"
}
],
"symlink_target": ""
}
|
import os
import datetime
from fnmatch import fnmatch
from process import process
from main.models import Client, Show, Location, Episode, Raw_File, Mark
VIDEO_EXTENSIONS = ('.dv', '.flv', '.mp4', '.MTS', '.mkv', '.mov', '.ts')
class add_dv(process):
def mark_file(self,pathname,show,location):
# one file of timestamps when Cut was Clicked
fullpathname = os.path.join(
self.show_dir, "dv", location.slug, pathname )
with open(fullpathname) as f:
cutlist = f.read().strip()
if not cutlist:
return
for line in cutlist.split('\n'):
if line:
try:
click = datetime.datetime.strptime(
line,'%Y-%m-%d/%H_%M_%S')
# click = click + datetime.timedelta(hours=-1)
except ValueError as e:
print(e)
continue
print(click, end=' ')
mark, created = Mark.objects.get_or_create(
show=show, location=location,
click=click)
if created:
print("(new)")
mark.save()
else:
print(" {} (exists)".format(mark.id))
def one_file(self,pathname,show,location,seq):
# one video asset file
print(pathname, end=' ')
if self.options.test:
rfs = Raw_File.objects.filter(
show=show, location=location,
filename=pathname,)
if rfs: print("in db:", rfs)
else: print("not in db")
else:
fullpathname = os.path.join(
self.show_dir, "dv", location.slug, pathname )
st = os.stat(fullpathname)
filesize=st.st_size
if filesize == 0:
print("(zero size)")
else:
rf, created = Raw_File.objects.get_or_create(
show=show, location=location,
filename=pathname,)
if created:
print("(new)")
rf.sequence=seq
rf.filesize=filesize
rf.save()
else:
print("(exists)")
def one_loc(self,show,location):
"""
finds dv files for this location
"""
if self.options.whack:
Raw_File.objects.filter(show=show).delete()
Mark.objects.filter(show=show).delete()
loc_dir=os.path.join(self.show_dir,'dv',location.slug)
if self.options.verbose: print("loc dir:", loc_dir)
seq=0
        # os.walk yields (dirpath, dirnames, filenames) tuples:
        # dirnames are the sub-directories,
        # filenames are the files found directly in dirpath,
        # and dirpath includes the root (which we don't want in the db)
for dirpath, dirnames, filenames in os.walk(loc_dir,followlinks=True):
# dirpath is the whole path from /
# we want to strip off .../client/show/dv/loc
# and only store what is under loc/
stuby=dirpath[len(loc_dir)+1:]
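            # e.g. with loc_dir '/data/myclient/myshow/dv/room1' and dirpath
            # '/data/myclient/myshow/dv/room1/day1', stuby is 'day1'
            # (hypothetical paths, for illustration only)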
if self.options.verbose:
print("checking...", dirpath, stuby, filenames)
for filename in filenames:
if self.options.verbose:
print("filename: {}".format(filename))
basename, extension = os.path.splitext(filename)
# cut list file from voctomix
if extension == ".log":
self.mark_file(
os.path.join(stuby,filename),show,location)
continue
# skip Low Quality version made by sync-rax
if basename in filenames:
# foo.ts makes foo.ts.mp4.
# strip the .mp4, see if foo.ts in [foo.ts]
# the mp4 is the lq from sync-rax, so don't add it.
if os.path.splitext(basename)[1] in VIDEO_EXTENSIONS:
# I am not sure how we got here if it wasn't an lq
# so I am not sure why we are checking this
# but tweed says:
# This must be a preview mp4 for web editing
if self.options.verbose:
print("skipping low quality")
continue
pathname=os.path.join(dirpath,filename)
if self.options.include and not fnmatch(
pathname,self.options.include):
# only add files that match --include
if self.options.verbose:
print("skipping (not in --include)")
continue
if extension in VIDEO_EXTENSIONS:
seq+=1
self.one_file(
os.path.join(stuby,filename),show,location,seq)
def one_show(self, show):
if self.options.whack:
Raw_File.objects.filter(show=show).delete()
return super(add_dv, self).one_show(show)
def work(self):
"""
find and process show
"""
if self.options.client:
client = Client.objects.get(slug=self.options.client)
show = Show.objects.get(
client=client, slug=self.options.show)
else:
show = Show.objects.get(slug=self.options.show)
self.one_show(show)
return
def add_more_options(self, parser):
parser.add_option('--include',
help="only include this glob.")
if __name__=='__main__':
p=add_dv()
p.main()
|
{
"content_hash": "6b0f9b6fd46e6f927b6e893bf91a7267",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 78,
"avg_line_length": 34.16167664670659,
"alnum_prop": 0.4976336546888694,
"repo_name": "xfxf/veyepar",
"id": "9e42adb27be8e92e961c45e4433a07df3d1c91c3",
"size": "5821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj/scripts/adddv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6132"
},
{
"name": "HTML",
"bytes": "88316"
},
{
"name": "JavaScript",
"bytes": "76640"
},
{
"name": "Python",
"bytes": "883375"
},
{
"name": "Ruby",
"bytes": "3503"
},
{
"name": "Shell",
"bytes": "82496"
}
],
"symlink_target": ""
}
|
"""
Compatibility functionality for GDBINIT users.
https://github.com/gdbinit/Gdbinit/blob/master/gdbinit
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import gdb
import pwndbg.commands
@pwndbg.commands.ArgparsedCommand("GDBINIT compatibility alias for 'start' command.")
@pwndbg.commands.OnlyWhenRunning
def init():
"""GDBINIT compatibility alias for 'start' command."""
pwndbg.commands.start.start()
@pwndbg.commands.ArgparsedCommand("GDBINIT compatibility alias for 'tbreak __libc_start_main; run' command.")
@pwndbg.commands.OnlyWhenRunning
def sstart():
"""GDBINIT compatibility alias for 'tbreak __libc_start_main; run' command."""
gdb.execute('tbreak __libc_start_main')
gdb.execute('run')
@pwndbg.commands.ArgparsedCommand("GDBINIT compatibility alias for 'main' command.")
@pwndbg.commands.OnlyWhenRunning
def main():
"""GDBINIT compatibility alias for 'main' command."""
pwndbg.commands.start.start()
@pwndbg.commands.ArgparsedCommand("GDBINIT compatibility alias for 'libs' command.")
@pwndbg.commands.OnlyWhenRunning
def libs():
"""GDBINIT compatibility alias for 'libs' command."""
pwndbg.commands.vmmap.vmmap()
@pwndbg.commands.ArgparsedCommand("GDBINIT compatibility alias to print the entry point. See also the 'entry' command.")
@pwndbg.commands.OnlyWhenRunning
def entry_point():
"""GDBINIT compatibility alias to print the entry point.
See also the 'entry' command."""
print(hex(int(pwndbg.elf.entry())))
|
{
"content_hash": "7c85292a6ded28b0d344545e78785959",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 120,
"avg_line_length": 33.4375,
"alnum_prop": 0.7501557632398754,
"repo_name": "cebrusfs/217gdb",
"id": "d3599d31fbb1d4b5424c8ffbc7f3cd39151d3ae6",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pwndbg/commands/gdbinit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Go",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Python",
"bytes": "1824522"
},
{
"name": "Shell",
"bytes": "6068"
}
],
"symlink_target": ""
}
|
import json, urllib3
from datetime import datetime, timedelta
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.utils.cache import patch_response_headers
from core.oauth.utils import login_customrequired
from core.views import initRequest
from core.libs.DateEncoder import DateEncoder
from core.libs.cache import setCacheEntry, getCacheEntry
from core.libs.datetimestrings import parse_datetime
from core.oi.utils import round_time
import matplotlib
from core.utils import removeParam
from django.template.defaulttags import register
CACHE_TIMEOUT = 5
OI_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
@register.filter(takes_context=True)
def to_float(value):
return float(value)
def formatError(json):
outstr = ""
for errorcat, errormessages in json.items():
        outstr += '<p>Error category: ' + errorcat + '<br />'
for message, num in errormessages.items():
outstr += 'Error message:<b>' + message + '</b>: ' +str(num)+ '<br />'
return outstr
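# Illustrative sketch (not part of the original module): formatError expects a
# mapping of error category -> {message: count} and renders it as an HTML
# fragment; the sample structure below is made up for illustration.
def _example_format_error():
    sample = {'piloterrorcode': {'Job killed by signal 15': 3}}
    return formatError(sample)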
@login_customrequired
def jbhome(request):
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "jobProblem")
data = None
if data is not None and len(data) > 10:
data = json.loads(data)
if not ('message' in data and 'warning' in data['message'] and len(data['message']['warning']) > 1):
data['request'] = request
response = render_to_response('jobsbuster.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
message = {}
# process params
metric = None
computetype = None
if 'metric' in request.session['requestParams'] and request.session['requestParams']['metric']:
metric = request.session['requestParams']['metric']
if 'hours' in request.session['requestParams'] and request.session['requestParams']['hours']:
hours = int(request.session['requestParams']['hours'])
endtime = datetime.now()
starttime = datetime.now() - timedelta(hours=hours)
elif 'endtime_from' in request.session['requestParams'] and 'endtime_to' in request.session['requestParams']:
endtime = parse_datetime(request.session['requestParams']['endtime_to'])
starttime = parse_datetime(request.session['requestParams']['endtime_from'])
else:
default_hours = 12
endtime = datetime.now()
starttime = datetime.now() - timedelta(hours=default_hours)
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype']:
jobtype = request.session['requestParams']['jobtype']
else:
jobtype = 'prod'
if 'computetype' in request.session['requestParams'] and request.session['requestParams']['computetype']:
computetype = request.session['requestParams']['computetype']
# getting data from jobbuster API
base_url = 'http://aipanda030.cern.ch:8010/jobsbuster/api/?'
url = base_url + 'timewindow={}|{}'.format(
round_time(starttime, timedelta(minutes=1)).strftime(OI_DATETIME_FORMAT),
round_time(endtime, timedelta(minutes=1)).strftime(OI_DATETIME_FORMAT))
if metric:
url += '&metric=' + metric
if computetype:
url += '&computetype=' + computetype
http = urllib3.PoolManager()
try:
resp = http.request('GET', url, timeout=500)
except:
resp = None
message['warning'] = "Can not connect to jobbuster API, please try later"
resp_data = None
if resp and len(resp.data) > 10:
try:
resp_data = json.loads(resp.data)
except:
message['warning'] = "No data was received"
else:
message['warning'] = "No data was received"
http.clear()
# processing data
plots = {}
spots = []
names = []
if resp_data:
for i, problem in enumerate(resp_data['mesuresW']):
resp_data['mesuresW'][i] = list(map(lambda x: x/3.154e+7 if not type(x) is str else x, problem))
names = [i[0] for i in resp_data['mesuresW']]
resp_dict = None
timeticks = []
errormessages = {}
if resp_data and 'ticks' in resp_data:
timeticks = [parse_datetime(tick) for tick in resp_data['ticks']]
timeticks = ['x'] + [tick.strftime("%Y-%m-%d %H:%M:%S") for tick in timeticks]
resp_data['mesuresW'].insert(0,timeticks)
resp_data['mesuresNF'].insert(0,timeticks)
colors = {}
for name, color in zip(names, resp_data['colorsW']):
colors[name] = matplotlib.colors.to_hex(color)
for issue in resp_data['issues']:
card = {}
card['color'] = colors[issue['name']]
card['impactloss'] = str(round(issue['walltime_loss'] / 3.154e+7, 2))
card['impactfails'] = issue['nFailed_jobs']
card['name'] = issue['name']
card['params'] = {}
id = str(len(errormessages.keys()))
card['errormessagesid'] = id
errormessages[id] = formatError(json.loads(issue['err_messages']))
urlstr = "https://bigpanda.cern.ch/jobs/?endtimerange=" + str(issue['observation_started']).replace(" ", "T") + "|" + str(issue['observation_finished']).replace(" ", "T")
for key,value in issue['features'].items():
card['params'][key] = value
# if isinstance(value, tuple):
# propname = value[i]
# else:
propname = value
urlstr += "&" + str(key).lower() + "=" + str(propname)
urlstr += "&mode=nodrop&prodsourcelabel=managed"
card['url'] = urlstr
spots.append(card)
measures = resp_data['mesuresW'] if not metric or metric=='loss' else resp_data['mesuresNF']
resp_dict = {
'mesures': measures,
'ticks': resp_data['ticks'],
'issnames': names,
'doGroup': False if len(names) < 2 else True,
'colors': colors,
'spots':spots,
}
url_no_computetype = removeParam(request.get_full_path(), 'computetype')
request.session['timerange'] = [starttime.strftime(OI_DATETIME_FORMAT), endtime.strftime(OI_DATETIME_FORMAT)]
data = {
'request': request,
'requestParams': request.session['requestParams'],
'viewParams': request.session['viewParams'],
'timerange': request.session['timerange'],
'message': message,
'mesures': [],
'metric': metric,
'urlBase': url_no_computetype + ('&' if url_no_computetype.find('?') > -1 else '?'),
'errormessages': json.dumps(errormessages)
#'plots': plots,
#'spots': spots,
}
if resp_dict:
data.update(resp_dict)
if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
response = render_to_response('jobsbuster.html', data, content_type='text/html')
else:
response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
if resp and len(resp.data) > 10:
setCacheEntry(request, "jobProblem", json.dumps(data, cls=DateEncoder), 60 * CACHE_TIMEOUT)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
|
{
"content_hash": "99232287f8655a2e33fd711062da5601",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 182,
"avg_line_length": 38,
"alnum_prop": 0.6174292515207617,
"repo_name": "PanDAWMS/panda-bigmon-core",
"id": "8da24dbe0db506e835bf0e3959e957b945ec4ec2",
"size": "7562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/oi/jbviews.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "394242"
},
{
"name": "Dockerfile",
"bytes": "5386"
},
{
"name": "HTML",
"bytes": "1445043"
},
{
"name": "JavaScript",
"bytes": "6858568"
},
{
"name": "Python",
"bytes": "1943651"
},
{
"name": "SCSS",
"bytes": "3129"
},
{
"name": "Shell",
"bytes": "1411"
}
],
"symlink_target": ""
}
|
"""@file sdr_snr_scorer.py
contains the scorer using SdrSnrScorer"""
# Edited by Pieter Appeltans (added snr score)
import scorer
import numpy as np
from nabu.postprocessing import data_reader
import bss_eval
class SdrSnrScorer(scorer.Scorer):
"""the SDR scorer class. Uses the script from
C. Raffel, B. McFee, E. J. Humphrey, J. Salamon, O. Nieto, D. Liang, and D. P. W. Ellis,
'mir_eval: A Transparent Implementation of Common MIR Metrics', Proceedings of the 15th
International Conference on Music Information Retrieval, 2014
a scorer using SDR
"""
score_metrics = ('SDR', 'SIR', 'SNR', 'SAR', 'perm')
score_metrics_to_summarize = ('SDR', 'SIR', 'SNR', 'SAR')
score_scenarios = ('SS', 'base')
score_expects = 'data'
def __init__(self, conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file):
"""Reconstructor constructor
Args:
conf: the scorer configuration as a dictionary
evalconf: the evaluator configuration as a ConfigParser
dataconf: the database configuration
rec_dir: the directory where the reconstructions are
numbatches: the number of batches to process
"""
super(SdrSnrScorer, self).__init__(conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file)
# get the original noise signal reader
noise_names = conf['noise'].split(' ')
noise_dataconfs = []
for noise_name in noise_names:
noise_dataconfs.append(dict(dataconf.items(noise_name)))
self.noise_reader = data_reader.DataReader(noise_dataconfs, self.segment_lengths)
def _get_score(self, org_src_signals, base_signals, rec_src_signals, noise_signal):
"""score the reconstructed utterances with respect to the original source signals
Args:
            org_src_signals: the original source signals, as a list of numpy arrays
            base_signals: the duplicated base signal (original mixture), as a list of numpy arrays
            rec_src_signals: the reconstructed source signals, as a list of numpy arrays
Returns:
the score"""
# convert to numpy arrays
org_src_signals = np.array(org_src_signals)[:, :, 0]
base_signals = np.array(base_signals)[:, :, 0]
rec_src_signals = np.array(rec_src_signals)
noise_signal = np.squeeze(noise_signal)
#
collect_outputs = dict()
collect_outputs[self.score_scenarios[1]] = bss_eval.bss_eval_sources_extended(org_src_signals, base_signals, noise_signal)
collect_outputs[self.score_scenarios[0]] = bss_eval.bss_eval_sources_extended(org_src_signals, rec_src_signals, noise_signal)
nr_spk = len(org_src_signals)
# convert the outputs to a single dictionary
score_dict = dict()
for i, metric in enumerate(self.score_metrics):
score_dict[metric] = dict()
for j, scen in enumerate(self.score_scenarios):
score_dict[metric][scen] = []
for spk in range(nr_spk):
score_dict[metric][scen].append(collect_outputs[scen][i][spk])
return score_dict
|
{
"content_hash": "5e9bdf3bdb41f78559ff00813f74dd8c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 133,
"avg_line_length": 40.89873417721519,
"alnum_prop": 0.6453110492107706,
"repo_name": "JeroenZegers/Nabu-MSSS",
"id": "8058f2d281ed903d9ff7ed3666c92538878bc06f",
"size": "3231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/postprocessing/scorers/sdr_snr_scorer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "981104"
},
{
"name": "Shell",
"bytes": "4125"
}
],
"symlink_target": ""
}
|