code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
"""
API MAPPING FOR Zendesk API V2
"""
mapping_table = {
# Tickets
'list_all_tickets': {
'path': '/tickets.json',
'method': 'GET',
},
'show_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'GET',
},
'create_ticket': {
'path': '/tickets.json',
'method': 'POST',
},
'update_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'PUT',
},
'update_many_tickets': {
'path': '/tickets/update_many.json',
'valid_params': ['ids'],
'method': 'PUT',
},
'delete_ticket': {
'path': '/tickets/{{ticket_id}}.json',
'method': 'DELETE',
},
'delete_many_tickets': {
'path': 'tickets/destroy_many.json',
'valid_params': ['ids'],
'method': 'DELETE',
},
'list_ticket_collaborators': {
'path': '/tickets/{{ticket_id}}/collaborators.json',
'method': 'GET',
},
'list_ticket_incidents': {
'path': '/tickets/{{ticket_id}}/incidents.json',
'method': 'GET',
},
# Ticket Audits
'list_audits': {
'path': '/tickets/{{ticket_id}}/audits.json',
'method': 'GET',
},
'mark_audit_as_trusted': {
'path': '/tickets/{{ticket_id}}/audits/{{audid_id}}/trust.json',
'method': 'PUT',
},
# Incremental Tickets
'export_incremental_tickets': {
'path': '/exports/tickets.json',
'valid_params': ['start_time'],
'method': 'GET',
},
# Ticket Fields
'list_ticket_fields': {
'path': '/ticket_fields.json',
'method': 'GET',
},
'show_ticket_field': {
'path': '/ticket_fields/{{ticket_field_id}}.json',
'method': 'GET',
},
'create_ticket_field': {
'path': '/ticket_fields.json',
'method': 'POST',
},
'update_ticket_field': {
'path': '/ticket_fields/{{ticket_field_id}}.json',
'method': 'PUT',
},
'delete_ticket_field': {
'path': '/ticket_fields/{{ticket_field_id}}.json',
'method': 'DELETE',
},
# Views
'list_views': {
'path': '/views.json',
'method': 'GET',
},
'list_active_views': {
'path': '/views/active.json',
'method': 'GET',
},
'get_view': {
'path': '/views/{{view_id}}.json',
'method': 'GET',
},
'execute_view': {
'path': '/views/{{view_id}}/execute.json',
'method': 'GET',
},
'preview_view': {
'path': '/views/preview.json',
'method': 'POST',
},
'count_many_views': {
'path': '/views/count_many.json',
'valid_params': ('ids',),
'method': 'GET',
},
'count_view': {
'path': '/views/{{view_id}}/count.json',
'method': 'GET',
},
# Users
'list_users': {
'path': '/users.json',
'method': 'GET',
},
'list_users_in_group': {
'path': '/groups/{{group_id}}/users.json',
'method': 'GET',
},
'list_users_in_organization': {
'path': '/organization/{{organization_id}}/users.json',
'method': 'GET',
},
'show_user': {
'path': '/users/{{user_id}}.json',
'method': 'GET',
},
'create_user': {
'path': '/users.json',
'method': 'POST',
},
'create_many_users': {
'path': '/users/create_many.json',
'method': 'POST',
},
'update_user': {
'path': '/users/{{user_id}}.json',
'method': 'PUT',
},
'delete_user': {
'path': '/users/{{user_id}}.json',
'method': 'DELETE',
},
'search_user': {
'path': '/users/search.json',
'valid_params': ['query', 'external_id'],
'method': 'GET',
},
'show_me': {
'path': '/users/me.json',
'method': 'GET',
},
# Requests
'list_requests': {
'path': '/requests.json',
'method': 'GET',
},
'list_open_requests': {
'path': '/requests/open.json',
'method': 'GET',
},
'list_solved_requests': {
'path': '/requests/solved.json',
'method': 'GET',
},
'list_ccd_requests': {
'path': '/requests/ccd.json',
'method': 'GET',
},
'list_requests_for_user': {
'path': '/users/{{user_id}}/requests.json',
'method': 'GET',
},
'show_request': {
'path': '/requests/{{request_id}}.json',
'method': 'GET',
},
'create_request': {
'path': '/requests.json',
'method': 'POST',
},
'update_request': {
'path': '/requests/{{request_id}}.json',
'method': 'PUT',
},
'list_comments': {
'path': '/requests/{{request_id}}/comments.json',
'method': 'GET',
},
'show_comment': {
'path': '/requests/{{request_id}}/comments/{{comment_id}}.json',
'method': 'GET',
},
# User Identities
'list_user_identities': {
'path': '/users/{{user_id}}/identities.json',
'method': 'GET',
},
'show_user_identity': {
'path': '/users/{{user_id}}/identities/{{identity_id}}.json',
'method': 'GET',
},
'create_user_identity': {
'path': '/users/{{user_id}}/identities.json',
'method': 'POST',
},
'update_user_identity': {
'path': '/users/{{user_id}}/identities/{{identity_id}}.json',
'valid_params': ['identityverified'],
'method': 'PUT',
},
'make_user_identity_primary': {
'path': '/users/{{user_id}}/identities/{{identity_id}}/make_primary',
'method': 'PUT',
},
'verify_user_identity': {
'path': '/users/{{user_id}}/identities/{{identity_id}}/verify',
'method': 'PUT',
},
'request_user_identity_verification': {
'path': '/users/{{user_id}}/identities/{{identity_id}}/request_verification',
'method': 'PUT',
},
'delete_user_identity': {
'path': '/users/{{user_id}}/identities/{{identity_id}}.json',
'method': 'DELETE',
},
# Groups
'list_groups': {
'path': '/groups.json',
'method': 'GET',
},
'list_assignable_groups': {
'path': '/groups/assignable.json',
'method': 'GET',
},
'show_group': {
'path': '/groups/{{group_id}}.json',
'method': 'GET',
},
'create_group': {
'path': '/groups.json',
'method': 'POST',
},
'update_group': {
'path': '/groups/{{group_id}}.json',
'method': 'PUT',
},
'delete_group': {
'path': '/groups/{{group_id}}.json',
'method': 'DELETE',
},
# Group Memberships
'list_memberships': {
'path': '/group_memberships.json',
'method': 'GET',
},
'list_memberships_for_user': {
'path': '/users/{{user_id}}/group_memberships.json',
'method': 'GET',
},
'list_memberships_for_group': {
'path': '/groups/{{group_id}}/group_memberships.json',
'method': 'GET',
},
'list_assignable_memberships': {
'path': '/group_memberships/assignable.json',
'method': 'GET',
},
'list_assignable_memberships_for_group': {
'path': 'groups/{{groups_id}}/group_memberships/assignable.json',
'method': 'GET',
},
'show_membership': {
'path': '/group_memberships/{{group_membership_id}}.json',
'method': 'GET',
},
'show_membership_for_user': {
'path': '/users/{{user_id}}/group_memberships/{{group_membership_id}}.json',
'method': 'GET',
},
'create_membership': {
'path': '/group_memberships.json',
'method': 'POST',
},
'create_membership_for_user': {
'path': '/users/{{user_id}}/group_memberships.json',
'method': 'POST',
},
'delete_membership': {
'path': '/group_memberships/{{group_membership_id}}.json',
'method': 'DELETE',
},
'delete_membership_for_user': {
'path': '/users/{{user_id}}/group_memberships/{{group_membership_id}}.json',
'method': 'DELETE',
},
'set_default_membership_for_user': {
'path': '/users/{{user_id}}/group_memberships/{{group_membership_id}}/make_default.json',
'method': 'PUT',
},
# Custom Agent Rules
'list_custom_roles': {
'path': '/custom_roles.json',
'method': 'GET',
},
# Organizations
'list_organzations': {
'path': '/organizations.json',
'method': 'GET',
},
'autocomplete_organzations': {
'path': '/organizations/autocomplete.json',
'valid_params': ['name'],
'method': 'GET',
},
'show_organzation': {
'path': '/organizations/{{organization_id}}.json',
'method': 'GET',
},
'create_organzation': {
'path': '/organizations.json',
'method': 'POST',
},
'update_organzation': {
'path': '/organizations.json',
'method': 'PUT',
},
'delete_organzation': {
'path': '/organizations.json',
'method': 'DELETE',
},
# Search
'search': {
'path': '/search.json',
'valid_params': ['query'],
'method': 'GET',
},
'anonymous_search': {
'path': '/portal_search.json',
'valid_params': ['query'],
'method': 'GET',
},
# Tags
'list_tags': {
'path': '/tags.json',
'method': 'GET',
},
# Forums
'list_forums': {
'path': '/forums.json',
'method': 'GET',
},
'list_forums_catagory': {
'path': '/catagories/{{catagory_id}}/forums.json',
'method': 'GET',
},
'show_forum': {
'path': '/forums/{{forum_id}}.json',
'method': 'GET',
},
'create_forum': {
'path': '/forums.json',
'method': 'POST',
},
'update_forum': {
'path': '/forums/{{forum_id}}.json',
'method': 'PUT',
},
'delete_forum': {
'path': '/forums/{{forum_id}}.json',
'method': 'DELETE',
},
# Forum Subscriptions
'list_forum_subscriptions': {
'path': '/forum_subscriptions.json',
'method': 'GET',
},
'list_forum_subscriptions_for_forum': {
'path': '/forum/{{forum_id}}/subscriptions.json',
'method': 'GET',
},
'show_forum_subscription': {
'path': '/forum_subscriptions/{{forum_subscription_id}}.json',
'method': 'GET',
},
'create_forum_subscription': {
'path': '/forum_subscriptions.json',
'method': 'POST',
},
'delete_forum_subscription': {
'path': '/forum_subscriptions/{{forum_subscription_id}}.json',
'method': 'DELETE',
},
# Categories
'list_categories': {
'path': '/categories.json',
'method': 'GET',
},
'show_category': {
'path': '/category/{{category_id}}.json',
'method': 'GET',
},
'create_category': {
'path': '/categories.json',
'method': 'GET',
},
'update_category': {
'path': '/category/{{category_id}}.json',
'method': 'PUT',
},
'delete_category': {
'path': '/category/{{category_id}}.json',
'method': 'DELETE',
},
# Topics
'list_topics': {
'path': '/topics.json',
'method': 'GET',
},
'list_topics_for_forum': {
'path': '/forums/{{forum_id}}/topics.json',
'method': 'GET',
},
'list_topics_for_user': {
'path': '/users/{{user_id}}/topics.json',
'method': 'GET',
},
'show_topic': {
'path': '/topics/{{topic_id}}.json',
'method': 'GET',
},
'show_many_topics': {
'path': '/topics/show_many.json',
'valid_params': ['ids'],
'method': 'GET',
},
'create_topics': {
'path': '/topics.json',
'method': 'POST',
},
'update_topic': {
'path': '/topics/{{topic_id}}.json',
'method': 'PUT',
},
'delete_topic': {
'path': '/topics/{{topic_id}}.json',
'method': 'DELETE',
},
# Topic Comments
'list_topic_comments': {
'path': '/topics/{{topic_id}}/comments.json',
'method': 'GET',
},
'list_topic_comments_for_user': {
'path': '/users/{{user_id}}/top_comments.json',
'method': 'GET',
},
'show_topic_comment': {
'path': '/topics/{{topic_id}}/comments/{{comment_id}}.json',
'method': 'GET',
},
'show_topic_comment_for_user': {
'path': '/users/{{user_id}}/top_comments/{{comment_id}}.json',
'method': 'GET',
},
'create_topic_comments': {
'path': '/topics/{{topic_id}}/comments.json',
'method': 'POST',
},
'update_topic_comment': {
'path': '/topics/{{topic_id}}/comments/{{comment_id}}.json',
'method': 'PUT',
},
'delete_topic_comment': {
'path': '/topics/{{topic_id}}/comments/{{comment_id}}.json',
'method': 'DELETE',
},
# Topic Subscriptions
'list_topic_subscriptions': {
'path': '/topic_subscriptions.json',
'method': 'GET',
},
'list_subscriptions_for_topic': {
'path': '/topic/{{topic_id}}/subscriptions.json',
'method': 'GET',
},
'show_topic_subscription': {
'path': '/topic_subscriptions/{{topic_subscription_id}}.json',
'method': 'GET',
},
'create_topic_subscription': {
'path': '/topic_subscriptions.json',
'method': 'POST',
},
'delete_topic_subscription': {
'path': '/topic_subscriptions/{{topic_subscription_id}}.json',
'method': 'DELETE',
},
# Topic Votes
'list_topic_votes': {
'path': '/topics/{{topic_id}}/votes.json',
'method': 'GET',
},
'list_topic_votes_for_user': {
'path': '/user/{{user_id}}/topic_votes.json',
'method': 'GET',
},
'show_topic_vote': {
'path': '/topics/{{topic_id}}/vote.json',
'method': 'GET',
},
'create_topic_vote': {
'path': '/topics/{{topic_id}}/vote.json',
'method': 'POST',
},
'delete_topic_vote': {
'path': '/topics/{{topic_id}}/vote.json',
'method': 'DELETE',
},
# Account Settings
'show_account_settings': {
'path': '/account/settings.json',
'method': 'GET',
},
# Activity Stream
'list_activities': {
'path': '/activities.json',
'method': 'GET',
},
'show_activity': {
'path': '/activities/{{activity_id}}.json',
'method': 'GET',
},
# Attachments
'upload_attachment': {
'path': '/uploads.json',
'method': 'POST',
},
# Job Statuses
'show_job_status': {
'path': '/job_statuses/{{job_id}}.json',
'method': 'GET',
},
# Locales
'list_locales': {
'path': '/locales.json',
'method': 'GET',
},
'list_locales_for_agents': {
'path': '/locales/agent.json',
'method': 'GET',
},
'show_locale': {
'path': '/locales/{{locale_id}}.json',
'method': 'GET',
},
'show_current_locale': {
'path': '/locales/current.json',
'method': 'GET',
},
# Macros
'list_macros': {
'path': '/macros.json',
'method': 'GET',
},
'list_active_macros': {
'path': '/macros/active.json',
'method': 'GET',
},
'show_macro': {
'path': '/macros/{{macro_id}}.json',
'method': 'GET',
},
'apply_macro': {
'path': '/macros/{{macro_id}}/apply.json',
'method': 'GET',
},
'apply_macro_for_ticket': {
'path': '/tickets/{{ticket_id}}/macros/{{macro_id}}/apply.json',
'method': 'GET',
},
# List Satisfaction Ratings
'list_satisfaction_ratings': {
'path': '/satisfaction_ratings.json',
'method': 'GET',
},
'list_received_satisfaction_ratings': {
'path': '/satisfaction_ratings/received.json',
'method': 'GET',
},
'show_satisfaction_rating': {
'path': '/satisfaction_ratings/{{satisfaction_rating_id}}.json',
'method': 'GET',
},
# Suspended Tickets
'list_suspended_tickets': {
'path': '/suspended_tickets.json',
'method': 'GET',
},
'show_suspended_ticket': {
'path': '/suspended_tickets/{{ticket_id}}.json',
'method': 'GET',
},
'recover_suspended_ticket': {
'path': '/suspended_tickets/{{ticket_id}}/recover.json',
'method': 'PUT',
},
'recover_many_suspended_tickets': {
'path': '/suspended_tickets/recover_many.json',
'valid_params' : ['ids'],
'method': 'PUT',
},
'delete_suspended_ticket': {
'path': '/suspended_tickets/{{ticket_id}}.json',
'method': 'DELETE',
},
'delete_many_suspended_tickets': {
'path': '/suspended_tickets/destroy_many.json',
'valid_params' : ['ids'],
'method': 'DELETE',
},
}
# Patch mapping table with the HTTP status code each call should expect:
# 201 Created for create_* endpoints, 200 OK for everything else.
# Uses .items() instead of the Python-2-only .iteritems() -- it behaves
# identically here and keeps the module importable under Python 3.
for method, api_map in mapping_table.items():
    status = 200
    if method.startswith('create_'):
        status = 201
    api_map['status'] = status
|
GbalsaC/bitnamiP
|
zendesk/zendesk/endpoints_v2.py
|
Python
|
agpl-3.0
| 17,249
|
# coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import pip
from .. import click, sync
from ..exceptions import PipToolsError
from ..logging import log
from ..utils import assert_compatible_pip_version, flat_map
# Make sure we're using a compatible version of pip
assert_compatible_pip_version()
DEFAULT_REQUIREMENTS_FILE = 'requirements.txt'
@click.command()
@click.version_option()
@click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
@click.option('--force', is_flag=True, help="Proceed even if conflicts are found")
@click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS')  # noqa
@click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)", envvar='PIP_INDEX_URL')
@click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL')  # noqa
@click.option('--no-index', is_flag=True, help="Ignore package index (only looking at --find-links URLs instead)")
@click.option('-q', '--quiet', default=False, is_flag=True, help="Give less output")
@click.argument('src_files', required=False, type=click.Path(exists=True), nargs=-1)
def cli(dry_run, force, find_links, index_url, extra_index_url, no_index, quiet, src_files):
    """Synchronize virtual environment with requirements.txt.

    Exits with status 2 on usage errors or merge conflicts; otherwise
    exits with the status returned by the underlying sync operation.
    """
    # Fall back to ./requirements.txt when no source files were given.
    if not src_files:
        if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
            src_files = (DEFAULT_REQUIREMENTS_FILE,)
        else:
            msg = 'No requirement files given and no {} found in the current directory'
            log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))
            sys.exit(2)
    # *.in files are pip-compile inputs, not pinned requirements; syncing
    # from them is almost certainly a mistake unless --force is given.
    if any(src_file.endswith('.in') for src_file in src_files):
        msg = ('Some input files have the .in extension, which is most likely an error and can '
               'cause weird behaviour. You probably meant to use the corresponding *.txt file?')
        if force:
            log.warning('WARNING: ' + msg)
        else:
            log.error('ERROR: ' + msg)
            sys.exit(2)
    # NOTE(review): session=True relies on parse_requirements only needing
    # a truthy session value in the pinned pip version -- confirm before
    # upgrading pip.
    requirements = flat_map(lambda src: pip.req.parse_requirements(src, session=True),
                            src_files)
    try:
        requirements = sync.merge(requirements, ignore_conflicts=force)
    except PipToolsError as e:
        log.error(str(e))
        sys.exit(2)
    # Diff what is installed against what the merged requirements demand.
    installed_dists = pip.get_installed_distributions(skip=[])
    to_install, to_uninstall = sync.diff(requirements, installed_dists)
    # Forward index/find-links configuration to the pip invocations.
    install_flags = []
    for link in find_links or []:
        install_flags.extend(['-f', link])
    if no_index:
        install_flags.append('--no-index')
    if index_url:
        install_flags.extend(['-i', index_url])
    if extra_index_url:
        for extra_index in extra_index_url:
            install_flags.extend(['--extra-index-url', extra_index])
    sys.exit(sync.sync(to_install, to_uninstall, verbose=(not quiet), dry_run=dry_run,
                       install_flags=install_flags))
|
nateprewitt/pipenv
|
pipenv/patched/piptools/scripts/sync.py
|
Python
|
mit
| 3,144
|
import wx
import urllib
import urllib2
import os
class customColorDialog(wx.Dialog):
    """Modal dialog with rows of (symbol text field, colour button) pairs.

    Used by the WebLogo plugin to collect a custom colour scheme; results
    are read back with GetReturn() after the dialog closes.
    """

    def DialogSetup(self):
        # Build the rows: one text field for symbols plus a colour button,
        # stacked vertically 30px apart (yPos 25..155 -> 5 rows).
        self.rows = []
        yPos = 25
        while yPos <= 155:
            temp = wx.TextCtrl(self, -1, "", size = (60, 20), pos=(20, yPos),
                               style = wx.TE_PROCESS_ENTER)
            button = wx.Button(self, -1, label = '', pos=(90,yPos),
                               size = (60, 20), style = wx.BORDER_RAISED)
            button.Bind(wx.EVT_BUTTON, self.selectColour)
            self.rows.append([temp, button])
            yPos += 30

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, title = 'Custom Color Dialog',
                           size = (170, 200))
        self.SetBackgroundColour('LIGHT GRAY')
        self.DialogSetup()
        # Column headers above the two widget columns.
        bgText = wx.StaticText(self, -1, "Symbols", style=wx.NO_BORDER,
                               pos = (30, 5), size = (80,-1))
        fgText = wx.StaticText(self, -1, "Color", style=wx.NO_BORDER,
                               pos = (105,5), size = (80,-1))
        self.Bind(wx.EVT_CLOSE, self.doExit)

    def selectColour(self, event):
        # Display the colour dialog and allow user selection.
        dlg = wx.ColourDialog(self)
        dlg.GetColourData().SetChooseFull(True)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetColourData()
            rgb = data.GetColour().Get()
            evtObj = event.GetEventObject()
            # Paint the pressed button with the chosen colour so the user
            # sees the selection; GetReturn() reads it back later.
            eoId = evtObj.SetBackgroundColour(rgb)
            self.Refresh()
        dlg.Destroy()

    def doExit(self, event):
        self.Destroy()

    def GetReturn(self):
        # Collect user input as {'symbols<i>': str, 'color<i>': 'rgb(r,g,b)'}.
        ret = {}
        for i,r in enumerate(self.rows):
            ret['symbols' + str(i)] = str(r[0].GetValue())
            rgb = r[1].GetBackgroundColour()
            s = 'rgb('
            for c in rgb:
                s += str(c) + ','
            s = s[:-1] + ')'
            # This rgb value is the default (untouched) button colour on
            # the target platform -- treated as "no colour chosen".
            if s == 'rgb(212,208,200)':
                s = ''
            ret['color' + str(i)] = s
        return ret
class Plugin():
    """BioGUI view plugin that renders a sequence WebLogo.

    Builds an options panel, submits the loaded alignment to the WebLogo
    web service (weblogo.threeplusone.com) and displays the returned PNG.
    Entry point is GetExec(); the host application calls Clear() when the
    view is switched away.
    """

    def Clear(self):
        # Hide our widgets when the plugin view is deactivated.
        if hasattr(self, 'coverPanel'):
            self.coverPanel.Show(False)
            self.frameBox.Show(False)
            self.createButton.Show(False)

    def SaveImg(self, event):
        # Right-click handler on the logo image: save it as a PNG file.
        fname = 'WebLogo'
        #curDir = os.getcwd()
        fileName = wx.FileSelector("Save File As", "Saving",
                                   default_filename = fname,
                                   default_extension = "png",
                                   wildcard = "*.png",
                                   flags = wx.SAVE | wx.OVERWRITE_PROMPT)
        if not fileName == "":
            self.wl.SaveFile(fileName, wx.BITMAP_TYPE_PNG)
        #os.chdir(curDir)

    def CoverInit(self):
        # Panel that hosts the logo image, plus a scrollable options box.
        self.coverPanel = wx.Panel(self.bigPanel, -1, pos = (0, 0),
                                   size = (self.bPSize[0] - 277,
                                           self.bPSize[1] - 8))
        self.coverPanel.Show(True)
        fs = self.frame.GetSize()
        self.frameBox = wx.ScrolledWindow(self.frame, -1, pos = (2, 80),
                                          size = (fs[0] - 10, fs[1] - 120),
                                          style = wx.VSCROLL|wx.BORDER_SUNKEN)
        self.frameBox.SetBackgroundColour('WHITE')
        self.frameBox.Show(True)

    def FrameBoxFill(self):
        # Lay out one labelled control per option, 30px per row; two-line
        # labels get nudged up, one-line labels down, to centre them.
        yPos = 5
        for opt in self.options:
            if '\n' in opt[1]:
                yp = yPos - 3
            else:
                yp = yPos + 3
            dummy = wx.StaticText(self.frameBox, -1, opt[1], pos=(3,yp))
            opt[2].SetSize((self.frameBox.GetSize()[0] - 80, -1))
            osi = opt[2].GetSize()
            # Vertically centre controls that are not the standard 21px tall.
            if not osi[1] == 21:
                opt[2].SetPosition((57, yPos + (21-osi[1])/2))
            else:
                opt[2].SetPosition((57, yPos))
            opt[2].SetValue(opt[3])
            yPos += 30

    def OptionsInit(self):
        # Choice lists mirror the WebLogo CGI parameter values.
        ta = ['alphabet_auto','alphabet_protein','alphabet_dna','alphabet_rna']
        tb = ['probability','bits','nats','kT','kJ/mol','kcal/mol']
        tc = ['comp_none','comp_auto','comp_equiprobable','comp_CG',
              'comp_Celegans','comp_Dmelanogaster','comp_Ecoli',
              'comp_Hsapiens','comp_Mmusculus','comp_Scerevisiae']
        td = ['color_auto','color_monochrome','color_base_pairing',
              'color_classic','color_hydrophobicity','color_chemistry',
              'color_charge','color_custom']
        # Each entry: [CGI parameter name, label, control, default value].
        self.options = [['stacks_per_line','Residues\nper line:',
                         wx.SpinCtrl(self.frameBox, -1),40],
                        ['alphabet','Sequence\ntype:',
                         wx.ComboBox(self.frameBox,-1,choices=ta,
                                     style=wx.CB_READONLY),ta[0]],
                        ['unit_name','Units:',
                         wx.ComboBox(self.frameBox,-1,choices=tb,
                                     style=wx.CB_READONLY),tb[1]],
                        ['logo_start','Start res.:',
                         wx.TextCtrl(self.frameBox, -1, "",),'1'],
                        ['logo_end','Final res.:',
                         wx.TextCtrl(self.frameBox, -1, "",),
                         str(len(self.rec[0].seq))],
                        ['composition','Comp.:',
                         wx.ComboBox(self.frameBox,-1,choices=tc,
                                     style=wx.CB_READONLY),tc[1]],
                        ['show_errorbars','Error\nbars?',
                         wx.CheckBox(self.frameBox, -1, label=""),True],
                        ['logo_title','Title:',
                         wx.TextCtrl(self.frameBox, -1, ""),''],
                        ['logo_label','Label:',
                         wx.TextCtrl(self.frameBox, -1, ""),''],
                        ['show_xaxis','X-axis?',
                         wx.CheckBox(self.frameBox, -1, label=""),True],
                        ['show_yaxis','Y-axis?',
                         wx.CheckBox(self.frameBox, -1, label=""),True],
                        ['show_ends','Seq.\nends?',
                         wx.CheckBox(self.frameBox, -1, label=""),True],
                        ['show_fineprint','Fine\nprint?',
                         wx.CheckBox(self.frameBox, -1, label=""),True],
                        ['color_scheme','Coloring:',
                         wx.ComboBox(self.frameBox,-1,choices=td,
                                     style=wx.CB_READONLY),td[0]]]
        self.FrameBoxFill()

    def MessageDia(self, string):
        # Small helper: show a modal error message.
        dialog = wx.MessageDialog(self.frame, string, 'Error', style=wx.OK)
        dialog.ShowModal()
        dialog.Destroy()

    def SanitizeChecks(self):
        # The WebLogo CGI treats checkbox parameters as "present = on", so
        # unchecked flags must be removed entirely; checked axes also
        # prompt the user for an axis label.
        if not self.para['show_errorbars']:
            del self.para['show_errorbars']
        if not self.para['show_ends']:
            del self.para['show_ends']
        if not self.para['show_fineprint']:
            del self.para['show_fineprint']
        if not self.para['show_xaxis']:
            del self.para['show_xaxis']
        else:
            dialog = wx.TextEntryDialog(self.frame,
                                        "Enter X-axis label.",
                                        "X-axis label", "",
                                        style=wx.OK|wx.CANCEL)
            if dialog.ShowModal() == wx.ID_OK:
                dV = dialog.GetValue()
            else:
                dV = ''
            self.para['xaxis_label'] = str(dV)
            dialog.Destroy()
        if not self.para['show_yaxis']:
            del self.para['show_yaxis']
        else:
            dialog = wx.TextEntryDialog(self.frame,
                                        "Enter Y-axis label.",
                                        "Y-axis label", "",
                                        style=wx.OK|wx.CANCEL)
            if dialog.ShowModal() == wx.ID_OK:
                dV = dialog.GetValue()
            else:
                dV = ''
            self.para['yaxis_label'] = str(dV)
            dialog.Destroy()

    def SanitizeNucleo(self):
        # Nucleotide-specific compositions only make sense if every
        # character is a standard nucleotide; otherwise (or on bad user
        # input) fall back to comp_auto and tell the user.
        i = 0
        lines = self.para['sequences']
        while i < len(lines) and lines[i] in 'ATGCU -\n':
            i += 1
        if i == len(lines) and self.para['composition'] == 'comp_CG':
            dialog = wx.TextEntryDialog(self.frame,
                                        "Enter expected CG content.",
                                        "CG Content", "",
                                        style=wx.OK|wx.CANCEL)
            if dialog.ShowModal() == wx.ID_OK:
                dV = dialog.GetValue()
                if dV.isdigit() and int(dV) < 100:
                    self.para['percentCG'] = str(dV)
                else:
                    self.para['composition'] = 'comp_auto'
                    s = 'Value entered was incorrect.\n'
                    s += 'Automatic composition used instead.'
                    self.MessageDia(s)
            else:
                self.para['composition'] = 'comp_auto'
                s = 'User cancelled before entering a valid composition.\n'
                s += 'Automatic composition used instead.'
                self.MessageDia(s)
            dialog.Destroy()
        else:
            self.para['composition'] = 'comp_auto'
            s = 'The supplied sequences contained characters\n'
            s += 'not in the set of standard nucleotides, [A,T,G,C,U].\n'
            s += 'Automatic composition was used instead.'
            self.MessageDia(s)

    def ShowImage(self, event):
        # CREATE button handler: collect options, POST the sequences to
        # the WebLogo CGI, save the returned PNG and display it.
        if hasattr(self, 'display'):
            self.display.Show(False)
        u = 'http://weblogo.threeplusone.com/create.cgi'
        lines = ''
        for r in self.rec:
            lines += r.seq + '\n'
        self.para = {}
        self.para['sequences']=lines
        for opt in self.options:
            self.para[opt[0]] = opt[2].GetValue()
        self.SanitizeChecks()
        if not self.para['composition'] in ['comp_none','comp_auto']:
            self.SanitizeNucleo()
        if self.para['color_scheme'] == 'color_custom':
            # Ask the user for a per-symbol colour scheme.
            cCD = customColorDialog(self.frame)
            cCD.ShowModal()
            cols = cCD.GetReturn()
            for k in cols.keys():
                self.para[k] = cols[k]
        params = urllib.urlencode(self.para)
        imgpage = urllib.urlopen(u, params)
        iFile = './Plugins/weblogo.png'
        img = open(iFile,'wb')
        img.write(imgpage.read())
        img.close()
        self.wl = wx.Image(iFile, wx.BITMAP_TYPE_ANY)
        self.wl = self.wl.Rescale(self.bPSize[0] - 267,self.bPSize[1]-5)
        self.display = wx.StaticBitmap(self.coverPanel, -1, pos = (0, 0),
                                       bitmap = self.wl.ConvertToBitmap(),
                                       size=(self.bPSize[0]-267,self.bPSize[1]-5))
        # Right-click on the image offers to save it.
        self.display.Bind(wx.EVT_RIGHT_DOWN, self.SaveImg)

    def GetExec(self, fr, bp, rec, cL):
        # Plugin entry point called by the host: fr=frame, bp=big panel,
        # rec=alignment records, cL=colour list.
        self.frame = fr
        self.bigPanel = bp
        self.bPSize = bp.GetSize()
        self.colorList = cL
        self.rec = rec
        self.CoverInit()
        self.OptionsInit()
        self.createButton = wx.Button(self.frame, -1, "CREATE",
                                      pos = (5,self.frame.GetSize()[1] - 35),
                                      size = (self.frame.GetSize()[0] - 10,25))
        self.frame.Bind(wx.EVT_BUTTON, self.ShowImage, self.createButton)
        self.frameBox.SetScrollbars(0, 1, 0, len(self.options)*30+13)
        self.frameBox.SetScrollRate(15, 35)
def GetName():
    """Return the display name of this plugin."""
    plugin_name = "WebLogo"
    return plugin_name
|
fxb22/BioGUI
|
plugins/Views/AlignViewPlugins/WebLogo.py
|
Python
|
gpl-2.0
| 11,645
|
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ListProperty, NumericProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.widget import Widget
class RootWidget(BoxLayout):
    """Top-level layout; relays update() ticks to its children."""

    def __init__(self, **kwargs):
        super(RootWidget, self).__init__(**kwargs)
        # Label was used without being imported (NameError at runtime);
        # the import is added at the top of the file.
        self.add_widget(Label(text='Pressure'))

    def update(self):
        # Only forward the tick to children that actually implement
        # update(); plain widgets such as Label do not, and calling it
        # unconditionally crashed on the first scheduled tick.
        for child in self.children:
            if hasattr(child, 'update'):
                child.update()
class CustomBtn(Widget):
    """Widget that records the position of the last touch in `pressed`."""

    # Position of the most recent touch inside this widget.
    pressed = ListProperty([0, 0])

    def on_touch_down(self, touch):
        if self.collide_point(*touch.pos):
            self.pressed = touch.pos
            # we consumed the touch. return False here to propagate
            # the touch further to the children.
            return True
        return super(CustomBtn, self).on_touch_down(touch)

    def on_pressed(self, instance, pos):
        print('pressed at {pos}'.format(pos=pos))

# NOTE(review): removed a stray module-level
# `Clock.schedule_interval(update, 1 / 60.)` that stood here -- `update`
# is not defined at module scope, so the call raised NameError on import.
# Scheduling is done in MyApp.build() instead.
class LabelValue(Widget):
    """Widget pairing a text label with a numeric value."""

    text = StringProperty(None)
    value = NumericProperty(None)

    def update(self):
        # NOTE(review): the original body was syntactically incomplete
        # ("self.text =" / "self.value = variables[]").  Presumably the
        # value is looked up in a module-level `variables` mapping keyed
        # by this widget's text -- TODO confirm against the data source.
        self.value = variables[self.text]

    def on_touch_down(self, touch):
        if self.collide_point(*touch.pos):
            self.pressed = touch.pos
            # we consumed the touch. return False here to propagate
            # the touch further to the children.
            return True
        # Original said `LebeledValue`, an undefined name; the class is
        # LabelValue.
        return super(LabelValue, self).on_touch_down(touch)

    def on_pressed(self, instance, pos):
        print('pressed at {pos}'.format(pos=pos))
class MyApp(App):
    """Application entry point: builds the root widget and schedules
    its update() to run 60 times per second."""

    def build(self):
        root = RootWidget()
        Clock.schedule_interval(root.update, 1.0 / 60.0)
        return root
if __name__ == '__main__':
    # The application class defined above is MyApp; the original called
    # `TestApp()`, an undefined name.
    MyApp().run()
|
victor-rene/MicroScada
|
archive/day_01/main.py
|
Python
|
mit
| 1,614
|
# pylint: disable=unused-wildcard-import, wildcard-import
from .base import *
# Development-only overrides on top of the shared base settings; never
# run production with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = True
# Target directory for collectstatic, relative to the project root.
STATIC_ROOT = 'static'
|
SgtHotshot/forge-cortex
|
cortex/settings/dev.py
|
Python
|
mit
| 149
|
from __future__ import print_function, division
from contextlib import closing
import gzip
import optparse
import os
import shutil
import struct
import tempfile
import sys
import subprocess
import itertools
from concurrent.futures import ThreadPoolExecutor, as_completed
from colorama import init, Fore, Style
import guessit
init(autoreset=True)
__version__ = '1.5.2'
if sys.version_info[0] == 3: # pragma: no cover
from urllib.request import urlopen
from xmlrpc.client import ServerProxy
from configparser import RawConfigParser
else: # pragma: no cover
from urllib import urlopen
from xmlrpclib import Server as ServerProxy
from ConfigParser import RawConfigParser
def obtain_guessit_query(movie_filename, language):
    """Build an OpenSubtitles text query from guessit's parse of the name.

    Episodes search by title/episode-title/release-group (plus season and
    episode numbers when known); movies search by title/year.
    """
    guess = guessit.guessit(os.path.basename(movie_filename))

    def extract_query(guess, parts):
        # Quote each recognized part so it is searched verbatim.
        pieces = ['"%s"' % guess.get(key) for key in parts if guess.get(key)]
        return ' '.join(pieces)

    result = {'sublanguageid': language}
    kind = guess.get('type')
    if kind == 'episode':
        result['query'] = extract_query(guess, ['title', 'episode_title', 'release_group'])
        for key in ('season', 'episode'):
            if key in guess:
                result[key] = guess[key]
    elif kind == 'movie':
        result['query'] = extract_query(guess, ['title', 'year'])
    else:  # pragma: no cover
        assert False, 'internal error: guessit guess: {0}'.format(guess)
    return result
def obtain_movie_hash_query(movie_filename, language):
    """Build an OpenSubtitles search query from the file's hash and size."""
    query = {'sublanguageid': language}
    query['moviehash'] = calculate_hash_for_file(movie_filename)
    query['moviebytesize'] = str(os.path.getsize(movie_filename))
    return query
def filter_bad_results(search_results, guessit_query):
    """
    Drop results whose season/episode don't match the guessed query.

    OpenSubtitles sometimes reports subtitles belonging to a different
    episode or season of a tv show; no reason why, but simply discarding
    those seems to work well.  Only applies when the query knows both the
    season and the episode number.
    """
    keys = ('season', 'episode')
    if all(k in guessit_query for k in keys):
        wanted = (guessit_query['season'], guessit_query['episode'])
        search_results = [
            r for r in search_results
            if (int(r['SeriesSeason']), int(r['SeriesEpisode'])) == wanted
        ]
    return search_results
def query_open_subtitles(movie_filename, language):
    """Search OpenSubtitles for subtitles matching *movie_filename*.

    Performs an anonymous XML-RPC login, issues both a guessit-based text
    query and a hash/size query, filters out obviously wrong episodes,
    and always logs out again.
    """
    uri = 'http://api.opensubtitles.org/xml-rpc'
    server = ServerProxy(uri, verbose=0, allow_none=True, use_datetime=True)
    # Anonymous login; the user-agent string identifies this client/version.
    login_info = server.LogIn('', '', 'en', 'ss v' + __version__)
    token = login_info['token']
    try:
        guessit_query = obtain_guessit_query(movie_filename, language)
        search_queries = [
            guessit_query,
            obtain_movie_hash_query(movie_filename, language),
        ]
        response = server.SearchSubtitles(token, search_queries)
        try:
            search_results = response['data']
        except KeyError:  # noqa
            # Re-raise with the full response for easier debugging.
            raise KeyError('"data" key not found in response: %r' % response)
        if search_results:
            search_results = filter_bad_results(search_results, guessit_query)
        return search_results
    finally:
        # Release the session token even if the search failed.
        server.LogOut(token)
def find_subtitle(movie_filename, language):
    """Return (download_url, dotted_extension) of the best-ranked subtitle.

    Both values are None when the search yields nothing.
    """
    results = query_open_subtitles(movie_filename, language)
    if not results:
        return None, None
    best = results[0]
    return best['SubDownloadLink'], '.' + best['SubFormat']
def obtain_subtitle_filename(movie_filename, language, subtitle_ext, multi):
    """Return the subtitle path derived from *movie_filename*.

    With ``multi`` the language code is embedded before the extension
    (movie.eng.srt) so several languages can coexist; otherwise the movie
    extension is simply replaced.
    """
    root = os.path.splitext(movie_filename)[0]
    if multi:
        return root + '.' + language + subtitle_ext
    return root + subtitle_ext
def download_subtitle(subtitle_url, subtitle_filename):
    """Download a gzip-compressed subtitle and write it, decompressed,
    to *subtitle_filename*."""
    # first download it and save to a temp dir
    with closing(urlopen(subtitle_url)) as urlfile:
        gzip_subtitle_contents = urlfile.read()
    tempdir = tempfile.mkdtemp()
    try:
        basename = subtitle_url.split('/')[-1]
        tempfilename = os.path.join(tempdir, basename)
        with open(tempfilename, 'wb') as f:
            f.write(gzip_subtitle_contents)
        # OpenSubtitles serves subtitles gzipped; decompress before saving.
        with closing(gzip.GzipFile(tempfilename, 'rb')) as f:
            subtitle_contents = f.read()
        # copy it over the new filename
        with open(subtitle_filename, 'wb') as f:
            f.write(subtitle_contents)
    finally:
        # Always remove the temporary download directory.
        shutil.rmtree(tempdir)
def find_movie_files(input_names, recursive=False):
    """Yield movie files from the given file and directory names.

    File arguments are yielded directly (regardless of extension, as
    before); directory arguments are scanned for files with a known movie
    extension, descending into subdirectories when *recursive* is true.
    Each path is yielded at most once.
    """
    extensions = set(['.avi', '.mp4', '.mpg', '.mkv'])
    returned = set()
    for input_name in input_names:
        if os.path.isfile(input_name):
            # Fix: a duplicate file argument previously fell through to
            # the directory branch and crashed in os.listdir().
            if input_name not in returned:
                yield input_name
                returned.add(input_name)
        else:
            names = os.listdir(input_name)
            for name in names:
                result = os.path.join(input_name, name)
                # splitext is more robust than slicing the last 4 chars
                # (which silently assumed every extension is 4 chars long).
                if os.path.splitext(name)[1] in extensions:
                    if result not in returned:
                        yield result
                        returned.add(result)
                elif os.path.isdir(result) and recursive:
                    for x in find_movie_files([result], recursive):
                        yield x
def has_subtitle(filename, language, multi):
    """Return True when a subtitle file already exists for *filename*."""
    # list of subtitle formats obtained from opensubtitles' advanced search page.
    known_formats = ('.sub', '.srt', '.ssa', '.smi', '.mpl')
    return any(
        os.path.isfile(obtain_subtitle_filename(filename, language, ext, multi))
        for ext in known_formats
    )
def search_and_download(movie_filename, language, multi):
    """Find, download and save a subtitle for *movie_filename*.

    Returns the subtitle filename on success, or None when no subtitle
    was found for the requested language.
    """
    url, ext = find_subtitle(movie_filename, language=language)
    if not url:
        return None
    target = obtain_subtitle_filename(movie_filename, language, ext,
                                      multi=multi)
    download_subtitle(url, target)
    return target
def load_configuration(filename):
    """Read the ``[ss]`` section of *filename* into a Configuration.

    Options absent from the file keep Configuration's defaults; a missing
    file yields an all-defaults Configuration.
    """
    p = RawConfigParser()
    # Pre-create the section so has_option() is safe on empty/missing files.
    p.add_section('ss')
    p.read(filename)

    def read_if_defined(option, getter):
        # Copy the option onto `config` only when present, using the
        # type-appropriate RawConfigParser getter (getboolean/getint).
        if p.has_option('ss', option):
            value = getattr(p, getter)('ss', option)
            setattr(config, option, value)

    config = Configuration()
    read_if_defined('recursive', 'getboolean')
    read_if_defined('skip', 'getboolean')
    read_if_defined('mkv', 'getboolean')
    read_if_defined('parallel_jobs', 'getint')
    if p.has_option('ss', 'languages'):
        # languages is a comma-separated list, e.g. "eng, pob".
        value = p.get('ss', 'languages')
        config.languages = [x.strip() for x in value.split(',')]
    return config
def calculate_hash_for_file(name):
    '''
    Calculates the hash for the given filename.

    Algorithm from: http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes

    The hash is the file size plus the 64-bit wrapping sums of the first
    and the last 64 KiB of the file.

    @param name: str
        Path to the file

    @return: str
        The calculated hash code, as an hex string.

    @raise AssertionError: if the file is smaller than 128 KiB.
    '''
    longlongformat = 'q'  # machine-native signed 64-bit integer
    bytesize = struct.calcsize(longlongformat)

    filesize = os.path.getsize(name)
    hash = filesize

    minimum_size = 65536 * 2
    assert filesize >= minimum_size, \
        'Movie {name} must have at least {min} bytes'.format(min=minimum_size,
                                                             name=name)

    # A context manager guarantees the handle is closed on every path
    # (previously the handle could be leaked if reading raised).
    with open(name, "rb") as f:
        # Sum the first 64 KiB, 8 bytes at a time.
        for _ in range(65536 // bytesize):
            chunk = f.read(bytesize)
            (l_value,) = struct.unpack(longlongformat, chunk)
            hash += l_value
            hash = hash & 0xFFFFFFFFFFFFFFFF  # to remain as 64bit number

        # Sum the last 64 KiB the same way.
        f.seek(max(0, filesize - 65536), 0)
        for _ in range(65536 // bytesize):
            chunk = f.read(bytesize)
            (l_value,) = struct.unpack(longlongformat, chunk)
            hash += l_value
            hash = hash & 0xFFFFFFFFFFFFFFFF

    return "%016x" % hash
class Configuration(object):
    """Runtime options for ss: value object with equality and printing."""

    # Attribute names that participate in equality and repr.
    attrs = 'languages recursive skip mkv parallel_jobs'.split()

    def __init__(self, languages=('eng',), recursive=False, skip=False,
                 mkv=False, parallel_jobs=8):
        self.languages = list(languages)
        self.recursive = recursive
        self.skip = skip
        self.mkv = mkv
        self.parallel_jobs = parallel_jobs

    def __eq__(self, other):
        return all(getattr(self, name) == getattr(other, name)
                   for name in self.attrs)

    def __ne__(self, other):  # pragma: no cover
        return not self == other

    def __repr__(self):  # pragma: no cover
        body = ', '.join('{attr}={value}'.format(attr=name,
                                                 value=getattr(self, name))
                         for name in self.attrs)
        return 'Configuration({0})'.format(body)

    def __str__(self):
        lines = [
            'languages = %s' % ', '.join(self.languages),
            'recursive = %s' % self.recursive,
            'skip = %s' % self.skip,
            'mkv = %s' % self.mkv,
            'parallel_jobs = %d' % self.parallel_jobs,
        ]
        return '\n'.join(lines)
def main(argv=sys.argv, stream=sys.stdout):
    """
    Command-line entry point: find movie files, download subtitles for the
    configured languages in parallel, and optionally embed them into MKVs.

    :param argv: full argument vector; argv[1:] are files or directories.
    :param stream: writable stream that receives all user-facing output.
    :return: process exit code (0 success, 1 no files, 2 usage,
        4 mkvmerge missing).
    """
    parser = optparse.OptionParser(
        usage='Usage: ss [options] <file or dir> <file or dir>...',
        description='Searches for subtitles using OpenSubtitles (http://www.opensubtitles.org).\n\nVersion: %s' % __version__,
        epilog='If a directory is given, search for subtitles for all movies on it (non-recursively).',
    )
    parser.add_option('-v', '--verbose',
                      help='always displays configuration and enable verbose mode.',
                      action='store_true', default=False)
    options, args = parser.parse_args(args=argv)

    config_filename = os.path.join(os.path.expanduser('~'), '.ss.ini')
    config = load_configuration(config_filename)
    if options.verbose:
        # All output goes through `stream` (not stdout) so callers/tests can
        # capture it — previously these two prints bypassed `stream`.
        print('Configuration read from {0}'.format(config_filename),
              file=stream)
        print(config, file=stream)
        print(file=stream)

    if len(args) < 2:
        parser.print_help(file=stream)
        return 2

    input_filenames = list(find_movie_files(args[1:], recursive=config.recursive))
    if not input_filenames:
        print('No files to search subtitles for. Aborting.', file=stream)
        return 1

    # Fail early if MKV embedding was requested but mkvmerge is unavailable.
    if config.mkv:
        if not check_mkv_installed():
            print('mkvmerge not found in PATH.', file=stream)
            print('Either install mkvtoolnix or disable mkv merging ' +
                  'in your config.', file=stream)
            return 4

    header = Fore.WHITE + Style.BRIGHT
    lang_style = Fore.CYAN + Style.BRIGHT
    languages = ', '.join(
        lang_style + x + Style.RESET_ALL for x in config.languages)
    msg = '{header}Languages: {languages}'.format(header=header,
                                                  languages=languages)
    print(msg, file=stream)
    print(file=stream)

    # With multiple languages, subtitle filenames carry a language suffix.
    multi = len(config.languages) > 1

    # Collect (movie, language) pairs that already have subtitles on disk.
    to_skip = set()
    if config.skip:
        for input_filename in input_filenames:
            for language in config.languages:
                if has_subtitle(input_filename, language, multi):
                    to_skip.add((input_filename, language))

        if to_skip:
            print('Skipping %d subtitles.' % len(to_skip), file=stream)

    def print_status(text, status):
        # Right-align `status` at column 70, keeping at least two spaces.
        spaces = max(70 - len(text), 2)
        print('{text}{spaces}{status}'.format(
            text=text, spaces=' ' * spaces, status=status), file=stream)

    to_query = set(itertools.product(input_filenames, config.languages))
    to_query.difference_update(to_skip)
    if not to_query:
        return 0

    header_style = Fore.WHITE + Style.BRIGHT
    print(header_style + 'Downloading', file=stream)
    print(file=stream)

    matches = []
    to_query = sorted(to_query)
    # Search/download in parallel; report each result as it completes.
    with ThreadPoolExecutor(max_workers=config.parallel_jobs) as executor:
        future_to_movie_and_language = {}
        for movie_filename, language in to_query:
            f = executor.submit(search_and_download, movie_filename,
                                language=language, multi=multi)
            future_to_movie_and_language[f] = (movie_filename, language)

        for future in as_completed(future_to_movie_and_language):
            movie_filename, language = future_to_movie_and_language[future]
            exception = future.exception()
            if exception is None:
                subtitle_filename = future.result()
                if subtitle_filename:
                    status = Fore.GREEN + '[OK]'
                    matches.append((movie_filename, language, subtitle_filename))
                else:
                    status = Fore.RED + '[Not found]'
            else:
                status = Fore.RED + '[ERROR]: {}'.format(str(exception))

            name = os.path.basename(movie_filename)
            print_status(
                name,
                status='{lang_color}{lang} {status}'.format(
                    lang_color=Fore.CYAN + Style.BRIGHT,
                    lang=language,
                    status=status))

    if config.mkv:
        print(file=stream)
        print(header_style + 'Embedding MKV', file=stream)
        print(file=stream)

        failures = []  # list of (movie_filename, output)
        to_embed = {}  # dict of movie -> list of (language, subtitle_filename)
        for movie_filename, language, subtitle_filename in matches:
            to_embed.setdefault(movie_filename, []).append((language,
                                                            subtitle_filename))
        to_embed = sorted(to_embed.items())

        with ThreadPoolExecutor(max_workers=config.parallel_jobs) as executor:
            future_to_mkv_filename = {}
            for movie_filename, subtitles in to_embed:
                subtitles.sort()
                movie_ext = os.path.splitext(movie_filename)[1].lower()
                mkv_filename = os.path.splitext(movie_filename)[0] + u'.mkv'
                # Skip movies that already are .mkv, and never overwrite an
                # existing .mkv file.
                if movie_ext != u'.mkv' and not os.path.isfile(mkv_filename):
                    f = executor.submit(embed_mkv, movie_filename, subtitles)
                    future_to_mkv_filename[f] = (mkv_filename, movie_filename)
                else:
                    print_status(os.path.basename(mkv_filename),
                                 Style.BRIGHT + Fore.YELLOW + '[skipped]')

            for future in as_completed(future_to_mkv_filename):
                mkv_filename, movie_filename = future_to_mkv_filename[future]
                status, output = future.result()
                if not status:
                    failures.append((movie_filename, output))
                status = Fore.GREEN + '[OK]' if status else Fore.RED + '[ERROR]'
                status = Style.BRIGHT + status
                print_status(os.path.basename(mkv_filename), status)

        if failures:
            print('_' * 80, file=stream)
            for movie_filename, output in failures:
                print(':%s:' % movie_filename, file=stream)
                print(output, file=stream)

    return 0
def embed_mkv(movie_filename, subtitles):
    """Run mkvmerge to embed subtitle tracks into an .mkv alongside the movie.

    :param movie_filename: path to the source movie.
    :param subtitles: iterable of (language, subtitle_filename) pairs.
    :return: (True, '') on success, (False, mkvmerge_output) on failure.
    """
    target = os.path.splitext(movie_filename)[0] + u'.mkv'
    command = [
        u'mkvmerge',
        u'--output', target,
        movie_filename,
    ]
    for language, subtitle_filename in sorted(subtitles):
        track_language = convert_language_code_to_iso639_2(language)
        # Each subtitle file contributes a track tagged with its language.
        command.append(u'--language')
        command.append(u'0:{0}'.format(track_language))
        command.append(subtitle_filename)

    try:
        check_output(command)
    except subprocess.CalledProcessError as e:
        return False, e.output
    return True, ''
def convert_language_code_to_iso639_2(lang_code):
    """
    Translate OpenSubtitle language code to its iso-639-2 equivalent.

    OpenSubtitles seem to support some extensions to iso-639-2, for instance
    "pob" means "brazilian portuguese".

    See http://www.opensubtitles.org/addons/export_languages.php.

    :param str lang_code: original language code from OpenSubtitles
    :return: iso-639-2 compatible
    """
    # Codes not in the alias table are assumed to already be iso-639-2.
    aliases = {'pob': 'por', 'pb': 'por'}
    return aliases.get(lang_code, lang_code)
def check_mkv_installed():
    """
    Returns True if mkvtoolnix seems to be installed.

    Tries to execute ``mkvmerge --version``; any failure — a non-zero exit
    status or the executable missing from PATH — means "not installed".
    """
    try:
        check_output([u'mkvmerge', u'--version'])
    # OSError covers the executable being absent from PATH, which would
    # otherwise propagate as a crash instead of a False result.
    except (subprocess.CalledProcessError, OSError):
        return False
    else:
        return True
def check_output(params):
    """
    Python 2.6 support: subprocess.check_output from Python 2.7.

    Runs `params` and returns the combined stdout/stderr output.

    :param params: list of command-line arguments, program name first.
    :return: the captured output (bytes).
    :raises subprocess.CalledProcessError: when the process exits non-zero;
        the exception's `output` attribute carries the captured output.
    """
    # shell must be False here: with shell=True and a *list* argument, POSIX
    # only executes the first element and silently drops the remaining
    # arguments, so e.g. `mkvmerge --output ...` would run with no arguments.
    popen = subprocess.Popen(params, shell=False, stderr=subprocess.STDOUT,
                             stdout=subprocess.PIPE)
    output, _ = popen.communicate()
    returncode = popen.poll()
    if returncode != 0:
        error = subprocess.CalledProcessError(returncode=returncode, cmd=params)
        error.output = output
        raise error
    return output
# Script entry point: the process exit code mirrors main()'s return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv))  # pragma: no cover
|
nicoddemus/ss
|
ss.py
|
Python
|
gpl-3.0
| 17,390
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Movie Ultra 7K -
# Version 0.2.9 (18.07.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
# Addon metadata, resolved once at import time from the Kodi addon manifest.
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
# Function that drives the process of rebuilding the original stream URL.
def freebroadcast(params):
    # params["url"] holds a space-separated list of key=value components
    # (rtmp=..., playpath=..., swfUrl=..., pageUrl=..., token=..., referer=...).
    plugintools.log("[movie.ultra.7k-0.3.0].freebroadcast "+repr(params))
    url_user = {}
    # Build the dictionary from the key=value entries...
    url = params.get("url")
    url_extracted = url.split(" ")
    for entry in url_extracted:
        if entry.startswith("rtmp"):
            entry = entry.replace("rtmp=", "")
            url_user["rtmp"]=entry
        elif entry.startswith("playpath"):
            entry = entry.replace("playpath=", "")
            url_user["playpath"]=entry
        elif entry.startswith("swfUrl"):
            entry = entry.replace("swfUrl=", "")
            url_user["swfurl"]=entry
        elif entry.startswith("pageUrl"):
            entry = entry.replace("pageUrl=", "")
            url_user["pageurl"]=entry
        elif entry.startswith("token"):
            entry = entry.replace("token=", "")
            url_user["token"]=entry
        elif entry.startswith("referer"):
            entry = entry.replace("referer=", "")
            url_user["referer"]=entry
    plugintools.log("URL_user dict= "+repr(url_user))
    pageurl = url_user.get("pageurl")
    # Handle both URL cases: a single link (pageUrl) or a full rtmp:// link.
    if pageurl is None:
        # NOTE(review): url_user never stores a "url" key above, so this
        # fallback yields None — confirm intended behavior.
        pageurl = url_user.get("url")
    referer= url_user.get("referer")
    if referer is None:
        referer = 'http://www.juanin.tv'
    # channel_id = re.compile('channel=([^&]*)').findall(pageurl)
    # print channel_id
    # channel_id = channel_id[0]
    # The embed page URL is rebuilt from the playpath (channel name).
    pageurl = 'http://freebroadcast.pw/embed/embed.php?n=' + url_user.get("playpath") + '&w=670&h=400'
    url_user["pageurl"]=pageurl
    print 'pageurl',pageurl
    print 'referer',referer
    body = gethttp_headers(pageurl, referer)
    getparams_freebroadcast(url_user, body)
    # Assemble the final rtmp URL from the scraped streamer IP.
    url = url_user.get("ip") + ' playpath=' + url_user.get("playpath") + ' swfUrl=http://freebroadcast.pw/player/player.swf pageUrl=' + url_user.get("pageurl") + ' live=1 timeout=10'
    plugintools.play_resolved_url(url)
# Issue a request to the pageUrl.
def gethttp_headers(pageurl, referer):
    # Fetch the embed page with a browser User-Agent and return its body.
    # NOTE(review): `referer` is accepted but the Referer header is
    # commented out below — confirm whether it should be sent.
    request_headers=[]
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
    # request_headers.append(["Referer",referer])
    body,response_headers = plugintools.read_body_and_headers(pageurl, headers=request_headers)
    plugintools.log("body= "+body)
    return body
# Start the protocol that rebuilds the original URL.
# Capture the right parameters.
def getparams_freebroadcast(url_user, body):
    # Scrapes the streamer IP from the embed page body and stores it
    # into url_user["ip"] (mutates the dict in place).
    plugintools.log("[movie.ultra.7k-0.3.0].getparams_freebroadcast " + repr(url_user) )
    # Build the 9stream dictionary
    # NOTE(review): `entry` is extracted but never used afterwards.
    entry = plugintools.find_single_match(body, 'setStream(token) {(.*?)}')
    ip = re.compile("streamer', \'(.*?)\'").findall(body)
    url_user["ip"]=str(ip[0])
    plugintools.log("IP= "+str(ip[0]))
# Capture the playpath.
def getfile_freebroadcast(url_user, decoded, body):
    # Requests `decoded` (with the stored referer) and scrapes the stream
    # file name from the response; returns the regex findall result (list).
    plugintools.log("movie.ultra.7k getfile_freebroadcast( "+repr(url_user))
    referer = url_user.get("referer")
    req = urllib2.Request(decoded)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    req.add_header('Referer', referer)
    response = urllib2.urlopen(req)
    print response
    data = response.read()
    print data
    file = re.compile("file': '([^.]*)").findall(data)
    print 'file',file
    return file
# Capture fileserver.php (the server token).
def get_fileserver(decoded, url_user):
    # Requests `decoded` using the page URL as referer and scrapes the
    # session token from the JSON-ish response; returns a findall list.
    plugintools.log("movie.ultra.7k fileserver "+repr(url_user))
    referer=url_user.get("pageurl")
    req = urllib2.Request(decoded)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    req.add_header('Referer',referer)
    response = urllib2.urlopen(req)
    print response
    data = response.read()
    print data
    token = re.compile('token":"(.*)"').findall(data)
    print 'token',token
    return token
|
corvorepack/REPOIVAN
|
plugin.video.movie.ultra.7k/resources/regex/freebroadcast.py
|
Python
|
gpl-2.0
| 5,087
|
from django.test import TestCase
from django.utils import timezone
from .. import utils
from .. import models as survey_models
from myvoice.core.tests import factories
from operator import itemgetter
class TestDisplayFeedback(TestCase):
    """Checks utils.display_feedback's accept/reject decisions."""

    def test_false(self):
        """Known-unhelpful messages must be rejected."""
        rejected = [None, '', ' ', ' 1', '1', 'yes', 'Yes', 'YES',
                    'no', 'No', 'NO', '55999', 'n0', 'start', 'Start']
        for message in rejected:
            self.assertEqual(utils.display_feedback(message), False)

    def test_true(self):
        """Substantive messages must be accepted."""
        accepted = ['Yes this is good', 'Great feedback', '20']
        for message in accepted:
            self.assertEqual(utils.display_feedback(message), True)
class TestSurveyUtils(TestCase):
    """Tests for the answer-aggregation helpers in survey utils
    (analyze, get_mode, group_responses, phone-format converters)."""

    def setUp(self):
        # One question ('Test') with four responses: three 'Yes', one 'No'.
        self.survey = factories.Survey.create()
        self.question = factories.SurveyQuestion.create(survey=self.survey, label='Test')
        self.responses = [factories.SurveyQuestionResponse.create(
            response=ans, question=self.question)
            for ans in ('Yes', 'Yes', 'Yes', 'No')]
        self.answers = [r.response for r in self.responses]

    def test_analyze(self):
        """Test it returns percentage of responses with a given answer."""
        self.assertEqual(75, utils.analyze(self.answers, 'Yes'))
        self.assertEqual(25, utils.analyze(self.answers, 'No'))
        # An empty answer list yields None, not 0.
        self.assertEqual(None, utils.analyze([], 'Yes'))

    def test_get_mode(self):
        """Test that get_mode function finds the most common item."""
        # Add three more 'No' responses so 'No' (4) outnumbers 'Yes' (3).
        for i in range(3):
            self.responses.append(factories.SurveyQuestionResponse.create(
                response='No', question=self.question))
        answers = [r.response for r in self.responses]
        self.assertEqual('No', utils.get_mode(answers))
        self.assertEqual(None, utils.get_mode([]))

    def test_get_mode_acceptable_answers(self):
        """Test that get_mode respects acceptable answers."""
        # So we have 4 'Yes' and only 3 'Maybe'
        self.responses.append(factories.SurveyQuestionResponse.create(
            response='Yes', question=self.question))
        for i in range(3):
            self.responses.append(factories.SurveyQuestionResponse.create(
                response='Maybe', question=self.question))
        answers = [r.response for r in self.responses]
        # 'Yes' is the overall mode but is excluded by acceptable_answers.
        self.assertEqual(
            'Maybe', utils.get_mode(answers, acceptable_answers=['No', 'Maybe']))

    def test_group_responses(self):
        """Test group_responses."""
        # Add three responses to a second question labelled 'Test1'.
        question = factories.SurveyQuestion.create(survey=self.survey, label='Test1')
        for i in range(3):
            self.responses.append(factories.SurveyQuestionResponse.create(
                response='Maybe', question=question))
        grouped_responses = utils.group_responses(self.responses, 'question.label')
        self.assertEqual(2, len(grouped_responses))

        # Test the content of each group
        grouped_dict = dict(grouped_responses)
        self.assertEqual(4, len(grouped_dict['Test']))
        self.assertEqual(3, len(grouped_dict['Test1']))

    def test_group_responses_valuesqset(self):
        """Test group_responses with ValuesQueryset."""
        question = factories.SurveyQuestion.create(survey=self.survey, label='Test1')
        for i in range(3):
            self.responses.append(factories.SurveyQuestionResponse.create(
                response='Maybe', question=question))
        # Values querysets yield dicts, so keys are looked up via itemgetter
        # instead of attribute access.
        responses = survey_models.SurveyQuestionResponse.objects.values(
            'question__label', 'response')
        grouped_responses = utils.group_responses(responses, 'question__label', keyfunc=itemgetter)
        self.assertEqual(2, len(grouped_responses))

        # Test the content of each group
        grouped_dict = dict(grouped_responses)
        self.assertEqual(4, len(grouped_dict['Test']))
        self.assertEqual(3, len(grouped_dict['Test1']))

    def test_convert_local_format(self):
        """Test conversion of phone number to local format."""
        self.assertEqual('08111111111', utils.convert_to_local_format('08111111111'))
        self.assertEqual('08111111111', utils.convert_to_local_format('+2348111111111'))
        self.assertEqual('08111111111', utils.convert_to_local_format('2348111111111'))
        # Numbers of unexpected length convert to None.
        self.assertEqual(None, utils.convert_to_local_format('234811111111122'))

    def test_convert_international_format(self):
        """Test conversion of phone number to international format."""
        self.assertEqual('+2348111111111', utils.convert_to_international_format('+2348111111111'))
        self.assertEqual('+2348111111111', utils.convert_to_international_format('2348111111111'))
        self.assertEqual('+2348111111111', utils.convert_to_international_format('08111111111'))
        self.assertEqual(None, utils.convert_to_international_format('0811111'))
class TestFilterSQRQuery(TestCase):
    """Tests for utils.filter_sqr_query's clinic/service/date filters."""

    def setUp(self):
        # Fixture: three visits, one survey response attached to each.
        #   visit1: clinic1, service1, 2014-07-22
        #   visit2: clinic2, service2, 2014-07-25
        #   visit3: clinic1, service2, 2014-07-25
        self.survey = factories.Survey.create(role=survey_models.Survey.PATIENT_FEEDBACK)
        self.clinic1 = factories.Clinic.create(name='Clinic1')
        self.clinic2 = factories.Clinic.create(name='Clinic2')
        self.service1 = factories.Service.create(name='Service1')
        self.service2 = factories.Service.create(name='Service2')
        dt1 = timezone.make_aware(timezone.datetime(2014, 7, 22), timezone.utc)
        dt2 = timezone.make_aware(timezone.datetime(2014, 7, 25), timezone.utc)
        visit1 = factories.Visit.create(
            patient=factories.Patient.create(clinic=self.clinic1),
            service=self.service1,
            visit_time=dt1)
        visit2 = factories.Visit.create(
            patient=factories.Patient.create(clinic=self.clinic2),
            service=self.service2,
            visit_time=dt2)
        visit3 = factories.Visit.create(
            patient=factories.Patient.create(clinic=self.clinic1),
            service=self.service2,
            visit_time=dt2)
        factories.SurveyQuestionResponse.create(
            visit=visit1, question__survey=self.survey)
        factories.SurveyQuestionResponse.create(
            visit=visit2, question__survey=self.survey)
        factories.SurveyQuestionResponse.create(
            visit=visit3, question__survey=self.survey)
        self.responses = survey_models.SurveyQuestionResponse.objects.all()

    def test_filter_clinic(self):
        # visit1 and visit3 belong to clinic1.
        responses = utils.filter_sqr_query(self.responses, clinic='clinic1')
        self.assertEqual(2, responses.count())
        self.assertEqual(self.clinic1, responses[0].clinic)

    def test_filter_service(self):
        # Only visit1 used service1.
        responses = utils.filter_sqr_query(self.responses, service='service1')
        self.assertEqual(1, responses.count())
        self.assertEqual(self.service1, responses[0].service)

    def test_filter_dates(self):
        # visit2 and visit3 (2014-07-25) fall inside the window.
        responses = utils.filter_sqr_query(
            self.responses, start_date='2014-07-23', end_date='2014-07-30')
        self.assertEqual(2, responses.count())

    def test_filter_combination(self):
        # Only visit3 matches both clinic1 and service2.
        responses = utils.filter_sqr_query(
            self.responses, clinic='clinic1', service='service2')
        self.assertEqual(1, responses.count())
|
myvoice-nigeria/myvoice
|
myvoice/survey/tests/test_utils.py
|
Python
|
bsd-2-clause
| 7,181
|
# Ages keyed by person name.
age = {'tom': 23, 'jane': 32, 'mike': 27, 'linda': 25}
|
codermoji-contrib/python
|
start/Intro to Dicts/printdict/printval1.py
|
Python
|
mit
| 47
|
import warnings
import numpy as np
import pandas as pd
from statsmodels.base import model
import statsmodels.base.wrapper as wrap
from statsmodels.tools.sm_exceptions import ConvergenceWarning
class _DimReductionRegression(model.Model):
    """
    A base class for dimension reduction regression methods.
    """

    def __init__(self, endog, exog, **kwargs):
        super(_DimReductionRegression, self).__init__(endog, exog, **kwargs)

    def _prep(self, n_slice):
        # Order the observations by the response value.
        order = np.argsort(self.endog)
        wx = self.exog[order, :]

        # Whiten: center, then transform so the sample covariance is the
        # identity (via the Cholesky root of the covariance).
        wx -= wx.mean(0)
        cov = np.dot(wx.T, wx) / wx.shape[0]
        cov_root = np.linalg.cholesky(cov)
        wx = np.linalg.solve(cov_root, wx.T).T

        self.wexog = wx
        self._covxr = cov_root

        # Partition the whitened rows into roughly equal-sized slices.
        self._split_wexog = np.array_split(wx, n_slice)
class SlicedInverseReg(_DimReductionRegression):
    """
    Sliced Inverse Regression (SIR)

    Parameters
    ----------
    endog : array_like (1d)
        The dependent variable
    exog : array_like (2d)
        The covariates

    References
    ----------
    KC Li (1991). Sliced inverse regression for dimension reduction.
    JASA 86, 316-342.
    """

    def fit(self, slice_n=20, **kwargs):
        """
        Estimate the EDR space using Sliced Inverse Regression.

        Parameters
        ----------
        slice_n : int, optional
            Target number of observations per slice

        Returns
        -------
        A results wrapper exposing the estimated EDR directions
        (`params`) and the associated eigenvalues (`eigs`).
        """
        if len(kwargs) > 0:
            msg = "SIR.fit does not take any extra keyword arguments"
            warnings.warn(msg)

        # Number of slices
        n_slice = self.exog.shape[0] // slice_n
        self._prep(n_slice)

        # Per-slice means and sizes of the whitened covariates.
        mn = [z.mean(0) for z in self._split_wexog]
        n = [z.shape[0] for z in self._split_wexog]
        mn = np.asarray(mn)
        n = np.asarray(n)

        # Estimate Cov E[X | Y=y] as the size-weighted outer product of the
        # slice means.
        mnc = np.dot(mn.T, n[:, None] * mn) / n.sum()

        a, b = np.linalg.eigh(mnc)
        # eigh returns ascending eigenvalues; reorder to descending.
        jj = np.argsort(-a)
        a = a[jj]
        b = b[:, jj]
        # Map the directions from the whitened scale back to the original
        # coordinates.
        params = np.linalg.solve(self._covxr.T, b)

        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)

    def _regularized_objective(self, A):
        # The objective function for regularized SIR: a quadratic penalty
        # plus the weighted residual of projecting the slice means onto the
        # span of covx @ A.
        p = self.k_vars
        covx = self._covx
        mn = self._slice_means
        ph = self._slice_props
        v = 0
        A = np.reshape(A, (p, self.ndim))

        # The penalty
        for k in range(self.ndim):
            u = np.dot(self.pen_mat, A[:, k])
            v += np.sum(u * u)

        # The SIR objective function
        covxa = np.dot(covx, A)
        q, _ = np.linalg.qr(covxa)
        qd = np.dot(q, np.dot(q.T, mn.T))
        qu = mn.T - qd
        v += np.dot(ph, (qu * qu).sum(0))

        return v

    def _regularized_grad(self, A):
        # The gradient of the objective function for regularized SIR.
        p = self.k_vars
        ndim = self.ndim
        covx = self._covx
        n_slice = self.n_slice
        mn = self._slice_means
        ph = self._slice_props
        A = A.reshape((p, ndim))

        # Penalty gradient
        gr = 2 * np.dot(self.pen_mat.T, np.dot(self.pen_mat, A))

        A = A.reshape((p, ndim))
        covxa = np.dot(covx, A)
        covx2a = np.dot(covx, covxa)
        Q = np.dot(covxa.T, covxa)
        Qi = np.linalg.inv(Q)
        jm = np.zeros((p, ndim))
        qcv = np.linalg.solve(Q, covxa.T)

        # ft[q*ndim + r] is the derivative of the projection operator with
        # respect to element (q, r) of A.
        ft = [None] * (p * ndim)
        for q in range(p):
            for r in range(ndim):
                jm *= 0
                jm[q, r] = 1
                umat = np.dot(covx2a.T, jm)
                umat += umat.T
                umat = -np.dot(Qi, np.dot(umat, Qi))
                fmat = np.dot(np.dot(covx, jm), qcv)
                fmat += np.dot(covxa, np.dot(umat, covxa.T))
                fmat += np.dot(covxa, np.linalg.solve(Q, np.dot(jm.T, covx)))
                ft[q*ndim + r] = fmat

        ch = np.linalg.solve(Q, np.dot(covxa.T, mn.T))
        cu = mn - np.dot(covxa, ch).T
        for i in range(n_slice):
            u = cu[i, :]
            v = mn[i, :]
            for q in range(p):
                for r in range(ndim):
                    f = np.dot(u, np.dot(ft[q*ndim + r], v))
                    gr[q, r] -= 2 * ph[i] * f

        return gr.ravel()

    def fit_regularized(self, ndim=1, pen_mat=None, slice_n=20, maxiter=100,
                        gtol=1e-3, **kwargs):
        """
        Estimate the EDR space using regularized SIR.

        Parameters
        ----------
        ndim : int
            The number of EDR directions to estimate
        pen_mat : array_like
            A 2d array such that the squared Frobenius norm of
            `dot(pen_mat, dirs)`` is added to the objective function,
            where `dirs` is an orthogonal array whose columns span
            the estimated EDR space.
        slice_n : int, optional
            Target number of observations per slice
        maxiter :int
            The maximum number of iterations for estimating the EDR
            space.
        gtol : float
            If the norm of the gradient of the objective function
            falls below this value, the algorithm has converged.
        start_params : array_like, optional
            Starting value for the directions, with `ndim` columns
            (accepted as a keyword argument).

        Returns
        -------
        A results class instance.

        Notes
        -----
        If each row of `exog` can be viewed as containing the values of a
        function evaluated at equally-spaced locations, then setting the
        rows of `pen_mat` to [[1, -2, 1, ...], [0, 1, -2, 1, ..], ...]
        will give smooth EDR coefficients.  This is a form of "functional
        SIR" using the squared second derivative as a penalty.

        References
        ----------
        L. Ferre, A.F. Yao (2003).  Functional sliced inverse regression
        analysis.  Statistics: a journal of theoretical and applied
        statistics 37(6) 475-488.
        """
        # `start_params` is a supported keyword; pop it before warning about
        # anything left over.
        start_params = kwargs.pop("start_params", None)
        if len(kwargs) > 0:
            msg = "SIR.fit_regularized does not take keyword arguments"
            warnings.warn(msg)

        if pen_mat is None:
            raise ValueError("pen_mat is a required argument")

        # Number of slices, honoring the slice_n argument (previously it was
        # silently overridden by a kwargs lookup).
        n_slice = self.exog.shape[0] // slice_n

        # Sort the data by endog
        ii = np.argsort(self.endog)
        x = self.exog[ii, :]
        x -= x.mean(0)

        covx = np.cov(x.T)

        # Split the data into slices
        split_exog = np.array_split(x, n_slice)

        mn = [z.mean(0) for z in split_exog]
        n = [z.shape[0] for z in split_exog]
        mn = np.asarray(mn)
        n = np.asarray(n)
        self._slice_props = n / n.sum()
        self.ndim = ndim
        self.k_vars = covx.shape[0]
        self.pen_mat = pen_mat
        self._covx = covx
        self.n_slice = n_slice
        self._slice_means = mn

        if start_params is None:
            # Default start: the first `ndim` coordinate directions.
            params = np.zeros((self.k_vars, ndim))
            params[0:ndim, 0:ndim] = np.eye(ndim)
        else:
            if start_params.shape[1] != ndim:
                msg = "Shape of start_params is not compatible with ndim"
                raise ValueError(msg)
            params = start_params

        params, _, cnvrg = _grass_opt(params, self._regularized_objective,
                                      self._regularized_grad, maxiter, gtol)

        if not cnvrg:
            g = self._regularized_grad(params.ravel())
            gn = np.sqrt(np.dot(g, g))
            msg = "SIR.fit_regularized did not converge, |g|=%f" % gn
            warnings.warn(msg)

        results = DimReductionResults(self, params, eigs=None)
        return DimReductionResultsWrapper(results)
class PrincipalHessianDirections(_DimReductionRegression):
    """
    Principal Hessian Directions (PHD)

    Parameters
    ----------
    endog : array_like (1d)
        The dependent variable
    exog : array_like (2d)
        The covariates

    Returns
    -------
    A model instance.  Call `fit` to obtain a results instance,
    from which the estimated parameters can be obtained.

    References
    ----------
    KC Li (1992).  On Principal Hessian Directions for Data
    Visualization and Dimension Reduction: Another application
    of Stein's lemma.  JASA 87:420.
    """

    def fit(self, resid=False, **kwargs):
        """
        Estimate the EDR space using PHD.

        Parameters
        ----------
        resid : bool, optional
            If True, use least squares regression to remove the
            linear relationship between each covariate and the
            response, before conducting PHD.  (Previously only
            reachable as a **kwargs entry; now a real parameter.)

        Returns
        -------
        A results instance which can be used to access the estimated
        parameters.
        """
        y = self.endog - self.endog.mean()
        x = self.exog - self.exog.mean(0)

        if resid:
            from statsmodels.regression.linear_model import OLS
            r = OLS(y, x).fit()
            y = r.resid

        # Stein-type moment estimate: E[y * x x'] / n.
        cm = np.einsum('i,ij,ik->jk', y, x, x)
        cm /= len(y)

        cx = np.cov(x.T)
        cb = np.linalg.solve(cx, cm)

        a, b = np.linalg.eig(cb)
        # cb is not symmetric, so order eigenvalues by magnitude.
        jj = np.argsort(-np.abs(a))
        a = a[jj]
        params = b[:, jj]

        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)
class SlicedAverageVarianceEstimation(_DimReductionRegression):
    """
    Sliced Average Variance Estimation (SAVE)

    Parameters
    ----------
    endog : array_like (1d)
        The dependent variable
    exog : array_like (2d)
        The covariates
    bc : bool, optional
        If True, use the bias-corrected CSAVE method of Li and Zhu.

    References
    ----------
    RD Cook.  SAVE: A method for dimension reduction and graphics
    in regression.
    http://www.stat.umn.edu/RegGraph/RecentDev/save.pdf

    Y Li, L-X Zhu (2007). Asymptotics for sliced average
    variance estimation.  The Annals of Statistics.
    https://arxiv.org/pdf/0708.0462.pdf
    """

    def __init__(self, endog, exog, **kwargs):
        # Name the class explicitly in super(): relying on a module-level
        # alias (`SAVE`) is fragile if the alias is missing or rebound.
        super(SlicedAverageVarianceEstimation, self).__init__(
            endog, exog, **kwargs)

        # Bias correction is enabled only by an explicit bc=True.
        self.bc = False
        if "bc" in kwargs and kwargs["bc"] is True:
            self.bc = True

    def fit(self, **kwargs):
        """
        Estimate the EDR space.

        Parameters
        ----------
        slice_n : int
            Number of observations per slice
        """
        # Sample size per slice
        slice_n = kwargs.get("slice_n", 50)

        # Number of slices
        n_slice = self.exog.shape[0] // slice_n

        self._prep(n_slice)
        cv = [np.cov(z.T) for z in self._split_wexog]
        ns = [z.shape[0] for z in self._split_wexog]

        p = self.wexog.shape[1]

        if not self.bc:
            # Cook's original approach
            vm = 0
            for w, cvx in zip(ns, cv):
                icv = np.eye(p) - cvx
                vm += w * np.dot(icv, icv)
            vm /= len(cv)
        else:
            # The bias-corrected approach of Li and Zhu

            # \Lambda_n in Li, Zhu
            av = 0
            for c in cv:
                av += np.dot(c, c)
            av /= len(cv)

            # V_n in Li, Zhu
            vn = 0
            for x in self._split_wexog:
                r = x - x.mean(0)
                for i in range(r.shape[0]):
                    u = r[i, :]
                    m = np.outer(u, u)
                    vn += np.dot(m, m)
            vn /= self.exog.shape[0]

            c = np.mean(ns)
            k1 = c * (c - 1) / ((c - 1)**2 + 1)
            k2 = (c - 1) / ((c - 1)**2 + 1)
            av2 = k1 * av - k2 * vn
            vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2

        a, b = np.linalg.eigh(vm)
        jj = np.argsort(-a)
        a = a[jj]
        b = b[:, jj]
        # Map back from the whitened scale to the original coordinates.
        params = np.linalg.solve(self._covxr.T, b)

        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)
class DimReductionResults(model.Results):
    """
    Results class for a dimension reduction regression.

    Notes
    -----
    The `params` attribute is a matrix whose columns span
    the effective dimension reduction (EDR) space.  Some
    methods produce a corresponding set of eigenvalues
    (`eigs`) that indicate how much information is contained
    in each basis direction.
    """

    def __init__(self, model, params, eigs):
        # `eigs` may be None for methods that do not produce eigenvalues
        # (e.g. regularized SIR passes eigs=None).
        super(DimReductionResults, self).__init__(
            model, params)
        self.eigs = eigs
class DimReductionResultsWrapper(wrap.ResultsWrapper):
    # Wrap `params` using the 'columns' attachment rule.
    _attrs = {
        'params': 'columns',
    }
    _wrap_attrs = _attrs

# Register the wrapper class for DimReductionResults instances.
wrap.populate_wrapper(DimReductionResultsWrapper,  # noqa:E305
                      DimReductionResults)
def _grass_opt(params, fun, grad, maxiter, gtol):
"""
Minimize a function on a Grassmann manifold.
Parameters
----------
params : array_like
Starting value for the optimization.
fun : function
The function to be minimized.
grad : function
The gradient of fun.
maxiter : int
The maximum number of iterations.
gtol : float
Convergence occurs when the gradient norm falls below this value.
Returns
-------
params : array_like
The minimizing value for the objective function.
fval : float
The smallest achieved value of the objective function.
cnvrg : bool
True if the algorithm converged to a limit point.
Notes
-----
`params` is 2-d, but `fun` and `grad` should take 1-d arrays
`params.ravel()` as arguments.
Reference
---------
A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with
orthogonality constraints. SIAM J Matrix Anal Appl.
http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
"""
p, d = params.shape
params = params.ravel()
f0 = fun(params)
cnvrg = False
for _ in range(maxiter):
# Project the gradient to the tangent space
g = grad(params)
g -= np.dot(g, params) * params / np.dot(params, params)
if np.sqrt(np.sum(g * g)) < gtol:
cnvrg = True
break
gm = g.reshape((p, d))
u, s, vt = np.linalg.svd(gm, 0)
paramsm = params.reshape((p, d))
pa0 = np.dot(paramsm, vt.T)
def geo(t):
# Parameterize the geodesic path in the direction
# of the gradient as a function of a real value t.
pa = pa0 * np.cos(s * t) + u * np.sin(s * t)
return np.dot(pa, vt).ravel()
# Try to find a downhill step along the geodesic path.
step = 2.
while step > 1e-10:
pa = geo(-step)
f1 = fun(pa)
if f1 < f0:
params = pa
f0 = f1
break
step /= 2
params = params.reshape((p, d))
return params, f0, cnvrg
class CovarianceReduction(_DimReductionRegression):
    """
    Dimension reduction for covariance matrices (CORE).

    Parameters
    ----------
    endog : array_like
        The dependent variable, treated as group labels.
    exog : array_like
        The independent variables.
    dim : int
        The dimension of the subspace onto which the covariance
        matrices are projected.

    Returns
    -------
    A model instance.  Call `fit` on the model instance to obtain
    a results instance, which contains the fitted model parameters.

    Notes
    -----
    This is a likelihood-based dimension reduction procedure based
    on Wishart models for sample covariance matrices.  The goal
    is to find a projection matrix P so that C_i | P'C_iP and
    C_j | P'C_jP are equal in distribution for all i, j, where
    the C_i are the within-group covariance matrices.

    The model and methodology are as described in Cook and Forzani.
    The optimization method follows Edelman et. al.

    References
    ----------
    DR Cook, L Forzani (2008).  Covariance reducing models: an alternative
    to spectral modeling of covariance matrices.  Biometrika 95:4.

    A Edelman, TA Arias, ST Smith (1998).  The geometry of algorithms with
    orthogonality constraints. SIAM J Matrix Anal Appl.
    http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
    """

    def __init__(self, endog, exog, dim):

        super(CovarianceReduction, self).__init__(endog, exog)

        # Within-group sample covariance matrices and group sizes,
        # one entry per distinct value of endog.
        covs, ns = [], []
        df = pd.DataFrame(self.exog, index=self.endog)
        for _, v in df.groupby(df.index):
            covs.append(v.cov().values)
            ns.append(v.shape[0])

        self.nobs = len(endog)

        # The marginal covariance: size-weighted average of the
        # within-group covariances.
        covm = 0
        for i, _ in enumerate(covs):
            covm += covs[i] * ns[i]
        covm /= self.nobs
        self.covm = covm

        self.covs = covs
        self.ns = ns
        self.dim = dim

    def loglike(self, params):
        """
        Evaluate the log-likelihood.

        Parameters
        ----------
        params : array_like
            The projection matrix used to reduce the covariances, flattened
            to 1d.

        Returns
        -------
        float
            The log-likelihood evaluated at `params`.
        """
        p = self.covm.shape[0]
        proj = params.reshape((p, self.dim))

        # Contribution of the marginal covariance projected to the subspace.
        c = np.dot(proj.T, np.dot(self.covm, proj))
        _, ldet = np.linalg.slogdet(c)
        f = self.nobs * ldet / 2

        # Subtract the per-group projected log-determinants.
        for j, c in enumerate(self.covs):
            c = np.dot(proj.T, np.dot(c, proj))
            _, ldet = np.linalg.slogdet(c)
            f -= self.ns[j] * ldet / 2

        return f

    def score(self, params):
        """
        Evaluate the score function.

        Parameters
        ----------
        params : array_like
            The projection matrix used to reduce the covariances,
            flattened to 1d.

        Returns
        -------
        ndarray
            The score function (gradient of `loglike`) at `params`,
            flattened to 1d.
        """
        p = self.covm.shape[0]
        proj = params.reshape((p, self.dim))

        c0 = np.dot(proj.T, np.dot(self.covm, proj))
        cP = np.dot(self.covm, proj)
        g = self.nobs * np.linalg.solve(c0, cP.T).T

        for j, c in enumerate(self.covs):
            c0 = np.dot(proj.T, np.dot(c, proj))
            cP = np.dot(c, proj)
            g -= self.ns[j] * np.linalg.solve(c0, cP.T).T

        return g.ravel()

    def fit(self, start_params=None, maxiter=200, gtol=1e-4):
        """
        Fit the covariance reduction model.

        Parameters
        ----------
        start_params : array_like
            Starting value for the projection matrix. May be
            rectangular, or flattened.
        maxiter : int
            The maximum number of gradient steps to take.
        gtol : float
            Convergence criterion for the gradient norm.

        Returns
        -------
        A results instance that can be used to access the
        fitted parameters.
        """
        p = self.covm.shape[0]
        d = self.dim

        # Starting value for params: the projection onto the first
        # d coordinate axes unless a value was supplied.
        # (A redundant no-op self-assignment was removed here.)
        if start_params is None:
            params = np.zeros((p, d))
            params[0:d, 0:d] = np.eye(d)
        else:
            params = start_params

        # _grass_opt is designed for minimization, we are doing maximization
        # here so everything needs to be flipped.
        params, llf, cnvrg = _grass_opt(params, lambda x: -self.loglike(x),
                                        lambda x: -self.score(x), maxiter,
                                        gtol)
        llf *= -1
        if not cnvrg:
            g = self.score(params.ravel())
            gn = np.sqrt(np.sum(g * g))
            msg = "CovReduce optimization did not converge, |g|=%f" % gn
            warnings.warn(msg, ConvergenceWarning)

        results = DimReductionResults(self, params, eigs=None)
        results.llf = llf
        return DimReductionResultsWrapper(results)
# Short aliases for the estimator classes, intended for expert users.
SIR = SlicedInverseReg
PHD = PrincipalHessianDirections
SAVE = SlicedAverageVarianceEstimation
CORE = CovarianceReduction
|
bashtage/statsmodels
|
statsmodels/regression/dimred.py
|
Python
|
bsd-3-clause
| 20,343
|
from __future__ import absolute_import
from django.db.models import F
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import AuditLogEntryEvent, ProjectKey, ProjectKeyStatus
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario("ListClientKeys")
def list_keys_scenario(runner):
runner.request(
method="GET", path="/projects/%s/%s/keys/" % (runner.org.slug, runner.default_project.slug)
)
@scenario("CreateClientKey")
def create_key_scenario(runner):
runner.request(
method="POST",
path="/projects/%s/%s/keys/" % (runner.org.slug, runner.default_project.slug),
data={"name": "Fabulous Key"},
)
class KeySerializer(serializers.Serializer):
    """Validates the payload for creating a project client key."""

    # Optional human-readable label for the key.
    name = serializers.CharField(max_length=64, required=False, allow_blank=True, allow_null=True)
    # Optional explicit key material: 32 lowercase hex characters each.
    public = serializers.RegexField(r"^[a-f0-9]{32}$", required=False, allow_null=True)
    secret = serializers.RegexField(r"^[a-f0-9]{32}$", required=False, allow_null=True)
class ProjectKeysEndpoint(ProjectEndpoint):
    doc_section = DocSection.PROJECTS

    @attach_scenarios([list_keys_scenario])
    def get(self, request, project):
        """
        List a Project's Client Keys
        ````````````````````````````

        Return a list of client keys bound to a project.

        :pparam string organization_slug: the slug of the organization the
                                          client keys belong to.
        :pparam string project_slug: the slug of the project the client keys
                                     belong to.
        """
        queryset = ProjectKey.objects.filter(
            project=project, roles=F("roles").bitor(ProjectKey.roles.store)
        )
        # Renamed from ``status`` so the local variable does not shadow the
        # imported ``rest_framework.status`` module.
        status_filter = request.GET.get("status")
        if status_filter == "active":
            queryset = queryset.filter(status=ProjectKeyStatus.ACTIVE)
        elif status_filter == "inactive":
            queryset = queryset.filter(status=ProjectKeyStatus.INACTIVE)
        elif status_filter:
            # Unknown filter value: return an empty result set.
            queryset = queryset.none()
        return self.paginate(
            request=request,
            queryset=queryset,
            order_by="-id",
            on_results=lambda x: serialize(x, request.user),
        )

    @attach_scenarios([create_key_scenario])
    def post(self, request, project):
        """
        Create a new Client Key
        ```````````````````````

        Create a new client key bound to a project.  The key's secret and
        public key are generated by the server.

        :pparam string organization_slug: the slug of the organization the
                                          client keys belong to.
        :pparam string project_slug: the slug of the project the client keys
                                     belong to.
        :param string name: the name for the new key.
        """
        serializer = KeySerializer(data=request.data)
        if serializer.is_valid():
            result = serializer.validated_data
            key = ProjectKey.objects.create(
                project=project,
                label=result.get("name"),
                public_key=result.get("public"),
                secret_key=result.get("secret"),
            )
            self.create_audit_entry(
                request=request,
                organization=project.organization,
                target_object=key.id,
                event=AuditLogEntryEvent.PROJECTKEY_ADD,
                data=key.get_audit_log_data(),
            )
            # Named constant (same value as the previous literal 201) for
            # consistency with the error branch below.
            return Response(serialize(key, request.user), status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
mvaled/sentry
|
src/sentry/api/endpoints/project_keys.py
|
Python
|
bsd-3-clause
| 3,820
|
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'output_mode_callback',
'get_output_mode_callback',
'set_output_mode_callback',
'set_random_seed',
]
import random
import numpy as np
# Module-level hook invoked by output_mode_callback(); None means no hook installed.
OUTPUT_MODE_CALLBACK = None
def set_output_mode_callback(callback):
    """Install *callback* as the global output-mode hook."""
    global OUTPUT_MODE_CALLBACK
    OUTPUT_MODE_CALLBACK = callback
def get_output_mode_callback():
    """Return the currently installed output-mode callback, or None."""
    # Reading a module-level name needs no ``global`` declaration;
    # the original one was redundant.
    return OUTPUT_MODE_CALLBACK
def output_mode_callback():
    """Invoke the installed output-mode hook, if one is set."""
    # Read-only access to the module global: no ``global`` declaration needed.
    if OUTPUT_MODE_CALLBACK is not None:
        OUTPUT_MODE_CALLBACK()
def set_random_seed(random_seed):
    """Seed both the numpy and the stdlib random generators with *random_seed*."""
    # Same order as before: numpy first, then the stdlib generator.
    for seeder in (np.random.seed, random.seed):
        seeder(random_seed)
|
simone-campagna/rubik
|
rubik/cubes/internals.py
|
Python
|
apache-2.0
| 1,309
|
# -*- coding: utf-8 -*-
def classFactory(iface):
    """QGIS plugin entry point: build and return the plugin instance for *iface*."""
    # load FeatureTemplates class from file FeatureTemplates
    from featuretemplates import FeatureTemplates
    return FeatureTemplates(iface)
|
NathanW2/featuretemplates
|
__init__.py
|
Python
|
gpl-2.0
| 196
|
# -*- coding:utf-8 -*-
from flask import Blueprint
from flask import jsonify
from flask import request
from lib.ci_type import CITypeRelationManager
from lib.auth import auth_with_key
# Blueprint collecting all CIType-relation routes; registered by the app factory.
cityperelation = Blueprint("cityperelation", __name__)
@cityperelation.route("/types", methods=["GET"])
def get_types():
manager = CITypeRelationManager()
return jsonify(relation_types=manager.relation_types)
@cityperelation.route("/<int:parent>/children", methods=["GET"])
def get_children_by_parent(parent=None):
manager = CITypeRelationManager()
return jsonify(children=manager.get_children(parent))
@cityperelation.route("/<int:child>/parents", methods=["GET"])
def get_parents_by_child(child=None):
manager = CITypeRelationManager()
return jsonify(parents=manager.get_parents(child))
@cityperelation.route("/<int:parent>/<int:child>", methods=["POST"])
@auth_with_key
def create_citype_realtions(parent=None, child=None):
relation_type = request.values.get("relation_type", "contain")
manager = CITypeRelationManager()
res = manager.add(parent, child, relation_type=relation_type)
return jsonify(ctr_id=res)
@cityperelation.route("/<int:ctr_id>", methods=["DELETE"])
@auth_with_key
def delete_citype_relation(ctr_id=None):
manager = CITypeRelationManager()
manager.delete(ctr_id)
return jsonify(message="CIType Relation is deleted")
@cityperelation.route("/<int:parent>/<int:child>", methods=["DELETE"])
@auth_with_key
def delete_citype_relation_2(parent=None, child=None):
manager = CITypeRelationManager()
manager.delete_2(parent, child)
return jsonify(message="CIType Relation is deleted")
|
kdyq007/cmdb-api
|
core/ci_type_relation.py
|
Python
|
gpl-2.0
| 1,664
|
"""
Contains tests for the statistics aggregator base class as well
as the default aggregator class.
"""
import time
from tests.base import TestBase
from statsite.aggregator import Aggregator, DefaultAggregator
from statsite.metrics import Counter, KeyValue, Timer
class TestAggregator(TestBase):
    def test_fold_metrics_works(self, monkeypatch):
        """
        Tests that aggregators can fold metrics properly.
        """
        now = 12
        # Freeze time so the folded timestamps are deterministic.
        monkeypatch.setattr(time, 'time', lambda: now)

        metrics = [KeyValue("k", 1, now), Counter("j", 2)]
        result = Aggregator(None)._fold_metrics(metrics)

        assert 1 == result.count(("kv.k", 1, now))
        assert 1 == result.count(("counts.j", 2, now))

    def test_fold_metrics_passes_metric_settings(self, monkeypatch):
        """
        Tests that aggregators pass the proper metric settings when
        folding over.
        """
        now = 12
        settings = { "ms": { "percentile": 80 } }
        metrics = [Timer("k", 20, now)]
        monkeypatch.setattr(time, 'time', lambda: now)

        result = Aggregator(None, metrics_settings=settings)._fold_metrics(metrics)
        # Removed leftover debug output (``print repr(result)``): it was
        # Python-2-only syntax and stray stdout noise in a test.
        assert 1 == result.count(("timers.k.sum_80", 20, now))
class TestDefaultAggregator(TestBase):
    def test_flushes_collected_metrics(self, metrics_store):
        """
        Tests that the default aggregator properly flushes the
        collected metrics to the metric store.
        """
        timestamp = 17
        aggregator = DefaultAggregator(metrics_store)
        # Two separate add_metrics calls, exactly as a client would make them.
        for value in (1, 2):
            aggregator.add_metrics([KeyValue("k", value, timestamp)])
        aggregator.flush()

        assert [("kv.k", 1, timestamp), ("kv.k", 2, timestamp)] == metrics_store.data
|
kiip/statsite
|
tests/unit/test_aggregator.py
|
Python
|
bsd-3-clause
| 1,745
|
#!/usr/bin/python
# (c) 2019, XLAB d.o.o <www.xlab.si>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_bucket_notification
short_description: Creates, updates or deletes S3 Bucket notification for lambda
description:
- This module allows the management of AWS Lambda function bucket event mappings via the
Ansible framework. Use module M(lambda) to manage the lambda function itself, M(lambda_alias)
to manage function aliases and M(lambda_policy) to modify lambda permissions.
notes:
- This module heavily depends on M(lambda_policy) as you need to allow C(lambda:InvokeFunction)
permission for your lambda function.
version_added: "2.9"
author:
- XLAB d.o.o. (@xlab-si)
- Aljaz Kosir (@aljazkosir)
- Miha Plesko (@miha-plesko)
options:
event_name:
description:
- Unique name for event notification on bucket.
required: true
type: str
lambda_function_arn:
description:
- The ARN of the lambda function.
aliases: ['function_arn']
type: str
bucket_name:
description:
- S3 bucket name.
required: true
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
lambda_alias:
description:
- Name of the Lambda function alias.
- Mutually exclusive with I(lambda_version).
type: str
lambda_version:
description:
- Version of the Lambda function.
- Mutually exclusive with I(lambda_alias).
type: int
events:
description:
- Events that you want to be triggering notifications. You can select multiple events to send
to the same destination, you can set up different events to send to different destinations,
and you can set up a prefix or suffix for an event. However, for each bucket,
individual events cannot have multiple configurations with overlapping prefixes or
suffixes that could match the same object key.
- Required when I(state=present).
choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
type: list
elements: str
prefix:
description:
- Optional prefix to limit the notifications to objects with keys that start with matching
characters.
type: str
suffix:
description:
- Optional suffix to limit the notifications to objects with keys that end with matching
characters.
type: str
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a bucket
- hosts: localhost
gather_facts: no
tasks:
- name: Process jpg image
s3_bucket_notification:
state: present
event_name: on_file_add_or_remove
bucket_name: test-bucket
function_name: arn:aws:lambda:us-east-2:526810320200:function:test-lambda
events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
prefix: images/
suffix: .jpg
'''
RETURN = '''
notification_configuration:
description: list of currently applied notifications
returned: success
type: list
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # will be protected by AnsibleAWSModule
class AmazonBucket:
    """Wrapper over one S3 bucket's lambda notification configuration."""

    def __init__(self, client, bucket_name):
        self.client = client
        self.bucket_name = bucket_name
        self._full_config_cache = None

    def full_config(self):
        """Return (and lazily cache) every lambda notification config on the bucket."""
        if self._full_config_cache is None:
            response = self.client.get_bucket_notification_configuration(
                Bucket=self.bucket_name)
            raw_configs = response.get('LambdaFunctionConfigurations', list())
            self._full_config_cache = [Config.from_api(cfg) for cfg in raw_configs]
        return self._full_config_cache

    def current_config(self, config_name):
        """Return the config whose Id equals *config_name*, or None if absent."""
        matches = [cfg for cfg in self.full_config()
                   if cfg.raw['Id'] == config_name]
        return matches[0] if matches else None

    def apply_config(self, desired):
        """Insert/replace *desired* among the bucket's configs and upload them."""
        remaining = self._other_configs(desired)
        remaining.append(desired.raw)
        self._upload_bucket_config(remaining)
        return remaining

    def delete_config(self, desired):
        """Remove the config matching *desired*'s Id and upload the remainder."""
        remaining = self._other_configs(desired)
        self._upload_bucket_config(remaining)
        return remaining

    def _other_configs(self, desired):
        # Raw dicts of every config except the one sharing desired's Id.
        return [cfg.raw for cfg in self.full_config()
                if cfg.name != desired.raw['Id']]

    def _upload_bucket_config(self, config):
        self.client.put_bucket_notification_configuration(
            Bucket=self.bucket_name,
            NotificationConfiguration={
                'LambdaFunctionConfigurations': config
            })
class Config:
    """Wrapper for a single S3 lambda-notification configuration dict."""

    def __init__(self, content):
        self._content = content
        self.name = content['Id']

    @property
    def raw(self):
        """The underlying configuration dict, as exchanged with the AWS API."""
        return self._content

    def __eq__(self, other):
        # Falsy "other" (None, etc.) never compares equal.
        return bool(other) and self.raw == other.raw

    @classmethod
    def from_params(cls, **params):
        """Build a Config from the Ansible module parameters."""
        function_arn = params['lambda_function_arn']
        # A positive version number takes precedence over an alias
        # as the ARN qualifier.
        if params['lambda_version'] > 0:
            qualifier = str(params['lambda_version'])
        elif params['lambda_alias']:
            qualifier = str(params['lambda_alias'])
        else:
            qualifier = None
        if qualifier:
            params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)

        filter_rules = [
            {'Name': 'Prefix', 'Value': params['prefix']},
            {'Name': 'Suffix', 'Value': params['suffix']},
        ]
        return cls({
            'Id': params['event_name'],
            'LambdaFunctionArn': params['lambda_function_arn'],
            'Events': sorted(params['events']),
            'Filter': {'Key': {'FilterRules': filter_rules}}
        })

    @classmethod
    def from_api(cls, config):
        """Wrap a configuration dict returned by the AWS API."""
        return cls(config)
def main():
    """Module entry point: reconcile one S3->Lambda bucket notification."""
    # Every S3 event type the notification API accepts for lambda targets.
    event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
                   's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
                   's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
                   's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
                   's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        event_name=dict(required=True),
        lambda_function_arn=dict(aliases=['function_arn']),
        bucket_name=dict(required=True),
        events=dict(type='list', default=[], choices=event_types),
        prefix=dict(default=''),
        suffix=dict(default=''),
        lambda_alias=dict(),
        lambda_version=dict(type='int', default=0),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['lambda_alias', 'lambda_version']],
        required_if=[['state', 'present', ['events']]]
    )

    bucket = AmazonBucket(module.client('s3'), module.params['bucket_name'])
    # Existing config with the requested event name (None if absent) vs. the
    # config described by the module parameters.
    current = bucket.current_config(module.params['event_name'])
    desired = Config.from_params(**module.params)
    notification_configuration = [cfg.raw for cfg in bucket.full_config()]

    state = module.params['state']
    try:
        # No-op when already in the desired state; in check mode only report.
        if (state == 'present' and current == desired) or (state == 'absent' and not current):
            changed = False
        elif module.check_mode:
            changed = True
        elif state == 'present':
            changed = True
            notification_configuration = bucket.apply_config(desired)
        elif state == 'absent':
            changed = True
            notification_configuration = bucket.delete_config(desired)
    except (ClientError, BotoCoreError) as e:
        # fail_json exits the process, so `changed` below is always bound.
        module.fail_json(msg='{0}'.format(e))

    module.exit_json(**dict(changed=changed,
                            notification_configuration=[camel_dict_to_snake_dict(cfg) for cfg in
                                                        notification_configuration]))


if __name__ == '__main__':
    main()
|
roadmapper/ansible
|
lib/ansible/modules/cloud/amazon/s3_bucket_notification.py
|
Python
|
gpl-3.0
| 9,017
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
"""
ELOs package in Common Repository project.
This module provides ELOs related functions.
"""
default_app_config = 'commonrepo.elos.apps.ELOsAppConfig'
|
yrchen/CommonRepo
|
commonrepo/elos/__init__.py
|
Python
|
apache-2.0
| 901
|
"""
loadconfig_primitive
====================
:Module: pyfileserver.loadconfig_primitive
:Author: Ho Chun Wei, fuzzybr80(at)gmail.com
:Project: PyFileServer, http://pyfilesync.berlios.de/
:Copyright: Lesser GNU Public License, see LICENSE file attached with package
Loads a python module file returning its module namespace as a dictionary,
except all variables starting with '__' (excluding system and built-in objects).
A compiled module with the filename suffixed with a 'c' may be created as a
byproduct.
If Paste <http://pythonpaste.org> is installed, then paste.pyconfig should be
used as a safer and better variant.
functions::
load(filename)
"""
__docformat__ = 'reStructuredText'
#import imp
def load(filename):
    """Return the configuration module's namespace as a dict.

    Names starting with '__' are excluded from the result.

    NOTE: despite its signature, *filename* is currently ignored: the
    original ``imp.load_source`` call is disabled (see the commented line
    below) and the statically named ``PyFileServerConf`` module is
    imported instead.
    """
    #configmodule = imp.load_source('configuration_module', filename)
    import PyFileServerConf as configmodule
    # Iterate items directly instead of keys + repeated lookups.
    return dict((key, value)
                for key, value in configmodule.__dict__.items()
                if not key.startswith('__'))
|
selfcommit/gaedav
|
pyfileserver/loadconfig_primitive.py
|
Python
|
lgpl-2.1
| 1,082
|
'''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
    """Loader for the '.tex' format: 'KTEX' magic, a JSON info header,
    then the raw texture data."""

    @staticmethod
    def extensions():
        """Return the file extensions handled by this loader."""
        return ('tex', )

    def load(self, filename):
        """Read *filename* and return a one-element list of ImageData.

        Any parse error is logged as a warning and re-raised.
        """
        try:
            # ``with`` guarantees the descriptor is closed even when a parse
            # error is raised (the previous code leaked the open file).
            with open(filename, 'rb') as fd:
                if fd.read(4) != 'KTEX':
                    raise Exception('Invalid tex identifier')
                headersize = unpack('I', fd.read(4))[0]
                header = fd.read(headersize)
                if len(header) != headersize:
                    raise Exception('Truncated tex header')
                info = json.loads(header)
                data = fd.read()
                if len(data) != info['datalen']:
                    raise Exception('Truncated tex data')
        except:
            Logger.warning('Image: Image <%s> is corrupted' % filename)
            raise

        width, height = info['image_size']
        # Read (and thereby require) the texture_size entry; the values are
        # not used further at the moment.
        tw, th = info['texture_size']
        images = [data]

        im = ImageData(width, height, str(info['format']), images[0],
                       source=filename)
        return [im]
# Register this loader with Kivy's image-loader registry at import time.
ImageLoader.register(ImageLoaderTex)
|
KeyWeeUsr/kivy
|
kivy/core/image/img_tex.py
|
Python
|
mit
| 1,549
|
import sys
from geopy import Point
from django.apps import apps as django_apps
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from .geo_mixin import GeoMixin
# Indices into a landmarks tuple, which is laid out (name, longitude, latitude)
# -- see the Mapper.landmarks comment below.
LANDMARK_NAME = 0
LATITUDE = 2
LETTERS = list(map(chr, range(65, 91)))  # uppercase 'A'..'Z'
LONGITUDE = 1
# Shared console color styler for warning output.
style = color_style()
class Mapper(GeoMixin):
    """Describes one geographic map area: a center point, a radius, and the
    Django model whose instances are mapped within it.

    Subclasses override the class attributes below; ``load()`` is a hook
    run at the end of ``__init__``.
    """

    center_lat = None   # latitude of the area's center point
    center_lon = None   # longitude of the area's center point
    landmarks = None  # format ((name, longitude, latitude), )
    map_area = None     # short name identifying this area
    radius = 5.5        # area radius; passed with units='km' below
    mapper_model = None  # 'app_label.ModelName' of the mapped items

    def __init__(self):
        self.name = self.map_area or f'mapper {self.__class__.__name__}'
        app_config = django_apps.get_app_config('edc_map')
        # Instance/class setting wins over the app-config default.
        mapper_model = self.mapper_model or app_config.mapper_model
        if not mapper_model:
            raise ImproperlyConfigured(
                f'Invalid mapper_model. Got None. See {repr(self)}.')
        try:
            self.item_model = django_apps.get_model(*mapper_model.split('.'))
        except LookupError as e:
            # Warn but do not fail hard; the model may not be installed yet.
            sys.stdout.write(style.WARNING(
                f'\n Warning. Lookup error in mapper. See {repr(self)}. Got {e} '
                'edc_map.apps.AppConfig\n'))
        else:
            self.item_model_cls = self.item_model
            self.item_label = self.item_model._meta.verbose_name
        self.load()

    def __repr__(self):
        return 'Mapper({0.map_area!r})'.format(self)

    def __str__(self):
        return '({0.map_area!r})'.format(self)

    def load(self):
        # Hook for subclasses; the base implementation does nothing.
        return None

    @property
    def __dict__(self):
        # NOTE(review): deliberately overrides instance ``__dict__`` access so
        # that vars(mapper)/mapper.__dict__ yields a serializable summary.
        # Instance attribute assignment still works, but confirm before
        # changing -- callers may rely on this exact dict shape.
        return {
            'map_area': self.map_area,
            'center_lat': self.center_lat,
            'center_lon': self.center_lon,
            'radius': self.radius}

    @property
    def area_center_point(self):
        return Point(self.center_lat, self.center_lon)

    @property
    def area_radius(self):
        return self.radius

    def point_in_map_area(self, point):
        """Return True if point is within mapper area radius."""
        return self.point_in_radius(
            point, self.area_center_point, self.area_radius)

    def raise_if_not_in_map_area(self, point):
        """Delegate to GeoMixin's radius check, raising when outside the area."""
        self.raise_if_not_in_radius(
            point, self.area_center_point, self.area_radius,
            units='km', label=self.map_area)
|
botswana-harvard/edc-map
|
edc_map/mapper.py
|
Python
|
gpl-2.0
| 2,309
|
import netaddr
from oslo.config import cfg
from neutron import manager
from neutron.common import rpc as q_rpc
from neutron.db import api as qdbapi
from neutron.db import common_db_mixin as db_base_plugin_v2
from neutron.db.servicechain import servicechain_db
from neutron.db.servicechain import servicechain_pool as sc_pool
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.common import rpc as n_rpc
from neutron.plugins.common import constants
from neutron.services.servicechain import constants as sc_const
from neutron.services.servicechain import rpc as sc_rpc
from neutron.db import agents_db
from neutron.openstack.common import loopingcall
from neutron.common import topics
# Module-level logger for the service-chain plugin.
LOG = log.getLogger(__name__)
class ServiceChainPluginRpcCallbacks(n_rpc.RpcCallback, sc_rpc.ServiceChainRpcCallbackMixin):
    """Server-side RPC endpoint dispatching agent calls to the plugin."""

    def __init__(self, plugin):
        super(ServiceChainPluginRpcCallbacks, self).__init__()
        self.plugin = plugin
class ServiceChainNotifierApi(n_rpc.RpcProxy,
                              sc_rpc.ServiceChainNotifierRpcApiMixin):
    """Client-side RPC proxy used to notify service-chain agents."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(ServiceChainNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
class ServiceChainPlugin(servicechain_db.ServiceChainDbMixin,
db_base_plugin_v2.CommonDbMixin):
supported_extension_aliases = ['service-chain']
def __init__(self):
self.setup_sc_pool()
self.setup_rpc()
upate_chain_status = loopingcall.FixedIntervalLoopingCall(
self._upate_chain_status_task, session=qdbapi.get_session())
upate_chain_status.start(interval=10)
def setup_sc_pool(self):
try:
sc_sf_pools = []
for sc_pool_str in cfg.CONF.SERVICECHAIN.servicechain_pool.split(','):
sc_range = sc_pool_str.split(':')
if len(sc_range) == 3:
pool = {'sf_sc_identifier':sc_range[0],'sf_port_id_begin':sc_range[1],
'sf_port_id_end':sc_range[2]}
sc_sf_pools.append(pool)
if len(sc_sf_pools) > 0:
sc_pool.init_servicechain_pool(sc_sf_pools)
except Exception,ex:
LOG.error(_('init_servicechain_pool error: %s'),ex)
def setup_rpc(self):
self.notifier = ServiceChainNotifierApi(sc_const.SERVICECHAIN_AGENT_TOPIC)
self.topic = sc_const.SERVICECHAIN_TOPIC
self.endpoints = [ServiceChainPluginRpcCallbacks(self)]
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(
self.topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
def get_plugin_name(self):
return constants.SERVICE_CHAIN
def get_plugin_type(self):
return constants.SERVICE_CHAIN
def get_plugin_description(self):
return 'Service Chain plugin'
def get_service_traffic_classifier(self, context, service_traffic_classifier_id,fields=None):
LOG.debug(_("Getting Service Traffic Classifier id %s "),service_traffic_classifier_id)
session = context.session
with session.begin(subtransactions=True):
result = super(ServiceChainPlugin,
self).get_service_traffic_classifier(context, service_traffic_classifier_id,fields)
return self._fields(result, fields)
def get_service_traffic_classifiers(self, context,filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
LOG.debug(_("Getting All Service Traffic Classifier List"))
session = context.session
with session.begin(subtransactions=True):
classifiers = super(ServiceChainPlugin, self).get_service_traffic_classifiers(context, filters, fields, sorts,
limit, marker, page_reverse)
return [self._fields(classifier, fields) for classifier in classifiers]
def create_service_traffic_classifier(self, context, service_traffic_classifier):
LOG.debug(_("Creating service traffic classifier %s"), service_traffic_classifier)
result = super(ServiceChainPlugin, self).create_service_traffic_classifier(context, service_traffic_classifier)
classifier_id = result['id']
LOG.debug(_("Create service instance success, classifier_id:" + classifier_id))
return result
def update_service_traffic_classifier(self, context, service_traffic_classifier_id,
service_traffic_classifier):
LOG.debug(_("Updating service traffic classifier %s"), service_traffic_classifier)
result = super(ServiceChainPlugin, self).update_service_traffic_classifier(context, service_traffic_classifier_id,
service_traffic_classifier)
LOG.debug(_("update service instance success"))
return result
def delete_service_traffic_classifier(self, context, service_traffic_classifier_id):
LOG.debug(_("Deleting service traffic classifier %s"), service_traffic_classifier_id)
result = super(ServiceChainPlugin, self).delete_service_traffic_classifier(context, service_traffic_classifier_id)
LOG.debug(_("Delete service instance success, classifier_id:" + service_traffic_classifier_id))
def create_service_function_instance(self, context, service_function_instance):
LOG.debug(_("Creating service instance %s"), service_function_instance)
result = super(ServiceChainPlugin, self).create_service_function_instance(context, service_function_instance)
serviceinstance_id = result['id']
LOG.debug(_("Create service instance success, serviceinstance_id:" + serviceinstance_id))
return result
def get_service_function_instance(self, context, service_function_instance_id,fields=None):
LOG.debug(_("Getting Service Function Instance id %s "),service_function_instance_id)
session = context.session
with session.begin(subtransactions=True):
result = super(ServiceChainPlugin,
self).get_service_function_instance(context, service_function_instance_id,fields)
return self._fields(result, fields)
def get_service_function_instances(self, context,filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
LOG.debug(_("Getting All Service Function Instance List"))
session = context.session
with session.begin(subtransactions=True):
serviceinstances = super(ServiceChainPlugin, self).get_service_function_instances(context, filters, fields, sorts,
limit, marker, page_reverse)
return [self._fields(instance, fields) for instance in serviceinstances]
def update_service_function_instance(self, context, service_function_instance_id,
service_function_instance):
LOG.debug(_("updating Service Function Instance %s, with %s"), service_function_instance_id,
service_function_instance)
result = super(ServiceChainPlugin, self).update_service_function_instance(context,
service_function_instance_id,
service_function_instance)
serviceinstance_id = result['id']
LOG.debug(_("Update service instance success, serviceinstance_id:" + serviceinstance_id))
return result
def delete_service_function_instance(self, context, service_function_instance_id):
LOG.debug(_("Deleting service instance %s"), service_function_instance_id)
super(ServiceChainPlugin, self).delete_service_function_instance(context, service_function_instance_id)
LOG.debug(_("Delete service instance success, serviceinstance_id:" + service_function_instance_id))
def create_service_function_group(self, context, service_function_group):
LOG.debug(_("Creating service_function_group %s"), service_function_group)
result = super(ServiceChainPlugin, self).create_service_function_group(context, service_function_group)
service_function_group_id = result['id']
LOG.debug(_("Create service_function_group success, service_function_group:" + service_function_group_id))
return result
def update_service_function_group(self, context, service_function_group_id,
service_function_group):
LOG.debug(_("Updating service_function_group %s"), service_function_group_id)
result = super(ServiceChainPlugin, self).update_service_function_group(context, \
service_function_group_id, service_function_group)
LOG.debug(_("Updating service_function_group success"))
return result
def delete_service_function_group(self, context, service_function_group_id):
LOG.debug(_("Deleting service_function_group %s"), service_function_group_id)
super(ServiceChainPlugin, self).delete_service_function_group(context, service_function_group_id)
LOG.debug(_("Delete service_function_group success"))
def get_service_function_group(self, context, service_function_group_id, fields=None):
LOG.debug(_("Getting service_function_group id %s"),service_function_group_id)
session = context.session
with session.begin(subtransactions=True):
result = super(ServiceChainPlugin,\
self).get_service_function_group(context, service_function_group_id,fields)
return self._fields(result, fields)
def get_service_function_groups(self, context,filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
service_function_groups = super(ServiceChainPlugin,
self).get_service_function_groups(context, filters, fields, sorts,
limit, marker, page_reverse)
return [self._fields(group, fields) for group in service_function_groups]
def create_service_chain(self, context, service_chain):
    """Create a service chain via the base plugin and log the result.

    :param context: neutron request context, passed through unchanged.
    :param service_chain: dict describing the chain to create.
    :returns: the created chain dict (includes the generated ``id``).
    """
    LOG.debug(_("Creating service_chain %s"), service_chain)
    result = super(ServiceChainPlugin, self).create_service_chain(
        context, service_chain)
    service_chain_id = result['id']
    # Fix: lazy %-style argument instead of string concatenation inside _();
    # concatenation formats eagerly and breaks gettext extraction.
    LOG.debug(_("Create service_chain success, service_chain_id: %s"),
              service_chain_id)
    return result
def update_service_chain(self, context, service_chain_id,
                         service_chain):
    """Update an existing service chain and return its new state."""
    LOG.debug(_("Updating service_chain %s"), service_chain_id)
    updated = super(ServiceChainPlugin, self).update_service_chain(
        context, service_chain_id, service_chain)
    LOG.debug(_("Updating service_chain success"))
    return updated
def delete_service_chain(self, context, service_chain_id):
    """Delete the service chain identified by the given id."""
    LOG.debug(_("Deleting service_chain %s"), service_chain_id)
    super(ServiceChainPlugin, self).delete_service_chain(context, service_chain_id)
    LOG.debug(_("Delete service_chain success"))
def get_service_chain(self, context, service_service_chain_id, fields=None):
    """Fetch one service chain, restricted to ``fields`` if given."""
    LOG.debug(_("Getting service_chain id %s"), service_service_chain_id)
    with context.session.begin(subtransactions=True):
        chain = super(ServiceChainPlugin, self).get_service_chain(
            context, service_service_chain_id, fields)
        return self._fields(chain, fields)
def get_service_chains(self, context, filters=None, fields=None,
                       sorts=None, limit=None, marker=None,
                       page_reverse=False):
    """List service chains, applying the field filter to each one."""
    with context.session.begin(subtransactions=True):
        chains = super(ServiceChainPlugin, self).get_service_chains(
            context, filters, fields, sorts, limit, marker, page_reverse)
        return [self._fields(c, fields) for c in chains]
|
nash-x/hws
|
neutron/services/servicechain/plugin.py
|
Python
|
apache-2.0
| 12,790
|
from django import template
import markdown2
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
register = template.Library()
@register.filter(is_safe=True)
def markdowner(value):
    """Render ``value`` as Markdown and mark the resulting HTML as safe."""
    html = markdown2.markdown(
        str(value), extras={"html-classes": {"img": "testing"}})
    return mark_safe(html)
@register.filter
def shortenEntry(value, post):
    """Truncate ``value`` to its first 600 words, appending a 'read more' link.

    :param value: the entry body text.
    :param post: URL of the full post, used in the generated anchor tag.
    :returns: ``value`` unchanged if it is 600 words or fewer, otherwise the
        first 600 words followed by an ellipsis link to ``post``.
    """
    # split() already yields strings, so the original str() mapping was
    # redundant and has been dropped.
    words = value.split(" ")
    # Bug fix: the original compared the *character* count (len(value) >= 600)
    # while truncating by *word* count, so a post with >=600 characters but
    # <600 words got a "..." link appended without actually being shortened.
    if len(words) > 600:
        return ' '.join(words[:600]) + "<a href='%s'>...</a>" % post
    return value
|
Nimmard/james-olson.com
|
blog/templatetags/entryfilters.py
|
Python
|
gpl-2.0
| 609
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import glob
from argparse import Namespace
from os import path
from babel import Locale
from babel.core import UnknownLocaleError
from convert import key_plural_keywords
from config import Config
from utils import Path, format_to_re
from convert import read_xml, InvalidResourceError
__all__ = ('EnvironmentError', 'IncompleteEnvironment',
'Environment', 'Language', 'resolve_locale')
class EnvironmentError(Exception):
    """Base error for problems with the android2po environment setup.

    NOTE(review): this shadows the builtin ``EnvironmentError`` within this
    module — confirm all raisers/catchers intend the module-local class.
    """
    pass
class IncompleteEnvironment(EnvironmentError):
    """Raised by ``Environment.init`` when the resource or gettext
    directory has not been configured (see ``Environment.init``)."""
    pass
# Mapping between Android's locale naming and Babel's.
# 'from' translates an Android code to the Babel code; 'to' translates a
# Babel code back to the Android one.
ANDROID_LOCALE_MAPPING = {
    'from': {
        'in': 'id',
        'iw': 'he',
        'ji': 'yi',
        'zh_CN': 'zh_Hans_CN',
        'zh_HK': 'zh_Hant_HK',
        'zh_TW': 'zh_Hant_TW'
    },
    'to': {
        'id': 'in',
        'he': 'iw',
        'yi': 'ji',
        'zh_Hans_CN': 'zh_CN',
        'zh_Hant_HK': 'zh_HK',
        'zh_Hant_TW': 'zh_TW'
    }
}
"""
Android uses locale scheme that differs from one used inside Babel,
so we must provide a mapping between one another. This list is not
full and must be updated to include all such mappings.
We can not simply ignore middle element in transition from android
to Babel locale mapping.
"""
# Locales unknown to Babel. For each we record display names, the code of a
# Babel-known locale whose plural rule matches ('plural_rule'), and the
# Language-Team header to emit into generated .po files.
#
# Fix: the entries from 'tsz' onward all carried a copy-pasted team of
# "anp <LL@li.org>"; every entry now follows the established pattern of
# "<code> <LL@li.org>".
MISSING_LOCALES = {
    'ia': {
        'name': "Interlingua",
        'local_name': "Interlingua",
        'plural_rule': 'es',
        'team': 'ia <LL@li.org>\n'
    },
    'cak': {
        'name': "Kaqchikel",
        'local_name': "Kaqchikel",
        'plural_rule': 'az',
        'team': 'cak <LL@li.org>\n'
    },
    'zam': {
        'name': "Miahuatlán Zapotec",
        'local_name': "DíɁztè",
        'plural_rule': 'az',
        'team': 'zam <LL@li.org>\n'
    },
    'trs': {
        'name': "Chicahuaxtla Triqui",
        'local_name': "Triqui",
        'plural_rule': 'az',
        'team': 'trs <LL@li.org>\n'
    },
    'meh': {
        'name': "Mixteco Yucuhiti",
        'local_name': "Tu´un savi ñuu Yasi'í Yuku Iti",
        'plural_rule': 'id',
        'team': 'meh <LL@li.org>\n'
    },
    'mix': {
        'name': "Mixtepec Mixtec",
        'local_name': "Tu'un savi",
        'plural_rule': 'id',
        'team': 'mix <LL@li.org>\n'
    },
    'oc': {
        'name': 'Occitan',
        'local_name': 'occitan',
        'plural_rule': 'fi',
        'team': 'oc <LL@li.org>\n'
    },
    'an': {
        'name': 'Aragonese',
        'local_name': 'Aragonés',
        'plural_rule': 'fi',
        'team': 'an <LL@li.org>\n'
    },
    'wo': {
        'name': 'Wolof',
        'local_name': 'Wolof',
        'plural_rule': 'id',
        'team': 'wo <LL@li.org>\n'
    },
    'tt': {
        'name': 'Tatar',
        'local_name': 'татарча',
        'plural_rule': 'fi',
        'team': 'tt <LL@li.org>\n'
    },
    'anp': {
        'name': 'Angika',
        'local_name': 'अंगिका',
        'plural_rule': 'bg',
        'team': 'anp <LL@li.org>\n'
    },
    'tsz': {
        'name': 'Purépecha',
        'local_name': 'p\'urhepecha',
        'plural_rule': 'de',
        'team': 'tsz <LL@li.org>\n'
    },
    'ixl': {
        'name': 'Ixil',
        'local_name': 'ixil',
        'plural_rule': 'de',
        'team': 'ixl <LL@li.org>\n'
    },
    'pai': {
        'name': 'Pai pai',
        'local_name': 'paa ipai',
        'plural_rule': 'lo',
        'team': 'pai <LL@li.org>\n'
    },
    'quy': {
        'name': 'Quechua Chanka',
        'local_name': 'Chanka Qhichwa',
        'plural_rule': 'pt',
        'team': 'quy <LL@li.org>\n'
    },
    'ay': {
        'name': 'Aymara',
        'local_name': 'Aimara',
        'plural_rule': 'de',
        'team': 'ay <LL@li.org>\n'
    },
    'quc': {
        'name': 'K\'iche\'',
        'local_name': 'K\'iche\'',
        'plural_rule': 'de',
        'team': 'quc <LL@li.org>\n'
    },
    'jv': {
        'name': 'Javanese',
        'local_name': 'Basa Jawa',
        'plural_rule': 'ja',
        'team': 'jv <LL@li.org>\n'
    },
    'ppl': {
        'name': 'Náhuat Pipil',
        'local_name': 'Náhuat Pipil',
        'plural_rule': 'az',
        'team': 'ppl <LL@li.org>\n'
    },
    'su': {
        'name': 'Sundanese',
        'local_name': 'Basa Sunda',
        'plural_rule': 'ja',
        'team': 'su <LL@li.org>\n'
    },
    'hus': {
        'name': 'Huastec',
        'local_name': 'Tének',
        'plural_rule': 'ja',
        'team': 'hus <LL@li.org>\n'
    },
    'yua': {
        'name': 'Yucatec',
        'local_name': 'Maaya',
        'plural_rule': 'az',
        'team': 'yua <LL@li.org>\n'
    },
    'ace': {
        'name': 'Acehnese',
        'local_name': 'Basa Acèh',
        'plural_rule': 'id',
        'team': 'ace <LL@li.org>\n'
    },
    'nv': {
        'name': 'Navajo',
        'local_name': 'Diné Bizaad',
        'plural_rule': 'id',
        'team': 'nv <LL@li.org>\n'
    },
    'co': {
        'name': 'Corsican',
        'local_name': 'Corsu',
        'plural_rule': 'pt',
        'team': 'co <LL@li.org>\n'
    },
    'sn': {
        'name': 'Shona',
        'local_name': 'ChiShona',
        'plural_rule': 'az',
        'team': 'sn <LL@li.org>\n'
    }
}
class Language(object):
    """A single target language, wrapping a Babel ``Locale``."""

    def __init__(self, code, env=None):
        self.code = code
        self.env = env
        if not code:
            self.locale = None
        elif code in MISSING_LOCALES:
            # Babel does not know this locale; borrow the plural rule of the
            # configured stand-in locale instead.
            self.locale = Locale.parse(MISSING_LOCALES[code]['plural_rule'], sep='-')
        else:
            self.locale = Locale.parse(code, sep='-')

    def __unicode__(self):  # pragma: no cover
        return str(self.code)

    def xml(self, kind):
        """Path to the Android XML file of ``kind`` for this language."""
        # Translate the Babel code back to Android's naming where needed;
        # Android prefixes the region part with "r" (e.g. values-zh-rCN).
        code = ANDROID_LOCALE_MAPPING['to'].get(self.code, self.code)
        pieces = tuple(code.split('_', 2))
        if len(pieces) == 2:
            android_code = "%s-r%s" % pieces
        else:
            android_code = "%s" % pieces
        return self.env.path(self.env.resource_dir,
                             'values-%s/%s.xml' % (android_code, kind))

    def po(self, kind):
        """Path to the .po catalog of ``kind`` for this language."""
        rel_name = self.env.config.layout % {
            'group': kind,
            'domain': self.env.config.domain or 'android',
            'locale': self.code,
        }
        return self.env.path(self.env.gettext_dir, rel_name)

    @property
    def plural_keywords(self):
        """Plural-rule keywords for this locale, sorted canonically."""
        keywords = list(self.locale.plural_form.rules.keys())
        keywords.append('other')
        return sorted(keywords, key=key_plural_keywords)
class DefaultLanguage(Language):
    """A special ``Language`` representing the default (source) language.

    On the Android side this maps to the XML files in the values/
    directory; on the gettext side it maps to the .pot template file(s).
    """

    def __init__(self, env):
        super(DefaultLanguage, self).__init__(None, env)

    def __unicode__(self):  # pragma: no cover
        return '<def>'

    def xml(self, kind):
        """Path to the language-neutral values/<kind>.xml file."""
        return self.env.path(self.env.resource_dir, 'values/%s.xml' % kind)

    def po(self, kind):
        """Path to the .pot template for ``kind``."""
        rel_name = self.env.config.template_name % {
            'domain': self.env.config.domain or 'android',
            'group': kind,
        }
        return self.env.path(self.env.gettext_dir, rel_name)
def resolve_locale(code, env):
    """Return a ``Language`` for ``code``, or ``None`` if Babel rejects it.

    Invalid locale codes are reported through the environment's writer
    instead of raising.
    """
    try:
        language = Language(code, env)
    except UnknownLocaleError:
        env.w.action('failed', '%s is not a valid locale' % code)
    else:
        return language
def find_project_dir_and_config():
    """Walk upwards from the current directory looking for an Android
    project directory and/or an android2po config file.

    The search stops as soon as either is found; both are returned
    together only when the config file sits in the root of the Android
    project directory. A directory that merely holds a config file is
    NOT considered a "project directory" for deriving default paths.

    Returns a 2-tuple ``(project_dir, config_file)``; either element
    may be ``None``.
    """
    current = os.getcwd()
    while True:
        project_dir = None
        config_file = None
        # An AndroidManifest.xml file marks a project root.
        if path.isfile(path.join(current, 'AndroidManifest.xml')):
            project_dir = current
        candidate = path.join(current, '.android2po')
        if path.isfile(candidate):
            config_file = candidate
        if project_dir or config_file:
            return project_dir, config_file
        parent = path.normpath(path.join(current, path.pardir))
        if parent == current:
            # No further change: we are at the filesystem root.
            # TODO: Is there a better way? Is path.ismount suitable?
            return None, None
        current = parent
def find_android_kinds(resource_dir, get_all=False):
    """Return the Android XML resource kinds in use under ``resource_dir``.

    Looks at the xml files in the default values/ directory and returns
    the kinds (file basenames) that contain string resources. With
    ``get_all`` set, the string-resource check is skipped and every xml
    file's kind is returned.
    """
    kinds = []
    values_dir = path.join(resource_dir, 'values')
    for entry in os.listdir(values_dir):
        full_path = path.join(values_dir, entry)
        if not (path.isfile(full_path) and entry.endswith('.xml')):
            continue
        kind = path.splitext(entry)[0]
        # 'strings' and 'arrays' are always supposed to contain something
        # translatable, so they are included without inspection.
        if get_all or kind in ('strings', 'arrays'):
            kinds.append(kind)
            continue
        # Arbitrary xml names are supported, but a file holding only
        # e.g. color resources should not yield a .po catalog — so parse
        # it and check whether any strings are present.
        # TODO: files end up parsed twice (again later by the command);
        # caching the parse result on the environment would avoid that.
        try:
            strings = read_xml(full_path)
        except InvalidResourceError as e:
            raise EnvironmentError('Failed to parse "%s": %s' % (full_path, e))
        if strings:
            kinds.append(kind)
    return kinds
class Environment(object):
    """Environment is the main object that holds all the data with
    which we run.

    Usage:
        env = Environment(writer)
        env.pop_from_config(config)
        env.init()
    """

    def __init__(self, writer):
        # Writer object used for user-facing progress/error output.
        self.w = writer
        # Names of the XML kinds (e.g. 'strings', 'arrays') to process;
        # filled in by init().
        self.xmlfiles = []
        self.default = DefaultLanguage(self)
        self.config = Config()
        # Flags recording whether the directories below were auto-derived
        # (as opposed to explicitly given by the user); see auto_paths().
        self.auto_gettext_dir = None
        self.auto_resource_dir = None
        self.resource_dir = None
        self.gettext_dir = None
        # Try to determine if we are inside a project; if so, we a) might
        # find a configuration file, and b) can potentially assume some
        # default directory names.
        self.project_dir, self.config_file = find_project_dir_and_config()

    def _pull_into(self, namespace, target):
        """If for a value ``namespace`` there exists a corresponding
        attribute on ``target``, then update that attribute with the
        values from ``namespace``, and then remove the value from
        ``namespace``.

        This is needed because certain options, if passed on the command
        line, need nevertheless to be stored in the ``self.config``
        object. We therefore **pull** those values in, and return the
        rest of the options.
        """
        for name in dir(namespace):
            if name.startswith('_'):
                continue
            if name in target.__dict__:
                setattr(target, name, getattr(namespace, name))
                delattr(namespace, name)
        return namespace

    def _pull_into_self(self, namespace):
        """This is essentially like ``self._pull_into``, but we pull
        values into the environment object itself, and in order to avoid
        conflicts between option values and attributes on the environment
        (for example ``config``), we explicitly specify the values we're
        interested in: It's the "big" ones which we would like to make
        available on the environment object directly.
        """
        for name in ('resource_dir', 'gettext_dir'):
            if hasattr(namespace, name):
                setattr(self, name, getattr(namespace, name))
                delattr(namespace, name)
        return namespace

    def pop_from_options(self, argparse_namespace):
        """Apply the set of options given on the command line.

        This means that we need those options that are "configuration"
        values to end up in ``self.config``. The normal options will
        be made available as ``self.options``.
        """
        rest = self._pull_into_self(argparse_namespace)
        rest = self._pull_into(rest, self.config)
        self.options = rest

    def pop_from_config(self, argparse_namespace):
        """Load the values we support into our attributes, remove them
        from the ``config`` namespace, and store whatever is left in
        ``self.config``.
        """
        rest = self._pull_into_self(argparse_namespace)
        rest = self._pull_into(rest, self.config)
        # At this point, there shouldn't be anything left, because
        # nothing should be included in the argparse result that we
        # don't consider a configuration option.
        ns = Namespace()
        assert rest == ns

    def auto_paths(self):
        """Try to auto-fill some path values that don't have values yet.
        """
        if self.project_dir:
            if not self.resource_dir:
                self.resource_dir = path.join(self.project_dir, 'res')
                self.auto_resource_dir = True
            if not self.gettext_dir:
                self.gettext_dir = path.join(self.project_dir, 'locale')
                self.auto_gettext_dir = True

    def path(self, *pargs):
        """Helper that constructs a Path object using the project dir
        as the base."""
        return Path(*pargs, base=self.project_dir)

    def init(self):
        """Initialize the environment.

        This entails finding the default Android language resource files,
        and in the process doing some basic validation.
        An ``EnvironmentError`` is thrown if there is something wrong.
        """
        # If either of those is not specified, we can't continue. Raise a
        # special exception that let's the caller display the proper steps
        # on how to proceed.
        if not self.resource_dir or not self.gettext_dir:
            raise IncompleteEnvironment()
        # It's not enough for directories to be specified; they really
        # should exist as well. In particular, the locale/ directory is
        # not part of the standard Android tree and thus likely to not
        # exist yet, so we create it automatically, but ONLY if it wasn't
        # specified explicitely. If the user gave a specific location,
        # it seems right to let him deal with it fully.
        if not path.exists(self.gettext_dir) and self.auto_gettext_dir:
            os.makedirs(self.gettext_dir)
        elif not path.exists(self.gettext_dir):
            raise EnvironmentError('Gettext directory at "%s" doesn\'t exist.' %
                                   self.gettext_dir)
        # NOTE(review): because this is an ``elif`` chained to the gettext
        # checks, the resource dir is NOT validated when the gettext dir was
        # just auto-created or already existed via the first branch — confirm
        # whether this was meant to be an independent ``if``.
        elif not path.exists(self.resource_dir):
            raise EnvironmentError('Android resource direcory at "%s" doesn\'t exist.' %
                                   self.resource_dir)
        # Find the Android XML resources that are our original source
        # files, i.e. for example the values/strings.xml file.
        groups_found = find_android_kinds(self.resource_dir,
                                          get_all=bool(self.config.groups))
        if self.config.groups:
            # The user explicitly selected groups; all of them must exist.
            self.xmlfiles = self.config.groups
            _missing = set(self.config.groups) - set(groups_found)
            if _missing:
                raise EnvironmentError(
                    'Unable to find the default XML files for the following groups: %s' % (
                        ", ".join(["%s (%s)" % (
                            g, path.join(self.resource_dir, 'values', "%s.xml" % g)) for g in _missing])
                    ))
        else:
            self.xmlfiles = groups_found
            if not self.xmlfiles:
                raise EnvironmentError('no language-neutral string resources found in "values/".')
        # If regular expressions are used as ignore filters, precompile
        # those to help speed things along. For simplicity, we also
        # convert all static ignores to regexes.
        compiled_list = []
        for ignore_list in self.config.ignores:
            for ignore in ignore_list:
                # /regex/ syntax marks an expression; anything else is a
                # literal and gets escaped + anchored.
                if ignore.startswith('/') and ignore.endswith('/'):
                    compiled_list.append(re.compile(ignore[1:-1]))
                else:
                    compiled_list.append(re.compile("^%s$" % re.escape(ignore)))
        self.config.ignores = compiled_list
        # Validate the layout option, and resolve magic constants ("gnu")
        # to an actual format string.
        layout = self.config.layout
        multiple_pos = len(self.xmlfiles) > 1
        if not layout or layout == 'default':
            if self.config.domain and multiple_pos:
                layout = '%(domain)s-%(group)s-%(locale)s.po'
            elif self.config.domain:
                layout = '%(domain)s-%(locale)s.po'
            elif multiple_pos:
                layout = '%(group)s-%(locale)s.po'
            else:
                layout = '%(locale)s.po'
        elif layout == 'gnu':
            if multiple_pos:
                layout = '%(locale)s/LC_MESSAGES/%(group)s-%(domain)s.po'
            else:
                layout = '%(locale)s/LC_MESSAGES/%(domain)s.po'
        else:
            # TODO: These tests essentially disallow any advanced
            # formatting syntax. While that is unlikely to be used
            # or needed, a better way to test for the existance of
            # a placeholder would probably be to insert a unique string
            # and see if it comes out at the end; or, come up with
            # a proper regex to parse.
            if '%(locale)s' not in layout:
                raise EnvironmentError('--layout lacks %(locale)s variable')
            # NOTE(review): the trailing comma below passes TWO arguments to
            # EnvironmentError instead of concatenating the message — looks
            # like it was meant to be implicit string concatenation; confirm.
            if self.config.domain and '%(domain)s' not in layout:
                raise EnvironmentError('--layout needs %(domain)s variable, ',
                                       'since you have set a --domain')
            if multiple_pos and '%(group)s' not in layout:
                raise EnvironmentError('--layout needs %%(group)s variable, '
                                       'since you have multiple groups: %s' % (
                                           ", ".join(self.xmlfiles)))
        self.config.layout = layout
        # The --template option needs similar processing:
        template = self.config.template_name
        if not template:
            if self.config.domain and multiple_pos:
                template = '%(domain)s-%(group)s.pot'
            elif self.config.domain:
                template = '%(domain)s.pot'
            elif multiple_pos:
                template = '%(group)s.pot'
            else:
                template = 'template.pot'
        elif '%s' in template and '%(group)s' not in template:
            # In an earlier version the --template option only
            # supported a %s placeholder for the XML kind. Make
            # sure we still support this.
            # TODO: Would be nice we if could raise a deprecation
            # warning here somehow. That means adding a callback
            # to this function. Or, probably we should just make the
            # environment aware of the writer object. This would
            # simplify other things as well.
            template = template.replace('%s', '%(group)s')
        else:
            # Note that we do not validate %(domain)s here; we expressively
            # allow the user to define a template without a domain.
            # TODO: See the same case above when handling --layout
            if multiple_pos and '%(group)s' not in template:
                raise EnvironmentError('--template needs %%(group)s variable, '
                                       'since you have multiple groups: %s' % (
                                           ", ".join(self.xmlfiles)))
        self.config.template_name = template

    # Matches Android resource dirs like "values-de" or "values-zh-rCN".
    LANG_DIR = re.compile(r'^values-(\w\w)(?:-r(\w\w))?$')

    def get_android_languages(self):
        """Finds the languages that already exist inside the Android
        resource directory.

        Return value is a list of ``Language`` instances.
        """
        languages = []
        for name in os.listdir(self.resource_dir):
            match = self.LANG_DIR.match(name)
            if not match:
                continue
            country, region = match.groups()
            pseudo_code = "%s" % country
            if region:
                pseudo_code += "_%s" % region
            # Translate Android's legacy locale names to Babel's.
            if pseudo_code in ANDROID_LOCALE_MAPPING['from']:
                code = ANDROID_LOCALE_MAPPING['from'][pseudo_code]
            else:
                code = pseudo_code
            language = resolve_locale(code, self)
            if language:
                languages.append(language)
        return languages

    def get_gettext_languages(self):
        """Finds the languages that already exist inside the gettext
        directory.

        This is a little trickier than on the Android side, since
        we give the user a lot of flexibility in configuring how the
        .po files are laid out.
        Return value is a list of ``Language`` instances.
        """
        # Build a glob pattern based on the layout. This will enable
        # us to easily get a list of files that match the pattern.
        glob_pattern = self.config.layout % {
            'domain': self.config.domain,
            'group': '*',
            'locale': '*',
        }
        # Temporarily switch to the gettext directory. This allows us
        # to simply call glob() using the relative pattern, rather than
        # having to deal with making a full path, and then later on
        # stripping the full path again for the regex matching, and
        # potentially even running into problems when, say, the pattern
        # contains references like ../ to a parent directory.
        old_dir = os.getcwd()
        os.chdir(self.gettext_dir)
        try:
            # NOTE(review): ``list`` shadows the builtin within this scope.
            list = glob.glob(glob_pattern)
            # We now have a list of matching .po files, but no idea
            # which languages they represent, because we don't know
            # which part of the filename is the locale. To solve this,
            # we build a regular expression from the format string,
            # one with a capture group where the locale code should be.
            regex = re.compile(format_to_re(self.config.layout))
            # We then try to match every single file returned by glob.
            # In this way, we can build a list of unique locale codes.
            languages = {}
            for item in list:
                m = regex.match(item)
                if not m:
                    continue
                code = m.groupdict()['locale']
                if code not in languages:
                    language = resolve_locale(code, self)
                    if language:
                        languages[code] = language
            return languages.values()
        finally:
            os.chdir(old_dir)
|
ekager/focus-android
|
tools/l10n/android2po/env.py
|
Python
|
mpl-2.0
| 25,197
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.user'
db.alter_column(u'spa_userprofile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(unique=True, to=orm['auth.User']))
def backwards(self, orm):
# Changing field 'UserProfile.user'
db.alter_column(u'spa_userprofile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._activity': {
'Meta': {'object_name': '_Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'stream_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.mixdownload': {
'Meta': {'object_name': 'MixDownload', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'downloads'", 'to': "orm['spa.Mix']"})
},
'spa.mixfavourite': {
'Meta': {'object_name': 'MixFavourite', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"})
},
'spa.mixlike': {
'Meta': {'object_name': 'MixLike', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.mixplay': {
'Meta': {'object_name': 'MixPlay', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userfollows': {
'Meta': {'object_name': 'UserFollows'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_from': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'followers'", 'unique': 'True', 'to': "orm['spa.UserProfile']"}),
'user_to': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'following'", 'unique': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
|
fergalmoran/dss
|
spa/migrations/0006_auto__chg_field_userprofile_user.py
|
Python
|
bsd-2-clause
| 16,601
|
# Natural Language Toolkit: NomBank Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Authors: Paul Bedaride <paul.bedaride@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import unicode_literals
from nltk.tree import Tree
from xml.etree import ElementTree
from nltk.internals import raise_unorderable_types
from nltk.compat import total_ordering, python_2_unicode_compatible, string_types
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class NombankCorpusReader(CorpusReader):
    """
    Corpus reader for the nombank corpus, which augments the Penn
    Treebank with information about the predicate argument structure
    of every noun instance.  The corpus consists of two parts: the
    predicate-argument annotations themselves, and a set of "frameset
    files" which define the argument labels used by the annotations,
    on a per-noun basis.  Each "frameset file" contains one or more
    predicates, such as ``'turn'`` or ``'turn_on'``, each of which is
    divided into coarse-grained word senses called "rolesets".  For
    each "roleset", the frameset file provides descriptions of the
    argument roles, along with examples.
    """
    def __init__(self, root, nomfile, framefiles='',
                 nounsfile=None, parse_fileid_xform=None,
                 parse_corpus=None, encoding='utf8'):
        """
        :param root: The root directory for this corpus.
        :param nomfile: The name of the file containing the predicate-
            argument annotations (relative to ``root``).
        :param framefiles: A list or regexp specifying the frameset
            fileids for this corpus.
        :param nounsfile: The name of the file listing the noun lemmas
            covered by this corpus (relative to ``root``).
        :param parse_fileid_xform: A transform that should be applied
            to the fileids in this corpus.  This should be a function
            of one argument (a fileid) that returns a string (the new
            fileid).
        :param parse_corpus: The corpus containing the parse trees
            corresponding to this corpus.  These parse trees are
            necessary to resolve the tree pointers used by nombank.
        :param encoding: Encoding of the annotation files.
        """
        # If framefiles is specified as a regexp, expand it.
        if isinstance(framefiles, string_types):
            framefiles = find_corpus_fileids(root, framefiles)
        framefiles = list(framefiles)
        # Initialize the corpus reader.
        CorpusReader.__init__(self, root, [nomfile, nounsfile] + framefiles,
                              encoding)
        # Record our frame fileids & nom file.
        self._nomfile = nomfile
        self._framefiles = framefiles
        self._nounsfile = nounsfile
        self._parse_fileid_xform = parse_fileid_xform
        self._parse_corpus = parse_corpus

    def raw(self, fileids=None):
        """
        :return: the text contents of the given fileids, as a single string.
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            # BUG FIX: this previously tested ``compat.string_types``, but
            # only ``string_types`` is imported into this module's namespace,
            # so passing a single fileid string raised NameError.
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])

    def instances(self, baseform=None):
        """
        :param baseform: If given, restrict the view to instances whose
            predicate has this baseform.
        :return: a corpus view that acts as a list of
            ``NombankInstance`` objects, one for each noun in the corpus.
        """
        kwargs = {}
        if baseform is not None:
            kwargs['instance_filter'] = lambda inst: inst.baseform == baseform
        return StreamBackedCorpusView(self.abspath(self._nomfile),
                                      lambda stream: self._read_instance_block(stream, **kwargs),
                                      encoding=self.encoding(self._nomfile))

    def lines(self):
        """
        :return: a corpus view that acts as a list of strings, one for
            each line in the predicate-argument annotation file.
        """
        return StreamBackedCorpusView(self.abspath(self._nomfile),
                                      read_line_block,
                                      encoding=self.encoding(self._nomfile))

    def roleset(self, roleset_id):
        """
        :return: the xml description for the given roleset.
        :raise ValueError: if the frameset file or the roleset is missing.
        """
        baseform = roleset_id.split('.')[0]
        # Undo the filename-safe escaping applied by NombankInstance.roleset.
        baseform = baseform.replace('perc-sign','%')
        baseform = baseform.replace('oneslashonezero', '1/10').replace('1/10','1-slash-10')
        framefile = 'frames/%s.xml' % baseform
        if framefile not in self._framefiles:
            raise ValueError('Frameset file for %s not found' %
                             roleset_id)
        # n.b.: The encoding for XML fileids is specified by the file
        # itself; so we ignore self._encoding here.
        etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
        for roleset in etree.findall('predicate/roleset'):
            if roleset.attrib['id'] == roleset_id:
                return roleset
        # No matching roleset in the frameset file.
        raise ValueError('Roleset %s not found in %s' %
                         (roleset_id, framefile))

    def rolesets(self, baseform=None):
        """
        :param baseform: If given, only return rolesets from the frameset
            file for this baseform.
        :return: list of xml descriptions for rolesets.
        :raise ValueError: if ``baseform`` has no frameset file.
        """
        if baseform is not None:
            framefile = 'frames/%s.xml' % baseform
            if framefile not in self._framefiles:
                raise ValueError('Frameset file for %s not found' %
                                 baseform)
            framefiles = [framefile]
        else:
            framefiles = self._framefiles
        rsets = []
        for framefile in framefiles:
            # n.b.: The encoding for XML fileids is specified by the file
            # itself; so we ignore self._encoding here.
            etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
            rsets.append(etree.findall('predicate/roleset'))
        return LazyConcatenation(rsets)

    def nouns(self):
        """
        :return: a corpus view that acts as a list of all noun lemmas
            in this corpus (from the nombank.1.0.words file).
        """
        return StreamBackedCorpusView(self.abspath(self._nounsfile),
                                      read_line_block,
                                      encoding=self.encoding(self._nounsfile))

    def _read_instance_block(self, stream, instance_filter=lambda inst: True):
        """Read up to 100 annotation lines from *stream* and return the
        parsed instances accepted by *instance_filter*."""
        block = []
        # Read 100 at a time.
        for i in range(100):
            line = stream.readline().strip()
            if line:
                inst = NombankInstance.parse(
                    line, self._parse_fileid_xform,
                    self._parse_corpus)
                if instance_filter(inst):
                    block.append(inst)
        return block
######################################################################
#{ Nombank Instance & related datatypes
######################################################################
@python_2_unicode_compatible
class NombankInstance(object):
    """A single predicate-argument annotation from the nombank corpus."""

    def __init__(self, fileid, sentnum, wordnum, baseform, sensenumber,
                 predicate, predid, arguments, parse_corpus=None):
        self.fileid = fileid
        """The name of the file containing the parse tree for this
        instance's sentence."""

        self.sentnum = sentnum
        """The sentence number of this sentence within ``fileid``.
        Indexing starts from zero."""

        self.wordnum = wordnum
        """The word number of this instance's predicate within its
        containing sentence.  Word numbers are indexed starting from
        zero, and include traces and other empty parse elements."""

        self.baseform = baseform
        """The baseform of the predicate."""

        self.sensenumber = sensenumber
        """The sense number of the predicate."""

        self.predicate = predicate
        """A ``NombankTreePointer`` indicating the position of this
        instance's predicate within its containing sentence."""

        self.predid = predid
        """Identifier of the predicate."""

        self.arguments = tuple(arguments)
        """A list of tuples (argloc, argid), specifying the location
        and identifier for each of the predicate's argument in the
        containing sentence.  Argument identifiers are strings such as
        ``'ARG0'`` or ``'ARGM-TMP'``.  This list does *not* contain
        the predicate."""

        self.parse_corpus = parse_corpus
        """A corpus reader for the parse trees corresponding to the
        instances in this nombank corpus."""

    @property
    def roleset(self):
        """The name of the roleset used by this instance's predicate.
        Use ``nombank.roleset() <NombankCorpusReader.roleset>`` to
        look up information about the roleset."""
        # Escape characters that cannot appear in frameset file names.
        r = self.baseform.replace('%', 'perc-sign')
        r = r.replace('1/10', '1-slash-10').replace('1-slash-10', 'oneslashonezero')
        return '%s.%s' % (r, self.sensenumber)

    def __repr__(self):
        return ('<NombankInstance: %s, sent %s, word %s>' %
                (self.fileid, self.sentnum, self.wordnum))

    def __str__(self):
        s = '%s %s %s %s %s' % (self.fileid, self.sentnum, self.wordnum,
                                self.baseform, self.sensenumber)
        items = self.arguments + ((self.predicate, 'rel'),)
        for (argloc, argid) in sorted(items):
            s += ' %s-%s' % (argloc, argid)
        return s

    def _get_tree(self):
        # Only resolvable when a parse corpus containing this fileid exists.
        if self.parse_corpus is None: return None
        if self.fileid not in self.parse_corpus.fileids(): return None
        return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum]
    tree = property(_get_tree, doc="""
        The parse tree corresponding to this instance, or None if
        the corresponding tree is not available.""")

    @staticmethod
    def parse(s, parse_fileid_xform=None, parse_corpus=None):
        """Parse one line of the annotation file into a ``NombankInstance``.

        :raise ValueError: if the line is malformed, or does not contain
            exactly one ``-rel`` entry.
        """
        pieces = s.split()
        if len(pieces) < 6:
            raise ValueError('Badly formatted nombank line: %r' % s)

        # Divide the line into its basic pieces.
        (fileid, sentnum, wordnum,
         baseform, sensenumber) = pieces[:5]
        args = pieces[5:]

        # BUG FIX: the original popped entries from ``args`` while
        # enumerating it; mutating a list under its iterator skips the
        # element following each pop, so a '-rel' entry could be missed
        # or miscounted.  Partition the list instead.
        rel = [p for p in args if '-rel' in p]
        args = [p for p in args if '-rel' not in p]
        if len(rel) != 1:
            raise ValueError('Badly formatted nombank line: %r' % s)

        # Apply the fileid selector, if any.
        if parse_fileid_xform is not None:
            fileid = parse_fileid_xform(fileid)

        # Convert sentence & word numbers to ints.
        sentnum = int(sentnum)
        wordnum = int(wordnum)

        # Parse the predicate location.
        predloc, predid = rel[0].split('-', 1)
        predicate = NombankTreePointer.parse(predloc)

        # Parse the arguments.
        arguments = []
        for arg in args:
            argloc, argid = arg.split('-', 1)
            arguments.append((NombankTreePointer.parse(argloc), argid))

        # Put it all together.
        return NombankInstance(fileid, sentnum, wordnum, baseform, sensenumber,
                               predicate, predid, arguments, parse_corpus)
class NombankPointer(object):
    """
    Abstract base class for pointers that identify one or more
    constituents in a parse tree.  Concrete subclasses:

    - ``NombankTreePointer``: points to a single constituent.
    - ``NombankSplitTreePointer``: points to a 'split' constituent,
      made up of two or more ``NombankTreePointer`` pieces.
    - ``NombankChainTreePointer``: points to an entire trace chain;
      its pieces may be ``NombankTreePointer`` or
      ``NombankSplitTreePointer`` pointers.
    """

    def __init__(self):
        # This base class is abstract and may not be instantiated directly.
        if type(self) is NombankPointer:
            raise NotImplementedError()
@python_2_unicode_compatible
class NombankChainTreePointer(NombankPointer):
    """Pointer to an entire trace chain in a parse tree."""

    def __init__(self, pieces):
        # Pieces making up the chain; each is either a
        # NombankSplitTreePointer or a NombankTreePointer.
        self.pieces = pieces

    def __str__(self):
        return '*'.join('%s' % piece for piece in self.pieces)

    def __repr__(self):
        return '<NombankChainTreePointer: %s>' % self

    def select(self, tree):
        """Return a '*CHAIN*' Tree grouping each piece's selection."""
        if tree is None:
            raise ValueError('Parse tree not avaialable')
        children = [piece.select(tree) for piece in self.pieces]
        return Tree('*CHAIN*', children)
@python_2_unicode_compatible
class NombankSplitTreePointer(NombankPointer):
    """Pointer to a 'split' constituent spanning several subtrees."""

    def __init__(self, pieces):
        # Pieces making up the split constituent; all are
        # NombankTreePointer instances.
        self.pieces = pieces

    def __str__(self):
        return ','.join('%s' % piece for piece in self.pieces)

    def __repr__(self):
        return '<NombankSplitTreePointer: %s>' % self

    def select(self, tree):
        """Return a '*SPLIT*' Tree grouping each piece's selection."""
        if tree is None:
            raise ValueError('Parse tree not avaialable')
        children = [piece.select(tree) for piece in self.pieces]
        return Tree('*SPLIT*', children)
@total_ordering
@python_2_unicode_compatible
class NombankTreePointer(NombankPointer):
    """
    A pointer to a single constituent in a parse tree, written as
    ``wordnum:height`` -- ``wordnum`` is the index of the first word
    covered by the constituent (counting traces and other empty
    elements), and ``height`` is the number of levels above that leaf
    at which the constituent sits.  ``parse()`` also accepts the
    compound forms ``wordnum:height*wordnum:height*...`` (trace chain)
    and ``wordnum:height,wordnum:height,...`` (split constituent),
    returning the matching chain/split pointer class for them.
    """
    def __init__(self, wordnum, height):
        self.wordnum = wordnum
        self.height = height
    @staticmethod
    def parse(s):
        # Deal with chains (xx*yy*zz)
        pieces = s.split('*')
        if len(pieces) > 1:
            return NombankChainTreePointer([NombankTreePointer.parse(elt)
                                            for elt in pieces])
        # Deal with split args (xx,yy,zz)
        pieces = s.split(',')
        if len(pieces) > 1:
            return NombankSplitTreePointer([NombankTreePointer.parse(elt)
                                            for elt in pieces])
        # Deal with normal pointers.
        pieces = s.split(':')
        if len(pieces) != 2: raise ValueError('bad nombank pointer %r' % s)
        return NombankTreePointer(int(pieces[0]), int(pieces[1]))
    def __str__(self):
        return '%s:%s' % (self.wordnum, self.height)
    def __repr__(self):
        return 'NombankTreePointer(%d, %d)' % (self.wordnum, self.height)
    def __eq__(self, other):
        # Chain/split pointers are compared via their first piece.
        while isinstance(other, (NombankChainTreePointer,
                                 NombankSplitTreePointer)):
            other = other.pieces[0]
        if not isinstance(other, NombankTreePointer):
            return self is other
        return (self.wordnum == other.wordnum and self.height == other.height)
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        # As in __eq__, compare against a chain/split's first piece;
        # @total_ordering derives the remaining comparison operators.
        while isinstance(other, (NombankChainTreePointer,
                                 NombankSplitTreePointer)):
            other = other.pieces[0]
        if not isinstance(other, NombankTreePointer):
            return id(self) < id(other)
        # Order by word position, breaking ties with taller-first.
        return (self.wordnum, -self.height) < (other.wordnum, -other.height)
    def select(self, tree):
        # Return the constituent of `tree` that this pointer denotes.
        if tree is None: raise ValueError('Parse tree not avaialable')
        return tree[self.treepos(tree)]
    def treepos(self, tree):
        """
        Convert this pointer to a standard 'tree position' pointer,
        given that it points to the given tree.
        """
        if tree is None: raise ValueError('Parse tree not avaialable')
        # Iterative depth-first, left-to-right walk: `stack` holds the
        # path of nodes from the root to the node being visited, and
        # `treepos` the child indices chosen along that path.  Leaves
        # are counted in `wordnum` until self.wordnum is reached.
        stack = [tree]
        treepos = []
        wordnum = 0
        while True:
            # tree node:
            if isinstance(stack[-1], Tree):
                # Select the next child.
                if len(treepos) < len(stack):
                    treepos.append(0)
                else:
                    treepos[-1] += 1
                # Update the stack.
                if treepos[-1] < len(stack[-1]):
                    stack.append(stack[-1][treepos[-1]])
                else:
                    # End of node's child list: pop up a level.
                    stack.pop()
                    treepos.pop()
            # word node:
            else:
                if wordnum == self.wordnum:
                    # Found the target leaf: drop the last height+1
                    # indices to climb from the leaf up to the
                    # constituent this pointer denotes.
                    return tuple(treepos[:len(treepos)-self.height-1])
                else:
                    wordnum += 1
                    stack.pop()
|
JFriel/honours_project
|
venv/lib/python2.7/site-packages/nltk/corpus/reader/nombank.py
|
Python
|
gpl-3.0
| 16,317
|
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openwns.pyconfig import attrsetter
import openwns.logger
import copy
# The strategies for Adaptive Power Control (APC)
class APCStrategy(object):
    """Base class for Adaptive Power Control (APC) strategy configurations.

    Subclasses must assign ``nameInAPCStrategyFactory`` *before* calling
    this constructor, because the logger name is derived from it.
    """
    nameInAPCStrategyFactory = None
    logger = None

    def __init__(self, parentLogger = None, **kw):
        self.logger = self._makeLogger(parentLogger)
        attrsetter(self, kw)

    def _makeLogger(self, parentLogger):
        """Build the 'APCStrategy.<name>' logger (shared by __init__ and
        setParentLogger, which previously duplicated this expression)."""
        return openwns.logger.Logger(
            "WNS",
            ".".join(["APCStrategy", self.nameInAPCStrategyFactory]),
            True,
            parentLogger)

    def setParentLogger(self, parentLogger = None):
        """Re-create the logger under a new parent logger."""
        self.logger = self._makeLogger(parentLogger)
        # Guard added: the original read parentLogger.enabled
        # unconditionally, which crashed when called with the default
        # parentLogger=None.
        if parentLogger is not None:
            self.logger.enabled = parentLogger.enabled
class DoNotUseAPC(APCStrategy):
    """Strategy placeholder that disables adaptive power control."""
    requiresCQI = False

    def __init__(self, **kw):
        # The factory name must be set before the base constructor
        # builds the logger from it.
        self.nameInAPCStrategyFactory = "DoNotUseAPC"
        super(DoNotUseAPC, self).__init__(**kw)
class UseNominalTxPower(APCStrategy):
    """Strategy that always transmits at the nominal power level."""
    requiresCQI = False

    def __init__(self, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "UseNominalTxPower"
        super(UseNominalTxPower, self).__init__(**kw)
class UseMaxTxPower(APCStrategy):
    """Strategy that always transmits at the maximum power level."""
    requiresCQI = False

    def __init__(self, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "UseMaxTxPower"
        super(UseMaxTxPower, self).__init__(**kw)
class APCSlave(APCStrategy):
    """Slave-side strategy configuration for APC."""
    requiresCQI = False

    def __init__(self, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "APCSlave"
        super(APCSlave, self).__init__(**kw)
class FCFSMaxPhyMode(APCStrategy):
    """First-come-first-served strategy targeting the maximum PhyMode
    (requires CQI information)."""
    requiresCQI = True

    def __init__(self, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "FCFSMaxPhyMode"
        super(FCFSMaxPhyMode, self).__init__(**kw)
class FairSINR(APCStrategy):
    """Strategy targeting a fixed, fair SINR level per link direction
    (requires CQI information)."""
    requiresCQI = True
    fair_sinrdl = None
    fair_sinrul = None

    def __init__(self, fairsinrdl = 18.2, fairsinrul = 13.0, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "FairSINR"
        super(FairSINR, self).__init__(**kw)
        # Target SINR values for downlink and uplink.
        self.fair_sinrdl = fairsinrdl
        self.fair_sinrul = fairsinrul
class LTE_UL(APCStrategy):
    """LTE uplink power control strategy configuration."""
    requiresCQI = False
    # Open-loop power control parameters.
    alpha = 1.0                      # path-loss compensation factor
    pNull = "-106 dBm"               # base power level
    maxInterference = "-200 dBm"
    sinrMargin = "0.0 dB"
    minimumPhyMode = 0

    def __init__(self, **kw):
        # Set before super().__init__ so the logger gets the right name.
        self.nameInAPCStrategyFactory = "LTE_UL"
        super(LTE_UL, self).__init__(**kw)
|
creasyw/IMTAphy
|
framework/library/PyConfig/openwns/scheduler/APCStrategy.py
|
Python
|
gpl-2.0
| 3,677
|
#!/usr/bin/env python
import sys
from setuptools import setup
from bugjar import VERSION
# Load the long description from the README.  A `with` block guarantees
# the file handle is closed even if reading fails (the original used an
# explicit try/finally, and a redundant str() around read()).
with open('README.rst') as readme:
    long_description = readme.read()

required_pkgs = [
    'Pygments>=1.5',
    'tkreadonly>=0.5.2',
]
# argparse only joined the standard library in Python 2.7.
if sys.version_info < (2, 7):
    required_pkgs.append('argparse')

setup(
    name='bugjar',
    version=VERSION,
    description='A graphical Python debugger.',
    long_description=long_description,
    author='Russell Keith-Magee',
    author_email='russell@keith-magee.com',
    url='http://pybee.org/bugjar',
    packages=[
        'bugjar',
    ],
    install_requires=required_pkgs,
    scripts=[],
    entry_points={
        'console_scripts': [
            'bugjar = bugjar.main:local',
            'bugjar-jar = bugjar.main:jar',
            'bugjar-net = bugjar.main:net',
        ]
    },
    license='New BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Topic :: Software Development',
        'Topic :: Utilities',
    ],
    test_suite='tests'
)
|
pombredanne/bugjar
|
setup.py
|
Python
|
bsd-3-clause
| 1,241
|
# -*- coding: utf-8 -*-
#########################################################################
# #
# #
#########################################################################
# #
# Copyright (C) 2009-2011 Akretion, Emmanuel Samyn #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#########################################################################
# OpenERP addon manifest: declarative metadata evaluated by the module loader.
{
    'name': 'Product warranty',
    'version': '1.0',
    'category': 'Generic Modules/Product',
    'description': """
Product Warranty
================
Extend the product warranty management with warranty details on product / supplier relation:
* supplier warranty duration
* Set default return address for company (if different from standard one)
* return product to company, supplier, other
Those informations are used in the RMA Claim (Product Return Management) module.
""",
    'author': 'Akretion',
    'website': 'http://akretion.com',
    'depends': ['product'],  # requires the core product module
    'data': [  # views and access rules loaded on install/update
        'security/ir.model.access.csv',
        'res_company_view.xml',
        'product_warranty_view.xml',
    ],
    'demo_xml': [],
    'test': [],
    'installable': False,  # module disabled: not installable in this state
    'active': False,
    'certificate' : '',
    'images': ['images/product_warranty.png'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
akretion/rma
|
__unported__/product_warranty/__openerp__.py
|
Python
|
agpl-3.0
| 2,439
|
import six
import requests
from bugwarrior.config import die
from bugwarrior.services import Issue, IssueService, ServiceClient
import logging
log = logging.getLogger(__name__)
class TeamLabClient(ServiceClient):
    """Thin HTTP client for the TeamLab 1.0 REST API."""

    def __init__(self, hostname, verbose=False):
        self.hostname = hostname
        self.verbose = verbose
        self.token = None  # set by authenticate()

    def authenticate(self, login, password):
        """Obtain an auth token and remember it for subsequent calls."""
        payload = {
            "userName": six.text_type(login),
            "password": six.text_type(password),
        }
        resp = self.call_api("/api/1.0/authentication.json", post=payload)
        self.token = six.text_type(resp["token"])

    def get_task_list(self):
        """Return the raw task list for the authenticated user."""
        return self.call_api("/api/1.0/project/task/@self.json")

    def call_api(self, uri, post=None, params=None):
        """POST (when *post* is given) or GET *uri* and decode the JSON reply."""
        url = "http://" + self.hostname + uri
        kwargs = {'params': params}
        if self.token:
            kwargs['headers'] = {'Authorization': self.token}
        if post:
            response = requests.post(url, data=post, **kwargs)
        else:
            response = requests.get(url, **kwargs)
        return self.json_response(response)
class TeamLabIssue(Issue):
    """Maps one TeamLab task record onto a taskwarrior task."""

    # UDA field names used on the taskwarrior side.
    URL = 'teamlaburl'
    FOREIGN_ID = 'teamlabid'
    TITLE = 'teamlabtitle'
    PROJECTOWNER_ID = 'teamlabprojectownerid'

    UDAS = {
        URL: {'type': 'string', 'label': 'Teamlab URL'},
        FOREIGN_ID: {'type': 'string', 'label': 'Teamlab ID'},
        TITLE: {'type': 'string', 'label': 'Teamlab Title'},
        PROJECTOWNER_ID: {'type': 'string', 'label': 'Teamlab ProjectOwner ID'},
    }
    UNIQUE_KEY = (URL, )

    def to_taskwarrior(self):
        """Build the taskwarrior attribute dict for this record."""
        record = self.record
        return {
            'project': self.get_project(),
            'priority': self.get_priority(),
            self.TITLE: record['title'],
            self.FOREIGN_ID: record['id'],
            self.URL: self.get_issue_url(),
            self.PROJECTOWNER_ID: record['projectOwner']['id'],
        }

    def get_default_description(self):
        processed_url = self.get_processed_url(self.get_issue_url())
        return self.build_default_description(
            title=self.record['title'],
            url=processed_url,
            number=self.record['id'],
            cls='issue',
        )

    def get_project(self):
        return self.origin['project_name']

    def get_issue_url(self):
        template = "http://%s/products/projects/tasks.aspx?prjID=%d&id=%d"
        return template % (
            self.origin['hostname'],
            self.record["projectOwner"]["id"],
            self.record["id"]
        )

    def get_priority(self):
        # TeamLab priority 1 means "high"; anything else uses the default.
        if self.record.get("priority") == 1:
            return "H"
        return self.origin['default_priority']
class TeamLabService(IssueService):
    """bugwarrior service that pulls active TeamLab tasks."""
    ISSUE_CLASS = TeamLabIssue
    CONFIG_PREFIX = 'teamlab'

    def __init__(self, *args, **kw):
        super(TeamLabService, self).__init__(*args, **kw)
        self.hostname = self.config_get('hostname')
        login = self.config_get('login')
        password = self.config_get_password('password', login)
        # Authenticate up-front so later calls carry the token.
        self.client = TeamLabClient(self.hostname)
        self.client.authenticate(login, password)
        self.project_name = self.config_get_default(
            'project_name', self.hostname
        )

    @classmethod
    def get_keyring_service(cls, config, section):
        login = config.get(section, cls._get_key('login'))
        hostname = config.get(section, cls._get_key('hostname'))
        return "teamlab://%s@%s" % (login, hostname)

    def get_service_metadata(self):
        return {
            'hostname': self.hostname,
            'project_name': self.project_name,
        }

    @classmethod
    def validate_config(cls, config, target):
        required = ('teamlab.login', 'teamlab.password', 'teamlab.hostname')
        for key in required:
            if not config.has_option(target, key):
                die("[%s] has no '%s'" % (target, key))
        IssueService.validate_config(config, target)

    def issues(self):
        """Yield one TeamLabIssue per open remote task."""
        tasks = self.client.get_task_list()
        log.debug(" Remote has %i total issues.", len(tasks))
        # Keep only open tasks (status == 1).
        tasks = [task for task in tasks if task["status"] == 1]
        log.debug(" Remote has %i active issues.", len(tasks))
        for task in tasks:
            yield self.get_issue_for_record(task)
|
lyarwood/bugwarrior
|
bugwarrior/services/teamlab.py
|
Python
|
gpl-3.0
| 4,419
|
# Test-settings variant backed by an on-disk SQLite database, so state
# persists across runs (unlike an in-memory database).
from .test_settings import * # NOQA
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # File-based SQLite database, created in the working directory.
        'NAME': 'db.sqlite',
    }
}
|
ateoto/django-recipebook
|
recipebook/tests/persistent_settings.py
|
Python
|
mit
| 153
|
import os
from app import create_app, db
from app.models import User, Machine, Revision, Role, RevokedToken
from flask_migrate import Migrate
import click
from flask_jwt_extended import JWTManager
# Application entry point: build the Flask app from the FLASK_CONFIG
# environment variable, falling back to the 'default' configuration.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
jwt = JWTManager(app)
migrate = Migrate(app, db)
# NOTE(review): lives here because the loader must be registered on the
# module-level `jwt` instance; consider moving into the authentication
# module once that is feasible.
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    # A token is rejected when its unique identifier (jti) was revoked.
    jti = decrypted_token['jti']
    return RevokedToken.is_jti_blacklisted(jti)
@app.shell_context_processor
def make_shell_context():
    # Objects pre-imported into `flask shell` sessions.
    return dict(app=app, db=db, User=User, Machine=Machine,
                Revision=Revision, Role=Role)
@app.cli.command()
def deploy():
    """Run deployment tasks."""
    from flask_migrate import upgrade
    # migrate database to latest Revision
    upgrade()
|
rivalrockets/benchmarks.rivalrockets.com
|
rivalrockets-api.py
|
Python
|
mit
| 866
|
# -*- coding: utf-8 -*-
import logging
import unittest
import uuid
import boto3
import moto
from smart_open import open
BUCKET_NAME = 'test-smartopen'
KEY_NAME = 'test-key'
logger = logging.getLogger(__name__)
@moto.mock_s3
def setUpModule():
    '''Called once by unittest when initializing this module.  Sets up the
    test S3 bucket with versioning enabled.
    '''
    resource = boto3.resource('s3')
    bucket = resource.create_bucket(Bucket=BUCKET_NAME)
    bucket.wait_until_exists()
    resource.BucketVersioning(BUCKET_NAME).enable()
@moto.mock_s3
def tearDownModule():
    '''Called once by unittest when tearing down this module.  Empties and
    removes the test S3 bucket.
    '''
    resource = boto3.resource('s3')
    bucket = resource.Bucket(BUCKET_NAME)
    try:
        # Versioned buckets must be emptied of all versions before delete.
        bucket.object_versions.delete()
        bucket.delete()
    except resource.meta.client.exceptions.NoSuchBucket:
        pass
    bucket.wait_until_not_exists()
def get_versions(bucket, key):
    """Return object versions in chronological order."""
    versions = boto3.resource('s3').Bucket(bucket).object_versions
    matching = versions.filter(Prefix=key)
    ordered = sorted(matching, key=lambda version: version.last_modified)
    return [version.id for version in ordered]
@moto.mock_s3
class TestVersionId(unittest.TestCase):
    """End-to-end checks for the `version_id` transport parameter."""

    def setUp(self):
        #
        # Each run of this test reuses the BUCKET_NAME, but works with a
        # different key for isolation.
        #
        self.key = 'test-write-key-{}'.format(uuid.uuid4().hex)
        self.url = "s3://%s/%s" % (BUCKET_NAME, self.key)
        self.test_ver1 = u"String version 1.0".encode('utf8')
        self.test_ver2 = u"String version 2.0".encode('utf8')
        bucket = boto3.resource('s3').Bucket(BUCKET_NAME)
        bucket.put_object(Key=self.key, Body=self.test_ver1)
        # FIX: use the module logger at DEBUG level -- the original called
        # logging.critical on the root logger for routine diagnostics.
        logger.debug('versions after first write: %r', get_versions(BUCKET_NAME, self.key))
        bucket.put_object(Key=self.key, Body=self.test_ver2)
        self.versions = get_versions(BUCKET_NAME, self.key)
        logger.debug('versions after second write: %r', get_versions(BUCKET_NAME, self.key))
        assert len(self.versions) == 2

    def test_good_id(self):
        """Does passing the version_id parameter into the s3 submodule work correctly when reading?"""
        params = {'version_id': self.versions[0]}
        with open(self.url, mode='rb', transport_params=params) as fin:
            actual = fin.read()
        self.assertEqual(actual, self.test_ver1)

    def test_bad_id(self):
        """Does passing an invalid version_id exception into the s3 submodule get handled correctly?"""
        params = {'version_id': 'bad-version-does-not-exist'}
        with self.assertRaises(IOError):
            open(self.url, 'rb', transport_params=params)

    def test_bad_mode(self):
        """Do we correctly handle non-None version when writing?"""
        params = {'version_id': self.versions[0]}
        with self.assertRaises(ValueError):
            open(self.url, 'wb', transport_params=params)

    def test_no_version(self):
        """Passing in no version at all gives the newest version of the file?"""
        with open(self.url, 'rb') as fin:
            actual = fin.read()
        self.assertEqual(actual, self.test_ver2)

    def test_newest_version(self):
        """Passing in the newest version explicitly gives the most recent content?"""
        params = {'version_id': self.versions[1]}
        with open(self.url, mode='rb', transport_params=params) as fin:
            actual = fin.read()
        self.assertEqual(actual, self.test_ver2)

    def test_oldest_version(self):
        """Passing in the oldest version gives the oldest content?"""
        # Renamed from the misspelled `test_oldset_version`; unittest still
        # discovers it via the `test_` prefix.
        params = {'version_id': self.versions[0]}
        with open(self.url, mode='rb', transport_params=params) as fin:
            actual = fin.read()
        self.assertEqual(actual, self.test_ver1)
# Allow running this test module directly; test runners also discover it.
if __name__ == '__main__':
    unittest.main()
|
piskvorky/smart_open
|
smart_open/tests/test_s3_version.py
|
Python
|
mit
| 3,937
|
import os, yaml, util
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Color, ClearColor, Rectangle
from kivy.clock import Clock
class Valve:
    """An in-cell valve; ``modifier`` is +1 (open) or -1 (closed)."""

    def __init__(self, pos):
        self.pos = pos        # [row, col] of the cell the valve sits in
        self.modifier = 1     # valves start open

    def rotate_modifier(self):
        """Flip the valve between open (+1) and closed (-1)."""
        if self.modifier in (1, -1):
            self.modifier = -self.modifier
class Pump:
    """A pump attached to one pipe Section.

    ``modifier`` cycles through 0 (off) -> 1 (inject) -> -1 (drain) -> 0.
    """

    # Successor table for the three pump states.
    _NEXT_STATE = {0: 1, 1: -1, -1: 0}

    def __init__(self, section):
        self.section = section   # the Section this pump acts on
        self.power = 5.0         # fluid units moved per tick
        self.modifier = 0        # start switched off

    def rotate_modifier(self):
        """Advance the pump to its next state (off -> in -> out -> off)."""
        self.modifier = self._NEXT_STATE.get(self.modifier, self.modifier)
class Section:
    """One pipe segment (arm) inside a grid cell.

    ``pos`` is the [row, col] of the owning cell and ``pattern`` is a
    four-character bitmask ('1000'/'0100'/'0010'/'0001') naming which arm
    of the cell this section represents.
    """

    def __init__(self, pos, pattern):
        self.pos = pos            # [row, col] of the owning cell
        self.pattern = pattern    # which arm of the cell this is
        self.content = 0.0        # current fluid volume
        self.capacity = 10.0      # maximum fluid volume
        self.neighbors = []       # connected Section objects
class PipeNetwork:
    """Graph of Section objects built from the grid cells, plus the pumps
    and valves attached to them."""

    def __init__(self, cells, cols, rows):
        # NOTE(review): the declared order is (cells, cols, rows); a caller
        # that passes (cells, rows, cols) silently swaps the dimensions for
        # non-square grids -- confirm every call site.
        self.cells = cells
        self.rows = rows
        self.cols = cols
        self.sections = []
        self.pumps = []
        self.valves = []
        self.build()

    def find_section(self, pos, pattern):
        # Linear scan for the first Section matching both cell position and
        # arm pattern; None when absent.
        for section in self.sections:
            if section.pos == pos and section.pattern == pattern:
                return section
        return None

    def join_vertical(self, pos, pos_top):
        # Connect this cell's '1000' arm with the neighbor's '0010' arm.
        section = self.find_section(pos, '1000')
        section_top = self.find_section(pos_top, '0010')
        if section != None and section_top != None:
            section.neighbors.append(section_top)
            section_top.neighbors.append(section)

    def join_horizontal(self, pos, pos_right):
        # Connect this cell's '0100' arm with the neighbor's '0001' arm.
        section = self.find_section(pos, '0100')
        section_right = self.find_section(pos_right, '0001')
        if section != None and section_right != None:
            section.neighbors.append(section_right)
            section_right.neighbors.append(section)

    def build(self):
        # Pass 1: create one Section per '1' bit in each cell's pattern and
        # fully interconnect the arms within the same cell.
        i_row = 0
        i_col = 0
        for i_row in range(self.rows):
            for i_col in range(self.cols):
                cell = self.cells[i_row][i_col]
                if cell.pattern == None:
                    continue
                cell_sections = []
                if cell.pattern[0] == '1':
                    cell_sections.append(Section([i_row, i_col], '1000'))
                    self.sections.append(cell_sections[-1])
                if cell.pattern[1] == '1':
                    cell_sections.append(Section([i_row, i_col], '0100'))
                    self.sections.append(cell_sections[-1])
                if cell.pattern[2] == '1':
                    cell_sections.append(Section([i_row, i_col], '0010'))
                    self.sections.append(cell_sections[-1])
                if cell.pattern[3] == '1':
                    cell_sections.append(Section([i_row, i_col], '0001'))
                    self.sections.append(cell_sections[-1])
                if cell.type == 'pump':
                    # The pump is attached to the cell's last-created arm.
                    self.pumps.append(Pump(cell_sections[-1]))
                elif cell.addon == 'valve':
                    self.valves.append(Valve([i_row, i_col]))
                for section in cell_sections:
                    for neighbor in cell_sections:
                        if neighbor != section:
                            section.neighbors.append(neighbor)
        # Pass 2: link arms across neighboring cells. NOTE(review): in this
        # code base i_row scales the *x* coordinate (see SimulGrid.draw_tiles),
        # under which convention the horizontal/vertical naming below is
        # consistent -- verify before "fixing" it.
        for i_row in range(self.rows):
            for i_col in range(self.cols):
                #cell = self.cells[i_row][i_col]
                if i_row < (self.rows - 1):
                    #cell_top = self.cells[i_row + 1][i_col]
                    self.join_horizontal([i_row, i_col], [i_row + 1, i_col])
                if i_col < (self.cols - 1):
                    #cell_right = self.cells[i_row][i_col + 1]
                    self.join_vertical([i_row, i_col], [i_row, i_col + 1])

    def search_section(self, section, searched):
        # Depth-first search for a reachable section with spare capacity.
        # Returns (section, searched) on success; implicitly returns None
        # when exhausted.
        if section in searched:
            return
        else: searched.append(section)
        if section.content < section.capacity:
            return section, searched
        else:
            for neighbor in section.neighbors:
                if not neighbor in searched:
                    # NOTE(review): returns after descending into only the
                    # first unsearched neighbor; sibling branches are never
                    # explored -- confirm this is intended.
                    return self.search_section(neighbor, searched)

    def valve_open(self, pos):
        # A valve with modifier +1 is open; a cell without a valve never
        # obstructs flow.
        for valve in self.valves:
            if valve.pos == pos:
                return valve.modifier == 1
        return True #no valve found to obstruct

    def search_fill(self, previous, current, searched, fluid):
        # Recursively push `fluid` into the first reachable section with
        # spare capacity. Valves only gate flow between two arms of the
        # *same* cell (previous.pos == current.pos).
        if fluid == 0: return
        if current in searched:
            return
        else: searched.append(current)
        if current.content < current.capacity:
            if previous == None:
                can_pass = True
            elif previous.pos != current.pos:
                can_pass = True
            else: can_pass = self.valve_open(current.pos)
            if can_pass:
                diff = current.capacity - current.content
                if fluid > diff:
                    # NOTE(review): leftover fluid after topping this section
                    # off is not propagated further -- confirm intended.
                    current.content = current.capacity
                    fluid -= diff
                else:
                    current.content += fluid
                    fluid = 0
        else:
            for neighbor in current.neighbors:
                if not neighbor in searched:
                    return self.search_fill(current, neighbor, searched, fluid)

    def fill_section(self, section, fluid):
        # Pour up to `fluid` units into one section; returns the overflow.
        diff = section.capacity - section.content
        if fluid > diff:
            section.content = section.capacity
            return fluid - diff
        else:
            section.content += fluid
            return 0

    def run_pumps(self):
        # Apply each active pump once per tick: +1 injects `power` units
        # into the network starting at the pump's section, -1 drains the
        # pump's own section (clamped at empty).
        for pump in self.pumps:
            if pump.modifier > 0:
                self.search_fill(None, pump.section, [], pump.power)
            elif pump.modifier < 0:
                if pump.power > pump.section.content:
                    pump.section.content = 0
                else: pump.section.content -= pump.power

    def propagation(self):
        # Diffuse fluid between connected sections: each ordered pair moves
        # half of its level difference when it exceeds a small threshold.
        # Valves gate only same-cell transfers, mirroring search_fill.
        for section in self.sections:
            for neighbor in section.neighbors:
                if neighbor.pos != section.pos:
                    can_pass = True
                else: can_pass = self.valve_open(section.pos)
                if can_pass:
                    diff = section.content - neighbor.content
                    if abs(diff) > 0.1: #TODO viscosity
                        section.content -= diff / 2
                        neighbor.content += diff / 2
class Cell:
    """One grid square: pipe bitmask, cell type, and optional addon."""

    def __init__(self):
        self.pattern = None   # 4-char pipe bitmask, or None for plain ground
        self.type = None      # e.g. 'pump', or None
        self.addon = None     # e.g. 'valve', or None
class SimulGrid(FloatLayout):
    """Kivy widget that renders the pipe grid and owns the PipeNetwork.

    Fixes relative to the original:
    * Python 2-only builtins (``xrange``, ``file``) replaced with the
      portable ``range`` / ``open``.
    * ``set_dimenstion`` allocated the cell matrix as cols x rows although
      it is indexed everywhere as ``cells[i_row][i_col]``; now rows x cols.
    * ``build_network`` passed (rows, cols) into PipeNetwork's
      (cells, cols, rows) signature, swapping the dimensions for
      non-square grids.
    """

    def __init__(self, **kwargs):
        super(SimulGrid, self).__init__(**kwargs)
        self.rows = None
        self.cols = None
        self.cell_width = None
        self.cell_height = None
        self.network = None
        self.cells = self.load_data()  # None when no saved grid exists
        with self.canvas:
            Color(.2, .2, .2)
            self.rect = Rectangle(pos=self.pos, size=self.size)
        # Redraw whenever the widget moves or resizes.
        self.bind(pos=self.update_gfx)
        self.bind(size=self.update_gfx)

    def build_network(self):
        """(Re)build the pipe network from the current cell matrix."""
        # PipeNetwork's signature is (cells, cols, rows) -- pass in that order.
        self.network = PipeNetwork(list(self.cells), self.cols, self.rows)

    def on_touch_down(self, touch):
        """Toggle the pump/valve in the touched cell, if any."""
        if not self.collide_point(touch.x, touch.y):
            return
        # NOTE: throughout this widget i_row indexes the x axis and i_col
        # the y axis (see draw_tiles).
        i_row = int( (touch.x - self.pos[0]) / self.cell_width )
        i_col = int( (touch.y - self.pos[1]) / self.cell_height )
        cell = self.cells[i_row][i_col]
        if cell.type == 'pump':
            for pump in self.network.pumps:
                if pump.section.pos == [i_row, i_col]:
                    pump.rotate_modifier()
        if cell.addon == 'valve':
            for valve in self.network.valves:
                if valve.pos == [i_row, i_col]:
                    valve.rotate_modifier()

    def set_dimenstion(self, rows, cols):
        """Set the grid size and allocate an empty cell matrix if needed.

        (Method name kept, typo and all, for caller compatibility.)
        """
        self.rows = rows
        self.cols = cols
        if self.cells is None:
            # rows x cols, matching the cells[i_row][i_col] indexing used
            # everywhere else (the original allocated cols x rows).
            self.cells = [[Cell() for _ in range(cols)] for _ in range(rows)]
        self.update_gfx()

    def update_gfx(self, *args):
        """Recompute cell pixel sizes and redraw."""
        # NOTE(review): width is divided by cols and height by rows while
        # i_row scales x in draw_tiles; this only lines up for square grids.
        # Left as-is pending confirmation of the intended orientation.
        self.cell_width = self.size[0] / self.cols
        self.cell_height = self.size[1] / self.rows
        self.draw_tiles()

    def load_data(self):
        """Load the saved cell matrix from disk, or return None."""
        if os.path.isfile('data/pipeline-grid.yaml'):
            # SECURITY NOTE: yaml.Loader can instantiate arbitrary Python
            # objects -- only load trusted grid files. The explicit Loader
            # also preserves the legacy yaml.load default on modern PyYAML.
            with open('data/pipeline-grid.yaml', 'r') as stream:
                cells = yaml.load(stream, Loader=yaml.Loader)
            return list(cells)
        return None

    def draw_tiles(self, *args):
        """Redraw ground, pipes, fluid levels, pump/valve indicators, addons."""
        self.canvas.clear()
        with self.canvas:
            # ground and pipes
            for i_row in range(self.rows):
                for i_col in range(self.cols):
                    cell_x = self.pos[0] + i_row * self.cell_width
                    cell_y = self.pos[1] + i_col * self.cell_height
                    Rectangle(source=os.path.join(util.dirname, 'img', 'ground.png'),
                        pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
                    cell = self.cells[i_row][i_col]
                    if not cell.pattern is None:
                        Rectangle(source=os.path.join(util.dirname, 'img', cell.type + '-' + cell.pattern + '.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
            # fluid: one bar per section, scaled by its fill fraction
            if self.network != None:
                Color(.1,.5,1.)
                for section in self.network.sections:
                    cell_x = self.pos[0] + section.pos[0] * self.cell_width
                    cell_y = self.pos[1] + section.pos[1] * self.cell_height
                    fill = section.content / section.capacity
                    if section.pattern == '1000':
                        pos = [cell_x + self.cell_width / 2, cell_y + self.cell_height / 2]
                        size = [self.cell_width * 0.1 * fill, self.cell_height / 2]
                        pos = [pos[0] - size[0] / 2, pos[1]]
                    if section.pattern == '0100':
                        pos = [cell_x + self.cell_width / 2, cell_y + self.cell_height / 2]
                        size = [self.cell_width / 2, self.cell_height * 0.1 * fill]
                        pos = [pos[0], pos[1] - size[1] / 2]
                    if section.pattern == '0010':
                        pos = [cell_x + self.cell_width / 2, cell_y]
                        size = [self.cell_width * 0.1 * fill, self.cell_height / 2]
                        pos = [pos[0] - size[0] / 2, pos[1]]
                    if section.pattern == '0001':
                        pos = [cell_x, cell_y + self.cell_height / 2]
                        size = [self.cell_width / 2, self.cell_height * 0.1 * fill]
                        pos = [pos[0], pos[1] - size[1] / 2]
                    Rectangle(pos=pos, size=size)
                Color(1,1,1)
                # pumps
                for pump in self.network.pumps:
                    cell_x = self.pos[0] + pump.section.pos[0] * self.cell_width
                    cell_y = self.pos[1] + pump.section.pos[1] * self.cell_height
                    if pump.modifier > 0:
                        Rectangle(source=os.path.join(util.dirname, 'img', 'indicator-plus.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
                    elif pump.modifier < 0:
                        Rectangle(source=os.path.join(util.dirname, 'img', 'indicator-minus.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
                # valves
                for valve in self.network.valves:
                    cell_x = self.pos[0] + valve.pos[0] * self.cell_width
                    cell_y = self.pos[1] + valve.pos[1] * self.cell_height
                    if valve.modifier > 0:
                        Rectangle(source=os.path.join(util.dirname, 'img', 'indicator-plus.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
                    elif valve.modifier < 0:
                        Rectangle(source=os.path.join(util.dirname, 'img', 'indicator-minus.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
            # addons (drawn on top of everything else)
            for i_row in range(self.rows):
                for i_col in range(self.cols):
                    cell_x = self.pos[0] + i_row * self.cell_width
                    cell_y = self.pos[1] + i_col * self.cell_height
                    cell = self.cells[i_row][i_col]
                    if not cell.addon is None:
                        Rectangle(source=os.path.join(util.dirname, 'img', cell.addon + '.png'),
                            pos=[cell_x, cell_y], size=[self.cell_width, self.cell_height])
class Simulator:
    """Static driver that steps the active SimulGrid on a Kivy clock."""

    running = False   # when False, process() asks the Clock to unschedule
    grid = None       # the SimulGrid instance being simulated

    @staticmethod
    def set_grid(simulgrid):
        """Register the grid instance that process() should advance."""
        Simulator.grid = simulgrid

    @staticmethod
    def process(dt):
        """One simulation tick; returning False unschedules the callback."""
        if Simulator.grid is not None:
            Simulator.grid.network.run_pumps()
            Simulator.grid.network.propagation()
            Simulator.grid.draw_tiles()
        return Simulator.running

    @staticmethod
    def start(instance):
        """Begin ticking at 5 Hz."""
        Clock.schedule_interval(Simulator.process, 0.2)
        Simulator.running = True

    @staticmethod
    def stop(instance):
        """Ask the scheduled callback to stop on its next tick."""
        Simulator.running = False
|
victor-rene/MicroScada
|
archive/day_09/simulator.py
|
Python
|
mit
| 12,003
|
from SassyMQ.CommonClasses.BaseClasses import *
from SassyMQ.CommonClasses.SMQActorBase import *
from json import JSONEncoder
class SMQPublicBase(SMQActorBase):
    """AMQP actor bound to the 'public.all' queue.

    OpenSourceToolsLexicon - OSTL
    """

    def __init__(self, isAutoConnect = True):
        super(SMQPublicBase, self).__init__("public.all", isAutoConnect)

    def CheckRouting(self, message_frame, header_frame, body):
        """Routing hook for incoming messages; this actor handles none."""
        pass

    # ACTOR CAN SAY:
    def PublicCreateSMQProjectNoPayload(self):
        """Publish a CreateSMQProject request with an empty payload."""
        self.PublicCreateSMQProject(self.CreatePayload())

    def PublicCreateSMQProjectString(self, content):
        """Publish a CreateSMQProject request whose payload carries *content*."""
        payload = self.CreatePayload()
        payload.Content = content
        self.PublicCreateSMQProject(payload)

    def PublicCreateSMQProject(self, payload):
        """Serialize *payload* and publish it to the 'publicmic' exchange.

        Bug fix: the debug print used to concatenate the payload object onto
        a str, raising TypeError whenever IsDebugMode was on; serialize it
        explicitly instead.
        """
        if self.IsDebugMode:
            print("Create S M Q Project - ")
            print("payload: " + payload.toJSON())
        self.RMQChannel.basic_publish(exchange='publicmic', routing_key='host.general.public.createsmqproject', body=payload.toJSON())
|
CODEiverse/SassyMQ-OpenSourceTools
|
PySassyMQ/SassyMQ/SMQActors/SMQPublicBase.py
|
Python
|
mpl-2.0
| 1,084
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
#
# Copyright (c) 2013 Greenhost VOF
# https://greenhost.nl -\- https://greenhost.io
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys
import logging
import subprocess
from viper import tools
# Return values of fwipv6 command:
# 0 = operation was successful
# 1 = operation failed
# 2 = firewall is not enabled
class FirewallException(Exception):
    """Raised when the Windows Firewall state cannot be queried or changed."""
def is_firewall_enabled():
    """Check whether windows firewall is enabled or not"""
    # Parses `netsh advfirewall show currentprofile` output, looking for a
    # line like "State  ON".
    try:
        cmd = subprocess.Popen(['netsh', 'advfirewall', 'show', 'currentprofile'], stdout=subprocess.PIPE)
        out = cmd.stdout.readlines()
        for l in out:
            # NOTE(review): on Python 3 these lines are bytes, so
            # startswith('State') would raise TypeError -- confirm the
            # target interpreter or decode first.
            if l.startswith('State'):
                state = l.split()[-1]
                if state == "ON":
                    return True
                else:
                    return False
        # NOTE(review): implicitly returns None when no 'State' line is
        # found -- callers treating the result as strictly boolean beware.
    except OSError as ex:
        raise FirewallException("Couldn't determine if firewall is up")
def set_firewall_state(state = "on"):
    """Turn every Windows Firewall profile "on" or "off".

    Elevated privileges needed to run this.
    @note uses the netsh command to interact with the firewall which is
    notorious for changing across versions of windows
    """
    cmd = "netsh advfirewall set allprofiles state {0}".format(state)
    # Context manager ensures the devnull handle is closed (the original
    # leaked it on every call).
    with open(os.devnull, 'w') as FNULL:
        subprocess.call(cmd.split(), stdout=FNULL, stderr=subprocess.STDOUT)
def firewall_enable():
    """Switch all Windows Firewall profiles on."""
    set_firewall_state(state="on")
def firewall_disable():
    """Switch all Windows Firewall profiles off."""
    set_firewall_state(state="off")
def exec_rules(rules):
    """Run each netsh rule command in *rules*, silencing all output.

    Returns True when every command exits with status 0; stops and returns
    False at the first failing command.
    """
    # Open devnull once and close it via the context manager, instead of
    # leaking one file handle per rule as the original did.
    with open(os.devnull, 'w') as FNULL:
        for r in rules:
            retval = subprocess.call(r.split(), stdout=FNULL, stderr=subprocess.STDOUT)
            if retval != 0:
                # if setting one of the rules fails, return
                return False
    return True
def block_ipv6():
    """Execute external fwipv6 tool to enable the Windows Firewall filtering of IPv6 traffic"""
    add = "netsh advfirewall firewall add rule "
    # ICMPv6 both directions, raw IPv6 (protocol 41), then the IPv6
    # extension/ICMP protocol numbers 43, 44, 58, 59, 60.
    rules = [
        add + 'name="Viper - IPv6" protocol=icmpv6 dir=out action=block',
        add + 'name="Viper - IPv6" protocol=icmpv6 dir=in action=block',
        add + 'name="Viper - IPv6" action=block protocol=41 dir=out',
    ]
    rules += [
        add + 'name="Viper - IPv6 protocol {0}" protocol={0} action=block dir=out'.format(n)
        for n in (43, 44, 58, 59, 60)
    ]
    logging.info("Configuring Windows Firewall to block IPv6 traffic...")
    return exec_rules(rules)
def unblock_ipv6():
    """Execute external fwipv6 tool to disable the Windows Firewall filtering of IPv6 traffic"""
    # Delete every rule added by block_ipv6 (the three "Viper - IPv6"
    # entries share one name and are removed by a single delete).
    names = ["Viper - IPv6"]
    names += ["Viper - IPv6 protocol {0}".format(n) for n in (43, 44, 58, 59, 60)]
    rules = ['netsh advfirewall firewall delete rule name="{0}"'.format(name) for name in names]
    logging.info("Windows Firewall allows IPv6 traffic now...")
    return exec_rules(rules)
def block_default_local_subnet(interface_ip):
    """Add a firewall rule blocking all outbound traffic to the local subnet."""
    rule = ("netsh advfirewall firewall add rule name=\"Viper - Block local subnet\""
            " action=block protocol=any dir=out localip=any remoteip=LocalSubnet")
    logging.info("Blocking all traffic on the local subnet (gateway ip: {0})".format(interface_ip))
    return exec_rules([rule])
def unblock_default_local_subnet(interface_ip):
    """Remove the firewall rule added by block_default_local_subnet."""
    rule = "netsh advfirewall firewall delete rule name=\"Viper - Block local subnet\""
    logging.info("Unblocking local subnet (gateway ip: {0})".format(interface_ip))
    return exec_rules([rule])
def block_all_ports_except_vpn(vpn_port):
    """Placeholder: intended to block every port except the VPN's. Not implemented."""
    logging.info("Blocking all ports except the VPN's (vpn port: {0})".format(vpn_port))
def unblock_all_ports():
    """Placeholder: intended to undo block_all_ports_except_vpn. Not implemented."""
    logging.info("Unblocking all ports")
|
greenhost/viper
|
viper/windows/firewall.py
|
Python
|
gpl-3.0
| 5,108
|
import numpy as np
from trans_rot_coords import get_normal, get_unit
# Degrees -> radians conversion factor; np.pi gives full double precision
# (the original hand-typed constant was truncated after 14 digits).
D2R = np.pi / 180.0
# Atomic masses (u) for the atoms appearing in the fragments.
Mass_table = {'O':15.999, 'H':1.008}
## Indices of one square:
# Maps a grid-point index to the four corner indices of the quadrilateral
# it anchors on the (phi, theta) grid. Repeated corner indices (e.g.
# 28: [28, 29, 41, 41]) mark degenerate cells where a theta ring has fewer
# points -- presumably near the pole; verify against set_theta's ring sizes.
Grid_Quarts_wtr = { 0: [ 0, 1, 13, 14] ,
                    1: [ 1, 2, 14, 15] ,
                    2: [ 2, 3, 15, 16] ,
                    3: [ 3, 4, 16, 17] ,
                    4: [ 4, 5, 17, 18] ,
                    5: [ 5, 6, 18, 19] ,
                    6: [ 6, 7, 19, 20] ,
                    7: [ 7, 8, 20, 21] ,
                    8: [ 8, 9, 21, 22] ,
                    9: [ 9, 10, 22, 23] ,
                    10: [10, 11, 23, 24] ,
                    11: [11, 12, 24, 25] ,
                    13: [13, 14, 26, 27] ,
                    14: [14, 15, 27, 28] ,
                    15: [15, 16, 28, 29] ,
                    16: [16, 17, 29, 30] ,
                    17: [17, 18, 30, 31] ,
                    18: [18, 19, 31, 32] ,
                    19: [19, 20, 32, 33] ,
                    20: [20, 21, 33, 34] ,
                    21: [21, 22, 34, 35] ,
                    22: [22, 23, 35, 36] ,
                    23: [23, 24, 36, 37] ,
                    24: [24, 25, 37, 38] ,
                    26: [26, 27, 39, 40] ,
                    27: [27, 28, 40, 41] ,
                    28: [28, 29, 41, 41] ,
                    29: [29, 30, 41, 42] ,
                    30: [30, 31, 42, 43] ,
                    31: [31, 32, 43, 44] ,
                    32: [32, 33, 44, 44] ,
                    33: [33, 34, 44, 45] ,
                    34: [34, 35, 45, 46] ,
                    35: [35, 36, 46, 47] ,
                    36: [36, 37, 47, 47] ,
                    37: [37, 38, 47, 48] ,
                    39: [39, 40, 49, 50] ,
                    40: [40, 41, 50, 50] ,
                    41: [41, 42, 50, 51] ,
                    42: [42, 43, 51, 52] ,
                    43: [43, 44, 52, 52] ,
                    44: [44, 45, 52, 53] ,
                    45: [45, 46, 53, 54] ,
                    46: [46, 47, 54, 54] ,
                    47: [47, 48, 54, 55] ,
                    49: [49, 50, 56, 57] ,
                    50: [50, 51, 57, 57] ,
                    51: [51, 52, 57, 58] ,
                    52: [52, 53, 58, 59] ,
                    53: [53, 54, 59, 59] ,
                    54: [54, 55, 59, 60] ,
                    56: [56, 57, 61, 61] ,
                    57: [57, 58, 61, 61] ,
                    58: [58, 59, 61, 61] ,
                    59: [59, 60, 61, 61]}
class data_structure():
    """Grid/geometry configuration for one fragment type: radial shells (R),
    angular (phi, theta) grids, symmetry faces, and per-distance
    conformation counts. Subclasses override the set_* hooks."""

    def __init__(self):
        # Order matters: degree2radius() reads PHI_angles/THETA_angles set
        # by set_phi()/set_theta().
        self.set_symmetry()
        self.set_R()
        self.set_phi()
        self.set_theta()
        self.set_nConf()
        self.degree2radius()
        self.set_num_of_atoms()

    def set_theta(self):
        # Theta samples (degrees) per phi ring; spacing widens toward the
        # pole and the last ring collapses to a single point.
        self.THETA_angles = {0: [float(i) for i in range(0, 359, 15)],
                             1: [float(i) for i in range(0, 359, 15)],
                             2: [float(i) for i in range(0, 359, 15)],
                             3: [float(i) for i in range(0, 359, 20)],
                             4: [float(i) for i in range(0, 359, 30)],
                             5: [float(i) for i in range(0, 359, 45)],
                             6: [0.0]}

    def set_symmetry(self):
        # Mirror symmetry planes of the fragment.
        self.symface = ['xy']

    def set_num_of_atoms(self):
        # n1/n2: atom counts of the two interacting fragments.
        self.n1 = 12
        self.n2 = 3

    def calt_dvec(self, a0, a1, a2):
        """
        a0, a1, a2: O, CA, next CA
        """
        # NOTE(review): the two ratios look like a fixed weighting between
        # the two CA positions (they sum to 1) -- origin of the constants
        # not derivable from this file; confirm before reuse.
        ratio_CA1 = 0.5618541311379575
        ratio_CA2 = 0.4381458688620425
        return -np.array(a1)*ratio_CA1 - np.array(a2)*ratio_CA2

    # Reference points and axes used to orient the fragment frame.
    def calt_vec1(self, a0, a1, a2): return a0, [1,0,0]
    def calt_vec2(self, a0, a1, a2): return a1, [0,1,0]

    def set_R(self):
        # Radial shells (Angstrom, presumably -- units not stated here) and
        # the spacing DR between consecutive shells (0.0 sentinel at the end).
        self.R_NDX = [2.0,2.1,2.2,2.3,2.4,2.5,2.6,2.65,2.7,2.75,2.8,2.85,2.9,2.95,3.0,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,4.0,4.2,4.5,4.7,5.0,5.5,6.0,6.5,7.0,8.0,9.0,10.0,11.0,12.0]
        self.nDist = len(self.R_NDX)
        self.DR = []
        for i in range(len(self.R_NDX)-1):
            self.DR.append(self.R_NDX[i+1]-self.R_NDX[i])
        self.DR.append( 0.0 )

    def set_phi(self):
        """
        default: 0~90.0, d_ang = 15.0
        """
        self.PHI_angles = [i*15.0 for i in range(0,7)]
        self.nPhi = len(self.PHI_angles)

    def set_nConf(self): # depending on the distance
        # Number of conformations and normals sampled per radial shell;
        # densest sampling in the 2.51..3.51 band.
        self.nConf = []
        self.nNorm = []
        for i in range(self.nDist):
            if self.R_NDX[i] < 2.51:
                self.nConf.append(54)
                self.nNorm.append(4)
            elif self.R_NDX[i] > 2.51 and self.R_NDX[i] < 3.51:
                self.nConf.append(210)
                self.nNorm.append(4)
            elif self.R_NDX[i] > 3.51 and self.R_NDX[i] < 5.51:
                self.nConf.append(54)
                self.nNorm.append(2)
            else:
                self.nConf.append(26)
                self.nNorm.append(2)

    def degree2radius(self):
        # Convert all phi/theta samples to radians in place and count the
        # total number of (phi, theta) grid points.
        for i in range(self.nPhi): self.PHI_angles[i] *= D2R
        self.NTheta = {}
        self.nGrid = 0
        for i in range(self.nPhi):
            self.NTheta[i] = len(self.THETA_angles[i])
            for j in range(self.NTheta[i]):
                self.THETA_angles[i][j] *= D2R
            self.nGrid += self.NTheta[i]
class wtr_structure(data_structure):
    """Water (H2O) fragment: the extra xz mirror halves the theta range."""

    def set_symmetry(self):
        self.symface = ['xy','xz']

    def set_num_of_atoms(self):
        # Both fragments are 3-atom water molecules.
        self.n1 = 3
        self.n2 = 3

    def set_theta(self):
        # Theta only spans 0..180 degrees thanks to the xz symmetry.
        self.THETA_angles = {0: [float(i) for i in range(0, 181, 15)],
                             1: [float(i) for i in range(0, 181, 15)],
                             2: [float(i) for i in range(0, 181, 15)],
                             3: [float(i) for i in range(0, 181, 20)],
                             4: [float(i) for i in range(0, 181, 30)],
                             5: [float(i) for i in range(0, 181, 45)],
                             6: [0.0]}

    def calt_dvec(self, a0, a1, a2):
        """
        a0, a1, a2: 'O','H','H' in H2O.
        """
        # Vector that translates the molecule's center of mass to the origin.
        totalM = Mass_table['O'] + Mass_table['H'] + Mass_table['H']
        com = [a0[k]*Mass_table['O']+a1[k]*Mass_table['H']+a2[k]*Mass_table['H'] for k in range(3)]
        com = [com[k]/totalM for k in range(3)]
        dvec = np.array(com)
        return -dvec

    def calt_vec1(self, a0, a1, a2):
        # Midpoint of the two hydrogens defines the x axis.
        return 0.5*(np.array(a1)+np.array(a2)), [1,0,0]

    def calt_vec2(self, a0, a1, a2):
        # Normal of the molecular plane (via trans_rot_coords.get_normal)
        # defines the z axis.
        vec = get_normal(np.array(a2), np.array(a1))
        return vec, [0,0,1]
# Fragment name -> quadrilateral index table.
Grid_Quarts = { 'wtr': Grid_Quarts_wtr}
## Data Structures of the fragments:
DS = { 'wtr': wtr_structure()}
|
sethbrin/QM
|
version2/python/grids_structures_general.py
|
Python
|
mit
| 6,582
|
"""
<Module Name>
dsa.py
<Author>
Santiago Torres-Arias <santiago@nyu.edu>
<Started>
Nov 15, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
DSA-specific handling routines for signature verification and key parsing
"""
import binascii
CRYPTO = True
NO_CRYPTO_MSG = 'DSA key support for GPG requires the cryptography library'
try:
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import utils as dsautils
except ImportError:
CRYPTO = False
from securesystemslib import exceptions
from securesystemslib import formats
from securesystemslib.gpg.exceptions import PacketParsingError
from securesystemslib.gpg import util as gpg_util
def create_pubkey(pubkey_info):
  """
  <Purpose>
    Create and return a DSAPublicKey object from the passed pubkey_info
    using pyca/cryptography.

  <Arguments>
    pubkey_info:
          The DSA pubkey info dictionary as specified by
          securesystemslib.formats.GPG_DSA_PUBKEY_SCHEMA

  <Exceptions>
    securesystemslib.exceptions.FormatError if
      pubkey_info does not match securesystemslib.formats.GPG_DSA_PUBKEY_SCHEMA

    securesystemslib.exceptions.UnsupportedLibraryError if
      the cryptography module is not available

  <Returns>
    A cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey based on the
    passed pubkey_info.

  """
  if not CRYPTO: # pragma: no cover
    raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)

  formats.GPG_DSA_PUBKEY_SCHEMA.check_match(pubkey_info)

  # All four DSA parameters are stored as hex strings in the schema.
  public = pubkey_info['keyval']['public']
  p, q, g, y = (int(public[param], 16) for param in ('p', 'q', 'g', 'y'))

  parameter_numbers = dsa.DSAParameterNumbers(p, q, g)
  return dsa.DSAPublicNumbers(y, parameter_numbers).public_key(
      backends.default_backend())
def _read_mpi(data, ptr):
  """Read one RFC4880 multi-precision integer starting at *ptr*.

  Returns (value_bytes, new_ptr). Raises
  securesystemslib.gpg.exceptions.PacketParsingError when the buffer ends
  before the declared MPI length.
  """
  length = gpg_util.get_mpi_length(data[ptr: ptr + 2])
  ptr += 2
  value = data[ptr: ptr + length]
  if len(value) != length: # pragma: no cover
    raise PacketParsingError("This MPI has been truncated!")
  return value, ptr + length


def get_pubkey_params(data):
  """
  <Purpose>
    Parse the public-key parameters as multi-precision-integers.

  <Arguments>
    data:
          the RFC4880-encoded public key parameters data buffer as described
          in the fifth paragraph of section 5.5.2.

  <Exceptions>
    securesystemslib.gpg.exceptions.PacketParsingError:
          if the public key parameters are malformed

  <Side Effects>
    None.

  <Returns>
    The parsed DSA public key in the format
    securesystemslib.formats.GPG_DSA_PUBKEY_SCHEMA.

  """
  # The four MPIs appear in p, q, g, y order per RFC4880 5.5.2; the
  # previously fourfold-duplicated parse loop now lives in _read_mpi,
  # which also uses one consistent truncation message.
  ptr = 0
  prime_p, ptr = _read_mpi(data, ptr)
  group_order_q, ptr = _read_mpi(data, ptr)
  generator, ptr = _read_mpi(data, ptr)
  value_y, ptr = _read_mpi(data, ptr)

  return {
    "y": binascii.hexlify(value_y).decode('ascii'),
    "p": binascii.hexlify(prime_p).decode("ascii"),
    "g": binascii.hexlify(generator).decode("ascii"),
    "q": binascii.hexlify(group_order_q).decode("ascii"),
  }
def get_signature_params(data):
  """
  <Purpose>
    Parse the signature parameters as multi-precision-integers.

  <Arguments>
    data:
          the RFC4880-encoded signature data buffer as described
          in the fourth paragraph of section 5.2.2

  <Exceptions>
    securesystemslib.gpg.exceptions.PacketParsingError:
          if the public key parameters are malformed

    securesystemslib.exceptions.UnsupportedLibraryError:
          if the cryptography module is not available

  <Side Effects>
    None.

  <Returns>
    The decoded signature buffer
  """
  if not CRYPTO: # pragma: no cover
    # Bug fix: the original *returned* the exception instance here instead
    # of raising it, silently handing callers an exception object.
    raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)

  # r and s are each an RFC4880 MPI: 2-byte bit length, then the value.
  ptr = 0
  r_length = gpg_util.get_mpi_length(data[ptr:ptr+2])
  ptr += 2
  r = data[ptr:ptr + r_length]
  if len(r) != r_length: # pragma: no cover
    raise PacketParsingError("r-value truncated in signature")
  ptr += r_length

  s_length = gpg_util.get_mpi_length(data[ptr: ptr+2])
  ptr += 2
  s = data[ptr: ptr + s_length]
  if len(s) != s_length: # pragma: no cover
    raise PacketParsingError("s-value truncated in signature")

  # Re-encode (r, s) as a DER signature for pyca/cryptography.
  s = int(binascii.hexlify(s), 16)
  r = int(binascii.hexlify(r), 16)
  signature = dsautils.encode_dss_signature(r, s)
  return signature
def verify_signature(signature_object, pubkey_info, content,
    hash_algorithm_id):
  """
  <Purpose>
    Verify the passed signature against the passed content with the passed
    DSA public key using pyca/cryptography.

  <Arguments>
    signature_object:
          A signature dictionary as specified by
          securesystemslib.formats.GPG_SIGNATURE_SCHEMA

    pubkey_info:
          The DSA public key info dictionary as specified by
          securesystemslib.formats.GPG_DSA_PUBKEY_SCHEMA

    hash_algorithm_id:
          one of SHA1, SHA256, SHA512 (see securesystemslib.gpg.constants)
          used to verify the signature
          NOTE: Overrides any hash algorithm specification in "pubkey_info"'s
          "hashes" or "method" fields.

    content:
          The signed bytes against which the signature is verified

  <Exceptions>
    securesystemslib.exceptions.FormatError if:
      signature_object does not match securesystemslib.formats.GPG_SIGNATURE_SCHEMA
      pubkey_info does not match securesystemslib.formats.GPG_DSA_PUBKEY_SCHEMA

    securesystemslib.exceptions.UnsupportedLibraryError if:
      the cryptography module is not available

    ValueError:
      if the passed hash_algorithm_id is not supported (see
      securesystemslib.gpg.util.get_hashing_class)

  <Returns>
    True if signature verification passes and False otherwise

  """
  if not CRYPTO: # pragma: no cover
    raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)

  formats.GPG_SIGNATURE_SCHEMA.check_match(signature_object)
  formats.GPG_DSA_PUBKEY_SCHEMA.check_match(pubkey_info)

  hash_cls = gpg_util.get_hashing_class(hash_algorithm_id)
  pubkey_object = create_pubkey(pubkey_info)

  # Hash the GPG header bytes together with the signed content.
  digest = gpg_util.hash_object(
      binascii.unhexlify(signature_object['other_headers']),
      hash_cls(), content)

  raw_signature = binascii.unhexlify(signature_object['signature'])
  try:
    pubkey_object.verify(raw_signature, digest, dsautils.Prehashed(hash_cls()))
  except InvalidSignature:
    return False
  return True
|
secure-systems-lab/securesystemslib
|
securesystemslib/gpg/dsa.py
|
Python
|
mit
| 7,171
|
from . import api, example, bauth, friendAPI, apiHelper, postAPI
|
CMPUT404W16/social-dist
|
fbook/api/__init__.py
|
Python
|
apache-2.0
| 64
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Script to download time tables as PDF and calculate route durations based on relations for the routes in OpenStreetMap
from common import *
import os
import sys
import io
import logging
import requests
import json
import datetime
import time
#from unidecode import unidecode
logger = logging.getLogger("GTFS_get_durations")
logging.basicConfig(filename="/var/log/GTFS/planeta-es.log", level=logging.DEBUG, format="%(asctime)s %(name)s %(levelname)s - %(message)s", datefmt="%Y/%m/%d %H:%M:%S:")
# PDFs are stored here
baseurl = "http://www.viacaoplaneta-es.com.br/destinos-e-horarios-viacao-planeta/"
debugMe = False
# List of route numbers
config = {}
with open('planeta.json', 'r') as infile:
    config = json.load(infile)
# Start from previously calculated durations, if a durations.json exists.
durationsList = {}
try:
    with open('durations.json', 'r') as infile:
        durationsList = json.load(infile)
# NOTE(review): a bare except swallows *all* errors, not just a missing
# file -- consider narrowing to (IOError, ValueError).
except:
    pass
# Metadata stamped into the output file on every run.
durationsList[u"updated"] = str(datetime.date.today())
durationsList[u"operator"] = u"Viação Planeta"
durationsList[u"network"] = u"Planeta"
durationsList[u"source"] = baseurl
# getLines / get_destination / get_duration / debug_to_screen come from the
# wildcard `common` import above. NOTE(review): the print statements below
# make this script Python 2 only.
for i in getLines():
    name = i[1]
    ref = i[0]
    print ref, name
    # Route names look like "Origin x Stop x ... x Destination".
    tmp = name.split(u" x ")
    origin = tmp.pop(0)
    destination = get_destination(tmp)
    origin = origin.strip()
    destination = destination.strip()
    print u" From:", origin
    print u" To: ", destination
    if len(tmp) > 0:
        debug_to_screen( u" Via: {0}".format( (tmp) ) )
    # Compute durations for both directions of the route.
    durationsList[ref] = [ get_duration(ref, origin, destination, config["query"]["bbox"]), get_duration(ref, destination, origin, config["query"]["bbox"]) ]
    print u"Durations calculated ",ref, u":", durationsList[ref]
    if ref == "1" or ref == "2":
        # Routes 1 and 2 also have a "-1" variant with the same endpoints.
        myRef = "{0}-1".format(ref)
        durationsList[myRef] = [ get_duration(myRef, origin, destination, config["query"]["bbox"]), get_duration(myRef, destination, origin, config["query"]["bbox"]) ]
        print u"Durations calculated ",myRef, u":", durationsList[myRef]
with open('durations.json', 'w') as outfile:
    json.dump(durationsList, outfile, sort_keys=True, indent=4)
|
Skippern/GV-scraper
|
creators/planeta/get_duration.py
|
Python
|
gpl-3.0
| 2,121
|
"""
Dambreak flow - Ubbink (1997)
"""
import numpy as np
from math import sqrt
from proteus import (Domain, Context,
FemTools as ft,
#SpatialTools as st,
MeshTools as mt,
WaveTools as wt)
from proteus.mprans import SpatialTools as st
from proteus.Profiling import logEvent
# predefined options
# Command-line/context options for the dambreak case; each entry is
# (name, default value, help text).
opts=Context.Options([
    # water column
    ("water_level", 0.292, "Height of water column in m"),
    ("water_width", 0.146, "Width of water column in m"),
    # tank
    ("tank_dim", (0.584,0.584), "Dimensions of the tank in m"),
    ("obstacle_dim", (0.024, 0.048),"Dimensions of the obstacle in m"),
    ("obstacle_x_start", 0.292,"x location of start of obstacle in m"),
    #gravity
    ("g",(0,-9.81,0), "Gravity vector in m/s^2"),
    # gauges
    ("gauge_output", True, "Produce gauge data."),
    ("gauge_location_p", (0.292,0.04,0.0), "Pressure gauge location in m"),
    # refinement
    ("refinement",32,"Refinement level, he = L/(4*refinement - 1), where L is the horizontal dimension"),
    ("cfl", 0.33,"Target cfl"),
    # run time
    ("T", 0.03,"Simulation time in m"),
    ("dt_fixed", 0.01, "Fixed time step in s"),
    ("dt_init", 0.001 ,"Maximum initial time step in s"),
    ("gen_mesh", True ,"Generate new mesh"),
])
# ----- CONTEXT ------ #
# water
waterLine_z = opts.water_level
waterLine_x = opts.water_width
# tank
tank_dim = opts.tank_dim
obstacle_dim = opts.obstacle_dim
obstacle_x_start = opts.obstacle_x_start
obstacle_x_end = obstacle_x_start + obstacle_dim[0]
obstacle_height = obstacle_dim[1]
##########################################
# Discretization Input Options #
##########################################
#[temp] temporary location
backgroundDiffusionFactor = 0.01
refinement = opts.refinement
genMesh = opts.gen_mesh
movingDomain = False
checkMass = False
applyRedistancing = True
useOldPETSc = False
useSuperlu = False
timeDiscretization = 'be' # 'vbdf', 'be', 'flcbdf'
spaceOrder = 1
useHex = False
useRBLES = 0.0
useMetrics = 1.0
applyCorrection = True
useVF = 1.0
useOnlyVF = False
useRANS = 0 # 0 -- None
# 1 -- K-Epsilon
# 2 -- K-Omega
# ----- INPUT CHECKS ----- #
# Validate the discretization switches before they are used below; the error
# message format matches the rest of the proteus benchmarks.
for _option_name, _option_value, _allowed in (
        ("spaceOrder", spaceOrder, (1, 2)),
        ("useRBLES", useRBLES, (0.0, 1.0)),
        ("useMetrics", useMetrics, (0.0, 1.0)),
):
    if _option_value not in _allowed:
        raise ValueError("INVALID: " + _option_name + "(" + str(_option_value) + ")")
# ----- DISCRETIZATION ----- #
nd = 2
# Pick the finite-element basis and a matching Gauss quadrature degree.
# Hexahedral and simplex meshes use different basis/quadrature classes;
# spaceOrder has been validated to be 1 or 2 above.
if spaceOrder == 1:
    hFactor = 1.0
    if useHex:
        basis = ft.C0_AffineLinearOnCubeWithNodalBasis
        _quad_degree = 2
    else:
        basis = ft.C0_AffineLinearOnSimplexWithNodalBasis
        _quad_degree = 3
elif spaceOrder == 2:
    hFactor = 0.5
    _quad_degree = 4
    if useHex:
        basis = ft.C0_AffineLagrangeOnCubeWithNodalBasis
    else:
        basis = ft.C0_AffineQuadraticOnSimplexWithNodalBasis
_quadrature_class = ft.CubeGaussQuadrature if useHex else ft.SimplexGaussQuadrature
elementQuadrature = _quadrature_class(nd, _quad_degree)
elementBoundaryQuadrature = _quadrature_class(nd - 1, _quad_degree)
##########################################
# Numerical Options and Other Parameters #
##########################################
weak_bc_penalty_constant = 100.0
nLevels = 1
# ----- PHYSICAL PROPERTIES ----- #
# Densities (kg/m^3) and kinematic viscosities (m^2/s).
# Water
rho_0 = 998.2
nu_0 = 1.004e-6
# Air
rho_1 = 1.205
nu_1 = 1.500e-5
# Surface Tension
sigma_01 = 0.0
# Gravity
g = opts.g
# ----- TIME STEPPING & VELOCITY----- #
T = opts.T
dt_fixed = opts.dt_fixed
dt_init = min(0.1 * dt_fixed, opts.dt_init)
runCFL = opts.cfl
nDTout = int(round(T / dt_fixed))
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()
# ----- TANK ----- #
# Rectangular tank with a single rectangular obstacle on the floor,
# described counter-clockwise by its four corner vertices.
tank = st.TankWithObstacles2D(domain=domain,
                              dim=tank_dim,
                              obstacles=[[[obstacle_x_start, 0],
                                          [obstacle_x_start, obstacle_height],
                                          [obstacle_x_end, obstacle_height],
                                          [obstacle_x_end, 0]]])
# ----- GAUGES ----- #
if opts.gauge_output:
    tank.attachPointGauges(
        'twp',
        gauges = ((('p',), (opts.gauge_location_p,)),),
        activeTime=(0, opts.T),
        sampleRate=0,
        fileName='pressureGauge.csv'
    )
# ----- EXTRA BOUNDARY CONDITIONS ----- #
# Open top, free-slip walls and floor.
tank.BC['y+'].setAtmosphere()
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
# ----- MESH CONSTRUCTION ----- #
# he: characteristic element size, from the refinement formula in the options.
he = tank_dim[0] / float(4 * refinement - 1)
domain.MeshOptions.he = he
st.assembleDomain(domain)
# ----- STRONG DIRICHLET ----- #
ns_forceStrongDirichlet = False
# ----- NUMERICAL PARAMETERS ----- #
# Two parameter sets: with useMetrics the sharper shock-capturing and
# interface-thickness factors are used; the else-branch keeps legacy defaults.
if useMetrics:
    ns_shockCapturingFactor = 0.25
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.25
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.25
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    rd_shockCapturingFactor = 0.5
    rd_lag_shockCapturing = False
    # NOTE(review): epsFact_density appears at both ends of this chained
    # assignment; the second occurrence is redundant (same value).
    epsFact_density = epsFact_viscosity = epsFact_curvature \
        = epsFact_vof = ecH \
        = epsFact_consrv_dirac = epsFact_density \
        = 3.0
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 0.1
    redist_Newton = True
    kappa_shockCapturingFactor = 0.25
    kappa_lag_shockCapturing = True #False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.25
    dissipation_lag_shockCapturing = True #False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0
else:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    # NOTE(review): same redundant duplicate of epsFact_density as above.
    epsFact_density = epsFact_viscosity = epsFact_curvature \
        = epsFact_vof = ecH \
        = epsFact_consrv_dirac = epsFact_density \
        = 1.5
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 1.0
    redist_Newton = False
    kappa_shockCapturingFactor = 0.9
    kappa_lag_shockCapturing = True #False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.9
    dissipation_lag_shockCapturing = True #False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0
# ----- NUMERICS: TOLERANCES ----- #
# Nonlinear solver absolute tolerances scale with the mesh size `he`.
ns_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
vof_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
ls_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
rd_nl_atol_res = max(1.0e-10, 0.005 * he)
mcorr_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
kappa_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
dissipation_nl_atol_res = max(1.0e-10, 0.001 * he ** 2)
# ----- TURBULENCE MODELS ----- #
ns_closure = 2 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
    ns_closure = 3
elif useRANS == 2:
    ns_closure = 4
##########################################
# Signed Distance #
##########################################
def signedDistance(x):
    """Initial signed distance to the water-column surface at point x.

    Negative inside the water column (x[0] < waterLine_x and
    x[1] < waterLine_z), positive outside; in the outer corner region the
    distance is measured to the column's top-right corner.
    """
    dx = x[0] - waterLine_x
    dz = x[1] - waterLine_z
    if dx >= 0.0 and dz >= 0.0:
        # Diagonal region: the closest surface point is the column corner.
        return sqrt(dx ** 2 + dz ** 2)
    if dx >= 0.0:
        # Beside the column, below the water level: distance to the vertical face.
        return dx
    if dz >= 0.0:
        # Above the column: distance to the horizontal free surface.
        return dz
    # Inside the column: negative distance to the nearest face.
    return max(dx, dz)
|
erdc-cm/air-water-vv
|
2d/benchmarks/dambreak_Ubbink/dambreak_Ubbink.py
|
Python
|
mit
| 8,038
|
import sys
import numpy
import math
from KMCLib.PluginInterfaces.KMCAnalysisPlugin import KMCAnalysisPlugin
from KMCLib.Utilities.CheckUtilities import checkSequenceOfPositiveIntegers
from KMCLib.Utilities.CheckUtilities import checkPositiveFloat
from KMCLib.Utilities.CheckUtilities import checkPositiveInteger
from KMCLib.Exceptions.Error import Error
from KMCLib.Backend.Backend import MPICommons
class RateCalc(KMCAnalysisPlugin):
    """KMCLib analysis plugin that estimates the occurrence rate of a set of processes.

    The rate is estimated as (number of occurrences of the tracked processes)
    / (simulation time elapsed between setup and the last tracked occurrence).
    """
    def __init__(self, processes=None):
        # processes: sequence of process numbers to track (validated below).
        msg = "The 'processes' parameter must be given as a list of process numbers."
        self.__processes = checkSequenceOfPositiveIntegers(processes, msg)
        self.__initTime = 0.0     # simulation time when setup() was called
        self.__lastTime = 0.0     # time of the last registered step
        self.__currentTime = 0.0  # time of the last *tracked* event
        self.__current_count = 0  # number of tracked events seen so far
    def setup(self, step, time, configuration):
        # Called once before the simulation loop; record the start time.
        self.__initTime = time
    def registerStep(self, step, time, configuration):
        # Called after every KMC step; count it when the latest event belongs
        # to one of the tracked processes.
        if configuration.latestEventProcess() in self.__processes:
            self.__current_count += 1
            self.__currentTime = time
        self.__lastTime = time
    def finalize(self):
        # Restrict the measurement window to end at the last tracked event.
        self.__lastTime = self.__currentTime
    def printResults(self, stream=sys.stdout):
        # Guard against a zero or negative window (e.g. no tracked event was
        # ever registered); the previous version raised ZeroDivisionError here.
        elapsed = self.__lastTime - self.__initTime
        if elapsed <= 0.0:
            return
        rateEst = float(self.__current_count)/elapsed
        if MPICommons.isMaster() and not math.isnan(rateEst):
            stream.write("{:.6E}".format(rateEst)+"\n")
|
joshuahellier/PhDStuff
|
codes/thesisCodes/kmc/customAnalysis/RateCalc.py
|
Python
|
mit
| 1,393
|
# coding: utf8
# Copyright 2018 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Project-independent library for Taskcluster decision tasks
"""
import base64
import datetime
import hashlib
import json
import os
import re
import subprocess
import sys
import taskcluster
# Public API
__all__ = [
"CONFIG", "SHARED", "Task", "DockerWorkerTask",
"GenericWorkerTask", "WindowsGenericWorkerTask", "MacOsGenericWorkerTask",
]
class Config:
    """
    Global configuration, for users of the library to modify.
    """
    def __init__(self):
        # "%s" is replaced by each task's own name when the task is created.
        self.task_name_template = "%s"
        self.index_prefix = "garbage.servo-decisionlib"
        self.index_read_only = False
        # Scopes/routes added to every task created through this library.
        self.scopes_for_all_subtasks = []
        self.routes_for_all_subtasks = []
        self.docker_image_build_worker_type = None
        self.docker_images_expire_in = "1 month"
        self.repacked_msi_files_expire_in = "1 month"
        self.treeherder_repository_name = None
        # Set by docker-worker:
        # https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/environment
        self.decision_task_id = os.environ.get("TASK_ID")
        # Set in the decision task’s payload, such as defined in .taskcluster.yml
        self.task_owner = os.environ.get("TASK_OWNER")
        self.task_source = os.environ.get("TASK_SOURCE")
        self.git_url = os.environ.get("GIT_URL")
        self.git_ref = os.environ.get("GIT_REF")
        self.git_sha = os.environ.get("GIT_SHA")
        self.tc_root_url = os.environ.get("TASKCLUSTER_ROOT_URL")
        self.default_provisioner_id = "proj-example"
    def task_id(self):
        """
        Return (and cache in `self._task_id`) an ID describing the current
        state of the tree: the head commit sha, or for a merge commit the
        `-`-joined parent shas.
        """
        if hasattr(self, "_task_id"):
            return self._task_id
        # If the head commit is a merge, we want to generate a unique task id which incorporates
        # the merge parents rather than the actual sha of the merge commit. This ensures that tasks
        # can be reused if the tree is in an identical state. Otherwise, if the head commit is
        # not a merge, we can rely on the head commit sha for that purpose.
        raw_commit = subprocess.check_output(["git", "cat-file", "commit", "HEAD"])
        parent_commits = [
            value.decode("utf8")
            for line in raw_commit.split(b"\n")
            for key, _, value in [line.partition(b" ")]
            if key == b"parent"
        ]
        if len(parent_commits) > 1:
            self._task_id = "-".join(parent_commits) # pragma: no cover
        else:
            self._task_id = self.git_sha # pragma: no cover
        return self._task_id
    def git_sha_is_current_head(self):
        """Set `self.git_sha` from the actual HEAD of the local git checkout."""
        output = subprocess.check_output(["git", "rev-parse", "HEAD"])
        self.git_sha = output.decode("utf8").strip()
class Shared:
    """
    Global shared state.
    """
    def __init__(self):
        # Single timestamp so every task in this decision uses the same "now".
        self.now = datetime.datetime.utcnow()
        # Memoizes Task.find_or_create: maps index path -> task ID.
        self.found_or_created_indexed_tasks = {}
        # Service clients go through the worker-local Taskcluster proxy,
        # so TASKCLUSTER_PROXY_URL must be set in the environment.
        options = {"rootUrl": os.environ["TASKCLUSTER_PROXY_URL"]}
        self.queue_service = taskcluster.Queue(options)
        self.index_service = taskcluster.Index(options)
    def from_now_json(self, offset):
        """
        Same as `taskcluster.fromNowJSON`, but uses the creation time of `self` for “now”.
        """
        return taskcluster.stringDate(taskcluster.fromNow(offset, dateObj=self.now))
# Module-level singletons: users mutate CONFIG before creating tasks; SHARED
# holds the Taskcluster clients (constructed eagerly at import time).
CONFIG = Config()
SHARED = Shared()
def chaining(op, attr):
    """Build a fluent builder method from a mutation primitive.

    The returned function applies `op(self, attr, *args, **kwargs)` and then
    returns `self`, so calls can be chained. Used with `setattr`,
    `append_to_attr`, `prepend_to_attr` and `update_attr` to generate the
    `with_*` methods on Task classes.
    """
    def fluent_method(self, *args, **kwargs):
        op(self, attr, *args, **kwargs)
        return self
    return fluent_method
def append_to_attr(self, attr, *args): getattr(self, attr).extend(args)
def prepend_to_attr(self, attr, *args): getattr(self, attr)[0:0] = list(args)
def update_attr(self, attr, **kwargs): getattr(self, attr).update(kwargs)
class Task:
"""
A task definition, waiting to be created.
    Typical usage is to chain the `with_*` methods to set or extend this object’s attributes,
    then call the `create` or `find_or_create` method to schedule a task.
This is an abstract class that needs to be specialized for different worker implementations.
"""
def __init__(self, name):
self.name = name
self.description = ""
self.scheduler_id = "taskcluster-github"
self.provisioner_id = CONFIG.default_provisioner_id
self.worker_type = "github-worker"
self.deadline_in = "1 day"
self.expires_in = "1 year"
self.index_and_artifacts_expire_in = self.expires_in
self.dependencies = []
self.scopes = []
self.routes = []
self.extra = {}
self.treeherder_required = False
# All `with_*` methods return `self`, so multiple method calls can be chained.
with_description = chaining(setattr, "description")
with_scheduler_id = chaining(setattr, "scheduler_id")
with_provisioner_id = chaining(setattr, "provisioner_id")
with_worker_type = chaining(setattr, "worker_type")
with_deadline_in = chaining(setattr, "deadline_in")
with_expires_in = chaining(setattr, "expires_in")
with_index_and_artifacts_expire_in = chaining(setattr, "index_and_artifacts_expire_in")
with_dependencies = chaining(append_to_attr, "dependencies")
with_scopes = chaining(append_to_attr, "scopes")
with_routes = chaining(append_to_attr, "routes")
with_extra = chaining(update_attr, "extra")
def with_treeherder_required(self):
self.treeherder_required = True
return self
def with_treeherder(self, category, symbol=None):
symbol = symbol or self.name
assert len(symbol) <= 25, symbol
self.name = "%s: %s" % (category, self.name)
        # The message schema does not allow spaces in the platform or in labels,
# but the UI shows them in that order separated by spaces.
# So massage the metadata to get the UI to show the string we want.
# `labels` defaults to ["opt"] if not provided or empty,
# so use a more neutral underscore instead.
parts = category.split(" ")
platform = parts[0]
labels = parts[1:] or ["_"]
# https://docs.taskcluster.net/docs/reference/integrations/taskcluster-treeherder/docs/task-treeherder-config
self.with_extra(treeherder={
"machine": {"platform": platform},
"labels": labels,
"symbol": symbol,
})
if CONFIG.treeherder_repository_name:
assert CONFIG.git_sha
suffix = ".v2._/%s.%s" % (CONFIG.treeherder_repository_name, CONFIG.git_sha)
self.with_routes(
"tc-treeherder" + suffix,
"tc-treeherder-staging" + suffix,
)
self.treeherder_required = False # Taken care of
return self
def build_worker_payload(self): # pragma: no cover
"""
Overridden by sub-classes to return a dictionary in a worker-specific format,
which is used as the `payload` property in a task definition request
passed to the Queue’s `createTask` API.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
raise NotImplementedError
def create(self):
"""
Call the Queue’s `createTask` API to schedule a new task, and return its ID.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
worker_payload = self.build_worker_payload()
assert not self.treeherder_required, \
"make sure to call with_treeherder() for this task: %s" % self.name
assert CONFIG.decision_task_id
assert CONFIG.task_owner
assert CONFIG.task_source
queue_payload = {
"taskGroupId": CONFIG.decision_task_id,
"dependencies": [CONFIG.decision_task_id] + self.dependencies,
"schedulerId": self.scheduler_id,
"provisionerId": self.provisioner_id,
"workerType": self.worker_type,
"created": SHARED.from_now_json(""),
"deadline": SHARED.from_now_json(self.deadline_in),
"expires": SHARED.from_now_json(self.expires_in),
"metadata": {
"name": CONFIG.task_name_template % self.name,
"description": self.description,
"owner": CONFIG.task_owner,
"source": CONFIG.task_source,
},
"payload": worker_payload,
}
scopes = self.scopes + CONFIG.scopes_for_all_subtasks
routes = self.routes + CONFIG.routes_for_all_subtasks
if any(r.startswith("index.") for r in routes):
self.extra.setdefault("index", {})["expires"] = \
SHARED.from_now_json(self.index_and_artifacts_expire_in)
dict_update_if_truthy(
queue_payload,
scopes=scopes,
routes=routes,
extra=self.extra,
)
task_id = taskcluster.slugId()
SHARED.queue_service.createTask(task_id, queue_payload)
print("Scheduled %s: %s" % (task_id, self.name))
return task_id
@staticmethod
def find(index_path):
full_index_path = "%s.%s" % (CONFIG.index_prefix, index_path)
task_id = SHARED.index_service.findTask(full_index_path)["taskId"]
print("Found task %s indexed at %s" % (task_id, full_index_path))
return task_id
def find_or_create(self, index_path=None):
"""
Try to find a task in the Index and return its ID.
The index path used is `{CONFIG.index_prefix}.{index_path}`.
`index_path` defaults to `by-task-definition.{sha256}`
with a hash of the worker payload and worker type.
If no task is found in the index,
it is created with a route to add it to the index at that same path if it succeeds.
<https://docs.taskcluster.net/docs/reference/core/taskcluster-index/references/api#findTask>
"""
if not index_path:
worker_type = self.worker_type
index_by = json.dumps([worker_type, self.build_worker_payload()]).encode("utf-8")
index_path = "by-task-definition." + hashlib.sha256(index_by).hexdigest()
task_id = SHARED.found_or_created_indexed_tasks.get(index_path)
if task_id is not None:
return task_id
try:
task_id = Task.find(index_path)
except taskcluster.TaskclusterRestFailure as e:
if e.status_code != 404: # pragma: no cover
raise
if not CONFIG.index_read_only:
self.routes.append("index.%s.%s" % (CONFIG.index_prefix, index_path))
task_id = self.create()
SHARED.found_or_created_indexed_tasks[index_path] = task_id
return task_id
class GenericWorkerTask(Task):
"""
Task definition for a worker type that runs the `generic-worker` implementation.
This is an abstract class that needs to be specialized for different operating systems.
<https://github.com/taskcluster/generic-worker>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_run_time_minutes = 30
self.env = {}
self.features = {}
self.mounts = []
self.artifacts = []
with_max_run_time_minutes = chaining(setattr, "max_run_time_minutes")
with_mounts = chaining(append_to_attr, "mounts")
with_env = chaining(update_attr, "env")
def build_command(self): # pragma: no cover
"""
Overridden by sub-classes to return the `command` property of the worker payload,
in the format appropriate for the operating system.
"""
raise NotImplementedError
def build_worker_payload(self):
"""
Return a `generic-worker` worker payload.
<https://docs.taskcluster.net/docs/reference/workers/generic-worker/docs/payload>
"""
worker_payload = {
"command": self.build_command(),
"maxRunTime": self.max_run_time_minutes * 60
}
return dict_update_if_truthy(
worker_payload,
env=self.env,
mounts=self.mounts,
features=self.features,
artifacts=[
{
"type": type_,
"path": path,
"name": "public/" + url_basename(path),
"expires": SHARED.from_now_json(self.index_and_artifacts_expire_in),
}
for type_, path in self.artifacts
],
)
def with_artifacts(self, *paths, type="file"):
"""
Add each path in `paths` as a task artifact
that expires in `self.index_and_artifacts_expire_in`.
`type` can be `"file"` or `"directory"`.
Paths are relative to the task’s home directory.
"""
self.artifacts.extend((type, path) for path in paths)
return self
def with_features(self, *names):
"""
Enable the given `generic-worker` features.
<https://github.com/taskcluster/generic-worker/blob/master/native_windows.yml>
"""
self.features.update({name: True for name in names})
return self
def _mount_content(self, url_or_artifact_name, task_id, sha256):
if task_id:
content = {"taskId": task_id, "artifact": url_or_artifact_name}
else:
content = {"url": url_or_artifact_name}
if sha256:
content["sha256"] = sha256
return content
def with_file_mount(self, url_or_artifact_name, task_id=None, sha256=None, path=None):
"""
Make `generic-worker` download a file before the task starts
and make it available at `path` (which is relative to the task’s home directory).
If `sha256` is provided, `generic-worker` will hash the downloaded file
and check it against the provided signature.
If `task_id` is provided, this task will depend on that task
and `url_or_artifact_name` is the name of an artifact of that task.
"""
return self.with_mounts({
"file": path or url_basename(url_or_artifact_name),
"content": self._mount_content(url_or_artifact_name, task_id, sha256),
})
def with_directory_mount(self, url_or_artifact_name, task_id=None, sha256=None, path=None):
"""
Make `generic-worker` download an archive before the task starts,
and uncompress it at `path` (which is relative to the task’s home directory).
`url_or_artifact_name` must end in one of `.rar`, `.tar.bz2`, `.tar.gz`, or `.zip`.
The archive must be in the corresponding format.
If `sha256` is provided, `generic-worker` will hash the downloaded archive
and check it against the provided signature.
If `task_id` is provided, this task will depend on that task
and `url_or_artifact_name` is the name of an artifact of that task.
"""
supported_formats = ["rar", "tar.bz2", "tar.gz", "zip"]
for fmt in supported_formats:
suffix = "." + fmt
if url_or_artifact_name.endswith(suffix):
return self.with_mounts({
"directory": path or url_basename(url_or_artifact_name[:-len(suffix)]),
"content": self._mount_content(url_or_artifact_name, task_id, sha256),
"format": fmt,
})
raise ValueError(
"%r does not appear to be in one of the supported formats: %r"
% (url_or_artifact_name, ", ".join(supported_formats))
) # pragma: no cover
class WindowsGenericWorkerTask(GenericWorkerTask):
"""
Task definition for a `generic-worker` task running on Windows.
Scripts are written as `.bat` files executed with `cmd.exe`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scripts = []
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
def build_command(self):
return [deindent(s) for s in self.scripts]
def with_path_from_homedir(self, *paths):
"""
Interpret each path in `paths` as relative to the task’s home directory,
and add it to the `PATH` environment variable.
"""
for p in paths:
self.with_early_script("set PATH=%HOMEDRIVE%%HOMEPATH%\\{};%PATH%".format(p))
return self
def with_repo(self, sparse_checkout=None, shallow=True):
"""
Make a shallow clone the git repository at the start of the task.
This uses `CONFIG.git_url`, `CONFIG.git_ref`, and `CONFIG.git_sha`,
and creates the clone in a `repo` directory in the task’s home directory.
If `sparse_checkout` is given, it must be a list of path patterns
to be used in `.git/info/sparse-checkout`.
See <https://git-scm.com/docs/git-read-tree#_sparse_checkout>.
"""
git = """
git init repo
cd repo
"""
if sparse_checkout:
self.with_mounts({
"file": "sparse-checkout",
"content": {"raw": "\n".join(sparse_checkout)},
})
git += """
git config core.sparsecheckout true
copy ..\\sparse-checkout .git\\info\\sparse-checkout
type .git\\info\\sparse-checkout
"""
git += """
git fetch {depth} %GIT_URL% %GIT_REF%
git reset --hard %GIT_SHA%
""".format(depth="--depth 100" if shallow else "")
return self \
.with_git() \
.with_script(git) \
.with_env(**git_env())
def with_git(self):
"""
Make the task download `git-for-windows` and make it available for `git` commands.
This is implied by `with_repo`.
"""
return self \
.with_path_from_homedir("git\\cmd") \
.with_directory_mount(
"https://github.com/git-for-windows/git/releases/download/" +
"v2.19.0.windows.1/MinGit-2.19.0-64-bit.zip",
sha256="424d24b5fc185a9c5488d7872262464f2facab4f1d4693ea8008196f14a3c19b",
path="git",
)
def with_rustup(self):
"""
Download rustup.rs and make it available to task commands,
but does not download any default toolchain.
"""
return self \
.with_path_from_homedir(".cargo\\bin") \
.with_early_script(
"%HOMEDRIVE%%HOMEPATH%\\rustup-init.exe --default-toolchain none --profile=minimal -y"
) \
.with_file_mount("https://win.rustup.rs/x86_64", path="rustup-init.exe")
def with_repacked_msi(self, url, sha256, path):
"""
Download an MSI file from `url`, extract the files in it with `lessmsi`,
and make them available in the directory at `path` (relative to the task’s home directory).
`sha256` is required and the MSI file must have that hash.
The file extraction (and recompression in a ZIP file) is done in a separate task,
        which is indexed based on `sha256` and cached for `CONFIG.repacked_msi_files_expire_in`.
<https://github.com/activescott/lessmsi>
"""
repack_task = (
WindowsGenericWorkerTask("MSI repack: " + url)
.with_worker_type(self.worker_type)
.with_max_run_time_minutes(20)
.with_file_mount(url, sha256=sha256, path="input.msi")
.with_directory_mount(
"https://github.com/activescott/lessmsi/releases/download/" +
"v1.6.1/lessmsi-v1.6.1.zip",
sha256="540b8801e08ec39ba26a100c855898f455410cecbae4991afae7bb2b4df026c7",
path="lessmsi"
)
.with_directory_mount(
"https://www.7-zip.org/a/7za920.zip",
sha256="2a3afe19c180f8373fa02ff00254d5394fec0349f5804e0ad2f6067854ff28ac",
path="7zip",
)
.with_path_from_homedir("lessmsi", "7zip")
.with_script("""
lessmsi x input.msi extracted\\
cd extracted\\SourceDir
7za a repacked.zip *
""")
.with_artifacts("extracted/SourceDir/repacked.zip")
.with_index_and_artifacts_expire_in(CONFIG.repacked_msi_files_expire_in)
.find_or_create("repacked-msi." + sha256)
)
return self \
.with_dependencies(repack_task) \
.with_directory_mount("public/repacked.zip", task_id=repack_task, path=path)
def with_python2(self):
"""
Make Python 2, pip, and virtualenv accessible to the task’s commands.
For Python 3, use `with_directory_mount` and the "embeddable zip file" distribution
from python.org.
You may need to remove `python37._pth` from the ZIP in order to work around
<https://bugs.python.org/issue34841>.
"""
return self \
.with_repacked_msi(
"https://www.python.org/ftp/python/2.7.15/python-2.7.15.amd64.msi",
sha256="5e85f3c4c209de98480acbf2ba2e71a907fd5567a838ad4b6748c76deb286ad7",
path="python2"
) \
.with_early_script("""
python -m ensurepip
pip install virtualenv==16.0.0
""") \
.with_path_from_homedir("python2", "python2\\Scripts")
class UnixTaskMixin(Task):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.curl_scripts_count = 0
def with_repo(self, shallow=True):
"""
        Make a shallow clone of the git repository at the start of the task.
This uses `CONFIG.git_url`, `CONFIG.git_ref`, and `CONFIG.git_sha`
* generic-worker: creates the clone in a `repo` directory
in the task’s directory.
* docker-worker: creates the clone in a `/repo` directory
at the root of the Docker container’s filesystem.
`git` and `ca-certificate` need to be installed in the Docker image.
"""
return self \
.with_env(**git_env()) \
.with_early_script("""
git init repo
cd repo
git fetch {depth} "$GIT_URL" "$GIT_REF"
git reset --hard "$GIT_SHA"
""".format(depth="--depth 100" if shallow else ""))
def with_curl_script(self, url, file_path):
self.curl_scripts_count += 1
n = self.curl_scripts_count
return self \
.with_env(**{
"CURL_%s_URL" % n: url,
"CURL_%s_PATH" % n: file_path,
}) \
.with_script("""
mkdir -p $(dirname "$CURL_{n}_PATH")
curl --retry 5 --connect-timeout 10 -Lf "$CURL_{n}_URL" -o "$CURL_{n}_PATH"
""".format(n=n))
def with_curl_artifact_script(self, task_id, artifact_name, out_directory=""):
queue_service = CONFIG.tc_root_url + "/api/queue"
return self \
.with_dependencies(task_id) \
.with_curl_script(
queue_service + "/v1/task/%s/artifacts/public/%s" % (task_id, artifact_name),
os.path.join(out_directory, url_basename(artifact_name)),
)
class MacOsGenericWorkerTask(UnixTaskMixin, GenericWorkerTask):
"""
Task definition for a `generic-worker` task running on macOS.
Scripts are interpreted with `bash`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scripts = []
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
def build_command(self):
# generic-worker accepts multiple commands, but unlike on Windows
# the current directory and environment variables
# are not preserved across commands on macOS.
# So concatenate scripts and use a single `bash` command instead.
return [
[
"/bin/bash", "--login", "-x", "-e", "-c",
deindent("\n".join(self.scripts))
]
]
def with_python2(self):
return self.with_early_script("""
export PATH="$HOME/Library/Python/2.7/bin:$PATH"
python -m ensurepip --user
pip install --user virtualenv
""")
def with_rustup(self):
return self.with_early_script("""
export PATH="$HOME/.cargo/bin:$PATH"
which rustup || curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain none -y
""")
class DockerWorkerTask(UnixTaskMixin, Task):
"""
    Task definition for a worker type that runs the `docker-worker` implementation.
Scripts are interpreted with `bash`.
<https://github.com/taskcluster/docker-worker>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.docker_image = "ubuntu:bionic-20180821"
self.max_run_time_minutes = 30
self.scripts = []
self.env = {}
self.caches = {}
self.features = {}
self.capabilities = {}
self.artifacts = []
with_docker_image = chaining(setattr, "docker_image")
with_max_run_time_minutes = chaining(setattr, "max_run_time_minutes")
with_artifacts = chaining(append_to_attr, "artifacts")
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
with_caches = chaining(update_attr, "caches")
with_env = chaining(update_attr, "env")
with_capabilities = chaining(update_attr, "capabilities")
def build_worker_payload(self):
"""
Return a `docker-worker` worker payload.
<https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/payload>
"""
worker_payload = {
"image": self.docker_image,
"maxRunTime": self.max_run_time_minutes * 60,
"command": [
"/bin/bash", "--login", "-x", "-e", "-c",
deindent("\n".join(self.scripts))
],
}
return dict_update_if_truthy(
worker_payload,
env=self.env,
cache=self.caches,
features=self.features,
capabilities=self.capabilities,
artifacts={
"public/" + url_basename(path): {
"type": "file",
"path": path,
"expires": SHARED.from_now_json(self.index_and_artifacts_expire_in),
}
for path in self.artifacts
},
)
def with_features(self, *names):
"""
Enable the given `docker-worker` features.
<https://github.com/taskcluster/docker-worker/blob/master/docs/features.md>
"""
self.features.update({name: True for name in names})
return self
def with_dockerfile(self, dockerfile):
"""
Build a Docker image based on the given `Dockerfile`, and use it for this task.
`dockerfile` is a path in the filesystem where this code is running.
Some non-standard syntax is supported, see `expand_dockerfile`.
The image is indexed based on a hash of the expanded `Dockerfile`,
and cached for `CONFIG.docker_images_expire_in`.
Images are built without any *context*.
<https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#understand-build-context>
"""
basename = os.path.basename(dockerfile)
suffix = ".dockerfile"
assert basename.endswith(suffix)
image_name = basename[:-len(suffix)]
dockerfile_contents = expand_dockerfile(dockerfile)
digest = hashlib.sha256(dockerfile_contents).hexdigest()
image_build_task = (
DockerWorkerTask("Docker image: " + image_name)
.with_worker_type(CONFIG.docker_image_build_worker_type or self.worker_type)
.with_max_run_time_minutes(30)
.with_index_and_artifacts_expire_in(CONFIG.docker_images_expire_in)
.with_features("dind")
.with_env(DOCKERFILE=dockerfile_contents)
.with_artifacts("/image.tar.lz4")
.with_script("""
echo "$DOCKERFILE" | docker build -t taskcluster-built -
docker save taskcluster-built | lz4 > /image.tar.lz4
""")
.with_docker_image(
# https://github.com/servo/taskcluster-bootstrap-docker-images#image-builder
"servobrowser/taskcluster-bootstrap:image-builder@sha256:" \
"0a7d012ce444d62ffb9e7f06f0c52fedc24b68c2060711b313263367f7272d9d"
)
.find_or_create("docker-image." + digest)
)
return self \
.with_dependencies(image_build_task) \
.with_docker_image({
"type": "task-image",
"path": "public/image.tar.lz4",
"taskId": image_build_task,
})
def expand_dockerfile(dockerfile):
    """
    Return the bytes of the file at path `dockerfile`, transitively expanding
    the non-standard `% include <relative-path>` first line when present:
    the included file's (expanded) contents replace that line.
    """
    marker = b"% include"
    with open(dockerfile, "rb") as f:
        contents = f.read()
    if not contents.startswith(marker):
        return contents
    # The first line names a file relative to this dockerfile's directory.
    first_line, _, remainder = contents.partition(b"\n")
    included_name = first_line[len(marker):].strip().decode("utf8")
    included_path = os.path.join(os.path.dirname(dockerfile), included_name)
    return expand_dockerfile(included_path) + b"\n" + remainder
def git_env():
    """Return the GIT_* environment variables describing the current checkout.

    All three CONFIG values must already be populated; an AssertionError is
    raised otherwise.
    """
    assert CONFIG.git_url
    assert CONFIG.git_ref
    assert CONFIG.git_sha
    return dict(
        GIT_URL=CONFIG.git_url,
        GIT_REF=CONFIG.git_ref,
        GIT_SHA=CONFIG.git_sha,
    )
def dict_update_if_truthy(d, **kwargs):
for key, value in kwargs.items():
if value:
d[key] = value
return d
def deindent(string):
    """Collapse the run of spaces after every newline to a single space,
    then strip leading/trailing whitespace."""
    collapsed = re.sub("\n +", "\n ", string)
    return collapsed.strip()
def url_basename(url):
    """Return the final path segment of `url` (everything after the last `/`;
    the whole string when there is no slash)."""
    return url.split("/")[-1]
|
saneyuki/servo
|
etc/taskcluster/decisionlib.py
|
Python
|
mpl-2.0
| 30,561
|
# Exercise 18: Names, Variables, Code, Functions
# NOTE: this is Python 2 code (print statements, no parentheses).

# this one is like your scripts with argv
def print_two(*args):
    # unpack: callers must pass exactly two positional arguments
    arg1, arg2 = args
    print "arg1: %r, arg2: %r" % (arg1, arg2)

# ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
    print "arg1: %r, arg2: %r" % (arg1, arg2)

# this just takes one argument
def print_one(arg1):
    print "arg1: %r" % arg1

# this one takes no arguments
def print_none():
    print "I got nothin'."

# exercise each of the four variants above
print_two("Paul","Carroty")
print_two_again("Paul","Carroty")
print_one("First!")
print_none()
|
paulcarroty/Learn-Python-The-Hard-Way
|
ex18.py
|
Python
|
gpl-3.0
| 575
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import unittest
from org.o3project.odenos.core.component.network.topology.node import Node
from org.o3project.odenos.core.component.network.topology.node_changed\
import NodeChanged
class NodeChangedTest(unittest.TestCase):
    """Unit tests for NodeChanged, the topology node-change event object."""

    def setUp(self):
        # Two distinct nodes standing in for the previous/current state.
        node1 = Node('Node', '1', 'NodeId1', {}, {})
        node2 = Node('Node', '1', 'NodeId2', {}, {})
        self.target = NodeChanged('ID', NodeChanged.Action.ADD,
                                  '1', node1, node2)

    def tearDown(self):
        pass

    def test_constructor(self):
        # Peek at the private (name-mangled) attributes directly.
        self.assertEqual(self.target._NodeChanged__id, 'ID')
        self.assertEqual(self.target._NodeChanged__action,
                         NodeChanged.Action.ADD)
        self.assertEqual(self.target._NodeChanged__version, '1')
        self.assertEqual(self.target._NodeChanged__prev.node_id, 'NodeId1')
        self.assertEqual(self.target._NodeChanged__curr.node_id, 'NodeId2')

    def test_id(self):
        self.assertEqual(self.target.id, 'ID')

    def test_action(self):
        self.assertEqual(self.target.action, NodeChanged.Action.ADD)

    def test_version(self):
        self.assertEqual(self.target.version, '1')

    def test_prev(self):
        prev = self.target.prev
        self.assertEqual(prev.node_id, 'NodeId1')

    def test_curr(self):
        curr = self.target.curr
        self.assertEqual(curr.node_id, 'NodeId2')

    def test_create_from_packed_add(self):
        # 'add' events carry no previous state.
        packed = {'id': 'ID', 'action': 'add', 'version': '1', 'prev': None,
                  'curr': {'type': 'Node', 'version': '1',
                           'node_id': 'NodeId2', 'ports': {},
                           'attributes': {}}}
        result = NodeChanged.create_from_packed(packed)
        self.assertEqual(result.id, 'ID')
        self.assertEqual(result.action, NodeChanged.Action.ADD)
        self.assertEqual(result.version, '1')
        self.assertEqual(result.prev, None)
        self.assertEqual(result.curr.node_id, 'NodeId2')

    def test_create_from_packed_delete(self):
        # 'delete' events omit 'version' (expected to default to '') and
        # carry no current state.
        packed = {'id': 'ID', 'action': 'delete',
                  'prev': {'type': 'Node', 'version': '1',
                           'node_id': 'NodeId1', 'ports': {},
                           'attributes': {}},
                  'curr': None}
        result = NodeChanged.create_from_packed(packed)
        self.assertEqual(result.id, 'ID')
        self.assertEqual(result.action, NodeChanged.Action.DELETE)
        self.assertEqual(result.version, '')
        self.assertEqual(result.prev.node_id, 'NodeId1')
        self.assertEqual(result.curr, None)

    def test_create_from_packed_update(self):
        # 'update' events carry both previous and current state.
        packed = {'id': 'ID', 'action': 'update', 'version': '1',
                  'prev': {'type': 'Node', 'version': '1',
                           'node_id': 'NodeId1', 'ports': {},
                           'attributes': {}},
                  'curr': {'type': 'Node', 'version': '1',
                           'node_id': 'NodeId2', 'ports': {},
                           'attributes': {}}}
        result = NodeChanged.create_from_packed(packed)
        self.assertEqual(result.id, 'ID')
        self.assertEqual(result.action, NodeChanged.Action.UPDATE)
        self.assertEqual(result.version, '1')
        self.assertEqual(result.prev.node_id, 'NodeId1')
        self.assertEqual(result.curr.node_id, 'NodeId2')


if __name__ == "__main__":
    unittest.main()
|
haizawa/odenos
|
src/test/python/org/o3project/odenos/core/component/network/topology/test_node_changed.py
|
Python
|
apache-2.0
| 4,446
|
from django.conf import settings
from .. import Tags, Warning, register
def add_session_cookie_message(message):
    """Append the standard SESSION_COOKIE_SECURE rationale to `message`."""
    rationale = (
        " Using a secure-only session cookie makes it more difficult for "
        "network traffic sniffers to hijack user sessions."
    )
    return message + rationale
# W010/W011/W012: SESSION_COOKIE_SECURE warnings, phrased according to how
# session support was detected (installed app, middleware, or generic).
W010 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_SECURE to True."
    ),
    id='security.W010',
)

W011 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_SECURE to True."
    ),
    id='security.W011',
)

# Generic variant, used when both the app and the middleware are present.
W012 = Warning(
    add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
    id='security.W012',
)
def add_httponly_message(message):
    """Append the standard SESSION_COOKIE_HTTPONLY rationale to `message`."""
    rationale = (
        " Using an HttpOnly session cookie makes it more difficult for "
        "cross-site scripting attacks to hijack user sessions."
    )
    return message + rationale
# W013/W014/W015: SESSION_COOKIE_HTTPONLY warnings, mirroring W010-W012.
W013 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_HTTPONLY to True.",
    ),
    id='security.W013',
)

W014 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_HTTPONLY to True."
    ),
    id='security.W014',
)

# Generic variant, used when both the app and the middleware are present.
W015 = Warning(
    add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
    id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
    """Warn when SESSION_COOKIE_SECURE is off while sessions are in use."""
    if settings.SESSION_COOKIE_SECURE:
        return []
    errors = []
    if _session_app():
        errors.append(W010)
    if _session_middleware():
        errors.append(W011)
    # Both the app and the middleware triggered: collapse to one warning.
    return [W012] if len(errors) > 1 else errors
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
    """Warn when SESSION_COOKIE_HTTPONLY is off while sessions are in use."""
    if settings.SESSION_COOKIE_HTTPONLY:
        return []
    errors = []
    if _session_app():
        errors.append(W013)
    if _session_middleware():
        errors.append(W014)
    # Both the app and the middleware triggered: collapse to one warning.
    return [W015] if len(errors) > 1 else errors
def _session_middleware():
    """True when SessionMiddleware appears in MIDDLEWARE_CLASSES."""
    middleware = "django.contrib.sessions.middleware.SessionMiddleware"
    return middleware in settings.MIDDLEWARE_CLASSES
def _session_app():
    """True when django.contrib.sessions appears in INSTALLED_APPS."""
    app_name = "django.contrib.sessions"
    return app_name in settings.INSTALLED_APPS
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/django/core/checks/security/sessions.py
|
Python
|
mit
| 2,595
|
from __future__ import print_function, unicode_literals, division, absolute_import
import datetime
import time
import ntplib
from pyotp import utils
from pyotp.otp import OTP
class TOTP(OTP):
    """Time-based OTP: the HOTP counter is derived from the current time,
    corrected by an NTP-estimated clock offset."""

    # Offset (whole seconds) between the local clock and NTP time; shared
    # class-wide and populated lazily by the first successful __init__.
    systime_offset = None

    def __init__(self, *args, **kwargs):
        """
        @option options [Integer] interval (30) the time interval in seconds
        for OTP This defaults to 30 which is standard.
        """
        self.interval = kwargs.pop('interval', 30)
        if self.systime_offset is None:
            try:
                # One-off NTP query (network I/O!) to estimate clock drift.
                c = ntplib.NTPClient()
                TOTP.systime_offset = int(c.request(
                    'pool.ntp.org', version=3).offset)
            except Exception:
                # NOTE(review): on failure this sets the *instance*
                # attribute, so the class attribute stays None and later
                # instances retry NTP -- looks deliberate, but confirm.
                self.systime_offset = 0
        super(TOTP, self).__init__(*args, **kwargs)

    def at(self, for_time, counter_offset=0):
        """
        Accepts either a Unix timestamp integer or a Time object.
        Time objects will be adjusted to UTC automatically
        @param [Time/Integer] time the time to generate an OTP for
        @param [Integer] counter_offset an amount of ticks to add to the time counter
        """
        if not isinstance(for_time, datetime.datetime):
            for_time = datetime.datetime.fromtimestamp(int(for_time))
        return self.generate_otp(self.timecode(for_time) + counter_offset)

    def now(self):
        """
        Generate the current time OTP
        @return [Integer] the OTP as an integer
        """
        return self.generate_otp(self.timecode(datetime.datetime.now()))

    def verify(self, otp, for_time=None, valid_window=0):
        """
        Verifies the OTP passed in against the current time OTP
        @param [String/Integer] otp the OTP to check against
        @param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
        """
        if for_time is None:
            for_time = datetime.datetime.now()
        if valid_window:
            # Compare against every candidate tick inside the window,
            # using a timing-safe string comparison.
            for i in range(-valid_window, valid_window + 1):
                if utils.strings_equal(str(otp), str(self.at(for_time, i))):
                    return True
            return False
        return utils.strings_equal(str(otp), str(self.at(for_time)))

    def provisioning_uri(self, name, issuer_name=None):
        """
        Returns the provisioning URI for the OTP
        This can then be encoded in a QR Code and used
        to provision the Google Authenticator app
        @param [String] name of the account
        @return [String] provisioning uri
        """
        return utils.build_uri(self.secret, name, issuer_name=issuer_name)

    def timecode(self, for_time):
        # Number of `interval`-second ticks since the epoch, NTP-corrected.
        i = time.mktime(for_time.timetuple()) + self.systime_offset
        return int(i / self.interval)
|
projectarkc/arkc-server
|
arkcserver/pyotp/totp.py
|
Python
|
gpl-2.0
| 2,787
|
# Package version string.
__version__ = '0.1.0'

# Re-export the public Report entry point at the package level.
from .reports import Report
|
grantmcconnaughey/django-reports
|
djreports/__init__.py
|
Python
|
bsd-3-clause
| 51
|
# Authors: Mr_Orange <mr_orange@hotmail.it>, EchelonFour
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
class uTorrentAPI(GenericClient):
    """
    Torrent client handler for the uTorrent Web UI.

    All calls go through the ``gui/`` HTTP endpoint; every request must carry
    the CSRF token fetched from ``token.html``.
    """

    def __init__(self, host=None, username=None, password=None):
        super(uTorrentAPI, self).__init__('uTorrent', host, username, password)
        # The Web UI lives under the 'gui/' path of the configured host.
        self.url = self.host + 'gui/'

    def _request(self, method='get', params=None, files=None):
        """Issue a request with the auth token merged into the query params.

        BUG FIX: the default used to be a shared mutable dict (``params={}``)
        mutated via ``update()``, which leaked the token into the caller's
        dict and across calls through the shared default. Copy instead.
        """
        request_params = dict(params) if params else {}
        request_params['token'] = self.auth
        return super(uTorrentAPI, self)._request(method=method, params=request_params, files=files)

    def _get_auth(self):
        """Fetch the CSRF token from token.html; return None on any failure."""
        try:
            self.response = self.session.get(self.url + 'token.html', verify=False)
            self.auth = re.findall("<div.*?>(.*?)</", self.response.text)[0]
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not swallowed; the call stays best-effort by design.
            return None
        return self.auth if not self.response.status_code == 404 else None

    def _add_torrent_uri(self, result):
        """Add a torrent by URL or magnet link."""
        params = {'action': 'add-url', 's': result.url}
        return self._request(params=params)

    def _add_torrent_file(self, result):
        """Upload a .torrent file's raw contents."""
        params = {'action': 'add-file'}
        files = {'torrent_file': (result.name + '.torrent', result.content)}
        return self._request(method='post', params=params, files=files)

    def _set_torrent_label(self, result):
        """Apply the globally configured label to the torrent."""
        params = {'action': 'setprops',
                  'hash': result.hash,
                  's': 'label',
                  'v': sickbeard.TORRENT_LABEL
                  }
        return self._request(params=params)

    def _set_torrent_ratio(self, result):
        """Override the seed ratio for this torrent when one is requested.

        Returns True when nothing needed doing, otherwise the success of the
        second (seed_ratio) request; False when enabling the override failed.
        """
        ratio = result.ratio if result.ratio else None
        if ratio:
            params = {'action': 'setprops',
                      'hash': result.hash,
                      's': 'seed_override',
                      'v': '1'
                      }
            if self._request(params=params):
                params = {'action': 'setprops',
                          'hash': result.hash,
                          's': 'seed_ratio',
                          # uTorrent expects the ratio in tenths of a percent.
                          'v': float(ratio) * 10
                          }
                return self._request(params=params)
            return False
        return True

    def _set_torrent_seed_time(self, result):
        """Override the seed time (configured hours -> seconds) when set."""
        if sickbeard.TORRENT_SEED_TIME:
            time = 3600 * float(sickbeard.TORRENT_SEED_TIME)
            params = {'action': 'setprops',
                      'hash': result.hash,
                      's': 'seed_override',
                      'v': '1'
                      }
            if self._request(params=params):
                params = {'action': 'setprops',
                          'hash': result.hash,
                          's': 'seed_time',
                          'v': time
                          }
                return self._request(params=params)
            return False
        return True

    def _set_torrent_priority(self, result):
        """Move priority-1 torrents to the top of the download queue."""
        if result.priority == 1:
            params = {'action': 'queuetop', 'hash': result.hash}
            return self._request(params=params)
        return True

    def _set_torrent_pause(self, result):
        """Pause or start the torrent per the global TORRENT_PAUSED flag."""
        if sickbeard.TORRENT_PAUSED:
            params = {'action': 'pause', 'hash': result.hash}
        else:
            params = {'action': 'start', 'hash': result.hash}
        return self._request(params=params)
api = uTorrentAPI()
|
whitepyro/debian_server_setup
|
sickbeard/clients/utorrent.py
|
Python
|
gpl-3.0
| 4,180
|
import json
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
from coalib.results.Result import Result
from coalib.settings.Setting import path
@linter(executable='tslint')
class TSLintBear:
    """
    Check TypeScript code for style violations and possible semantical
    problems.

    Read more about the capabilities at
    <https://github.com/palantir/tslint#core-rules>.
    """
    LANGUAGES = {'TypeScript'}
    REQUIREMENTS = {NpmRequirement('tslint', '3'),
                    NpmRequirement('typescript', '>=1.7.3')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/9re9c4fv17lhn7rmvzueebb3b'
    CAN_DETECT = {'Syntax', 'Formatting', 'Smell'}

    @staticmethod
    def create_arguments(filename, file, config_file,
                         tslint_config: path = '',
                         rules_dir: path = '',
                         ):
        """
        :param tslint_config: Path to configuration file.
        :param rules_dir:     Rules directory.
        """
        # Always request machine-readable JSON output from tslint.
        arguments = ['--format', 'json']
        if tslint_config:
            arguments.extend(('--config', tslint_config))
        if rules_dir:
            arguments.extend(('--rules-dir', rules_dir))
        arguments.append(filename)
        return tuple(arguments)

    def process_output(self, output, filename, file):
        # tslint emits a JSON array of issues; an empty output means none.
        issues = json.loads(output) if output else []
        for issue in issues:
            start = issue['startPosition']
            end = issue['endPosition']
            # tslint positions are 0-based; coala's are 1-based.
            yield Result.from_values(
                origin='{} ({})'.format(self.__class__.__name__,
                                        issue['ruleName']),
                message=issue['failure'],
                file=issue['name'],
                line=int(start['line']) + 1,
                end_line=int(end['line']) + 1,
                column=int(start['character']) + 1,
                end_column=int(end['character']) + 1)
|
refeed/coala-bears
|
bears/typescript/TSLintBear.py
|
Python
|
agpl-3.0
| 2,040
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .base import TabGroup, Tab
from .views import TabView
|
developerworks/horizon
|
horizon/tabs/__init__.py
|
Python
|
apache-2.0
| 709
|
####
#Importing modules can serve 3 purposes if done right
#1-Allows you to add features to bare bones python
#2-Only importing what you need lets your imports serve as documentation for
# someone else reading your code later.
#3-By forcing you to import things, python helps you keep your programs small
####
#import the 'hook' which lets python read command line arguments
from sys import argv
#argv is a list. This line puts all the elements into variables
script, first, second, third, fourth = argv
#You understand printing by now
print "Your script is called ", script, ", which was the zeroth variable."
print "Your first variable was ", first
print "Your second variable was ", second
print "Your third variable was ", third
print "Your first variable was ", first
|
isaac-friedman/lphw
|
ex13.py
|
Python
|
mit
| 777
|
import numpy as np
from PIL import Image, ImageEnhance
import cv2
from skimage.transform import rotate
def RandomResize(img, scale=0.12):
    """Resize a PIL image by one random factor in [1 - scale, 1 + scale].

    Both dimensions are scaled by the same factor, so the aspect ratio is
    preserved.

    :param img:   PIL image (anything exposing numpy-compatible shape and a
                  PIL-style ``resize((width, height))`` method).
    :param scale: Half-width of the uniform scaling range around 1.0.
    :return:      The resized image returned by ``img.resize``.
    """
    factor = np.random.uniform(1 - scale, 1 + scale)
    # np.shape gives (rows, cols) == (height, width).
    height = int(np.shape(img)[0] * factor)
    width = int(np.shape(img)[1] * factor)
    # BUG FIX: PIL's Image.resize takes (width, height); the original passed
    # (height, width), which swapped the axes of non-square images.
    return img.resize((width, height))
####################################################
def transformations(img, choice):
    """Apply one of seven dihedral transformations to a PIL image.

    Choices 0..5 select a rotation (90/180/270 degrees) optionally followed
    by a top-bottom flip; choice 6 returns the image unchanged.

    :param img:    PIL image.
    :param choice: Integer in 0..6 selecting the transformation.
    :raises ValueError: for any choice outside 0..6 (the original code hit
        an UnboundLocalError on ``new_img`` instead).
    """
    # PIL's Image.rotate is counter-clockwise, so rotate(270) == 90 CW.
    if choice == 0:
        # Rotate 90
        return img.rotate(270)
    if choice == 1:
        # Rotate 90 and flip top-bottom
        return img.rotate(270).transpose(Image.FLIP_TOP_BOTTOM)
    if choice == 2:
        # Rotate 180
        return img.rotate(180)
    if choice == 3:
        # Rotate 180 and flip top-bottom
        return img.rotate(180).transpose(Image.FLIP_TOP_BOTTOM)
    if choice == 4:
        # Rotate 90 counter-clockwise
        return img.rotate(90)
    if choice == 5:
        # Rotate 90 counter-clockwise and flip top-bottom
        return img.rotate(90).transpose(Image.FLIP_TOP_BOTTOM)
    if choice == 6:
        # no transformation
        return img
    raise ValueError('choice must be in range 0..6, got %r' % (choice,))
|
ivanlai/Kaggle-Planet-Amazon
|
Image_transformation.py
|
Python
|
mit
| 1,310
|
# Odoo (OpenERP) addon manifest: metadata and dependency declaration for the
# SaaS Portal Sign Up module.
{
    'name': 'SaaS Portal Sign Up',
    'version': '1.0.0',
    'author': 'Cesar Lage',
    'license': 'GPL-3',
    'category': 'SaaS',
    'website': 'https://it-projects.info',
    # Needs the generic signup flow plus the SaaS portal core module.
    'depends': ['auth_signup', 'saas_portal'],
    'data': ['views/signup.xml'],
    'installable': True,
    'description': '''
Module to book a new client in SaaS Portal from sign up
''',
}
|
Endika/odoo-saas-tools
|
saas_portal_signup/__openerp__.py
|
Python
|
gpl-3.0
| 377
|
from sympy.holonomic import (DifferentialOperator, HolonomicFunction,
DifferentialOperators, from_hyper,
from_meijerg, expr_to_holonomic)
from sympy.holonomic.recurrence import RecurrenceOperators, HolonomicSequence
from sympy import (symbols, hyper, S, sqrt, pi, exp, erf, erfc, sstr, Symbol,
O, I, meijerg, sin, cos, log, cosh, besselj, hyperexpand,
Ci, EulerGamma, Si, asinh, gamma, beta)
from sympy import ZZ, QQ, RR
def test_DifferentialOperator():
    # Construction and (non-commutative) arithmetic of differential
    # operators over QQ[x].
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    assert Dx == R.derivative_operator
    assert Dx == DifferentialOperator([R.base.zero, R.base.one], R)
    assert x * Dx + x**2 * Dx**2 == DifferentialOperator([0, x, x**2], R)
    assert (x**2 + 1) + Dx + x * \
        Dx**5 == DifferentialOperator([x**2 + 1, 1, 0, 0, 0, x], R)
    assert (x * Dx + x**2 + 1 - Dx * (x**3 + x))**3 == (-48 * x**6) + \
        (-57 * x**7) * Dx + (-15 * x**8) * Dx**2 + (-x**9) * Dx**3
    p = (x * Dx**2 + (x**2 + 3) * Dx**5) * (Dx + x**2)
    q = (2 * x) + (4 * x**2) * Dx + (x**3) * Dx**2 + \
        (20 * x**2 + x + 60) * Dx**3 + (10 * x**3 + 30 * x) * Dx**4 + \
        (x**4 + 3 * x**2) * Dx**5 + (x**2 + 3) * Dx**6
    assert p == q
def test_HolonomicFunction_addition():
    # Addition of holonomic functions: annihilator of the sum.
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx**2 * x, x)
    q = HolonomicFunction((2) * Dx + (x) * Dx**2, x)
    assert p == q
    p = HolonomicFunction(x * Dx + 1, x)
    q = HolonomicFunction(Dx + 1, x)
    r = HolonomicFunction((x - 2) + (x**2 - 2) * Dx + (x**2 - x) * Dx**2, x)
    assert p + q == r
    p = HolonomicFunction(x * Dx + Dx**2 * (x**2 + 2), x)
    q = HolonomicFunction(Dx - 3, x)
    r = HolonomicFunction((-54 * x**2 - 126 * x - 150) + (-135 * x**3 - 252 * x**2 - 270 * x + 140) * Dx +\
        (-27 * x**4 - 24 * x**2 + 14 * x - 150) * Dx**2 + \
        (9 * x**4 + 15 * x**3 + 38 * x**2 + 30 * x +40) * Dx**3, x)
    assert p + q == r
    p = HolonomicFunction(Dx**5 - 1, x)
    q = HolonomicFunction(x**3 + Dx, x)
    r = HolonomicFunction((-x**18 + 45*x**14 - 525*x**10 + 1575*x**6 - x**3 - 630*x**2) + \
        (-x**15 + 30*x**11 - 195*x**7 + 210*x**3 - 1)*Dx + (x**18 - 45*x**14 + 525*x**10 - \
        1575*x**6 + x**3 + 630*x**2)*Dx**5 + (x**15 - 30*x**11 + 195*x**7 - 210*x**3 + \
        1)*Dx**6, x)
    assert p+q == r
    # Sums built from first-order annihilators of plain polynomials.
    p = x**2 + 3*x + 8
    q = x**3 - 7*x + 5
    p = p*Dx - p.diff()
    q = q*Dx - q.diff()
    r = HolonomicFunction(p, x) + HolonomicFunction(q, x)
    s = HolonomicFunction((6*x**2 + 18*x + 14) + (-4*x**3 - 18*x**2 - 62*x + 10)*Dx +\
        (x**4 + 6*x**3 + 31*x**2 - 10*x - 71)*Dx**2, x)
    assert r == s
def test_HolonomicFunction_multiplication():
    # Multiplication of holonomic functions: annihilator of the product.
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx+x+x*Dx**2, x)
    q = HolonomicFunction(x*Dx+Dx*x+Dx**2, x)
    r = HolonomicFunction((8*x**6 + 4*x**4 + 6*x**2 + 3) + (24*x**5 - 4*x**3 + 24*x)*Dx + \
        (8*x**6 + 20*x**4 + 12*x**2 + 2)*Dx**2 + (8*x**5 + 4*x**3 + 4*x)*Dx**3 + \
        (2*x**4 + x**2)*Dx**4, x)
    assert p*q == r
    p = HolonomicFunction(Dx**2+1, x)
    q = HolonomicFunction(Dx-1, x)
    r = HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x)
    assert p*q == r
    p = HolonomicFunction(Dx**2+1+x+Dx, x)
    q = HolonomicFunction((Dx*x-1)**2, x)
    r = HolonomicFunction((4*x**7 + 11*x**6 + 16*x**5 + 4*x**4 - 6*x**3 - 7*x**2 - 8*x - 2) + \
        (8*x**6 + 26*x**5 + 24*x**4 - 3*x**3 - 11*x**2 - 6*x - 2)*Dx + \
        (8*x**6 + 18*x**5 + 15*x**4 - 3*x**3 - 6*x**2 - 6*x - 2)*Dx**2 + (8*x**5 + \
        10*x**4 + 6*x**3 - 2*x**2 - 4*x)*Dx**3 + (4*x**5 + 3*x**4 - x**2)*Dx**4, x)
    assert p*q == r
    p = HolonomicFunction(x*Dx**2-1, x)
    q = HolonomicFunction(Dx*x-x, x)
    r = HolonomicFunction((x - 3) + (-2*x + 2)*Dx + (x)*Dx**2, x)
    assert p*q == r
def test_addition_initial_condition():
    # Addition when initial conditions (x0, [y0, y0', ...]) are attached.
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx-1, x, 0, [3])
    q = HolonomicFunction(Dx**2+1, x, 0, [1, 0])
    r = HolonomicFunction(-1 + Dx - Dx**2 + Dx**3, x, 0, [4, 3, 2])
    assert p + q == r
    p = HolonomicFunction(Dx - x + Dx**2, x, 0, [1, 2])
    q = HolonomicFunction(Dx**2 + x, x, 0, [1, 0])
    r = HolonomicFunction((-x**4 - x**3/4 - x**2 + 1/4) + (x**3 + x**2/4 + 3*x/4 + 1)*Dx + \
        (-3*x/2 + 7/4)*Dx**2 + (x**2 - 7*x/4 + 1/4)*Dx**3 + (x**2 + x/4 + 1/2)*Dx**4, x, 0, [2, 2, -2, 2])
    assert p + q == r
    p = HolonomicFunction(Dx**2 + 4*x*Dx + x**2, x, 0, [3, 4])
    q = HolonomicFunction(Dx**2 + 1, x, 0, [1, 1])
    r = HolonomicFunction((x**6 + 2*x**4 - 5*x**2 - 6) + (4*x**5 + 36*x**3 - 32*x)*Dx + \
        (x**6 + 3*x**4 + 5*x**2 - 9)*Dx**2 + (4*x**5 + 36*x**3 - 32*x)*Dx**3 + (x**4 + \
        10*x**2 - 3)*Dx**4, x, 0, [4, 5, -1, -17])
    assert p + q == r
    # Conditions specified at a non-zero expansion point (x0 = 2).
    q = HolonomicFunction(Dx**3 + x, x, 2, [3, 0, 1])
    p = HolonomicFunction(Dx - 1, x, 2, [1])
    r = HolonomicFunction((-x**2 - x + 1) + (x**2 + x)*Dx + (-x - 2)*Dx**3 + \
        (x + 1)*Dx**4, x, 2, [4, 1, 2, -5 ])
    assert p + q == r
    p = expr_to_holonomic(sin(x))
    q = expr_to_holonomic(1/x, x0=1)
    r = HolonomicFunction((x**2 + 6) + (x**3 + 2*x)*Dx + (x**2 + 6)*Dx**2 + (x**3 + 2*x)*Dx**3, \
        x, 1, [sin(1) + 1, -1 + cos(1), -sin(1) + 2])
    assert p + q == r
    # Undetermined constant C_1 in the symbolic result is fixed by hand.
    C_1 = symbols('C_1')
    p = expr_to_holonomic(sqrt(x))
    q = expr_to_holonomic(sqrt(x**2-x))
    r = (p + q).to_expr().subs(C_1, -I/2).expand()
    assert r == I*sqrt(x)*sqrt(-x + 1) + sqrt(x)
def test_multiplication_initial_condition():
    # Multiplication when initial conditions are attached.
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx**2 + x*Dx - 1, x, 0, [3, 1])
    q = HolonomicFunction(Dx**2 + 1, x, 0, [1, 1])
    r = HolonomicFunction((x**4 + 14*x**2 + 60) + 4*x*Dx + (x**4 + 9*x**2 + 20)*Dx**2 + \
        (2*x**3 + 18*x)*Dx**3 + (x**2 + 10)*Dx**4, x, 0, [3, 4, 2, 3])
    assert p * q == r
    p = HolonomicFunction(Dx**2 + x, x, 0, [1, 0])
    q = HolonomicFunction(Dx**3 - x**2, x, 0, [3, 3, 3])
    r = HolonomicFunction((x**8 - 37*x**7/27 - 10*x**6/27 - 164*x**5/9 - 184*x**4/9 + \
        160*x**3/27 + 404*x**2/9 + 8*x + 40/3) + (6*x**7 - 128*x**6/9 - 98*x**5/9 - 28*x**4/9 + \
        8*x**3/9 + 28*x**2 + 40*x/9 - 40)*Dx + (3*x**6 - 82*x**5/9 + 76*x**4/9 + 4*x**3/3 + \
        220*x**2/9 - 80*x/3)*Dx**2 + (-2*x**6 + 128*x**5/27 - 2*x**4/3 -80*x**2/9 + 200/9)*Dx**3 + \
        (3*x**5 - 64*x**4/9 - 28*x**3/9 + 6*x**2 - 20*x/9 - 20/3)*Dx**4 + (-4*x**3 + 64*x**2/9 + \
        8*x/3)*Dx**5 + (x**4 - 64*x**3/27 - 4*x**2/3 + 20/9)*Dx**6, x, 0, [3, 3, 3, -3, -12, -24])
    assert p * q == r
    p = HolonomicFunction(Dx - 1, x, 0, [2])
    q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])
    r = HolonomicFunction(2 -2*Dx + Dx**2, x, 0, [0, 2])
    assert p * q == r
    q = HolonomicFunction(x*Dx**2 + 1 + 2*Dx, x, 0,[0, 1])
    r = HolonomicFunction((x - 1) + (-2*x + 2)*Dx + x*Dx**2, x, 0, [0, 2])
    assert p * q == r
    p = HolonomicFunction(Dx**2 - 1, x, 0, [1, 3])
    q = HolonomicFunction(Dx**3 + 1, x, 0, [1, 2, 1])
    r = HolonomicFunction(6*Dx + 3*Dx**2 + 2*Dx**3 - 3*Dx**4 + Dx**6, x, 0, [1, 5, 14, 17, 17, 2])
    assert p * q == r
    p = expr_to_holonomic(sin(x))
    q = expr_to_holonomic(1/x, x0=1)
    r = HolonomicFunction(x + 2*Dx + x*Dx**2, x, 1, [sin(1), -sin(1) + cos(1)])
    assert p * q == r
    p = expr_to_holonomic(sqrt(x))
    q = expr_to_holonomic(sqrt(x**2-x))
    r = (p * q).to_expr()
    assert r == I*x*sqrt(-x + 1)
def test_HolonomicFunction_composition():
    # Composition of a holonomic function with polynomial/rational arguments.
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx-1, x).composition(x**2+x)
    r = HolonomicFunction((-2*x - 1) + Dx, x)
    assert p == r
    p = HolonomicFunction(Dx**2+1, x).composition(x**5+x**2+1)
    r = HolonomicFunction((125*x**12 + 150*x**9 + 60*x**6 + 8*x**3) + (-20*x**3 - 2)*Dx + \
        (5*x**4 + 2*x)*Dx**2, x)
    assert p == r
    p = HolonomicFunction(Dx**2*x+x, x).composition(2*x**3+x**2+1)
    r = HolonomicFunction((216*x**9 + 324*x**8 + 180*x**7 + 152*x**6 + 112*x**5 + \
        36*x**4 + 4*x**3) + (24*x**4 + 16*x**3 + 3*x**2 - 6*x - 1)*Dx + (6*x**5 + 5*x**4 + \
        x**3 + 3*x**2 + x)*Dx**2, x)
    assert p == r
    p = HolonomicFunction(Dx**2+1, x).composition(1-x**2)
    r = HolonomicFunction((4*x**3) - Dx + x*Dx**2, x)
    assert p == r
    # Rational (non-polynomial) inner function.
    p = HolonomicFunction(Dx**2+1, x).composition(x - 2/(x**2 + 1))
    r = HolonomicFunction((x**12 + 6*x**10 + 12*x**9 + 15*x**8 + 48*x**7 + 68*x**6 + \
        72*x**5 + 111*x**4 + 112*x**3 + 54*x**2 + 12*x + 1) + (12*x**8 + 32*x**6 + \
        24*x**4 - 4)*Dx + (x**12 + 6*x**10 + 4*x**9 + 15*x**8 + 16*x**7 + 20*x**6 + 24*x**5+ \
        15*x**4 + 16*x**3 + 6*x**2 + 4*x + 1)*Dx**2, x)
    assert p == r
def test_from_hyper():
    # Conversion of hypergeometric functions to holonomic representation.
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = hyper([1, 1], [S(3)/2], x**2/4)
    q = HolonomicFunction((4*x) + (5*x**2 - 8)*Dx + (x**3 - 4*x)*Dx**2, x, 1, [2*sqrt(3)*pi/9, -4*sqrt(3)*pi/27 + 4/3])
    r = from_hyper(p)
    assert r == q
    p = from_hyper(hyper([1], [S(3)/2], x**2/4))
    q = HolonomicFunction(-x + (-x**2/2 + 2)*Dx + x*Dx**2, x)
    x0 = 1  # NOTE(review): unused; presumably documents the expansion point
    y0 = '[sqrt(pi)*exp(1/4)*erf(1/2), -sqrt(pi)*exp(1/4)*erf(1/2)/2 + 1]'
    assert sstr(p.y0) == y0
    assert q.annihilator == p.annihilator
def test_from_meijerg():
    # Conversion of Meijer G-functions to holonomic representation.
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = from_meijerg(meijerg(([], [S(3)/2]), ([S(1)/2], [S(1)/2, 1]), x))
    q = HolonomicFunction(x/2 - 1/4 + (-x**2 + x/4)*Dx + x**2*Dx**2 + x**3*Dx**3, x, 1, \
        [1/sqrt(pi), 1/(2*sqrt(pi)), -1/(4*sqrt(pi))])
    assert p == q
    p = from_meijerg(meijerg(([], []), ([0], []), x))
    q = HolonomicFunction(1 + Dx, x, 0, [1])
    assert p == q
    p = from_meijerg(meijerg(([1], []), ([S(1)/2], [0]), x))
    q = HolonomicFunction((x + 1/2)*Dx + x*Dx**2, x, 1, [sqrt(pi)*erf(1), exp(-1)])
    assert p == q
    p = from_meijerg(meijerg(([0], [1]), ([0], []), 2*x**2))
    q = HolonomicFunction((3*x**2 - 1)*Dx + x**3*Dx**2, x, 1, [-exp(-S(1)/2) + 1, -exp(-S(1)/2)])
    assert p == q
def test_to_Sequence():
    # Conversion of differential annihilators to recurrences satisfied by
    # the power-series coefficients.
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    n = symbols('n', integer=True)
    _, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
    p = HolonomicFunction(x**2*Dx**4 + x + Dx, x).to_sequence()
    q = [(HolonomicSequence(1 + (n + 2)*Sn**2 + (n**4 + 6*n**3 + 11*n**2 + 6*n)*Sn**3), 0, 1)]
    assert p == q
    p = HolonomicFunction(x**2*Dx**4 + x**3 + Dx**2, x).to_sequence()
    q = [(HolonomicSequence(1 + (n**4 + 14*n**3 + 72*n**2 + 163*n + 140)*Sn**5), 0, 0)]
    assert p == q
    p = HolonomicFunction(x**3*Dx**4 + 1 + Dx**2, x).to_sequence()
    q = [(HolonomicSequence(1 + (n**4 - 2*n**3 - n**2 + 2*n)*Sn + (n**2 + 3*n + 2)*Sn**2), 0, 0)]
    assert p == q
    p = HolonomicFunction(3*x**3*Dx**4 + 2*x*Dx + x*Dx**3, x).to_sequence()
    q = [(HolonomicSequence(2*n + (3*n**4 - 6*n**3 - 3*n**2 + 6*n)*Sn + (n**3 + 3*n**2 + 2*n)*Sn**2), 0, 1)]
    assert p == q
def test_to_Sequence_Initial_Coniditons():
    # NOTE(review): "Coniditons" is a typo, but renaming would change the
    # public test name, so it is kept as-is.
    # Recurrence conversion when initial conditions are attached.
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    n = symbols('n', integer=True)
    _, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
    p = HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
    q = [(HolonomicSequence(-1 + (n + 1)*Sn, 1), 0)]
    assert p == q
    p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_sequence()
    q = [(HolonomicSequence(1 + (n**2 + 3*n + 2)*Sn**2, [0, 1]), 0)]
    assert p == q
    p = HolonomicFunction(Dx**2 + 1 + x**3*Dx, x, 0, [2, 3]).to_sequence()
    q = [(HolonomicSequence(n + Sn**2 + (n**2 + 7*n + 12)*Sn**4, [2, 3, -1, -1/2, 1/12]), 1)]
    assert p == q
    p = HolonomicFunction(x**3*Dx**5 + 1 + Dx, x).to_sequence()
    q = [(HolonomicSequence(1 + (n + 1)*Sn + (n**5 - 5*n**3 + 4*n)*Sn**2), 0, 3)]
    assert p == q
    # Undetermined constants C_i appear when conditions underdetermine the
    # coefficients.
    C_0, C_1, C_2, C_3 = symbols('C_0, C_1, C_2, C_3')
    p = expr_to_holonomic(log(1+x**2))
    q = [(HolonomicSequence(n**2 + (n**2 + 2*n)*Sn**2, [0, 0, C_2]), 0, 1)]
    assert p.to_sequence() == q
    p = p.diff()
    q = [(HolonomicSequence((n + 2) + (n + 2)*Sn**2, [C_0, 0]), 1, 0)]
    assert p.to_sequence() == q
    p = expr_to_holonomic(erf(x) + x).to_sequence()
    q = [(HolonomicSequence((2*n**2 - 2*n) + (n**3 + 2*n**2 - n - 2)*Sn**2, [0, 1 + 2/sqrt(pi), 0, C_3]), 0, 2)]
    assert p == q
def test_series():
    # Power-series expansion of holonomic functions, compared against the
    # series of the corresponding elementary expressions.
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx**2 + 2*x*Dx, x, 0, [0, 1]).series(n=10)
    q = x - x**3/3 + x**5/10 - x**7/42 + x**9/216 + O(x**10)
    assert p == q
    p = HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1])  # e^(x**2)
    q = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0])  # cos(x)
    r = (p * q).series(n=10)  # expansion of cos(x) * exp(x**2)
    s = 1 + x**2/2 + x**4/24 - 31*x**6/720 - 179*x**8/8064 + O(x**10)
    assert r == s
    t = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1])  # log(1 + x)
    r = (p * t + q).series(n=10)
    s = 1 + x - x**2 + 4*x**3/3 - 17*x**4/24 + 31*x**5/30 - 481*x**6/720 +\
        71*x**7/105 - 20159*x**8/40320 + 379*x**9/840 + O(x**10)
    assert r == s
    p = HolonomicFunction((6+6*x-3*x**2) - (10*x-3*x**2-3*x**3)*Dx + \
        (4-6*x**3+2*x**4)*Dx**2, x, 0, [0, 1]).series(n=7)
    q = x + x**3/6 - 3*x**4/16 + x**5/20 - 23*x**6/960 + O(x**7)
    assert p == q
    p = HolonomicFunction((6+6*x-3*x**2) - (10*x-3*x**2-3*x**3)*Dx + \
        (4-6*x**3+2*x**4)*Dx**2, x, 0, [1, 0]).series(n=7)
    q = 1 - 3*x**2/4 - x**3/4 - 5*x**4/32 - 3*x**5/40 - 17*x**6/384 + O(x**7)
    assert p == q
    # Undetermined constant C_3 is substituted to match the exact series.
    p = expr_to_holonomic(erf(x) + x).series(n=10)
    C_3 = symbols('C_3')
    q = (erf(x) + x).series(n=10)
    assert p.subs(C_3, -2/(3*sqrt(pi))) == q
    assert expr_to_holonomic(sqrt(x**3 + x)).series(n=10) == sqrt(x**3 + x).series(n=10)
    assert expr_to_holonomic((2*x - 3*x**2)**(S(1)/3)).series() == ((2*x - 3*x**2)**(S(1)/3)).series()
    assert expr_to_holonomic(sqrt(x**2-x)).series() == (sqrt(x**2-x)).series()
    # y0 given as a dict handles Laurent-type (negative power) behaviour.
    assert expr_to_holonomic(cos(x)**2/x**2, y0={-2: [1, 0, -1]}).series(n=10) == (cos(x)**2/x**2).series(n=10)
    assert expr_to_holonomic(cos(x)**2/x**2, x0=1).series(n=10) == (cos(x)**2/x**2).series(n=10, x0=1)
    assert expr_to_holonomic(cos(x-1)**2/(x-1)**2, x0=1, y0={-2: [1, 0, -1]}).series(n=10) \
        == (cos(x-1)**2/(x-1)**2).series(x0=1, n=10)
def test_evalf_euler():
    """Numerical evaluation with Euler's method along straight and polygonal
    paths in the complex plane.  Euler is low-accuracy, so the expected
    values are hard-coded strings that only approximate the exact answers."""
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    # log(1+x)
    p = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1])
    # path taken is a straight line from 0 to 1, on the real axis
    r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    s = '0.699525841805253'  # approx. equal to log(2) i.e. 0.693147180559945
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # path taken is a triangle 0-->1+i-->2
    r = [0.1 + 0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1+0.1*I)
    for i in range(10):
        r.append(r[-1]+0.1-0.1*I)
    # close to the exact solution 1.09861228866811
    # imaginary part also close to zero
    s = '1.07530466271334 - 0.0251200594793912*I'
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # sin(x)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])
    s = '0.905546532085401 - 6.93889390390723e-18*I'
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # computing sin(pi/2) using this method
    # using a linear path from 0 to pi/2
    r = [0.1]
    for i in range(14):
        r.append(r[-1] + 0.1)
    r.append(pi/2)
    s = '1.08016557252834'  # close to 1.0 (exact solution)
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # trying different path, a rectangle (0-->i-->pi/2 + i-->pi/2)
    # computing the same value sin(pi/2) using different path
    r = [0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1*I)
    for i in range(15):
        r.append(r[-1]+0.1)
    r.append(pi/2+I)
    for i in range(10):
        r.append(r[-1]-0.1*I)
    # close to 1.0
    s = '0.976882381836257 - 1.65557671738537e-16*I'
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # cos(x)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0])
    # compute cos(pi) along 0-->pi
    r = [0.05]
    for i in range(61):
        r.append(r[-1]+0.05)
    r.append(pi)
    # close to -1 (exact answer)
    s = '-1.08140824719196'
    assert sstr(p.evalf(r, method='Euler')[-1]) == s
    # a rectangular path (0 -> i -> 2+i -> 2)
    r = [0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1*I)
    for i in range(20):
        r.append(r[-1]+0.1)
    for i in range(10):
        r.append(r[-1]-0.1*I)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [1,1]).evalf(r, method='Euler')
    s = '0.501421652861245 - 3.88578058618805e-16*I'
    assert sstr(p[-1]) == s
def test_evalf_rk4():
    """Numerical evaluation with the default Runge-Kutta 4 method along
    straight and polygonal complex paths; RK4 is far more accurate than
    Euler, which the hard-coded expected strings reflect."""
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    # log(1+x)
    p = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1])
    # path taken is a straight line from 0 to 1, on the real axis
    r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    s = '0.693146363174626'  # approx. equal to log(2) i.e. 0.693147180559945
    assert sstr(p.evalf(r)[-1]) == s
    # path taken is a triangle 0-->1+i-->2
    r = [0.1 + 0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1+0.1*I)
    for i in range(10):
        r.append(r[-1]+0.1-0.1*I)
    # close to the exact solution 1.09861228866811
    # imaginary part also close to zero
    s = '1.098616 + 1.36083e-7*I'
    assert sstr(p.evalf(r)[-1].n(7)) == s
    # sin(x)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])
    s = '0.90929463522785 + 1.52655665885959e-16*I'
    assert sstr(p.evalf(r)[-1]) == s
    # computing sin(pi/2) using this method
    # using a linear path from 0 to pi/2
    r = [0.1]
    for i in range(14):
        r.append(r[-1] + 0.1)
    r.append(pi/2)
    s = '0.999999895088917'  # close to 1.0 (exact solution)
    assert sstr(p.evalf(r)[-1]) == s
    # trying different path, a rectangle (0-->i-->pi/2 + i-->pi/2)
    # computing the same value sin(pi/2) using different path
    r = [0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1*I)
    for i in range(15):
        r.append(r[-1]+0.1)
    r.append(pi/2+I)
    for i in range(10):
        r.append(r[-1]-0.1*I)
    # close to 1.0
    s = '1.00000003415141 + 6.11940487991086e-16*I'
    assert sstr(p.evalf(r)[-1]) == s
    # cos(x)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0])
    # compute cos(pi) along 0-->pi
    r = [0.05]
    for i in range(61):
        r.append(r[-1]+0.05)
    r.append(pi)
    # close to -1 (exact answer)
    s = '-0.999999993238714'
    assert sstr(p.evalf(r)[-1]) == s
    # a rectangular path (0 -> i -> 2+i -> 2)
    r = [0.1*I]
    for i in range(9):
        r.append(r[-1]+0.1*I)
    for i in range(20):
        r.append(r[-1]+0.1)
    for i in range(10):
        r.append(r[-1]-0.1*I)
    p = HolonomicFunction(Dx**2 + 1, x, 0, [1,1]).evalf(r)
    s = '0.493152791638442 - 1.41553435639707e-15*I'
    assert sstr(p[-1]) == s
def test_expr_to_holonomic():
    """Converting symbolic expressions to holonomic form: check the
    annihilating operator and the initial conditions produced by
    expr_to_holonomic for a variety of inputs."""
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = expr_to_holonomic((sin(x)/x)**2)
    q = HolonomicFunction(8*x + (4*x**2 + 6)*Dx + 6*x*Dx**2 + x**2*Dx**3, x, 0,
                          [1, 0, -2/3])
    assert p == q
    p = expr_to_holonomic(1/(1+x**2)**2)
    q = HolonomicFunction(4*x + (x**2 + 1)*Dx, x, 0, [1])
    assert p == q
    p = expr_to_holonomic(exp(x)*sin(x)+x*log(1+x))
    q = HolonomicFunction((2*x**3 + 10*x**2 + 20*x + 18) + (-2*x**4 - 10*x**3 - 20*x**2
                          - 18*x)*Dx + (2*x**5 + 6*x**4 + 7*x**3 + 8*x**2 + 10*x - 4)*Dx**2 +
                          (-2*x**5 - 5*x**4 - 2*x**3 + 2*x**2 - x + 4)*Dx**3 + (x**5 + 2*x**4 - x**3 -
                          7*x**2/2 + x + 5/2)*Dx**4, x, 0, [0, 1, 4, -1])
    assert p == q
    p = expr_to_holonomic(x*exp(x)+cos(x)+1)
    q = HolonomicFunction((-x - 3)*Dx + (x + 2)*Dx**2 + (-x - 3)*Dx**3 + (x + 2)*Dx**4, x,
                          0, [2, 1, 1, 3])
    assert p == q
    assert (x*exp(x)+cos(x)+1).series(n=10) == p.series(n=10)
    p = expr_to_holonomic(log(1 + x)**2 + 1)
    q = HolonomicFunction(Dx + (3*x + 3)*Dx**2 + (x**2 + 2*x + 1)*Dx**3, x, 0, [1, 0, 2])
    assert p == q
    p = expr_to_holonomic(erf(x)**2 + x)
    q = HolonomicFunction((8*x**4 - 2*x**2 + 2)*Dx**2 + (6*x**3 - x/2)*Dx**3 +
                          (x**2+ 1/4)*Dx**4, x, 0, [0, 1, 8/pi, 0])
    assert p == q
    p = expr_to_holonomic(cosh(x)*x)
    q = HolonomicFunction((-x**2 + 2) -2*x*Dx + x**2*Dx**2, x, 0, [0, 1])
    assert p == q
    p = expr_to_holonomic(besselj(2, x))
    q = HolonomicFunction((x**2 - 4) + x*Dx + x**2*Dx**2, x, 0, [0, 0])
    assert p == q
    p = expr_to_holonomic(besselj(0, x) + exp(x))
    q = HolonomicFunction((-x**2 - x/2 + 1/2) + (x**2 - x/2 - 3/2)*Dx + (-x**2 + x/2 + 1)*Dx**2 +
                          (x**2 + x/2)*Dx**3, x, 0, [2, 1, 1/2])
    assert p == q
    p = expr_to_holonomic(sin(x)**2/x)
    q = HolonomicFunction(4 + 4*x*Dx + 3*Dx**2 + x*Dx**3, x, 0, [0, 1, 0])
    assert p == q
    # Same function but expanded about x0=2; the operator is unchanged,
    # only the initial conditions move.
    p = expr_to_holonomic(sin(x)**2/x, x0=2)
    q = HolonomicFunction((4) + (4*x)*Dx + (3)*Dx**2 + (x)*Dx**3, x, 2, [sin(2)**2/2,
                          sin(2)*cos(2) - sin(2)**2/4, -3*sin(2)**2/4 + cos(2)**2 - sin(2)*cos(2)])
    assert p == q
    p = expr_to_holonomic(log(x)/2 - Ci(2*x)/2 + Ci(2)/2)
    q = HolonomicFunction(4*Dx + 4*x*Dx**2 + 3*Dx**3 + x*Dx**4, x, 0,
                          [-log(2)/2 - EulerGamma/2 + Ci(2)/2, 0, 1, 0])
    assert p == q
    p = p.to_expr()
    q = log(x)/2 - Ci(2*x)/2 + Ci(2)/2
    assert p == q
    p = expr_to_holonomic(x**(S(1)/2), x0=1)
    q = HolonomicFunction(x*Dx - 1/2, x, 1, [1])
    assert p == q
    p = expr_to_holonomic(sqrt(1 + x**2))
    q = HolonomicFunction((-x) + (x**2 + 1)*Dx, x, 0, [1])
    assert p == q
    assert (expr_to_holonomic(sqrt(x) + sqrt(2*x)).to_expr() -
            (sqrt(x) + sqrt(2*x))).simplify() == 0
    assert expr_to_holonomic(3*x+2*sqrt(x)).to_expr() == 3*x+2*sqrt(x)
    # lenics requests more initial conditions; singular initial conditions
    # are given as a dict keyed by the power of the leading singularity.
    p = expr_to_holonomic((x**4+x**3+5*x**2+3*x+2)/x**2, lenics=3)
    q = HolonomicFunction((-2*x**4 - x**3 + 3*x + 4) + (x**5 + x**4 + 5*x**3 + 3*x**2 +
                          2*x)*Dx, x, 0, {-2: [2, 3, 5]})
    assert p == q
    p = expr_to_holonomic(1/(x-1)**2, lenics=3, x0=1)
    q = HolonomicFunction((2) + (x - 1)*Dx, x, 1, {-2: [1, 0, 0]})
    assert p == q
    a = symbols("a")
    p = expr_to_holonomic(sqrt(a*x), x=x)
    assert p.to_expr() == sqrt(a)*sqrt(x)
def test_to_hyper():
    """Conversion of holonomic functions to hypergeometric form, with
    hyperexpand used to recover the elementary/special function."""
    x = symbols('x')
    R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx - 2, x, 0, [3]).to_hyper()
    q = 3 * hyper([], [], 2*x)
    assert p == q
    p = hyperexpand(HolonomicFunction((1 + x) * Dx - 3, x, 0, [2]).to_hyper()).expand()
    q = 2*x**3 + 6*x**2 + 6*x + 2
    assert p == q
    p = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_hyper()
    q = -x**2*hyper((2, 2, 1), (3, 2), -x)/2 + x
    assert p == q
    # erf(x) has the hypergeometric form 2*x*1F1(1/2; 3/2; -x**2)/sqrt(pi)
    p = HolonomicFunction(2*x*Dx + Dx**2, x, 0, [0, 2/sqrt(pi)]).to_hyper()
    q = 2*x*hyper((1/2,), (3/2,), -x**2)/sqrt(pi)
    assert p == q
    p = hyperexpand(HolonomicFunction(2*x*Dx + Dx**2, x, 0, [1, -2/sqrt(pi)]).to_hyper())
    q = erfc(x)
    assert p.rewrite(erfc) == q
    p = hyperexpand(HolonomicFunction((x**2 - 1) + x*Dx + x**2*Dx**2,
                                      x, 0, [0, S(1)/2]).to_hyper())
    q = besselj(1, x)
    assert p == q
    p = hyperexpand(HolonomicFunction(x*Dx**2 + Dx + x, x, 0, [1, 0]).to_hyper())
    q = besselj(0, x)
    assert p == q
def test_to_expr():
    """Round-trip holonomic representations back to symbolic expressions."""
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(Dx - 1, x, 0, [1]).to_expr()
    q = exp(x)
    assert p == q
    p = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).to_expr()
    q = cos(x)
    assert p == q
    p = HolonomicFunction(Dx**2 - 1, x, 0, [1, 0]).to_expr()
    q = cosh(x)
    assert p == q
    p = HolonomicFunction(2 + (4*x - 1)*Dx +
                          (x**2 - x)*Dx**2, x, 0, [1, 2]).to_expr().expand()
    q = 1/(x**2 - 2*x + 1)
    assert p == q
    p = expr_to_holonomic(sin(x)**2/x).integrate((x, 0, x)).to_expr()
    q = (sin(x)**2/x).integrate((x, 0, x))
    assert p == q
    # C_0 .. C_3 are the undetermined constants that to_expr() may leave
    # in the result when initial conditions do not pin them down.
    C_0, C_1, C_2, C_3 = symbols('C_0, C_1, C_2, C_3')
    p = expr_to_holonomic(log(1+x**2)).to_expr()
    q = C_2*log(x**2 + 1)
    assert p == q
    p = expr_to_holonomic(log(1+x**2)).diff().to_expr()
    q = C_0*x/(x**2 + 1)
    assert p == q
    p = expr_to_holonomic(erf(x) + x).to_expr()
    q = 3*C_3*x - 3*sqrt(pi)*C_3*erf(x)/2 + x + 2*x/sqrt(pi)
    assert p == q
    p = expr_to_holonomic(sqrt(x), x0=1).to_expr()
    assert p == sqrt(x)
    assert expr_to_holonomic(sqrt(x)).to_expr() == sqrt(x)
    p = expr_to_holonomic(sqrt(1 + x**2)).to_expr()
    assert p == sqrt(1+x**2)
    p = expr_to_holonomic((2*x**2 + 1)**(S(2)/3)).to_expr()
    assert p == (2*x**2 + 1)**(S(2)/3)
    p = expr_to_holonomic(sqrt(-x**2+2*x)).to_expr()
    assert p == sqrt(x)*sqrt(-x + 2)
    p = expr_to_holonomic((-2*x**3+7*x)**(S(2)/3)).to_expr()
    q = x**(S(2)/3)*(-2*x**2 + 7)**(S(2)/3)
    assert p == q
    p = from_hyper(hyper((-2, -3), (S(1)/2, ), x))
    s = hyperexpand(hyper((-2, -3), (S(1)/2, ), x))
    D_0 = Symbol('D_0')
    C_0 = Symbol('C_0')
    assert (p.to_expr().subs({C_0:1, D_0:0}) - s).simplify() == 0
    # Supplying explicit initial conditions removes the free constants.
    p.y0 = {0: [1], S(1)/2: [0]}
    assert p.to_expr() == s
    assert expr_to_holonomic(x**5).to_expr() == x**5
    assert expr_to_holonomic(2*x**3-3*x**2).to_expr().expand() == \
        2*x**3-3*x**2
    a = symbols("a")
    p = (expr_to_holonomic(1.4*x)*expr_to_holonomic(a*x, x)).to_expr()
    q = 1.4*a*x**2
    assert p == q
    p = (expr_to_holonomic(1.4*x)+expr_to_holonomic(a*x, x)).to_expr()
    q = x*(a + 1.4)
    assert p == q
    p = (expr_to_holonomic(1.4*x)+expr_to_holonomic(x)).to_expr()
    assert p == 2.4*x
def test_integrate():
    """Definite and indefinite integration of holonomic functions.
    Numerical results are compared through their string form."""
    x = symbols('x')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = expr_to_holonomic(sin(x)**2/x, x0=1).integrate((x, 2, 3))
    q = '0.166270406994788'
    assert sstr(p) == q
    p = expr_to_holonomic(sin(x)).integrate((x, 0, x)).to_expr()
    q = 1 - cos(x)
    assert p == q
    p = expr_to_holonomic(sin(x)).integrate((x, 0, 3))
    q = 1 - cos(3)
    assert p == q
    p = expr_to_holonomic(sin(x)/x, x0=1).integrate((x, 1, 2))
    q = '0.659329913368450'
    assert sstr(p) == q
    # Reversed limits give the negated integral.
    p = expr_to_holonomic(sin(x)**2/x, x0=1).integrate((x, 1, 0))
    q = '-0.423690480850035'
    assert sstr(p) == q
    p = expr_to_holonomic(sin(x)/x)
    assert p.integrate(x).to_expr() == Si(x)
    assert p.integrate((x, 0, 2)) == Si(2)
    p = expr_to_holonomic(sin(x)**2/x)
    q = p.to_expr()
    assert p.integrate(x).to_expr() == q.integrate((x, 0, x))
    assert p.integrate((x, 0, 1)) == q.integrate((x, 0, 1))
    assert expr_to_holonomic(1/x, x0=1).integrate(x).to_expr() == log(x)
    p = expr_to_holonomic((x + 1)**3*exp(-x), x0=-1).integrate(x).to_expr()
    q = (-x**3 - 6*x**2 - 15*x + 6*exp(x + 1) - 16)*exp(-x)
    assert p == q
    p = expr_to_holonomic(cos(x)**2/x**2, y0={-2: [1, 0, -1]}).integrate(x).to_expr()
    q = -Si(2*x) - cos(x)**2/x
    assert p == q
    p = expr_to_holonomic(sqrt(x**2+x)).integrate(x).to_expr()
    q = (x**(3/2)*(2*x**2 + 3*x + 1) - x*sqrt(x + 1)*asinh(sqrt(x)))/(4*x*sqrt(x + 1))
    assert p == q
    p = expr_to_holonomic(sqrt(x**2+1)).integrate(x).to_expr()
    q = (sqrt(x**2+1)).integrate(x)
    assert (p-q).simplify() == 0
    # Singular initial conditions via y0 and via lenics must agree.
    p = expr_to_holonomic(1/x**2, y0={-2:[1, 0, 0]})
    r = expr_to_holonomic(1/x**2, lenics=3)
    assert p == r
    q = expr_to_holonomic(cos(x)**2)
    assert (r*q).integrate(x).to_expr() == -Si(2*x) - cos(x)**2/x
def test_diff():
    """Differentiation of holonomic functions, including differentiation
    with respect to an unrelated symbol (which must give 0)."""
    x, y = symbols('x, y')
    R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    p = HolonomicFunction(x*Dx**2 + 1, x, 0, [0, 1])
    assert p.diff().to_expr() == p.to_expr().diff().simplify()
    p = HolonomicFunction(Dx**2 - 1, x, 0, [1, 0])
    # cosh satisfies y'' = y, so differentiating twice is the identity.
    assert p.diff(x, 2).to_expr() == p.to_expr()
    p = expr_to_holonomic(Si(x))
    assert p.diff().to_expr() == sin(x)/x
    # x-holonomic function is constant in y.
    assert p.diff(y) == 0
    # C_0 .. C_3 are undetermined constants that to_expr()/series() may emit.
    C_0, C_1, C_2, C_3 = symbols('C_0, C_1, C_2, C_3')
    q = Si(x)
    assert p.diff(x).to_expr() == q.diff()
    assert p.diff(x, 2).to_expr().subs(C_0, -S(1)/3) == q.diff(x, 2).simplify()
    assert p.diff(x, 3).series().subs({C_3:-S(1)/3, C_0:0}) == q.diff(x, 3).series()
def test_extended_domain_in_expr_to_holonomic():
    """expr_to_holonomic over extended ground domains: floating-point
    coefficients (RR) and extra symbolic parameters."""
    x = symbols('x')
    p = expr_to_holonomic(1.2*cos(3.1*x))
    assert p.to_expr() == 1.2*cos(3.1*x)
    assert sstr(p.integrate(x).to_expr()) == '0.387096774193548*sin(3.1*x)'
    _, Dx = DifferentialOperators(RR.old_poly_ring(x), 'Dx')
    p = expr_to_holonomic(1.1329138213*x)
    q = HolonomicFunction((-1.1329138213) + (1.1329138213*x)*Dx, x, 0, {1: [1.1329138213]})
    assert p == q
    assert p.to_expr() == 1.1329138213*x
    assert sstr(p.integrate((x, 1, 2))) == sstr((1.1329138213*x).integrate((x, 1, 2)))
    y, z = symbols('y, z')
    # y and z act as symbolic parameters; x is the holonomic variable.
    p = expr_to_holonomic(sin(x*y*z), x=x)
    assert p.to_expr() == sin(x*y*z)
    assert p.integrate(x).to_expr() == (-cos(x*y*z) + 1)/(y*z)
    p = expr_to_holonomic(sin(x*y + z), x=x).integrate(x).to_expr()
    q = (cos(z) - cos(x*y + z))/y
    assert p == q
    a = symbols('a')
    p = expr_to_holonomic(a*x, x)
    assert p.to_expr() == a*x
    assert p.integrate(x).to_expr() == a*x**2/2
    # D_2 and C_1 are undetermined constants introduced by to_expr().
    D_2, C_1 = symbols("D_2, C_1")
    p = expr_to_holonomic(x) + expr_to_holonomic(1.2*cos(x))
    p = p.to_expr().subs(D_2, 0)
    assert p - x - 1.2*cos(1.0*x) == 0
    p = expr_to_holonomic(x) * expr_to_holonomic(1.2*cos(x))
    p = p.to_expr().subs(C_1, 0)
    assert p - 1.2*x*cos(1.0*x) == 0
def test_to_meijerg():
    """Conversion of holonomic functions to Meijer G-function form and
    recovery of the original function via hyperexpand."""
    x = symbols('x')
    assert hyperexpand(expr_to_holonomic(sin(x)).to_meijerg()) == sin(x)
    assert hyperexpand(expr_to_holonomic(cos(x)).to_meijerg()) == cos(x)
    assert hyperexpand(expr_to_holonomic(exp(x)).to_meijerg()) == exp(x)
    assert hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify() == log(x)
    assert expr_to_holonomic(4*x**2/3 + 7).to_meijerg() == 4*x**2/3 + 7
    assert hyperexpand(expr_to_holonomic(besselj(2, x), lenics=3).to_meijerg()) == besselj(2, x)
    p = hyper((-S(1)/2, -3), (), x)
    assert from_hyper(p).to_meijerg() == hyperexpand(p)
    p = hyper((S(1), S(3)), (S(2), ), x)
    assert (hyperexpand(from_hyper(p).to_meijerg()) - hyperexpand(p)).expand() == 0
    p = from_hyper(hyper((-2, -3), (S(1)/2, ), x))
    s = hyperexpand(hyper((-2, -3), (S(1)/2, ), x))
    # C_0, C_1, D_0 are the undetermined constants left by the conversion.
    C_0 = Symbol('C_0')
    C_1 = Symbol('C_1')
    D_0 = Symbol('D_0')
    assert (hyperexpand(p.to_meijerg()).subs({C_0:1, D_0:0}) - s).simplify() == 0
    # Explicit initial conditions remove the free constants entirely.
    p.y0 = {0: [1], S(1)/2: [0]}
    assert (hyperexpand(p.to_meijerg()) - s).simplify() == 0
    p = expr_to_holonomic(besselj(S(1)/2, x), initcond=False)
    assert (p.to_expr() - (D_0*sin(x) + C_0*cos(x) + C_1*sin(x))/sqrt(x)).simplify() == 0
    p = expr_to_holonomic(besselj(S(1)/2, x), y0={S(-1)/2: [sqrt(2)/sqrt(pi), sqrt(2)/sqrt(pi)]})
    assert (p.to_expr() - besselj(S(1)/2, x) - besselj(S(-1)/2, x)).simplify() == 0
def test_gaussian():
mu, x = symbols("mu x")
sd = symbols("sd", positive=True)
Q = QQ[mu, sd].get_field()
e = sqrt(2)*exp(-(-mu + x)**2/(2*sd**2))/(2*sqrt(pi)*sd)
h1 = expr_to_holonomic(e, x, domain=Q)
_, Dx = DifferentialOperators(Q.old_poly_ring(x), 'Dx')
h2 = HolonomicFunction((-mu/sd**2 + x/sd**2) + (1)*Dx, x)
assert h1 == h2
def test_beta():
    """The Beta(a, b) density should convert to a first-order holonomic ODE."""
    a, b, x = symbols("a b x", positive=True)
    Q = QQ[a, b].get_field()
    _, Dx = DifferentialOperators(Q.old_poly_ring(x), 'Dx')
    density = x**(a - 1)*(-x + 1)**(b - 1)/beta(a, b)
    converted = expr_to_holonomic(density, x, domain=Q)
    expected = HolonomicFunction((a + x*(-a - b + 2) - 1) + (x**2 - x)*Dx, x)
    assert converted == expected
def test_gamma():
    """The Gamma(a, b) density should convert to a first-order holonomic ODE."""
    a, b, x = symbols("a b x", positive=True)
    Q = QQ[a, b].get_field()
    _, Dx = DifferentialOperators(Q.old_poly_ring(x), 'Dx')
    density = b**(-a)*x**(a - 1)*exp(-x/b)/gamma(a)
    converted = expr_to_holonomic(density, x, domain=Q)
    expected = HolonomicFunction((-a + 1 + x/b) + (x)*Dx, x)
    assert converted == expected
def test_symbolic_power():
    """Raising x (annihilated by x*Dx - 1) to a symbolic power -n must
    produce the annihilator n + x*Dx."""
    x, n = symbols("x n")
    field = QQ[n].get_field()
    _, Dx = DifferentialOperators(field.old_poly_ring(x), 'Dx')
    computed = HolonomicFunction((-1) + (x)*Dx, x) ** -n
    expected = HolonomicFunction((n) + (x)*Dx, x)
    assert computed == expected
def test_negative_power():
x = symbols("x")
_, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
h1 = HolonomicFunction((-1) + (x)*Dx, x) ** -2
h2 = HolonomicFunction((2) + (x)*Dx, x)
assert h1 == h2
def test_expr_in_power():
    """A symbolic-expression exponent (n - 3) must yield the annihilator
    (-n + 3) + x*Dx."""
    x, n = symbols("x n")
    field = QQ[n].get_field()
    _, Dx = DifferentialOperators(field.old_poly_ring(x), 'Dx')
    computed = HolonomicFunction((-1) + (x)*Dx, x) ** (n - 3)
    expected = HolonomicFunction((-n + 3) + (x)*Dx, x)
    assert computed == expected
|
wxgeo/geophar
|
wxgeometrie/sympy/holonomic/tests/test_holonomic.py
|
Python
|
gpl-2.0
| 33,101
|
from pyqtgraph import RectROI
from pyqtgraph import QtCore
from pyqtgraph import QtGui
from pyqtgraph import LayoutWidget
from .utils import delete_content
from .utils import compute_bbox_of_points
from .color import JChooseColor
from .color import setup_color
from .remove_item import JRemoveItem
class Jrectangle(RectROI, JChooseColor, JRemoveItem):
    """Rectangle ROI with color selection, an info dock and removal support.

    Combines pyqtgraph's RectROI with the project's JChooseColor and
    JRemoveItem mixins; persisted with save()/load() as a ``*JRectangle``
    record followed by a dict literal.
    """

    def __init__(self, pos, size, viewbox=None):
        """Create a rectangle at *pos* with *size*, optionally attached to a viewbox."""
        RectROI.__init__(self, pos, size)
        JChooseColor.__init__(self)
        self.set_black_color()
        JRemoveItem.__init__(self, viewbox)
        # NOTE(review): viewbox=None would raise AttributeError here —
        # confirm that callers always pass a viewbox despite the default.
        self.info_dock = viewbox.info_dock
        self._menu = self._build_menu()
        self._display_info_dock()
        # Paint the resize handles black to match the default color.
        for h in self.handles:
            handle = h["item"]
            handle.currentPen.setColor(QtGui.QColor(0, 0, 0))

    @classmethod
    def load(cls, s, viewbox=None):
        """Reconstruct a Jrectangle from its string serialization (see save())."""
        # NOTE(review): both format checks only print and fall through, so a
        # malformed string still reaches eval() below — confirm intended.
        if "*JRectangle" not in s:
            print("Error reading a rectangle from a string %s" % s)
        s = s.replace("*JRectangle", "")
        if s[0] != "{" or s[-1] != "}":
            print("Error the string is in the wrong format")
        # HACK: eval() on file content is unsafe on untrusted input.
        data = eval(s)
        rectangle = cls(data["pos"], data["size"], viewbox=viewbox)
        setup_color(rectangle, data["color"])
        if viewbox is not None:
            viewbox.label.setText("Rectangle loaded.")
        return rectangle

    def get_save_control_points(self):
        """Return the control points used when saving (the outline corners)."""
        return self.get_drawing_points()

    def save(self, file, pos=None, size=None):
        """Append this rectangle's serialization to *file*.

        When *pos*/*size* are omitted, the current position and size are used.
        """
        if pos is None and size is None:
            pos = [self.pos().x(), self.pos().y()]
            size = [self.size().x(), self.size().y()]
        data = {
            "color": self.color,
            "pos": pos,
            "size": size
        }
        file.write("*JRectangle\n")
        file.write(str(data) + "\n")

    def _build_menu(self):
        """Build the right-click context menu."""
        menu = QtGui.QMenu()
        menu.setTitle("Rectangle")
        menu.addAction("Remove", self.remove_item)
        return menu

    def mouseClickEvent(self, ev):
        """Show the info dock on any click; open the context menu on right click."""
        self._display_info_dock()
        if ev.button() == QtCore.Qt.RightButton:
            self._raise_menu(ev)

    def get_drawing_points(self):
        """Return the closed rectangle outline as five [x, y] points
        (the first corner is repeated at the end)."""
        low = [self.pos().x(), self.pos().y()]
        disp = [self.size().x(), self.size().y()]
        low_x = low[0]
        low_y = low[1]
        dx = disp[0]
        dy = disp[1]
        pts = [low, [low_x, low_y + dy], [low_x + dx, low_y + dy],
               [low_x + dx, low_y], low]
        return pts

    def compute_bbox(self):
        """Return the bounding box of the rectangle's outline points."""
        return compute_bbox_of_points(self.get_drawing_points())

    def paint(self, p, *args):
        """Paint with antialiasing and the currently selected pen."""
        p.setRenderHint(QtGui.QPainter.Antialiasing)
        p.setPen(self.currentPen)
        super().paint(p, *args)

    def _raise_menu(self, event):
        """Pop up the context menu at the event's screen position."""
        pos = event.screenPos()
        self._menu.popup(QtCore.QPoint(pos.x(), pos.y()))

    def _display_info_dock(self):
        """Rebuild the info dock with this rectangle's controls."""
        if self.info_dock is None:
            return
        delete_content(self.info_dock)
        container = LayoutWidget()
        label1 = QtGui.QLabel("Rectangle")
        container.addWidget(label1, row=0, col=0)
        color_dock_widget = self.get_color_dock_widget()
        container.addWidget(color_dock_widget, row=1, col=0)
        remove_item_widget = self.get_remove_item_dock_widget()
        container.addWidget(remove_item_widget, row=2, col=0)
        # Spacer pushes the widgets to the top of the dock.
        vertical_spacer = QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Minimum,
                                            QtGui.QSizePolicy.Expanding)
        container.layout.addItem(vertical_spacer, 3, 0)
        self.info_dock.addWidget(container)
|
jakaspeh/JDesigner
|
jdesigner/jrectangle.py
|
Python
|
mit
| 3,610
|
__author__ = 'andreas'
from random import randint
# Simple number-guessing game: the player keeps guessing until the
# randomly chosen secret (1..10) is found.
secret = randint(1, 10)
print("Welcome!")
guess = 0
while guess != secret:
    g = input("Guess the Number")
    # Fixed: non-numeric input used to crash with ValueError; re-prompt instead.
    try:
        guess = int(g)
    except ValueError:
        print("Please enter a whole number.")
        continue
    if guess == secret:
        print("You win!")
    elif guess > secret:
        print("Your guess is too high")
    else:
        print("Too low")
print("Game over!")
|
Nebucatnetzer/Learning_python
|
guessing_game/guessing_game.py
|
Python
|
gpl-2.0
| 371
|
from bs4 import BeautifulSoup
import urllib
# Fetch the Dutch Wikipedia page for the 2012 general election.
# (Python 2 code: urllib.urlopen and print statements.)
r = urllib.urlopen("https://nl.wikipedia.org/wiki/Tweede_Kamerverkiezingen_2012").read()
soup = BeautifulSoup(r, "html.parser")
# Each "thumb tright" div holds one results graphic; pull its caption,
# the styled paragraph and the SP party link text.
for item in soup.findAll("div", {"class": "thumb tright"}):
    tekst = item.findAll("div", {"class": "thumbcaption"}) [0].getText()
    lijstSP = item.findAll('p', {"style": "margin:0px;font-size:90%;"})[0].getText()
    SP = item.findAll('a', {"title": "Socialistische Partij (Nederland)"})[0].getText()
    print tekst
    print lijstSP
    print SP
|
ArtezGDA/MappingTheCity-Data
|
ElectionData/ElectionData-Scraper-Wikipedia.py
|
Python
|
mit
| 560
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the events storage API.
"""
from oslo_utils import timeutils
def serialize_dt(value):
    """Return ``value.isoformat()`` for datetime-like values, else *value*.

    Duck-typed: anything exposing an ``isoformat`` attribute is serialized.
    """
    if hasattr(value, 'isoformat'):
        return value.isoformat()
    return value
class Model(object):
    """Base class for storage API models.

    Keyword arguments given to the constructor become attributes, and
    their names are remembered in ``self.fields`` for as_dict()/equality.
    """

    def __init__(self, **kwds):
        """Store each keyword argument as an attribute of the instance."""
        self.fields = list(kwds)
        for k, v in kwds.items():
            setattr(self, k, v)

    def as_dict(self):
        """Return the model's fields as a dict, recursing into nested
        Model values and lists of Models."""
        d = {}
        for f in self.fields:
            v = getattr(self, f)
            if isinstance(v, Model):
                v = v.as_dict()
            elif isinstance(v, list) and v and isinstance(v[0], Model):
                v = [sub.as_dict() for sub in v]
            d[f] = v
        return d

    def __eq__(self, other):
        # Fixed: comparing against a non-Model used to raise AttributeError
        # (other.as_dict() missing); return NotImplemented so Python falls
        # back to the default identity comparison.
        if not isinstance(other, Model):
            return NotImplemented
        return self.as_dict() == other.as_dict()

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class Event(Model):
    """A raw event from the source system. Events have Traits.

    Metrics will be derived from one or more Events.
    """

    # Problem codes reported when storing events.
    DUPLICATE = 1
    UNKNOWN_PROBLEM = 2
    INCOMPATIBLE_TRAIT = 3

    def __init__(self, message_id, event_type, generated, traits, raw):
        """Create a new event.

        :param message_id: Unique ID for the message this event stemmed
                           from. This is different than the Event ID,
                           which comes from the underlying storage system.
        :param event_type: The type of the event.
        :param generated: UTC time for when the event occurred.
        :param traits: list of Traits on this Event.
        :param raw: Unindexed raw notification details.
        """
        Model.__init__(self, message_id=message_id, event_type=event_type,
                       generated=generated, traits=traits, raw=raw)

    def append_trait(self, trait_model):
        """Add a trait model to this event's trait list."""
        self.traits.append(trait_model)

    def __repr__(self):
        joined = " ".join(str(t) for t in self.traits) if self.traits else ""
        return ("<Event: %s, %s, %s, %s>"
                % (self.message_id, self.event_type, self.generated, joined))

    def serialize(self):
        """Return a JSON-friendly dict representation of the event."""
        serialized_traits = [trait.serialize() for trait in self.traits]
        return {'message_id': self.message_id,
                'event_type': self.event_type,
                'generated': serialize_dt(self.generated),
                'traits': serialized_traits,
                'raw': self.raw}
class Trait(Model):
    """A Trait is a key/value pair of data on an Event.

    The value is variant record of basic data types (int, date, float, etc).
    """

    # Serialized trait data-type codes.
    NONE_TYPE = 0
    TEXT_TYPE = 1
    INT_TYPE = 2
    FLOAT_TYPE = 3
    DATETIME_TYPE = 4

    type_names = {
        NONE_TYPE: "none",
        TEXT_TYPE: "string",
        INT_TYPE: "integer",
        FLOAT_TYPE: "float",
        DATETIME_TYPE: "datetime"
    }

    def __init__(self, name, dtype, value):
        """Create a trait; a falsy *dtype* defaults to NONE_TYPE."""
        if not dtype:
            dtype = Trait.NONE_TYPE
        Model.__init__(self, name=name, dtype=dtype, value=value)

    def __repr__(self):
        return "<Trait: %s %d %s>" % (self.name, self.dtype, self.value)

    def serialize(self):
        """Return the (name, dtype, value) tuple with datetimes serialized."""
        return self.name, self.dtype, serialize_dt(self.value)

    def get_type_name(self):
        """Return the human-readable name of this trait's type."""
        return self.get_name_by_type(self.dtype)

    @classmethod
    def get_type_by_name(cls, type_name):
        """Map a type name (e.g. "integer") to its code, or None if unknown."""
        return getattr(cls, '%s_TYPE' % type_name.upper(), None)

    @classmethod
    def get_type_names(cls):
        """Return all known type names."""
        return cls.type_names.values()

    @classmethod
    def get_name_by_type(cls, type_id):
        """Map a type code to its name, defaulting to "none"."""
        return cls.type_names.get(type_id, "none")

    @classmethod
    def convert_value(cls, trait_type, value):
        """Coerce *value* to the Python type implied by *trait_type*.

        Text values are cropped to 255 characters to match the TraitText
        column size.
        """
        # Fixed: these comparisons used ``is``, which relies on CPython's
        # small-int caching for correctness; ``==`` is the correct check.
        if trait_type == cls.INT_TYPE:
            return int(value)
        if trait_type == cls.FLOAT_TYPE:
            return float(value)
        if trait_type == cls.DATETIME_TYPE:
            return timeutils.normalize_time(timeutils.parse_isotime(value))
        # Cropping the text value to match the TraitText value size
        if isinstance(value, bytes):
            return value.decode('utf-8')[:255]
        return str(value)[:255]
|
openstack/ceilometer
|
ceilometer/event/models.py
|
Python
|
apache-2.0
| 4,780
|
from django.shortcuts import get_object_or_404, render
# Create your views here.
from django.http import HttpResponse
from django.template import RequestContext
from .models import Text
def index(request):
    """Render the five most recent texts on the index page."""
    latest_text_list = Text.objects.order_by("-pub_date")[:5]
    # NOTE(review): passing a RequestContext as the context argument of
    # render() is only supported on old Django versions (< 1.10); newer
    # versions require a plain dict — confirm the project's Django version.
    context = RequestContext(request, {"latest_text_list": latest_text_list})
    return render(request, "bettertexts/index.html", context)
def detail(request, slug):
    """Render the detail page for the Text identified by *slug* (404 if absent)."""
    text = get_object_or_404(Text, slug=slug)
    context = {"text": text, "siteProfile": text.site.siteProfile}
    return render(request, "bettertexts/detail.html", context)
def results(request, text_id):
    """Placeholder results view: echo the requested text id."""
    return HttpResponse("You're looking at the results of text %s." % text_id)
def vote(request, text_id):
    """Placeholder voting view: echo the requested text id."""
    message = "You're voting on text %s." % text_id
    return HttpResponse(message)
|
citizenline/citizenline
|
bettertexts/views.py
|
Python
|
mit
| 852
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that displays interactions with Google Cloud Functions.
It creates a function and then deletes it.
This DAG relies on the following OS environment variables
https://airflow.apache.org/concepts.html#variables
* GCP_PROJECT_ID - Google Cloud Project to use for the Cloud Function.
* GCP_LOCATION - Google Cloud Functions region where the function should be
created.
* GCF_ENTRYPOINT - Name of the executable function in the source code.
* and one of the below:
* GCF_SOURCE_ARCHIVE_URL - Path to the zipped source in Google Cloud Storage
* GCF_SOURCE_UPLOAD_URL - Generated upload URL for the zipped source and GCF_ZIP_PATH - Local path to
the zipped source archive
* GCF_SOURCE_REPOSITORY - The URL pointing to the hosted repository where the function
is defined in a supported Cloud Source Repository URL format
https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions#SourceRepository
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.functions import (
CloudFunctionDeleteFunctionOperator, CloudFunctionDeployFunctionOperator,
CloudFunctionInvokeFunctionOperator,
)
from airflow.utils import dates
# [START howto_operator_gcf_common_variables]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCP_LOCATION = os.environ.get('GCP_LOCATION', 'europe-west1')
GCF_SHORT_FUNCTION_NAME = os.environ.get('GCF_SHORT_FUNCTION_NAME', 'hello').\
    replace("-", "_")  # make sure there are no dashes in function name (!)
FUNCTION_NAME = 'projects/{}/locations/{}/functions/{}'.format(GCP_PROJECT_ID,
                                                               GCP_LOCATION,
                                                               GCF_SHORT_FUNCTION_NAME)
# [END howto_operator_gcf_common_variables]
# [START howto_operator_gcf_deploy_variables]
GCF_SOURCE_ARCHIVE_URL = os.environ.get('GCF_SOURCE_ARCHIVE_URL', '')
GCF_SOURCE_UPLOAD_URL = os.environ.get('GCF_SOURCE_UPLOAD_URL', '')
GCF_SOURCE_REPOSITORY = os.environ.get(
    'GCF_SOURCE_REPOSITORY',
    'https://source.developers.google.com/'
    'projects/{}/repos/hello-world/moveable-aliases/master'.format(GCP_PROJECT_ID))
GCF_ZIP_PATH = os.environ.get('GCF_ZIP_PATH', '')
GCF_ENTRYPOINT = os.environ.get('GCF_ENTRYPOINT', 'helloWorld')
GCF_RUNTIME = 'nodejs6'
# Fixed: os.environ.get(..., True) returned a *string* whenever the variable
# was set, so any non-empty value — including "False" — was truthy. Parse the
# environment value into a real boolean (unset still defaults to True).
GCP_VALIDATE_BODY = \
    os.environ.get('GCP_VALIDATE_BODY', 'True').lower() not in ('false', '0', 'no')
# [END howto_operator_gcf_deploy_variables]
# [START howto_operator_gcf_deploy_body]
body = {
    "name": FUNCTION_NAME,
    "entryPoint": GCF_ENTRYPOINT,
    "runtime": GCF_RUNTIME,
    "httpsTrigger": {}
}
# [END howto_operator_gcf_deploy_body]
# [START howto_operator_gcf_default_args]
default_args = {
    'start_date': dates.days_ago(1)
}
# [END howto_operator_gcf_default_args]
# [START howto_operator_gcf_deploy_variants]
# Exactly one source-code location must be configured; precedence below.
if GCF_SOURCE_ARCHIVE_URL:
    body['sourceArchiveUrl'] = GCF_SOURCE_ARCHIVE_URL
elif GCF_SOURCE_REPOSITORY:
    body['sourceRepository'] = {
        'url': GCF_SOURCE_REPOSITORY
    }
elif GCF_ZIP_PATH:
    # The operator generates the upload URL and uploads the zip itself.
    body['sourceUploadUrl'] = ''
    default_args['zip_path'] = GCF_ZIP_PATH
elif GCF_SOURCE_UPLOAD_URL:
    body['sourceUploadUrl'] = GCF_SOURCE_UPLOAD_URL
else:
    raise Exception("Please provide one of the source_code parameters")
# [END howto_operator_gcf_deploy_variants]
# Deploy the function twice (with and without an explicit project id),
# invoke it, then delete it.
with models.DAG(
    'example_gcp_function',
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
    tags=['example'],
) as dag:
    # [START howto_operator_gcf_deploy]
    deploy_task = CloudFunctionDeployFunctionOperator(
        task_id="gcf_deploy_task",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        body=body,
        validate_body=GCP_VALIDATE_BODY
    )
    # [END howto_operator_gcf_deploy]
    # [START howto_operator_gcf_deploy_no_project_id]
    deploy2_task = CloudFunctionDeployFunctionOperator(
        task_id="gcf_deploy2_task",
        location=GCP_LOCATION,
        body=body,
        validate_body=GCP_VALIDATE_BODY
    )
    # [END howto_operator_gcf_deploy_no_project_id]
    # [START howto_operator_gcf_invoke_function]
    invoke_task = CloudFunctionInvokeFunctionOperator(
        task_id="invoke_task",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        input_data={},
        function_id=GCF_SHORT_FUNCTION_NAME
    )
    # [END howto_operator_gcf_invoke_function]
    # [START howto_operator_gcf_delete]
    delete_task = CloudFunctionDeleteFunctionOperator(
        task_id="gcf_delete_task",
        name=FUNCTION_NAME
    )
    # [END howto_operator_gcf_delete]
    deploy_task >> deploy2_task >> invoke_task >> delete_task
|
wileeam/airflow
|
airflow/providers/google/cloud/example_dags/example_functions.py
|
Python
|
apache-2.0
| 5,490
|
from django.core.management.base import BaseCommand, CommandError
from apps.models.models import Initiative
# TODO: tests!!!
"""
A manage.py command to migrate cities from a CSV file
"""
class Command(BaseCommand):
    """Management command that deletes every Initiative from the database."""

    help = "Delete all Initiative models from database."

    def handle(self, *args, **options):
        """Delete all Initiative objects.

        (This docstring was previously a stray no-op string expression
        sitting between the class attributes and this method.)
        """
        Initiative.objects.all().delete()
|
Ale-/civics
|
apps/models/management/commands/delete_initiatives.py
|
Python
|
gpl-3.0
| 409
|
# This file is part of the REMOTE API
#
# Copyright 2006-2015 Coppelia Robotics GmbH. All rights reserved.
# marc@coppeliarobotics.com
# www.coppeliarobotics.com
#
# The REMOTE API is licensed under the terms of GNU GPL:
#
# -------------------------------------------------------------------
# The REMOTE API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# THE REMOTE API IS DISTRIBUTED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. THE USER WILL USE IT AT HIS/HER OWN RISK. THE ORIGINAL
# AUTHORS AND COPPELIA ROBOTICS GMBH WILL NOT BE LIABLE FOR DATA LOSS,
# DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING OR
# MISUSING THIS SOFTWARE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the REMOTE API. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.2.3 on November 24th 2015
#constants
#Scene object types. Values are serialized
sim_object_shape_type =0
sim_object_joint_type =1
sim_object_graph_type =2
sim_object_camera_type =3
sim_object_dummy_type =4
sim_object_proximitysensor_type =5
sim_object_reserved1 =6
sim_object_reserved2 =7
sim_object_path_type =8
sim_object_visionsensor_type =9
sim_object_volume_type =10
sim_object_mill_type =11
sim_object_forcesensor_type =12
sim_object_light_type =13
sim_object_mirror_type =14
#General object types. Values are serialized
sim_appobj_object_type =109
sim_appobj_collision_type =110
sim_appobj_distance_type =111
sim_appobj_simulation_type =112
sim_appobj_ik_type =113
sim_appobj_constraintsolver_type=114
sim_appobj_collection_type =115
sim_appobj_ui_type =116
sim_appobj_script_type =117
sim_appobj_pathplanning_type =118
sim_appobj_RESERVED_type =119
sim_appobj_texture_type =120
# Ik calculation methods. Values are serialized
sim_ik_pseudo_inverse_method =0
sim_ik_damped_least_squares_method =1
sim_ik_jacobian_transpose_method =2
# Ik constraints. Values are serialized
# (power-of-two bit flags -- may be or-combined; the value 32 is skipped,
# presumably reserved upstream -- TODO confirm against V-REP headers)
sim_ik_x_constraint =1
sim_ik_y_constraint =2
sim_ik_z_constraint =4
sim_ik_alpha_beta_constraint=8
sim_ik_gamma_constraint =16
sim_ik_avoidance_constraint =64
# Ik calculation results
sim_ikresult_not_performed =0
sim_ikresult_success =1
sim_ikresult_fail =2
# Scene object sub-types. Values are serialized
# Light sub-types
sim_light_omnidirectional_subtype =1
sim_light_spot_subtype =2
sim_light_directional_subtype =3
# Joint sub-types
sim_joint_revolute_subtype =10
sim_joint_prismatic_subtype =11
sim_joint_spherical_subtype =12
# Shape sub-types
sim_shape_simpleshape_subtype =20
sim_shape_multishape_subtype =21
# Proximity sensor sub-types
sim_proximitysensor_pyramid_subtype =30
sim_proximitysensor_cylinder_subtype=31
sim_proximitysensor_disc_subtype =32
sim_proximitysensor_cone_subtype =33
sim_proximitysensor_ray_subtype =34
# Mill sub-types
sim_mill_pyramid_subtype =40
sim_mill_cylinder_subtype =41
sim_mill_disc_subtype =42
# Fixed: was 42, which collided with the disc sub-type and made the two
# serialized values indistinguishable; upstream V-REP defines this as 43.
sim_mill_cone_subtype =43
# No sub-type
sim_object_no_subtype =200
#Scene object main properties (serialized)
sim_objectspecialproperty_collidable =0x0001
sim_objectspecialproperty_measurable =0x0002
#reserved =0x0004
#reserved =0x0008
sim_objectspecialproperty_detectable_ultrasonic =0x0010
sim_objectspecialproperty_detectable_infrared =0x0020
sim_objectspecialproperty_detectable_laser =0x0040
sim_objectspecialproperty_detectable_inductive =0x0080
sim_objectspecialproperty_detectable_capacitive =0x0100
sim_objectspecialproperty_renderable =0x0200
sim_objectspecialproperty_detectable_all =sim_objectspecialproperty_detectable_ultrasonic|sim_objectspecialproperty_detectable_infrared|sim_objectspecialproperty_detectable_laser|sim_objectspecialproperty_detectable_inductive|sim_objectspecialproperty_detectable_capacitive
sim_objectspecialproperty_cuttable =0x0400
sim_objectspecialproperty_pathplanning_ignored =0x0800
# Model properties (serialized)
sim_modelproperty_not_collidable =0x0001
sim_modelproperty_not_measurable =0x0002
sim_modelproperty_not_renderable =0x0004
sim_modelproperty_not_detectable =0x0008
sim_modelproperty_not_cuttable =0x0010
sim_modelproperty_not_dynamic =0x0020
sim_modelproperty_not_respondable =0x0040 # cannot be selected if sim_modelproperty_not_dynamic is not selected
sim_modelproperty_not_reset =0x0080 # Model is not reset at simulation end. This flag is cleared at simulation end
sim_modelproperty_not_visible =0x0100 # Whole model is invisible independent of local visibility settings
sim_modelproperty_not_model =0xf000 # object is not a model
# Check the documentation instead of comments below!!
# Following messages are dispatched to the Lua-message container
sim_message_ui_button_state_change =0 # a UI button slider etc. changed (due to a user's action). aux[0]=UI handle aux[1]=button handle aux[2]=button attributes aux[3]=slider position (if slider)
sim_message_reserved9 =1 # Do not use
sim_message_object_selection_changed=2
sim_message_reserved10 =3 # do not use
sim_message_model_loaded =4
sim_message_reserved11 =5 # do not use
sim_message_keypress =6 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_bannerclicked =7 # a banner was clicked (aux[0]=banner ID)
# Following messages are dispatched only to the C-API (not available from Lua)
sim_message_for_c_api_only_start =0x100 # Do not use
sim_message_reserved1 =0x101 # Do not use
sim_message_reserved2 =0x102 # Do not use
sim_message_reserved3 =0x103 # Do not use
sim_message_eventcallback_scenesave =0x104 # about to save a scene
sim_message_eventcallback_modelsave =0x105 # about to save a model (current selection will be saved)
sim_message_eventcallback_moduleopen =0x106 # called when simOpenModule in Lua is called
sim_message_eventcallback_modulehandle =0x107 # called when simHandleModule in Lua is called with argument false
sim_message_eventcallback_moduleclose =0x108 # called when simCloseModule in Lua is called
sim_message_reserved4 =0x109 # Do not use
sim_message_reserved5 =0x10a # Do not use
sim_message_reserved6 =0x10b # Do not use
sim_message_reserved7 =0x10c # Do not use
sim_message_eventcallback_instancepass =0x10d # Called once every main application loop pass. auxiliaryData[0] contains event flags of events that happened since last time
sim_message_eventcallback_broadcast =0x10e
sim_message_eventcallback_imagefilter_enumreset =0x10f
sim_message_eventcallback_imagefilter_enumerate =0x110
sim_message_eventcallback_imagefilter_adjustparams =0x111
sim_message_eventcallback_imagefilter_reserved =0x112
sim_message_eventcallback_imagefilter_process =0x113
sim_message_eventcallback_reserved1 =0x114 # do not use
sim_message_eventcallback_reserved2 =0x115 # do not use
sim_message_eventcallback_reserved3 =0x116 # do not use
sim_message_eventcallback_reserved4 =0x117 # do not use
sim_message_eventcallback_abouttoundo =0x118 # the undo button was hit and a previous state is about to be restored
sim_message_eventcallback_undoperformed =0x119 # the undo button was hit and a previous state restored
sim_message_eventcallback_abouttoredo =0x11a # the redo button was hit and a future state is about to be restored
sim_message_eventcallback_redoperformed =0x11b # the redo button was hit and a future state restored
sim_message_eventcallback_scripticondblclick =0x11c # scipt icon was double clicked. (aux[0]=object handle associated with script set replyData[0] to 1 if script should not be opened)
sim_message_eventcallback_simulationabouttostart =0x11d
sim_message_eventcallback_simulationended =0x11e
sim_message_eventcallback_reserved5 =0x11f # do not use
sim_message_eventcallback_keypress =0x120 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_eventcallback_modulehandleinsensingpart =0x121 # called when simHandleModule in Lua is called with argument true
sim_message_eventcallback_renderingpass =0x122 # called just before the scene is rendered
sim_message_eventcallback_bannerclicked =0x123 # called when a banner was clicked (aux[0]=banner ID)
sim_message_eventcallback_menuitemselected =0x124 # auxiliaryData[0] indicates the handle of the item auxiliaryData[1] indicates the state of the item
sim_message_eventcallback_refreshdialogs =0x125 # aux[0]=refresh degree (0=light 1=medium 2=full)
sim_message_eventcallback_sceneloaded =0x126
sim_message_eventcallback_modelloaded =0x127
sim_message_eventcallback_instanceswitch =0x128
sim_message_eventcallback_guipass =0x129
sim_message_eventcallback_mainscriptabouttobecalled =0x12a
sim_message_eventcallback_rmlposition =0x12b #the command simRMLPosition was called. The appropriate plugin should handle the call
sim_message_eventcallback_rmlvelocity =0x12c # the command simRMLVelocity was called. The appropriate plugin should handle the call
sim_message_simulation_start_resume_request =0x1000
sim_message_simulation_pause_request =0x1001
sim_message_simulation_stop_request =0x1002
# Scene object properties. Combine with the | operator
sim_objectproperty_reserved1 =0x0000
sim_objectproperty_reserved2 =0x0001
sim_objectproperty_reserved3 =0x0002
sim_objectproperty_reserved4 =0x0003
sim_objectproperty_reserved5 =0x0004 # formely sim_objectproperty_visible
sim_objectproperty_reserved6 =0x0008 # formely sim_objectproperty_wireframe
sim_objectproperty_collapsed =0x0010
sim_objectproperty_selectable =0x0020
sim_objectproperty_reserved7 =0x0040
sim_objectproperty_selectmodelbaseinstead =0x0080
sim_objectproperty_dontshowasinsidemodel =0x0100
# reserved =0x0200
sim_objectproperty_canupdatedna =0x0400
sim_objectproperty_selectinvisible =0x0800
sim_objectproperty_depthinvisible =0x1000
# type of arguments (input and output) for custom lua commands
sim_lua_arg_nil =0
sim_lua_arg_bool =1
sim_lua_arg_int =2
sim_lua_arg_float =3
sim_lua_arg_string =4
sim_lua_arg_invalid =5
sim_lua_arg_table =8
# custom user interface properties. Values are serialized.
sim_ui_property_visible =0x0001
sim_ui_property_visibleduringsimulationonly =0x0002
sim_ui_property_moveable =0x0004
sim_ui_property_relativetoleftborder =0x0008
sim_ui_property_relativetotopborder =0x0010
sim_ui_property_fixedwidthfont =0x0020
sim_ui_property_systemblock =0x0040
sim_ui_property_settocenter =0x0080
sim_ui_property_rolledup =0x0100
sim_ui_property_selectassociatedobject =0x0200
sim_ui_property_visiblewhenobjectselected =0x0400
# button properties. Values are serialized.
sim_buttonproperty_button =0x0000
sim_buttonproperty_label =0x0001
sim_buttonproperty_slider =0x0002
sim_buttonproperty_editbox =0x0003
sim_buttonproperty_staydown =0x0008
sim_buttonproperty_enabled =0x0010
sim_buttonproperty_borderless =0x0020
sim_buttonproperty_horizontallycentered =0x0040
sim_buttonproperty_ignoremouse =0x0080
sim_buttonproperty_isdown =0x0100
sim_buttonproperty_transparent =0x0200
sim_buttonproperty_nobackgroundcolor =0x0400
sim_buttonproperty_rollupaction =0x0800
sim_buttonproperty_closeaction =0x1000
sim_buttonproperty_verticallycentered =0x2000
sim_buttonproperty_downupevent =0x4000
# Simulation status
sim_simulation_stopped =0x00 # Simulation is stopped
sim_simulation_paused =0x08 # Simulation is paused
sim_simulation_advancing =0x10 # Simulation is advancing
sim_simulation_advancing_firstafterstop =sim_simulation_advancing|0x00 # First simulation pass (1x)
sim_simulation_advancing_running =sim_simulation_advancing|0x01 # Normal simulation pass (>=1x)
# reserved =sim_simulation_advancing|0x02
sim_simulation_advancing_lastbeforepause =sim_simulation_advancing|0x03 # Last simulation pass before pause (1x)
sim_simulation_advancing_firstafterpause =sim_simulation_advancing|0x04 # First simulation pass after pause (1x)
sim_simulation_advancing_abouttostop =sim_simulation_advancing|0x05 # "Trying to stop" simulation pass (>=1x)
sim_simulation_advancing_lastbeforestop =sim_simulation_advancing|0x06 # Last simulation pass (1x)
# Script execution result (first return value)
sim_script_no_error =0
sim_script_main_script_nonexistent =1
sim_script_main_script_not_called =2
sim_script_reentrance_error =4
sim_script_lua_error =8
sim_script_call_error =16
# Script types (serialized!)
sim_scripttype_mainscript =0
sim_scripttype_childscript =1
sim_scripttype_pluginscript =2
sim_scripttype_threaded =0x00f0 # Combine with one of above's type values
# API call error messages
sim_api_errormessage_ignore =0 # does not memorize nor output errors
sim_api_errormessage_report =1 # memorizes errors (default for C-API calls)
sim_api_errormessage_output =2 # memorizes and outputs errors (default for Lua-API calls)
# special argument of some functions
sim_handle_all =-2
sim_handle_all_except_explicit =-3
sim_handle_self =-4
sim_handle_main_script =-5
sim_handle_tree =-6
sim_handle_chain =-7
sim_handle_single =-8
sim_handle_default =-9
sim_handle_all_except_self =-10
sim_handle_parent =-11
# special handle flags
sim_handleflag_assembly =0x400000
sim_handleflag_model =0x800000
# distance calculation methods (serialized)
sim_distcalcmethod_dl =0
sim_distcalcmethod_dac =1
sim_distcalcmethod_max_dl_dac =2
sim_distcalcmethod_dl_and_dac =3
sim_distcalcmethod_sqrt_dl2_and_dac2=4
sim_distcalcmethod_dl_if_nonzero =5
sim_distcalcmethod_dac_if_nonzero =6
# Generic dialog styles
sim_dlgstyle_message =0
sim_dlgstyle_input =1
sim_dlgstyle_ok =2
sim_dlgstyle_ok_cancel =3
sim_dlgstyle_yes_no =4
sim_dlgstyle_dont_center =32# can be combined with one of above values. Only with this flag can the position of the related UI be set just after dialog creation
# Generic dialog return values
sim_dlgret_still_open =0
sim_dlgret_ok =1
sim_dlgret_cancel =2
sim_dlgret_yes =3
sim_dlgret_no =4
# Path properties
sim_pathproperty_show_line =0x0001
sim_pathproperty_show_orientation =0x0002
sim_pathproperty_closed_path =0x0004
sim_pathproperty_automatic_orientation =0x0008
sim_pathproperty_invert_velocity =0x0010
sim_pathproperty_infinite_acceleration =0x0020
sim_pathproperty_flat_path =0x0040
sim_pathproperty_show_position =0x0080
sim_pathproperty_auto_velocity_profile_translation =0x0100
sim_pathproperty_auto_velocity_profile_rotation =0x0200
sim_pathproperty_endpoints_at_zero =0x0400
sim_pathproperty_keep_x_up =0x0800
# drawing objects
# following are mutually exclusive
sim_drawing_points =0 # 3 values per point (point size in pixels)
sim_drawing_lines =1 # 6 values per line (line size in pixels)
sim_drawing_triangles =2 # 9 values per triangle
sim_drawing_trianglepoints =3 # 6 values per point (3 for triangle position 3 for triangle normal vector) (triangle size in meters)
sim_drawing_quadpoints =4 # 6 values per point (3 for quad position 3 for quad normal vector) (quad size in meters)
sim_drawing_discpoints =5 # 6 values per point (3 for disc position 3 for disc normal vector) (disc size in meters)
sim_drawing_cubepoints =6 # 6 values per point (3 for cube position 3 for cube normal vector) (cube size in meters)
sim_drawing_spherepoints =7 # 3 values per point (sphere size in meters)
# following can be or-combined
sim_drawing_itemcolors =0x00020 # +3 values per item (each item has its own ambient color (rgb values)).
# Mutually exclusive with sim_drawing_vertexcolors
sim_drawing_vertexcolors =0x00040 # +3 values per vertex (each vertex has its own ambient color (rgb values). Only for sim_drawing_lines (+6) and for sim_drawing_triangles(+9)). Mutually exclusive with sim_drawing_itemcolors
sim_drawing_itemsizes =0x00080 # +1 value per item (each item has its own size). Not for sim_drawing_triangles
sim_drawing_backfaceculling =0x00100 # back faces are not displayed for all items
sim_drawing_wireframe =0x00200 # all items displayed in wireframe
sim_drawing_painttag =0x00400 # all items are tagged as paint (for additinal processing at a later stage)
sim_drawing_followparentvisibility =0x00800 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_drawing_cyclic =0x01000 # if the max item count was reached then the first items are overwritten.
sim_drawing_50percenttransparency =0x02000 # the drawing object will be 50% transparent
sim_drawing_25percenttransparency =0x04000 # the drawing object will be 25% transparent
sim_drawing_12percenttransparency =0x08000 # the drawing object will be 12.5% transparent
sim_drawing_emissioncolor =0x10000 # When used in combination with sim_drawing_itemcolors or sim_drawing_vertexcolors then the specified colors will be for the emissive component
sim_drawing_facingcamera =0x20000 # Only for trianglepoints quadpoints discpoints and cubepoints. If specified the normal verctor is calculated to face the camera (each item data requires 3 values less)
sim_drawing_overlay =0x40000 # When specified objects are always drawn on top of "regular objects"
sim_drawing_itemtransparency =0x80000 # +1 value per item (each item has its own transparency value (0-1)). Not compatible with sim_drawing_vertexcolors
# banner values
# following can be or-combined
sim_banner_left =0x00001 # Banners display on the left of the specified point
sim_banner_right =0x00002 # Banners display on the right of the specified point
sim_banner_nobackground =0x00004 # Banners have no background rectangle
sim_banner_overlay =0x00008 # When specified banners are always drawn on top of "regular objects"
sim_banner_followparentvisibility =0x00010 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_banner_clickselectsparent =0x00020 # if the object is associated with a scene object then clicking the banner will select the scene object
sim_banner_clicktriggersevent =0x00040 # if the banner is clicked an event is triggered (sim_message_eventcallback_bannerclicked and sim_message_bannerclicked are generated)
sim_banner_facingcamera =0x00080 # If specified the banner will always face the camera by rotating around the banner's vertical axis (y-axis)
sim_banner_fullyfacingcamera =0x00100 # If specified the banner will always fully face the camera (the banner's orientation is same as the camera looking at it)
sim_banner_backfaceculling =0x00200 # If specified the banner will only be visible from one side
sim_banner_keepsamesize =0x00400 # If specified the banner will always appear in the same size. In that case size represents the character height in pixels
sim_banner_bitmapfont =0x00800 # If specified a fixed-size bitmap font is used. The text will also always fully face the camera and be right
# to the specified position. Bitmap fonts are not clickable
# particle objects following are mutually exclusive
sim_particle_points1 =0 # 6 values per point (pt1 and pt2. Pt1 is start position pt2-pt1 is the initial velocity vector). i
#Point is 1 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points2 =1 # 6 values per point. Point is 2 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points4 =2 # 6 values per point. Point is 4 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_roughspheres =3 # 6 values per sphere. Only appearance is rough. Internally a perfect sphere
sim_particle_spheres =4 # 6 values per sphere. Internally a perfect sphere
# following can be or-combined
sim_particle_respondable1to4 =0x0020 # the particles are respondable against shapes (against all objects that have at least one bit 1-4 activated in the global respondable mask)
sim_particle_respondable5to8 =0x0040 # the particles are respondable against shapes (against all objects that have at least one bit 5-8 activated in the global respondable mask)
sim_particle_particlerespondable =0x0080 # the particles are respondable against each other
sim_particle_ignoresgravity =0x0100 # the particles ignore the effect of gravity. Not compatible with sim_particle_water
sim_particle_invisible =0x0200 # the particles are invisible
sim_particle_itemsizes =0x0400 # +1 value per particle (each particle can have a different size)
sim_particle_itemdensities =0x0800 # +1 value per particle (each particle can have a different density)
sim_particle_itemcolors =0x1000 # +3 values per particle (each particle can have a different color)
sim_particle_cyclic =0x2000 # if the max item count was reached then the first items are overwritten.
sim_particle_emissioncolor =0x4000 # When used in combination with sim_particle_itemcolors then the specified colors will be for the emissive component
sim_particle_water =0x8000 # the particles are water particles (no weight in the water (i.e. when z<0)). Not compatible with sim_particle_ignoresgravity
sim_particle_painttag =0x10000 # The particles can be seen by vision sensors (sim_particle_invisible must not be set)
# custom user interface menu attributes
sim_ui_menu_title =1
sim_ui_menu_minimize =2
sim_ui_menu_close =4
sim_ui_menu_systemblock =8
# Boolean parameters
sim_boolparam_hierarchy_visible =0
sim_boolparam_console_visible =1
sim_boolparam_collision_handling_enabled =2
sim_boolparam_distance_handling_enabled =3
sim_boolparam_ik_handling_enabled =4
sim_boolparam_gcs_handling_enabled =5
sim_boolparam_dynamics_handling_enabled =6
sim_boolparam_joint_motion_handling_enabled =7
sim_boolparam_path_motion_handling_enabled =8
sim_boolparam_proximity_sensor_handling_enabled =9
sim_boolparam_vision_sensor_handling_enabled =10
sim_boolparam_mill_handling_enabled =11
sim_boolparam_browser_visible =12
sim_boolparam_scene_and_model_load_messages =13
sim_reserved0 =14
sim_boolparam_shape_textures_are_visible =15
sim_boolparam_display_enabled =16
sim_boolparam_infotext_visible =17
sim_boolparam_statustext_open =18
sim_boolparam_fog_enabled =19
sim_boolparam_rml2_available =20
sim_boolparam_rml4_available =21
sim_boolparam_mirrors_enabled =22
sim_boolparam_aux_clip_planes_enabled =23
sim_boolparam_full_model_copy_from_api =24
sim_boolparam_realtime_simulation =25
sim_boolparam_force_show_wireless_emission =27
sim_boolparam_force_show_wireless_reception =28
sim_boolparam_video_recording_triggered =29
sim_boolparam_threaded_rendering_enabled =32
sim_boolparam_fullscreen =33
sim_boolparam_headless =34
sim_boolparam_hierarchy_toolbarbutton_enabled =35
sim_boolparam_browser_toolbarbutton_enabled =36
sim_boolparam_objectshift_toolbarbutton_enabled =37
sim_boolparam_objectrotate_toolbarbutton_enabled=38
sim_boolparam_force_calcstruct_all_visible =39
sim_boolparam_force_calcstruct_all =40
sim_boolparam_exit_request =41
sim_boolparam_play_toolbarbutton_enabled =42
sim_boolparam_pause_toolbarbutton_enabled =43
sim_boolparam_stop_toolbarbutton_enabled =44
sim_boolparam_waiting_for_trigger =45
# Integer parameters
sim_intparam_error_report_mode =0 # Check sim_api_errormessage_... constants above for valid values
sim_intparam_program_version =1 # e.g Version 2.1.4 --> 20104. Can only be read
sim_intparam_instance_count =2 # do not use anymore (always returns 1 since V-REP 2.5.11)
sim_intparam_custom_cmd_start_id =3 # can only be read
sim_intparam_compilation_version =4 # 0=evaluation version 1=full version 2=player version. Can only be read
sim_intparam_current_page =5
sim_intparam_flymode_camera_handle =6 # can only be read
sim_intparam_dynamic_step_divider =7 # can only be read
sim_intparam_dynamic_engine =8 # 0=Bullet 1=ODE. 2=Vortex.
sim_intparam_server_port_start =9 # can only be read
sim_intparam_server_port_range =10 # can only be read
sim_intparam_visible_layers =11
sim_intparam_infotext_style =12
sim_intparam_settings =13
sim_intparam_edit_mode_type =14 # can only be read
sim_intparam_server_port_next =15 # is initialized at sim_intparam_server_port_start
sim_intparam_qt_version =16 # version of the used Qt framework
sim_intparam_event_flags_read =17 # can only be read
sim_intparam_event_flags_read_clear =18 # can only be read
sim_intparam_platform =19 # can only be read
sim_intparam_scene_unique_id =20 # can only be read
sim_intparam_work_thread_count =21
sim_intparam_mouse_x =22
sim_intparam_mouse_y =23
sim_intparam_core_count =24
sim_intparam_work_thread_calc_time_ms =25
sim_intparam_idle_fps =26
sim_intparam_prox_sensor_select_down =27
sim_intparam_prox_sensor_select_up =28
sim_intparam_stop_request_counter =29
sim_intparam_program_revision =30
sim_intparam_mouse_buttons =31
sim_intparam_dynamic_warning_disabled_mask =32
sim_intparam_simulation_warning_disabled_mask =33
sim_intparam_scene_index =34
sim_intparam_motionplanning_seed =35
sim_intparam_speedmodifier =36
# Float parameters
sim_floatparam_rand=0 # random value (0.0-1.0)
sim_floatparam_simulation_time_step =1
sim_floatparam_stereo_distance =2
# String parameters
sim_stringparam_application_path=0 # path of V-REP's executable
sim_stringparam_video_filename=1
sim_stringparam_app_arg1 =2
sim_stringparam_app_arg2 =3
sim_stringparam_app_arg3 =4
sim_stringparam_app_arg4 =5
sim_stringparam_app_arg5 =6
sim_stringparam_app_arg6 =7
sim_stringparam_app_arg7 =8
sim_stringparam_app_arg8 =9
sim_stringparam_app_arg9 =10
sim_stringparam_scene_path_and_name =13
# Array parameters
sim_arrayparam_gravity =0
sim_arrayparam_fog =1
sim_arrayparam_fog_color =2
sim_arrayparam_background_color1=3
sim_arrayparam_background_color2=4
sim_arrayparam_ambient_light =5
sim_arrayparam_random_euler =6
# User interface elements
sim_gui_menubar =0x0001
sim_gui_popups =0x0002
sim_gui_toolbar1 =0x0004
sim_gui_toolbar2 =0x0008
sim_gui_hierarchy =0x0010
sim_gui_infobar =0x0020
sim_gui_statusbar =0x0040
sim_gui_scripteditor =0x0080
sim_gui_scriptsimulationparameters =0x0100
sim_gui_dialogs =0x0200
sim_gui_browser =0x0400
sim_gui_all =0xffff
# Joint modes
sim_jointmode_passive =0
sim_jointmode_motion =1
sim_jointmode_ik =2
sim_jointmode_ikdependent =3
sim_jointmode_dependent =4
sim_jointmode_force =5
# Navigation and selection modes with the mouse. Lower byte values are mutually exclusive upper byte bits can be combined
sim_navigation_passive =0x0000
sim_navigation_camerashift =0x0001
sim_navigation_camerarotate =0x0002
sim_navigation_camerazoom =0x0003
sim_navigation_cameratilt =0x0004
sim_navigation_cameraangle =0x0005
sim_navigation_camerafly =0x0006
sim_navigation_objectshift =0x0007
sim_navigation_objectrotate =0x0008
sim_navigation_reserved2 =0x0009
sim_navigation_reserved3 =0x000A
sim_navigation_jointpathtest =0x000B
sim_navigation_ikmanip =0x000C
sim_navigation_objectmultipleselection =0x000D
# Bit-combine following values and add them to one of above's values for a valid navigation mode
sim_navigation_reserved4 =0x0100
sim_navigation_clickselection =0x0200
sim_navigation_ctrlselection =0x0400
sim_navigation_shiftselection =0x0800
sim_navigation_camerazoomwheel =0x1000
sim_navigation_camerarotaterightbutton =0x2000
#Remote API constants
SIMX_VERSION =0
# Remote API message header structure
SIMX_HEADER_SIZE =18
simx_headeroffset_crc =0 # 1 simxUShort. Generated by the client or server. The CRC for the message
simx_headeroffset_version =2 # 1 byte. Generated by the client or server. The version of the remote API software
simx_headeroffset_message_id =3 # 1 simxInt. Generated by the client (and used in a reply by the server)
simx_headeroffset_client_time =7 # 1 simxInt. Client time stamp generated by the client (and sent back by the server)
simx_headeroffset_server_time =11 # 1 simxInt. Generated by the server when a reply is generated. The server timestamp
simx_headeroffset_scene_id =15 # 1 simxUShort. Generated by the server. A unique ID identifying the scene currently displayed
simx_headeroffset_server_state =17 # 1 byte. Generated by the server. Bit coded 0 set --> simulation not stopped 1 set --> simulation paused 2 set --> real-time switch on 3-5 edit mode type (0=no edit mode 1=triangle 2=vertex 3=edge 4=path 5=UI)
# Remote API command header
SIMX_SUBHEADER_SIZE =26
simx_cmdheaderoffset_mem_size =0 # 1 simxInt. Generated by the client or server. The buffer size of the command.
simx_cmdheaderoffset_full_mem_size =4 # 1 simxInt. Generated by the client or server. The full buffer size of the command (applies to split chunks).
simx_cmdheaderoffset_pdata_offset0 =8 # 1 simxUShort. Generated by the client or server. The amount of data that is part of the command identification.
simx_cmdheaderoffset_pdata_offset1 =10 # 1 simxInt. Generated by the client or server. The amount of shift of the pure data buffer (applies to split chunks).
simx_cmdheaderoffset_cmd=14 # 1 simxInt. Generated by the client (and used in a reply by the server). The command combined with the operation mode of the command.
simx_cmdheaderoffset_delay_or_split =18 # 1 simxUShort. Generated by the client or server. The amount of delay in ms of a continuous command or the max. pure data size to send at once (applies to split commands).
simx_cmdheaderoffset_sim_time =20 # 1 simxInt. Generated by the server. The simulation time (in ms) when the command was executed (or 0 if simulation is not running)
simx_cmdheaderoffset_status =24 # 1 byte. Generated by the server. (1 bit 0 is set --> error in function execution on server side). The client writes bit 1 if command cannot be overwritten
simx_cmdheaderoffset_reserved =25 # 1 byte. Not yet used
# Regular operation modes
simx_opmode_oneshot =0x000000 # sends command as one chunk. Reply will also come as one chunk. Doesn't wait for the reply.
simx_opmode_oneshot_wait =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_continuous =0x020000
simx_opmode_streaming =0x020000 # sends command as one chunk. Command will be stored on the server and always executed
#(every x ms (as far as possible) where x can be 0-65535. just add x to opmode_continuous).
# A reply will be sent continuously each time as one chunk. Doesn't wait for the reply.
# Operation modes for heavy data
simx_opmode_oneshot_split =0x030000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_oneshot_split). Reply will also come as several chunks. Doesn't wait for the reply.
simx_opmode_continuous_split =0x040000
simx_opmode_streaming_split =0x040000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_continuous_split). Command will be stored on the server and always executed. A reply will be sent continuously each time as several chunks. Doesn't wait for the reply.
# Special operation modes
simx_opmode_discontinue =0x050000 # removes and cancels all commands stored on the client or server side (also continuous commands)
simx_opmode_buffer =0x060000 # doesn't send anything but checks if a reply for the given command is available in the input buffer (i.e. previously received from the server)
simx_opmode_remove =0x070000 # doesn't send anything and doesn't return any specific value. It just erases a similar command reply in the inbox (to free some memory)
# Command return codes
simx_return_ok =0x000000
simx_return_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_return_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_return_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_return_remote_error_flag =0x000008 # command caused an error on the server side
simx_return_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_return_local_error_flag =0x000020 # command caused an error on the client side
simx_return_initialize_error_flag =0x000040 # simxStart was not yet called
# Following for backward compatibility (same as above)
simx_error_noerror =0x000000
simx_error_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_error_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_error_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_error_remote_error_flag =0x000008 # command caused an error on the server side
simx_error_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_error_local_error_flag =0x000020 # command caused an error on the client side
simx_error_initialize_error_flag =0x000040 # simxStart was not yet called
|
USC-ACTLab/pyCreate2
|
pyCreate2/vrep/vrepConst.py
|
Python
|
mit
| 36,458
|
# Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""Drag aNd Drop handlers for TabLists."""
import logging
from miro import app
from miro import messages
from miro.plat.frontends.widgets import widgetset
class TabListDragHandler(object):
    """Base drag handler for tab lists.

    Subclasses set item_type/folder_type to the drag type strings for
    plain tabs and folder tabs respectively.
    """
    item_type = NotImplemented
    folder_type = NotImplemented
    def allowed_actions(self):
        # dragging tabs within a list is a move, never a copy
        return widgetset.DRAG_ACTION_MOVE
    def allowed_types(self):
        return (self.item_type, self.folder_type)
    def begin_drag(self, _tableview, rows):
        """Returns {(tablist.type as a str): set of ids}"""
        first_info = rows[0][0]
        # static 'tab' rows can't be dragged; the first row is a tab if
        # and only if every selected row is
        if first_info.type == 'tab':
            return {}
        has_folder = any(row[0].is_folder for row in rows)
        typ = self.folder_type if has_folder else self.item_type
        dragged_ids = set(row[0].id for row in rows)
        return {str(typ): dragged_ids}
class TabDnDReorder(object):
    """Handles re-ordering tabs for doing drag and drop reordering."""
    def __init__(self):
        # row tuples removed from the model, in their original order
        self.removed_rows = []
        # id of a removed folder row -> copied tuples of its child rows
        self.removed_children = {}
        # iter of the row the dragged rows get inserted before
        # (None means append at the end of the parent)
        self.drop_row_iter = None
        # id of that drop row, kept so the iter can be recalculated when
        # the drop row itself is part of the drag (see _remove_row)
        self.drop_id = None
    def reorder(self, model, parent, position, dragged_ids):
        # Move the rows whose ids are in dragged_ids under `parent` at
        # `position`; returns {id: new iter} for every re-inserted row.
        self.drop_row_iter = None
        if position >= 0 and parent:
            try:
                self.drop_row_iter = model.nth_child_iter(parent, position)
            except LookupError:
                # 16834 - invalid drop position, that's past the end.
                pass
        self.drop_id = self._calc_drop_id(model)
        self._remove_dragged_rows(model, dragged_ids)
        return self._put_rows_back(model, parent)
    def _calc_drop_id(self, model):
        # Implicitly returns None when dropping past the last row.
        if self.drop_row_iter is not None:
            return model[self.drop_row_iter][0].id
    def _remove_dragged_rows(self, model, dragged_ids):
        """Part of reorder, separated for clarity."""
        # iterating through the entire table seems inefficient, but we have to
        # know the order of dragged rows so we can insert them back in the
        # right order.
        iter_ = model.first_iter()
        if not iter_:
            app.widgetapp.handle_soft_failure('_remove_dragged_rows',
                "tried to drag no rows?", with_exception=False)
            return
        iter_ = model.child_iter(iter_)
        while iter_:
            row = model[iter_]
            if row[0].id in dragged_ids:
                # need to make a copy of the row data, since we're removing it
                # from the table
                children = [tuple(r) for r in row.iterchildren()]
                self.removed_children[row[0].id] = children
                iter_ = self._remove_row(model, iter_, tuple(row))
            else:
                # the row itself isn't dragged; check its children
                child_iter = model.child_iter(iter_)
                while child_iter:
                    row = model[child_iter]
                    if row[0].id in dragged_ids:
                        child_iter = self._remove_row(model, child_iter,
                                tuple(row))
                    else:
                        child_iter = model.next_iter(child_iter)
                iter_ = model.next_iter(iter_)
    def _put_rows_back(self, model, parent):
        """Part of reorder, separated for clarity."""
        retval = {}
        for removed_row in self.removed_rows:
            if self.drop_row_iter is None:
                # no row below the drop point: append at the end
                iter_ = model.append_child(parent, *removed_row)
            else:
                iter_ = model.insert_before(self.drop_row_iter, *removed_row)
            retval[removed_row[0].id] = iter_
            # re-attach any children that were removed with a folder row
            children = self.removed_children.get(removed_row[0].id, [])
            for child_row in children:
                child_iter = model.append_child(iter_, *child_row)
                retval[child_row[0].id] = child_iter
        return retval
    def _remove_row(self, model, iter_, row):
        """Part of _remove_dragged_rows."""
        self.removed_rows.append(row)
        if row[0].id == self.drop_id:
            # the drop target row is itself being dragged; shift the drop
            # point to the next row down
            self.drop_row_iter = model.next_iter(self.drop_row_iter)
            self.drop_id = self._calc_drop_id(model)
        return model.remove(iter_)
class MediaTypeDropHandler(object):
    """Drop Handler that changes the media type (audio/video/other) of items
    that get dropped on it.
    """
    def allowed_types(self):
        return ('downloaded-item', 'device-video-item', 'device-audio-item')
    def allowed_actions(self):
        return widgetset.DRAG_ACTION_COPY
    def validate_drop(self,
            _table_view, model, typ, _source_actions, parent_iter, position):
        # Only allow drops directly onto a tab (position == -1), never
        # between tabs.
        if parent_iter is None or position != -1:
            return widgetset.DRAG_ACTION_NONE
        parent_info = model[parent_iter][0]
        if typ == 'downloaded-item' and parent_info.id in ('videos', 'music', 'others'):
            return widgetset.DRAG_ACTION_COPY
        # BUG FIX: the media_type lookup was done on parent_info.id (a
        # string, which never has a media_type attribute), so device item
        # drops were never validated.  Look it up on the tab info object,
        # matching what accept_drop does.
        elif typ == 'device-%s-item' % getattr(parent_info, 'media_type', None):
            return widgetset.DRAG_ACTION_COPY
        return widgetset.DRAG_ACTION_NONE
    def accept_drop(self,
            _table_view, model, typ, _source_actions, parent, position, videos):
        # Retag the dropped items with the target tab's media type.
        media_type = model[parent][0].media_type
        messages.SetItemMediaType(media_type, videos).send_to_backend()
class NestedTabListDropHandler(object):
    """Drop handler for tab lists with one level of folders (feeds,
    playlists); implements drag and drop reordering of the tabs.
    """
    item_types = NotImplemented
    folder_types = NotImplemented
    def __init__(self, tablist):
        self.tablist = tablist
    def allowed_actions(self):
        return widgetset.DRAG_ACTION_COPY
    def allowed_types(self):
        return self.item_types + self.folder_types
    def validate_drop(self,
            _table_view, model, typ, _source_actions, parent, position):
        if parent is None: # trying to drag above the root
            return widgetset.DRAG_ACTION_NONE
        if model[parent][0].is_folder:
            # folders can't be nested inside other folders
            if typ in self.folder_types:
                return widgetset.DRAG_ACTION_NONE
        elif position < 0: # trying to drag onto non-folder
            return widgetset.DRAG_ACTION_NONE
        if typ not in self.allowed_types():
            return widgetset.DRAG_ACTION_NONE
        return widgetset.DRAG_ACTION_MOVE
    def accept_drop(self,
            view, model, typ, _source_actions, parent, position, dragged_ids):
        # NOTE: combine 'with' statements in python2.7+
        with self.tablist.preserving_expanded_rows():
            with self.tablist.adding():
                with self.tablist.removing():
                    new_iters = TabDnDReorder().reorder(
                        view.model, parent, position, dragged_ids)
                    self.tablist.iter_map.update(new_iters)
        # re-select the rows that were moved
        view.unselect_all(signal=False)
        for iter_ in new_iters.itervalues():
            try:
                view.select(iter_)
            except ValueError:
                # presumably the row landed in a collapsed folder; expand
                # it and retry -- TODO confirm against widgetset.select
                parent = view.model.parent_iter(iter_)
                view.set_row_expanded(parent, True)
                view.select(iter_)
            except LookupError:
                logging.error('lookup error in accept_drop')
                view.select(view.model.first_iter())
        view.emit('selection-changed')
        # send the complete new tab order to the backend
        message = messages.TabsReordered()
        parent = view.model[view.model.first_iter()]
        for row in parent.iterchildren():
            message.append(row[0], self.tablist.type)
            for child in row.iterchildren():
                message.append_child(row[0].id, child[0])
        message.send_to_backend()
class FeedListDropHandler(NestedTabListDropHandler):
    # Drop handler for the feed tab list.
    item_types = ('feed',)
    folder_types = ('feed-with-folder',)
class FeedListDragHandler(TabListDragHandler):
    # Drag handler for the feed tab list.
    item_type = u'feed'
    folder_type = u'feed-with-folder'
class PlaylistListDropHandler(NestedTabListDropHandler):
    """Playlist tab drop handler: supports tab reordering (inherited)
    plus dropping downloaded items onto a playlist to add them to it.
    """
    item_types = ('playlist',)
    folder_types = ('playlist-with-folder',)
    def allowed_actions(self):
        # MOVE for reordering tabs, COPY for adding items to a playlist
        return (NestedTabListDropHandler.allowed_actions(self) |
                widgetset.DRAG_ACTION_COPY)
    def allowed_types(self):
        return NestedTabListDropHandler.allowed_types(self) + ('downloaded-item',)
    def validate_drop(self,
            table_view, model, typ, source_actions, parent, position):
        if typ == 'downloaded-item':
            # items may only be dropped directly onto a real playlist row,
            # not between rows, onto static tabs, or onto folders
            if position != -1:
                return widgetset.DRAG_ACTION_NONE
            if (not parent or model[parent][0].type == 'tab'
                or model[parent][0].is_folder):
                return widgetset.DRAG_ACTION_NONE
            return widgetset.DRAG_ACTION_COPY
        return NestedTabListDropHandler.validate_drop(self,
                table_view, model, typ, source_actions, parent, position)
    def accept_drop(self,
            table_view, model, typ, source_actions, parent, position, ids):
        if typ == 'downloaded-item':
            playlist_id = model[parent][0].id
            messages.AddVideosToPlaylist(playlist_id, ids).send_to_backend()
        else:
            # tab reordering: delegate to the nested-list handler
            NestedTabListDropHandler.accept_drop(self,
                table_view, model, typ, source_actions, parent, position, ids)
class PlaylistListDragHandler(TabListDragHandler):
    # Drag handler for the playlist tab list.
    item_type = u'playlist'
    folder_type = u'playlist-with-folder'
class DeviceDropHandler(object):
    """Allows downloaded items to be dropped onto a device tab to start
    syncing them to that device.
    """
    def __init__(self, tablist):
        self.tablist = tablist
    def allowed_actions(self):
        return widgetset.DRAG_ACTION_COPY
    def allowed_types(self):
        return ('downloaded-item',)
    def validate_drop(self,
            _widget, model, typ, _source_actions, parent, position):
        if typ not in self.allowed_types() or not parent:
            return widgetset.DRAG_ACTION_NONE
        device = model[parent][0]
        # only rows backed by a DeviceInfo are drop targets
        if not isinstance(device, messages.DeviceInfo):
            return widgetset.DRAG_ACTION_NONE
        if device.mount:
            if position != -1:
                # NOTE(review): returning (action, iter) presumably retargets
                # the drop onto that row -- confirm against the widgetset API
                return (widgetset.DRAG_ACTION_COPY, parent)
            elif not getattr(device, 'fake', False):
                return widgetset.DRAG_ACTION_COPY
            else:
                # 'fake' rows appear to be sub-tabs of a device; retarget the
                # drop at the real parent device row -- TODO confirm
                return (widgetset.DRAG_ACTION_COPY,
                        model.parent_iter(parent))
        return widgetset.DRAG_ACTION_NONE
    def accept_drop(self,
            _widget, model, _type, _source_actions, parent, _position, videos):
        device = model[parent][0]
        if getattr(device, 'fake', False):
            # drops on a 'fake' sub-tab sync to its parent device row
            device = model[model.parent_iter(parent)][0]
        messages.DeviceSyncMedia(device, videos).send_to_backend()
|
debugger06/MiroX
|
tv/lib/frontends/widgets/tablistdnd.py
|
Python
|
gpl-2.0
| 11,806
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.
    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.
    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    Raises
    ------
    LinAlgError
        If the SVD fails to converge after max_svd_restarts attempts.
    References
    ----------
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    Notes
    -----
    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated. Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated. These two
    calculations are performed until convergence. The discrete partition
    matrix is returned as the clustering solution. Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError
    random_state = check_random_state(random_state)
    vectors = as_float_array(vectors, copy=copy)
    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape
    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element. This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
    # Normalize the rows of the eigenvectors. Samples should lie on the unit
    # hypersphere centered at the origin. This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
    svd_restarts = 0
    has_converged = False
    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T
        last_objective_value = 0.0
        n_iter = 0
        while not has_converged:
            n_iter += 1
            t_discrete = np.dot(vectors, rotation)
            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))
            t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: count the *failed* attempt here.  The increment
                # used to sit on the success path, so repeated SVD failures
                # never advanced svd_restarts and the loop could run forever
                # without ever reaching the final LinAlgError below.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)
    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Apply clustering to a projection to the normalized laplacian.
    Spectral clustering is useful when individual clusters are highly
    non-convex, or more generally when a measure of the center and spread
    of a cluster is not a suitable description of it (e.g. nested circles
    on the 2D plane).  When ``affinity`` is the adjacency matrix of a
    graph, this method can be used to find normalized graph cuts.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        The (symmetric) affinity matrix describing the relationship of the
        samples to embed, e.g. a graph adjacency matrix, a heat kernel of
        the pairwise distances, or a symmetric k-nearest-neighbours
        connectivity matrix.
    n_clusters : integer, optional
        Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use.  AMG requires pyamg;
        it can be faster on very large sparse problems but may be unstable.
    random_state : int, RandomState instance or None, optional, default: None
        Pseudo random number generator used for the lobpcg eigen vectors
        decomposition when eigen_solver == 'amg' and by the K-Means
        initialization.
    n_init : int, optional, default: 10
        Number of times the k-means algorithm is run with different
        centroid seeds; the best output in terms of inertia is kept.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Strategy used to assign labels in the embedding space: k-means
        (popular but initialization-sensitive) or discretization (less
        sensitive to random initialization; see the 'Multiclass spectral
        clustering' reference).
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    Notes
    ------
    The graph should contain only one connected component; otherwise the
    results make little sense.  This algorithm solves the normalized cut
    for k=2: it is a normalized spectral clustering.
    """
    # Validate the label-assignment strategy before doing any work.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)
    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters
    # Embed the samples via the eigenvectors of the normalized Laplacian.
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol, drop_first=False)
    # Turn the continuous embedding into discrete cluster labels.
    if assign_labels == 'kmeans':
        _, labels, _ = k_means(embedding, n_clusters,
                               random_state=random_state, n_init=n_init)
    else:
        labels = discretize(embedding, random_state=random_state)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.
    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.
    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::
            np.exp(-gamma * d(X,X) ** 2)
    or a k-nearest neighbors connectivity matrix.
    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities
    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
        the K-Means initialization. If int, random_state is the seed used by
        the random number generator; If RandomState instance, random_state is
        the random number generator; If None, the random number generator is
        the RandomState instance used by `np.random`.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    gamma : float, default=1.0
        Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
        Ignored for ``affinity='nearest_neighbors'``.
    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.
        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.
    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
    labels_ :
        Labels of each point
    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::
            np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
    Where ``delta`` is a free parameter representing the width of the Gaussian
    kernel.
    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.
    If the pyamg package is installed, it is used: this greatly
    speeds up computation.
    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """
    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None, n_jobs=1):
        # sklearn convention: __init__ only stores parameters, no validation
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # A square X often means the caller passed an affinity matrix
            # under the old API.  BUG FIX: the concatenated warning message
            # was missing a space ("``fit``now constructs").
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To"
                          " use a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
        if self.affinity == 'nearest_neighbors':
            # symmetrize the kNN connectivity matrix
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
                                            n_jobs=self.n_jobs)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            # any pairwise kernel (rbf, poly, ...) or a user callable
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)
        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self
    @property
    def _pairwise(self):
        # tells sklearn utilities that X is a square pairwise matrix when
        # affinity is precomputed
        return self.affinity == "precomputed"
|
nhejazi/scikit-learn
|
sklearn/cluster/spectral.py
|
Python
|
bsd-3-clause
| 19,173
|
"""Mocks used for testing."""
import httmock
# Modoboa API mocks
@httmock.urlmatch(
    netloc=r"api\.modoboa\.org$", path=r"^/1/instances/search/", method="post")
def modo_api_instance_search(url, request):
    """Simulate an instance lookup that finds nothing (404)."""
    response = {"status_code": 404}
    return response
@httmock.urlmatch(
    netloc=r"api\.modoboa\.org$", path=r"^/1/instances/", method="post")
def modo_api_instance_create(url, request):
    """Simulate a successful instance creation (201 with new pk)."""
    content = {"pk": 100}
    return {"status_code": 201, "content": content}
@httmock.urlmatch(
    netloc=r"api\.modoboa\.org$", path=r"^/1/instances/.+/", method="put")
def modo_api_instance_update(url, request):
    """Simulate a successful instance update (empty 200)."""
    return dict(status_code=200)
@httmock.urlmatch(
    netloc=r"api\.modoboa\.org$", path=r"^/1/versions/", method="get")
def modo_api_versions(url, request):
    """Simulate a versions check that reports a newer release."""
    versions = [
        {"name": "modoboa", "version": "9.0.0", "url": ""},
    ]
    return {"status_code": 200, "content": versions}
@httmock.urlmatch(
    netloc=r"api\.modoboa\.org$", path=r"^/1/versions/", method="get")
def modo_api_versions_no_update(url, request):
    """Simulate a versions check that reports no newer release."""
    versions = [
        {"name": "modoboa", "version": "0.0.0", "url": ""},
    ]
    return {"status_code": 200, "content": versions}
|
modoboa/modoboa
|
modoboa/core/mocks.py
|
Python
|
isc
| 1,365
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
import pkg_resources
import yaml
import six
from functest.utils import env
class Config():
    """Load the functest YAML configuration and expose it as attributes.

    The parsed tree ends up in ``functest_yaml``; :meth:`fill` flattens it
    onto instance attributes (a 'general' key is elided from the names).
    """
    def __init__(self):
        # Load the default configuration bundled with the package.
        try:
            config_path = pkg_resources.resource_filename(
                'functest', 'ci/config_functest.yaml')
            with open(config_path, encoding='utf-8') as yfile:
                self.functest_yaml = yaml.safe_load(yfile)
        except Exception as error:
            raise Exception(
                f'Parse config failed: {str(error)}') from error
    @staticmethod
    def _merge_dicts(dict1, dict2):
        """Yield (key, value) pairs of dict2 recursively merged over dict1."""
        for key in set(dict1.keys()).union(dict2.keys()):
            in_first = key in dict1
            in_second = key in dict2
            if in_first and in_second:
                first, second = dict1[key], dict2[key]
                if isinstance(first, dict) and isinstance(second, dict):
                    # both sides are dicts: merge them recursively
                    yield (key, dict(Config._merge_dicts(first, second)))
                else:
                    # dict2 wins on conflicting scalar values
                    yield (key, second)
            elif in_first:
                yield (key, dict1[key])
            else:
                yield (key, dict2[key])
    def patch_file(self, patch_file_path):
        """Overlay patch-file sections whose key matches DEPLOY_SCENARIO."""
        with open(patch_file_path, encoding='utf-8') as yfile:
            patches = yaml.safe_load(yfile)
        for key in patches:
            if key in env.get('DEPLOY_SCENARIO'):
                self.functest_yaml = dict(
                    Config._merge_dicts(self.functest_yaml, patches[key]))
    def _parse(self, attr_now, left_parametes):
        """Recursively flatten a mapping onto attributes of this object."""
        for name, value in six.iteritems(left_parametes):
            child_attr = self._get_attr_further(attr_now, name)
            if child_attr:
                setattr(self, child_attr, value)
            if isinstance(value, dict):
                self._parse(child_attr, value)
    @staticmethod
    def _get_attr_further(attr_now, next):  # pylint: disable=redefined-builtin
        # 'general' is transparent; otherwise join parent_child with '_'
        if next == 'general':
            return attr_now
        if attr_now:
            return f'{attr_now}_{next}'
        return next
    def fill(self):
        """Flatten the loaded YAML tree onto attributes of this object."""
        try:
            self._parse(None, self.functest_yaml)
        except Exception as error:
            raise Exception(
                f'Parse config failed: {str(error)}') from error
# Module-level singleton: load the default config, overlay the scenario
# patch file, and flatten everything onto CONF attributes at import time.
CONF = Config()
CONF.patch_file(pkg_resources.resource_filename(
    'functest', 'ci/config_patch.yaml'))
# Apply additional overrides on AArch64 pods.
if env.get("POD_ARCH") in ['aarch64']:
    CONF.patch_file(pkg_resources.resource_filename(
        'functest', 'ci/config_aarch64_patch.yaml'))
CONF.fill()
|
opnfv/functest
|
functest/utils/config.py
|
Python
|
apache-2.0
| 2,430
|
"""distutils.file_util
Utility functions for operating on single files.
"""
__revision__ = "$Id$"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
# Maps the 'link' argument of copy_file() to the verb used in log output;
# None means a plain copy.
_copy_action = {None: 'copying',
                'hard': 'hard linking',
                'sym': 'symbolically linking'}
def _copy_file_contents(src, dst, buffer_size=16*1024):
    """Copy the file 'src' to 'dst'.
    Both must be filenames. Any error opening either file, reading from
    'src', or writing to 'dst', raises DistutilsFileError. Data is
    read/written in chunks of 'buffer_size' bytes (default 16k). No attempt
    is made to handle anything apart from regular files.
    """
    # Stolen from shutil module in the standard library, but with
    # custom error-handling added.
    fsrc = None
    fdst = None
    try:
        try:
            fsrc = open(src, 'rb')
        except os.error, (errno, errstr):
            raise DistutilsFileError("could not open '%s': %s" % (src, errstr))
        # remove an existing destination before writing the new contents
        if os.path.exists(dst):
            try:
                os.unlink(dst)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not delete '%s': %s" % (dst, errstr))
        try:
            fdst = open(dst, 'wb')
        except os.error, (errno, errstr):
            raise DistutilsFileError(
                "could not create '%s': %s" % (dst, errstr))
        # copy in buffer_size chunks until EOF, wrapping each OS error in
        # a DistutilsFileError that names the offending file
        while 1:
            try:
                buf = fsrc.read(buffer_size)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not read from '%s': %s" % (src, errstr))
            if not buf:
                break
            try:
                fdst.write(buf)
            except os.error, (errno, errstr):
                raise DistutilsFileError(
                    "could not write to '%s': %s" % (dst, errstr))
    finally:
        # close whichever handles were successfully opened, even on error
        if fdst:
            fdst.close()
        if fsrc:
            fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
              link=None, verbose=1, dry_run=0):
    """Copy a file 'src' to 'dst'.
    If 'dst' is a directory, then 'src' is copied there with the same name;
    otherwise, it must be a filename. (If the file exists, it will be
    ruthlessly clobbered.) If 'preserve_mode' is true (the default),
    the file's mode (type and permission bits, or whatever is analogous on
    the current platform) is copied. If 'preserve_times' is true (the
    default), the last-modified and last-access times are copied as well.
    If 'update' is true, 'src' will only be copied if 'dst' does not exist,
    or if 'dst' does exist but is older than 'src'.
    'link' allows you to make hard links (os.link) or symbolic links
    (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
    None (the default), files are copied. Don't set 'link' on systems that
    don't support it: 'copy_file()' doesn't check if hard or symbolic
    linking is available. If hardlink fails, falls back to
    _copy_file_contents().
    Under Mac OS, uses the native file copy function in macostools; on
    other systems, uses '_copy_file_contents()' to copy file contents.
    Return a tuple (dest_name, copied): 'dest_name' is the actual name of
    the output file, and 'copied' is true if the file was copied (or would
    have been copied, if 'dry_run' true).
    """
    # XXX if the destination file already exists, we clobber it if
    # copying, but blow up if linking. Hmmm. And I don't know what
    # macostools.copyfile() does. Should definitely be consistent, and
    # should probably blow up if destination exists and we would be
    # changing it (ie. it's not already a hard/soft link to src OR
    # (not update) and (src newer than dst).
    from distutils.dep_util import newer
    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
    if not os.path.isfile(src):
        raise DistutilsFileError(
              "can't copy '%s': doesn't exist or not a regular file" % src)
    # resolve the actual destination filename and its directory
    if os.path.isdir(dst):
        dir = dst
        dst = os.path.join(dst, os.path.basename(src))
    else:
        dir = os.path.dirname(dst)
    if update and not newer(src, dst):
        if verbose >= 1:
            log.debug("not copying %s (output up-to-date)", src)
        return dst, 0
    # pick the verb to log based on the 'link' mode
    try:
        action = _copy_action[link]
    except KeyError:
        raise ValueError("invalid value '%s' for 'link' argument" % link)
    if verbose >= 1:
        if os.path.basename(dst) == os.path.basename(src):
            log.info("%s %s -> %s", action, src, dir)
        else:
            log.info("%s %s -> %s", action, src, dst)
    if dry_run:
        return (dst, 1)
    # If linking (hard or symbolic), use the appropriate system call
    # (Unix only, of course, but that's the caller's responsibility)
    if link == 'hard':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            try:
                os.link(src, dst)
                return (dst, 1)
            except OSError:
                # If hard linking fails, fall back on copying file
                # (some special filesystems don't support hard linking
                #  even under Unix, see issue #8876).
                pass
    elif link == 'sym':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            os.symlink(src, dst)
            return (dst, 1)
    # Otherwise (non-Mac, not linking), copy the file contents and
    # (optionally) copy the times and mode.
    _copy_file_contents(src, dst)
    if preserve_mode or preserve_times:
        st = os.stat(src)
        # According to David Ascher <da@ski.org>, utime() should be done
        # before chmod() (at least under NT).
        if preserve_times:
            os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
        if preserve_mode:
            os.chmod(dst, S_IMODE(st[ST_MODE]))
    return (dst, 1)
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst, verbose=1, dry_run=0):
    """Move a file 'src' to 'dst'.
    If 'dst' is a directory, the file will be moved into it with the same
    name; otherwise, 'src' is just renamed to 'dst'. Return the new
    full name of the file.
    Handles cross-device moves on Unix using 'copy_file()'. What about
    other systems???
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno
    if verbose >= 1:
        log.info("moving %s -> %s", src, dst)
    if dry_run:
        return dst
    if not isfile(src):
        raise DistutilsFileError("can't move '%s': not a regular file" % src)
    # moving into a directory keeps the source's basename; moving onto an
    # existing file is an error (unlike copy_file, which clobbers)
    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' already exists" %
              (src, dst))
    if not isdir(dirname(dst)):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' not a valid path" % \
              (src, dst))
    # try a cheap rename first; fall back to copy+delete when the rename
    # crosses filesystems (EXDEV)
    copy_it = 0
    try:
        os.rename(src, dst)
    except os.error, (num, msg):
        if num == errno.EXDEV:
            copy_it = 1
        else:
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s': %s" % (src, dst, msg))
    if copy_it:
        copy_file(src, dst, verbose=verbose)
        try:
            os.unlink(src)
        except os.error, (num, msg):
            # deleting the source failed: remove the copy we just made so
            # the move doesn't leave two files behind
            try:
                os.unlink(dst)
            except os.error:
                pass
            raise DistutilsFileError(
                  ("couldn't move '%s' to '%s' by copy/delete: " +
                   "delete '%s' failed: %s") %
                  (src, dst, src, msg))
    return dst
def write_file (filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it.

    Each element of 'contents' is written followed by a newline; an
    empty sequence produces an empty file.
    """
    # Use a context manager so the file is closed even if a write fails
    # (the original open/try/finally spelled the same guarantee by hand).
    with open(filename, "w") as f:
        for line in contents:
            f.write(line + "\n")
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/distutils/file_util.py
|
Python
|
bsd-3-clause
| 8,370
|
import numpy as np


def sample_to_sum(rng, target, sd, count):
    """Draw `count` normal samples, then shift them uniformly so that
    their total equals `target`.

    The per-sample standard deviation is scaled by 1/sqrt(count) so the
    spread of the (pre-shift) sum stays at `sd`.
    """
    samples = rng.randn(count) * sd / np.sqrt(count)
    correction = (target - np.sum(samples)) / count
    return samples + correction
|
tcstewar/sham
|
sample.py
|
Python
|
gpl-2.0
| 158
|
# vim:set noet ts=4:
#
# ibus-anthy - The Anthy engine for IBus
#
# Copyright (c) 2007-2008 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2009 Hideaki ABE <abe.sendai@gmail.com>
# Copyright (c) 2010-2017 Takao Fujiwara <takao.fujiwara1@gmail.com>
# Copyright (c) 2007-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
from gi import require_version as gi_require_version
gi_require_version('Gio', '2.0')
gi_require_version('GLib', '2.0')
gi_require_version('IBus', '1.0')
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import IBus
class DictItem():
    '''Plain value object describing one Anthy dictionary entry.

    serialize() packs an instance into a GLib tuple variant so it can
    be stored in GSettings.
    '''
    def __init__(self,
                 id='',
                 short_label='',
                 long_label='',
                 icon='',
                 is_system=False,
                 preview_lines=-1,
                 embed=False,
                 single=True,
                 reverse=False,
                 encoding='utf-8'):
        for attr, value in (('id', id),
                            ('short_label', short_label),
                            ('long_label', long_label),
                            ('icon', icon),
                            ('is_system', is_system),
                            ('preview_lines', preview_lines),
                            ('embed', embed),
                            ('single', single),
                            ('reverse', reverse),
                            ('encoding', encoding)):
            setattr(self, attr, value)
    def __str__(self):
        # Render as the str() of a flat ('label:', value, ...) tuple.
        pairs = (('id:', self.id),
                 ('short-label:', self.short_label),
                 ('long-label:', self.long_label),
                 ('icon:', self.icon),
                 ('is-system:', self.is_system),
                 ('preview-lines:', self.preview_lines),
                 ('embed:', self.embed),
                 ('single:', self.single),
                 ('reverse:', self.reverse),
                 ('encoding:', self.encoding))
        flat = tuple(item for pair in pairs for item in pair)
        return str(flat)
    @classmethod
    def serialize(cls, dict_item):
        '''Pack `dict_item` into a GLib tuple variant (field order fixed).'''
        builder = GLib.VariantBuilder(GLib.VariantType('r'))
        for make_variant, value in (
                (GLib.Variant.new_string, dict_item.id),
                (GLib.Variant.new_string, dict_item.short_label),
                (GLib.Variant.new_string, dict_item.long_label),
                (GLib.Variant.new_string, dict_item.icon),
                (GLib.Variant.new_boolean, dict_item.is_system),
                (GLib.Variant.new_int32, dict_item.preview_lines),
                (GLib.Variant.new_boolean, dict_item.embed),
                (GLib.Variant.new_boolean, dict_item.single),
                (GLib.Variant.new_boolean, dict_item.reverse),
                (GLib.Variant.new_string, dict_item.encoding)):
            builder.add_value(make_variant(value))
        return builder.end()
class Prefs(GObject.GObject):
    '''GSettings-backed preference store for ibus-anthy.

    Keeps one Gio.Settings object per schema section and caches every
    value read through it.  Emits a 'changed' GObject signal
    (section, key, GLib.Variant) whenever GSettings reports a change.
    '''
    __gsignals__ = {
        'changed' : (
            GObject.SignalFlags.RUN_FIRST,
            None,
            (str, str, GLib.Variant)),
    }
    def __init__(self):
        super(Prefs, self).__init__()
        # section -> {key: GLib.Variant}; values already read from GSettings.
        self.__cache = {}
        # section -> Gio.Settings bound to that schema.
        self.__settings = {}
        self.__schema_prefix = 'org.freedesktop.ibus.engine.anthy.'
        self.__schema_sections = ['common',
                                  'shortcut',
                                  'romaji-typing-rule',
                                  'kana-typing-rule',
                                  'thumb-typing-rule',
                                  'thumb',
                                  'dict']
        for section in self.__schema_sections:
            self.__settings[section] = Gio.Settings(
                schema=self.__schema_prefix + section)
            self.__settings[section].connect('changed',
                                             self.__settings_on_changed)
    def __settings_on_changed(self, settings, key):
        '''Refresh the cache for the changed key and re-emit 'changed'.'''
        section = settings.props.schema[len(self.__schema_prefix):]
        variant_value = self.__settings[section].get_value(key)
        variant_key = self.__cache.get(section)
        if variant_key is None:
            variant_key = {}
        variant_key[key] = variant_value
        self.__cache[section] = variant_key
        self.emit('changed', section, key, variant_value)
    def variant_to_value(self, variant):
        '''Convert a GLib.Variant to a plain Python value.

        Non-variant input is returned unchanged.
        '''
        if type(variant) != GLib.Variant:
            return variant
        type_string = variant.get_type_string()
        if type_string == 's':
            return variant.get_string()
        elif type_string == 'i':
            return variant.get_int32()
        elif type_string == 'b':
            return variant.get_boolean()
        elif type_string == 'v':
            return variant.unpack()
        elif len(type_string) > 0 and type_string[0] == 'a':
            # Use unpack() instead of dup_strv() in python.
            # In the latest pygobject3 3.3.4 or later, g_variant_dup_strv
            # returns the allocated strv but in the previous release,
            # it returned the tuple of (strv, length)
            return variant.unpack()
        else:
            # BUG FIX: printerr() takes a single sentence; the original
            # passed two positional arguments and raised TypeError.
            self.printerr('Unknown variant type: %s' % type_string)
            # NOTE(review): sys has no abrt() attribute, so this raises
            # AttributeError at runtime; kept as-is because the intended
            # behavior (abort vs. exit) is unclear -- confirm upstream.
            sys.abrt()
        return variant
    def variant_from_value(self, value):
        '''Wrap a Python str/int/bool/list value in a GLib.Variant.

        Returns None (after logging) for unsupported types.
        '''
        variant = None
        if type(value) == str:
            variant = GLib.Variant.new_string(value)
        elif type(value) == int:
            variant = GLib.Variant.new_int32(value)
        elif type(value) == bool:
            variant = GLib.Variant.new_boolean(value)
        elif type(value) == list:
            variant = GLib.Variant.new_strv(value)
        if variant is None:
            self.printerr('Unknown value type: %s' % type(value))
        return variant
    def get_variant(self, section, key):
        '''Return the cached GLib.Variant for (section, key), reading
        it from GSettings (and caching it) on first access.'''
        variant_key = self.__cache.get(section)
        if variant_key is not None:
            variant_value = variant_key.get(key)
            if variant_value is not None:
                return variant_value
        variant_value = self.__settings[section].get_value(key)
        if variant_key is None:
            variant_key = {}
        variant_key[key] = variant_value
        self.__cache[section] = variant_key
        return variant_value
    def get_default_variant(self, section, key):
        '''Return the schema default for (section, key), uncached.'''
        return self.__settings[section].get_default_value(key)
    def get_readable_value(self, section, key, variant):
        '''Unpack `variant`; dict:list becomes {id: DictItem} and
        dict:template a single DictItem.'''
        value = self.variant_to_value(variant)
        if section == 'dict' and key == 'list':
            dicts = {}
            for item in value:
                dict_item = DictItem(*item)
                dicts[dict_item.id] = dict_item
            value = dicts
        if section == 'dict' and key == 'template':
            value = DictItem(*value)
        return value
    def get_value(self, section, key):
        '''Cached read of (section, key) as a plain Python value.'''
        variant = self.get_variant(section, key)
        return self.get_readable_value(section, key, variant)
    def get_default_value(self, section, key):
        '''Schema default of (section, key) as a plain Python value.'''
        variant = self.get_default_variant(section, key)
        return self.get_readable_value(section, key, variant)
    def set_variant(self, section, key, variant):
        '''Write `variant` to GSettings and apply immediately.'''
        self.__settings[section].set_value(key, variant)
        self.__settings[section].apply()
    def set_value(self, section, key, value):
        '''Convert `value` to a variant and store it; no-op if the
        value type is unsupported.'''
        variant = self.variant_from_value(value)
        if variant is None:
            return
        self.set_variant(section, key, variant)
    def set_list_item(self, section, key, item, values):
        '''Insert or replace one entry inside a container setting.

        The container layout depends on (section, key): a string->strv
        dict for shortcuts and dict:files, a nested per-method keymap
        dict for typing rules, and an array of serialized DictItems for
        dict:list.
        '''
        variant = self.get_variant(section, key)
        if variant is None:
            # BUG FIX: was a NameError typo 'printerrr(...)'.
            self.printerr('%s:%s does not exist' % (section, key))
            return
        if section == 'shortcut':
            variant_dict = GLib.VariantDict(variant)
            array = []
            for value in values:
                array.append(GLib.Variant.new_string(value))
            varray = GLib.Variant.new_array(GLib.VariantType('s'), array)
            variant_dict.insert_value(item, varray)
            # GVariantDict uses GHashTable internally and
            # GVariantDict.end() does not support the order.
            self.set_variant(section, key, variant_dict.end())
            return
        if section == 'romaji-typing-rule' or \
           section == 'kana-typing-rule' or \
           section == 'thumb-typing-rule':
            (method, keymap_key) = item
            variant_dict = GLib.VariantDict(variant)
            keymap = variant_dict.lookup_value(method, None)
            keymap_dict = GLib.VariantDict(keymap)
            if section == 'thumb-typing-rule':
                # Thumb rules map a key to several strings; others to one.
                array = []
                for value in values:
                    array.append(GLib.Variant.new_string(value))
                vvalue = GLib.Variant.new_array(GLib.VariantType('s'), array)
            else:
                vvalue = GLib.Variant.new_string(values)
            keymap_dict.insert_value(keymap_key, vvalue)
            keymap = keymap_dict.end()
            variant_dict.insert_value(method, keymap)
            self.set_variant(section, key, variant_dict.end())
            return
        if section == 'dict' and key == 'files':
            variant_dict = GLib.VariantDict(variant)
            array = []
            for value in values:
                array.append(GLib.Variant.new_string(value))
            varray = GLib.Variant.new_array(GLib.VariantType('s'), array)
            variant_dict.insert_value(item, varray)
            self.set_variant(section, key, variant_dict.end())
            return
        if section == 'dict' and key == 'list':
            # Replace the entry with a matching id, or append a new one.
            array = []
            has_item = False
            for v in variant:
                dict_item = DictItem(*v)
                if dict_item.id == values.id:
                    array.append(GLib.Variant.new_variant(
                        DictItem.serialize(values)))
                    has_item = True
                else:
                    array.append(GLib.Variant.new_variant(
                        DictItem.serialize(dict_item)))
            if not has_item:
                array.append(GLib.Variant.new_variant(DictItem.serialize(values)))
            varray = GLib.Variant.new_array(GLib.VariantType('v'), array)
            self.set_variant(section, key, varray)
            return
    def delete_list_item(self, section, key, item):
        '''Remove one entry from a container setting (see set_list_item
        for the per-(section, key) container layouts).'''
        variant = self.get_variant(section, key)
        if variant is None:
            # BUG FIX: was a NameError typo 'printerrr(...)'.
            self.printerr('%s:%s does not exist' % (section, key))
            return
        if section == 'romaji-typing-rule' or \
           section == 'kana-typing-rule' or \
           section == 'thumb-typing-rule':
            (method, keymap_key) = item
            variant_dict = GLib.VariantDict(variant)
            keymap = variant_dict.lookup_value(method, None)
            keymap_dict = GLib.VariantDict(keymap)
            keymap_dict.remove(keymap_key)
            keymap = keymap_dict.end()
            variant_dict.insert_value(method, keymap)
            self.set_variant(section, key, variant_dict.end())
            return
        if section == 'dict' and key == 'files':
            variant_dict = GLib.VariantDict(variant)
            variant_dict.remove(item)
            self.set_variant(section, key, variant_dict.end())
            return
        if section == 'dict' and key == 'list':
            # Rebuild the array without the entry whose id matches.
            array = []
            for v in variant:
                dict_item = DictItem(*v)
                if dict_item.id == item:
                    continue
                else:
                    array.append(GLib.Variant.new_variant(
                        DictItem.serialize(dict_item)))
            varray = GLib.Variant.new_array(GLib.VariantType('v'), array)
            self.set_variant(section, key, varray)
            return
    def bind(self, section, key, object, property, flags):
        '''Bind a GSettings key directly to a GObject property.'''
        self.__settings[section].bind(key, object, property, flags)
    # Convert DBus.String to str
    # sys.getdefaultencoding() == 'utf-8' with pygtk2 but
    # sys.getdefaultencoding() == 'ascii' with gi gtk3
    # so the simple str(unicode_string) causes an error and need to use
    # unicode_string.encode('utf-8') instead.
    def str(self, uni):
        if uni is None:
            return None
        if type(uni) == str:
            return uni
        if type(uni) == unicode:
            return uni.encode('utf-8')
        return str(uni)
    # The simple unicode(string) causes an error and need to use
    # unicode(string, 'utf-8') instead.
    def unicode(self, string):
        if string is None:
            return None
        if type(string) == unicode:
            return string
        return unicode(string, 'utf-8')
    # If the parent process exited, the std io/out/error will be lost.
    @staticmethod
    def printerr(sentence):
        try:
            print >> sys.stderr, sentence
        except IOError:
            pass
|
ibus/ibus-anthy
|
setup/python2/prefs.py
|
Python
|
gpl-2.0
| 13,312
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service.edp import job_utils
from sahara.service import trusts as t
from sahara.swift import utils as su
from sahara.utils.openstack import keystone as k
# Module-level cache for the Keystone domain object looked up by
# domain_for_proxy(); populated lazily on first use.
PROXY_DOMAIN = None
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Configuration options controlling proxy-user handling for Swift access.
opts = [
    cfg.BoolOpt('use_domain_for_proxy_users',
                default=False,
                help='Enables Sahara to use a domain for creating temporary '
                     'proxy users to access Swift. If this is enabled '
                     'a domain must be created for Sahara to use.'),
    cfg.StrOpt('proxy_user_domain_name',
               default=None,
               help='The domain Sahara will use to create new proxy users '
                    'for Swift object access.'),
    cfg.ListOpt('proxy_user_role_names',
                default=['Member'],
                help='A list of the role names that the proxy user should '
                     'assume through trust for Swift object access.')
]
CONF.register_opts(opts)
def create_proxy_user_for_job_execution(job_execution):
    '''Create a proxy user and store its credentials on the job execution

    :param job_execution: The job execution model to update
    '''
    name = 'job_{0}'.format(job_execution.id)
    secret = proxy_user_create(name)
    trust = t.create_trust(trustor=k.client(),
                           trustee=k.client_for_proxy_user(name, secret),
                           role_names=CONF.proxy_user_role_names)
    job_configs = job_execution.job_configs.to_dict()
    job_configs['proxy_configs'] = {
        'proxy_username': name,
        'proxy_password': secret,
        'proxy_trust_id': trust
    }
    conductor.job_execution_update(context.ctx(), job_execution,
                                   {'job_configs': job_configs})
def delete_proxy_user_for_job_execution(job_execution):
    '''Delete the proxy user recorded on a JobExecution

    :param job_execution: The job execution with proxy user information
    :returns: An updated job_configs dictionary or None
    '''
    configs = job_execution.job_configs.get('proxy_configs')
    if configs is None:
        return None
    username = configs.get('proxy_username')
    trust_id = configs.get('proxy_trust_id')
    proxy_client = k.client_for_proxy_user(username,
                                           configs.get('proxy_password'),
                                           trust_id)
    t.delete_trust(proxy_client, trust_id)
    proxy_user_delete(username)
    # Strip the credentials from the stored config before returning it.
    update = job_execution.job_configs.to_dict()
    del update['proxy_configs']
    return update
def create_proxy_user_for_cluster(cluster):
    '''Create a proxy user and store its credentials on the cluster

    :param cluster: The cluster model to update
    '''
    # Already provisioned with proxy credentials; nothing to do.
    if cluster.cluster_configs.get('proxy_configs'):
        return cluster
    name = 'cluster_{0}'.format(cluster.id)
    secret = proxy_user_create(name)
    trust = t.create_trust(trustor=k.client(),
                           trustee=k.client_for_proxy_user(name, secret),
                           role_names=CONF.proxy_user_role_names)
    cluster_configs = cluster.cluster_configs.to_dict()
    cluster_configs['proxy_configs'] = {
        'proxy_username': name,
        'proxy_password': secret,
        'proxy_trust_id': trust
    }
    return conductor.cluster_update(context.ctx(), cluster,
                                    {'cluster_configs': cluster_configs})
def delete_proxy_user_for_cluster(cluster):
    '''Delete the proxy user recorded on a Cluster

    :param cluster: The cluster model with proxy user information
    '''
    configs = cluster.cluster_configs.get('proxy_configs')
    if configs is None:
        return
    username = configs.get('proxy_username')
    trust_id = configs.get('proxy_trust_id')
    proxy_client = k.client_for_proxy_user(username,
                                           configs.get('proxy_password'),
                                           trust_id)
    t.delete_trust(proxy_client, trust_id)
    proxy_user_delete(username)
    # Strip the credentials from the stored cluster config.
    update = {'cluster_configs': cluster.cluster_configs.to_dict()}
    del update['cluster_configs']['proxy_configs']
    conductor.cluster_update(context.ctx(), cluster, update)
def domain_for_proxy():
    '''Return the proxy domain or None

    If configured to use the proxy domain, this function will return that
    domain. If not configured to use the proxy domain, this function will
    return None. If the proxy domain can't be found this will raise an
    exception.

    :returns: A Keystone Domain object or None.
    :raises ConfigurationError: If the domain is requested but not specified.
    :raises NotFoundException: If the domain name is specified but cannot be
                               found.
    '''
    global PROXY_DOMAIN
    if CONF.use_domain_for_proxy_users is False:
        return None
    if CONF.proxy_user_domain_name is None:
        raise ex.ConfigurationError(_('Proxy domain requested but not '
                                      'specified.'))
    admin = k.client_for_admin()
    if PROXY_DOMAIN:
        return PROXY_DOMAIN
    matches = admin.domains.list(name=CONF.proxy_user_domain_name)
    if not matches:
        raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                   message=_('Failed to find domain %s'))
    # the domain name should be globally unique in Keystone
    if len(matches) > 1:
        raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                   message=_('Unexpected results found '
                                             'when searching for domain '
                                             '%s'))
    PROXY_DOMAIN = matches[0]
    return PROXY_DOMAIN
def job_execution_requires_proxy_user(job_execution):
    '''Returns True if the job execution requires a proxy user.'''
    # True when any string in `values` points into Swift (swift:// prefix).
    def _check_values(values):
        return any(value.startswith(
            su.SWIFT_INTERNAL_PREFIX) for value in values if (
                isinstance(value, six.string_types)))
    if CONF.use_domain_for_proxy_users is False:
        return False
    # Direct data source references on the execution itself.
    paths = [conductor.data_source_get(context.ctx(), job_execution.output_id),
             conductor.data_source_get(context.ctx(), job_execution.input_id)]
    if _check_values(ds.url for ds in paths if ds):
        return True
    # Swift URLs may also appear in configs, params, or args.
    if _check_values(six.itervalues(
            job_execution.job_configs.get('configs', {}))):
        return True
    if _check_values(six.itervalues(
            job_execution.job_configs.get('params', {}))):
        return True
    if _check_values(job_execution.job_configs.get('args', [])):
        return True
    # The job binaries (mains/libs) may live in Swift as well.
    job = conductor.job_get(context.ctx(), job_execution.job_id)
    if _check_values(main.url for main in job.mains):
        return True
    if _check_values(lib.url for lib in job.libs):
        return True
    # We did the simple checks, now if data_source referencing is
    # enabled and we have values that could be a name or uuid,
    # query for data_sources that match and contain a swift path
    by_name, by_uuid = job_utils.may_contain_data_source_refs(
        job_execution.job_configs)
    if by_name:
        names = tuple(job_utils.find_possible_data_source_refs_by_name(
            job_execution.job_configs))
        # do a query here for name in names and path starts with swift-prefix
        if names and conductor.data_source_count(
                context.ctx(),
                name=names,
                url=su.SWIFT_INTERNAL_PREFIX+'%') > 0:
            return True
    if by_uuid:
        uuids = tuple(job_utils.find_possible_data_source_refs_by_uuid(
            job_execution.job_configs))
        # do a query here for id in uuids and path starts with swift-prefix
        if uuids and conductor.data_source_count(
                context.ctx(),
                id=uuids,
                url=su.SWIFT_INTERNAL_PREFIX+'%') > 0:
            return True
    return False
def proxy_domain_users_list():
    '''Return a list of all users in the proxy domain.'''
    admin = k.client_for_admin()
    domain = domain_for_proxy()
    if not domain:
        return []
    return admin.users.list(domain=domain.id)
def proxy_user_create(username):
    '''Create a new user in the proxy domain

    Creates the username specified with a random password.

    :param username: The name of the new user.
    :returns: The password created for the user.
    '''
    admin = k.client_for_admin()
    domain = domain_for_proxy()
    # Random UUID doubles as a throwaway password.
    password = six.text_type(uuid.uuid4())
    admin.users.create(name=username, password=password, domain=domain.id)
    LOG.debug('Created proxy user {username}'.format(username=username))
    return password
def proxy_user_delete(username=None, user_id=None):
    '''Delete the user from the proxy domain.

    :param username: The name of the user to delete.
    :param user_id: The id of the user to delete, if provided this overrides
                    the username.
    :raises NotFoundException: If there is an error locating the user in the
                               proxy domain.
    '''
    admin = k.client_for_admin()
    if not user_id:
        # Resolve the username to an id inside the proxy domain.
        domain = domain_for_proxy()
        matches = admin.users.list(domain=domain.id, name=username)
        if not matches:
            raise ex.NotFoundException(value=username,
                                       message=_('Failed to find user %s'))
        if len(matches) > 1:
            raise ex.NotFoundException(value=username,
                                       message=_('Unexpected results found '
                                                 'when searching for user %s'))
        user_id = matches[0].id
    admin.users.delete(user_id)
    LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
|
henaras/sahara
|
sahara/utils/proxy.py
|
Python
|
apache-2.0
| 11,094
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from eventlet.green import subprocess
from st2common import log as logging
from st2common.util.green.shell import run_command
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.runners import WINDOWS_RUNNER_DEFAULT_ACTION_TIMEOUT
from st2actions.runners.windows_runner import BaseWindowsRunner
LOG = logging.getLogger(__name__)
# constants to lookup in runner_parameters
RUNNER_HOST = 'host'
RUNNER_USERNAME = 'username'
RUNNER_PASSWORD = 'password'
RUNNER_COMMAND = 'cmd'
RUNNER_TIMEOUT = 'timeout'
def get_runner():
    """Instantiate a WindowsCommandRunner with a fresh unique id."""
    runner_id = str(uuid.uuid4())
    return WindowsCommandRunner(runner_id)
class WindowsCommandRunner(BaseWindowsRunner):
    """
    Runner which executes commands on a remote Windows machine.
    """

    def __init__(self, runner_id, timeout=WINDOWS_RUNNER_DEFAULT_ACTION_TIMEOUT):
        """
        :param timeout: Action execution timeout in seconds.
        :type timeout: ``int``
        """
        super(WindowsCommandRunner, self).__init__(runner_id=runner_id)
        self._timeout = timeout

    def pre_run(self):
        super(WindowsCommandRunner, self).pre_run()

        # TODO :This is awful, but the way "runner_parameters" and other variables get
        # assigned on the runner instance is even worse. Those arguments should
        # be passed to the constructor.
        params = self.runner_parameters
        self._host = params.get(RUNNER_HOST, None)
        self._username = params.get(RUNNER_USERNAME, None)
        self._password = params.get(RUNNER_PASSWORD, None)
        self._command = params.get(RUNNER_COMMAND, None)
        self._timeout = params.get(RUNNER_TIMEOUT, self._timeout)

    def run(self, action_parameters):
        # Make sure the dependencies are available
        self._verify_winexe_exists()

        winexe_args = self._get_winexe_command_args(host=self._host,
                                                    username=self._username,
                                                    password=self._password,
                                                    command=self._command)

        # Note: We don't send anything over stdin, we just create an unused pipe
        # to avoid some obscure failures
        exit_code, stdout, stderr, timed_out = run_command(
            cmd=winexe_args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            timeout=self._timeout)

        error = None
        if timed_out:
            error = 'Action failed to complete in %s seconds' % (self._timeout)
        if exit_code != 0:
            # winexe diagnostics take precedence over the timeout message
            error = self._parse_winexe_error(stdout=stdout, stderr=stderr)

        output = {
            'stdout': stdout,
            'stderr': stderr,
            'exit_code': exit_code,
            'result': stdout
        }
        if error:
            output['error'] = error

        if exit_code == 0:
            status = LIVEACTION_STATUS_SUCCEEDED
        else:
            status = LIVEACTION_STATUS_FAILED
        return (status, output, None)
|
emedvedev/st2
|
st2actions/st2actions/runners/windows_command_runner.py
|
Python
|
apache-2.0
| 4,081
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper code to get user special chars specific for given language.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext as _, ugettext_lazy
# Hard coded list of special chars offered for every language.
SPECIAL_CHARS = ('→', '↵', '…')
# Names of hardcoded chars (used by get_char_description()).
CHAR_NAMES = {
    '→': ugettext_lazy('Insert tab character'),
    '↵': ugettext_lazy('Insert new line'),
    '…': ugettext_lazy('Insert horizontal ellipsis'),
    # Danda marks are only offered via EXTRA_CHARS for matching languages.
    '।': ugettext_lazy('Danda'),
    '॥': ugettext_lazy('Double danda'),
}
# Quotes definition for each language, based on CLDR data
SINGLE_OPEN = {
'ja': '『',
'zh': '『',
'ar': '’',
'fi': '’',
'fo': '’',
'lag': '’',
'rn': '’',
'se': '’',
'sn': '’',
'sv': '’',
'ur': '’',
'eo': '‘',
'vo': '‘',
'ALL': '‘',
'agq': '‚',
'bs': '‚',
'cs': '‚',
'de': '‚',
'dsb': '‚',
'et': '‚',
'ff': '‚',
'hr': '‚',
'hsb': '‚',
'is': '‚',
'ksh': '‚',
'lb': '‚',
'luy': '‚',
'mk': '‚',
'sk': '‚',
'sl': '‚',
'ast': '“',
'bm': '“',
'ca': '“',
'cy': '“',
'dyo': '“',
'es': '“',
'ewo': '“',
'fur': '“',
'ia': '“',
'it': '“',
'kab': '“',
'mg': '“',
'mua': '“',
'nnh': '“',
'nr': '“',
'nso': '“',
'pt': '“',
'sg': '“',
'sq': '“',
'ss': '“',
'ti': '“',
'tn': '“',
'ts': '“',
've': '“',
'bas': '„',
'bg': '„',
'ky': '„',
'lt': '„',
'os': '„',
'ru': '„',
'shi': '„',
'uk': '„',
'zgh': '„',
'el': '"',
'eu': '"',
'uz': '\'',
'yi': '\'',
'hy': '«',
'ka': '«',
'nmg': '«',
'pl': '«',
'ro': '«',
'yav': '«',
'he': '׳',
'am': '‹',
'az': '‹',
'be': '‹',
'br': '‹',
'fa': '‹',
'fr': '‹',
'gsw': '‹',
'jgo': '‹',
'kkj': '‹',
'rm': '‹',
'wae': '‹',
'hu': '»',
'kl': '›',
'ug': '›',
}
SINGLE_CLOSE = {
'ja': '』',
'zh': '』',
'eo': '’',
'vo': '’',
'ALL': '’',
'ar': '‘',
'bs': '‘',
'cs': '‘',
'de': '‘',
'dsb': '‘',
'et': '‘',
'hr': '‘',
'hsb': '‘',
'is': '‘',
'ksh': '‘',
'lb': '‘',
'luy': '‘',
'mk': '‘',
'sk': '‘',
'sl': '‘',
'sr': '‘',
'ur': '‘',
'ast': '”',
'bm': '”',
'ca': '”',
'cy': '”',
'dyo': '”',
'es': '”',
'ewo': '”',
'fur': '”',
'ia': '”',
'it': '”',
'kab': '”',
'mg': '”',
'mua': '”',
'nnh': '”',
'nr': '”',
'nso': '”',
'pt': '”',
'sg': '”',
'shi': '”',
'sq': '”',
'ss': '”',
'ti': '”',
'tn': '”',
'ts': '”',
've': '”',
'zgh': '”',
'bas': '“',
'bg': '“',
'ky': '“',
'lt': '“',
'os': '“',
'ru': '“',
'uk': '“',
'el': '"',
'eu': '"',
'uz': '\'',
'yi': '\'',
'hu': '«',
'he': '׳',
'kl': '‹',
'ug': '‹',
'hy': '»',
'ka': '»',
'nmg': '»',
'pl': '»',
'ro': '»',
'yav': '»',
'am': '›',
'az': '›',
'be': '›',
'br': '›',
'fa': '›',
'fr': '›',
'gsw': '›',
'jgo': '›',
'kkj': '›',
'rm': '›',
'wae': '›',
}
DOUBLE_OPEN = {
'eu': '"',
'uz': '"',
'yi': '"',
'ja': '「',
'zh': '「',
'cy': '‘',
'fur': '‘',
'ia': '‘',
'nr': '‘',
'nso': '‘',
'ss': '‘',
'ti': '‘',
'tn': '‘',
'ts': '‘',
've': '‘',
'am': '«',
'ast': '«',
'az': '«',
'bas': '«',
'be': '«',
'bm': '«',
'br': '«',
'ca': '«',
'dua': '«',
'dyo': '«',
'el': '«',
'es': '«',
'ewo': '«',
'fa': '«',
'fr': '«',
'gsw': '«',
'hy': '«',
'it': '«',
'jgo': '«',
'kab': '«',
'kkj': '«',
'ksf': '«',
'ky': '«',
'mg': '«',
'mua': '«',
'nb': '«',
'nn': '«',
'nnh': '«',
'os': '«',
'pt': '«',
'rm': '«',
'ru': '«',
'rw': '«',
'sg': '«',
'shi': '«',
'sq': '«',
'uk': '«',
'wae': '«',
'yav': '«',
'zgh': '«',
'he': '״',
'ar': '”',
'fi': '”',
'fo': '”',
'lag': '”',
'rn': '”',
'se': '”',
'sn': '”',
'sv': '”',
'ur': '”',
'eo': '“',
'vo': '“',
'ALL': '“',
'kl': '»',
'ug': '»',
'agq': '„',
'bg': '„',
'bs': '„',
'cs': '„',
'de': '„',
'dsb': '„',
'et': '„',
'ff': '„',
'hr': '„',
'hsb': '„',
'hu': '„',
'is': '„',
'ka': '„',
'ksh': '„',
'lb': '„',
'lt': '„',
'luy': '„',
'mk': '„',
'nmg': '„',
'pl': '„',
'sk': '„',
'sl': '„',
'sr': '„',
}
DOUBLE_CLOSE = {
'eu': '"',
'kk': '"',
'uz': '"',
'yi': '"',
'he': '״',
'cy': '’',
'fur': '’',
'ia': '’',
'nr': '’',
'nso': '’',
'ss': '’',
'ti': '’',
'tn': '’',
'ts': '’',
've': '’',
'ja': '」',
'zh': '」',
'kl': '«',
'ug': '«',
'eo': '”',
'vo': '”',
'ALL': '”',
'ar': '“',
'bg': '“',
'bs': '“',
'cs': '“',
'de': '“',
'dsb': '“',
'et': '“',
'hr': '“',
'hsb': '“',
'is': '“',
'ka': '“',
'ksh': '“',
'lb': '“',
'lt': '“',
'luy': '“',
'mk': '“',
'sk': '“',
'sl': '“',
'sr': '“',
'ur': '“',
'am': '»',
'ast': '»',
'az': '»',
'bas': '»',
'be': '»',
'bm': '»',
'br': '»',
'ca': '»',
'dua': '»',
'dyo': '»',
'el': '»',
'es': '»',
'ewo': '»',
'fa': '»',
'fr': '»',
'gsw': '»',
'hy': '»',
'it': '»',
'jgo': '»',
'kab': '»',
'kkj': '»',
'ksf': '»',
'ky': '»',
'mg': '»',
'mua': '»',
'nb': '»',
'nn': '»',
'nnh': '»',
'os': '»',
'pt': '»',
'rm': '»',
'ru': '»',
'rw': '»',
'sg': '»',
'shi': '»',
'sq': '»',
'uk': '»',
'wae': '»',
'yav': '»',
'zgh': '»',
}
HYPHEN_LANGS = frozenset((
'af', 'am', 'ar', 'ast', 'az', 'bg', 'bs', 'ca', 'cs', 'cy', 'da', 'de',
'dsb', 'dz', 'ee', 'el', 'en', 'eo', 'es', 'fa', 'fi', 'fr', 'fy', 'gd',
'gl', 'gu', 'he', 'hr', 'hsb', 'id', 'is', 'ja', 'ka', 'kk', 'kn', 'ko',
'ksh', 'ky', 'lb', 'lkt', 'lt', 'lv', 'mk', 'mn', 'mr', 'nl', 'os', 'pa',
'pl', 'pt', 'ro', 'ru', 'sk', 'sr', 'sv', 'ta', 'th', 'to', 'tr', 'uz',
'vi', 'vo', 'yi', 'zh',
))
EN_DASH_LANGS = frozenset((
'af', 'am', 'ar', 'ast', 'az', 'bg', 'bs', 'ca', 'cs', 'cy', 'da', 'de',
'dsb', 'dz', 'ee', 'el', 'en', 'eo', 'es', 'fi', 'fr', 'fy', 'gd', 'gl',
'gu', 'he', 'hr', 'hsb', 'hu', 'id', 'is', 'ka', 'kk', 'kn', 'ksh', 'ky',
'lb', 'lkt', 'lt', 'lv', 'mk', 'mn', 'mr', 'nb', 'nl', 'os', 'pa', 'pl',
'pt', 'ro', 'ru', 'sk', 'sr', 'sv', 'ta', 'th', 'to', 'tr', 'uk', 'uz',
'vi', 'vo', 'yi', 'zh',
))
EM_DASH_LANGS = frozenset((
'af', 'ar', 'ast', 'az', 'bg', 'bs', 'ca', 'cy', 'de', 'dsb', 'dz', 'ee',
'el', 'en', 'eo', 'es', 'fr', 'fy', 'gd', 'gl', 'gu', 'he', 'hr', 'hsb',
'id', 'is', 'it', 'ja', 'ka', 'kk', 'kn', 'ko', 'ksh', 'ky', 'lb', 'lkt',
'lt', 'lv', 'mk', 'mn', 'mr', 'nl', 'os', 'pa', 'pl', 'pt', 'ro', 'ru',
'sv', 'ta', 'th', 'to', 'tr', 'uz', 'vi', 'vo', 'yi', 'zh',
))
EXTRA_CHARS = {
'brx': ('।', '॥'),
}
def get_quote(code, data, name):
    """
    Returns special char for quote.

    Falls back to the 'ALL' entry when the language code has no
    specific quote character.
    """
    return name, data.get(code, data['ALL'])
def get_char_description(char):
    """Returns verbose description of a character."""
    try:
        return CHAR_NAMES[char]
    except KeyError:
        # Generic fallback label for characters without a curated name.
        return _('Insert character {0}').format(char)
def get_special_chars(language):
    """
    Yields (description, character) pairs of special characters for a language.
    """
    # Characters offered for every language.
    for char in SPECIAL_CHARS:
        yield get_char_description(char), char
    # Reduce e.g. 'pt_BR' or 'sr-latn' to the base language code.
    code = language.code.replace('_', '-').split('-')[0]
    # Language-specific extra characters, if any.
    for char in EXTRA_CHARS.get(code, ()):
        yield get_char_description(char), char
    yield get_quote(code, DOUBLE_OPEN, _('Opening double quote'))
    yield get_quote(code, DOUBLE_CLOSE, _('Closing double quote'))
    yield get_quote(code, SINGLE_OPEN, _('Opening single quote'))
    yield get_quote(code, SINGLE_CLOSE, _('Closing single quote'))
    if code in HYPHEN_LANGS:
        yield _('Hyphen'), '-'
    if code in EN_DASH_LANGS:
        yield _('En dash'), '–'
    if code in EM_DASH_LANGS:
        yield _('Em dash'), '—'
|
jitka/weblate
|
weblate/trans/specialchars.py
|
Python
|
gpl-3.0
| 9,767
|
# Legacy Django URLconf: ``django.conf.urls.defaults`` and ``patterns()`` were
# removed in Django 1.8 and earlier deprecated; this file targets old Django.
from django.conf.urls.defaults import *
urlpatterns = patterns('content.views',
    url(r'^gallery', 'gallery', name="gallery"),
    # NOTE(review): "pagetree" routes to the 'gallery' view — looks suspicious;
    # confirm it should not point to a dedicated 'pagetree' view instead.
    url(r'^pagetree', 'gallery', name="pagetree"),
    # Unanchored catch-all: any remaining path falls through to the page view.
    url(r'(.*)', 'page', name="page"),
)
|
jholster/django-birdie
|
content/urls.py
|
Python
|
bsd-3-clause
| 222
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Iterable, Iterator, Sequence, Tuple
from google.ads.googleads.v10.services.types import experiment_service
from google.rpc import status_pb2 # type: ignore
class ListExperimentAsyncErrorsPager:
    """A pager for iterating through ``list_experiment_async_errors`` requests.
    This class thinly wraps an initial
    :class:`google.ads.googleads.v10.services.types.ListExperimentAsyncErrorsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``errors`` field.
    If there are more pages, the ``__iter__`` method will make additional
    ``ListExperimentAsyncErrors`` requests and continue to iterate
    through the ``errors`` field on the
    corresponding responses.
    All the usual :class:`google.ads.googleads.v10.services.types.ListExperimentAsyncErrorsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(
        self,
        method: Callable[
            ..., experiment_service.ListExperimentAsyncErrorsResponse
        ],
        request: experiment_service.ListExperimentAsyncErrorsRequest,
        response: experiment_service.ListExperimentAsyncErrorsResponse,
        metadata: Sequence[Tuple[str, str]] = (),
    ):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (:class:`google.ads.googleads.v10.services.types.ListExperimentAsyncErrorsRequest`):
                The initial request object.
            response (:class:`google.ads.googleads.v10.services.types.ListExperimentAsyncErrorsResponse`):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so the page_token mutation in `pages` does not
        # modify the caller's request object.
        self._request = experiment_service.ListExperimentAsyncErrorsRequest(
            request
        )
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)
    @property
    def pages(
        self,
    ) -> Iterable[experiment_service.ListExperimentAsyncErrorsResponse]:
        """Yield each response page, fetching the next one lazily while a
        next_page_token remains."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(
                self._request, metadata=self._metadata
            )
            yield self._response
    def __iter__(self) -> Iterator[status_pb2.Status]:
        # Flatten the ``errors`` field across all pages.
        for page in self.pages:
            yield from page.errors
    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/services/experiment_service/pagers.py
|
Python
|
apache-2.0
| 3,497
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import spp_net
fX = theano.config.floatX
def test_spatial_pyramid_pooling_node_serialization():
    # Round-trip the node through treeano's serialization check.
    tn.check_serialization(spp_net.SpatialPyramidPoolingNode("a"))
def test_spatial_pyramid_pooling_node():
    # only testing size
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(3, 2, 17, 12)),
         spp_net.SpatialPyramidPoolingNode("spp", spp_levels=[(1, 1),
                                                              (2, 2),
                                                              (3, 4),
                                                              (5, 5),
                                                              (17, 12)])]
    ).network()
    # Expected output is flattened: channels (2) times the total number of
    # pooling bins across all pyramid levels.
    ans_shape = (3, 2 * (1 * 1 + 2 * 2 + 3 * 4 + 5 * 5 + 17 * 12))
    fn = network.function(["i"], ["s"])
    x = np.random.randn(3, 2, 17, 12).astype(fX)
    res = fn(x)[0]
    # Both the statically-inferred shape and the computed result must match.
    nt.assert_equal(network["s"].get_variable("default").shape,
                    ans_shape)
    nt.assert_equal(res.shape,
                    ans_shape)
# NOTE: this currently doesn't work -- presumably because the pooling regions
# cannot be computed from fully symbolic spatial dimensions (hence the
# expected AssertionError below); confirm the exact cause upstream.
@nt.raises(AssertionError)
def test_spatial_pyramid_pooling_node_symbolic():
    # only testing size
    # Same network as above, but with symbolic (None) batch and spatial dims.
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(None, 2, None, None)),
         spp_net.SpatialPyramidPoolingNode("spp", spp_levels=[(1, 1),
                                                              (2, 2),
                                                              (3, 4),
                                                              (5, 5),
                                                              (17, 12)])]
    ).network()
    fn = network.function(["i"], ["s"])
    ans_shape = (3, 2 * (1 * 1 + 2 * 2 + 3 * 4 + 5 * 5 + 17 * 12))
    x1 = np.random.randn(3, 2, 17, 12).astype(fX)
    nt.assert_equal(ans_shape,
                    fn(x1)[0].shape)
    # A second, differently-sized input exercises the symbolic spatial dims.
    x2 = np.random.randn(100, 2, 177, 123).astype(fX)
    nt.assert_equal(ans_shape,
                    fn(x2)[0].shape)
|
nsauder/treeano
|
treeano/sandbox/nodes/tests/spp_net_test.py
|
Python
|
apache-2.0
| 2,148
|
#!/usr/bin/python2
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration Option Checker.
Script to ensure that all configuration options for the Chrome EC are defined
in config.h.
"""
from __future__ import print_function
import os
import re
import subprocess
class Line(object):
  """A single changed line from git diff output.

  Attributes:
    line_num: The integer line number that this line appears in the file.
    string: The literal string of this line.
    line_type: '+' or '-' indicating if this line was an addition or
      deletion.
  """

  def __init__(self, line_num, string, line_type):
    """Stores the line number, raw text, and diff marker for this line."""
    # Keep the raw diff payload; no normalization happens here.
    self.line_num, self.string = line_num, string
    self.line_type = line_type
class Hunk(object):
  """One hunk of a git diff.

  Attributes:
    filename: The name of the file that this hunk belongs to.
    lines: A list of Line objects that are a part of this hunk.
  """

  def __init__(self, filename, lines):
    """Stores the owning filename and the hunk's Line objects."""
    self.filename, self.lines = filename, lines
# Master file which is supposed to include all CONFIG_xxxx descriptions.
CONFIG_FILE = 'include/config.h'
# Specific files which the checker should ignore.
# (This checker itself mentions CONFIG_* tokens in strings and regexes.)
WHITELIST = [CONFIG_FILE, 'util/config_option_check.py']
def obtain_current_config_options():
  """Obtains current config options from include/config.h.

  Scans through the master config file defined in CONFIG_FILE for all CONFIG_*
  options that are either #define'd or #undef'd.

  Returns:
    A list of the unique config options in the master CONFIG_FILE, in order
    of first appearance.
  """
  config_option_re = re.compile(r'^#(define|undef)\s+(CONFIG_[A-Z0-9_]+)')
  config_options = []
  with open(CONFIG_FILE, 'r') as config_file:
    for line in config_file:
      match = config_option_re.search(line)
      if not match:
        continue
      option = match.group(2)
      # Preserve first-seen order while avoiding duplicates.
      if option not in config_options:
        config_options.append(option)
  return config_options
def obtain_config_options_in_use():
  """Obtains all the config options in use in the repo.
  Scans through the entire repo looking for all CONFIG_* options actively used.
  Returns:
    options_in_use: A set of all the config options in use in the repo.
  """
  file_list = []
  cwd = os.getcwd()
  config_option_re = re.compile(r'\b(CONFIG_[a-zA-Z0-9_]+)')
  config_debug_option_re = re.compile(r'\b(CONFIG_DEBUG_[a-zA-Z0-9_]+)')
  options_in_use = set()
  for (dirpath, dirnames, filenames) in os.walk(cwd, topdown=True):
    # Ignore the build and private directories (taken from .gitignore)
    if 'build' in dirnames:
      dirnames.remove('build')
    if 'private' in dirnames:
      dirnames.remove('private')
    for f in filenames:
      # Ignore hidden files.
      if f.startswith('.'):
        continue
      # Only consider C source, assembler, and Make-style files.
      if (os.path.splitext(f)[1] in ('.c', '.h', '.inc', '.S', '.mk') or
          'Makefile' in f):
        file_list.append(os.path.join(dirpath, f))
  # Search through each file and build a set of the CONFIG_* options being
  # used.
  for f in file_list:
    # Skip the master config file itself; definitions there don't count as use.
    if CONFIG_FILE in f:
      continue
    with open(f, 'r') as cur_file:
      for line in cur_file:
        match = config_option_re.findall(line)
        if match:
          for option in match:
            # Only count occurrences outside comments.
            if not in_comment(f, line, option):
              if option not in options_in_use:
                options_in_use.add(option)
  # Since debug options can be turned on at any time, assume that they are
  # always in use in case any aren't being used.
  with open(CONFIG_FILE, 'r') as config_file:
    for line in config_file:
      match = config_debug_option_re.findall(line)
      if match:
        for option in match:
          if not in_comment(CONFIG_FILE, line, option):
            if option not in options_in_use:
              options_in_use.add(option)
  return options_in_use
def print_missing_config_options(hunks, config_options):
  """Searches thru all the changes in hunks for missing options and prints them.
  Args:
    hunks: A list of Hunk objects which represent the hunks from the git
      diff output.
    config_options: A list of all the config options in the master CONFIG_FILE.
  Returns:
    missing_config_option: A boolean indicating if any CONFIG_* options
    are missing from the master CONFIG_FILE in this commit or if any CONFIG_*
    options removed are no longer being used in the repo.
  """
  missing_config_option = False
  print_banner = True
  deprecated_options = set()
  # Determine longest CONFIG_* length to be used for formatting.
  max_option_length = max(len(option) for option in config_options)
  config_option_re = re.compile(r'\b(CONFIG_[a-zA-Z0-9_]+)')
  # Search for all CONFIG_* options in use in the repo.
  options_in_use = obtain_config_options_in_use()
  # Check each hunk's line for a missing config option.
  for h in hunks:
    for l in h.lines:
      # Check for the existence of a CONFIG_* in the line.
      match = config_option_re.findall(l.string)
      if not match:
        continue
      # At this point, an option was found in the line. However, we need to
      # verify that it is not within a comment.
      violations = set()
      for option in match:
        if not in_comment(h.filename, l.string, option):
          # Since the CONFIG_* option is not within a comment, we've found a
          # violation. We now need to determine if this line is a deletion or
          # not. For deletions, we will need to verify if this CONFIG_* option
          # is no longer being used in the entire repo.
          # Bug fix: compare string values with '==' instead of 'is'; identity
          # comparison of str literals only works by accident of interning.
          if l.line_type == '-':
            if option not in options_in_use and option in config_options:
              deprecated_options.add(option)
          else:
            violations.add(option)
      # Check to see if the CONFIG_* option is in the config file and print the
      # violations.
      for option in match:
        if option not in config_options and option in violations:
          # Print the banner once.
          if print_banner:
            print('The following config options were found to be missing '
                  'from %s.\n'
                  'Please add new config options there along with '
                  'descriptions.\n\n' % CONFIG_FILE)
            print_banner = False
          missing_config_option = True
          # Print the missing config option.
          print('> %-*s %s:%s' % (max_option_length, option,
                                  h.filename,
                                  l.line_num))
  if deprecated_options:
    print('\n\nThe following config options are being removed and also appear'
          ' to be the last uses\nof that option. Please remove these '
          'options from %s.\n\n' % CONFIG_FILE)
    for option in deprecated_options:
      print('> %s' % option)
    missing_config_option = True
  return missing_config_option
def in_comment(filename, line, substr):
  """Checks if given substring appears in a comment.
  Args:
    filename: The filename where this line is from. This is used to determine
      what kind of comments to look for.
    line: String of line to search in.
    substr: Substring to search for in the line.
  Returns:
    is_in_comment: Boolean indicating if substr was in a comment.
  """
  c_style_ext = ('.c', '.h', '.inc', '.S')
  # Bug fix: ('.mk') is just the string '.mk' -- the 'in' test below then does
  # substring matching (e.g. '.m' in '.mk' is True, and '' matches anything).
  # A one-element tuple requires a trailing comma.
  make_style_ext = ('.mk',)
  is_in_comment = False
  extension = os.path.splitext(filename)[1]
  substr_idx = line.find(substr)
  # Different files have different comment syntax; Handle appropriately.
  if extension in c_style_ext:
    beg_comment_idx = line.find('/*')
    end_comment_idx = line.find('*/')
    if end_comment_idx == -1:
      # No terminator on this line; treat the comment as running to EOL.
      end_comment_idx = len(line)
    if beg_comment_idx == -1:
      # Check to see if this line is from a multi-line comment.
      if line.lstrip().startswith('* '):
        # It _seems_ like it is.
        is_in_comment = True
    else:
      # Check to see if its actually inside the comment.
      if beg_comment_idx < substr_idx < end_comment_idx:
        is_in_comment = True
  elif extension in make_style_ext or 'Makefile' in filename:
    beg_comment_idx = line.find('#')
    # Ignore everything to the right of the hash.
    if beg_comment_idx < substr_idx and beg_comment_idx != -1:
      is_in_comment = True
  return is_in_comment
def get_hunks():
  """Gets the hunks of the most recent commit.
  States:
    new_file: Searching for a new file in the git diff.
    filename_search: Searching for the filename of this hunk.
    hunk: Searching for the beginning of a new hunk.
    lines: Counting line numbers and searching for changes.
  Returns:
    hunks: A list of Hunk objects which represent the hunks in the git diff
    output.
  """
  diff = []
  hunks = []
  hunk_lines = []
  line = ''
  filename = ''
  i = 0
  line_num = 0
  # Regex patterns
  new_file_re = re.compile(r'^diff --git')
  filename_re = re.compile(r'^[+]{3} (.*)')
  hunk_line_num_re = re.compile(r'^@@ -[0-9]+,[0-9]+ \+([0-9]+),[0-9]+ @@.*')
  line_re = re.compile(r'^([+| |-])(.*)')
  # Get the diff output.
  cmd = 'git diff --cached -GCONFIG_* --no-prefix --no-ext-diff HEAD~1'
  diff = subprocess.check_output(cmd.split()).split('\n')
  line = diff[0]
  current_state = 'new_file'
  # Bug fix throughout this state machine: state/marker strings are compared
  # with '=='/'!=' instead of 'is'/'is not'; identity comparison of string
  # literals is a CPython interning accident (SyntaxWarning on Python 3.8+).
  while True:
    # Search for the beginning of a new file.
    if current_state == 'new_file':
      match = new_file_re.search(line)
      if match:
        current_state = 'filename_search'
    # Search the diff output for a file name.
    elif current_state == 'filename_search':
      # Search for a file name.
      match = filename_re.search(line)
      if match:
        filename = match.groups(1)[0]
        if filename in WHITELIST:
          # Skip the file if it's whitelisted.
          current_state = 'new_file'
        else:
          current_state = 'hunk'
    # Search for a hunk. Each hunk starts with a line describing the line
    # numbers in the file.
    elif current_state == 'hunk':
      hunk_lines = []
      match = hunk_line_num_re.search(line)
      if match:
        # Extract the line number offset.
        line_num = int(match.groups(1)[0])
        current_state = 'lines'
    # Start looking for changes.
    elif current_state == 'lines':
      # Check if state needs updating.
      new_hunk = hunk_line_num_re.search(line)
      new_file = new_file_re.search(line)
      if new_hunk:
        current_state = 'hunk'
        hunks.append(Hunk(filename, hunk_lines))
        continue
      elif new_file:
        current_state = 'new_file'
        hunks.append(Hunk(filename, hunk_lines))
        continue
      match = line_re.search(line)
      if match:
        line_type = match.groups(1)[0]
        # We only care about modifications.
        if line_type != ' ':
          hunk_lines.append(Line(line_num, match.groups(2)[1], line_type))
        # Deletions don't count towards the line numbers.
        if line_type != '-':
          line_num += 1
    # Advance to the next line
    try:
      i += 1
      line = diff[i]
    except IndexError:
      # We've reached the end of the diff. Return what we have.
      if hunk_lines:
        hunks.append(Hunk(filename, hunk_lines))
      return hunks
def main():
  """Searches through committed changes for missing config options.
  Checks through committed changes for CONFIG_* options. Then checks to make
  sure that all CONFIG_* options used are defined in include/config.h. Finally,
  reports any missing config options.
  """
  # Obtain the hunks of the commit to search through.
  hunks = get_hunks()
  # Obtain config options from include/config.h.
  config_options = obtain_current_config_options()
  # Find any missing config options from the hunks and print them.
  missing_opts = print_missing_config_options(hunks, config_options)
  if missing_opts:
    print('\nIt may also be possible that you have a typo.')
    # Non-zero exit so a pre-submit hook invoking this script fails.
    os.sys.exit(1)
if __name__ == '__main__':
  main()
|
akappy7/ChromeOS_EC_LED_Diagnostics
|
util/config_option_check.py
|
Python
|
bsd-3-clause
| 12,167
|
# -*- coding: utf-8 -*-
# Requires Python 2.7 or later
import io, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
# Keyword sets for the hypertext lexer: HTML tags/attributes, then embedded
# JavaScript and VBScript keywords (one bytes blob per keyword-list slot).
keywordsHTML = [
    b"b body content head href html link meta "
    b"name rel script strong title type xmlns",
    b"function",
    b"sub"
]
# Keyword list for the Perl lexer (single keyword-list slot).
keywordsPerl = [
    b"NULL __FILE__ __LINE__ __PACKAGE__ __DATA__ __END__ AUTOLOAD "
    b"BEGIN CORE DESTROY END EQ GE GT INIT LE LT NE CHECK abs accept "
    b"alarm and atan2 bind binmode bless caller chdir chmod chomp chop "
    b"chown chr chroot close closedir cmp connect continue cos crypt "
    b"dbmclose dbmopen defined delete die do dump each else elsif endgrent "
    b"endhostent endnetent endprotoent endpwent endservent eof eq eval "
    b"exec exists exit exp fcntl fileno flock for foreach fork format "
    b"formline ge getc getgrent getgrgid getgrnam gethostbyaddr gethostbyname "
    b"gethostent getlogin getnetbyaddr getnetbyname getnetent getpeername "
    b"getpgrp getppid getpriority getprotobyname getprotobynumber getprotoent "
    b"getpwent getpwnam getpwuid getservbyname getservbyport getservent "
    b"getsockname getsockopt glob gmtime goto grep gt hex if index "
    b"int ioctl join keys kill last lc lcfirst le length link listen "
    b"local localtime lock log lstat lt map mkdir msgctl msgget msgrcv "
    b"msgsnd my ne next no not oct open opendir or ord our pack package "
    b"pipe pop pos print printf prototype push quotemeta qu "
    b"rand read readdir readline readlink readpipe recv redo "
    b"ref rename require reset return reverse rewinddir rindex rmdir "
    b"scalar seek seekdir select semctl semget semop send setgrent "
    b"sethostent setnetent setpgrp setpriority setprotoent setpwent "
    b"setservent setsockopt shift shmctl shmget shmread shmwrite shutdown "
    b"sin sleep socket socketpair sort splice split sprintf sqrt srand "
    b"stat study sub substr symlink syscall sysopen sysread sysseek "
    b"system syswrite tell telldir tie tied time times truncate "
    b"uc ucfirst umask undef unless unlink unpack unshift untie until "
    b"use utime values vec wait waitpid wantarray warn while write "
    b"xor "
    b"given when default break say state UNITCHECK __SUB__ fc"
]
class TestLexers(unittest.TestCase):
    """Styles example documents with various lexers and compares the result
    against previously recorded ``.styled`` golden files."""
    def setUp(self):
        self.xite = Xite.xiteFrame
        self.ed = self.xite.ed
        self.ed.ClearAll()
        self.ed.EmptyUndoBuffer()
    def AsStyled(self, withWindowsLineEnds):
        """Return the document as bytes with {style} markers inserted at
        every style transition."""
        text = self.ed.Contents()
        data = io.BytesIO()
        prevStyle = -1
        for o in range(self.ed.Length):
            styleNow = self.ed.GetStyleAt(o)
            if styleNow != prevStyle:
                # Emit a marker only when the style changes to keep the
                # golden files compact.
                styleBuf = "{%0d}" % styleNow
                data.write(styleBuf.encode('utf-8'))
                prevStyle = styleNow
            data.write(text[o:o+1])
        if withWindowsLineEnds:
            return data.getvalue().replace(b"\n", b"\r\n")
        else:
            return data.getvalue()
    def LexExample(self, name, lexerName, keywords, fileMode="b"):
        """Lex examples/<name> with lexerName and compare the styling against
        the recorded .styled file; on mismatch a .new file is written so the
        golden file can be inspected/updated."""
        self.ed.ClearAll()
        self.ed.EmptyUndoBuffer()
        self.ed.SetCodePage(65001)
        self.ed.LexerLanguage = lexerName
        mask = 0xff
        for i in range(len(keywords)):
            self.ed.SetKeyWords(i, keywords[i])
        nameExample = os.path.join("examples", name)
        namePrevious = nameExample +".styled"
        nameNew = nameExample +".new"
        with open(nameExample, "rb") as f:
            prog = f.read()
        if fileMode == "t" and sys.platform == "win32":
            # Normalize text-mode examples to Unix line ends before styling.
            prog = prog.replace(b"\r\n", b"\n")
        BOM = b"\xEF\xBB\xBF"
        if prog.startswith(BOM):
            prog = prog[len(BOM):]
        lenDocument = len(prog)
        self.ed.AddText(lenDocument, prog)
        self.ed.Colourise(0, lenDocument)
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.ed.EndStyled, lenDocument)
        try:
            with open(namePrevious, "rb") as f:
                prevStyled = f.read()
            if fileMode == "t" and sys.platform == "win32":
                # NOTE(review): this replace is a no-op (prog was already
                # normalized above); it may have been intended to normalize
                # prevStyled instead -- confirm against the .styled fixtures
                # before changing.
                prog = prog.replace(b"\r\n", b"\n")
        except EnvironmentError:
            # Fix: use a bytes sentinel so the comparison below compares
            # values of the same type (progStyled is bytes).
            prevStyled = b""
        progStyled = self.AsStyled(fileMode == "t" and sys.platform == "win32")
        if progStyled != prevStyled:
            with open(nameNew, "wb") as f:
                f.write(progStyled)
            print("Incorrect lex for " + name)
            print(progStyled)
            print(prevStyled)
            self.assertEqual(progStyled, prevStyled)
            # The whole file doesn't parse like it did before so don't try line by line
            # as that is likely to fail many times.
            return
        if fileMode == "b": # "t" files are large and this is a quadratic check
            # Try partial lexes from the start of every line which should all be identical.
            for line in range(self.ed.LineCount):
                lineStart = self.ed.PositionFromLine(line)
                self.ed.StartStyling(lineStart, mask)
                self.assertEqual(self.ed.EndStyled, lineStart)
                self.ed.Colourise(lineStart, lenDocument)
                progStyled = self.AsStyled(fileMode == "t" and sys.platform == "win32")
                if progStyled != prevStyled:
                    # Fix: 'line' is an int; str() avoids a TypeError while
                    # reporting the failure.
                    print("Incorrect partial lex for " + name + " at line " + str(line))
                    with open(nameNew, "wb") as f:
                        f.write(progStyled)
                    self.assertEqual(progStyled, prevStyled)
                    # Give up after one failure
                    return
    # Test lexing just once from beginning to end in text form.
    # This is used for test cases that are too long to be exhaustively tested by lines and
    # may be sensitive to line ends so are tested as if using Unix LF line ends.
    def LexLongCase(self, name, lexerName, keywords, fileMode="b"):
        # Always forces text mode ("t") regardless of the fileMode argument.
        self.LexExample(name, lexerName, keywords, "t")
    def testCXX(self):
        self.LexExample("x.cxx", b"cpp", [b"int"])
    def testPython(self):
        self.LexExample("x.py", b"python",
            [b"class def else for if import in print return while"])
    def testHTML(self):
        self.LexExample("x.html", b"hypertext", keywordsHTML)
    def testASP(self):
        self.LexExample("x.asp", b"hypertext", keywordsHTML)
    def testPHP(self):
        self.LexExample("x.php", b"hypertext", keywordsHTML)
    def testVB(self):
        self.LexExample("x.vb", b"vb", [b"as dim or string"])
    def testLua(self):
        self.LexExample("x.lua", b"lua", [b"function end"])
    def testNim(self):
        self.LexExample("x.nim", b"nim", [b"else end if let proc"])
    def testRuby(self):
        self.LexExample("x.rb", b"ruby", [b"class def end"])
    def testPerl(self):
        self.LexExample("x.pl", b"perl", keywordsPerl)
    def testPerl52(self):
        self.LexLongCase("perl-test-5220delta.pl", b"perl", keywordsPerl)
    def testPerlPrototypes(self):
        self.LexLongCase("perl-test-sub-prototypes.pl", b"perl", keywordsPerl)
    def testD(self):
        self.LexExample("x.d", b"d",
            [b"keyword1", b"keyword2", b"", b"keyword4", b"keyword5",
             b"keyword6", b"keyword7"])
    def testTCL(self):
        self.LexExample("x.tcl", b"tcl", [b"proc set socket vwait"])
if __name__ == '__main__':
Xite.main("lexTests")
|
apmckinlay/csuneido
|
vs2019scintilla/test/lexTests.py
|
Python
|
gpl-2.0
| 6,652
|
import logging
import subprocess
from subprocess import CalledProcessError
from .runnable import Runnable
class Shell(Runnable):
    """Runs a shell command, optionally silencing its output streams.

    :param str cmd: Command with arguments to be run.
    :param str title: A title to be displayed in log outputs.
                      If None, :attr:`cmd` will be shown.
    :param bool stdout: Whether or not to display standard output.
                        Default is *False*.
    :param bool stderr: Whether or not to display standard error.
                        Default is *True*.
    """
    def __init__(self, cmd, title=None, stdout=False, stderr=True):
        self._cmd = self._redirect_outputs(cmd, stdout, stderr)
        self._title = title
        self._stdout = stdout
        self._stderr = stderr
    def _redirect_outputs(self, cmd, stdout, stderr):
        """Append shell redirections that discard disabled streams.

        Avoids transferring unnecessary output through the network and,
        if local, keeps the output clean.
        """
        pieces = [cmd]
        if not stdout:
            pieces.append(' 1>/dev/null')
        if not stderr:
            pieces.append(' 2>/dev/null')
        return ''.join(pieces)
    @property
    def command(self):
        """The command to be run in shell.

        :rtype: str
        """
        return self._cmd
    @property
    def title(self):
        """The display title; falls back to :func:`command` when unset.

        :rtype: str
        """
        return self._cmd if self._title is None else self._title
    def run_pre(self):
        # Log before execution so long-running commands are traceable.
        logging.getLogger('shell').debug('beginning:' + self.title)
    def run(self):
        """If the command exits 0, returns 0 or the stdout/stderr output.
        Otherwise, raises CalledProcessError.

        :return: shell return code (0) or output.
        :rtype: int or str
        :raises CalledProcessError: if return code is not 0.
        """
        self.run_pre()
        options = {'shell': True, 'universal_newlines': True}
        if self._stderr:
            # Fold stderr into the captured stream.
            options['stderr'] = subprocess.STDOUT
        if self._stdout or self._stderr:
            result = subprocess.check_output(self._cmd, **options).strip()
        else:
            # Nothing to capture: just check the return code (0 on success).
            result = subprocess.check_call(self._cmd, **options)
        self.run_pos()
        return result
    def was_successful(self):
        """Runs the command and returns whether the return code is 0.

        :return: whether the command was successful.
        :rtype: bool
        """
        try:
            self.run()
        except CalledProcessError:
            return False
        return True
    def has_failed(self):
        """Runs the command and returns whether the return code differs from 0.

        :return: whether the command has failed.
        :rtype: bool
        """
        return not self.was_successful()
    def run_pos(self):
        logging.getLogger('shell').debug('finished:' + self.title)
|
cemsbr/expyrimenter
|
expyrimenter/shell.py
|
Python
|
gpl-3.0
| 2,943
|
#!/usr/bin/env python3
import os
import yaml
class Service():
    """One service entry mirroring a row of service_list.yaml.

    Holds the port, logging flag, publish frequency, and an optional
    decimation value (None when the YAML row omits it).
    """
    def __init__(self, port, should_log, frequency, decimation=None):
        # Store the YAML fields verbatim.
        self.port, self.should_log = port, should_log
        self.frequency, self.decimation = frequency, decimation
# Path of the YAML manifest, resolved relative to this module's directory.
service_list_path = os.path.join(os.path.dirname(__file__), "service_list.yaml")
# name -> Service, populated from the YAML manifest at import time.
service_list = {}
with open(service_list_path, "r") as f:
    for k, v in yaml.safe_load(f).items():
        decimation = None
        # A fourth element, when present, carries the optional decimation.
        if len(v) == 4:
            decimation = v[3]
        service_list[k] = Service(v[0], v[1], v[2], decimation)
if __name__ == "__main__":
    # Running as a script emits a C header describing all services.
    print("/* THIS IS AN AUTOGENERATED FILE, PLEASE EDIT service_list.yaml */")
    print("#ifndef __SERVICES_H")
    print("#define __SERVICES_H")
    print("struct service { char name[0x100]; int port; bool should_log; int frequency; int decimation; };")
    print("static struct service services[] = {")
    for k, v in service_list.items():
        # decimation of -1 encodes "no decimation" on the C side.
        print(' { .name = "%s", .port = %d, .should_log = %s, .frequency = %d, .decimation = %d },' % (k, v.port, "true" if v.should_log else "false", v.frequency, -1 if v.decimation is None else v.decimation))
    print("};")
    print("#endif")
|
vntarasov/openpilot
|
cereal/services.py
|
Python
|
mit
| 1,161
|
from collections import OrderedDict
from sympy import true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
An object in an N-dimensional space.
The elements of a vector can be anything as long as they support the
comparison operators (`__eq__`, `__lt__`, ...). Also, the `__sub__`
operator must be available.
Notes
-----
1) Comparison of a Vector with a scalar
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to as many vector
entries as necessary. For example:
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparison of Vectors whose elements are SymPy expressions
We treat vectors of SymPy expressions as a very special case. When we
compare two elements, it might not be possible to determine the truth value
of the relation. For example, the truth value of `3*i < 4*j` cannot be
determined (unless some information about `i` and `j` is available). In
some cases, however, the comparison is feasible; for example, `i + 4 < i`
is definitely False. A sufficient condition for two Vectors to be
comparable is that their pair-wise indices are affine functions of the same
variables, with identical coefficient. If the Vector is instantiated
passing the keyword argument ``smart = True``, some manipulation will be
attempted to infer the truth value of a non-trivial symbolic relation. This
increases the cost of the comparison (and not always an answer may be
derived), so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
def __gt__(self, other):
# This method is "symmetric" to `__lt__`, but instead of just returning
# `other.__lt__(self)` we implement it explicitly because this way we
# can avoid computing the distance in the special case `other is 0`
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val > 0:
return True
elif val < 0:
return False
except TypeError:
if self.smart:
if (i > 0) == true:
return True
elif (i >= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i > 0` is generally False, so __gt__ must
# return False
return False
elif (i <= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
# though it can't be assumed anything about `v1 < 0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
    @property
    def rank(self):
        """The number of entries in the Vector."""
        return len(self)
    @property
    def sum(self):
        """The sum of all entries (symbolic if any entry is symbolic)."""
        return sum(self)
    @property
    def is_constant(self):
        """True iff every entry is a plain integer (no symbolic entries)."""
        return all(is_integer(i) for i in self)
    def distance(self, other):
        """
        Compute the distance from ``self`` to ``other``.

        The distance is a reflexive, transitive, and anti-symmetric relation,
        which establishes a total ordering amongst Vectors.

        The distance is a function [Vector x Vector --> D]. D is a tuple of length
        equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
        the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
        succeeds (> 0) the i-th component of ``other``, other_i.

        In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and other_i.

        Examples
        --------
                 | 3 |           | 1 |               |  2 |
        source = | 2 | , sink =  | 4 | , distance => | -2 |
                 | 1 |           | 5 |               | -4 |

        There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.
        """
        try:
            # Handle quickly the special (yet relevant) cases `other == 0`
            if is_integer(other) and other == 0:
                return self
            elif all(i == 0 for i in other) and self.rank == other.rank:
                return self
        except TypeError:
            # `other` is not iterable / not comparable entry-wise; fall through
            pass
        return self - other
class LabeledVector(Vector):

    """
    A Vector that associates a Dimension to each element.

    The i-th entry of ``labels`` is the Dimension associated with the i-th
    Vector component.
    """

    def __new__(cls, items=None):
        """Build a LabeledVector from an iterable of (Dimension, value)
        pairs; `items=None` (or empty) yields the empty LabeledVector."""
        try:
            labels, values = zip(*items)
        except (ValueError, TypeError):
            labels, values = (), ()
        if not all(isinstance(i, Dimension) for i in labels):
            raise ValueError("All labels must be of type Dimension, got [%s]"
                             % ','.join(i.__class__.__name__ for i in labels))
        obj = super(LabeledVector, cls).__new__(cls, *values)
        obj.labels = labels
        return obj

    @classmethod
    def transpose(cls, *vectors):
        """
        Transpose a matrix represented as an iterable of homogeneous LabeledVectors.

        Returns a tuple of (label, Vector) pairs, one per label; the empty
        input yields an empty LabeledVector.
        """
        if len(vectors) == 0:
            return LabeledVector()
        if not all(isinstance(v, LabeledVector) for v in vectors):
            raise ValueError("All items must be of type LabeledVector, got [%s]"
                             % ','.join(i.__class__.__name__ for i in vectors))
        T = OrderedDict()
        for v in vectors:
            for l, i in zip(v.labels, v):
                T.setdefault(l, []).append(i)
        return tuple((l, Vector(*i)) for l, i in T.items())

    def __repr__(self):
        return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))

    def __hash__(self):
        return hash((tuple(self), self.labels))

    def __eq__(self, other):
        if isinstance(other, LabeledVector) and self.labels != other.labels:
            raise TypeError("Cannot compare due to mismatching `labels`")
        return super(LabeledVector, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if isinstance(other, LabeledVector) and self.labels != other.labels:
            raise TypeError("Cannot compare due to mismatching `labels`")
        return super(LabeledVector, self).__lt__(other)

    def __gt__(self, other):
        return other.__lt__(self)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __getitem__(self, index):
        """Access by position/slice, or by Dimension: any Dimension whose
        `_defines` set intersects a label's resolves to that label's value;
        an unknown Dimension yields None."""
        if isinstance(index, (slice, int)):
            return super(LabeledVector, self).__getitem__(index)
        elif isinstance(index, Dimension):
            for d in self.labels:
                if d._defines & index._defines:
                    i = self.labels.index(d)
                    return super(LabeledVector, self).__getitem__(i)
            return None
        else:
            raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
                            % type(index))

    def fromlabel(self, label, v=None):
        """Like `self[label]`, but return `v` when `label` is not a label."""
        return self[label] if label in self.labels else v

    def items(self):
        """Iterate over (label, value) pairs, dict-style."""
        return zip(self.labels, self)

    @memoized_meth
    def distance(self, other):
        """
        Compute the distance from ``self`` to ``other``.

        Parameters
        ----------
        other : LabeledVector
            The LabeledVector from which the distance is computed.

        Raises
        ------
        TypeError
            If `other` is not a LabeledVector or its labels mismatch.
        """
        if not isinstance(other, LabeledVector):
            # Bug fix: the message was previously passed alongside the type as
            # a second exception argument instead of being %-formatted into it
            raise TypeError("Cannot compute distance from obj of type %s"
                            % type(other))
        if self.labels != other.labels:
            raise TypeError("Cannot compute distance due to mismatching `labels`")
        return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
    """
    Retrieve the minimum out of an iterable of Vectors.

    Raises
    ------
    TypeError
        If there are two incomparable Vectors.
    ValueError
        If an empty sequence is supplied
    """
    if not all(isinstance(v, Vector) for v in vectors):
        raise TypeError("Expected an iterable of Vectors")
    if len(vectors) == 0:
        raise ValueError("min() arg is an empty sequence")
    smallest = vectors[0]
    for candidate in vectors[1:]:
        # `<` alone is not enough: with symbolic entries it may be False
        # while `<=` holds (see Vector.__le__), hence the second check
        if candidate < smallest or candidate <= smallest:
            smallest = candidate
    return smallest
def vmax(*vectors):
    """
    Retrieve the maximum out of an iterable of Vectors.

    Raises
    ------
    TypeError
        If there are two incomparable Vectors.
    ValueError
        If an empty sequence is supplied
    """
    if not all(isinstance(i, Vector) for i in vectors):
        raise TypeError("Expected an iterable of Vectors")
    if len(vectors) == 0:
        # Bug fix: the message previously said "min()"
        raise ValueError("max() arg is an empty sequence")
    ret = vectors[0]
    for i in vectors[1:]:
        # `>` alone is not enough with symbolic entries: it may be False
        # while `>=` holds (mirror of the reasoning in vmin)
        if i > ret or i >= ret:
            ret = i
    return ret
|
opesci/devito
|
devito/ir/support/vector.py
|
Python
|
mit
| 14,039
|
import os
from Foundation import *
def getApplicationDirectory(appName):
    """Return the per-user application support directory for *appName*,
    creating it on disk if necessary; falls back to the temporary
    directory when no Application Support path is available."""
    # Foundation API: list of candidate directories in the user domain
    # (presumably ~/Library/Application Support on macOS — confirm)
    paths = NSSearchPathForDirectoriesInDomains(NSApplicationSupportDirectory,
                                                NSUserDomainMask, True)
    basePath = (len(paths) > 0 and paths[0]) or NSTemporaryDirectory()
    # NSString method: joins the path component with the proper separator
    fullPath = basePath.stringByAppendingPathComponent_(appName)
    if not os.path.exists(fullPath):
        os.makedirs(fullPath)
    return fullPath
def initAppDirs(settings, app_name):
    """Assign application directory variables to the *settings* object.

    Sets APP_DIR/APP_NAME plus the derived log file, user preferences,
    projects and slacking data paths, all rooted at the per-user
    application directory.
    """
    APP_DIR = getApplicationDirectory(app_name)
    settings.APP_DIR = APP_DIR
    settings.APP_NAME = app_name
    # os.path.join instead of manual '/{}'.format concatenation: same result
    # here, but robust against trailing separators in APP_DIR
    settings.LOG_PATH = os.path.join(APP_DIR, 'log.txt')
    settings.USER_PREFS_PATH = os.path.join(APP_DIR, 'user_prefs.ini')
    settings.PROJECTS_DATA_PATH = os.path.join(APP_DIR, 'projects')
    settings.SLACKING_DATA_PATH = os.path.join(APP_DIR, 'slacking')
|
SPlyer/MacTimeLog
|
common/settings_utils.py
|
Python
|
gpl-2.0
| 957
|
import re
from typing import Tuple
import pytest
from .._receivebuffer import ReceiveBuffer
def test_receivebuffer() -> None:
    """Exercise ReceiveBuffer end to end: truthiness/len/bytes invariants,
    bounded extraction, single-line extraction, and header-block
    extraction (including partial-delimiter buffering)."""
    b = ReceiveBuffer()
    assert not b
    assert len(b) == 0
    assert bytes(b) == b""
    b += b"123"
    assert b
    assert len(b) == 3
    assert bytes(b) == b"123"
    # Reading the bytes must not consume them
    assert bytes(b) == b"123"
    assert b.maybe_extract_at_most(2) == b"12"
    assert b
    assert len(b) == 1
    assert bytes(b) == b"3"
    assert bytes(b) == b"3"
    assert b.maybe_extract_at_most(10) == b"3"
    assert bytes(b) == b""
    # An empty buffer yields None, not b""
    assert b.maybe_extract_at_most(10) is None
    assert not b
    ################################################################
    # maybe_extract_until_next
    ################################################################
    b += b"123\n456\r\n789\r\n"
    assert b.maybe_extract_next_line() == b"123\n456\r\n"
    assert bytes(b) == b"789\r\n"
    assert b.maybe_extract_next_line() == b"789\r\n"
    assert bytes(b) == b""
    b += b"12\r"
    assert b.maybe_extract_next_line() is None
    assert bytes(b) == b"12\r"
    b += b"345\n\r"
    assert b.maybe_extract_next_line() is None
    assert bytes(b) == b"12\r345\n\r"
    # here we stopped at the middle of b"\r\n" delimiter
    b += b"\n6789aaa123\r\n"
    assert b.maybe_extract_next_line() == b"12\r345\n\r\n"
    assert b.maybe_extract_next_line() == b"6789aaa123\r\n"
    assert b.maybe_extract_next_line() is None
    assert bytes(b) == b""
    ################################################################
    # maybe_extract_lines
    ################################################################
    b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing"
    lines = b.maybe_extract_lines()
    assert lines == [b"123", b"a: b", b"foo:bar"]
    assert bytes(b) == b"trailing"
    assert b.maybe_extract_lines() is None
    b += b"\r\n\r"
    assert b.maybe_extract_lines() is None
    assert b.maybe_extract_at_most(100) == b"trailing\r\n\r"
    assert not b
    # Empty body case (as happens at the end of chunked encoding if there are
    # no trailing headers, e.g.)
    b += b"\r\ntrailing"
    assert b.maybe_extract_lines() == []
    assert bytes(b) == b"trailing"
@pytest.mark.parametrize(
    "data",
    [
        pytest.param(
            (
                b"HTTP/1.1 200 OK\r\n",
                b"Content-type: text/plain\r\n",
                b"Connection: close\r\n",
                b"\r\n",
                b"Some body",
            ),
            id="with_crlf_delimiter",
        ),
        pytest.param(
            (
                b"HTTP/1.1 200 OK\n",
                b"Content-type: text/plain\n",
                b"Connection: close\n",
                b"\n",
                b"Some body",
            ),
            id="with_lf_only_delimiter",
        ),
        pytest.param(
            (
                b"HTTP/1.1 200 OK\n",
                b"Content-type: text/plain\r\n",
                b"Connection: close\n",
                b"\n",
                b"Some body",
            ),
            id="with_mixed_crlf_and_lf",
        ),
    ],
)
def test_receivebuffer_for_invalid_delimiter(data: Tuple[bytes]) -> None:
    """Header blocks terminated by CRLF, bare LF, or a mixture must all
    split into the same header lines, leaving the body untouched."""
    b = ReceiveBuffer()
    for line in data:
        b += line
    lines = b.maybe_extract_lines()
    assert lines == [
        b"HTTP/1.1 200 OK",
        b"Content-type: text/plain",
        b"Connection: close",
    ]
    assert bytes(b) == b"Some body"
|
python-hyper/h11
|
h11/tests/test_receivebuffer.py
|
Python
|
mit
| 3,454
|
''' The validation module provides the capability to perform integrity
checks on an entire collection of Bokeh models.

To create a Bokeh visualization, the central task is to assemble a collection
of model objects from |bokeh.models| into a graph that represents the scene that
should be created in the client. It is possible to do this "by hand", using the
model objects directly. However, to make this process easier, Bokeh provides
higher level interfaces such as |bokeh.plotting| and |bokeh.charts| for users.

These interfaces automate common "assembly" steps, to ensure a Bokeh object
graph is created in a consistent, predictable way. However, regardless of what
interface is used, it is possible to put Bokeh models together in ways that are
incomplete, or that do not make sense in some way.

To assist with diagnosing potential problems, Bokeh performs a validation step
when outputting a visualization for display. This module contains error and
warning codes as well as helper functions for defining validation checks.
'''
from __future__ import absolute_import
from .check import check_integrity
from .decorators import error, warning
|
schoolie/bokeh
|
bokeh/core/validation/__init__.py
|
Python
|
bsd-3-clause
| 1,147
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Module documentation in Ansible's YAML format. Fixes applied: the first
# description entry was written `-Queries` (missing the space after the dash,
# which breaks the YAML list), and "separtated" was misspelled twice.
DOCUMENTATION = r'''
---
module: ucs_query
short_description: Queries UCS Manager objects by class or distinguished name
description:
- Queries UCS Manager objects by class or distinguished name.
- Examples can be used with the UCS Platform Emulator U(https://cs.co/ucspe).
extends_documentation_fragment: ucs
options:
  class_ids:
    description:
    - One or more UCS Manager Class IDs to query.
    - As a comma separated list
    type: str
  distinguished_names:
    description:
    - One or more UCS Manager Distinguished Names to query.
    - As a comma separated list
    type: str
  delegate_to:
    description:
    - Where the module will be run
    default: localhost
    type: str
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.10"
'''

EXAMPLES = r'''
- name: Query UCS Class ID
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    class_ids: computeBlade
    delegate_to: localhost

- name: Query UCS Class IDs
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    class_ids: computeBlade, fabricVlan
    delegate_to: localhost

- name: Query UCS Distinguished Name
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    distinguished_names: org-root
    delegate_to: localhost

- name: Query UCS Distinguished Names
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    distinguished_names: org-root, sys/rack-unit-1, sys/chassis-1/blade-2
    delegate_to: localhost
'''

RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def retrieve_class_id(class_id, ucs):
    """Query UCS Manager for all objects of `class_id` through the module's
    login handle (ucsmsdk `query_classid`)."""
    return ucs.login_handle.query_classid(class_id)
def retrieve_distinguished_name(distinguished_name, ucs):
    """Query UCS Manager for the single object at `distinguished_name`
    through the module's login handle (ucsmsdk `query_dn`)."""
    return ucs.login_handle.query_dn(distinguished_name)
def make_mo_dict(ucs_mo):
    """Flatten a UCS managed object into a plain dict, keyed by the
    property names listed in its `prop_map`."""
    return {prop: getattr(ucs_mo, prop) for prop in ucs_mo.prop_map.values()}
def main():
    """Module entry point: query UCS by `class_ids` or `distinguished_names`
    (mutually exclusive) and return the results under `objects`."""
    argument_spec = ucs_argument_spec
    argument_spec.update(
        class_ids=dict(type='str'),
        distinguished_names=dict(type='str'),
        delegate_to=dict(type='str', default='localhost'),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[
            ['class_ids', 'distinguished_names'],
        ],
    )
    # UCSModule verifies ucsmsdk is present and exits on failure.
    # Imports are below for UCS object creation.
    ucs = UCSModule(module)
    err = False
    query_result = {}
    try:
        if module.params['class_ids']:
            # One list of flattened objects per comma-separated Class ID
            class_ids = [
                x.strip() for x in module.params['class_ids'].split(',')
            ]
            for class_id in class_ids:
                query_result[class_id] = []
                ucs_mos = retrieve_class_id(class_id, ucs)
                if ucs_mos:
                    for ucs_mo in ucs_mos:
                        query_result[class_id].append(make_mo_dict(ucs_mo))
            ucs.result['objects'] = query_result
        elif module.params['distinguished_names']:
            # One flattened object (or empty dict) per comma-separated DN
            distinguished_names = [
                x.strip()
                for x in module.params['distinguished_names'].split(',')
            ]
            for distinguished_name in distinguished_names:
                query_result[distinguished_name] = {}
                ucs_mo = retrieve_distinguished_name(distinguished_name, ucs)
                if ucs_mo:
                    query_result[distinguished_name] = make_mo_dict(ucs_mo)
            ucs.result['objects'] = query_result
    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    if err:
        module.fail_json(**ucs.result)
    # Queries never change state
    ucs.result['changed'] = False
    module.exit_json(**ucs.result)
# Standard Ansible module entry point
if __name__ == '__main__':
    main()
|
hyperized/ansible
|
lib/ansible/modules/remote_management/ucs/ucs_query.py
|
Python
|
gpl-3.0
| 4,605
|
import json
import time
from indy import ledger, did, wallet, pool
from src.utils import get_pool_genesis_txn_path, run_coroutine, PROTOCOL_VERSION
import logging
logger = logging.getLogger(__name__)
async def demo():
    """End-to-end Indy ledger sample: a Trustee onboards a new user DID via
    a NYM transaction, the user writes an ATTRIB transaction, then all
    wallets, pools, and ledger configs are torn down."""
    logger.info("Ledger sample -> started")
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)
    trustee = {
        'seed': '000000000000000000000000Trustee1',
        'wallet_config': json.dumps({'id': 'trustee_wallet'}),
        'wallet_credentials': json.dumps({'key': 'trustee_wallet_key'}),
        'pool_name': 'trustee_pool',
    }
    # 1. Trustee open pool ledger
    trustee['genesis_txn_path'] = get_pool_genesis_txn_path(trustee['pool_name'])
    trustee['pool_config'] = json.dumps({"genesis_txn": str(trustee['genesis_txn_path'])})
    await pool.create_pool_ledger_config(trustee['pool_name'], trustee['pool_config'])
    trustee['pool'] = await pool.open_pool_ledger(trustee['pool_name'], None)
    # 2. Create Trustee Wallet and Get Wallet Handle
    await wallet.create_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    trustee['wallet'] = await wallet.open_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 3. Create Trustee DID (deterministic, derived from the well-known seed)
    (trustee['did'], trustee['verkey']) = \
        await did.create_and_store_my_did(trustee['wallet'], json.dumps({"seed": trustee['seed']}))
    # 4. User init
    user = {
        'wallet_config': json.dumps({'id': 'user_wallet'}),
        'wallet_credentials': json.dumps({'key': 'user_wallet_key'}),
        'pool_name': 'user_pool'
    }
    user['genesis_txn_path'] = get_pool_genesis_txn_path(user['pool_name'])
    user['pool_config'] = json.dumps({"genesis_txn": str(user['genesis_txn_path'])})
    await pool.create_pool_ledger_config(user['pool_name'], user['pool_config'])
    user['pool'] = await pool.open_pool_ledger(user['pool_name'], None)
    await wallet.create_wallet(user['wallet_config'], user['wallet_credentials'])
    user['wallet'] = await wallet.open_wallet(user['wallet_config'], user['wallet_credentials'])
    # 5. User create DID (random seed)
    (user['did'], user['verkey']) = await did.create_and_store_my_did(user['wallet'], "{}")
    trustee['user_did'] = user['did']
    trustee['user_verkey'] = user['verkey']
    # 6. Trustee prepare and send NYM transaction for user
    nym_req = await ledger.build_nym_request(trustee['did'], trustee['user_did'], trustee['user_verkey'], None, None)
    await ledger.sign_and_submit_request(trustee['pool'], trustee['wallet'], trustee['did'], nym_req)
    # 7. User send ATTRIB transaction to Ledger
    attr_req = \
        await ledger.build_attrib_request(user['did'], user['did'], None, '{"endpoint":{"ha":"127.0.0.1:5555"}}', None)
    resp = await ledger.sign_and_submit_request(user['pool'], user['wallet'], user['did'], attr_req)
    assert json.loads(resp)['op'] == 'REPLY'
    # 8. Close and delete Trustee wallet
    await wallet.close_wallet(trustee['wallet'])
    await wallet.delete_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 9. Close and delete User wallet
    await wallet.close_wallet(user['wallet'])
    await wallet.delete_wallet(user['wallet_config'], user['wallet_credentials'])
    # 10. Close Trustee and User pools
    await pool.close_pool_ledger(trustee['pool'])
    await pool.close_pool_ledger(user['pool'])
    # 11 Delete pool ledger config
    await pool.delete_pool_ledger_config(trustee['pool_name'])
    await pool.delete_pool_ledger_config(user['pool_name'])
    logger.info("Ledger sample -> completed")
if __name__ == '__main__':
    run_coroutine(demo)
    time.sleep(1)  # FIXME: give the libindy background thread time to complete before exit
|
srottem/indy-sdk
|
samples/python/src/ledger.py
|
Python
|
apache-2.0
| 3,737
|
#############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# CDownloader:
# This class handles user login to the Navi-Xtreme website.
#############################################################################
from string import *
import sys, os.path
import urllib
import urllib2
import re, random, string
import xbmc, xbmcgui, xbmcaddon
import re, os, time, datetime, traceback
import shutil
import os
from libs2 import *
# Detect whether we are running under the XBMC GUI emulator
try: Emulating = xbmcgui.Emulating
except: Emulating = False
# Control IDs used by the login skin XML
LABEL_USRNAME = 141
LABEL_PASSWORD = 142
BUTTON_USRNAME = 143
# NOTE(review): 1144 breaks the 14x sequence of the surrounding IDs --
# possibly a typo for 144; confirm against CLoginskin2.xml
BUTTON_PASSWORD = 1144
BUTTON_LOGIN = 145
BUTTON_CANCEL = 146
class CDialogLogin(xbmcgui.WindowXMLDialog):
    """
    Modal XML dialog handling user login/logout against the Navi-Xtreme
    website. The persistence hooks (read_user_id / save_user_id) are
    stubs in this version.
    """

    def __init__(self, strXMLname, strFallbackPath):
        # The user id is empty until a successful login is performed
        self.userloggedin = False
        self.user_id = ''

    def onAction(self, action):
        # Close the dialog on any "back"/"parent directory" navigation action
        if (action == ACTION_PREVIOUS_MENU) or (action == ACTION_PARENT_DIR) or (action == ACTION_PREVIOUS_MENU2):
            self.close()

    def onFocus(self, controlId):
        pass

    def onClick(self, controlId):
        pass

    def onControl(self, control):
        pass

    def login(self):
        """Display the login dialog, then persist the resulting user id."""
        # display GUI window and perform login to the Navi-Xtreme server
        self.doModal()
        # if success, persist the user id
        self.save_user_id()

    def logout(self):
        """Clear the stored user id and persist the now-empty value."""
        self.user_id = ''
        # Bug fix: the original called the non-existent `write_user_id`
        # (as its own comment admitted); `save_user_id` is the persistence
        # hook actually defined on this class.
        self.save_user_id()

    def is_user_logged_in(self):
        """Return True when a user id is currently stored."""
        return self.user_id != ''

    def rate_item(self, mediaitem):
        pass

    def read_user_id(self):
        pass

    def save_user_id(self):
        pass
#end of class
#use singleton
#login = CDialogLogin("CLoginskin.xml", os.getcwd())
# Module-level singleton dialog instance used by the rest of the addon
login = CDialogLogin("CLoginskin2.xml", addon.getAddonInfo('path'))
|
JamesLinEngineer/RKMC
|
addons/script.navi-x/src/CLogin.py
|
Python
|
gpl-2.0
| 3,203
|
import logging
import uuid
import datetime
import mimetypes
from Cookie import BaseCookie
from google.appengine.runtime import DeadlineExceededError
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError, ApplicationError
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import util
from google.appengine.api import urlfetch, memcache
from django.utils import simplejson
PASSIFOX_GITHUB_URL = "https://github.com/pfn/passifox/raw/master"
KEEPASSHTTP_GITHUB_URL = "https://github.com/pfn/keepasshttp/raw/master"
class StatusException(Exception):
    """Exception carrying an HTTP status `code` and message `msg`, so
    request handlers can relay an upstream fetch failure to the client."""

    def __init__(self, msg, code):
        super(StatusException, self).__init__()
        self.msg = msg
        self.code = code
class Page(db.Model):
    """Datastore cache of a file fetched from GitHub, keyed by URI path."""
    content = db.BlobProperty()  # raw file bytes
    create_ts = db.DateTimeProperty(auto_now_add=True)  # when first cached
class Hit(db.Model):
    """Per-visitor install tracker row, keyed by a UUID cookie value."""
    hit_ts = db.DateTimeProperty(auto_now=True)  # last time this visitor hit
    uri = db.StringProperty()  # path requested
    ip = db.StringProperty()  # remote address
    count = db.IntegerProperty()  # cumulative hit count
    vers = db.StringProperty()  # "firefox - gecko" version string, if parsed
class PostReceiveHandler(webapp.RequestHandler):
    """GitHub post-receive webhook: invalidates every cached Page when a
    push to pfn/passifox or pfn/keepasshttp modified any file."""
    def post(self):
        payload = self.request.get('payload', None)
        if not payload:
            self.error(400)
            self.response.out.write("bad request")
            return
        data = simplejson.loads(payload)
        # Only accept pushes from the two known repositories
        if data['repository']['url'].find("/pfn/passifox") == -1 and data['repository']['url'].find("/pfn/keepasshttp") == -1:
            self.error(403)
            self.response.out.write("bad repository")
            return
        if data.has_key('commits'):
            for commit in data['commits']:
                if commit.has_key('modified'):
                    modified = commit['modified']
                    if len(modified) > 0:
                        # Drop all cached pages; they will be re-fetched
                        # from GitHub on the next request
                        db.delete(Page.all())
                        memcache.flush_all()
        else:
            self.error(304)
            self.response.out.write("nothing to do")
        return
def set_cookie(key, value, response):
    """Attach a one-year cookie `key=value` to *response* via its
    `Set-Cookie` header."""
    # Only max-age and path are actually emitted; the remaining attributes
    # (taken from webob.Response) are placeholders left unset for now
    attributes = [
        ('max_age', 365 * 24 * 60 * 60),
        ('path', '/'),
        ('domain', None),
        ('secure', None),
        ('HttpOnly', None),
        ('version', None),
        ('comment', None),
    ]
    cookies = BaseCookie()
    cookies[key] = value
    morsel = cookies[key]
    for attr_name, attr_value in attributes:
        if attr_value is not None and attr_value is not False:
            morsel[attr_name.replace('_', '-')] = str(attr_value)
    response.headers['Set-Cookie'] = morsel.output(header='').lstrip()
def get_ua_version(request):
    """Best-effort extraction of "ff - gecko" from the User-Agent header,
    where `gecko` is the text between the first ':' and the first ')' and
    `ff` the text after the last '/'; returns None when either is missing."""
    ua = request.headers['user-agent']
    if not ua or "Gecko/" not in ua:
        return None
    paren = ua.find(")")
    colon = ua.find(":")
    gecko = ua[colon + 1:paren] if (paren != -1 and colon != -1) else None
    # Firefox version: after the last '/', truncated at the last ',' if any
    slash = ua.rfind("/") + 1
    comma = ua.rfind(",")
    ff = None
    if gecko is not None and slash > 0:
        ff = ua[slash:comma if comma != -1 else None]
    if gecko and ff:
        return "%s - %s" % (ff, gecko)
    return None
def update_install_tracker(request, response):
    """Record an install hit for this visitor, identified by a UUID cookie
    (created here on first visit); hit counts accumulate in the Hit model."""
    name = 'passifox-install'
    if request.cookies.has_key(name):
        uid = request.cookies[name]
    else:
        # First visit: mint an id and hand it back as a cookie
        uid = str(uuid.uuid4())
        set_cookie(name, uid, response)
    hit = Hit.get_by_key_name(uid)
    if not hit:
        hit = Hit(key_name=uid)
    hit.uri = request.path
    hit.ip = request.remote_addr
    ua = get_ua_version(request)
    if ua:
        hit.vers = ua
    if not hit.count:
        hit.count = 0
    hit.count += 1
    try:
        hit.put()
    except (CapabilityDisabledError, DeadlineExceededError, ApplicationError), e:
        # Tracking is best-effort: log and carry on serving the request
        logging.error("Unable to save hit count update: " + str(e))
def get_page_content(request, response, uri=None, source=PASSIFOX_GITHUB_URL):
    """Return the body of `uri` (default: the request path), served from
    memcache first, then the Page datastore cache, then a live fetch from
    GitHub (`source` + uri). Raises StatusException when the fetch fails."""
    if uri is None:
        # only update for file download, not README
        #update_install_tracker(request, response)
        uri = request.path
    content = memcache.get(uri)
    if content:
        return content
    page = Page.get_by_key_name(uri)
    if page:
        # Re-warm memcache from the datastore copy
        memcache.set(uri, page.content)
    else:
        url = "%s%s" % (source, uri)
        res = urlfetch.fetch(url)
        if res.status_code == 200:
            page = Page(key_name=uri)
            page.content = db.Blob(res.content)
            memcache.set(uri, page.content)
            try:
                page.put()
            except (CapabilityDisabledError, DeadlineExceededError, ApplicationError), e:
                # Datastore write failed; still serve the fetched content
                logging.error("Unable to save page content: " + str(e))
        else:
            ex = StatusException(res.content, res.status_code)
            raise ex
    return page.content
class UpdateFileHandler(webapp.RequestHandler):
    """Serve the PassIFox update.rdf manifest as application/xml."""
    def get(self):
        try:
            c = get_page_content(self.request, self.response)
            self.response.headers['Content-type'] = "application/xml"
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class InstallFileHandler(webapp.RequestHandler):
    """Serve the PassIFox .xpi installer as application/x-xpinstall."""
    def get(self):
        try:
            c = get_page_content(self.request, self.response)
            self.response.headers['Content-type'] = "application/x-xpinstall"
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class KeePassHttpPLGXHandler(webapp.RequestHandler):
    """Serve the KeePassHttp plugin binary as an attachment download."""
    def get(self):
        try:
            c = get_page_content(self.request, self.response, '/KeePassHttp.plgx', KEEPASSHTTP_GITHUB_URL)
            self.response.headers['Content-type'] = "application/octet-stream"
            self.response.headers['Content-disposition'] = "attachment; filename=KeePassHttp.plgx"
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class KeePassHttpUpdateHandler(webapp.RequestHandler):
    """Serve the KeePassHttp update-version.txt file as plain text."""
    def get(self):
        try:
            c = get_page_content(self.request, self.response, '/update-version.txt', KEEPASSHTTP_GITHUB_URL)
            self.response.headers['Content-type'] = "text/plain"
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class KPHContentHandler(webapp.RequestHandler):
    """Serve arbitrary files under /kph/* from the keepasshttp repository,
    guessing the MIME type (and disposition) from the file name."""
    def get(self):
        try:
            # Strip the "/kph" prefix to get the repository-relative path
            p = self.request.path[len("/kph"):]
            fn = p[p.rindex("/") + 1:]
            mtype = mimetypes.guess_type(fn, False)
            if mtype[1]:
                mime = "%s; charset=%s" % (mtype[0], mtype[1])
            else:
                mime = mtype[0]
            c = get_page_content(self.request, self.response, p, KEEPASSHTTP_GITHUB_URL)
            self.response.headers['Content-type'] = mime
            # Binary-ish types download as attachments, text renders inline
            if mime.startswith("application/"):
                disp = "attachment; filename=%s" % fn
            else:
                disp = "inline"
            self.response.headers['Content-disposition'] = disp
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class PIFContentHandler(webapp.RequestHandler):
    """Serve arbitrary files under /ext/* from the passifox repository;
    same logic as KPHContentHandler but with the default source."""
    def get(self):
        try:
            # Strip the "/ext" prefix to get the repository-relative path
            p = self.request.path[len("/ext"):]
            fn = p[p.rindex("/") + 1:]
            mtype = mimetypes.guess_type(fn, False)
            if mtype[1]:
                mime = "%s; charset=%s" % (mtype[0], mtype[1])
            else:
                mime = mtype[0]
            c = get_page_content(self.request, self.response, p)
            self.response.headers['Content-type'] = mime
            if mime.startswith("application/"):
                disp = "attachment; filename=%s" % fn
            else:
                disp = "inline"
            self.response.headers['Content-disposition'] = disp
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
class RedirectToRootHandler(webapp.RequestHandler):
    """Catch-all: redirect any unknown path to the site root."""
    def get(self):
        self.redirect('/')
class ClearHitsHandler(webapp.RequestHandler):
    """Cron endpoint: purge Hit rows older than one week."""
    def get(self):
        now = datetime.datetime.now()
        lastweek = now - datetime.timedelta(days=7)
        # Key-only query keeps the delete cheap
        hits = db.GqlQuery("SELECT __key__ FROM Hit WHERE hit_ts < :1",
                           lastweek)
        db.delete(hits)
class RootHandler(webapp.RequestHandler):
    """Serve the repository README as the site's front page (plain text)."""
    def get(self):
        try:
            c = get_page_content(self.request, self.response, '/README.md')
            self.response.headers['Content-type'] = "text/plain"
            self.response.out.write(c)
        except StatusException, e:
            self.error(e.code)
            self.response.out.write(e.msg)
# URL routing table: GitHub-proxied content plus maintenance endpoints;
# anything unmatched redirects to the root page
application = webapp.WSGIApplication([
    ('/', RootHandler),
    ('/cron/clearhits', ClearHitsHandler),
    ('/update.rdf', UpdateFileHandler),
    ('/update-version.txt', KeePassHttpUpdateHandler),
    ('/passifox.xpi', InstallFileHandler),
    ('/KeePassHttp.plgx', KeePassHttpPLGXHandler),
    ('/github-post-receive', PostReceiveHandler),
    (r'/kph/.*', KPHContentHandler),
    (r'/ext/.*', PIFContentHandler),
    (r'/.*', RedirectToRootHandler),
    ], debug=True)
def main():
    """CGI entry point for the (legacy) GAE webapp framework."""
    util.run_wsgi_app(application)
# Script entry point under the App Engine CGI runtime
if __name__ == '__main__':
    main()
|
pfn/passifox-appengine
|
main.py
|
Python
|
gpl-2.0
| 9,782
|
import textwrap
import string
import pathlib, contextlib
from keypad import buffers, control, core, options
from keypad.util import listdict
from keypad.core import Keys
from keypad.testutil import mocks
from keypad.buffers.testutil import placeholder_insert
from keypad.plugins.pymodel import PythonCodeModelPlugin
# Attach the Python code model plugin globally so the controllers created
# below pick it up for the fake .py path — presumably a one-time registration
PythonCodeModelPlugin(None).attach()
def make_bctl():
    """Build a throw-away BufferController wired to a mock view, configured
    with tab indentation and a fake .py path (so the Python code model
    applies)."""
    conf = core.Config.root.derive()
    settings = options.GeneralSettings.from_config(conf)
    settings.indent_text = '\t'
    bctl = control.BufferController(buffer_set=None,
                                    view=mocks.MockCodeView(),
                                    buff=buffers.Buffer(),
                                    provide_interaction_mode=True,
                                    config=conf)
    bctl.path = pathlib.Path('/tmp/a.py')
    return bctl
def text2keys(text):
    """Translate a string into the sequence of key events that would
    produce it: Return for newline, Tab for tab, Shift-modified sequences
    for characters in `shifted_keys`, plain sequences otherwise."""
    for char in text:
        if char == '\n':
            yield Keys.return_
            continue
        if char == '\t':
            yield Keys.tab
            continue
        if char in shifted_keys:
            yield core.SimpleKeySequence(core.Modifiers.Shift, ord(char))
        else:
            yield core.SimpleKeySequence(0, ord(char.upper()))
def typekeys(bctl, *textandkeys):
    """Feed a mix of strings (translated via text2keys) and raw key
    objects to the controller's view."""
    for item in textandkeys:
        keys = text2keys(item) if isinstance(item, str) else (item,)
        bctl.view.press(*keys)
def addtext(bctl, text):
    """Insert dedented *text* into the buffer; returns the placeholder
    cursors (tests access them by name, e.g. `.c`)."""
    return placeholder_insert(buffers.Cursor(bctl.buffer),
                              textwrap.dedent(text))
# Characters that require the Shift modifier on a US keyboard layout;
# kept in a ListDict purely for membership tests in text2keys()
shifted_keys = listdict.ListDict()
for key in '~!@#$%^&*()_+|{}:"<>?' + string.ascii_uppercase:
    shifted_keys[key] = None
def test_simple():
    """Pressing Return after 'def foo():' auto-indents the next line with a tab."""
    with make_bctl() as bctl:
        text = 'def foo():`c`'
        cursors = addtext(bctl, text)
        bctl.selection.move(cursors.c)
        typekeys(bctl, Keys.return_, 'abcd')
        assert bctl.buffer.text == 'def foo():\n\tabcd'
def test_indent_align():
    """Continuation after an open paren aligns under the first argument."""
    with make_bctl() as bctl:
        text = 'def foo():\n\tdef bar(a,`c`'
        cursors = addtext(bctl, text)
        expected = bctl.buffer.text + '\n\t b'
        bctl.selection.move(cursors.c)
        typekeys(bctl, Keys.return_, 'b')
        assert bctl.buffer.text == expected
def test_quoted_mismatch():
    """A paren inside a string literal must not trigger auto-indentation."""
    with make_bctl() as bctl:
        typekeys(bctl, '\t\'(\'\n', Keys.backspace, '\'{}')
        assert not bctl.buffer.lines[1].text.startswith('\t')
|
sam-roth/Keypad
|
keypad_tests/test_indent.py
|
Python
|
gpl-3.0
| 2,458
|
# Test-package marker; author metadata only
__author__ = 'asergeev'
|
ZJlblDEHb/ya-test-tasks
|
tests/__init__.py
|
Python
|
mit
| 24
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from queue import Queue
class IndexableQueue(Queue):
    """A Queue with thread-safe indexed access and conditional removal.

    Based on http://stackoverflow.com/questions/1293966/best-way-to-obtain-indexed-access-to-a-python-queue-thread-safe
    """

    def __getitem__(self, index):
        """Return the item at *index* without removing it (thread-safe)."""
        with self.mutex:
            return self.queue[index]

    def find_and_remove(self, expr):
        """Remove and return the first queued item for which `expr(item)`
        is truthy, or None when no item matches (thread-safe)."""
        with self.mutex:
            # Sentinel distinguishes "no match" from a matched None item
            absent = object()
            found = next((entry for entry in self.queue if expr(entry)), absent)
            if found is absent:
                return None
            self.queue.remove(found)
            return found
|
Bounti/avatar-python
|
avatar/util/indexable_queue.py
|
Python
|
apache-2.0
| 732
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-22 12:25
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the library app: Article, Category, and the M2M
    # join between them.  Auto-generated by Django 1.10 -- change models.py
    # and add a new migration rather than editing this one.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='title')),
                ('slug', models.SlugField(unique_for_date='publish', verbose_name='slug')),
                ('body', models.TextField(verbose_name='body')),
                # NOTE(review): "Inroduction" typo in help_text -- fix it in
                # models.py plus a follow-up migration, not here.
                ('intro', models.TextField(blank=True, help_text='Inroduction text', verbose_name='intro')),
                ('status', models.IntegerField(choices=[(1, 'Draft'), (2, 'Public')], default=2, verbose_name='status')),
                ('allow_comments', models.BooleanField(default=True, verbose_name='allow comments')),
                # Naive (tz-unaware) callable default, evaluated per save.
                ('publish', models.DateTimeField(default=datetime.datetime.now, verbose_name='publish')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-publish',),
                'db_table': 'library_articles',
                'verbose_name': 'article',
                'verbose_name_plural': 'articles',
                'get_latest_by': 'publish',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('slug', models.SlugField(unique=True, verbose_name='slug')),
            ],
            options={
                'ordering': ('title',),
                'db_table': 'library_categories',
                'verbose_name': 'category',
                'verbose_name_plural': 'categories',
            },
        ),
        migrations.AddField(
            model_name='article',
            name='categories',
            field=models.ManyToManyField(blank=True, to='library.Category'),
        ),
    ]
|
DmitryRendov/mob-site
|
mob/library/migrations/0001_initial.py
|
Python
|
mit
| 2,704
|
#!/usr/bin/python
from yyparse.ZCCparser import parser, printAST
from yyparse.ZCClex import lexer as ZCClexer
from symbol.symtab import c_types
from public.ZCCglobal import global_context, FuncType, error, Context
from generation.generation import generator
import os
import sys
def preprocess(source):
    '''Run the C preprocessor (``gcc -E``) over *source* and return the
    expanded text as a string.'''
    return os.popen("gcc -E " + source).read()
if __name__ == '__main__':
    # CLI entry point (Python 2): compile <source_file> (resolved relative
    # to test/) into an x86 assembly file.
    if len(sys.argv) < 3:
        print "Usage: python main.py <source_file> <x86asm_file>\nEnvironment: Python2.7, Linux."
        exit(1)
    File = sys.argv[1]
    # Preprocess first so the parser sees macro-expanded source.
    codes = preprocess(os.path.abspath("test/"+File))
    pt = parser.parse(codes, lexer=ZCClexer)
    # print "errorCounter=", parser.errorCounter
    printAST(pt)
    # with open("test.s","w") as output:
    # print global_context
    # print error
    # printAST(global_context.local['main'].compound_statement.ast)
    # Only emit assembly when no semantic error was recorded during parsing.
    if(not error[0]):
        gen = generator()
        gen.generate()
        gen.output(sys.argv[2])
|
BadStudent/ZCC
|
main.py
|
Python
|
gpl-2.0
| 986
|
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009,2010, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taboot.tasks import command, TaskResult
class RPMBase(command.Run):
    """
    Base class for rpm commands.

    :param pcmd: the rpm command line to run on the remote host; forwarded
        unchanged to :class:`command.Run`.
    """

    def __init__(self, pcmd, **kwargs):
        super(RPMBase, self).__init__(pcmd, **kwargs)
class PreManifest(RPMBase):
    """
    Record the sorted list of installed RPMs so that a later invocation of
    :class:`PostManifest` can report which packages changed in between.
    """

    def __init__(self, **kwargs):
        super(PreManifest, self).__init__('rpm -qa | sort', **kwargs)

    def run(self, runner):
        """
        Run the manifest command, stash the package listing on *runner*
        under ``rpm.PreManifest``, and blank the visible output -- the
        listing is only interesting as a baseline for the later diff.
        """
        outcome = super(PreManifest, self).run(runner)
        runner['rpm.PreManifest'] = outcome.output
        outcome.output = ''
        return outcome
class PostManifest(RPMBase):
    """
    Gather the list of installed RPMs and diff it against a previously
    taken :class:`PreManifest`, emitting only the added/removed lines.
    """
    from difflib import Differ as _Differ

    def __init__(self, **kwargs):
        super(PostManifest, self).__init__('rpm -qa | sort', **kwargs)

    def run(self, runner):
        """
        The runner that gets passed in contains state that can be
        accessed via dict-like access.  PreManifest uses this to write
        to the rpm.PreManifest field, so we first check that the
        pre-manifest is present.
        """
        try:
            pre_manifest = runner['rpm.PreManifest']
        except KeyError:
            # Narrowed from a bare ``except``: a missing key is the only
            # condition this error message describes; anything else should
            # propagate rather than be silently converted.
            return TaskResult(self, success=False,
                   output="You must use PreManifest before PostManifest")

        # Take a fresh manifest to compare against the stored baseline.
        # NOTE(review): ``super(command.Run, self)`` skips command.Run in
        # the MRO -- confirm that is intentional before changing it.
        result = super(command.Run, self).run(runner)

        old_list = pre_manifest.splitlines(1)
        new_list = result.output.splitlines(1)

        differ = self._Differ()
        diff_output = list(differ.compare(old_list, new_list))
        # Keep only '+'/'-' lines; drop unchanged context and '?' hints.
        diff_output = [line for line in diff_output if line[0] in ('+', '-')]
        result.output = ''.join(diff_output)

        return RPMTaskResult(result.taskObj, result.success,
                             result.output, result.ignore_errors)
class RPMTaskResult(TaskResult):
    """
    Wrapper around TaskResult to be able to differentiate in output class.

    Behaves exactly like :class:`TaskResult`; the distinct type lets output
    handlers render RPM manifest diffs specially.
    """
    def __init__(self, task, success=False, output='', ignore_errors=False):
        super(RPMTaskResult, self).__init__(task, success, output,
                                            ignore_errors)
|
tbielawa/Taboot
|
taboot/tasks/rpm.py
|
Python
|
gpl-3.0
| 3,484
|
from . import api
from . import utils
|
jvanasco/pyramid_caching_api
|
pyramid_caching_api/__init__.py
|
Python
|
mit
| 38
|
import random
import numpy as np
from sklearn.base import RegressorMixin, BaseEstimator
from sklearn import cross_validation
class EnsembleRegressor(RegressorMixin, BaseEstimator):
    """Basic weighted-average ensemble of regression models.

    Parameters
    ----------
    models : seq
        Model *classes* constituting the ensemble; each is instantiated
        with its corresponding keywords dict.
    weights : seq, len(weights) == len(models), optional
        Relative importance of constituent models.  If None, all models
        are weighted equally.  Weights are normalized to sum to 1.
    keywords : seq of dict, len(keywords) == len(models), optional
        Keyword arguments used to instantiate each constituent model.
    train_size : float, default 0.5
        Fraction of the data each model is fit on (fresh random split per
        model).
    """
    # NOTE: the original kept mutable ``models``/``weights``/``keywords``
    # lists as class attributes; removed because shared mutable class state
    # is an anti-pattern and they were always shadowed in __init__.

    def __init__(self, models, weights=None, keywords=None, train_size=0.5):
        if weights is None:
            weights = np.array([1.0 for _ in models])
        if len(weights) != len(models):
            # Explicit error instead of ``assert`` (asserts vanish under -O).
            raise ValueError("len(weights) must equal len(models)")
        self.weights = np.array(weights) / sum(weights)

        if keywords is None:
            keywords = [{} for _ in models]
        self.models = [m(**kw) for m, kw in zip(models, keywords)]
        self.train_size = train_size

    def fit(self, X, y):
        """Fit each constituent model on its own random train split."""
        for model in self.models:
            X1, X2, y1, y2 = cross_validation.train_test_split(
                X, y, train_size=self.train_size)
            model.fit(X1, y1)
        return self

    def predict(self, X):
        """Return the weighted sum of the constituent models' predictions."""
        predictions = []
        for model, weight in zip(self.models, self.weights):
            predictions.append(model.predict(X) * weight)
        return np.sum(predictions, 0)
|
skirklin/modopt
|
modopt/ensemble.py
|
Python
|
mit
| 1,558
|
# encoding: utf-8
# module pyexpat
# from /usr/lib/python2.7/lib-dynload/pyexpat.x86_64-linux-gnu.so
# by generator 1.135
""" Python wrapper for Expat parser. """
# imports
import pyexpat.errors as errors # <module 'pyexpat.errors' (built-in)>
import pyexpat.model as model # <module 'pyexpat.model' (built-in)>
# Variables with simple values
EXPAT_VERSION = 'expat_2.1.0'
native_encoding = 'UTF-8'
XML_PARAM_ENTITY_PARSING_ALWAYS = 2
XML_PARAM_ENTITY_PARSING_NEVER = 0
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE = 1
__version__ = '2.7.8'
# functions
def ErrorString(errno):
    """ErrorString(errno) -> string

    Returns string error for given number.
    (IDE stub: the real implementation lives in the C extension; this
    placeholder always returns the empty string.)
    """
    return ""
def ParserCreate(encoding=None, namespace_separator=None):
    """ParserCreate([encoding[, namespace_separator]]) -> parser

    Return a new XML parser object.
    (IDE stub: the real implementation is native; this placeholder
    returns None.)
    """
    return None
# classes
from Exception import Exception
class ExpatError(Exception):
    """IDE stub for the exception pyexpat raises on XML parsing errors."""

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    # list of weak references to the object (if defined)
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)
error = ExpatError
from object import object
class XMLParserType(object):
    """IDE stub for the native XML parser type."""

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass
# variables with complex values

# Filled in by the C extension at runtime; the stub generator cannot
# represent the CAPI capsule object.
expat_CAPI = None # (!) real value is ''

# (name, value) pairs describing the compile-time configuration of the
# bundled expat library.
features = [
    (
        'sizeof(XML_Char)',
        1,
    ),
    (
        'sizeof(XML_LChar)',
        1,
    ),
    (
        'XML_DTD',
        0,
    ),
    (
        'XML_CONTEXT_BYTES',
        1024,
    ),
    (
        'XML_NS',
        0,
    ),
]

# Bundled expat version as a (major, minor, micro) tuple.
version_info = (
    2,
    1,
    0,
)
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/pyexpat/__init__.py
|
Python
|
gpl-2.0
| 1,861
|
from .views import admin
|
joehand/DataNews
|
data_news/admin/__init__.py
|
Python
|
bsd-3-clause
| 24
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from setuptools import setup, find_packages
__author__ = 'Tim Martin'
__pkg_name__ = 'ripozo'

# Single source of truth for the release version (PEP 440 dev release).
version = '1.2.4.dev0'

setup(
    author=__author__,
    author_email='tim.martin@vertical-knowledge.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    description='A tool for easily making RESTful interfaces',
    # Optional dependency groups: install with e.g. pip install ripozo[docs]
    extras_require={
        'examples': [
            'flask',
            'requests',
            'sqlalchemy',
        ],
        'docs': [
            'sphinx'
        ]
    },
    # six 1.7.1 is excluded; presumably a known-broken release -- confirm
    # before relaxing the pin.
    install_requires=[
        'six>=1.4.1,!=1.7.1'
    ],
    keywords='REST HATEOAS Hypermedia RESTful SIREN HAL API JSONAPI web framework',
    name='ripozo',
    packages=find_packages(include=['ripozo', 'ripozo.*']),
    tests_require=[
        'unittest2',
        'tox',
        'mock',
    ],
    test_suite="ripozo_tests",
    url='http://ripozo.readthedocs.org/',
    version=version
)
|
AlexMaskovyak/ripozo
|
setup.py
|
Python
|
gpl-2.0
| 1,658
|
# -*- coding: utf-8 -*-
'''
A module to pull data from Cobbler via its API into the Pillar dictionary
Configuring the Cobbler ext_pillar
==================================
The same cobbler.* parameters are used for both the Cobbler tops and Cobbler pillar
modules.
.. code-block:: yaml
ext_pillar:
- cobbler:
- key: cobbler # Nest results within this key. By default, values are not nested.
- only: [parameters] # Add only these keys to pillar.
cobbler.url: https://example.com/cobbler_api #default is http://localhost/cobbler_api
cobbler.user: username # default is no username
cobbler.password: password # default is no password
Module Documentation
====================
'''
# Import python libs
import logging
import xmlrpclib
__opts__ = {'cobbler.url': 'http://localhost/cobbler_api',
'cobbler.user': None,
'cobbler.password': None
}
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id, pillar, key=None, only=()):
    '''
    Read pillar data from Cobbler via its API.

    :param minion_id: the minion whose blended Cobbler data is requested
    :param pillar: the existing pillar (unused here)
    :param key: if given, nest the whole result under this key
    :param only: if non-empty, restrict the result to these keys
    :return: dict of pillar data; ``{}`` on any failure
    '''
    url = __opts__['cobbler.url']
    user = __opts__['cobbler.user']
    password = __opts__['cobbler.password']

    log.info("Querying cobbler at %r for information for %r", url, minion_id)
    try:
        server = xmlrpclib.Server(url, allow_none=True)
        if user:
            # NOTE(review): a Server instance is passed where xmlrpclib
            # expects a URI string -- looks suspect; confirm against the
            # Cobbler API docs before changing.
            server = xmlrpclib.Server(server, server.login(user, password))
        result = server.get_blended_data(None, minion_id)
    except Exception:
        # Best-effort: any failure (connection, auth, lookup) yields empty
        # pillar rather than breaking pillar compilation for the minion.
        log.exception(
            'Could not connect to cobbler.'
        )
        return {}

    if only:
        result = dict((k, result[k]) for k in only if k in result)
    if key:
        result = {key: result}
    return result
|
MadeiraCloud/salt
|
sources/salt/pillar/cobbler.py
|
Python
|
apache-2.0
| 1,743
|
#!/usr/bin/env python2.6
import sys
import pika
# Positional CLI arguments: host, port, user, password, ephemeral, key, body.
host = sys.argv[1]
port = int(sys.argv[2])
user = sys.argv[3]
password = sys.argv[4]
ephemeral = sys.argv[5].lower() == 'true' # Indicates if the exchange is only temporary (for testing)
key = sys.argv[6]
body = sys.argv[7]

# All messages are published through a single topic exchange.
exchange='irods'

credentials = pika.PlainCredentials(user, password)
connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        host=host,
        port=port,
        credentials=credentials
    )
)
channel = connection.channel()

# Durable exchanges survive broker restarts; ephemeral ones are deleted
# automatically when unused.
# NOTE(review): ``type=`` was renamed ``exchange_type=`` in newer pika
# releases -- confirm the pinned pika version before upgrading.
channel.exchange_declare(exchange=exchange,
                         type='topic',
                         durable=(not ephemeral),
                         auto_delete=ephemeral)

# delivery_mode=2 marks the message as persistent.
channel.basic_publish(exchange=exchange,
                      routing_key=key,
                      body=body,
                      properties=pika.BasicProperties(
                          delivery_mode = 2,
                      ))
connection.close()
|
angrygoat/DE-Reference
|
ansible/roles/irods-config/files/amqptopicsend.py
|
Python
|
bsd-2-clause
| 944
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.