repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
MassStash/htc_m9_kernel_sense_5.0.2 | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
# Keep the database on a RAM-backed filesystem (tmpfs): per the note above,
# on-disk inserts are far too slow for large sample counts.
# isolation_level=None puts sqlite3 into autocommit mode, so each execute()
# is not wrapped in an implicit transaction.
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
    """Called by perf once before any samples are processed.

    Creates the two tables the rest of this script inserts into:
    gen_events for generic events, pebs_ll for PEBS load-latency data.
    """
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
                name text,
                symbol text,
                comm text,
                dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
                name text,
                symbol text,
                comm text,
                dso text,
                flags integer,
                ip integer,
                status integer,
                dse integer,
                dla integer,
                lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """Called by perf for every sample: turn it into an event object and
    store it in the database.

    param_dict is supplied by the perf scripting engine; it always carries
    the raw sample buffer, comm and event name, while "dso" and "symbol"
    are only present when perf managed to resolve them.
    """
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved; fall back to placeholders.
    # dict.get() with a default replaces the deprecated has_key() test and
    # the previous four-line if/else per field.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")

    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Insert one event object into the table matching its event type."""
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the top bit of ip/dla -- presumably because these are kernel
        # addresses with the sign bit set, which sqlite would otherwise store
        # as negative integers.  TODO confirm against EventClass semantics.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """Called by perf after the last sample: print reports, close the DB."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Render a count as a log2-scaled histogram bar of '#' characters.

    Every positive count yields at least one '#'; each doubling of the
    count adds one more.
    """
    bar_width = int(math.log(num, 2) + 1)
    return '#' * bar_width
def show_general_events():
    """Print histograms of the generic events grouped by comm/symbol/dso."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the general events grouped by thread/symbol/dso: \n"

    # Group by thread (descending count via "order by -count(comm)")
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print histograms of PEBS load-latency events grouped by
    thread/symbol/dse/latency."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dse (data source encoding)
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by latency, ascending by latency value rather than by count
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """Fallback handler: dump the fields of any event without a handler."""
    fields = sorted(event_fields_dict.items())
    print(' '.join('%s=%s' % (key, str(value)) for key, value in fields))
| gpl-2.0 |
Yury191/brownstonetutors | allauth/socialaccount/forms.py | 62 | 2703 | from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from allauth.account.forms import BaseSignupForm
from allauth.account.utils import (user_username, user_email,
user_field)
from .models import SocialAccount
from .adapter import get_adapter
from . import app_settings
from . import signals
class SignupForm(BaseSignupForm):
    """Signup form shown when a social login must create a local account.

    The pending ``sociallogin`` (injected by the view via kwargs) provides
    the provider-supplied user data used to pre-populate the form.
    """

    def __init__(self, *args, **kwargs):
        self.sociallogin = kwargs.pop('sociallogin')
        user = self.sociallogin.user
        # TODO: Should become more generic, not listing
        # a few fixed properties.
        initial = {'email': user_email(user) or '',
                   'username': user_username(user) or '',
                   'first_name': user_field(user, 'first_name') or '',
                   'last_name': user_field(user, 'last_name') or ''}
        kwargs.update({
            'initial': initial,
            # Caller-supplied email_required wins over the app setting.
            'email_required': kwargs.get('email_required',
                                         app_settings.EMAIL_REQUIRED)})
        super(SignupForm, self).__init__(*args, **kwargs)

    def save(self, request):
        """Create the new user via the configured adapter and return it."""
        adapter = get_adapter()
        user = adapter.save_user(request, self.sociallogin, form=self)
        self.custom_signup(request, user)
        return user

    def raise_duplicate_email_error(self):
        """Raise a ValidationError telling the user to connect the provider
        from the existing account that already owns this e-mail address."""
        raise forms.ValidationError(
            _("An account already exists with this e-mail address."
              " Please sign in to that account first, then connect"
              " your %s account.")
            % self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
    """Form letting a user detach one of their connected social accounts."""

    # Real queryset is assigned per-request in __init__.
    account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
                                     widget=forms.RadioSelect,
                                     required=True)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')
        # Only the requesting user's own social accounts may be chosen.
        self.accounts = SocialAccount.objects.filter(user=self.request.user)
        super(DisconnectForm, self).__init__(*args, **kwargs)
        self.fields['account'].queryset = self.accounts

    def clean(self):
        cleaned_data = super(DisconnectForm, self).clean()
        account = cleaned_data.get('account')
        if account:
            # Delegate additional disconnect validation to the adapter.
            get_adapter().validate_disconnect(account, self.accounts)
        return cleaned_data

    def save(self):
        """Delete the chosen account and emit the removal signal."""
        account = self.cleaned_data['account']
        account.delete()
        signals.social_account_removed.send(sender=SocialAccount,
                                            request=self.request,
                                            socialaccount=account)
| unlicense |
silentfuzzle/calibre | src/calibre/ebooks/pdf/render/links.py | 11 | 5627 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from urlparse import urlparse
from urllib2 import unquote
from calibre.ebooks.pdf.render.common import Array, Name, Dictionary, String, UTF16String
class Destination(Array):
    """A PDF /XYZ destination array pointing at a position on a page."""

    def __init__(self, start_page, pos, get_pageref):
        page_number = start_page + pos['column']
        try:
            page_ref = get_pageref(page_number)
        except IndexError:
            # Position lies past the last page: clamp to the page before it.
            page_ref = get_pageref(page_number - 1)
        super(Destination, self).__init__(
            [page_ref, Name('XYZ'), pos['left'], pos['top'], None])
class Links(object):
    """Collects links and anchors while pages are rendered, then creates
    the corresponding PDF link annotations and the outline (bookmark) tree.
    """

    def __init__(self, pdf, mark_links, page_size):
        self.anchors = {}
        self.links = []
        # Default destination: top-left of the first column of a page.
        self.start = {'top':page_size[1], 'column':0, 'left':0}
        self.pdf = pdf
        self.mark_links = mark_links

    def add(self, base_path, start_page, links, anchors):
        """Register the links/anchors from the file base_path, whose first
        rendered page is start_page."""
        path = os.path.normcase(os.path.abspath(base_path))
        self.anchors[path] = a = {}
        # The None key holds the implicit destination for the file itself.
        a[None] = Destination(start_page, self.start, self.pdf.get_pageref)
        for anchor, pos in anchors.iteritems():
            a[anchor] = Destination(start_page, pos, self.pdf.get_pageref)
        for link in links:
            href, page, rect = link
            p, frag = href.partition('#')[0::2]
            try:
                pref = self.pdf.get_pageref(page).obj
            except IndexError:
                try:
                    # Page out of range: retry one page back before giving up.
                    pref = self.pdf.get_pageref(page-1).obj
                except IndexError:
                    self.pdf.debug('Unable to find page for link: %r, ignoring it' % link)
                    continue
                self.pdf.debug('The link %s points to non-existent page, moving it one page back' % href)
            self.links.append(((path, p, frag or None), pref, Array(rect)))

    def add_links(self):
        """Create a link annotation for every collected link."""
        for link in self.links:
            path, href, frag = link[0]
            page, rect = link[1:]
            combined_path = os.path.normcase(os.path.abspath(os.path.join(os.path.dirname(path), *unquote(href).split('/'))))
            is_local = not href or combined_path in self.anchors
            annot = Dictionary({
                'Type':Name('Annot'), 'Subtype':Name('Link'),
                'Rect':rect, 'Border':Array([0,0,0]),
            })
            if self.mark_links:
                # Debug aid: draw a visible red border around link regions.
                annot.update({'Border':Array([16, 16, 1]), 'C':Array([1.0, 0,
                                                                      0])})
            if is_local:
                path = combined_path if href else path
                try:
                    annot['Dest'] = self.anchors[path][frag]
                except KeyError:
                    try:
                        # Unknown fragment: fall back to the file's top.
                        annot['Dest'] = self.anchors[path][None]
                    except KeyError:
                        pass
            else:
                url = href + (('#'+frag) if frag else '')
                try:
                    purl = urlparse(url)
                except Exception:
                    self.pdf.debug('Ignoring unparseable URL: %r' % url)
                    continue
                if purl.scheme and purl.scheme != 'file':
                    action = Dictionary({
                        'Type':Name('Action'), 'S':Name('URI'),
                    })
                    # Do not try to normalize/quote/unquote this URL as if it
                    # has a query part, it will get corrupted
                    action['URI'] = String(url)
                    annot['A'] = action
            if 'A' in annot or 'Dest' in annot:
                if 'Annots' not in page:
                    page['Annots'] = Array()
                page['Annots'].append(self.pdf.objects.add(annot))
            else:
                self.pdf.debug('Could not find destination for link: %s in file %s'%
                               (href, path))

    def add_outline(self, toc):
        """Build the PDF outline (bookmarks) tree from the given TOC tree."""
        parent = Dictionary({'Type':Name('Outlines')})
        parentref = self.pdf.objects.add(parent)
        self.process_children(toc, parentref, parent_is_root=True)
        self.pdf.catalog.obj['Outlines'] = parentref

    def process_children(self, toc, parentref, parent_is_root=False):
        """Recursively emit outline items for the children of one TOC node."""
        childrefs = []
        for child in toc:
            childref = self.process_toc_item(child, parentref)
            if childref is None:
                continue
            if childrefs:
                # Maintain the doubly-linked sibling list PDF outlines use.
                childrefs[-1].obj['Next'] = childref
                childref.obj['Prev'] = childrefs[-1]
            childrefs.append(childref)
            if len(child) > 0:
                self.process_children(child, childref)
        if childrefs:
            parentref.obj['First'] = childrefs[0]
            parentref.obj['Last'] = childrefs[-1]
            if not parent_is_root:
                # Negative Count marks the entries as collapsed by default
                # -- TODO confirm against the PDF outline spec.
                parentref.obj['Count'] = -len(childrefs)

    def process_toc_item(self, toc, parentref):
        """Create one outline item; return its ref, or None if the TOC
        entry points at a file with no registered anchors."""
        path = toc.abspath or None
        frag = toc.fragment or None
        if path is None:
            return
        path = os.path.normcase(os.path.abspath(path))
        if path not in self.anchors:
            return None
        a = self.anchors[path]
        dest = a.get(frag, a[None])
        item = Dictionary({'Parent':parentref, 'Dest':dest,
                           'Title':UTF16String(toc.text or _('Unknown'))})
        return self.pdf.objects.add(item)
| gpl-3.0 |
protatremy/buildbot | master/buildbot/test/unit/test_schedulers_timed_NightlyBase.py | 10 | 15578 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import time
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.schedulers import timed
from buildbot.test.util import scheduler
# multiprocessing is unavailable on some platforms; leave Process as None
# in that case so dependent code can detect and skip.
try:
    from multiprocessing import Process
    assert Process
except ImportError:
    Process = None
class NightlyBase(scheduler.SchedulerMixin, unittest.TestCase):

    """detailed getNextBuildTime tests"""
    OBJECTID = 133
    SCHEDULERID = 33

    def setUp(self):
        self.setUpScheduler()

    def makeScheduler(self, firstBuildDuration=0, **kwargs):
        """Build and attach a timed.NightlyBase scheduler with the given
        cron-style kwargs (minute/hour/dayOfMonth/month/dayOfWeek)."""
        return self.attachScheduler(timed.NightlyBase(**kwargs),
                                    self.OBJECTID, self.SCHEDULERID)

    @defer.inlineCallbacks
    def do_getNextBuildTime_test(self, sched, *expectations):
        """Check getNextBuildTime for each (lastActuated, expected) pair of
        local-time tuples (year, month, day[, hour[, minute[, second]]])."""
        for lastActuated, expected in expectations:
            # convert from tuples to epoch time (in local timezone); pad the
            # tuple to the 9 fields mktime wants, with isdst = -1 (unknown)
            lastActuated_ep, expected_ep = [
                time.mktime(t + (0,) * (8 - len(t)) + (-1,))
                for t in (lastActuated, expected)]
            got_ep = yield sched.getNextBuildTime(lastActuated_ep)
            self.assertEqual(got_ep, expected_ep,
                             "%s -> %s != %s" % (lastActuated,
                                                 time.localtime(got_ep),
                                                 expected))

    def test_getNextBuildTime_hourly(self):
        # no time constraints at all -> build at the top of every hour
        sched = self.makeScheduler(name='test', builderNames=['test'])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 0, 0), (2011, 1, 1, 4, 0, 0)),
            ((2011, 1, 1, 3, 15, 0), (2011, 1, 1, 4, 0, 0)),
            ((2011, 1, 1, 3, 15, 1), (2011, 1, 1, 4, 0, 0)),
            ((2011, 1, 1, 3, 59, 1), (2011, 1, 1, 4, 0, 0)),
            ((2011, 1, 1, 3, 59, 59), (2011, 1, 1, 4, 0, 0)),
            ((2011, 1, 1, 23, 22, 22), (2011, 1, 2, 0, 0, 0)),
            ((2011, 1, 1, 23, 59, 0), (2011, 1, 2, 0, 0, 0)),
        )

    def test_getNextBuildTime_minutes_single(self):
        # basically the same as .._hourly
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   minute=4)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 0, 0), (2011, 1, 1, 3, 4, 0)),
            ((2011, 1, 1, 3, 15, 0), (2011, 1, 1, 4, 4, 0)),
        )

    def test_getNextBuildTime_minutes_multiple(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   minute=[4, 34])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 0, 0), (2011, 1, 1, 3, 4, 0)),
            ((2011, 1, 1, 3, 15, 0), (2011, 1, 1, 3, 34, 0)),
            ((2011, 1, 1, 3, 34, 0), (2011, 1, 1, 4, 4, 0)),
            ((2011, 1, 1, 3, 59, 1), (2011, 1, 1, 4, 4, 0)),
        )

    def test_getNextBuildTime_minutes_star(self):
        # minute='*' -> build every minute
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   minute='*')
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 11, 30), (2011, 1, 1, 3, 12, 0)),
            ((2011, 1, 1, 3, 12, 0), (2011, 1, 1, 3, 13, 0)),
            ((2011, 1, 1, 3, 59, 0), (2011, 1, 1, 4, 0, 0)),
        )

    def test_getNextBuildTime_hours_single(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   hour=4)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 0), (2011, 1, 1, 4, 0)),
            ((2011, 1, 1, 13, 0), (2011, 1, 2, 4, 0)),
        )

    def test_getNextBuildTime_hours_multiple(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   hour=[7, 19])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 0), (2011, 1, 1, 7, 0)),
            ((2011, 1, 1, 7, 1), (2011, 1, 1, 19, 0)),
            ((2011, 1, 1, 18, 59), (2011, 1, 1, 19, 0)),
            ((2011, 1, 1, 19, 59), (2011, 1, 2, 7, 0)),
        )

    def test_getNextBuildTime_hours_minutes(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   hour=13, minute=19)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 1, 3, 11), (2011, 1, 1, 13, 19)),
            ((2011, 1, 1, 13, 19), (2011, 1, 2, 13, 19)),
            ((2011, 1, 1, 23, 59), (2011, 1, 2, 13, 19)),
        )

    def test_getNextBuildTime_month_single(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   month=3)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 2, 27, 3, 11), (2011, 3, 1, 0, 0)),
            # still hourly!
            ((2011, 3, 1, 1, 11), (2011, 3, 1, 2, 0)),
        )

    def test_getNextBuildTime_month_multiple(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   month=[4, 6])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 3, 30, 3, 11), (2011, 4, 1, 0, 0)),
            # still hourly!
            ((2011, 4, 1, 1, 11), (2011, 4, 1, 2, 0)),
            ((2011, 5, 29, 3, 11), (2011, 6, 1, 0, 0)),
        )

    def test_getNextBuildTime_month_dayOfMonth(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   month=[3, 6], dayOfMonth=[15])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 2, 12, 3, 11), (2011, 3, 15, 0, 0)),
            ((2011, 3, 12, 3, 11), (2011, 3, 15, 0, 0)),
        )

    def test_getNextBuildTime_dayOfMonth_single(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfMonth=10)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 9, 3, 0), (2011, 1, 10, 0, 0)),
            # still hourly!
            ((2011, 1, 10, 3, 0), (2011, 1, 10, 4, 0)),
            ((2011, 1, 30, 3, 0), (2011, 2, 10, 0, 0)),
            ((2011, 12, 30, 11, 0), (2012, 1, 10, 0, 0)),
        )

    def test_getNextBuildTime_dayOfMonth_multiple(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfMonth=[10, 20, 30])
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 9, 22, 0), (2011, 1, 10, 0, 0)),
            ((2011, 1, 19, 22, 0), (2011, 1, 20, 0, 0)),
            ((2011, 1, 29, 22, 0), (2011, 1, 30, 0, 0)),
            # no Feb 30!
            ((2011, 2, 29, 22, 0), (2011, 3, 10, 0, 0)),
        )

    def test_getNextBuildTime_dayOfMonth_hours_minutes(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfMonth=15, hour=20, minute=30)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 13, 22, 19), (2011, 1, 15, 20, 30)),
            ((2011, 1, 15, 19, 19), (2011, 1, 15, 20, 30)),
            ((2011, 1, 15, 20, 29), (2011, 1, 15, 20, 30)),
        )

    def test_getNextBuildTime_dayOfWeek_single(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfWeek=1)  # Tuesday (2011-1-1 was a Saturday)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 3, 22, 19), (2011, 1, 4, 0, 0)),
            # still hourly!
            ((2011, 1, 4, 19, 19), (2011, 1, 4, 20, 0)),
        )

    def test_getNextBuildTime_dayOfWeek_single_as_string(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfWeek="1")  # Tuesday (2011-1-1 was a Saturday)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 3, 22, 19), (2011, 1, 4, 0, 0)),
            # still hourly!
            ((2011, 1, 4, 19, 19), (2011, 1, 4, 20, 0)),
        )

    def test_getNextBuildTime_dayOfWeek_multiple_as_string(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfWeek="tue,3")  # Tuesday, Thursday (2011-1-1 was a Saturday)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 3, 22, 19), (2011, 1, 4, 0, 0)),
            # still hourly!
            ((2011, 1, 4, 19, 19), (2011, 1, 4, 20, 0)),
            ((2011, 1, 5, 22, 19), (2011, 1, 6, 0, 0)),
            # still hourly!
            ((2011, 1, 6, 19, 19), (2011, 1, 6, 20, 0)),
        )

    def test_getNextBuildTime_dayOfWeek_multiple_hours(self):
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfWeek=[1, 3], hour=1)  # Tuesday, Thursday (2011-1-1 was a Saturday)
        return self.do_getNextBuildTime_test(sched,
            ((2011, 1, 3, 22, 19), (2011, 1, 4, 1, 0)),
            ((2011, 1, 4, 22, 19), (2011, 1, 6, 1, 0)),
        )

    def test_getNextBuildTime_dayOfWeek_dayOfMonth(self):
        # dayOfWeek and dayOfMonth combine as "either matches"
        sched = self.makeScheduler(name='test', builderNames=['test'],
                                   dayOfWeek=[1, 4], dayOfMonth=5, hour=1)
        return self.do_getNextBuildTime_test(sched,
            # Tues
            ((2011, 1, 3, 22, 19), (2011, 1, 4, 1, 0)),
            # 5th
            ((2011, 1, 4, 22, 19), (2011, 1, 5, 1, 0)),
            # Thurs
            ((2011, 1, 5, 22, 19), (2011, 1, 7, 1, 0)),
        )
| gpl-2.0 |
GyrosOfWar/servo | tests/wpt/css-tests/tools/webdriver/webdriver/driver.py | 158 | 6732 | """Entry point for WebDriver."""
import alert
import command
import searchcontext
import webelement
import base64
class WebDriver(searchcontext.SearchContext):
    """Controls a web browser.

    mode is either 'strict' (W3C WebDriver protocol) or 'compatibility'
    (legacy Selenium wire protocol); several commands branch on it.
    """

    def __init__(self, host, required, desired, mode='strict'):
        args = {'desiredCapabilities': desired}
        if required:
            args['requiredCapabilities'] = required
        self._executor = command.CommandExecutor(host, mode)
        resp = self._executor.execute(
            'POST', '/session', None, 'newSession', args)
        self.capabilities = resp['value']
        self._session_id = resp['sessionId']
        self.mode = mode

    def execute(self, method, path, name, parameters=None):
        """Execute a command against the current WebDriver session."""
        data = self._executor.execute(
            method,
            '/session/' + self._session_id + path,
            self._session_id,
            name,
            parameters,
            self._object_hook)
        if data:
            return data['value']

    def get(self, url):
        """Navigate to url."""
        self.execute('POST', '/url', 'get', {'url': url})

    def get_current_url(self):
        """Get the current value of the location bar."""
        return self.execute('GET', '/url', 'getCurrentUrl')

    def go_back(self):
        """Hit the browser back button."""
        self.execute('POST', '/back', 'goBack')

    def go_forward(self):
        """Hit the browser forward button."""
        self.execute('POST', '/forward', 'goForward')

    def refresh(self):
        """Refresh the current page in the browser."""
        self.execute('POST', '/refresh', 'refresh')

    def quit(self):
        """Shutdown the current WebDriver session."""
        self.execute('DELETE', '', 'quit')

    def get_window_handle(self):
        """Get the handle for the browser window/tab currently accepting
        commands.
        """
        return self.execute('GET', '/window_handle', 'getWindowHandle')

    def get_window_handles(self):
        """Get handles for all open windows/tabs."""
        return self.execute('GET', '/window_handles', 'getWindowHandles')

    def close(self):
        """Close the current tab or window.

        If this is the last tab or window, then this is the same as
        calling quit.
        """
        self.execute('DELETE', '/window', 'close')

    def maximize_window(self):
        """Maximize the current window."""
        return self._window_command('POST', '/maximize', 'maximize')

    def get_window_size(self):
        """Get the dimensions of the current window as a dict with
        'height' and 'width' keys."""
        result = self._window_command('GET', '/size', 'getWindowSize')
        # Bug fix: the keys must be string literals; the previous
        # result[height] / result[width] raised NameError on bare names.
        return {'height': result['height'], 'width': result['width']}

    def set_window_size(self, height, width):
        """Set the size of the current window."""
        self._window_command(
            'POST',
            '/size',
            'setWindowSize',
            {'height': height, 'width': width})

    def fullscreen_window(self):
        """Make the current window fullscreen."""
        pass  # implement when end point is defined

    def switch_to_window(self, name):
        """Switch to the window with the given handle or name."""
        self.execute('POST', '/window', 'switchToWindow', {'name': name})

    def switch_to_frame(self, id):
        """Switch to a frame.

        id can be either a WebElement or an integer.  (The parameter name
        shadows the builtin on purpose: it is part of the public API.)
        """
        self.execute('POST', '/frame', 'switchToFrame', {'id': id})

    def switch_to_parent_frame(self):
        """Move to the browsing context containing the currently selected frame.

        If in the top-level browsing context, this is a no-op.
        """
        self.execute('POST', '/frame/parent', 'switchToParentFrame')

    def switch_to_alert(self):
        """Return an Alert object to interact with a modal dialog."""
        alert_ = alert.Alert(self)
        # Probe the dialog so a missing alert fails here, not later.
        alert_.get_text()
        return alert_

    def execute_script(self, script, args=[]):
        """Execute a Javascript script in the current browsing context."""
        return self.execute(
            'POST',
            '/execute',
            'executeScript',
            {'script': script, 'args': args})

    def execute_script_async(self, script, args=[]):
        """Execute a Javascript script asynchronously in the current
        browsing context."""
        return self.execute(
            'POST',
            '/execute_async',
            'executeScriptAsync',
            {'script': script, 'args': args})

    def take_screenshot(self, element=None):
        """Take a screenshot.

        If element is not provided, the screenshot should be of the
        current page, otherwise the screenshot should be of the given element.
        """
        if self.mode == 'strict':
            pass  # implement when endpoint is defined
        elif self.mode == 'compatibility':
            if element:
                pass  # element screenshots are unsupported in compatibility
            else:
                return base64.standard_b64decode(
                    self.execute('GET', '/screenshot', 'takeScreenshot'))

    def add_cookie(self, cookie):
        """Add a cookie to the browser."""
        self.execute('POST', '/cookie', 'addCookie', {'cookie': cookie})

    def get_cookie(self, name=None):
        """Get the cookies accessible from the current page, optionally
        filtered to those matching name."""
        if self.mode == 'compatibility':
            cookies = self.execute('GET', '/cookie', 'getCookie')
            if name:
                return [cookie for cookie in cookies
                        if cookie['name'] == name]
            return cookies
        elif self.mode == 'strict':
            pass  # implement when wire protocol for this has been defined

    def set_implicit_timeout(self, ms):
        """Set the implicit element-location timeout, in milliseconds."""
        self._set_timeout('implicit', ms)

    def set_page_load_timeout(self, ms):
        """Set the page load timeout, in milliseconds."""
        self._set_timeout('page load', ms)

    def set_script_timeout(self, ms):
        """Set the asynchronous script timeout, in milliseconds."""
        self._set_timeout('script', ms)

    def _set_timeout(self, type, ms):
        params = {'type': type, 'ms': ms}
        self.execute('POST', '/timeouts', 'timeouts', params)

    def _window_command(self, method, path, name, parameters=None):
        # Window commands address the 'current' window in compatibility mode.
        if self.mode == 'compatibility':
            return self.execute(
                method, '/window/current' + path, name, parameters)
        elif self.mode == 'strict':
            pass  # implement this when end-points are defined in doc

    def _object_hook(self, obj):
        # JSON decode hook: materialize {'ELEMENT': ...} as a WebElement.
        if 'ELEMENT' in obj:
            return webelement.WebElement(self, obj['ELEMENT'])
        return obj
| mpl-2.0 |
matrix-org/synapse | synapse/config/voip.py | 1 | 2161 | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class VoipConfig(Config):
    """Configuration section for VoIP / TURN server integration."""

    section = "voip"

    def read_config(self, config, **kwargs):
        self.turn_uris = config.get("turn_uris", [])
        self.turn_shared_secret = config.get("turn_shared_secret")
        self.turn_username = config.get("turn_username")
        self.turn_password = config.get("turn_password")
        # Accepts duration strings such as "1h" -- parsed by the base
        # class's parse_duration (presumably to milliseconds; confirm there).
        self.turn_user_lifetime = self.parse_duration(
            config.get("turn_user_lifetime", "1h")
        )
        self.turn_allow_guests = config.get("turn_allow_guests", True)

    def generate_config_section(self, **kwargs):
        # Sample YAML emitted into the generated homeserver config file.
        return """\
        ## TURN ##

        # The public URIs of the TURN server to give to clients
        #
        #turn_uris: []

        # The shared secret used to compute passwords for the TURN server
        #
        #turn_shared_secret: "YOUR_SHARED_SECRET"

        # The Username and password if the TURN server needs them and
        # does not use a token
        #
        #turn_username: "TURNSERVER_USERNAME"
        #turn_password: "TURNSERVER_PASSWORD"

        # How long generated TURN credentials last
        #
        #turn_user_lifetime: 1h

        # Whether guests should be allowed to use the TURN server.
        # This defaults to True, otherwise VoIP will be unreliable for guests.
        # However, it does introduce a slight security risk as it allows users to
        # connect to arbitrary endpoints without having first signed up for a
        # valid account (e.g. by passing a CAPTCHA).
        #
        #turn_allow_guests: true
        """
| apache-2.0 |
fabiocerqueira/django-allauth | allauth/socialaccount/migrations/0001_initial.py | 60 | 3746 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import allauth.socialaccount.fields
from allauth.socialaccount.providers import registry
class Migration(migrations.Migration):
    """Initial schema for the socialaccount app: SocialAccount, SocialApp
    and SocialToken models plus their uniqueness constraints."""

    dependencies = [
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='SocialAccount',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Provider choices come from the runtime provider registry.
                ('provider', models.CharField(max_length=30, verbose_name='provider', choices=registry.as_choices())),
                ('uid', models.CharField(max_length=255, verbose_name='uid')),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('extra_data', allauth.socialaccount.fields.JSONField(default='{}', verbose_name='extra data')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'social account',
                'verbose_name_plural': 'social accounts',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='SocialApp',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('provider', models.CharField(max_length=30, verbose_name='provider', choices=registry.as_choices())),
                ('name', models.CharField(max_length=40, verbose_name='name')),
                ('client_id', models.CharField(help_text='App ID, or consumer key', max_length=100, verbose_name='client id')),
                ('secret', models.CharField(help_text='API secret, client secret, or consumer secret', max_length=100, verbose_name='secret key')),
                ('key', models.CharField(help_text='Key', max_length=100, verbose_name='key', blank=True)),
                ('sites', models.ManyToManyField(to='sites.Site', blank=True)),
            ],
            options={
                'verbose_name': 'social application',
                'verbose_name_plural': 'social applications',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='SocialToken',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.TextField(help_text='"oauth_token" (OAuth1) or access token (OAuth2)', verbose_name='token')),
                ('token_secret', models.TextField(help_text='"oauth_token_secret" (OAuth1) or refresh token (OAuth2)', verbose_name='token secret', blank=True)),
                ('expires_at', models.DateTimeField(null=True, verbose_name='expires at', blank=True)),
                ('account', models.ForeignKey(to='socialaccount.SocialAccount')),
                ('app', models.ForeignKey(to='socialaccount.SocialApp')),
            ],
            options={
                'verbose_name': 'social application token',
                'verbose_name_plural': 'social application tokens',
            },
            bases=(models.Model,),
        ),
        # One token per (app, account); one account per (provider, uid).
        migrations.AlterUniqueTogether(
            name='socialtoken',
            unique_together=set([('app', 'account')]),
        ),
        migrations.AlterUniqueTogether(
            name='socialaccount',
            unique_together=set([('provider', 'uid')]),
        ),
    ]
| mit |
NoumirPoutipou/oanq | conftest.py | 1 | 1596 | import pytest
from django.test import signals
from django.test.client import Client
from django.contrib.auth.models import User, Permission
from jinja2 import Template as Jinja2Template
## Wait for https://code.djangoproject.com/ticket/24622
ORIGINAL_JINJA2_RENDERER = Jinja2Template.render


def instrumented_render(template_object, *args, **kwargs):
    """Monkey-patch replacement for jinja2 Template.render that fires
    Django's template_rendered signal, so the test client records the
    template context (see the ticket referenced above)."""
    context = dict(*args, **kwargs)
    if 'request' in context:
        # Mirror what Django context processors normally provide.
        context['user'] = context['request'].user
    signals.template_rendered.send(
        sender=template_object,
        template=template_object,
        context=context
    )
    return ORIGINAL_JINJA2_RENDERER(template_object, *args, **kwargs)
@pytest.fixture(scope="module")
def context(request):
    """Patch jinja2 rendering for the whole module so response.context is
    populated; restore the original renderer when the module finishes."""
    Jinja2Template.render = instrumented_render

    def fin():
        Jinja2Template.render = ORIGINAL_JINJA2_RENDERER
    request.addfinalizer(fin)
    return None  # provide nothing
@pytest.fixture()
def user(db):
    """A plain user account ('boubou') persisted in the test database."""
    return User.objects.create_user('boubou', 'bou@bou.com', 'pass')
@pytest.fixture()
def user_client(db):
    """A test client logged in as a freshly created regular user."""
    test_client = Client()
    account = User.objects.create_user('boubou', 'bou@bou.com', 'pass')
    # NOTE(review): login() is given the e-mail while the created username is
    # 'boubou'; this only authenticates if an email-based auth backend
    # (e.g. django-allauth) is configured -- confirm.
    test_client.login(username='bou@bou.com', password='pass')
    test_client.user = account
    return test_client
@pytest.fixture()
def ivy_admin_client(db):
    """A test client logged in as a user holding the 'change_admin' permission."""
    admin_client = Client()
    account = User.objects.create_user('boubouadmin', 'bouadmin@bou.com', 'pass')
    account.user_permissions.add(Permission.objects.get(codename='change_admin'))
    # NOTE(review): same username/e-mail mismatch as user_client -- confirm an
    # email auth backend is in play.
    admin_client.login(username='bouadmin@bou.com', password='pass')
    admin_client.user = account
    return admin_client
| bsd-3-clause |
yipenggao/moose | python/chigger/base/ChiggerResult.py | 6 | 6289 | #pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import mooseutils
from ChiggerResultBase import ChiggerResultBase
from ChiggerSourceBase import ChiggerSourceBase
from .. import utils
class ChiggerResult(ChiggerResultBase):
    """
    A ChiggerResult object capable of attaching an arbitrary number of ChiggerFilterSourceBase
    objects to the vtkRenderer.

    Any options supplied to this object are automatically passed down to the ChiggerFilterSourceBase
    objects contained by this class, if the applicable. To have the settings of the contained source
    objects appear in this objects option dump then simply add the settings to the static getOptions
    method of the derived class. This is not done here because this class is designed to accept
    arbitrary ChiggerFilterSourceBase object which may have varying settings, see ExodusResult for
    an example of a single type implementation based on this class.

    Inputs:
        *sources: A tuple of ChiggerFilterSourceBase object to render.
        **kwargs: see ChiggerResultBase
    """

    # The Base class type that this object to which its ownership is restricted.
    SOURCE_TYPE = ChiggerSourceBase

    @staticmethod
    def getOptions():
        opt = ChiggerResultBase.getOptions()
        return opt

    def __init__(self, *sources, **kwargs):
        super(ChiggerResult, self).__init__(**kwargs)
        self._sources = sources

    def needsUpdate(self):
        """
        Checks if this object or any of the contained ChiggerFilterSourceBase object require update.
        (override)
        """
        # Generator expression avoids building a throwaway list; short-circuit
        # evaluation is safe because needsUpdate() is a side-effect free query.
        return super(ChiggerResult, self).needsUpdate() or \
               any(src.needsUpdate() for src in self._sources)

    def updateOptions(self, *args):
        """
        Apply the supplied option objects to this object and the contained ChiggerFilterSourceBase
        objects. (override)

        Inputs:
            see ChiggerResultBase
        """
        # Every updateOptions() call below must execute (they have side effects),
        # so the flags are collected eagerly and reduced with any() afterwards.
        changed = [self.needsUpdate()]
        changed.append(super(ChiggerResult, self).updateOptions(*args))
        for src in self._sources:
            changed.append(src.updateOptions(*args))
        changed = any(changed)
        self.setNeedsUpdate(changed)
        return changed

    def setOptions(self, *args, **kwargs):
        """
        Apply the supplied options to this object and the contained ChiggerFilterSourceBase objects.
        (override)

        Inputs:
            see ChiggerResultBase
        """
        # Same eager-evaluation pattern as updateOptions().
        changed = [self.needsUpdate()]
        changed.append(super(ChiggerResult, self).setOptions(*args, **kwargs))
        for src in self._sources:
            changed.append(src.setOptions(*args, **kwargs))
        changed = any(changed)
        self.setNeedsUpdate(changed)
        return changed

    def update(self, **kwargs):
        """
        Update this object and the contained ChiggerFilterSourceBase objects. (override)

        Inputs:
            see ChiggerResultBase
        """
        super(ChiggerResult, self).update(**kwargs)
        for src in self._sources:
            if src.needsUpdate():
                src.update()

    def getSources(self):
        """
        Return the list of ChiggerSource objects.
        """
        return self._sources

    def getBounds(self, check=True):
        """
        Return the bounding box of the results.

        Inputs:
            check[bool]: (Default: True) When True, perform an update check and raise an exception
                         if object is not up-to-date. This should not be used.

        TODO: For Peacock, on linux check=False must be set, but I am not sure why.
        """
        if check:
            self.checkUpdateState()
        elif self.needsUpdate():
            self.update()
        return utils.get_bounds(*self._sources)

    def getRange(self):
        """
        Return the min/max range for the selected variables and blocks/boundary/nodeset.

        NOTE: For the range to be restricted by block/boundary/nodest the reader must have
        "squeeze=True", which can be much slower.
        """
        rngs = [src.getRange() for src in self._sources]
        return utils.get_min_max(*rngs)

    def reset(self):
        """
        Remove actors from renderer.
        """
        super(ChiggerResult, self).reset()
        for src in self._sources:
            self._vtkrenderer.RemoveViewProp(src.getVTKActor())

    def initialize(self):
        """
        Initialize by adding actors to renderer.

        Raises:
            mooseutils.MooseException: if any source is not an instance of SOURCE_TYPE.
        """
        super(ChiggerResult, self).initialize()

        # Validate every source *before* touching the renderer so a bad source
        # cannot leave the renderer partially populated when the exception fires.
        for src in self._sources:
            if not isinstance(src, self.SOURCE_TYPE):
                n = src.__class__.__name__
                t = self.SOURCE_TYPE.__name__
                msg = 'The supplied source type of {} must be of type {}.'.format(n, t)
                raise mooseutils.MooseException(msg)

        for src in self._sources:
            src.setVTKRenderer(self._vtkrenderer)
            self._vtkrenderer.AddViewProp(src.getVTKActor())

    def __iter__(self):
        """
        Provides iteration access to the underlying source objects.
        """
        for src in self._sources:
            yield src

    def __getitem__(self, index):
        """
        Provide [] access to the source objects.
        """
        return self._sources[index]

    def __len__(self):
        """
        The number of source objects.
        """
        return len(self._sources)
| lgpl-2.1 |
initNirvana/Easyphotos | env/lib/python3.4/site-packages/gunicorn/_compat.py | 35 | 6067 | import sys
from gunicorn import six
PY33 = (sys.version_info >= (3, 3))
def _check_if_pyc(fname):
    """Locate *fname* as a module and classify it as source or compiled.

    Returns a ``(pytype, fileobj, fullpath)`` triple as produced by
    ``imp.find_module``; raises ``IOError`` when the module cannot be found.
    """
    from imp import find_module
    from os.path import realpath, dirname, basename, splitext

    # Normalize the file-path for the find_module()
    resolved = realpath(fname)
    search_dir = dirname(resolved)
    module_name = splitext(basename(resolved))[0]

    try:
        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [search_dir])
    except ImportError:
        raise IOError("Cannot find config file. "
                      "Path maybe incorrect! : {0}".format(resolved))
    return pytype, fileobj, fullpath
def _get_codeobj(pyfile):
    """ Returns the code object, given a python file """
    from imp import PY_COMPILED, PY_SOURCE

    pytype, fileobj, fullpath = _check_if_pyc(pyfile)

    # Read the whole file eagerly; note fp.read() can blow up for
    # extremely large modules.
    try:
        data = fileobj.read()
    finally:
        fileobj.close()

    if pytype is PY_COMPILED:
        # .pyc layout: 4-byte magic number, 4-byte source mtime, then the
        # marshalled code object -- so unmarshal everything from offset 8.
        import marshal
        return marshal.loads(data[8:])
    if pytype is PY_SOURCE:
        # Plain .py source: compile it ourselves.
        return compile(data, fullpath, 'exec')

    # Unsupported extension
    raise Exception("Input file is unknown format: {0}".format(fullpath))
# Python-version-specific implementations of execfile_/bytes_to_str/
# unquote_to_wsgi_str.  The chosen branch is fixed at import time.
if six.PY3:
    def execfile_(fname, *args):
        # Python 3 has no execfile(); load the marshalled code object for
        # .pyc files, otherwise compile the raw source, then exec it.
        if fname.endswith(".pyc"):
            code = _get_codeobj(fname)
        else:
            code = compile(open(fname, 'rb').read(), fname, 'exec')
        return six.exec_(code, *args)

    def bytes_to_str(b):
        # WSGI "native strings": bytes are decoded as latin-1 (PEP 3333).
        if isinstance(b, six.text_type):
            return b
        return str(b, 'latin1')

    import urllib.parse

    def unquote_to_wsgi_str(string):
        # Percent-decode to raw bytes first, then re-decode as latin-1 so
        # arbitrary byte values survive the round-trip.
        return _unquote_to_bytes(string).decode('latin-1')

    _unquote_to_bytes = urllib.parse.unquote_to_bytes
else:
    def execfile_(fname, *args):
        """ Overriding PY2 execfile() implementation to support .pyc files """
        if fname.endswith(".pyc"):
            return six.exec_(_get_codeobj(fname), *args)
        return execfile(fname, *args)

    def bytes_to_str(s):
        # On Python 2 ``str`` already is a byte string; encode unicode input.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    import urllib
    unquote_to_wsgi_str = urllib.unquote
# The following code adapted from trollius.py33_exceptions
def _wrap_error(exc, mapping, key):
    """Re-raise *exc* as ``mapping[key]``, preserving the original traceback.

    Does nothing when *key* has no specialized exception class in *mapping*.
    """
    specialized_cls = mapping.get(key)
    if specialized_cls is None:
        return
    specialized = specialized_cls(*exc.args)
    # Re-raise with the original traceback so the failure site is preserved.
    if hasattr(exc, '__traceback__'):
        tb = exc.__traceback__
    else:
        tb = sys.exc_info()[2]
    six.reraise(specialized_cls, specialized, tb)
if PY33:
    # Python >= 3.3: the specialized OSError subclasses (PEP 3151) already
    # exist as builtins -- re-export them and make wrap_error a passthrough.
    import builtins

    BlockingIOError = builtins.BlockingIOError
    BrokenPipeError = builtins.BrokenPipeError
    ChildProcessError = builtins.ChildProcessError
    ConnectionRefusedError = builtins.ConnectionRefusedError
    ConnectionResetError = builtins.ConnectionResetError
    InterruptedError = builtins.InterruptedError
    ConnectionAbortedError = builtins.ConnectionAbortedError
    PermissionError = builtins.PermissionError
    FileNotFoundError = builtins.FileNotFoundError
    ProcessLookupError = builtins.ProcessLookupError

    def wrap_error(func, *args, **kw):
        # Exceptions are already specialized on 3.3+; just call through.
        return func(*args, **kw)
else:
    import errno
    import select
    import socket

    # Backported stand-ins for the PEP 3151 exception hierarchy.
    class BlockingIOError(OSError):
        pass

    class BrokenPipeError(OSError):
        pass

    class ChildProcessError(OSError):
        pass

    class ConnectionRefusedError(OSError):
        pass

    class InterruptedError(OSError):
        pass

    class ConnectionResetError(OSError):
        pass

    class ConnectionAbortedError(OSError):
        pass

    class PermissionError(OSError):
        pass

    class FileNotFoundError(OSError):
        pass

    class ProcessLookupError(OSError):
        pass

    # errno -> specialized exception class, mirroring Python 3.3 semantics.
    _MAP_ERRNO = {
        errno.EACCES: PermissionError,
        errno.EAGAIN: BlockingIOError,
        errno.EALREADY: BlockingIOError,
        errno.ECHILD: ChildProcessError,
        errno.ECONNABORTED: ConnectionAbortedError,
        errno.ECONNREFUSED: ConnectionRefusedError,
        errno.ECONNRESET: ConnectionResetError,
        errno.EINPROGRESS: BlockingIOError,
        errno.EINTR: InterruptedError,
        errno.ENOENT: FileNotFoundError,
        errno.EPERM: PermissionError,
        errno.EPIPE: BrokenPipeError,
        errno.ESHUTDOWN: BrokenPipeError,
        errno.EWOULDBLOCK: BlockingIOError,
        errno.ESRCH: ProcessLookupError,
    }

    def wrap_error(func, *args, **kw):
        """
        Wrap socket.error, IOError, OSError, select.error to raise new specialized
        exceptions of Python 3.3 like InterruptedError (PEP 3151).
        """
        try:
            return func(*args, **kw)
        except (socket.error, IOError, OSError) as exc:
            # Prefer the Windows-specific error code when present.
            if hasattr(exc, 'winerror'):
                _wrap_error(exc, _MAP_ERRNO, exc.winerror)
            # _MAP_ERRNO does not contain all Windows errors.
            # For some errors like "file not found", exc.errno should
            # be used (ex: ENOENT).
            _wrap_error(exc, _MAP_ERRNO, exc.errno)
            raise
        except select.error as exc:
            if exc.args:
                _wrap_error(exc, _MAP_ERRNO, exc.args[0])
            raise
| mit |
PythonCharmers/bokeh | bokeh/models/sources.py | 13 | 10604 | from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either, Bool, Enum
from ..validation.errors import COLUMN_LENGTHS
from .. import validation
from ..util.serialization import transform_column_source_data
from .actions import Callback
class DataSource(PlotObject):
    """ A base class for data source types. ``DataSource`` is
    not generally useful to instantiate on its own.
    """

    column_names = List(String, help="""
    An list of names for all the columns in this DataSource.
    """)

    # Per-dimension hit-test state; the default below documents the expected
    # shape of the dict ('0d'/'1d'/'2d' keys).
    selected = Dict(String, Dict(String, Any), default={
        '0d': {'flag': False, 'indices': []},
        '1d': {'indices': []},
        '2d': {'indices': []}
    }, help="""
    A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
    - 0d: indicates whether a Line or Patch glyphs have been hit. Value is a
    dict with the following keys:
    - flag (boolean): true if glyph was with false otherwise
    - indices (list): indices hit (if applicable)
    - 1d: indicates whether any of all other glyph (except [multi]line or
    patches) was hit:
    - indices (list): indices that were hit/selected
    - 2d: indicates whether a [multi]line or patches) were hit:
    - indices (list(list)): indices of the lines/patches that were
    hit/selected
    """)

    callback = Instance(Callback, help="""
    A callback to run in the browser whenever the selection is changed.
    """)

    def columns(self, *columns):
        """ Returns a ColumnsRef object for a column or set of columns
        on this data source.

        Args:
            *columns: column names to bundle into the reference

        Returns:
            ColumnsRef
        """
        return ColumnsRef(source=self, columns=list(columns))
class ColumnsRef(HasProps):
    """ A utility object to allow referring to a collection of columns
    from a specified data source, all together.
    """

    source = Instance(DataSource, help="""
    A data source to reference.
    """)

    columns = List(String, help="""
    A list of column names to reference from ``source``.
    """)
class ColumnDataSource(DataSource):
    """ Maps names of columns to sequences or arrays.

    If the ColumnDataSource initializer is called with a single
    argument that is a dict, that argument is used as the value for
    the "data" attribute. For example::

        ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)

    .. note::
        There is an implicit assumption that all the columns in a
        a given ColumnDataSource have the same length.
    """

    data = Dict(String, Any, help="""
    Mapping of column names to sequences of data. The data can be, e.g,
    Python lists or tuples, NumPy arrays, etc.
    """)

    def __init__(self, *args, **kw):
        """ If called with a single argument that is a dict, treat
        that implicitly as the "data" attribute.

        Raises:
            ValueError: if more than one positional argument is given, or
                if both a positional argument and a "data" keyword argument
                are supplied (previously the conflict was silently ignored).
        """
        if len(args) > 1:
            raise ValueError("expected at most one positional argument, got %d" % len(args))
        if len(args) == 1:
            if "data" in kw:
                raise ValueError("expected either a positional argument or a 'data' keyword argument, not both")
            kw["data"] = args[0]
        raw_data = kw.pop("data", {})
        if not isinstance(raw_data, dict):
            import pandas as pd
            if isinstance(raw_data, pd.DataFrame):
                raw_data = self.from_df(raw_data)
            else:
                raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
        # add() (rather than direct assignment) keeps column_names in sync.
        for name, data in raw_data.items():
            self.add(data, name)
        super(ColumnDataSource, self).__init__(**kw)

    # TODO: (bev) why not just return a ColumnDataSource?
    @classmethod
    def from_df(cls, data):
        """ Create a ``dict`` of columns from a Pandas DataFrame,
        suitable for creating a ColumnDataSource.

        Args:
            data (DataFrame) : data to convert

        Returns:
            dict(str, list)
        """
        index = data.index
        new_data = {}
        for colname in data:
            new_data[colname] = data[colname].tolist()
        # The index becomes a regular column; pick a name that reflects the
        # (possibly multi-level) index, falling back to "index".
        if index.name:
            new_data[index.name] = index.tolist()
        elif index.names and not all([x is None for x in index.names]):
            new_data["_".join(index.names)] = index.tolist()
        else:
            new_data["index"] = index.tolist()
        return new_data

    def to_df(self):
        """ Convert this data source to pandas dataframe.

        If ``column_names`` is set, use those. Otherwise let Pandas
        infer the column names. The ``column_names`` property can be
        used both to order and filter the columns.

        Returns:
            DataFrame
        """
        import pandas as pd
        if self.column_names:
            return pd.DataFrame(self.data, columns=self.column_names)
        else:
            return pd.DataFrame(self.data)

    def add(self, data, name=None):
        """ Appends a new column of data to the data source.

        Args:
            data (seq) : new data to add
            name (str, optional) : column name to use.
                If not supplied, generate a name go the form "Series ####"

        Returns:
            str: the column name used
        """
        if name is None:
            # Generate the first unused "Series N" name.
            n = len(self.data)
            while "Series %d"%n in self.data:
                n += 1
            name = "Series %d"%n
        self.column_names.append(name)
        self.data[name] = data
        return name

    def vm_serialize(self, changed_only=True):
        """ Serialize for the client, transforming the data columns into
        the compact wire format. (override)
        """
        attrs = super(ColumnDataSource, self).vm_serialize(changed_only=changed_only)
        if 'data' in attrs:
            attrs['data'] = transform_column_source_data(attrs['data'])
        return attrs

    def remove(self, name):
        """ Remove a column of data.

        Args:
            name (str) : name of the column to remove

        Returns:
            None

        .. note::
            If the column name does not exist, a warning is issued.
        """
        try:
            self.column_names.remove(name)
            del self.data[name]
        except (ValueError, KeyError):
            import warnings
            warnings.warn("Unable to find column '%s' in data source" % name)

    def push_notebook(self):
        """ Update date for a plot in the IPthon notebook in place.

        This function can be be used to update data in plot data sources
        in the IPython notebook, without having to use the Bokeh server.

        Returns:
            None

        .. warning::
            The current implementation leaks memory in the IPython notebook,
            due to accumulating JS code. This function typically works well
            with light UI interactions, but should not be used for continuously
            updating data. See :bokeh-issue:`1732` for more details and to
            track progress on potential fixes.
        """
        from IPython.core import display
        from bokeh.protocol import serialize_json
        id = self.ref['id']
        model = self.ref['type']
        json = serialize_json(self.vm_serialize())
        js = """
            var ds = Bokeh.Collections('{model}').get('{id}');
            var data = {json};
            ds.set(data);
        """.format(model=model, id=id, json=json)
        display.display_javascript(js, raw=True)

    @validation.error(COLUMN_LENGTHS)
    def _check_column_lengths(self):
        # All columns must share one length (see class-level note).
        lengths = set(len(x) for x in self.data.values())
        if len(lengths) > 1:
            return str(self)
class RemoteSource(DataSource):
    # Base class for data sources whose columns are fetched from a URL
    # endpoint, optionally on a polling schedule.

    data_url = String(help="""
    The URL to the endpoint for the data.
    """)

    data = Dict(String, Any, help="""
    Additional data to include directly in this data source object. The
    columns provided here are merged with those from the Bokeh server.
    """)

    polling_interval = Int(help="""
    polling interval for updating data source in milliseconds
    """)
class AjaxDataSource(RemoteSource):
    # RemoteSource fetched via browser-side AJAX requests.

    method = Enum('POST', 'GET', help="http method - GET or POST")

    mode = Enum("replace", "append", help="""
    Whether to append new data to existing data (up to ``max_size``),
    or to replace existing data entirely.
    """)

    max_size = Int(help="""
    Maximum size of the data array being kept after each pull requests.
    Larger than that size, the data will be right shifted.
    """)

    if_modified = Bool(False, help="""
    Whether to include an ``If-Modified-Since`` header in AJAX requests
    to the server. If this header is supported by the server, then only
    new data since the last request will be returned.
    """)
class BlazeDataSource(RemoteSource):
    # RemoteSource backed by a Blaze compute server; the query is stored as
    # a serialized Blaze expression graph.

    #blaze parts
    expr = Dict(String, Any(), help="""
    blaze expression graph in json form
    """)

    namespace = Dict(String, Any(), help="""
    namespace in json form for evaluating blaze expression graph
    """)

    local = Bool(help="""
    Whether this data source is hosted by the bokeh server or not.
    """)

    def from_blaze(self, remote_blaze_obj, local=True):
        # Capture a remote Blaze expression: serialize its graph and record
        # the server's compute endpoint as this source's data_url.
        from blaze.server import to_tree
        # only one Client object, can hold many datasets
        assert len(remote_blaze_obj._leaves()) == 1
        leaf = remote_blaze_obj._leaves()[0]
        blaze_client = leaf.data
        json_expr = to_tree(remote_blaze_obj, {leaf : ':leaf'})
        self.data_url = blaze_client.url + "/compute.json"
        self.local = local
        self.expr = json_expr

    def to_blaze(self):
        # Inverse of from_blaze(): rebuild a live Blaze expression bound to
        # a fresh client for this source's endpoint.
        from blaze.server.client import Client
        from blaze.server import from_tree
        from blaze import Data
        # hacky - blaze urls have `compute.json` in it, but we need to strip it off
        # to feed it into the blaze client lib
        c = Client(self.data_url.rsplit('compute.json', 1)[0])
        d = Data(c)
        return from_tree(self.expr, {':leaf' : d})
class ServerDataSource(BlazeDataSource):
    """ A data source that referes to data located on a Bokeh server.

    The data from the server is loaded on-demand by the client.
    """
    # Paramters of data transformation operations
    # The 'Any' is used to pass primtives around.
    # TODO: (jc) Find/create a property type for 'any primitive/atomic value'
    transform = Dict(String,Either(Instance(PlotObject), Any), help="""
    Paramters of the data transformation operations.
    The associated valuse is minimally a tag that says which downsample routine
    to use. For some downsamplers, parameters are passed this way too.
    """)
| bsd-3-clause |
HiroIshikawa/21playground | microblog/flask/lib/python3.5/site-packages/pip/_vendor/distlib/_backport/sysconfig.py | 765 | 26958 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
# Determine the interpreter's project base (the directory the build/install
# is rooted at); several Windows in-tree-build layouts are special-cased.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())

# Windows in-tree builds run from PCbuild; walk up to the checkout root.
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
    """Return True when running from a source checkout (Modules/Setup.* present)."""
    return any(
        os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn))
        for fn in ("Setup.dist", "Setup.local"))
# Computed once at import time; the running interpreter's layout is fixed.
_PYTHON_BUILD = is_python_build()

# Guards the lazy one-time load of sysconfig.cfg into _SCHEMES.
_cfg_read = False
def _ensure_cfg_read():
    """Lazily load the bundled sysconfig.cfg into ``_SCHEMES`` exactly once."""
    global _cfg_read
    if _cfg_read:
        return
    from ..resources import finder
    backport_package = __name__.rsplit('.', 1)[0]
    cfg_resource = finder(backport_package).find('sysconfig.cfg')
    assert cfg_resource, 'sysconfig.cfg exists'
    with cfg_resource.as_stream() as stream:
        _SCHEMES.readfp(stream)
    if _PYTHON_BUILD:
        # In-tree builds pick headers up from the source checkout instead of
        # the installed include directories.
        for scheme in ('posix_prefix', 'posix_home'):
            _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
            _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
    _cfg_read = True
# Raw scheme table; populated on demand by _ensure_cfg_read().
_SCHEMES = configparser.RawConfigParser()
# Matches {name} substitution tokens inside scheme path templates.
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
    """Fold the [globals] section of *config* into every other section, then
    expand {name} references using each section's own option values."""
    _ensure_cfg_read()
    if config.has_section('globals'):
        defaults = config.items('globals')
    else:
        defaults = tuple()

    for section in config.sections():
        if section == 'globals':
            continue
        # Explicit per-section options win over globals.
        for option, value in defaults:
            if not config.has_option(section, option):
                config.set(section, option, value)
    config.remove_section('globals')

    # now expanding local variables defined in the cfg file
    for section in config.sections():
        variables = dict(config.items(section))

        def _replacer(matchobj):
            token = matchobj.group(1)
            # Unknown names are left untouched.
            return variables.get(token, matchobj.group(0))

        for option, value in config.items(section):
            config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
    """In the string `path`, replace tokens like {some.thing} with the
    corresponding value from the map `local_vars`.

    Names missing from `local_vars` fall back to os.environ; tokens with no
    value anywhere are left unchanged.
    """
    def _replacer(match):
        token = match.group(1)
        try:
            return local_vars[token]
        except KeyError:
            return os.environ.get(token, match.group(0))
    return _VAR_REPL.sub(_replacer, path)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
    """Return the named scheme's paths with {var} tokens expanded.

    *vars* (a dict, or None) is overlaid on the interpreter's config
    variables; explicit entries win over get_config_vars().
    """
    expanded = {}
    if vars is None:
        vars = {}
    _extend_dict(vars, get_config_vars())
    for key, value in _SCHEMES.items(scheme):
        if os.name in ('posix', 'nt'):
            value = os.path.expanduser(value)
        expanded[key] = os.path.normpath(_subst_vars(value, vars))
    return expanded
def format_value(value, vars):
    """Expand {name} tokens in *value* using *vars*; unknown names stay as-is."""
    def _replacer(match):
        token = match.group(1)
        try:
            return vars[token]
        except KeyError:
            return match.group(0)
    return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary (and is updated in place).
    Values that look numeric are converted to int.
    """
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).
    _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
    if vars is None:
        vars = {}
    # done: fully-resolved values; notdone: values still containing $(VAR)
    # or ${VAR} references to be interpolated below.
    done = {}
    notdone = {}
    # surrogateescape keeps undecodable bytes round-trippable.
    with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
        lines = f.readlines()
    for line in lines:
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # do variable interpolation here
    variables = list(notdone.keys())
    # Variables with a 'PY_' prefix in the makefile. These need to
    # be made available without that prefix through sysconfig.
    # Special care is needed to ensure that variable expansion works, even
    # if the expansion uses the name without a prefix.
    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
    # Fixed-point loop: each pass resolves references whose targets are
    # already known; unresolved ones wait for a later pass.
    while len(variables) > 0:
        for name in tuple(variables):
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m is not None:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                elif n in renamed_variables:
                    if (name.startswith('PY_') and
                        name[3:] in renamed_variables):
                        item = ""
                    elif 'PY_' + n in notdone:
                        found = False
                    else:
                        item = str(done['PY_' + n])
                else:
                    # Unknown reference: treat as empty, like make does.
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        notdone[name] = value
                    else:
                        try:
                            value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        variables.remove(name)
                        # Also publish PY_CFLAGS etc. under the bare name.
                        if (name.startswith('PY_') and
                            name[3:] in renamed_variables):
                            name = name[3:]
                            if name not in done:
                                done[name] = value
            else:
                # bogus variable reference (e.g. "prefix=$/opt/python");
                # just drop it since we can't deal
                done[name] = value
                variables.remove(name)
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    vars.update(done)
    return vars
def get_makefile_filename():
    """Return the full path of the interpreter's installed Makefile."""
    if _PYTHON_BUILD:
        # In-tree build: the Makefile sits at the project root.
        return os.path.join(_PROJECT_BASE, "Makefile")
    if hasattr(sys, 'abiflags'):
        config_dir = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
    else:
        config_dir = 'config'
    return os.path.join(get_path('stdlib'), config_dir, 'Makefile')
def _init_posix(vars):
    """Populate *vars* for POSIX from the installed Makefile and pyconfig.h."""
    def _unreadable(path, err):
        # Preserve the historical error message format verbatim.
        msg = "invalid Python installation: unable to open %s" % path
        if hasattr(err, "strerror"):
            msg = msg + " (%s)" % err.strerror
        raise IOError(msg)

    # load the installed Makefile:
    makefile = get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    except IOError as e:
        _unreadable(makefile, e)

    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        _unreadable(config_h, e)

    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
    """Populate *vars* with the Windows (NT) configuration values."""
    # set basic install directories and platform constants
    vars.update({
        'LIBDEST': get_path('stdlib'),
        'BINLIBDEST': get_path('platstdlib'),
        'INCLUDEPY': get_path('include'),
        'SO': '.pyd',
        'EXE': '.exe',
        'VERSION': _PY_VERSION_SHORT_NO_DOT,
        'BINDIR': os.path.dirname(_safe_realpath(sys.executable)),
    })
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    Returns a dict mapping ``#define`` names to their values (converted to
    int when they look numeric) and commented-out ``#undef`` names to 0.
    When *vars* is supplied it is updated in place and returned.
    """
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")

    # iter() with the '' sentinel stops exactly where `not line` would.
    for line in iter(fp.readline, ''):
        m = define_rx.match(line)
        if m:
            name, value = m.group(1, 2)
            try:
                value = int(value)
            except ValueError:
                pass  # keep non-numeric values as strings
            vars[name] = value
        else:
            m = undef_rx.match(line)
            if m:
                vars[m.group(1)] = 0
    return vars
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if _PYTHON_BUILD:
        # In-tree builds keep pyconfig.h in PC/ on Windows, at the root elsewhere.
        if os.name == "nt":
            header_dir = os.path.join(_PROJECT_BASE, "PC")
        else:
            header_dir = _PROJECT_BASE
    else:
        header_dir = get_path('platinclude')
    return os.path.join(header_dir, 'pyconfig.h')
def get_scheme_names():
    """Return a tuple containing the schemes names."""
    # Sorting gives deterministic ordering regardless of cfg-file order.
    names = sorted(_SCHEMES.sections())
    return tuple(names)
def get_path_names():
    """Return a tuple containing the paths names."""
    # xxx see if we want a static list
    # Every scheme defines the same options; posix_prefix is representative.
    return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform. With
    ``expand=False`` the raw {var} templates are returned instead.
    """
    _ensure_cfg_read()
    if not expand:
        return dict(_SCHEMES.items(scheme))
    return _expand_vars(scheme, vars)
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a path corresponding to the scheme.

    ``scheme`` is the install scheme name; ``name`` selects one entry
    from that scheme's mapping.
    """
    paths = get_paths(scheme, vars, expand)
    return paths[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.
    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.
    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    global _CONFIG_VARS
    # Computed once and cached in the module-level dict; every later call
    # reuses (and, with no args, hands out) the very same dict object.
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # distutils2 module.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        try:
            _CONFIG_VARS['abiflags'] = sys.abiflags
        except AttributeError:
            # sys.abiflags may not be defined on all platforms.
            _CONFIG_VARS['abiflags'] = ''
        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        if sys.version >= '2.6':
            _CONFIG_VARS['userbase'] = _getuserbase()
        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        else:
            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
        if sys.platform == 'darwin':
            kernel_version = os.uname()[2] # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])
            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub('-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub('-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags
                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search('-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags
    if args:
        # Look up each requested name; unknown names yield None.
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Look up one configuration variable by name.

    Shorthand for ``get_config_vars().get(name)``; returns None when
    the variable is unknown.
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
def get_platform():
    """Return a string that identifies the current platform.
    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.
    Examples of returned values:
    linux-i586
    linux-alpha (?)
    solaris-2.6-sun4u
    irix-5.3
    irix64-6.2
    Windows will return one of:
    win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
    win-ia64 (64bit Windows on Itanium)
    win32 (all others - specifically, sys.platform is returned)
    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Keep only the leading dotted-numeric part of the release string.
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
        # NOTE(review): 'if True' looks like a leftover from a refactor;
        # the branch is always taken.
        if True:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                                  r'<string>(.*?)</string>', f.read())
                finally:
                    f.close()
                if m is not None:
                    macrelease = '.'.join(m.group(1).split('.')[:2])
                # else: fall back to the default behaviour
        if not macver:
            macver = macrelease
        if macver:
            release = macver
            osname = "macosx"
            if ((macrelease + '.') >= '10.4.' and
                '-arch' in get_config_vars().get('CFLAGS', '').strip()):
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxsize >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxsize >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'
    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short ``MAJOR.MINOR`` version string for this Python."""
    version = _PY_VERSION_SHORT
    return version
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
    """Display all information sysconfig detains."""
    for label, value in (('Platform', get_platform()),
                         ('Python version', get_python_version()),
                         ('Current installation scheme', _get_default_scheme())):
        print('%s: "%s"' % (label, value))
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())
# Command-line entry point: dump the detected configuration when this
# module is executed directly.
if __name__ == '__main__':
    _main()
| mit |
tomchristie/django | tests/managers_regress/tests.py | 43 | 10902 | from django.db import models
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from .models import (
AbstractBase1, AbstractBase2, AbstractBase3, Child1, Child2, Child3,
Child4, Child5, Child6, Child7, RelatedModel, RelationModel,
)
class ManagersRegressionTests(TestCase):
    """Regression tests for custom/default manager resolution on plain,
    abstract and swappable models (Django test suite)."""
    def test_managers(self):
        # Populate every Child model, then check which rows each custom,
        # restricted or default manager exposes.
        Child1.objects.create(name='fred', data='a1')
        Child1.objects.create(name='barney', data='a2')
        Child2.objects.create(name='fred', data='b1', value=1)
        Child2.objects.create(name='barney', data='b2', value=42)
        Child3.objects.create(name='fred', data='c1', comment='yes')
        Child3.objects.create(name='barney', data='c2', comment='no')
        Child4.objects.create(name='fred', data='d1')
        Child4.objects.create(name='barney', data='d2')
        Child5.objects.create(name='fred', comment='yes')
        Child5.objects.create(name='barney', comment='no')
        Child6.objects.create(name='fred', data='f1', value=42)
        Child6.objects.create(name='barney', data='f2', value=42)
        Child7.objects.create(name='fred')
        Child7.objects.create(name='barney')
        self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"])
        self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"])
        self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"])
        self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"])
        self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"])
        self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"])
        self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"])
        self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"])
        # Since Child6 inherits from Child4, the corresponding rows from f1 and
        # f2 also appear here. This is the expected result.
        self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [
            "<Child4: d1>",
            "<Child4: d2>",
            "<Child4: f1>",
            "<Child4: f2>",
        ])
        self.assertQuerysetEqual(Child4.manager1.all(), ["<Child4: d1>", "<Child4: f1>"], ordered=False)
        self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"])
        self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>", "<Child6: f2>"], ordered=False)
        self.assertQuerysetEqual(
            Child7._default_manager.order_by('name'),
            ["<Child7: barney>", "<Child7: fred>"]
        )
    def test_abstract_manager(self):
        # Accessing the manager on an abstract model should
        # raise an attribute error with an appropriate message.
        # This error message isn't ideal, but if the model is abstract and
        # a lot of the class instantiation logic isn't invoked; if the
        # manager is implied, then we don't get a hook to install the
        # error-raising manager.
        msg = "type object 'AbstractBase3' has no attribute 'objects'"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase3.objects.all()
    def test_custom_abstract_manager(self):
        # Accessing the manager on an abstract model with an custom
        # manager should raise an attribute error with an appropriate
        # message.
        msg = "Manager isn't available; AbstractBase2 is abstract"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase2.restricted.all()
    def test_explicit_abstract_manager(self):
        # Accessing the manager on an abstract model with an explicit
        # manager should raise an attribute error with an appropriate
        # message.
        msg = "Manager isn't available; AbstractBase1 is abstract"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase1.objects.all()
    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    @isolate_apps('managers_regress')
    def test_swappable_manager(self):
        class SwappableModel(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        # Accessing the manager on a swappable model should
        # raise an attribute error with a helpful message
        msg = (
            "Manager isn't available; 'managers_regress.SwappableModel' "
            "has been swapped for 'managers_regress.Parent'"
        )
        with self.assertRaisesMessage(AttributeError, msg):
            SwappableModel.objects.all()
    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    @isolate_apps('managers_regress')
    def test_custom_swappable_manager(self):
        class SwappableModel(models.Model):
            stuff = models.Manager()
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        # Accessing the manager on a swappable model with an
        # explicit manager should raise an attribute error with a
        # helpful message
        msg = (
            "Manager isn't available; 'managers_regress.SwappableModel' "
            "has been swapped for 'managers_regress.Parent'"
        )
        with self.assertRaisesMessage(AttributeError, msg):
            SwappableModel.stuff.all()
    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    @isolate_apps('managers_regress')
    def test_explicit_swappable_manager(self):
        class SwappableModel(models.Model):
            objects = models.Manager()
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        # Accessing the manager on a swappable model with an
        # explicit manager should raise an attribute error with a
        # helpful message
        msg = (
            "Manager isn't available; 'managers_regress.SwappableModel' "
            "has been swapped for 'managers_regress.Parent'"
        )
        with self.assertRaisesMessage(AttributeError, msg):
            SwappableModel.objects.all()
    def test_regress_3871(self):
        # FK, GFK and M2M relations back to the same object must all be
        # renderable through their related managers in a template.
        related = RelatedModel.objects.create()
        relation = RelationModel()
        relation.fk = related
        relation.gfk = related
        relation.save()
        relation.m2m.add(related)
        t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
        self.assertEqual(
            t.render(Context({'related': related})),
            ''.join([str(relation.pk)] * 3),
        )
    def test_field_can_be_called_exact(self):
        # Make sure related managers core filters don't include an
        # explicit `__exact` lookup that could be interpreted as a
        # reference to a foreign `exact` field. refs #23940.
        related = RelatedModel.objects.create(exact=False)
        relation = related.test_fk.create()
        self.assertEqual(related.test_fk.get(), relation)
@isolate_apps('managers_regress')
class TestManagerInheritance(TestCase):
    """Check how _default_manager / _base_manager are inherited across
    abstract parents, proxies and multi-table inheritance (MTI)."""
    def test_implicit_inheritance(self):
        class CustomManager(models.Manager):
            pass
        class AbstractModel(models.Model):
            custom_manager = CustomManager()
            class Meta:
                abstract = True
        class PlainModel(models.Model):
            custom_manager = CustomManager()
        self.assertIsInstance(PlainModel._base_manager, models.Manager)
        self.assertIsInstance(PlainModel._default_manager, CustomManager)
        class ModelWithAbstractParent(AbstractModel):
            pass
        self.assertIsInstance(ModelWithAbstractParent._base_manager, models.Manager)
        self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
        class ProxyModel(PlainModel):
            class Meta:
                proxy = True
        self.assertIsInstance(ProxyModel._base_manager, models.Manager)
        self.assertIsInstance(ProxyModel._default_manager, CustomManager)
        class MTIModel(PlainModel):
            pass
        self.assertIsInstance(MTIModel._base_manager, models.Manager)
        self.assertIsInstance(MTIModel._default_manager, CustomManager)
    def test_default_manager_inheritance(self):
        # Meta.default_manager_name must propagate to subclasses/proxies.
        class CustomManager(models.Manager):
            pass
        class AbstractModel(models.Model):
            another_manager = models.Manager()
            custom_manager = CustomManager()
            class Meta:
                default_manager_name = 'custom_manager'
                abstract = True
        class PlainModel(models.Model):
            another_manager = models.Manager()
            custom_manager = CustomManager()
            class Meta:
                default_manager_name = 'custom_manager'
        self.assertIsInstance(PlainModel._default_manager, CustomManager)
        class ModelWithAbstractParent(AbstractModel):
            pass
        self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
        class ProxyModel(PlainModel):
            class Meta:
                proxy = True
        self.assertIsInstance(ProxyModel._default_manager, CustomManager)
        class MTIModel(PlainModel):
            pass
        self.assertIsInstance(MTIModel._default_manager, CustomManager)
    def test_base_manager_inheritance(self):
        # Meta.base_manager_name must propagate to subclasses/proxies.
        class CustomManager(models.Manager):
            pass
        class AbstractModel(models.Model):
            another_manager = models.Manager()
            custom_manager = CustomManager()
            class Meta:
                base_manager_name = 'custom_manager'
                abstract = True
        class PlainModel(models.Model):
            another_manager = models.Manager()
            custom_manager = CustomManager()
            class Meta:
                base_manager_name = 'custom_manager'
        self.assertIsInstance(PlainModel._base_manager, CustomManager)
        class ModelWithAbstractParent(AbstractModel):
            pass
        self.assertIsInstance(ModelWithAbstractParent._base_manager, CustomManager)
        class ProxyModel(PlainModel):
            class Meta:
                proxy = True
        self.assertIsInstance(ProxyModel._base_manager, CustomManager)
        class MTIModel(PlainModel):
            pass
        self.assertIsInstance(MTIModel._base_manager, CustomManager)
    def test_manager_no_duplicates(self):
        # A manager redefined under the same name replaces the inherited
        # one instead of appearing twice in Meta.managers.
        class CustomManager(models.Manager):
            pass
        class AbstractModel(models.Model):
            custom_manager = models.Manager()
            class Meta:
                abstract = True
        class TestModel(AbstractModel):
            custom_manager = CustomManager()
        self.assertEqual(TestModel._meta.managers, (TestModel.custom_manager,))
        self.assertEqual(TestModel._meta.managers_map, {'custom_manager': TestModel.custom_manager})
| bsd-3-clause |
zicklag/godot | platform/javascript/detect.py | 4 | 3849 | import os
import string
import sys
def is_active():
    """This platform target is always enabled in the build system."""
    active = True
    return active
def get_name():
    """Human-readable platform name shown by the build system."""
    platform_name = "JavaScript"
    return platform_name
def can_build():
    """Detect the Emscripten SDK via either of its environment variables."""
    env = os.environ
    return ("EMSCRIPTEN_ROOT" in env or "EMSCRIPTEN" in env)
def get_opts():
    """SCons build options specific to the JavaScript platform."""
    from SCons.Variables import BoolVariable
    opts = [
        BoolVariable('wasm', 'Compile to WebAssembly', False),
        BoolVariable('javascript_eval', 'Enable JavaScript eval interface', True),
    ]
    return opts
def get_flags():
    """Flags forced for this platform: no editor tools, no Theora module."""
    forced = [
        ('tools', False),
        ('module_theora_enabled', False),
    ]
    return forced
def create(env):
    """Clone the base environment for this platform.

    PROGSUFFIX is cleared so binaries are not given Windows' ".exe".
    """
    cloned = env.Clone(tools=['textfile', 'zip'], PROGSUFFIX='')
    return cloned
def escape_sources_backslashes(target, source, env, for_signature):
    """Double every backslash in the sources' build paths (Windows cmd quoting)."""
    paths = env.GetBuildPath(source)
    return [p.replace('\\', '\\\\') for p in paths]
def escape_target_backslashes(target, source, env, for_signature):
    """Double every backslash in the first target's build path."""
    built = env.GetBuildPath(target[0])
    return built.replace('\\', '\\\\')
def configure(env):
    """Configure the SCons environment to build Godot with Emscripten."""
    ## Build type
    if (env["target"] == "release"):
        env.Append(CCFLAGS=['-O3'])
        env.Append(LINKFLAGS=['-O3'])
    elif (env["target"] == "release_debug"):
        env.Append(CCFLAGS=['-O2', '-DDEBUG_ENABLED'])
        env.Append(LINKFLAGS=['-O2', '-s', 'ASSERTIONS=1'])
        # retain function names at the cost of file size, for backtraces and profiling
        env.Append(LINKFLAGS=['--profiling-funcs'])
    elif (env["target"] == "debug"):
        env.Append(CCFLAGS=['-O1', '-D_DEBUG', '-g', '-DDEBUG_ENABLED'])
        env.Append(LINKFLAGS=['-O1', '-g'])
    ## Compiler configuration
    env['ENV'] = os.environ
    # Put the Emscripten SDK on PATH (either env var may point at it).
    if ("EMSCRIPTEN_ROOT" in os.environ):
        env.PrependENVPath('PATH', os.environ['EMSCRIPTEN_ROOT'])
    elif ("EMSCRIPTEN" in os.environ):
        env.PrependENVPath('PATH', os.environ['EMSCRIPTEN'])
    env['CC'] = 'emcc'
    env['CXX'] = 'em++'
    env['LINK'] = 'emcc'
    env['RANLIB'] = 'emranlib'
    # Emscripten's ar has issues with duplicate file names, so use cc
    env['AR'] = 'emcc'
    env['ARFLAGS'] = '-o'
    if (os.name == 'nt'):
        # use TempFileMunge on Windows since some commands get too long for
        # cmd.exe even with spawn_fix
        # need to escape backslashes for this
        env['ESCAPED_SOURCES'] = escape_sources_backslashes
        env['ESCAPED_TARGET'] = escape_target_backslashes
        env['ARCOM'] = '${TEMPFILE("%s")}' % env['ARCOM'].replace('$SOURCES', '$ESCAPED_SOURCES').replace('$TARGET', '$ESCAPED_TARGET')
    # Object/library files are LLVM bitcode when compiling with emcc.
    env['OBJSUFFIX'] = '.bc'
    env['LIBSUFFIX'] = '.bc'
    ## Compile flags
    env.Append(CPPPATH=['#platform/javascript'])
    env.Append(CPPFLAGS=['-DJAVASCRIPT_ENABLED', '-DUNIX_ENABLED', '-DPTHREAD_NO_RENAME', '-DTYPED_METHOD_BIND', '-DNO_THREADS'])
    env.Append(CPPFLAGS=['-DGLES3_ENABLED'])
    # These flags help keep the file size down
    env.Append(CPPFLAGS=["-fno-exceptions", '-DNO_SAFE_CAST', '-fno-rtti'])
    if env['javascript_eval']:
        env.Append(CPPFLAGS=['-DJAVASCRIPT_EVAL_ENABLED'])
    ## Link flags
    env.Append(LINKFLAGS=['-s', 'EXTRA_EXPORTED_RUNTIME_METHODS="[\'FS\']"'])
    env.Append(LINKFLAGS=['-s', 'USE_WEBGL2=1'])
    if env['wasm']:
        env.Append(LINKFLAGS=['-s', 'BINARYEN=1'])
        # In contrast to asm.js, enabling memory growth on WebAssembly has no
        # major performance impact, and causes only a negligible increase in
        # memory size.
        env.Append(LINKFLAGS=['-s', 'ALLOW_MEMORY_GROWTH=1'])
        env.extra_suffix = '.webassembly' + env.extra_suffix
    else:
        env.Append(LINKFLAGS=['-s', 'ASM_JS=1'])
        env.Append(LINKFLAGS=['--separate-asm'])
        env.Append(LINKFLAGS=['--memory-init-file', '1'])
    # TODO: Move that to opus module's config
    if 'module_opus_enabled' in env and env['module_opus_enabled']:
        env.opus_fixed_point = "yes"
| mit |
j0nathan33/CouchPotatoServer | couchpotato/core/notifications/xmpp_.py | 96 | 2910 | from time import sleep
import traceback
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import xmpp
log = CPLog(__name__)  # module-level logger for this notifier
autoload = 'Xmpp'  # class name CouchPotato auto-loads from this module
class Xmpp(Notification):
    """Send CouchPotato notifications over XMPP (Jabber, Google Talk/Hangouts)."""

    def notify(self, message = '', data = None, listener = None):
        """Deliver *message* to the configured XMPP recipient.

        Connects and authenticates using the 'username'/'password'/
        'hostname'/'port' settings, sends a chat message to 'to', and
        returns True on success or False on any failure (which is logged).
        """
        if not data: data = {}
        try:
            jid = xmpp.protocol.JID(self.conf('username'))
            client = xmpp.Client(jid.getDomain(), debug = [])

            # Connect
            if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
                log.error('XMPP failed: Connection to server failed.')
                return False

            # Authenticate
            if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
                log.error('XMPP failed: Failed to authenticate.')
                return False

            # Send message
            client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))

            # Disconnect
            # some older servers will not send the message if you disconnect immediately after sending
            sleep(1)
            client.disconnect()

            log.info('XMPP notifications sent.')
            return True
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; real errors are still logged here.
            log.error('XMPP failed: %s', traceback.format_exc())
        return False
# Settings descriptor consumed by CouchPotato's notification settings UI.
config = [{
    'name': 'xmpp',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'xmpp',
            'label': 'XMPP',
            # Fixed key: was 'description`' (stray backtick), which the
            # settings UI would not recognize as the description field.
            'description': 'for Jabber, Hangouts (Google Talk), AIM...',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
                },
                {
                    'name': 'password',
                    'type': 'Password',
                },
                {
                    'name': 'hostname',
                    'default': 'talk.google.com',
                },
                {
                    'name': 'to',
                    'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
                },
                {
                    'name': 'port',
                    'type': 'int',
                    'default': 5222,
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
| gpl-3.0 |
srsman/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertBracesToField.py | 384 | 12556 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
import base64
from com.sun.star.task import XJobExecutor
# NOTE(review): Python 2 script (`<>` operator). Outside the packaged UNO
# extension, pull in the GUI/login helpers and RPC machinery at import
# time; the star-imports provide getDesktop, Logger, RPCSession, etc.
if __name__<>"package":
    from lib.gui import *
    from LoginTest import *
    from lib.logreport import *
    from lib.rpc import *
    database="test"
uid = 1
class ConvertBracesToField( unohelper.Base, XJobExecutor ):
    """UNO job that scans the current OpenOffice document for OpenERP
    report placeholders ([[ expr ]] / repeatIn(...)), wraps each one in a
    DropDown text field (getBraces) and then resolves the expressions
    against the server via RPC to show human-readable labels (setValue).
    Python 2 code; runs inside the OpenOffice report designer plugin."""
    def __init__(self, ctx):
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        LoginTest()
        self.logobj=Logger()
        if not loginstatus and __name__=="package":
            exit(1)
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        self.aReportSyntex=[]
        # First wrap every placeholder in a field, then resolve values.
        self.getBraces(self.aReportSyntex)
        self.setValue()
    def setValue(self):
        """Resolve each DropDown field's expression to a display label
        via RPC; failures fall back to the literal "TTT" marker."""
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo= doc.getDocumentInfo()
        count = 0
        regexes = [
            ['[a-zA-Z0-9_]+\.[a-zA-Z0-9_.]+',"Field"],
            ['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
            ['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"]
            # ['\\[\\[ ([a-zA-Z0-9_]+\.[a-zA-Z1-9]) \\]\\]',"Field"],
            # ['\\[\\[ [a-zA-Z0-9_\.]+ and ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
            # ['\\[\\[ ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
            # ['\\[\\[ ([a-zA-Z0-9_\.]+) and .+? \\]\\]',"Field"],
            # ['\\[\\[ .+? or ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
            # ['\\[\\[ (.+?) and ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
            # ['\\[\\[ .+? % ([a-zA-Z0-9_\.]+) \\]\\]',"Field"]
        ]
        oFieldObject = []
        oRepeatInObjects = []
        saRepeatInList = []
        sHost = docinfo.getUserFieldValue(0)
        nCount = 0
        oParEnum = doc.getTextFields().createEnumeration()
        while oParEnum.hasMoreElements():
            oPar = oParEnum.nextElement()
            nCount += 1
        # Collect repeatIn() loop variables so dotted names can be mapped
        # back to the model they iterate over.
        getList(oRepeatInObjects,sHost,nCount)
        for ro in oRepeatInObjects:
            if ro.find("(")<>-1:
                saRepeatInList.append( [ ro[:ro.find("(")], ro[ro.find("(")+1:ro.find(")")] ])
        try:
            oParEnum = doc.getTextFields().createEnumeration()
            while oParEnum.hasMoreElements():
                oPar = oParEnum.nextElement()
                if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                    for reg in regexes:
                        res=re.findall(reg[0],oPar.Items[1])
                        if len(res) <> 0:
                            if res[0][0] == "objects":
                                sTemp = docinfo.getUserFieldValue(3)
                                sTemp = "|-." + sTemp[sTemp.rfind(".")+1:] + ".-|"
                                oPar.Items=(sTemp.encode("utf-8"),oPar.Items[1].replace(' ',""))
                                oPar.update()
                            elif type(res[0]) <> type(u''):
                                # Tuple match: resolve the field label via fields_get.
                                sObject = self.getRes(self.sock, docinfo.getUserFieldValue(3), res[0][0][res[0][0].find(".")+1:].replace(".","/"))
                                r = self.sock.execute(database, uid, self.password, docinfo.getUserFieldValue(3) , 'fields_get')
                                sExpr="|-." + r[res[0][0][res[0][0].rfind(".")+1:]]["string"] + ".-|"
                                oPar.Items=(sExpr.encode("utf-8"),oPar.Items[1].replace(' ',""))
                                oPar.update()
                            else:
                                obj = None
                                for rl in saRepeatInList:
                                    if rl[0] == res[0][:res[0].find(".")]:
                                        obj=rl[1]
                                try:
                                    sObject = self.getRes(self.sock, obj, res[0][res[0].find(".")+1:].replace(".","/"))
                                    r = self.sock.execute(database, uid, self.password, sObject , 'read',[1])
                                except Exception,e:
                                    # RPC failure: show the "TTT" placeholder.
                                    r = "TTT"
                                    self.logobj.log_write('ConvertBracesToField', LOG_ERROR, str(e))
                                if len(r) <> 0:
                                    if r <> "TTT":
                                        if len(res)>1:
                                            sExpr=""
                                            print res
                                            if reg[1] == 'Field':
                                                for ires in res:
                                                    try:
                                                        sExpr=r[0][ires[ires.rfind(".")+1:]]
                                                        break
                                                    except Exception,e:
                                                        import traceback,sys
                                                        info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                        self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
                                            try:
                                                oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
                                                oPar.update()
                                            except:
                                                oPar.Items=(str(sExpr) ,oPar.Items[1])
                                                oPar.update()
                                                import traceback,sys
                                                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                self.logobj.log_write('ConvertBracesToField', LOG_ERROR, info)
                                        else:
                                            sExpr=r[0][res[0][res[0].rfind(".")+1:]]
                                            try:
                                                if sExpr:
                                                    oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
                                                    oPar.update()
                                                else:
                                                    oPar.Items=(u"/",oPar.Items[1])
                                                    oPar.update()
                                            except:
                                                oPar.Items=(str(sExpr) ,oPar.Items[1])
                                                oPar.update()
                                                import traceback,sys
                                                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                                                self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
                                    else:
                                        oPar.Items=(u""+r,oPar.Items[1])
                                        oPar.update()
                                else:
                                    oPar.Items=(u"TTT",oPar.Items[1])
                                    oPar.update()
        except:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
    def getRes(self, sock, sObject, sVar):
        """Walk a '/'-separated field path, following many2one relations,
        and return the model name the final segment belongs to."""
        desktop=getDesktop()
        doc =desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        res = sock.execute(database, uid, self.password, sObject , 'fields_get')
        key = res.keys()
        key.sort()
        myval=None
        if not sVar.find("/")==-1:
            myval=sVar[:sVar.find("/")]
        else:
            myval=sVar
        for k in key:
            if (res[k]['type'] in ['many2one']) and k==myval:
                # Recurse into the related model for the remaining path.
                sObject = self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
        return sObject
    def getBraces(self, aReportSyntex=None):
        """Find every [[ ... ]] placeholder in the document and replace it
        in place with a DropDown text field, recording (field, kind) pairs
        in *aReportSyntex*."""
        if aReportSyntex is None:
            aReportSyntex = []
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        aSearchString=[]
        aReplaceString=[]
        aRes=[]
        try:
            regexes = [
                ['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
                ['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"],
                ['\\[\\[ *.+? *\\]\\]', "Expression"]
            ]
            search = doc.createSearchDescriptor()
            search.SearchRegularExpression = True
            for reg in regexes:
                search.SearchString = reg[0]
                found = doc.findFirst( search )
                while found:
                    res=re.findall(reg[0],found.String)
                    print len(res)
                    if found.String not in [r[0] for r in aReportSyntex] and len(res) == 1 :
                        text=found.getText()
                        oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
                        if reg[1]<>"Expression":
                            oInputList.Items=(u""+found.String,u""+found.String)
                        else:
                            oInputList.Items=(u"?",u""+found.String)
                        aReportSyntex.append([oInputList,reg[1]])
                        text.insertTextContent(found,oInputList,False)
                        found.String =""
                    else:
                        aRes.append([res,reg[1]])
                    found = doc.findNext(found.End, search)
            # Second pass: plain-text search for matches deferred above.
            search = doc.createSearchDescriptor()
            search.SearchRegularExpression = False
            for res in aRes:
                for r in res[0]:
                    search.SearchString=r
                    found=doc.findFirst(search)
                    while found:
                        text=found.getText()
                        oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
                        if res[1]<>"Expression":
                            oInputList.Items=(u""+found.String,u""+found.String)
                        else:
                            oInputList.Items=(u"?",u""+found.String)
                        aReportSyntex.append([oInputList,res[1]])
                        text.insertTextContent(found,oInputList,False)
                        found.String =""
                        found = doc.findNext(found.End, search)
        except:
            import traceback,sys
            info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
            self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
# When executed directly inside OpenOffice (__name__ != "package") run the
# conversion immediately; when loaded as a UNO package, register the class
# with the implementation helper so OpenOffice can instantiate it as a
# com.sun.star.task.Job service.
if __name__<>"package":
    ConvertBracesToField(None)
else:
    g_ImplementationHelper.addImplementation( ConvertBracesToField, "org.openoffice.openerp.report.convertBF", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/_dummy_thread.py | 106 | 4872 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
           'interrupt_main', 'LockType']

# A dummy value; real _thread exposes the largest timeout acquire() accepts.
TIMEOUT_MAX = 2**31

# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).

# _thread.error is an alias of RuntimeError in Python 3; mirror that here.
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of _thread.start_new_thread().

    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary. If an exception is raised
    and it is SystemExit (which can be done by _thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().

    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.

    The "thread" runs synchronously in the caller, so this returns only
    after *function* has finished.
    """
    # NOTE(review): exact type comparison (not isinstance) also rejects
    # tuple/dict subclasses -- presumably to mirror _thread; confirm.
    if type(args) != type(tuple()):
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) != type(dict()):
        raise TypeError("3rd arg must be a dict")
    global _main
    # Mark that we are "inside a thread" so interrupt_main() defers the
    # KeyboardInterrupt instead of raising it immediately.
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        # _thread.exit() inside the function terminates only the "thread".
        pass
    except:
        import traceback
        traceback.print_exc()
    _main = True
    global _interrupt
    # Deliver a deferred interrupt_main() request now that the "thread"
    # has returned.
    if _interrupt:
        _interrupt = False
        raise KeyboardInterrupt
def exit():
    """Dummy implementation of _thread.exit().

    There is no real thread to terminate, so simply raise SystemExit in
    the caller -- exactly what _thread.exit() does in a real thread.
    """
    raise SystemExit
def get_ident():
    """Dummy implementation of _thread.get_ident().

    This module is only used when the real _thread module is missing, so
    the process is guaranteed to be single-threaded; any constant "magic
    cookie" is therefore a valid thread identifier.
    """
    return -1
def allocate_lock():
    """Dummy implementation of _thread.allocate_lock()."""
    # Hand out a fresh dummy lock; see LockType below for its semantics.
    return LockType()
def stack_size(size=None):
    """Dummy implementation of _thread.stack_size().

    Only querying is supported (always 0, meaning "platform default");
    attempting to change the size raises ``error`` just like a platform
    without stack-size support.
    """
    if size is None:
        return 0
    raise error("setting thread stack size not supported")
def _set_sentinel():
    """Dummy implementation of _thread._set_sentinel()."""
    # The sentinel lock is only inspected by threading's shutdown logic;
    # a dummy lock satisfies that interface.
    return LockType()
class LockType(object):
    """Dummy drop-in for _thread.LockType.

    The lock state is kept in the public boolean ``locked_status``.
    Because there is only one "thread", a blocking acquire always
    succeeds immediately. Instances should not be pickled: the methods
    here are not atomic, so mixing an unpickled dummy lock with the real
    _thread module could misbehave.
    """

    def __init__(self):
        # Lock starts out released.
        self.locked_status = False

    def acquire(self, waitflag=None, timeout=-1):
        """Dummy implementation of acquire().

        A blocking call (waitflag omitted, None, or truthy) always sets
        the lock and returns True. A non-blocking call succeeds only if
        the lock is currently free; otherwise it optionally sleeps for
        *timeout* seconds (emulating the wait) and reports failure.
        This keeps threading.Condition's internal asserts happy.
        """
        blocking = waitflag is None or waitflag
        if blocking:
            self.locked_status = True
            return True
        if not self.locked_status:
            self.locked_status = True
            return True
        if timeout > 0:
            import time
            time.sleep(timeout)
        return False

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock; raise ``error`` if it was not held."""
        # XXX Perhaps shouldn't actually bother to test? Could lead
        # to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        """Return True if the lock is currently held."""
        return self.locked_status
# Module-level state shared by start_new_thread() and interrupt_main().
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
    """Request a KeyboardInterrupt.

    Outside a "thread" the interrupt is raised immediately; inside one,
    the ``_interrupt`` flag is set so start_new_thread() raises
    KeyboardInterrupt once the function returns.
    """
    global _interrupt
    if _main:
        raise KeyboardInterrupt
    _interrupt = True
| apache-2.0 |
mrquim/repository.mrquim | script.module.liveresolver/lib/js2py/utils/injector.py | 34 | 5447 | __all__ = ['fix_js_args']
import types
import opcode
# Opcode constants used for comparison and replacement when rewriting
# CPython 2 bytecode below.
LOAD_FAST = opcode.opmap['LOAD_FAST']
LOAD_GLOBAL = opcode.opmap['LOAD_GLOBAL']
STORE_FAST = opcode.opmap['STORE_FAST']
def fix_js_args(func):
    '''Use this function when unsure whether func takes this and arguments as its last 2 args.
    It will append 2 args if it does not.'''
    # CPython 2 only: relies on func_code/func_globals attributes and the
    # 2.x bytecode format manipulated by append_arguments().
    fcode = func.func_code
    # Names of the last two positional parameters.
    fargs = fcode.co_varnames[fcode.co_argcount-2:fcode.co_argcount]
    if fargs==('this', 'arguments') or fargs==('arguments', 'var'):
        # Already has the expected trailing parameters; nothing to do.
        return func
    # Rewrite the code object so the function accepts (this, arguments)
    # as extra positional arguments, then rebuild the function around it.
    code = append_arguments(func.func_code, ('this','arguments'))
    return types.FunctionType(code, func.func_globals, func.func_name, closure=func.func_closure)
def append_arguments(code_obj, new_locals):
    """Return a copy of *code_obj* with *new_locals* spliced in as extra
    positional arguments after the original ones.

    Former global reads (LOAD_GLOBAL) of those names become local reads
    (LOAD_FAST), and every name/varname index embedded in the bytecode is
    re-targeted to the rebuilt co_names/co_varnames tuples.  CPython 2
    bytecode only.
    """
    co_varnames = code_obj.co_varnames  # Old locals
    co_names = code_obj.co_names  # Old globals
    co_names+=tuple(e for e in new_locals if e not in co_names)
    co_argcount = code_obj.co_argcount  # Argument count
    co_code = code_obj.co_code  # The actual bytecode as a string
    # Make one pass over the bytecode to identify names that should be
    # left in code_obj.co_names.
    not_removed = set(opcode.hasname) - set([LOAD_GLOBAL])
    saved_names = set()
    for inst in instructions(co_code):
        if inst[0] in not_removed:
            saved_names.add(co_names[inst[1]])
    # Build co_names for the new code object. This should consist of
    # globals that were only accessed via LOAD_GLOBAL
    names = tuple(name for name in co_names
                  if name not in set(new_locals) - saved_names)
    # Build a dictionary that maps the indices of the entries in co_names
    # to their entry in the new co_names
    name_translations = dict((co_names.index(name), i)
                             for i, name in enumerate(names))
    # Build co_varnames for the new code object. This should consist of
    # the entirety of co_varnames with new_locals spliced in after the
    # arguments
    new_locals_len = len(new_locals)
    varnames = (co_varnames[:co_argcount] + new_locals +
                co_varnames[co_argcount:])
    # Build the dictionary that maps indices of entries in the old co_varnames
    # to their indices in the new co_varnames
    range1, range2 = xrange(co_argcount), xrange(co_argcount, len(co_varnames))
    varname_translations = dict((i, i) for i in range1)
    varname_translations.update((i, i + new_locals_len) for i in range2)
    # Build the dictionary that maps indices of deleted entries of co_names
    # to their indices in the new co_varnames
    names_to_varnames = dict((co_names.index(name), varnames.index(name))
                             for name in new_locals)
    # Now we modify the actual bytecode
    modified = []
    for inst in instructions(code_obj.co_code):
        # If the instruction is a LOAD_GLOBAL, we have to check to see if
        # it's one of the globals that we are replacing. Either way,
        # update its arg using the appropriate dict.
        if inst[0] == LOAD_GLOBAL:
            if inst[1] in names_to_varnames:
                inst[0] = LOAD_FAST
                inst[1] = names_to_varnames[inst[1]]
            elif inst[1] in name_translations:
                inst[1] = name_translations[inst[1]]
            else:
                raise ValueError("a name was lost in translation")
        # If it accesses co_varnames or co_names then update its argument.
        elif inst[0] in opcode.haslocal:
            inst[1] = varname_translations[inst[1]]
        elif inst[0] in opcode.hasname:
            inst[1] = name_translations[inst[1]]
        modified.extend(write_instruction(inst))
    code = ''.join(modified)
    # Done modifying codestring - make the code object
    return types.CodeType(co_argcount + new_locals_len,
                          code_obj.co_nlocals + new_locals_len,
                          code_obj.co_stacksize,
                          code_obj.co_flags,
                          code,
                          code_obj.co_consts,
                          names,
                          varnames,
                          code_obj.co_filename,
                          code_obj.co_name,
                          code_obj.co_firstlineno,
                          code_obj.co_lnotab,
                          code_obj.co_freevars,
                          code_obj.co_cellvars)
def instructions(code):
    """Iterate over a CPython 2 bytecode string, yielding [op, oparg]
    pairs; *oparg* is None for argument-less opcodes.

    EXTENDED_ARG prefixes are folded into the following instruction's
    argument (shifted left 16 bits) and are not yielded themselves.
    """
    code = map(ord, code)
    i, L = 0, len(code)
    extended_arg = 0
    while i < L:
        op = code[i]
        i+= 1
        if op < opcode.HAVE_ARGUMENT:
            yield [op, None]
            continue
        # Two-byte little-endian argument, plus any pending EXTENDED_ARG.
        oparg = code[i] + (code[i+1] << 8) + extended_arg
        extended_arg = 0
        i += 2
        if op == opcode.EXTENDED_ARG:
            extended_arg = oparg << 16
            continue
        yield [op, oparg]
def write_instruction(inst):
    """Encode one [op, oparg] pair (as yielded by instructions()) back
    into a list of bytecode characters.

    Arguments that do not fit in 16 bits are emitted with an
    EXTENDED_ARG prefix carrying the high 16 bits, matching CPython 2's
    bytecode format. Raises ValueError for arguments above 32 bits.
    """
    op, oparg = inst
    if oparg is None:
        return [chr(op)]
    elif oparg < 65536:
        # Fits in the regular two-byte little-endian argument.
        # (Was "<= 65536": 65536 itself needs EXTENDED_ARG -- encoding it
        # in two bytes silently dropped the high bit and produced 0.)
        return [chr(op), chr(oparg & 255), chr((oparg >> 8) & 255)]
    elif oparg < 4294967296:
        # 17..32 bit argument: high half goes into an EXTENDED_ARG prefix.
        # (Was "<= 4294967296": the largest 32-bit value is 2**32 - 1.)
        return [chr(opcode.EXTENDED_ARG),
                chr((oparg >> 16) & 255),
                chr((oparg >> 24) & 255),
                chr(op),
                chr(oparg & 255),
                chr((oparg >> 8) & 255)]
    else:
        raise ValueError("Invalid oparg: {0} is too large".format(oparg))
| gpl-2.0 |
htwenhe/DJOA | env/Lib/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
# Maps each Windows-1255 byte value to a frequency-order index:
# 255 = control characters, 254 = CR/LF, 253 = punctuation/symbols,
# 252 = digits (see the legend above); lower indices = more frequent
# characters in Hebrew text.
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Aggregate language model -- presumably consumed by chardet's
# single-byte charset prober; confirm against the prober module.
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,  # byte -> frequency-order index
    'precedenceMatrix': HebrewLangModel,       # bigram likelihood classes
    'mTypicalPositiveRatio': 0.984004,         # share covered by first 512 sequences (see stats above)
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
| mit |
Lagovas/themis | src/wrappers/themis/python/pythemis/scomparator.py | 3 | 4433 | #!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2015 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import ctypes
from ctypes.util import find_library
from enum import IntEnum
from . import exception as exception
from .exception import THEMIS_CODES
# Load the native Themis library through ctypes; find_library searches the
# standard system locations for libthemis.
themis = ctypes.cdll.LoadLibrary(find_library("themis"))
# ctypes defaults to int return types; declare the 64-bit result code so
# secure_comparator_get_result() is read with the correct width.
themis.secure_comparator_get_result.restype = ctypes.c_int64
scomparator_create = themis.secure_comparator_create
# The create call returns an opaque context pointer, not an int.
scomparator_create.restype = ctypes.POINTER(ctypes.c_int)
class SCOMPARATOR_CODES(IntEnum):
    # Comparison outcomes reported by secure_comparator_get_result().
    MATCH = 21      # secrets proved equal
    NOT_MATCH = 22  # secrets proved different
    NOT_READY = 0   # protocol has not finished yet
class SComparator(object):
    """ctypes wrapper around Themis Secure Comparator.

    Drives the zero-knowledge comparison protocol: one side calls
    begin_compare() and both sides exchange the opaque messages through
    proceed_compare() until is_compared() reports completion.
    """

    def __init__(self, shared_secret):
        """Create a comparator context and append *shared_secret* to it.

        Raises ThemisError if the native context cannot be created or the
        secret cannot be appended.
        """
        # NOTE(review): this assigns the pointer *type*, not an instance,
        # and session_ctx is never used elsewhere in the class -- looks
        # vestigial; confirm before removing.
        self.session_ctx = ctypes.POINTER(ctypes.c_int)
        self.comparator_ctx = scomparator_create()
        if self.comparator_ctx is None:
            raise exception.ThemisError(THEMIS_CODES.FAIL,
                                        "Secure Comparator failed creating")
        res = themis.secure_comparator_append_secret(
            self.comparator_ctx,
            ctypes.byref(ctypes.create_string_buffer(shared_secret)),
            len(shared_secret))
        if res != THEMIS_CODES.SUCCESS:
            raise exception.ThemisError(
                THEMIS_CODES.FAIL, "Secure Comparator failed appending secret")
        self.comparation_complete = False

    def __del__(self):
        # Free the native context when the wrapper is collected.
        themis.secure_comparator_destroy(self.comparator_ctx)

    def begin_compare(self):
        """Build and return the protocol initialization message.

        Uses the usual two-call ctypes pattern: the first call reports
        BUFFER_TOO_SMALL along with the required size, the second fills
        the allocated buffer.
        """
        req_size = ctypes.c_int(0)
        res = themis.secure_comparator_begin_compare(self.comparator_ctx, None,
                                                     ctypes.byref(req_size))
        if res != THEMIS_CODES.BUFFER_TOO_SMALL:
            raise exception.ThemisError(
                res, "Secure Comparator failed making initialization message")
        req_buffer = ctypes.create_string_buffer(req_size.value)
        res = themis.secure_comparator_begin_compare(
            self.comparator_ctx, ctypes.byref(req_buffer),
            ctypes.byref(req_size))
        if res != THEMIS_CODES.SUCCESS and res != THEMIS_CODES.SEND_AS_IS:
            raise exception.ThemisError(
                res, "Secure Comparator failed making initialization message")
        return ctypes.string_at(req_buffer, req_size)

    def proceed_compare(self, control_message):
        """Feed the peer's *control_message* into the protocol and return
        the next message to send (empty when the protocol is finished).

        NOTE(review): returns the str "" on SUCCESS but bytes otherwise --
        callers comparing against b"" may misbehave; confirm intent.
        """
        c_message = ctypes.create_string_buffer(control_message)
        req_size = ctypes.c_int(0)
        res = themis.secure_comparator_proceed_compare(
            self.comparator_ctx, ctypes.byref(c_message), len(control_message),
            None, ctypes.byref(req_size))
        if res == THEMIS_CODES.SUCCESS:
            # Protocol complete; nothing more to send.
            return ""
        if res != THEMIS_CODES.BUFFER_TOO_SMALL:
            raise exception.ThemisError(
                res, "Secure Comparator failed proceeding message")
        req_buffer = ctypes.create_string_buffer(req_size.value)
        res = themis.secure_comparator_proceed_compare(
            self.comparator_ctx, ctypes.byref(c_message), len(control_message),
            req_buffer, ctypes.byref(req_size))
        if res == THEMIS_CODES.SEND_AS_IS or res == THEMIS_CODES.SUCCESS:
            return ctypes.string_at(req_buffer, req_size)
        raise exception.ThemisError(
            res, "Secure Comparator failed proceeding message")

    def is_compared(self):
        """Return True once the protocol has produced a final verdict."""
        return not (themis.secure_comparator_get_result(self.comparator_ctx) ==
                    SCOMPARATOR_CODES.NOT_READY)

    def is_equal(self):
        """Return True if the protocol finished and the secrets matched."""
        return (themis.secure_comparator_get_result(self.comparator_ctx) ==
                SCOMPARATOR_CODES.MATCH)

    def result(self):
        """Return the raw result code (see SCOMPARATOR_CODES)."""
        return themis.secure_comparator_get_result(self.comparator_ctx)
class scomparator(SComparator):
    # Deprecated lowercase alias kept for backwards compatibility; warns
    # and delegates everything to SComparator.
    def __init__(self, *args, **kwargs):
        warnings.warn("scomparator is deprecated in favor of SComparator.")
        super(scomparator, self).__init__(*args, **kwargs)
| apache-2.0 |
PaulFranklin/python_koans | python2/contemplate_koans.py | 86 | 1295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Acknowledgment:
#
# Python Koans is a port of Ruby Koans originally written by Jim Weirich
# and Joe O'brien of Edgecase. There are some differences and tweaks specific
# to the Python language, but a great deal of it has been copied wholesale.
# So thank guys!
#
import sys
if __name__ == '__main__':
    # This distribution targets Python 2: refuse to run under Python 3
    # and point the user at the right launcher script.
    if sys.version_info >= (3, 0):
        print("\nThis is the Python 2 version of Python Koans, but you are " +
              "running it with Python 3 or newer!\n\n"
              "Did you accidentally use the wrong python script? \nTry:\n\n" +
              "    python contemplate_koans.py\n")
    else:
        # Warn (but continue) on interpreters older than 2.7.
        if sys.version_info < (2, 7):
            print("\n" +
                  "********************************************************\n" +
                  "WARNING:\n" +
                  "This version of Python Koans was designed for " +
                  "Python 2.7 or greater.\n" +
                  "Your version of Python is older, so you may run into " +
                  "problems!\n\n" +
                  "But lets see how far we get...\n" +
                  "********************************************************\n")
        # Imported lazily so the version checks above always run first.
        from runner.mountain import Mountain
        Mountain().walk_the_path(sys.argv)
| mit |
zozo123/buildbot | master/buildbot/status/client.py | 1 | 1078 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.status import base
from twisted.python import log
class PBListener(base.StatusReceiverBase):
    """No-op stub kept only so old master.cfg files referencing
    PBListener keep loading; it logs a removal notice instead of
    listening on the given port."""
    # This class is still present in users' configs, so keep it here.
    def __init__(self, port, user="statusClient", passwd="clientpw"):
        # Parameters are accepted (and ignored) for config compatibility.
        log.msg("The PBListener status listener is unused and can be removed "
                "from the configuration")
| gpl-3.0 |
Aquafina-water-bottle/Command-Compiler-Unlimited | test_fena/v1_13/test_teams.py | 1 | 1532 | from test_fena.test_common import test_cmd
def test_teams():
    """Check Fena -> vanilla translation of 1.13 team commands."""
    cases = (
        ("team add _team team_test", "team add fena.team team_test"),
        ("team add _team team test", "team add fena.team team test"),
        ("team empty _team", "team empty fena.team"),
        ("team _team + @a", "team join fena.team @a"),
        ("team _team + target", "team join fena.team target"),
        ("team leave @a", "team leave @a"),
        ("team leave target", "team leave target"),
        ("team remove _team", "team remove fena.team"),
        ("team _team friendlyfire = true", "team option fena.team friendlyfire true"),
        ("team _team color = green", "team option fena.team color green"),
        ("team _team seeFriendlyInvisibles = false", "team option fena.team seeFriendlyInvisibles false"),
        ("team _team nametagVisibility = hideForOwnTeam", "team option fena.team nametagVisibility hideForOwnTeam"),
        ("team _team deathMessageVisibility = never", "team option fena.team deathMessageVisibility never"),
        ("team _team collisionRule = pushOwnTeam", "team option fena.team collisionRule pushOwnTeam"),
        ('team _team prefix = {"text":"PREFIX","color":"blue"}', 'team option fena.team prefix {"text":"PREFIX","color":"blue"}'),
        ('team _team suffix = {"text":"SUFFIX","color":"red"}', 'team option fena.team suffix {"text":"SUFFIX","color":"red"}'),
    )
    for source, expected in cases:
        test_cmd(source, expected)
| mit |
nmercier/linux-cross-gcc | linux/lib/python2.7/hotshot/log.py | 175 | 6239 | import _hotshot
import os.path
import parser
import symbol
from _hotshot import \
WHAT_ENTER, \
WHAT_EXIT, \
WHAT_LINENO, \
WHAT_DEFINE_FILE, \
WHAT_DEFINE_FUNC, \
WHAT_ADD_INFO
__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]

# Friendly aliases for the event codes yielded by LogReader.next().
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
def __init__(self, logfn):
# fileno -> filename
self._filemap = {}
# (fileno, lineno) -> filename, funcname
self._funcmap = {}
self._reader = _hotshot.logreader(logfn)
self._nextitem = self._reader.next
self._info = self._reader.info
if 'current-directory' in self._info:
self.cwd = self._info['current-directory']
else:
self.cwd = None
# This mirrors the call stack of the profiled code as the log
# is read back in. It contains tuples of the form:
#
# (file name, line number of function def, function name)
#
self._stack = []
self._append = self._stack.append
self._pop = self._stack.pop
def close(self):
self._reader.close()
def fileno(self):
"""Return the file descriptor of the log reader's log file."""
return self._reader.fileno()
def addinfo(self, key, value):
"""This method is called for each additional ADD_INFO record.
This can be overridden by applications that want to receive
these events. The default implementation does not need to be
called by alternate implementations.
The initial set of ADD_INFO records do not pass through this
mechanism; this is only needed to receive notification when
new values are added. Subclasses can inspect self._info after
calling LogReader.__init__().
"""
pass
def get_filename(self, fileno):
try:
return self._filemap[fileno]
except KeyError:
raise ValueError, "unknown fileno"
def get_filenames(self):
return self._filemap.values()
def get_fileno(self, filename):
filename = os.path.normcase(os.path.normpath(filename))
for fileno, name in self._filemap.items():
if name == filename:
return fileno
raise ValueError, "unknown filename"
def get_funcname(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
raise ValueError, "unknown function location"
# Iteration support:
# This adds an optional (& ignored) parameter to next() so that the
# same bound method can be used as the __getitem__() method -- this
# avoids using an additional method call which kills the performance.
def next(self, index=0):
while 1:
# This call may raise StopIteration:
what, tdelta, fileno, lineno = self._nextitem()
# handle the most common cases first
if what == WHAT_ENTER:
filename, funcname = self._decode_location(fileno, lineno)
t = (filename, lineno, funcname)
self._append(t)
return what, t, tdelta
if what == WHAT_EXIT:
try:
return what, self._pop(), tdelta
except IndexError:
raise StopIteration
if what == WHAT_LINENO:
filename, firstlineno, funcname = self._stack[-1]
return what, (filename, lineno, funcname), tdelta
if what == WHAT_DEFINE_FILE:
filename = os.path.normcase(os.path.normpath(tdelta))
self._filemap[fileno] = filename
elif what == WHAT_DEFINE_FUNC:
filename = self._filemap[fileno]
self._funcmap[(fileno, lineno)] = (filename, tdelta)
elif what == WHAT_ADD_INFO:
# value already loaded into self.info; call the
# overridable addinfo() handler so higher-level code
# can pick up the new value
if tdelta == 'current-directory':
self.cwd = lineno
self.addinfo(tdelta, lineno)
else:
raise ValueError, "unknown event type"
def __iter__(self):
return self
#
# helpers
#
def _decode_location(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
#
# This should only be needed when the log file does not
# contain all the DEFINE_FUNC records needed to allow the
# function name to be retrieved from the log file.
#
if self._loadfile(fileno):
filename = funcname = None
try:
filename, funcname = self._funcmap[(fileno, lineno)]
except KeyError:
filename = self._filemap.get(fileno)
funcname = None
self._funcmap[(fileno, lineno)] = (filename, funcname)
return filename, funcname
def _loadfile(self, fileno):
    """Parse the source file behind *fileno* and fill self._funcmap
    with (filename, funcname) entries for every def/lambda found.

    NOTE(review): the return value is inconsistent -- 1 ("give up")
    when the file cannot be identified, implicit None both after a
    successful parse and after an IOError; callers only test truthiness.
    """
    try:
        filename = self._filemap[fileno]
    except KeyError:
        print "Could not identify fileId", fileno
        return 1
    if filename is None:
        return 1
    absname = os.path.normcase(os.path.join(self.cwd, filename))
    try:
        fp = open(absname)
    except IOError:
        # Source no longer readable; not fatal, just no names.
        return
    st = parser.suite(fp.read())
    fp.close()
    # Scan the tree looking for def and lambda nodes, filling in
    # self._funcmap with all the available information.
    funcdef = symbol.funcdef
    lambdef = symbol.lambdef
    stack = [st.totuple(1)]
    while stack:
        tree = stack.pop()
        try:
            sym = tree[0]
        except (IndexError, TypeError):
            # Leaf token (plain string/number); nothing to descend into.
            continue
        if sym == funcdef:
            # tree[2] is the NAME token: (type, name, lineno).
            self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
        elif sym == lambdef:
            self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
        stack.extend(list(tree[1:]))
| bsd-3-clause |
appleseedhq/gaffer | python/GafferArnoldTest/__init__.py | 1 | 2815 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from ArnoldShaderTest import ArnoldShaderTest
from ArnoldRenderTest import ArnoldRenderTest
from ArnoldOptionsTest import ArnoldOptionsTest
from ArnoldAttributesTest import ArnoldAttributesTest
from ArnoldVDBTest import ArnoldVDBTest
from ArnoldLightTest import ArnoldLightTest
from ArnoldMeshLightTest import ArnoldMeshLightTest
from InteractiveArnoldRenderTest import InteractiveArnoldRenderTest
from ArnoldDisplacementTest import ArnoldDisplacementTest
from LightToCameraTest import LightToCameraTest
from IECoreArnoldPreviewTest import *
from ArnoldAOVShaderTest import ArnoldAOVShaderTest
from ArnoldAtmosphereTest import ArnoldAtmosphereTest
from ArnoldBackgroundTest import ArnoldBackgroundTest
from ArnoldTextureBakeTest import ArnoldTextureBakeTest
from ModuleTest import ModuleTest
from ArnoldShaderBallTest import ArnoldShaderBallTest
from ArnoldCameraShadersTest import ArnoldCameraShadersTest
from ArnoldLightFilterTest import ArnoldLightFilterTest
if __name__ == "__main__":
    # Allow this test package to be executed directly, running all of
    # the test cases imported above.
    import unittest
    unittest.main()
| bsd-3-clause |
yqm/sl4a | python-build/python-libs/gdata/samples/finance/test_finance.py | 128 | 10785 | #!/usr/bin/python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'thesweeheng@gmail.com'
from gdata.finance.service import \
FinanceService, PortfolioQuery, PositionQuery
from gdata.finance import \
PortfolioEntry, PortfolioData, TransactionEntry, TransactionData, \
Price, Commission, Money
import datetime
import sys
def PrintReturns(pfx, d):
"""Print returns."""
print pfx, '%1.5f(1w) %1.5f(4w) %1.5f(3m) %1.5f(YTD)' % tuple(
float(i) for i in (d.return1w, d.return4w, d.return3m, d.returnYTD))
pfx = ' ' * len(pfx)
print pfx, '%1.5f(1y) %1.5f(3y) %1.5f(5y) %1.5f(overall)' % tuple(
float(i) for i in (d.return1y, d.return3y, d.return5y, d.return_overall))
PrRtn = PrintReturns
def PrintTransactions(transactions):
"""Print transactions."""
print " Transactions:"
fmt = ' %4s %-23s %-10s %6s %-11s %-11s'
print fmt % ('ID','Date','Type','Shares','Price','Commission')
for txn in transactions:
d = txn.transaction_data
print fmt % (txn.transaction_id, d.date or '----', d.type,
d.shares, d.price.money[0], d.commission.money[0])
if d.notes:
print " Notes:", d.notes
print
def PrintPosition(pos, with_returns=False):
    """Print a single position entry and, when present, its inlined
    transactions."""
    print ' Position :', pos.position_title
    print ' Ticker ID :', pos.ticker_id
    print ' Symbol :', pos.symbol
    print ' Last updated :', pos.updated.text
    d = pos.position_data
    print ' Shares :', d.shares
    if with_returns:
        # Return data is only present when queried with returns=true.
        print ' Gain % :', d.gain_percentage
        PrRtn(' Returns :', d)
    print ' Cost basis :', d.cost_basis
    print ' Days gain :', d.days_gain
    print ' Gain :', d.gain
    print ' Market value :', d.market_value
    print
    if pos.transactions:
        # Transactions are inlined only when queried with transactions=true.
        print " <inlined transactions>\n"
        PrintTransactions(pos.transactions)
        print " </inlined transactions>\n"
def PrintPositions(positions, with_returns=False):
    """Print every position in *positions*."""
    for position in positions:
        PrintPosition(position, with_returns)
def PrintPortfolio(pfl, with_returns=False):
    """Print a single portfolio entry and, when present, its inlined
    positions."""
    print 'Portfolio Title :', pfl.portfolio_title
    print 'Portfolio ID :', pfl.portfolio_id
    print ' Last updated :', pfl.updated.text
    d = pfl.portfolio_data
    print ' Currency :', d.currency_code
    if with_returns:
        # Return data is only present when queried with returns=true.
        print ' Gain % :', d.gain_percentage
        PrRtn(' Returns :', d)
    print ' Cost basis :', d.cost_basis
    print ' Days gain :', d.days_gain
    print ' Gain :', d.gain
    print ' Market value :', d.market_value
    print
    if pfl.positions:
        # Positions are inlined only when queried with positions=true.
        print " <inlined positions>\n"
        PrintPositions(pfl.positions, with_returns)
        print " </inlined positions>\n"
def PrintPortfolios(portfolios, with_returns=False):
    """Print every portfolio in *portfolios*."""
    for portfolio in portfolios:
        PrintPortfolio(portfolio, with_returns)
def ShowCallDetails(meth):
def wrap(*args, **kwargs):
print '@', meth.__name__, args[1:], kwargs
meth(*args, **kwargs)
return wrap
class FinanceTester(object):
def __init__(self, email, password):
self.client = FinanceService(source='gdata-finance-test')
self.client.ClientLogin(email, password)
def GetPortfolios(self, with_returns=False, inline_positions=False):
query = PortfolioQuery()
query.returns = with_returns
query.positions = inline_positions
return self.client.GetPortfolioFeed(query=query).entry
def GetPositions(self, portfolio, with_returns=False, inline_transactions=False):
query = PositionQuery()
query.returns = with_returns
query.transactions = inline_transactions
return self.client.GetPositionFeed(portfolio, query=query).entry
def GetTransactions(self, position=None, portfolio=None, ticker=None):
if position:
feed = self.client.GetTransactionFeed(position)
elif portfolio and ticker:
feed = self.client.GetTransactionFeed(
portfolio_id=portfolio.portfolio_id, ticker_id=ticker)
return feed.entry
@ShowCallDetails
def TestShowDetails(self, with_returns=False, inline_positions=False,
inline_transactions=False):
portfolios = self.GetPortfolios(with_returns, inline_positions)
for pfl in portfolios:
PrintPortfolio(pfl, with_returns)
positions = self.GetPositions(pfl, with_returns, inline_transactions)
for pos in positions:
PrintPosition(pos, with_returns)
PrintTransactions(self.GetTransactions(pos))
def DeletePortfoliosByName(self, portfolio_titles):
for pfl in self.GetPortfolios():
if pfl.portfolio_title in portfolio_titles:
self.client.DeletePortfolio(pfl)
def AddPortfolio(self, portfolio_title, currency_code):
pfl = PortfolioEntry(portfolio_data=PortfolioData(
currency_code=currency_code))
pfl.portfolio_title = portfolio_title
return self.client.AddPortfolio(pfl)
def UpdatePortfolio(self, portfolio,
portfolio_title=None, currency_code=None):
if portfolio_title:
portfolio.portfolio_title = portfolio_title
if currency_code:
portfolio.portfolio_data.currency_code = currency_code
return self.client.UpdatePortfolio(portfolio)
def DeletePortfolio(self, portfolio):
self.client.DeletePortfolio(portfolio)
@ShowCallDetails
def TestManagePortfolios(self):
pfl_one = 'Portfolio Test: Emerging Markets 12345'
pfl_two = 'Portfolio Test: Renewable Energy 31415'
print '---- Deleting portfolios ----'
self.DeletePortfoliosByName([pfl_one, pfl_two])
PrintPortfolios(self.GetPortfolios())
print '---- Adding new portfolio ----'
pfl = self.AddPortfolio(pfl_one, 'SGD')
PrintPortfolios(self.GetPortfolios())
print '---- Changing portfolio title and currency code ----'
pfl = self.UpdatePortfolio(pfl, pfl_two, 'USD')
PrintPortfolios(self.GetPortfolios())
print '---- Deleting portfolio ----'
self.DeletePortfolio(pfl)
PrintPortfolios(self.GetPortfolios())
def Transact(self, type, portfolio, ticker, date=None, shares=None,
notes=None, price=None, commission=None, currency_code=None):
if price is not None:
price = Price(money=[Money(amount=str(price),
currency_code=currency_code or
portfolio.portfolio_data.currency_code)])
if commission is not None:
commission = Commission(money=[Money(amount=str(comission),
currency_code=currency_code or
portfolio.portfolio_data.currency_code)])
if date is not None and isinstance(date, datetime.datetime):
date = date.isoformat()
if shares is not None:
shares = str(shares)
txn = TransactionEntry(transaction_data=TransactionData(type=type,
date=date, shares=shares, notes=notes, price=price,
commission=commission))
return self.client.AddTransaction(txn,
portfolio_id=portfolio.portfolio_id, ticker_id=ticker)
def Buy(self, portfolio, ticker, **kwargs):
return self.Transact('Buy', portfolio, ticker, **kwargs)
def Sell(self, portfolio, ticker, **kwargs):
return self.Transact('Sell', portfolio, ticker, **kwargs)
def GetPosition(self, portfolio, ticker, with_returns=False, inline_transactions=False):
query = PositionQuery()
query.returns = with_returns
query.transactions = inline_transactions
return self.client.GetPosition(
portfolio_id=portfolio.portfolio_id, ticker_id=ticker, query=query)
def DeletePosition(self, position):
self.client.DeletePosition(position_entry=position)
def UpdateTransaction(self, transaction):
self.client.UpdateTransaction(transaction)
def DeleteTransaction(self, transaction):
self.client.DeleteTransaction(transaction)
@ShowCallDetails
def TestManageTransactions(self):
pfl_title = 'Transaction Test: Technology 27182'
self.DeletePortfoliosByName([pfl_title])
print '---- Adding new portfolio ----'
pfl = self.AddPortfolio(pfl_title, 'USD')
PrintPortfolios(self.GetPortfolios())
print '---- Adding buy transactions ----'
tkr1 = 'NASDAQ:GOOG'
date = datetime.datetime(2009,04,01)
days = datetime.timedelta(1)
txn1 = self.Buy(pfl, tkr1, shares=500, price=321.00, date=date)
txn2 = self.Buy(pfl, tkr1, shares=150, price=312.00, date=date+15*days)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print '---- Adding sell transactions ----'
txn3 = self.Sell(pfl, tkr1, shares=400, price=322.00, date=date+30*days)
txn4 = self.Sell(pfl, tkr1, shares=200, price=330.00, date=date+45*days)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print "---- Modifying first and deleting third ----"
txn1.transaction_data.shares = '400.0'
self.UpdateTransaction(txn1)
self.DeleteTransaction(txn3)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print "---- Deleting position ----"
print "Number of positions (before):", len(self.GetPositions(pfl))
self.DeletePosition(pos)
print "Number of positions (after) :", len(self.GetPositions(pfl))
print '---- Deleting portfolio ----'
self.DeletePortfolio(pfl)
PrintPortfolios(self.GetPortfolios())
if __name__ == '__main__':
    # Command line: email, password, then an optional list of
    # test-case indices (defaults to all of them).
    try:
        email = sys.argv[1]
        password = sys.argv[2]
        cases = sys.argv[3:]
    except IndexError:
        print "Usage: test_finance account@google.com password [0 1 2...]"
        sys.exit(1)
    tester = FinanceTester(email, password)
    # The runnable test cases, indexable by the command-line arguments.
    tests = [
        tester.TestShowDetails,
        lambda: tester.TestShowDetails(with_returns=True),
        tester.TestManagePortfolios,
        tester.TestManageTransactions,
        lambda: tester.TestShowDetails(with_returns=True, inline_positions=True),
        lambda: tester.TestShowDetails(with_returns=True, inline_positions=True,
            inline_transactions=True),]
    if not cases:
        cases = range(len(tests))
    for i in cases:
        print "===== TEST CASE", i, "="*50
        tests[int(i)]()
| apache-2.0 |
KokareIITP/django | django/contrib/postgres/aggregates/statistics.py | 493 | 2033 | from django.db.models import FloatField, IntegerField
from django.db.models.aggregates import Aggregate
# Public API of this module: the PostgreSQL statistical aggregates.
__all__ = [
    'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept',
    'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate',
]
class StatAggregate(Aggregate):
    """Base class for PostgreSQL two-argument statistical aggregates.

    Wraps a pair of expressions (y, x) -- in that order, matching
    PostgreSQL's argument order -- and resolves them without the
    aggregate-specific checks performed by ``Aggregate``.
    """

    def __init__(self, y, x, output_field=None):
        if not x or not y:
            raise ValueError('Both y and x must be provided.')
        # Create the default lazily: the original signature used
        # ``output_field=FloatField()``, a single shared instance (the
        # classic mutable-default-argument pitfall) handed to every
        # aggregate ever constructed.
        if output_field is None:
            output_field = FloatField()
        super(StatAggregate, self).__init__(y=y, x=x, output_field=output_field)
        self.x = x
        self.y = y
        self.source_expressions = self._parse_expressions(self.y, self.x)

    def get_source_expressions(self):
        return self.y, self.x

    def set_source_expressions(self, exprs):
        self.y, self.x = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Deliberately skip Aggregate.resolve_expression: its
        # source-expression checks do not apply to these two-argument
        # aggregates, so resolve via Aggregate's parent instead.
        return super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
class Corr(StatAggregate):
    """PostgreSQL ``corr(y, x)``: correlation coefficient."""
    function = 'CORR'
class CovarPop(StatAggregate):
    """PostgreSQL covariance: ``covar_pop`` (population) by default,
    ``covar_samp`` (sample) when ``sample=True``."""
    def __init__(self, y, x, sample=False):
        # The SQL function name depends on the sample flag, so it is
        # chosen per-instance rather than as a class attribute.
        self.function = 'COVAR_SAMP' if sample else 'COVAR_POP'
        super(CovarPop, self).__init__(y, x)
class RegrAvgX(StatAggregate):
    """PostgreSQL ``regr_avgx(y, x)``: average of the independent variable."""
    function = 'REGR_AVGX'
class RegrAvgY(StatAggregate):
    """PostgreSQL ``regr_avgy(y, x)``: average of the dependent variable."""
    function = 'REGR_AVGY'
class RegrCount(StatAggregate):
    """PostgreSQL ``regr_count(y, x)``: number of rows where both
    expressions are non-null.  Returns an integer, never NULL."""
    function = 'REGR_COUNT'
    def __init__(self, y, x):
        super(RegrCount, self).__init__(y=y, x=x, output_field=IntegerField())
    def convert_value(self, value, expression, connection, context):
        # Normalize SQL NULL to 0 so callers always get an int.
        if value is None:
            return 0
        return int(value)
class RegrIntercept(StatAggregate):
    """PostgreSQL ``regr_intercept(y, x)``: y-intercept of the
    least-squares fit."""
    function = 'REGR_INTERCEPT'
class RegrR2(StatAggregate):
    """PostgreSQL ``regr_r2(y, x)``: square of the correlation coefficient."""
    function = 'REGR_R2'
class RegrSlope(StatAggregate):
    """PostgreSQL ``regr_slope(y, x)``: slope of the least-squares fit."""
    function = 'REGR_SLOPE'
class RegrSXX(StatAggregate):
    """PostgreSQL ``regr_sxx(y, x)``: sum of squares of x."""
    function = 'REGR_SXX'
class RegrSXY(StatAggregate):
    """PostgreSQL ``regr_sxy(y, x)``: sum of products of x times y."""
    function = 'REGR_SXY'
class RegrSYY(StatAggregate):
    """PostgreSQL ``regr_syy(y, x)``: sum of squares of y."""
    function = 'REGR_SYY'
| bsd-3-clause |
ColorFuzzy/tornado | tornado/test/httpclient_test.py | 3 | 26307 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
    """Responds with a plain-text greeting; ?name= overrides "world"."""

    def get(self):
        who = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % who)
class PostHandler(RequestHandler):
    """Echoes the two expected form arguments back to the client."""

    def post(self):
        arg1 = self.get_argument("arg1")
        arg2 = self.get_argument("arg2")
        self.finish("Post arg1: %s, arg2: %s" % (arg1, arg2))
class PutHandler(RequestHandler):
    """Echoes the PUT request body back, prefixed with "Put body: "."""
    def put(self):
        self.write("Put body: ")
        self.write(self.request.body)
class RedirectHandler(RequestHandler):
    """Redirects to ?url= with ?status= (default 302).

    A body is written first so tests can verify clients handle
    redirect responses that carry a payload.
    """
    def prepare(self):
        self.write('redirects can have bodies too')
        self.redirect(self.get_argument("url"),
                      status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
    """Writes "asdf" and "qwer" as two separate response chunks."""
    @gen.coroutine
    def get(self):
        self.write("asdf")
        self.flush()
        # Wait a bit to ensure the chunks are sent and received separately.
        yield gen.sleep(0.01)
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Echoes the Authorization header so tests can inspect credentials."""
    def get(self):
        self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
    """Redirects /countdown/N to /countdown/N-1 until zero is reached."""

    def get(self, count):
        remaining = int(count)
        if remaining <= 0:
            self.write("Zero")
        else:
            self.redirect(self.reverse_url("countdown", remaining - 1))
class EchoPostHandler(RequestHandler):
    """Echoes the raw POST body back verbatim."""
    def post(self):
        self.write(self.request.body)
class UserAgentHandler(RequestHandler):
    """Echoes the request's User-Agent header (or a placeholder)."""
    def get(self):
        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
    """Returns a 304 that (incorrectly, per the RFC) carries a
    Content-Length header, to mimic non-conforming servers."""
    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)
    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class PatchHandler(RequestHandler):
    """Echoes the PATCH payload so tests can check it was transmitted."""
    def patch(self):
        """Return the request payload - so we can check it is being kept"""
        self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
    """Responds to every standard HTTP method (plus the non-standard
    OTHER) with the method name."""
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
    def method(self):
        self.write(self.request.method)
    # Every verb shares the single implementation above.
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    """Shared HTTP-client test suite.

    Run once against the default HTTPClient implementation here and
    again by each implementation's own test module (see the comment
    above the class).
    """

    def get_app(self):
        # One route per handler defined above; gzip enabled to exercise
        # transparent decompression.
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/put", PutHandler),
            url("/redirect", RedirectHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
            url('/patch', PatchHandler),
        ], gzip=True)

    def test_patch_receives_payload(self):
        body = b"some patch data"
        response = self.fetch("/patch", method='PATCH', body=body)
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, body)

    @skipOnTravis
    def test_hello_world(self):
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        self.assertEqual(int(response.request_time), 0)
        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")

    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)

    def test_post(self):
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_chunked(self):
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")
        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)

    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                if b"HTTP/1." not in request_data:
                    self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())

    def test_streaming_stack_context(self):
        chunks = []
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)

        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_basic_auth(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_basic_auth_explicit_mode(self):
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")

    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()

    def test_follow_redirect(self):
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)

    def test_credentials_in_url(self):
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)

    def test_body_encoding(self):
        unicode_body = u"\xe9"
        byte_body = binascii.a2b_hex(b"e9")

        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))

        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u"foo")
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)

    def test_types(self):
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)

    def test_header_callback(self):
        first_line = []
        headers = {}
        chunks = []

        def header_callback(header_line):
            if header_line.startswith('HTTP/1.1 101'):
                # Upgrading to HTTP/2
                pass
            elif header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k.lower()] = v.strip()

        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)

        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1, first_line)
        self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])

    def test_header_callback_stack_context(self):
        exc_info = []

        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True

        def header_callback(header_line):
            if header_line.lower().startswith('content-type:'):
                1 / 0

        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)

    def test_configure_defaults(self):
        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        try:
            client.fetch(self.get_url('/user_agent'), callback=self.stop)
            response = self.wait()
            self.assertEqual(response.body, b'TestDefaultUserAgent')
        finally:
            client.close()

    def test_header_types(self):
        # Header values may be passed as character or utf8 byte strings,
        # in a plain dictionary or an HTTPHeaders object.
        # Keys must always be the native str type.
        # All combinations should have the same results on the wire.
        for value in [u"MyUserAgent", b"MyUserAgent"]:
            for container in [dict, HTTPHeaders]:
                headers = container()
                headers['User-Agent'] = value
                resp = self.fetch('/user_agent', headers=headers)
                self.assertEqual(
                    resp.body, b"MyUserAgent",
                    "response=%r, value=%r, container=%r" %
                    (resp.body, value, container))

    def test_multi_line_headers(self):
        # Multi-line http headers are rare but rfc-allowed
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                if b"HTTP/1." not in request_data:
                    self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)

            def accept_callback(conn, address):
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
            self.io_loop.remove_handler(sock.fileno())

    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')

    def test_final_callback_stack_context(self):
        # The final callback should be run outside of the httpclient's
        # stack_context. We want to ensure that there is not stack_context
        # between the user's callback and the IOLoop, so monkey-patch
        # IOLoop.handle_callback_exception and disable the test harness's
        # context with a NullContext.
        # Note that this does not apply to secondary callbacks (header
        # and streaming_callback), as errors there must be seen as errors
        # by the http client so it can clean up the connection.
        exc_info = []

        def handle_callback_exception(callback):
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)

    @gen_test
    def test_future_interface(self):
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')

    @gen_test
    def test_future_http_error(self):
        with self.assertRaises(HTTPError) as context:
            yield self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(context.exception.code, 404)
        self.assertEqual(context.exception.response.code, 404)

    @gen_test
    def test_future_http_error_no_raise(self):
        response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
        self.assertEqual(response.code, 404)

    @gen_test
    def test_reuse_request_from_response(self):
        # The response.request attribute should be an HTTPRequest, not
        # a _RequestProxy.
        # This test uses self.http_client.fetch because self.fetch calls
        # self.get_url on the input unconditionally.
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')

    def test_all_methods(self):
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/all_methods', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT', 'PATCH']:
            response = self.fetch('/all_methods', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
        response = self.fetch('/all_methods', method='HEAD')
        self.assertEqual(response.body, b'')
        response = self.fetch('/all_methods', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'OTHER')

    def test_body_sanity_checks(self):
        # These methods require a body.
        for method in ('POST', 'PUT', 'PATCH'):
            with self.assertRaises(ValueError) as context:
                resp = self.fetch('/all_methods', method=method)
                resp.rethrow()
            self.assertIn('must not be None', str(context.exception))

            resp = self.fetch('/all_methods', method=method,
                              allow_nonstandard_methods=True)
            self.assertEqual(resp.code, 200)

        # These methods don't allow a body.
        for method in ('GET', 'DELETE', 'OPTIONS'):
            with self.assertRaises(ValueError) as context:
                resp = self.fetch('/all_methods', method=method, body=b'asdf')
                resp.rethrow()
            self.assertIn('must be None', str(context.exception))

            # In most cases this can be overridden, but curl_httpclient
            # does not allow body with a GET at all.
            if method != 'GET':
                resp = self.fetch('/all_methods', method=method, body=b'asdf',
                                  allow_nonstandard_methods=True)
                resp.rethrow()
                self.assertEqual(resp.code, 200)

    # This test causes odd failures with the combination of
    # curl_httpclient (at least with the version of libcurl available
    # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
    # curl decides the response came back too soon and closes the connection
    # to start again.  It does this *before* telling the socket callback to
    # unregister the FD.  Some IOLoop implementations have special kernel
    # integration to discover this immediately.  Tornado's IOLoops
    # ignore errors on remove_handler to accommodate this behavior, but
    # Twisted's reactor does not.  The removeReader call fails and so
    # do all future removeAll calls (which our tests do at cleanup).
    #
    # def test_post_307(self):
    #    response = self.fetch("/redirect?status=307&url=/post",
    #                          method="POST", body=b"arg1=foo&arg2=bar")
    #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")

    def test_put_307(self):
        response = self.fetch("/redirect?status=307&url=/put",
                              method="PUT", body=b"hello")
        response.rethrow()
        self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
    """Tests for _RequestProxy, which layers per-request values over defaults."""

    def _make_proxy(self, request_kwargs, defaults):
        """Build a _RequestProxy for http://example.com/ with the given args."""
        return _RequestProxy(
            HTTPRequest('http://example.com/', **request_kwargs), defaults)

    def test_request_set(self):
        proxy = self._make_proxy(dict(user_agent='foo'), dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        proxy = self._make_proxy(dict(), dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        # The value set on the request wins over the default.
        proxy = self._make_proxy(dict(proxy_host='foo'), dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        proxy = self._make_proxy(dict(), dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        proxy = self._make_proxy(dict(), dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        # A None defaults mapping behaves like an empty one.
        proxy = self._make_proxy(dict(), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Tests for the HTTPResponse object itself."""

    def test_str(self):
        """str() of a response names the class and includes the status code."""
        request = HTTPRequest('http://example.com')
        rendered = str(HTTPResponse(request, 200, headers={}, buffer=BytesIO()))
        self.assertTrue(rendered.startswith('HTTPResponse('))
        self.assertIn('code=200', rendered)
class SyncHTTPClientTest(unittest.TestCase):
    """Exercises the blocking HTTPClient against an HTTP server running on
    its own IOLoop in a background thread.
    """

    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # Server side: dedicated IOLoop started in a daemonless thread below.
        self.server_ioloop = IOLoop()

        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        # Client side: synchronous client used from the test thread.
        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)
        # Schedule shutdown on the server's own loop, then join its thread
        # before closing client and loop resources.
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        # Loopback address of the port picked by bind_unused_port().
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Tests for attribute handling on HTTPRequest."""

    def test_headers(self):
        req = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(req.headers, {'foo': 'bar'})

    def test_headers_setter(self):
        req = HTTPRequest('http://example.com')
        req.headers = {'bar': 'baz'}
        self.assertEqual(req.headers, {'bar': 'baz'})

    def test_null_headers_setter(self):
        # Assigning None is normalized to an empty header collection.
        req = HTTPRequest('http://example.com')
        req.headers = None
        self.assertEqual(req.headers, {})

    def test_body(self):
        req = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(req.body, utf8('foo'))

    def test_body_setter(self):
        # The body setter encodes text to bytes just like the constructor.
        req = HTTPRequest('http://example.com')
        req.body = 'foo'
        self.assertEqual(req.body, utf8('foo'))

    def test_if_modified_since(self):
        http_date = datetime.datetime.utcnow()
        req = HTTPRequest('http://example.com', if_modified_since=http_date)
        expected = {'If-Modified-Since': format_timestamp(http_date)}
        self.assertEqual(req.headers, expected)
class HTTPErrorTestCase(unittest.TestCase):
    """Tests for the HTTPError exception type."""

    def test_copy(self):
        """copy.copy must produce a distinct object with the same code."""
        original = HTTPError(403)
        duplicate = copy.copy(original)
        self.assertIsNot(original, duplicate)
        self.assertEqual(original.code, duplicate.code)

    def test_str(self):
        self.assertEqual(str(HTTPError(403)), "HTTP 403: Forbidden")
| apache-2.0 |
cognitivefashion/cf-sdk-python | dominant_colors_product.py | 1 | 1998 | #------------------------------------------------------------------------------
# Get the dominant colors for an image in the catalog.
# GET /v1/catalog/{catalog_name}/dominant_colors/{id}/{image_id}
#
# Python 2 script (print statements, urlparse).  Requires the `requests`
# package and a local `props` module providing the API url, key and
# catalog name.
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint

from props import *

# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']

# Pass the api key into the header.
headers = {'X-Api-Key': props['X-Api-Key']}

# Query parameters.
params = {}
# Optional parameters.
#params['fraction_pixels_threshold'] = 0.1

# Path parameters.
# NOTE(review): `id` shadows the builtin and looks like a product SKU --
# confirm against the catalog schema.
catalog_name = props['catalog_name']
id ='SHRES16AWFSDR9346B'
image_id = '1'

api_endpoint = '/v1/catalog/%s/dominant_colors/%s/%s'%(catalog_name,id,image_id)

url = urljoin(api_gateway_url,api_endpoint)

response = requests.get(url,
                        headers=headers,
                        params=params)

print response.status_code
pprint(response.json())

# Human friendly response: print the image urls and each dominant color.
results = response.json()

print('[image url ] %s'%(results['image_url']))

image_location = '%s?api_key=%s'%(urljoin(api_gateway_url,results['image_location']),
                                  props['X-Api-Key'])
print('[original image ] %s'%(image_location))

image_location = '%s&api_key=%s'%(urljoin(api_gateway_url,results['bounding_box']['image_location']),
                                  props['X-Api-Key'])
print('[bounding box ] %s'%(image_location))

for color_info in results['dominant_colors']:
    print('[dominant colors] %s - %1.2f - %s - %s - %s - %s'%(color_info['hex'],
                                                              color_info['fraction_pixels'],
                                                              color_info['name'],
                                                              color_info['entrylevel_name'],
                                                              color_info['universal_name'],
                                                              color_info['pantone_id']))
| apache-2.0 |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/pygments/__init__.py | 269 | 2974 | # -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError, err:
        # A common mistake is to pass the lexer *class* instead of an
        # instance; on Python 2 that surfaces as an "unbound method
        # get_tokens" TypeError, which we rewrite into a clearer message.
        if isinstance(err.args[0], str) and \
           'unbound method get_tokens' in err.args[0]:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
def format(tokens, formatter, outfile=None):
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    # NOTE: intentionally shadows the ``format`` builtin (public API name).
    try:
        if not outfile:
            #print formatter, 'using', formatter.encoding
            # Bytes buffer when the formatter encodes, text buffer otherwise.
            realoutfile = formatter.encoding and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError, err:
        # Rewrite the confusing "unbound method" error raised when a
        # formatter *class* (rather than an instance) is passed in.
        if isinstance(err.args[0], str) and \
           'unbound method format' in err.args[0]:
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    # Convenience wrapper: equivalent to format(lex(code, lexer), ...).
    return format(lex(code, lexer), formatter, outfile)


if __name__ == '__main__':
    # Allow running the package directly as the pygmentize command line tool.
    from pygments.cmdline import main
    sys.exit(main(sys.argv))
| mit |
cxxgtxy/tensorflow | tensorflow/contrib/tfprof/python/tools/tfprof/print_model_analysis_test.py | 4 | 6999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
# Baseline tfprof options used by the test below; each key mirrors a field
# of tfprof_options_pb2.OptionsProto.
TEST_OPTIONS = {
    'max_depth': 10000,
    'min_bytes': 0,
    'min_micros': 0,
    'min_params': 0,
    'min_float_ops': 0,
    'order_by': 'name',
    'account_type_regexes': ['.*'],
    'start_name_regexes': ['.*'],
    'trim_name_regexes': [],
    'show_name_regexes': ['.*'],
    'hide_name_regexes': [],
    'account_displayed_op_only': True,
    'select': ['params'],
    'output': 'stdout',
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
  """Checks PrintModelAnalysis output against a golden tfprof profile."""

  def _BuildSmallModel(self):
    """Build a tiny graph: zeros image -> 6x6x3x6 kernel 'DW' -> conv2d."""
    image = array_ops.zeros([2, 6, 6, 3])
    kernel = variable_scope.get_variable(
        'DW', [6, 6, 3, 6],
        dtypes.float32,
        initializer=init_ops.random_normal_initializer(stddev=0.001))
    x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
    return x

  def testPrintModelAnalysis(self):
    """Profile the small model by 'scope' and compare to the expected proto."""
    # Translate the TEST_OPTIONS dict into an OptionsProto.
    opts = tfprof_options_pb2.OptionsProto()
    opts.max_depth = TEST_OPTIONS['max_depth']
    opts.min_bytes = TEST_OPTIONS['min_bytes']
    opts.min_micros = TEST_OPTIONS['min_micros']
    opts.min_params = TEST_OPTIONS['min_params']
    opts.min_float_ops = TEST_OPTIONS['min_float_ops']
    opts.order_by = TEST_OPTIONS['order_by']
    for p in TEST_OPTIONS['account_type_regexes']:
      opts.account_type_regexes.append(p)
    for p in TEST_OPTIONS['start_name_regexes']:
      opts.start_name_regexes.append(p)
    for p in TEST_OPTIONS['trim_name_regexes']:
      opts.trim_name_regexes.append(p)
    for p in TEST_OPTIONS['show_name_regexes']:
      opts.show_name_regexes.append(p)
    for p in TEST_OPTIONS['hide_name_regexes']:
      opts.hide_name_regexes.append(p)
    opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
    for p in TEST_OPTIONS['select']:
      opts.select.append(p)
    opts.output = TEST_OPTIONS['output']

    with session.Session() as sess, ops.device('/cpu:0'):
      _ = self._BuildSmallModel()
      # Run the profiler on the serialized GraphDef and parse the result.
      tfprof_pb = tfprof_output_pb2.TFGraphNodeProto()
      tfprof_pb.ParseFromString(
          print_mdl.PrintModelAnalysis(
              sess.graph.as_graph_def().SerializeToString(),
              b'', b'', b'scope', opts.SerializeToString()))

      # Golden profile: only the 'DW' variable carries parameters
      # (6*6*3*6 = 648); timings/bytes are all zero since nothing ran.
      expected_pb = tfprof_output_pb2.TFGraphNodeProto()
      text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "Conv2D"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW"
exec_micros: 0
requested_bytes: 0
parameters: 648
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "DW/Assign"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal/RandomStandardNormal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mean"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mul"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/shape"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/stddev"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/read"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "zeros"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
      self.assertEqual(expected_pb, tfprof_pb)
if __name__ == '__main__':
test.main()
| apache-2.0 |
campbe13/openhatch | vendor/packages/kombu/kombu/async/timer.py | 27 | 6546 | # -*- coding: utf-8 -*-
"""
kombu.async.timer
=================
Timer scheduling Python callbacks.
"""
from __future__ import absolute_import
import heapq
import sys
from collections import namedtuple
from datetime import datetime
from functools import wraps
from time import time
from weakref import proxy as weakrefproxy
from kombu.five import monotonic
from kombu.log import get_logger
from kombu.utils.compat import timedelta_seconds
try:
from pytz import utc
except ImportError:
utc = None
DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
IS_PYPY = hasattr(sys, 'pypy_version_info')
logger = get_logger(__name__)
__all__ = ['Entry', 'Timer', 'to_timestamp']
scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
def to_timestamp(d, default_timezone=utc):
    """Return *d* as a POSIX timestamp.

    :class:`~datetime.datetime` values are converted (naive ones are first
    localized to ``default_timezone``); any other value is passed through
    unchanged, so numbers that are already timestamps survive intact.
    """
    if not isinstance(d, datetime):
        return d
    if d.tzinfo is None:
        d = d.replace(tzinfo=default_timezone)
    return timedelta_seconds(d - EPOCH)
class Entry(object):
    """A scheduled callback -- ``fun(*args, **kwargs)`` -- plus the
    bookkeeping (cancellation flag, last-run time, ordering) needed to
    live on :class:`Timer`'s heap."""

    if not IS_PYPY:  # pragma: no cover
        # Slots cut per-instance memory for the many pending entries;
        # '__weakref__' must be listed so the weak proxy below still works.
        __slots__ = (
            'fun', 'args', 'kwargs', 'tref', 'cancelled',
            '_last_run', '__weakref__',
        )

    def __init__(self, fun, args=None, kwargs=None):
        self.fun = fun
        self.args = args or []
        self.kwargs = kwargs or {}
        # Weak proxy handed back to callers so cancel() works without
        # keeping the entry itself alive.
        self.tref = weakrefproxy(self)
        self._last_run = None
        self.cancelled = False

    def __call__(self):
        # Invoke the wrapped callback.
        return self.fun(*self.args, **self.kwargs)

    def cancel(self):
        # Mark as cancelled; the entry stays on the heap and is skipped
        # when popped.  The proxy may already be dead, hence the guard.
        try:
            self.tref.cancelled = True
        except ReferenceError:  # pragma: no cover
            pass

    def __repr__(self):
        return '<TimerEntry: {0}(*{1!r}, **{2!r})'.format(
            self.fun.__name__, self.args, self.kwargs)

    def __hash__(self):
        # Hash on the callback plus the repr of its arguments, since the
        # args/kwargs containers themselves are unhashable.
        return hash((self.fun, repr(self.args), repr(self.kwargs)))

    # must not use hash() to order entries
    def __lt__(self, other):
        return id(self) < id(other)

    def __gt__(self, other):
        return id(self) > id(other)

    def __le__(self, other):
        return id(self) <= id(other)

    def __ge__(self, other):
        return id(self) >= id(other)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __ne__(self, other):
        # Needed on Python 2, which does not derive != from __eq__.
        return not self.__eq__(other)
class Timer(object):
    """ETA scheduler.

    Entries are kept on a heap ordered by absolute timestamp; iterating
    the timer yields either an entry that is due or the number of seconds
    to wait before polling again.
    """
    #: Entry class used by the ``call_*`` helpers (overridable).
    Entry = Entry

    #: Optional callback invoked with the exception when an entry fails.
    on_error = None

    def __init__(self, max_interval=None, on_error=None, **kwargs):
        self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
        self.on_error = on_error or self.on_error
        self._queue = []

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stop()

    def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` at the absolute time/timestamp ``eta``."""
        return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)

    def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` to run ``secs`` seconds from now."""
        return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)

    def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
        """Schedule ``fun`` every ``secs`` seconds until cancelled."""
        tref = self.Entry(fun, args, kwargs)

        @wraps(fun)
        def _reschedules(*args, **kwargs):
            last, now = tref._last_run, monotonic()
            lsince = (now - tref._last_run) if last else secs
            try:
                # Only fire if a full interval has elapsed since last run.
                if lsince and lsince >= secs:
                    tref._last_run = now
                    return fun(*args, **kwargs)
            finally:
                # Re-arm (compensating for elapsed time) unless cancelled.
                if not tref.cancelled:
                    last = tref._last_run
                    next = secs - (now - last) if last else secs
                    self.enter_after(next, tref, priority)

        tref.fun = _reschedules
        tref._last_run = None
        return self.enter_after(secs, tref, priority)

    def enter_at(self, entry, eta=None, priority=0, time=time):
        """Enter function into the scheduler.

        :param entry: Item to enter.
        :keyword eta: Scheduled time as a :class:`datetime.datetime` object.
        :keyword priority: Unused.

        """
        if eta is None:
            eta = time()
        if isinstance(eta, datetime):
            try:
                eta = to_timestamp(eta)
            except Exception as exc:
                if not self.handle_error(exc):
                    raise
                return
        return self._enter(eta, priority, entry)

    def enter_after(self, secs, entry, priority=0, time=time):
        """Enter ``entry`` to run ``secs`` seconds from now."""
        return self.enter_at(entry, time() + secs, priority)

    def _enter(self, eta, priority, entry, push=heapq.heappush):
        push(self._queue, scheduled(eta, priority, entry))
        return entry

    def apply_entry(self, entry):
        """Run ``entry`` now, routing any exception to ``handle_error``."""
        try:
            entry()
        except Exception as exc:
            if not self.handle_error(exc):
                logger.error('Error in timer: %r', exc, exc_info=True)

    def handle_error(self, exc_info):
        if self.on_error:
            self.on_error(exc_info)
            return True

    def stop(self):
        pass

    def __iter__(self, min=min, nowfun=time,
                 pop=heapq.heappop, push=heapq.heappush):
        """This iterator yields a tuple of ``(entry, wait_seconds)``,
        where if entry is :const:`None` the caller should wait
        for ``wait_seconds`` until it polls the schedule again."""
        max_interval = self.max_interval
        queue = self._queue

        while 1:
            if queue:
                eventA = queue[0]
                now, eta = nowfun(), eventA[0]

                if now < eta:
                    # Head not due yet: tell the caller how long to wait
                    # (bounded by max_interval so new entries are noticed).
                    yield min(eta - now, max_interval), None
                else:
                    eventB = pop(queue)

                    if eventB is eventA:
                        entry = eventA[2]
                        if not entry.cancelled:
                            yield None, entry
                        continue
                    else:
                        # Heap changed between peek and pop: put it back.
                        push(queue, eventB)
            else:
                yield None, None

    def clear(self):
        self._queue[:] = []  # atomic, without creating a new list.

    def cancel(self, tref):
        tref.cancel()

    def __len__(self):
        return len(self._queue)

    def __nonzero__(self):
        # A Timer is always truthy, even when empty (``if timer:`` must not
        # double as an emptiness test -- use len() for that).
        return True
    # BUG FIX: Python 3 ignores __nonzero__ and, because __len__ is
    # defined, an empty Timer evaluated as False there, contradicting
    # the explicit always-truthy intent above.  Alias __bool__ so both
    # Python versions behave identically.
    __bool__ = __nonzero__

    @property
    def queue(self, _pop=heapq.heappop):
        """Snapshot of underlying datastructure."""
        events = list(self._queue)
        return [_pop(v) for v in [events] * len(events)]

    @property
    def schedule(self):
        return self
| agpl-3.0 |
oppia/oppia | core/controllers/subscriptions.py | 4 | 1781 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers related to user subscriptions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import subscription_services
from core.domain import user_services
class SubscribeHandler(base.BaseHandler):
    """Handles operations relating to new subscriptions."""

    @acl_decorators.can_subscribe_to_users
    def post(self):
        """Subscribe the current user to the creator named in the payload."""
        creator_username = self.payload.get('creator_username')
        creator_id = user_services.get_user_id_from_username(creator_username)
        subscription_services.subscribe_to_creator(self.user_id, creator_id)
        self.render_json(self.values)
class UnsubscribeHandler(base.BaseHandler):
    """Handles operations related to unsubscriptions."""

    @acl_decorators.can_subscribe_to_users
    def post(self):
        """Unsubscribe the current user from the creator in the payload."""
        creator_username = self.payload.get('creator_username')
        creator_id = user_services.get_user_id_from_username(creator_username)
        subscription_services.unsubscribe_from_creator(
            self.user_id, creator_id)
        self.render_json(self.values)
| apache-2.0 |
bhaugen/foodnetwork | django_extensions/admin/widgets.py | 25 | 3014 | from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.text import truncate_words
from django.template.loader import render_to_string
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeySearchInput(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ForeignKeys in an autocomplete search input
    instead in a <select> box.
    """
    # Set in subclass to render the widget with a different template
    widget_template = None
    # Set this to the path of the search view
    search_path = '../foreignkey_autocomplete/'

    class Media:
        css = {
            'all': ('django_extensions/css/jquery.autocomplete.css',)
        }
        js = (
            'django_extensions/js/jquery.js',
            'django_extensions/js/jquery.bgiframe.min.js',
            'django_extensions/js/jquery.ajaxQueue.js',
            'django_extensions/js/jquery.autocomplete.js',
        )

    def label_for_value(self, value):
        """Return a short human-readable label for the selected object."""
        key = self.rel.get_related_field().name
        obj = self.rel.to._default_manager.get(**{key: value})
        return truncate_words(obj, 14)

    def __init__(self, rel, search_fields, attrs=None):
        self.search_fields = search_fields
        super(ForeignKeySearchInput, self).__init__(rel, attrs)

    def render(self, name, value, attrs=None):
        """Render the raw text input plus the autocomplete template."""
        if attrs is None:
            attrs = {}
        opts = self.rel.to._meta
        app_label = opts.app_label
        model_name = opts.object_name.lower()
        related_url = '../../../%s/%s/' % (app_label, model_name)
        params = self.url_parameters()
        if params:
            url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = ''
        # BUG FIX: use 'in' instead of the Python-2-only dict.has_key().
        if 'class' not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField'
        # Call the TextInput render method directly to have more control.
        # (A previous version also called super().render() first and then
        # discarded the result -- that dead call has been removed; the only
        # observable effect it had, defaulting attrs['class'], is done above.)
        output = [forms.TextInput.render(self, name, value, attrs)]
        if value:
            label = self.label_for_value(value)
        else:
            label = u''
        # Context for the autocomplete template rendered below the input.
        context = {
            'url': url,
            'related_url': related_url,
            'admin_media_prefix': settings.ADMIN_MEDIA_PREFIX,
            'search_path': self.search_path,
            'search_fields': ','.join(self.search_fields),
            'model_name': model_name,
            'app_label': app_label,
            'label': label,
            'name': name,
        }
        output.append(render_to_string(self.widget_template or (
            'django_extensions/widgets/%s/%s/foreignkey_searchinput.html' % (app_label, model_name),
            'django_extensions/widgets/%s/foreignkey_searchinput.html' % app_label,
            'django_extensions/widgets/foreignkey_searchinput.html',
        ), context))
        output.reverse()
        return mark_safe(u''.join(output))
| mit |
esthermm/enco | enco_category/models/purchase_report.py | 1 | 4459 | # -*- coding: utf-8 -*-
# © 2017 Esther Martín - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models
from openerp import tools
class PurchaseReport(models.Model):
    """Extend the purchase analysis view with the CRM category set on the
    purchase order (``purchase_categ_id``, added by this module)."""
    _inherit = 'purchase.report'

    # Category copied from the originating purchase order.  Read-only
    # because the model is backed by a SQL view, not a real table.
    purchase_categ_id = fields.Many2one(
        comodel_name='crm.case.categ', readonly=True)

    def init(self, cr):
        """Recreate the ``purchase_report`` SQL view (old-style cr API),
        reproducing the upstream definition plus ``purchase_categ_id``
        and ``period_ack`` pulled from ``purchase_order``."""
        tools.sql.drop_view_if_exists(cr, 'purchase_report')
        # NOTE: the SQL below is the stock Odoo purchase_report view with
        # s.purchase_categ_id / s.period_ack added to SELECT and GROUP BY.
        cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
s.period_ack as period_ack,
s.purchase_categ_id as purchase_categ_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor,
s.purchase_categ_id,
s.period_ack
)
""")
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/linalg/tests/test_matfuncs.py | 1 | 33360 | #!/usr/bin/env python
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import functools
import random
import warnings
import numpy as np
import scipy.linalg
import scipy.linalg._expm_frechet
from numpy import array, matrix, identity, dot, sqrt
from numpy.testing import (TestCase, run_module_suite,
assert_array_equal, assert_array_less, assert_equal,
assert_array_almost_equal, assert_allclose, assert_, decorators)
from scipy._lib._numpy_compat import _assert_warns
from scipy.linalg import _matfuncs_inv_ssq
from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
expm, expm_frechet, expm_cond, norm)
from scipy.linalg.matfuncs import expm2, expm3
from scipy.optimize import minimize
def _get_al_mohy_higham_2012_experiment_1():
"""
Return the test matrix from Experiment (1) of [1]_.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.2210e-1, 3e4],
[0, 0, 0, 3.0744e-1]], dtype=float)
return A
class TestSignM(TestCase):
    """Tests for scipy.linalg.signm (the matrix sign function)."""

    def test_nils(self):
        # Dense 5x5 regression fixture with a known sign matrix.
        a = array([[29.2, -24.2, 69.5, 49.8, 7.],
                   [-9.2, 5.2, -18., -16.8, -2.],
                   [-10., 6., -20., -18., -2.],
                   [-9.6, 9.6, -25.5, -15.4, -2.],
                   [9.8, -4.8, 18., 18.2, 2.]])
        cr = array([[11.94933333, -2.24533333, 15.31733333, 21.65333333, -2.24533333],
                    [-3.84266667, 0.49866667, -4.59066667, -7.18666667, 0.49866667],
                    [-4.08, 0.56, -4.92, -7.6, 0.56],
                    [-4.03466667, 1.04266667, -5.59866667, -7.02666667, 1.04266667],
                    [4.15733333, -0.50133333, 4.90933333, 7.81333333, -0.50133333]])
        r = signm(a)
        assert_array_almost_equal(r, cr)

    def test_defective1(self):
        # Defective (non-diagonalizable) matrix: only checks that signm
        # completes with disp=False; no reference value is asserted.
        a = array([[0.0, 1, 0, 0], [1, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
        r = signm(a, disp=False)
        # XXX: what would be the correct result?

    def test_defective2(self):
        # Same matrix as test_nils, exercised via the non-raising path.
        a = array((
            [29.2, -24.2, 69.5, 49.8, 7.0],
            [-9.2, 5.2, -18.0, -16.8, -2.0],
            [-10.0, 6.0, -20.0, -18.0, -2.0],
            [-9.6, 9.6, -25.5, -15.4, -2.0],
            [9.8, -4.8, 18.0, 18.2, 2.0]))
        r = signm(a, disp=False)
        # XXX: what would be the correct result?

    def test_defective3(self):
        # 7x7 upper-triangular defective example: smoke test only.
        a = array([[-2., 25., 0., 0., 0., 0., 0.],
                   [0., -3., 10., 3., 3., 3., 0.],
                   [0., 0., 2., 15., 3., 3., 0.],
                   [0., 0., 0., 0., 15., 3., 0.],
                   [0., 0., 0., 0., 3., 10., 0.],
                   [0., 0., 0., 0., 0., -2., 25.],
                   [0., 0., 0., 0., 0., 0., -3.]])
        r = signm(a, disp=False)
        # XXX: what would be the correct result?
class TestLogM(TestCase):
    """Tests for the matrix logarithm ``scipy.linalg.logm``.

    Most tests verify the round trip ``expm(logm(M)) ~= M``.  All calls use
    ``disp=False`` so logm returns an error estimate instead of printing it.
    """
    def test_nils(self):
        # Smoke test on a matrix with an awkward spectrum; no reference
        # value is known, so only check that logm completes without error.
        a = array([[-2., 25., 0., 0., 0., 0., 0.],
                   [0., -3., 10., 3., 3., 3., 0.],
                   [0., 0., 2., 15., 3., 3., 0.],
                   [0., 0., 0., 0., 15., 3., 0.],
                   [0., 0., 0., 0., 3., 10., 0.],
                   [0., 0., 0., 0., 0., -2., 25.],
                   [0., 0., 0., 0., 0., 0., -3.]])
        m = (identity(7) * 3.1 + 0j) - a
        logm(m, disp=False)
        # XXX: what would be the correct result?
    def test_al_mohy_higham_2012_experiment_1_logm(self):
        # The logm completes the round trip successfully.
        # Note that the expm leg of the round trip is badly conditioned.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_logm, info = logm(A, disp=False)
        A_round_trip = expm(A_logm)
        assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
    def test_al_mohy_higham_2012_experiment_1_funm_log(self):
        # The raw funm with np.log does not complete the round trip.
        # Note that the expm leg of the round trip is badly conditioned.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_funm_log, info = funm(A, np.log, disp=False)
        A_round_trip = expm(A_funm_log)
        assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
    def test_round_trip_random_float(self):
        # Round trips through real matrices at many scales.
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                # Eigenvalues are related to the branch cut.
                W = np.linalg.eigvals(M)
                err_msg = 'M:{0} eivals:{1}'.format(M, W)
                # Check sqrtm round trip because it is used within logm.
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
                # Check logm round trip.
                M_logm, info = logm(M, disp=False)
                M_logm_round_trip = expm(M_logm)
                assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
    def test_round_trip_random_complex(self):
        # Round trips through complex matrices at many scales.
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_logm, info = logm(M, disp=False)
                M_round_trip = expm(M_logm)
                assert_allclose(M_round_trip, M)
    def test_logm_type_preservation_and_conversion(self):
        # The logm matrix function should preserve the type of a matrix
        # whose eigenvalues are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char not in complex_dtype_chars)
            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
    def test_complex_spectrum_real_logm(self):
        # This matrix has complex eigenvalues and real logm.
        # Its output dtype depends on its input dtype.
        M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
        for dt in float, complex:
            X = np.array(M, dtype=dt)
            w = scipy.linalg.eigvals(X)
            assert_(1e-2 < np.absolute(w.imag).sum())
            Y, info = logm(X, disp=False)
            assert_(np.issubdtype(Y.dtype, dt))
            assert_allclose(expm(Y), X)
    def test_real_mixed_sign_spectrum(self):
        # These matrices have real eigenvalues with mixed signs.
        # The output logm dtype is complex, regardless of input dtype.
        for M in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]]):
            for dt in float, complex:
                A = np.array(M, dtype=dt)
                A_logm, info = logm(A, disp=False)
                assert_(np.issubdtype(A_logm.dtype, complex))
    def test_exactly_singular(self):
        # Singular input should warn but still produce a usable result.
        A = np.array([[0, 0], [1j, 1j]])
        B = np.asarray([[1, 1], [0, 0]])
        for M in A, A.T, B, B.T:
            expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
            L, info = _assert_warns(expected_warning, logm, M, disp=False)
            E = expm(L)
            assert_allclose(E, M, atol=1e-14)
    def test_nearly_singular(self):
        # A tiny but nonzero matrix triggers the near-singularity warning.
        M = np.array([[1e-100]])
        expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
        L, info = _assert_warns(expected_warning, logm, M, disp=False)
        E = expm(L)
        assert_allclose(E, M, atol=1e-14)
    def test_opposite_sign_complex_eigenvalues(self):
        # See gh-6113
        E = [[0, 1], [-1, 0]]
        L = [[0, np.pi * 0.5], [-np.pi * 0.5, 0]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
        E = [[1j, 4], [0, -1j]]
        L = [[1j * np.pi * 0.5, 2 * np.pi], [0, -1j * np.pi * 0.5]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
        E = [[1j, 0], [0, -1j]]
        L = [[1j * np.pi * 0.5, 0], [0, -1j * np.pi * 0.5]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
class TestSqrtM(TestCase):
    """Tests for the matrix square root ``scipy.linalg.sqrtm``.

    Checks round trips (``sqrtm(M).dot(sqrtm(M)) ~= M``), dtype
    preservation/conversion rules, blocked Schur recursion, and matrices
    with no square root.
    """
    def test_round_trip_random_float(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
    def test_round_trip_random_complex(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
    def test_bad(self):
        # A classically ill-conditioned sqrtm example (Higham).
        # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
        e = 2 ** -5
        se = sqrt(e)
        a = array([[1.0, 0, 0, 1],
                   [0, e, 0, 0],
                   [0, 0, e, 0],
                   [0, 0, 0, 1]])
        sa = array([[1, 0, 0, 0.5],
                    [0, se, 0, 0],
                    [0, 0, se, 0],
                    [0, 0, 0, 1]])
        n = a.shape[0]
        assert_array_almost_equal(dot(sa, sa), a)
        # Check default sqrtm.
        esa = sqrtm(a, disp=False, blocksize=n)[0]
        assert_array_almost_equal(dot(esa, esa), a)
        # Check sqrtm with 2x2 blocks.
        esa = sqrtm(a, disp=False, blocksize=2)[0]
        assert_array_almost_equal(dot(esa, esa), a)
    def test_sqrtm_type_preservation_and_conversion(self):
        # The sqrtm matrix function should preserve the type of a matrix
        # whose eigenvalues are nonnegative with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]],
                [[1, 1], [1, 1]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
    def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
        # Mixed-sign or complex spectra force a complex result dtype.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))
            # check complex->complex
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
            # check float->complex
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
    def test_blocksizes(self):
        # Make sure I do not goof up the blocksizes when they do not divide n.
        np.random.seed(1234)
        for n in range(1, 8):
            A = np.random.rand(n, n) + 1j * np.random.randn(n, n)
            A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
            assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
            for blocksize in range(1, 10):
                A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
                assert_allclose(A_sqrtm_default, A_sqrtm_new)
    def test_al_mohy_higham_2012_experiment_1(self):
        # Matrix square root of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_sqrtm, info = sqrtm(A, disp=False)
        A_round_trip = A_sqrtm.dot(A_sqrtm)
        assert_allclose(A_round_trip, A, rtol=1e-5)
        assert_allclose(np.tril(A_round_trip), np.tril(A))
    def test_strict_upper_triangular(self):
        # This matrix has no square root.
        for dt in int, float:
            A = np.array([
                [0, 3, 0, 0],
                [0, 0, 3, 0],
                [0, 0, 0, 3],
                [0, 0, 0, 0]], dtype=dt)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(np.isnan(A_sqrtm).all())
    def test_weird_matrix(self):
        # The square root of matrix B exists.
        for dt in int, float:
            A = np.array([
                [0, 0, 1],
                [0, 0, 0],
                [0, 1, 0]], dtype=dt)
            B = np.array([
                [0, 1, 0],
                [0, 0, 0],
                [0, 0, 0]], dtype=dt)
            assert_array_equal(B, A.dot(A))
            # But scipy sqrtm is not clever enough to find it.
            B_sqrtm, info = sqrtm(B, disp=False)
            assert_(np.isnan(B_sqrtm).all())
    def test_disp(self):
        # With disp=True only the square root is returned (no info tuple).
        np.random.seed(1234)
        A = np.random.rand(3, 3)
        B = sqrtm(A, disp=True)
        assert_allclose(B.dot(B), A)
    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1 + 1j, 2], [0, 1 - 1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
class TestFractionalMatrixPower(TestCase):
    """Tests for ``scipy.linalg.fractional_matrix_power``.

    Exercises round trips ``(M**(1/p))**p ~= M``, consistency with
    ``expm(logm(M) * p)``, dtype rules, and singular-matrix behavior.
    """
    def test_round_trip_random_complex(self):
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1 / p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)
    def test_round_trip_random_float(self):
        # This test is more annoying because it can hit the branch cut;
        # this happens when the matrix has an eigenvalue
        # with no imaginary component and with a real negative component,
        # and it means that the principal branch does not exist.
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1 / p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)
    def test_larger_abs_fractional_matrix_powers(self):
        np.random.seed(1234)
        for n in (2, 3, 5):
            for i in range(10):
                M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                M_one_fifth = fractional_matrix_power(M, 0.2)
                # Test the round trip.
                M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
                assert_allclose(M, M_round_trip)
                # Test a large abs fractional power.
                X = fractional_matrix_power(M, -5.4)
                Y = np.linalg.matrix_power(M_one_fifth, -27)
                assert_allclose(X, Y)
                # Test another large abs fractional power.
                X = fractional_matrix_power(M, 3.8)
                Y = np.linalg.matrix_power(M_one_fifth, 19)
                assert_allclose(X, Y)
    def test_random_matrices_and_powers(self):
        # Each independent iteration of this fuzz test picks random parameters.
        # It tries to hit some edge cases.
        np.random.seed(1234)
        nsamples = 20
        for i in range(nsamples):
            # Sample a matrix size and a random real power.
            n = random.randrange(1, 5)
            p = np.random.randn()
            # Sample a random real or complex matrix.
            matrix_scale = np.exp(random.randrange(-4, 5))
            A = np.random.randn(n, n)
            if random.choice((True, False)):
                A = A + 1j * np.random.randn(n, n)
            A = A * matrix_scale
            # Check a couple of analytically equivalent ways
            # to compute the fractional matrix power.
            # These can be compared because they both use the principal branch.
            A_power = fractional_matrix_power(A, p)
            A_logm, info = logm(A, disp=False)
            A_power_expm_logm = expm(A_logm * p)
            assert_allclose(A_power, A_power_expm_logm)
    def test_al_mohy_higham_2012_experiment_1(self):
        # Fractional powers of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()
        # Test remainder matrix power.
        A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
        A_sqrtm, info = sqrtm(A, disp=False)
        A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
        A_power = fractional_matrix_power(A, 0.5)
        assert_array_equal(A_rem_power, A_power)
        assert_allclose(A_sqrtm, A_power)
        assert_allclose(A_sqrtm, A_funm_sqrt)
        # Test more fractional powers.
        for p in (1 / 2, 5 / 3):
            A_power = fractional_matrix_power(A, p)
            A_round_trip = fractional_matrix_power(A_power, 1 / p)
            assert_allclose(A_round_trip, A, rtol=1e-2)
            assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
    def test_briggs_helper_function(self):
        # The Briggs helper computes a**(2**-k) - 1 accurately.
        np.random.seed(1234)
        for a in np.random.randn(10) + 1j * np.random.randn(10):
            for k in range(5):
                x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
                x_expected = a ** np.exp2(-k) - 1
                assert_allclose(x_observed, x_expected)
    def test_type_preservation_and_conversion(self):
        # The fractional_matrix_power matrix function should preserve
        # the type of a matrix whose eigenvalues
        # are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):
                # check float type preservation
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char not in complex_dtype_chars)
                # check complex type preservation
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
                # check float->complex for the matrix negation
                A = -np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
    def test_type_conversion_mixed_sign_or_complex_spectrum(self):
        # Mixed-sign or complex spectra force a complex result dtype.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))
            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):
                # check complex->complex
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
                # check float->complex
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
    @decorators.knownfailureif(True, 'Too unstable across LAPACKs.')
    def test_singular(self):
        # Negative fractional powers do not work with singular matrices.
        for matrix_as_list in (
                [[0, 0], [0, 0]],
                [[1, 1], [1, 1]],
                [[1, 2], [3, 6]],
                [[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
            # Check fractional powers both for float and for complex types.
            for newtype in (float, complex):
                A = np.array(matrix_as_list, dtype=newtype)
                for p in (-0.7, -0.9, -2.4, -1.3):
                    A_power = fractional_matrix_power(A, p)
                    assert_(np.isnan(A_power).all())
                for p in (0.2, 1.43):
                    A_power = fractional_matrix_power(A, p)
                    A_round_trip = fractional_matrix_power(A_power, 1 / p)
                    assert_allclose(A_round_trip, A)
    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1 + 1j, 2], [0, 1 - 1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
class TestExpM(TestCase):
    """Tests for the matrix exponential ``expm``.

    Several tests compare against the deprecated ``expm2``/``expm3``
    implementations, so DeprecationWarnings are suppressed locally.
    """
    def test_zero(self):
        # exp of the zero matrix is the identity.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            a = array([[0., 0], [0, 0]])
            assert_array_almost_equal(expm(a), [[1, 0], [0, 1]])
            assert_array_almost_equal(expm2(a), [[1, 0], [0, 1]])
            assert_array_almost_equal(expm3(a), [[1, 0], [0, 1]])
    def test_consistency(self):
        # All three implementations should agree on real and complex input.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            a = array([[0., 1], [-1, 0]])
            assert_array_almost_equal(expm(a), expm2(a))
            assert_array_almost_equal(expm(a), expm3(a))
            a = array([[1j, 1], [-1, -2j]])
            assert_array_almost_equal(expm(a), expm2(a))
            assert_array_almost_equal(expm(a), expm3(a))
    def test_npmatrix(self):
        # np.matrix input should also be accepted.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            a = matrix([[3., 0], [0, -3.]])
            assert_array_almost_equal(expm(a), expm2(a))
    def test_single_elt(self):
        # expm of a 1x1 sparse matrix preserves the sparse type.
        # See gh-5853
        from scipy.sparse import csc_matrix
        vOne = -2.02683397006j
        vTwo = -2.12817566856j
        mOne = csc_matrix([[vOne]], dtype='complex')
        mTwo = csc_matrix([[vTwo]], dtype='complex')
        outOne = expm(mOne)
        outTwo = expm(mTwo)
        assert_equal(type(outOne), type(mOne))
        assert_equal(type(outTwo), type(mTwo))
        assert_allclose(outOne[0, 0], complex(-0.44039415155949196,
                                              -0.8978045395698304))
        assert_allclose(outTwo[0, 0], complex(-0.52896401032626006,
                                              -0.84864425749518878))
class TestExpmFrechet(TestCase):
    """Tests for ``expm_frechet`` (Frechet derivative of the matrix exp).

    The reference value uses the block-matrix identity: for
    ``M = [[A, E], [0, A]]``, ``expm(M)`` has the Frechet derivative
    of ``expm`` at ``A`` in direction ``E`` as its upper-right block.
    """
    def test_expm_frechet(self):
        # a test of the basic functionality
        M = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        expected_expm = scipy.linalg.expm(A)
        expected_frechet = scipy.linalg.expm(M)[:2, 2:]
        for kwargs in ({}, {'method': 'SPS'}, {'method': 'blockEnlarge'}):
            observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
    def test_small_norm_expm_frechet(self):
        # methodically test matrices with a range of norms, for better coverage
        M_original = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A_original = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E_original = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
        selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
        m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
        for ma, mb in m_neighbor_pairs:
            # Target a 1-norm between the Pade-order thresholds ell_a, ell_b
            # so each Pade approximation order in the algorithm is exercised.
            ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
            ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
            target_norm_1 = 0.5 * (ell_a + ell_b)
            scale = target_norm_1 / A_original_norm_1
            M = scale * M_original
            A = scale * A_original
            E = scale * E_original
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:2, 2:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
    def test_fuzz(self):
        # try a bunch of crazy inputs
        rfuncs = (
                np.random.uniform,
                np.random.normal,
                np.random.standard_cauchy,
                np.random.exponential)
        ntests = 100
        for i in range(ntests):
            # draw a matrix pair from a random distribution at a random norm
            rfunc = random.choice(rfuncs)
            target_norm_1 = random.expovariate(1.0)
            n = random.randrange(2, 16)
            A_original = rfunc(size=(n, n))
            E_original = rfunc(size=(n, n))
            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
            scale = target_norm_1 / A_original_norm_1
            A = scale * A_original
            E = scale * E_original
            M = np.vstack([
                np.hstack([A, E]),
                np.hstack([np.zeros_like(A), A])])
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:n, n:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
    def test_problematic_matrix(self):
        # this test case uncovered a bug which has since been fixed
        A = np.array([
            [1.50591997, 1.93537998],
            [0.41203263, 0.23443516],
            ], dtype=float)
        E = np.array([
            [1.87864034, 2.07055038],
            [1.34102727, 0.67341123],
            ], dtype=float)
        # NOTE(review): A_norm_1 is computed but never used below.
        A_norm_1 = scipy.linalg.norm(A, 1)
        sps_expm, sps_frechet = expm_frechet(
            A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
            A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)
    @decorators.slow
    @decorators.skipif(True, 'this test is deliberately slow')
    def test_medium_matrix(self):
        # profile this to see the speed difference
        n = 1000
        A = np.random.exponential(size=(n, n))
        E = np.random.exponential(size=(n, n))
        sps_expm, sps_frechet = expm_frechet(
            A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
            A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
p = np.reshape(p, A.shape)
p_norm = norm(p)
perturbation = eps * p * (A_norm / p_norm)
X_prime = expm(A + perturbation)
scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
return -scaled_relative_error
def _normalized_like(A, B):
return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
def _relative_error(f, A, perturbation):
X = f(A)
X_prime = f(A + perturbation)
return norm(X_prime - X) / norm(X)
class TestExpmConditionNumber(TestCase):
    """Tests for ``expm_cond``, the relative condition number of expm."""
    def test_expm_cond_smoke(self):
        # The condition number should always be strictly positive.
        np.random.seed(1234)
        for n in range(1, 4):
            A = np.random.randn(n, n)
            kappa = expm_cond(A)
            assert_array_less(0, kappa)
    def test_expm_bad_condition_number(self):
        # A famously ill-conditioned matrix should report a huge kappa.
        A = np.array([
            [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
            [0, -1.201010529, 9.634696872e4, -4.681048289e9],
            [0, 0, -1.132893222, 9.532491830e4],
            [0, 0, 0, -1.179475332],
            ])
        kappa = expm_cond(A)
        assert_array_less(1e36, kappa)
    def test_univariate(self):
        # For a 1x1 matrix [[x]] the condition number of expm is |x|.
        np.random.seed(12345)
        for x in np.linspace(-5, 5, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for x in np.logspace(-2, 2, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for i in range(10):
            A = np.random.randn(1, 1)
            assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
    @decorators.slow
    def test_expm_cond_fuzz(self):
        # Empirically validate kappa: search (via L-BFGS-B) for the
        # perturbation direction maximizing the relative error of expm,
        # then check it against random directions and against eps * kappa.
        np.random.seed(12345)
        eps = 1e-5
        nsamples = 10
        for i in range(nsamples):
            n = np.random.randint(2, 5)
            A = np.random.randn(n, n)
            A_norm = scipy.linalg.norm(A)
            X = expm(A)
            X_norm = scipy.linalg.norm(X)
            kappa = expm_cond(A)
            # Look for the small perturbation that gives the greatest
            # relative error.
            f = functools.partial(_help_expm_cond_search,
                                  A, A_norm, X, X_norm, eps)
            guess = np.ones(n * n)
            out = minimize(f, guess, method='L-BFGS-B')
            xopt = out.x
            yopt = f(xopt)
            p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
            p_best_relerr = _relative_error(expm, A, p_best)
            assert_allclose(p_best_relerr, -yopt * eps)
            # Check that the identified perturbation indeed gives greater
            # relative error than random perturbations with similar norms.
            for j in range(5):
                p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
                assert_allclose(norm(p_best), norm(p_rand))
                p_rand_relerr = _relative_error(expm, A, p_rand)
                assert_array_less(p_rand_relerr, p_best_relerr)
            # The greatest relative error should not be much greater than
            # eps times the condition number kappa.
            # In the limit as eps approaches zero it should never be greater.
            assert_array_less(p_best_relerr, (1 + 2 * eps) * eps * kappa)
# Allow running this test module directly via the numpy.testing runner.
if __name__ == "__main__":
    run_module_suite()
| mit |
oubiwann/myriad-worlds | myriad/story.py | 1 | 3923 | import yaml
from myriad.character import Player
from myriad.world import Map, World
from myriad.item import Item, OpenableItem, OpenableReadableItem, ReadableItem
# XXX maybe the story object should have a map attribute assigned based on
# story type... e.g., provided ASCII map, procedurally generated map, etc.
class Story(object):
    """Load a YAML story file and populate the world, items and characters.

    The constructor parses the story data, builds the map and world, then
    creates items, updates the landscape ("scape") objects, and places the
    characters.
    """
    def __init__(self, filename):
        # Parameters:
        #   filename: path to the YAML story definition.
        self.storyFile = filename
        self.stream = open(self.storyFile)
        # NOTE(review): yaml.load can instantiate arbitrary Python objects;
        # for story files from untrusted sources prefer yaml.safe_load.
        self.data = yaml.load(self.stream)
        # XXX map should be an attribute of the world
        self.map = Map(self.data.get("map"))
        self.world = World()
        self.world.setScapes(self.map.getScapes())
        # XXX what form do these take when the map is procedural?
        self.createItems()
        self.updateScapes()
        self.createCharacters()
    def _getItem(self, itemName):
        # Linear search of the raw YAML item dicts; returns None if absent.
        for item in self.data.get("items"):
            if item.get("name") == itemName:
                return item
    def getMap(self):
        return self.map.getData()
    def createItems(self):
        # Instantiate every item declared in the story data.
        # NOTE(review): the created Item objects are discarded here;
        # presumably Item registers instances on the class -- verify.
        itemsData = self.data.get("items")
        if not itemsData:
            return
        for itemData in itemsData:
            self.createItem(itemData)
    def updateScapes(self):
        # Copy per-scape YAML attributes (name, description, items, ...)
        # onto the scape objects produced by the map.
        scapesData = self.data.get("scapes")
        if not scapesData:
            return
        for scapeData in scapesData:
            scape = self.world.scapes.get(scapeData.get("room-key"))
            startingPlace = scapeData.get("startingPlace")
            if startingPlace:
                scape.startingPlace = True
                self.setStartingPlace(scape)
            scape.name = scapeData.get("name")
            # Re-register the scape under its human-readable name.
            self.world.scapes[scape.name] = scape
            scape.desc = scapeData.get("description")
            scape.gameOver = scapeData.get("gameOver")
            itemsList = scapeData.get("items")
            if not itemsList:
                continue
            for itemName in itemsList:
                self.processItem(itemName, scape)
    def createItem(self, itemData):
        # Build the appropriate Item subclass from one YAML item mapping.
        # Openable+Readable > Openable > Readable > plain Item.
        items = []
        if itemData.has_key("items"):
            # Nested items become the container's contents.
            itemNames = itemData.pop("items")
            items = [Item.items[x] for x in itemNames]
        if itemData.get("isOpenable") and itemData.get("isReadable"):
            itemData.pop("isReadable")
            item = OpenableReadableItem(itemData.get("name"), items)
        elif itemData.get("isOpenable"):
            item = OpenableItem(itemData.get("name"), items)
        elif itemData.get("isReadable"):
            # Pop the flag so it is not passed as a constructor kwarg.
            itemData.pop("isReadable")
            item = ReadableItem(**itemData)
        else:
            item = Item(**itemData)
        return item
    def processItem(self, itemName, scape):
        # XXX I don't like the way that all items are tracked on the Item
        # object... it doesn't make sense that every item in the world would
        # know about all other items in the world. Once that's fixed, we just
        # use the scape object's addItem method
        self.world.putItemInScape(itemName, scape)
    def setStartingPlace(self, tile):
        self.map.setStartingPlace(tile)
    def getStartingPlace(self):
        return self.map.getStartingPlace()
    def createCharacters(self):
        # Create the player character(s), give them their inventory, and
        # drop them at the map's starting place.
        charactersData = self.data.get("characters")
        if not charactersData:
            return
        for characterData in charactersData:
            if characterData.get("isPlayer") == True:
                player = Player(characterData.get("name"))
                for itemName in characterData.get("inventory"):
                    player.take(Item.items[itemName])
                self.world.placeCharacterInScape(
                    player, self.getStartingPlace(), isPlayer=True)
    def createLayers(self):
        # Placeholder: layer support is not implemented yet.
        layersData = self.data.get("layers")
        if not layersData:
            return
        for layerData in layersData:
            pass
| mit |
t0mm0/youtube-dl | youtube_dl/extractor/nerdcubed.py | 124 | 1131 | # coding: utf-8
from __future__ import unicode_literals
import datetime
from .common import InfoExtractor
class NerdCubedFeedIE(InfoExtractor):
    """Extractor for the nerdcubed.co.uk JSON feed.

    The feed is a JSON array of video entries; each entry is turned into a
    deferred YouTube URL and the whole feed is returned as one playlist.
    """
    _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/feed\.json'
    _TEST = {
        'url': 'http://www.nerdcubed.co.uk/feed.json',
        'info_dict': {
            'id': 'nerdcubed-feed',
            'title': 'nerdcubed.co.uk feed',
        },
        'playlist_mincount': 1300,
    }
    def _real_extract(self, url):
        feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
        entries = []
        for item in feed:
            source = item['source']
            # Feed dates are ISO "YYYY-MM-DD"; yt-dl wants "YYYYMMDD".
            parsed_date = datetime.datetime.strptime(item['date'], '%Y-%m-%d')
            entries.append({
                '_type': 'url',
                'title': item['title'],
                'uploader': source['name'] if source else None,
                'upload_date': parsed_date.strftime('%Y%m%d'),
                'url': "http://www.youtube.com/watch?v=" + item['youtube_id'],
            })
        return {
            '_type': 'playlist',
            'title': 'nerdcubed.co.uk feed',
            'id': 'nerdcubed-feed',
            'entries': entries,
        }
| unlicense |
xflows/clowdflows-backend | workflows/api/serializers.py | 1 | 14887 | import json
from django.contrib.auth.models import User
from django.db.models import Prefetch
from django.template.loader import render_to_string
from rest_framework import serializers
from rest_framework.reverse import reverse
from mothra.settings import STATIC_URL, MEDIA_URL
from streams.models import Stream
from workflows.models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal user representation: only the username is exposed."""
    class Meta:
        model = User
        fields = ('username',)
class AbstractOptionSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only serializer for an abstract input's selectable options."""
    class Meta:
        model = AbstractOption
        fields = ('name', 'value')
        read_only_fields = ('name', 'value')
class AbstractInputSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only serializer for widget-template inputs, with nested options."""
    id = serializers.IntegerField()
    options = AbstractOptionSerializer(many=True, read_only=True)
    class Meta:
        model = AbstractInput
        fields = (
            'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
            'parameter_type',
            'order', 'options')
        read_only_fields = (
            'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
            'parameter_type',
            'order', 'options')
class AbstractOutputSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only serializer for widget-template outputs."""
    id = serializers.IntegerField()
    class Meta:
        model = AbstractOutput
        fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
        read_only_fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
class AbstractWidgetSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for widget templates, with nested inputs/outputs."""
    id = serializers.IntegerField()
    inputs = AbstractInputSerializer(many=True, read_only=True)
    outputs = AbstractOutputSerializer(many=True, read_only=True)
    cfpackage = serializers.SerializerMethodField()
    visualization = serializers.SerializerMethodField()
    def get_cfpackage(self, obj):
        # Expose the model's ``package`` field under the API name "cfpackage".
        return obj.package
    def get_visualization(self, obj):
        # A widget is a visualization iff it declares a visualization view.
        return obj.visualization_view != ''
    class Meta:
        model = AbstractWidget
        fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'description', 'always_save_results')
        read_only_fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'always_save_results')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Recursive serializer for the widget-category tree."""
    widgets = AbstractWidgetSerializer(many=True, read_only=True)
    class Meta:
        model = Category
        fields = ('name', 'user', 'order', 'children', 'widgets')
        read_only_fields = ('name', 'user', 'order', 'children', 'widgets')
# A serializer cannot reference itself inside its own class body, so the
# recursive 'children' field is patched in after the class is defined.
CategorySerializer._declared_fields['children'] = CategorySerializer(many=True, read_only=True)
class ConnectionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for connections; adds URLs of the two connected widgets."""
    output_widget = serializers.SerializerMethodField()
    input_widget = serializers.SerializerMethodField()
    def get_output_widget(self, obj):
        # Build the detail URL from the FK column to avoid fetching the widget.
        request = self.context['request']
        return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.output.widget_id}))
        # return WidgetListSerializer(obj.output.widget, context=self.context).data["url"]
    def get_input_widget(self, obj):
        request = self.context['request']
        return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.input.widget_id}))
        # return WidgetListSerializer(obj.input.widget, context=self.context).data["url"]
    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared; newer DRF
        # versions require one of them -- confirm the pinned DRF version.
        model = Connection
class OptionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for concrete input options (name/value pairs)."""
    class Meta:
        model = Option
        fields = ('name', 'value')
class InputSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for widget ``Input`` objects.

    The raw ``value`` column is excluded; it is exposed only through
    ``deserialized_value`` so that values which are not JSON serializable
    are rendered via ``repr`` instead of breaking the response.
    """
    id = serializers.IntegerField(read_only=True)
    deserialized_value = serializers.SerializerMethodField()
    options = OptionSerializer(many=True, read_only=True)
    abstract_input_id = serializers.SerializerMethodField()
    def get_deserialized_value(self, obj):
        # Only parameter inputs carry a displayable value; other inputs
        # render as an empty string.
        if obj.parameter:
            try:
                json.dumps(obj.value)
            except (TypeError, ValueError):
                # json.dumps raises TypeError for unsupported types and
                # ValueError for e.g. circular references.  The previous
                # bare ``except:`` also swallowed SystemExit and
                # KeyboardInterrupt, which must propagate.
                return repr(obj.value)
            else:
                return obj.value
        else:
            return ''
    def get_abstract_input_id(self, obj):
        # Read the FK column directly to avoid fetching the related row.
        return obj.abstract_input_id
    class Meta:
        model = Input
        exclude = ('value', 'abstract_input')
        read_only_fields = ('id', 'url', 'widget')
class OutputSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for widget ``Output`` objects (value column excluded)."""
    id = serializers.IntegerField(read_only=True)
    abstract_output_id = serializers.SerializerMethodField()
    def get_abstract_output_id(self, obj):
        # Read the FK column directly to avoid fetching the related row.
        return obj.abstract_output_id
    class Meta:
        model = Output
        exclude = ('value', 'abstract_output')
        read_only_fields = ('id', 'url', 'widget')
def get_workflow_preview(request, obj):
    """Render a small HTML preview of a workflow's layout.

    Normalizes widget coordinates into a 300x200 box, computes straight-line
    connection endpoints between widget pairs, and renders 'preview.html'.

    Parameters:
        request: the current HttpRequest (used to build absolute image URLs).
        obj: a Workflow instance; preview attributes are attached to it.
    """
    # Bounding box of the raw widget coordinates.
    min_x = 10000
    min_y = 10000
    max_x = 0
    max_y = 0
    # Target preview canvas size in pixels.
    max_width = 300
    max_height = 200
    normalized_values = {}
    obj.normalized_widgets = obj.widgets.all()
    obj.unique_connections = []
    obj.pairs = []
    for widget in obj.normalized_widgets:
        if widget.x > max_x:
            max_x = widget.x
        if widget.x < min_x:
            min_x = widget.x
        if widget.y > max_y:
            max_y = widget.y
        if widget.y < min_y:
            min_y = widget.y
    for widget in obj.normalized_widgets:
        # Shift to the origin; * 1.0 forces float division below (Python 2).
        x = (widget.x - min_x) * 1.0
        y = (widget.y - min_y) * 1.0
        normalized_max_x = max_x - min_x
        # Guard against zero coordinates / degenerate (single-column or
        # single-row) layouts so the division below never hits zero.
        if x == 0:
            x = 1
        if y == 0:
            y = 1
        if normalized_max_x == 0:
            normalized_max_x = x * 2
        normalized_max_y = max_y - min_y
        if normalized_max_y == 0:
            normalized_max_y = y * 2
        widget.norm_x = (x / normalized_max_x) * max_width
        widget.norm_y = (y / normalized_max_y) * max_height
        normalized_values[widget.id] = (widget.norm_x, widget.norm_y)
    # Deduplicate connections to one line per widget pair.
    for c in obj.connections.all():
        if not (c.output.widget_id, c.input.widget_id) in obj.pairs:
            obj.pairs.append((c.output.widget_id, c.input.widget_id))
    for pair in obj.pairs:
        # Pixel offsets shift line endpoints to the widget icon edges.
        conn = {}
        conn['x1'] = normalized_values[pair[0]][0] + 40
        conn['y1'] = normalized_values[pair[0]][1] + 15
        conn['x2'] = normalized_values[pair[1]][0] - 10
        conn['y2'] = normalized_values[pair[1]][1] + 15
        obj.unique_connections.append(conn)
    base_url = request.build_absolute_uri('/')[:-1]
    images_url = '{}{}'.format(base_url, STATIC_URL)
    preview_html = render_to_string('preview.html', {'w': obj, 'images_url': images_url})
    return preview_html
class StreamSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for streaming-execution metadata attached to a workflow."""
    id = serializers.IntegerField(read_only=True)
    last_executed = serializers.DateTimeField(read_only=True)
    period = serializers.IntegerField()
    active = serializers.BooleanField(read_only=True)
    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared; newer DRF
        # versions require one of them -- confirm the pinned DRF version.
        model = Stream
class WorkflowListSerializer(serializers.HyperlinkedModelSerializer):
    """Compact workflow representation used by list endpoints."""
    id = serializers.IntegerField(read_only=True)
    user = UserSerializer(read_only=True)
    stream = StreamSerializer()
    is_subprocess = serializers.SerializerMethodField()
    is_public = serializers.BooleanField(source='public')
    can_be_streaming = serializers.SerializerMethodField()
    def get_is_subprocess(self, obj):
        # A workflow is a subprocess iff it is attached to a widget.
        # (Replaces the ``if obj.widget == None: return False else: return
        # True`` anti-pattern with an identity test.)
        return obj.widget is not None
    def get_can_be_streaming(self, obj):
        return obj.can_be_streaming()
    def get_stream_active(self, obj):
        # NOTE(review): not referenced by any declared field; appears unused.
        return None
    class Meta:
        model = Workflow
        exclude = ('public',)
class WorkflowPreviewSerializer(WorkflowListSerializer):
    """Workflow list representation plus a rendered HTML layout preview."""
    preview = serializers.SerializerMethodField()
    def get_preview(self, obj):
        # Delegates to the module-level preview renderer.
        return get_workflow_preview(self.context['request'], obj)
class WidgetSerializer(serializers.HyperlinkedModelSerializer):
    """Full widget representation, including nested inputs/outputs.

    ``create``/``update`` are overridden because DRF does not handle these
    nested relations automatically: on create, the widget's inputs, outputs
    and options are instantiated from its abstract widget's templates.
    """
    id = serializers.IntegerField(read_only=True)
    inputs = InputSerializer(many=True, read_only=True)
    outputs = OutputSerializer(many=True, read_only=True)
    # Description is sourced from the abstract widget (the widget "type").
    description = serializers.CharField(source='abstract_widget.description', read_only=True)
    icon = serializers.SerializerMethodField()
    must_save = serializers.SerializerMethodField()
    can_interact = serializers.SerializerMethodField()
    workflow_link = serializers.HyperlinkedRelatedField(
        read_only=True,
        view_name='workflow-detail'
    )
    # allow_null: widgets of special types (input/output/subprocess) have no
    # abstract widget behind them.
    abstract_widget = serializers.PrimaryKeyRelatedField(queryset=AbstractWidget.objects.all(), allow_null=True)
    def create(self, validated_data):
        '''
        Overrides the default create method to support nested creates:
        instantiates the widget's concrete inputs, outputs and options from
        the abstract widget's templates.
        '''
        w = Widget.objects.create(**validated_data)
        aw = w.abstract_widget
        # Parameters and plain inputs are numbered independently.
        input_order, param_order = 0, 0
        for i in aw.inputs.all():
            # Copy the abstract input template onto a concrete Input.
            j = Input()
            j.name = i.name
            j.short_name = i.short_name
            j.description = i.description
            j.variable = i.variable
            j.widget = w
            j.required = i.required
            j.parameter = i.parameter
            j.value = None
            j.abstract_input = i
            if (i.parameter):
                param_order += 1
                j.order = param_order
            else:
                input_order += 1
                j.order = input_order
            # Multi-inputs stay empty (they are cloned on demand); plain
            # inputs start from the template's default value.
            if not i.multi:
                j.value = i.default
            j.parameter_type = i.parameter_type
            if i.multi:
                # Remember which template this clone family belongs to.
                j.multi_id = i.id
            j.save()
            # Options (e.g. dropdown choices) are copied per input.
            for k in i.options.all():
                o = Option()
                o.name = k.name
                o.value = k.value
                o.input = j
                o.save()
        outputOrder = 0
        for i in aw.outputs.all():
            j = Output()
            j.name = i.name
            j.short_name = i.short_name
            j.description = i.description
            j.variable = i.variable
            j.widget = w
            j.abstract_output = i
            outputOrder += 1
            j.order = outputOrder
            j.save()
        # Pre-fetch I/O without the (potentially large) pickled values.
        w.defered_outputs = w.outputs.defer("value").all()
        w.defered_inputs = w.inputs.defer("value").all()
        return w
    def update(self, widget, validated_data):
        '''
        Overrides the default update method to support nested creates
        '''
        # Ignore inputs and outputs on patch - we allow only nested creates
        if 'inputs' in validated_data:
            validated_data.pop('inputs')
        if 'outputs' in validated_data:
            validated_data.pop('outputs')
        widget, _ = Widget.objects.update_or_create(pk=widget.pk, defaults=validated_data)
        # Keep the linked sub-workflow's name in sync with the widget's name.
        if widget.type == 'subprocess':
            widget.workflow_link.name = widget.name
            widget.workflow_link.save()
        return widget
    def get_must_save(self, widget):
        '''
        Some widget always require their inputs and outputs to be saved:
        interactive widgets, visualizations, and widgets explicitly flagged
        to always persist results.
        '''
        must_save = False
        if widget.abstract_widget:
            must_save = widget.abstract_widget.interactive or widget.is_visualization() or widget.abstract_widget.always_save_results
        return must_save
    def get_can_interact(self, widget):
        """True when the widget's abstract widget is marked interactive."""
        can_interact = False
        if widget.abstract_widget:
            can_interact = widget.abstract_widget.interactive
        return can_interact
    def get_icon(self, widget):
        """Build an absolute URL for the widget's icon image.

        Falls back to a question-mark icon when no specific icon applies.
        """
        # Derive scheme and host from the current request URL,
        # e.g. 'https:' and 'example.com' from 'https://example.com/...'.
        full_path_tokens = self.context['request'].build_absolute_uri().split('/')
        protocol = full_path_tokens[0]
        base_url = full_path_tokens[2]
        icon_path = 'special_icons/question-mark.png'
        static_or_media = STATIC_URL
        if widget.abstract_widget:
            if widget.abstract_widget.static_image:
                icon_path = '{}/icons/widget/{}'.format(widget.abstract_widget.package,
                                                        widget.abstract_widget.static_image)
            elif widget.abstract_widget.image:
                # Uploaded images live under MEDIA_URL, not STATIC_URL.
                static_or_media = MEDIA_URL
                icon_path = widget.abstract_widget.image
            elif widget.abstract_widget.wsdl:
                icon_path = 'special_icons/ws.png'
        elif hasattr(widget, 'workflow_link'):
            icon_path = 'special_icons/subprocess.png'
        elif widget.type == 'input':
            icon_path = 'special_icons/forward-arrow.png'
        elif widget.type == 'output':
            icon_path = 'special_icons/forward-arrow.png'
        # NOTE(review): this branch repeats the 'output' test above, so it is
        # unreachable and the loop icon is never selected -- the intended
        # condition was probably a different widget type (a loop-related one);
        # confirm against the Widget.type choices.
        elif widget.type == 'output':
            icon_path = 'special_icons/loop.png'
        icon_url = '{}//{}{}{}'.format(protocol, base_url, static_or_media, icon_path)
        return icon_url
    class Meta:
        model = Widget
        fields = (
            'id', 'url', 'workflow', 'x', 'y', 'name', 'save_results', 'must_save', 'can_interact', 'abstract_widget', 'finished',
            'error', 'running', 'interaction_waiting', 'description', 'icon', 'type', 'progress', 'inputs', 'outputs',
            'workflow_link')
class WidgetPositionSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal serializer exposing only a widget's canvas coordinates."""
    class Meta:
        model = Widget
        fields = ('x', 'y')
class WidgetListSerializer(serializers.HyperlinkedModelSerializer):
    """Widget representation for list views.

    The abstract widget is serialized as a primary key rather than nested,
    keeping list payloads small.
    """
    abstract_widget = serializers.PrimaryKeyRelatedField(read_only=True)
    class Meta:
        model = Widget
        # exclude = ('abstract_widget',)
class StreamWidgetSerializer(serializers.HyperlinkedModelSerializer):
    """Compact widget reference (id, url, name) for stream detail payloads."""
    id = serializers.IntegerField(read_only=True)
    class Meta:
        model = Widget
        fields = ('id', 'url', 'name')
class WorkflowSerializer(serializers.HyperlinkedModelSerializer):
    """Full workflow representation, including nested widgets and connections."""
    id = serializers.IntegerField(read_only=True)
    widgets = WidgetSerializer(many=True, read_only=True)
    user = UserSerializer(read_only=True)
    connections = ConnectionSerializer(many=True, read_only=True)
    is_subprocess = serializers.SerializerMethodField()
    # Expose the model's 'public' flag under the API name 'is_public'.
    is_public = serializers.BooleanField(source='public')

    def get_is_subprocess(self, obj):
        """A workflow is a subprocess iff it is attached to a parent widget."""
        # Identity test ('is not None') instead of '== None' (PEP 8), and a
        # direct boolean return instead of an if/else spelling out True/False.
        return obj.widget is not None

    class Meta:
        model = Workflow
        # 'public' is already exposed as 'is_public' above.
        exclude = ('public',)
class StreamDetailSerializer(StreamSerializer):
    """Stream serializer extended with its workflow and visualization widgets."""
    workflow = WorkflowListSerializer(read_only=True)
    stream_visualization_widgets = serializers.SerializerMethodField()
    def get_stream_visualization_widgets(self, obj):
        # Serialize the model-provided widget list; the request is passed
        # through so hyperlinked fields can build absolute URLs.
        widgets = obj.stream_visualization_widgets()
        data = StreamWidgetSerializer(widgets, many=True, read_only=True, context={'request': self.context['request']}).data
        return data
| mit |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/distutils/command/install_egg_info.py | 438 | 2587 | """distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
    """Install an .egg-info file for the package.

    Writes the distribution's PKG-INFO metadata to
    ``<install_dir>/<name>-<version>-py<X.Y>.egg-info``.
    """

    description = "Install package's PKG-INFO metadata as an .egg-info file"
    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Filled in from the 'install_lib' command in finalize_options().
        self.install_dir = None

    def finalize_options(self):
        self.set_undefined_options('install_lib',('install_dir','install_dir'))
        # Build e.g. "My_Package-1.0-py3.10.egg-info".  Use sys.version_info
        # instead of sys.version[:3], which truncates two-digit minor
        # versions ("3.10" would become "3.1").
        basename = "%s-%s-py%d.%d.egg-info" % (
            to_filename(safe_name(self.distribution.get_name())),
            to_filename(safe_version(self.distribution.get_version())),
            sys.version_info[0], sys.version_info[1]
        )
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]

    def run(self):
        target = self.target
        # Remove whatever is already at the target path (a stale directory,
        # symlink or file) so a fresh metadata *file* can be written; create
        # the install directory if it does not exist yet.
        if os.path.isdir(target) and not os.path.islink(target):
            dir_util.remove_tree(target, dry_run=self.dry_run)
        elif os.path.exists(target):
            self.execute(os.unlink,(self.target,),"Removing "+target)
        elif not os.path.isdir(self.install_dir):
            self.execute(os.makedirs, (self.install_dir,),
                         "Creating "+self.install_dir)
        log.info("Writing %s", target)
        if not self.dry_run:
            # Context manager guarantees the file is closed even on error.
            with open(target, 'w') as f:
                self.distribution.metadata.write_pkg_file(f)

    def get_outputs(self):
        """Return the list of files created by this command."""
        return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    illegal_run = re.compile(r'[^A-Za-z0-9.]+')
    return illegal_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string.

    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    illegal_run = re.compile(r'[^A-Za-z0-9.]+')
    return illegal_run.sub('-', dotted)
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
| gpl-2.0 |
knabar/openmicroscopy | components/tools/OmeroWeb/test/integration/test_metadata.py | 3 | 2890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Glencoe Software, Inc.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests display of metadata in webclient
"""
import omero
from omeroweb.testlib import IWebTest
from omeroweb.testlib import get
from django.core.urlresolvers import reverse
from omero.model.enums import UnitsLength
from omero_model_ImageI import ImageI
from omero.rtypes import rstring
class TestCoreMetadata(IWebTest):
    """
    Tests display of core metadata in the webclient's right-hand panel.
    """
    def test_pixel_size_units(self):
        # Verifies that pixel sizes are shown in micrometers by default, and
        # that after storing an explicit PIXEL-unit size the panel switches to
        # per-value unit display.
        # Create image
        iid = self.create_test_image(size_c=2, session=self.sf).id.val
        # show right panel for image
        request_url = reverse('load_metadata_details', args=['image', iid])
        data = {}
        rsp = get(self.django_client, request_url, data, status_code=200)
        html = rsp.content
        # Units are µm by default
        assert "Pixels Size (XYZ) (µm):" in html
        # Now save units as PIXELs and view again
        conn = omero.gateway.BlitzGateway(client_obj=self.client)
        i = conn.getObject("Image", iid)
        u = omero.model.LengthI(1.2, UnitsLength.PIXEL)
        p = i.getPrimaryPixels()._obj
        p.setPhysicalSizeX(u)
        p.setPhysicalSizeY(u)
        conn.getUpdateService().saveObject(p)
        # Should now be showning pixels
        rsp = get(self.django_client, request_url, data, status_code=200)
        html = rsp.content
        assert "Pixels Size (XYZ):" in html
        assert "1.20 (pixel)" in html
    def test_none_pixel_size(self):
        """
        Tests that display of core metadata still works even when the image
        doesn't have pixels data.
        """
        # Image is created directly (no pixels attached) to exercise the
        # metadata panel's handling of pixel-less images.
        img = ImageI()
        img.setName(rstring("no_pixels"))
        img.setDescription(rstring("empty image without pixels data"))
        conn = omero.gateway.BlitzGateway(client_obj=self.client)
        img = conn.getUpdateService().saveAndReturnObject(img)
        request_url = reverse('load_metadata_details',
                              args=['image', img.id.val])
        # Just check that the metadata panel is loaded
        rsp = get(self.django_client, request_url, status_code=200)
        assert "no_pixels" in rsp.content
| gpl-2.0 |
jlopezmalla/spark | python/pyspark/streaming/dstream.py | 85 | 27812 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import operator
import time
from itertools import chain
from datetime import datetime
# Python 2/3 compatibility: on Python 2, use the lazy iterator versions of
# map/filter.  On Python 3 the builtins are already lazy, but the `long`
# type is gone, so alias it to `int` for code that still calls long()
# (e.g. DStream._jtime) -- without this alias that code raises NameError
# on Python 3.
if sys.version < "3":
    from itertools import imap as map, ifilter as filter
else:
    long = int
from py4j.protocol import Py4JJavaError
from pyspark import RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.util import rddToFileName, TransformFunction
from pyspark.rdd import portable_hash
from pyspark.resultiterable import ResultIterable
__all__ = ["DStream"]
class DStream(object):
    """
    A Discretized Stream (DStream), the basic abstraction in Spark Streaming,
    is a continuous sequence of RDDs (of the same type) representing a
    continuous stream of data (see L{RDD} in the Spark core documentation
    for more details on RDDs).
    DStreams can either be created from live data (such as, data from TCP
    sockets, Kafka, Flume, etc.) using a L{StreamingContext} or it can be
    generated by transforming existing DStreams using operations such as
    `map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming
    program is running, each DStream periodically generates a RDD, either
    from live data or by transforming the RDD generated by a parent DStream.
    DStreams internally is characterized by a few basic properties:
        - A list of other DStreams that the DStream depends on
        - A time interval at which the DStream generates an RDD
        - A function that is used to generate an RDD after each time interval
    """
    def __init__(self, jdstream, ssc, jrdd_deserializer):
        # Java-side DStream handle plus the contexts and the serializer used
        # to deserialize the RDDs this stream produces on the Python side.
        self._jdstream = jdstream
        self._ssc = ssc
        self._sc = ssc._sc
        self._jrdd_deserializer = jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
    def context(self):
        """
        Return the StreamingContext associated with this DStream
        """
        return self._ssc
    def count(self):
        """
        Return a new DStream in which each RDD has a single element
        generated by counting each RDD of this DStream.
        """
        # Count per partition first, then sum the partial counts.
        return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
    def filter(self, f):
        """
        Return a new DStream containing only the elements that satisfy predicate.
        """
        def func(iterator):
            return filter(f, iterator)
        return self.mapPartitions(func, True)
    def flatMap(self, f, preservesPartitioning=False):
        """
        Return a new DStream by applying a function to all elements of
        this DStream, and then flattening the results
        """
        def func(s, iterator):
            return chain.from_iterable(map(f, iterator))
        return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def map(self, f, preservesPartitioning=False):
        """
        Return a new DStream by applying a function to each element of DStream.
        """
        def func(iterator):
            return map(f, iterator)
        return self.mapPartitions(func, preservesPartitioning)
    def mapPartitions(self, f, preservesPartitioning=False):
        """
        Return a new DStream in which each RDD is generated by applying
        mapPartitions() to each RDDs of this DStream.
        """
        def func(s, iterator):
            return f(iterator)
        return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new DStream in which each RDD is generated by applying
        mapPartitionsWithIndex() to each RDDs of this DStream.
        """
        return self.transform(lambda rdd: rdd.mapPartitionsWithIndex(f, preservesPartitioning))
    def reduce(self, func):
        """
        Return a new DStream in which each RDD has a single element
        generated by reducing each RDD of this DStream.
        """
        # Key everything under None so reduceByKey collapses the whole RDD
        # into a single value, then strip the dummy key.
        return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
    def reduceByKey(self, func, numPartitions=None):
        """
        Return a new DStream by applying reduceByKey to each RDD.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.combineByKey(lambda x: x, func, func, numPartitions)
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None):
        """
        Return a new DStream by applying combineByKey to each RDD.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        def func(rdd):
            return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
        return self.transform(func)
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the DStream in which each RDD are partitioned
        using the specified partitioner.
        """
        return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
    def foreachRDD(self, func):
        """
        Apply a function to each RDD in this DStream.
        """
        # Accept both func(rdd) and func(time, rdd): normalize the one-arg
        # form to the two-arg form expected by the Java callback.
        if func.__code__.co_argcount == 1:
            old_func = func
            func = lambda t, rdd: old_func(rdd)
        jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
        api = self._ssc._jvm.PythonDStream
        api.callForeachRDD(self._jdstream, jfunc)
    def pprint(self, num=10):
        """
        Print the first num elements of each RDD generated in this DStream.
        @param num: the number of elements from the first will be printed.
        """
        def takeAndPrint(time, rdd):
            # Take one extra element to know whether to print the "..."
            # truncation marker.
            taken = rdd.take(num + 1)
            print("-------------------------------------------")
            print("Time: %s" % time)
            print("-------------------------------------------")
            for record in taken[:num]:
                print(record)
            if len(taken) > num:
                print("...")
            print("")
        self.foreachRDD(takeAndPrint)
    def mapValues(self, f):
        """
        Return a new DStream by applying a map function to the value of
        each key-value pairs in this DStream without changing the key.
        """
        map_values_fn = lambda kv: (kv[0], f(kv[1]))
        return self.map(map_values_fn, preservesPartitioning=True)
    def flatMapValues(self, f):
        """
        Return a new DStream by applying a flatmap function to the value
        of each key-value pairs in this DStream without changing the key.
        """
        flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
        return self.flatMap(flat_map_fn, preservesPartitioning=True)
    def glom(self):
        """
        Return a new DStream in which RDD is generated by applying glom()
        to RDD of this DStream.
        """
        def func(iterator):
            yield list(iterator)
        return self.mapPartitions(func)
    def cache(self):
        """
        Persist the RDDs of this DStream with the default storage level
        (C{MEMORY_ONLY}).
        """
        self.is_cached = True
        self.persist(StorageLevel.MEMORY_ONLY)
        return self
    def persist(self, storageLevel):
        """
        Persist the RDDs of this DStream with the given storage level
        """
        self.is_cached = True
        javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
        self._jdstream.persist(javaStorageLevel)
        return self
    def checkpoint(self, interval):
        """
        Enable periodic checkpointing of RDDs of this DStream
        @param interval: time in seconds, after each period of that, generated
                         RDD will be checkpointed
        """
        self.is_checkpointed = True
        self._jdstream.checkpoint(self._ssc._jduration(interval))
        return self
    def groupByKey(self, numPartitions=None):
        """
        Return a new DStream by applying groupByKey on each RDD.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
    def countByValue(self):
        """
        Return a new DStream in which each RDD contains the counts of each
        distinct value in each RDD of this DStream.
        """
        return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y)
    def saveAsTextFiles(self, prefix, suffix=None):
        """
        Save each RDD in this DStream as at text file, using string
        representation of elements.
        """
        def saveAsTextFile(t, rdd):
            path = rddToFileName(prefix, suffix, t)
            try:
                rdd.saveAsTextFile(path)
            except Py4JJavaError as e:
                # after recovered from checkpointing, the foreachRDD may
                # be called twice
                if 'FileAlreadyExistsException' not in str(e):
                    raise
        return self.foreachRDD(saveAsTextFile)
    # TODO: keep this commented out until we have ssc.pickleFileStream()
    # def saveAsPickleFiles(self, prefix, suffix=None):
    #     """
    #     Save each RDD in this DStream as at binary file, the elements are
    #     serialized by pickle.
    #     """
    #     def saveAsPickleFile(t, rdd):
    #         path = rddToFileName(prefix, suffix, t)
    #         try:
    #             rdd.saveAsPickleFile(path)
    #         except Py4JJavaError as e:
    #             # after recovered from checkpointing, the foreachRDD may
    #             # be called twice
    #             if 'FileAlreadyExistsException' not in str(e):
    #                 raise
    #     return self.foreachRDD(saveAsPickleFile)
    def transform(self, func):
        """
        Return a new DStream in which each RDD is generated by applying a function
        on each RDD of this DStream.
        `func` can have one argument of `rdd`, or have two arguments of
        (`time`, `rdd`)
        """
        # Normalize func(rdd) to func(time, rdd); the original is captured in
        # a local to avoid a recursive reference in the lambda.
        if func.__code__.co_argcount == 1:
            oldfunc = func
            func = lambda t, rdd: oldfunc(rdd)
        assert func.__code__.co_argcount == 2, "func should take one or two arguments"
        return TransformedDStream(self, func)
    def transformWith(self, func, other, keepSerializer=False):
        """
        Return a new DStream in which each RDD is generated by applying a function
        on each RDD of this DStream and 'other' DStream.
        `func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
        arguments of (`time`, `rdd_a`, `rdd_b`)
        """
        if func.__code__.co_argcount == 2:
            oldfunc = func
            func = lambda t, a, b: oldfunc(a, b)
        assert func.__code__.co_argcount == 3, "func should take two or three arguments"
        jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
        dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(),
                                                          other._jdstream.dstream(), jfunc)
        jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
        return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
    def repartition(self, numPartitions):
        """
        Return a new DStream with an increased or decreased level of parallelism.
        """
        return self.transform(lambda rdd: rdd.repartition(numPartitions))
    @property
    def _slideDuration(self):
        """
        Return the slideDuration in seconds of this DStream
        """
        # The Java side reports milliseconds; convert to (float) seconds.
        return self._jdstream.dstream().slideDuration().milliseconds() / 1000.0
    def union(self, other):
        """
        Return a new DStream by unifying data of another DStream with this DStream.
        @param other: Another DStream having the same interval (i.e., slideDuration)
                     as this DStream.
        """
        if self._slideDuration != other._slideDuration:
            raise ValueError("the two DStream should have same slide duration")
        return self.transformWith(lambda a, b: a.union(b), other, True)
    def cogroup(self, other, numPartitions=None):
        """
        Return a new DStream by applying 'cogroup' between RDDs of this
        DStream and `other` DStream.
        Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
    def join(self, other, numPartitions=None):
        """
        Return a new DStream by applying 'join' between RDDs of this DStream and
        `other` DStream.
        Hash partitioning is used to generate the RDDs with `numPartitions`
        partitions.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transformWith(lambda a, b: a.join(b, numPartitions), other)
    def leftOuterJoin(self, other, numPartitions=None):
        """
        Return a new DStream by applying 'left outer join' between RDDs of this DStream and
        `other` DStream.
        Hash partitioning is used to generate the RDDs with `numPartitions`
        partitions.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transformWith(lambda a, b: a.leftOuterJoin(b, numPartitions), other)
    def rightOuterJoin(self, other, numPartitions=None):
        """
        Return a new DStream by applying 'right outer join' between RDDs of this DStream and
        `other` DStream.
        Hash partitioning is used to generate the RDDs with `numPartitions`
        partitions.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transformWith(lambda a, b: a.rightOuterJoin(b, numPartitions), other)
    def fullOuterJoin(self, other, numPartitions=None):
        """
        Return a new DStream by applying 'full outer join' between RDDs of this DStream and
        `other` DStream.
        Hash partitioning is used to generate the RDDs with `numPartitions`
        partitions.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        return self.transformWith(lambda a, b: a.fullOuterJoin(b, numPartitions), other)
    def _jtime(self, timestamp):
        """ Convert datetime or unix_timestamp into Time
        """
        if isinstance(timestamp, datetime):
            timestamp = time.mktime(timestamp.timetuple())
        # NOTE(review): `long` is a Python 2 builtin; on Python 3 this line
        # raises NameError unless the module aliases `long` to `int` --
        # confirm such a compatibility alias exists at module level.
        return self._sc._jvm.Time(long(timestamp * 1000))
    def slice(self, begin, end):
        """
        Return all the RDDs between 'begin' to 'end' (both included)
        `begin`, `end` could be datetime.datetime() or unix_timestamp
        """
        jrdds = self._jdstream.slice(self._jtime(begin), self._jtime(end))
        return [RDD(jrdd, self._sc, self._jrdd_deserializer) for jrdd in jrdds]
    def _validate_window_param(self, window, slide):
        # Window and slide are given in seconds; the Java-side duration is in
        # milliseconds, and both must be exact multiples of the batch interval.
        duration = self._jdstream.dstream().slideDuration().milliseconds()
        if int(window * 1000) % duration != 0:
            raise ValueError("windowDuration must be multiple of the slide duration (%d ms)"
                             % duration)
        if slide and int(slide * 1000) % duration != 0:
            raise ValueError("slideDuration must be multiple of the slide duration (%d ms)"
                             % duration)
    def window(self, windowDuration, slideDuration=None):
        """
        Return a new DStream in which each RDD contains all the elements in seen in a
        sliding window of time over this DStream.
        @param windowDuration: width of the window; must be a multiple of this DStream's
                              batching interval
        @param slideDuration:  sliding interval of the window (i.e., the interval after which
                              the new DStream will generate RDDs); must be a multiple of this
                              DStream's batching interval
        """
        self._validate_window_param(windowDuration, slideDuration)
        d = self._ssc._jduration(windowDuration)
        if slideDuration is None:
            return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
        s = self._ssc._jduration(slideDuration)
        return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
    def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
        """
        Return a new DStream in which each RDD has a single element generated by reducing all
        elements in a sliding window over this DStream.
        if `invReduceFunc` is not None, the reduction is done incrementally
        using the old window's reduced value :
        1. reduce the new values that entered the window (e.g., adding new counts)
        2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        This is more efficient than `invReduceFunc` is None.
        @param reduceFunc:     associative and commutative reduce function
        @param invReduceFunc:  inverse reduce function of `reduceFunc`; such that for all y,
                               and invertible x:
                               `invReduceFunc(reduceFunc(x, y), x) = y`
        @param windowDuration: width of the window; must be a multiple of this DStream's
                               batching interval
        @param slideDuration:  sliding interval of the window (i.e., the interval after which
                               the new DStream will generate RDDs); must be a multiple of this
                               DStream's batching interval
        """
        # Reuse the keyed implementation with a single dummy key, then drop it.
        keyed = self.map(lambda x: (1, x))
        reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc,
                                             windowDuration, slideDuration, 1)
        return reduced.map(lambda kv: kv[1])
    def countByWindow(self, windowDuration, slideDuration):
        """
        Return a new DStream in which each RDD has a single element generated
        by counting the number of elements in a window over this DStream.
        windowDuration and slideDuration are as defined in the window() operation.
        This is equivalent to window(windowDuration, slideDuration).count(),
        but will be more efficient if window is large.
        """
        return self.map(lambda x: 1).reduceByWindow(operator.add, operator.sub,
                                                    windowDuration, slideDuration)
    def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
        """
        Return a new DStream in which each RDD contains the count of distinct elements in
        RDDs in a sliding window over this DStream.
        @param windowDuration: width of the window; must be a multiple of this DStream's
                              batching interval
        @param slideDuration:  sliding interval of the window (i.e., the interval after which
                              the new DStream will generate RDDs); must be a multiple of this
                              DStream's batching interval
        @param numPartitions:  number of partitions of each RDD in the new DStream.
        """
        keyed = self.map(lambda x: (x, 1))
        counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
                                             windowDuration, slideDuration, numPartitions)
        # Values whose count dropped to zero have left the window entirely.
        return counted.filter(lambda kv: kv[1] > 0)
    def groupByKeyAndWindow(self, windowDuration, slideDuration, numPartitions=None):
        """
        Return a new DStream by applying `groupByKey` over a sliding window.
        Similar to `DStream.groupByKey()`, but applies it over a sliding window.
        @param windowDuration: width of the window; must be a multiple of this DStream's
                              batching interval
        @param slideDuration:  sliding interval of the window (i.e., the interval after which
                              the new DStream will generate RDDs); must be a multiple of this
                              DStream's batching interval
        @param numPartitions:  Number of partitions of each RDD in the new DStream.
        """
        # Values are wrapped in lists so the window reduce can use list
        # concatenation as the "add" and list slicing as its inverse:
        # `a.extend(b) or a` mutates a in place and returns it, and
        # `a[len(b):]` removes the elements that b contributed from the front.
        ls = self.mapValues(lambda x: [x])
        grouped = ls.reduceByKeyAndWindow(lambda a, b: a.extend(b) or a, lambda a, b: a[len(b):],
                                          windowDuration, slideDuration, numPartitions)
        return grouped.mapValues(ResultIterable)
    def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,
                             numPartitions=None, filterFunc=None):
        """
        Return a new DStream by applying incremental `reduceByKey` over a sliding window.
        The reduced value of over a new window is calculated using the old window's reduce value :
        1. reduce the new values that entered the window (e.g., adding new counts)
        2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        `invFunc` can be None, then it will reduce all the RDDs in window, could be slower
        than having `invFunc`.
        @param func:           associative and commutative reduce function
        @param invFunc:        inverse function of `reduceFunc`
        @param windowDuration: width of the window; must be a multiple of this DStream's
                              batching interval
        @param slideDuration:  sliding interval of the window (i.e., the interval after which
                              the new DStream will generate RDDs); must be a multiple of this
                              DStream's batching interval
        @param numPartitions:  number of partitions of each RDD in the new DStream.
        @param filterFunc:     function to filter expired key-value pairs;
                              only pairs that satisfy the function are retained
                              set this to null if you do not want to filter
        """
        self._validate_window_param(windowDuration, slideDuration)
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        reduced = self.reduceByKey(func, numPartitions)
        if invFunc:
            # Incremental path: merge the batch that entered the window into
            # the previous window's state...
            def reduceFunc(t, a, b):
                b = b.reduceByKey(func, numPartitions)
                r = a.union(b).reduceByKey(func, numPartitions) if a else b
                if filterFunc:
                    r = r.filter(filterFunc)
                return r
            # ...and "subtract" the batch that left the window using invFunc.
            def invReduceFunc(t, a, b):
                b = b.reduceByKey(func, numPartitions)
                joined = a.leftOuterJoin(b, numPartitions)
                return joined.mapValues(lambda kv: invFunc(kv[0], kv[1])
                                        if kv[1] is not None else kv[0])
            jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)
            jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)
            if slideDuration is None:
                slideDuration = self._slideDuration
            dstream = self._sc._jvm.PythonReducedWindowedDStream(
                reduced._jdstream.dstream(),
                jreduceFunc, jinvReduceFunc,
                self._ssc._jduration(windowDuration),
                self._ssc._jduration(slideDuration))
            return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
        else:
            # Non-incremental fallback: re-reduce the whole window each slide.
            return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
    def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
        """
        Return a new "state" DStream where the state for each key is updated by applying
        the given function on the previous state of the key and the new values of the key.
        @param updateFunc: State update function. If this function returns None, then
                           corresponding state key-value pair will be eliminated.
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        if initialRDD and not isinstance(initialRDD, RDD):
            initialRDD = self._sc.parallelize(initialRDD)
        def reduceFunc(t, a, b):
            # a: previous state RDD (None on the first batch); b: new batch.
            if a is None:
                g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None))
            else:
                # Cogroup new values with the old state, producing
                # (new_values, old_state_or_None) per key for updateFunc.
                g = a.cogroup(b.partitionBy(numPartitions), numPartitions)
                g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None))
            state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1]))
            # A None result from updateFunc drops the key from the state.
            return state.filter(lambda k_v: k_v[1] is not None)
        jreduceFunc = TransformFunction(self._sc, reduceFunc,
                                        self._sc.serializer, self._jrdd_deserializer)
        if initialRDD:
            initialRDD = initialRDD._reserialize(self._jrdd_deserializer)
            dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc,
                                                       initialRDD._jrdd)
        else:
            dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc)
        return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
class TransformedDStream(DStream):
    """
    TransformedDStream is a DStream generated by an Python function
    transforming each RDD of a DStream to another RDDs.
    Multiple continuous transformations of DStream can be combined into
    one transformation.
    """
    def __init__(self, prev, func):
        self._ssc = prev._ssc
        self._sc = self._ssc._sc
        self._jrdd_deserializer = self._sc.serializer
        self.is_cached = False
        self.is_checkpointed = False
        # Lazily-built Java-side DStream handle (see _jdstream below).
        self._jdstream_val = None
        # Fold consecutive transformations into one by composing the Python
        # functions.  type() is used instead of isinstance() so subclasses
        # (which are not strictly TransformedDStream objects) are never
        # compacted -- this avoids a bug in KafkaTransformedDStream when
        # calling offsetRanges().  Cached/checkpointed parents must also stay
        # intact, since folding would bypass their materialization.
        if (type(prev) is TransformedDStream and
                not prev.is_cached and not prev.is_checkpointed):
            prev_func = prev.func
            self.func = lambda t, rdd: func(t, prev_func(t, rdd))
            self.prev = prev.prev
        else:
            self.prev = prev
            self.func = func
    @property
    def _jdstream(self):
        # Create the Java-side DStream on first access and cache it, so the
        # function-folding above can keep composing before materialization.
        if self._jdstream_val is not None:
            return self._jdstream_val
        jfunc = TransformFunction(self._sc, self.func, self.prev._jrdd_deserializer)
        dstream = self._sc._jvm.PythonTransformedDStream(self.prev._jdstream.dstream(), jfunc)
        self._jdstream_val = dstream.asJavaDStream()
        return self._jdstream_val
| apache-2.0 |
8l/beri | cheritest/trunk/tests/fpu/test_raw_fpu_mov_gpr.py | 1 | 2722 | #-
# Copyright (c) 2012 Ben Thorner
# Copyright (c) 2013 Colin Rothwell
# All rights reserved.
#
# This software was developed by Ben Thorner as part of his summer internship
# and Colin Rothwell as part of his final year undergraduate project.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_fpu_mov_gpr(BaseBERITestCase):

    @attr('floatcmove')
    def test_mov_gpr(self):
        '''Test we can move conditional on a GPR'''
        # Each case: (register name, expected value, instruction, condition, precision).
        # Messages and check order are identical to spelling each assert out by hand.
        cases = [
            ('s0', 0x41000000,         "MOVN", "true",  "single"),
            ('s1', 0x4000000000000000, "MOVN", "true",  "double"),
            ('s2', 0x4000000041000000, "MOVN", "true",  "paired single"),
            ('s3', 0x0,                "MOVN", "false", "single"),
            ('s4', 0x0,                "MOVN", "false", "double"),
            ('s5', 0x0,                "MOVN", "false", "paired single"),
            ('s6', 0x41000000,         "MOVZ", "true",  "single"),
            ('s7', 0x4000000000000000, "MOVZ", "true",  "double"),
            ('a0', 0x4000000041000000, "MOVZ", "true",  "paired single"),
            ('a1', 0x0,                "MOVZ", "false", "single"),
            ('a2', 0x0,                "MOVZ", "false", "double"),
            ('a3', 0x0,                "MOVZ", "false", "paired single"),
        ]
        for reg, expected, insn, cond, precision in cases:
            msg = "Failed %s on condition %s in %s precision" % (insn, cond, precision)
            self.assertRegisterEqual(getattr(self.MIPS, reg), expected, msg)
| apache-2.0 |
chaen/DIRAC | ProductionSystem/Service/ProductionManagerHandler.py | 2 | 5720 | """ DISET request handler base class for the ProductionDB.
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ProductionSystem.DB.ProductionDB import ProductionDB
# Argument-type lists used by the DISET types_* declarations below.
# NOTE(review): basestring exists only in Python 2; this module targets Python 2.
prodTypes = [basestring, int]
transTypes = [basestring, int, list]
class ProductionManagerHandlerBase(RequestHandler):
  """ DISET request handler exposing the ProductionDB operations:
      productions, production transformations and production steps.
  """

  def _parseRes(self, res):
    # Log failing S_ERROR results before returning them to the client.
    if not res['OK']:
      gLogger.error('ProductionManager failure', res['Message'])
    return res

  def setDatabase(self, oDatabase):
    # Inject the database backend shared by all handler instances.
    global database
    database = oDatabase

  ####################################################################
  #
  # These are the methods to manipulate the Productions table
  #

  types_addProduction = [basestring, basestring]

  def export_addProduction(self, prodName, prodDescription):
    """ Register a new production; the caller's DN and group are recorded as author. """
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    authorGroup = credDict['group']
    res = database.addProduction(prodName, prodDescription, authorDN, authorGroup)
    if res['OK']:
      gLogger.info("Added production %d" % res['Value'])
    return self._parseRes(res)

  types_deleteProduction = [prodTypes]

  def export_deleteProduction(self, prodName):
    """ Delete the given production on behalf of the calling DN. """
    credDict = self.getRemoteCredentials()
    authorDN = credDict['DN']
    res = database.deleteProduction(prodName, author=authorDN)
    return self._parseRes(res)

  types_getProductions = []

  def export_getProductions(self, condDict=None, older=None, newer=None, timeStamp='CreationDate',
                            orderAttribute=None, limit=None, offset=None):
    """ Query productions matching the given selection and pagination options. """
    if not condDict:
      condDict = {}
    res = database.getProductions(condDict=condDict,
                                  older=older,
                                  newer=newer,
                                  timeStamp=timeStamp,
                                  orderAttribute=orderAttribute,
                                  limit=limit,
                                  offset=offset)
    return self._parseRes(res)

  types_getProduction = [prodTypes]

  def export_getProduction(self, prodName):
    """ Return the full record of the given production. """
    res = database.getProduction(prodName)
    return self._parseRes(res)

  types_getProductionParameters = [prodTypes, [basestring, list, tuple]]

  def export_getProductionParameters(self, prodName, parameters):
    """ Return the requested parameter(s) of the given production. """
    res = database.getProductionParameters(prodName, parameters)
    return self._parseRes(res)

  types_setProductionStatus = [prodTypes, basestring]

  def export_setProductionStatus(self, prodName, status):
    """ Set the status of the given production. """
    res = database.setProductionStatus(prodName, status)
    return self._parseRes(res)

  types_startProduction = [prodTypes]

  def export_startProduction(self, prodName):
    """ Start the given production. """
    res = database.startProduction(prodName)
    return self._parseRes(res)

  ####################################################################
  #
  # These are the methods to manipulate the ProductionTransformations table
  #

  types_addTransformationsToProduction = [prodTypes, transTypes, transTypes]

  def export_addTransformationsToProduction(self, prodName, transIDs, parentTransIDs):
    """ Attach existing transformations (with their parents) to a production. """
    res = database.addTransformationsToProduction(prodName, transIDs, parentTransIDs=parentTransIDs)
    return self._parseRes(res)

  types_getProductionTransformations = []

  def export_getProductionTransformations(
      self,
      prodName,
      condDict=None,
      older=None,
      newer=None,
      timeStamp='CreationTime',
      orderAttribute=None,
      limit=None,
      offset=None):
    """ Query the transformations of a production with selection/pagination options. """
    if not condDict:
      condDict = {}
    res = database.getProductionTransformations(
        prodName,
        condDict=condDict,
        older=older,
        newer=newer,
        timeStamp=timeStamp,
        orderAttribute=orderAttribute,
        limit=limit,
        offset=offset)
    return self._parseRes(res)

  ####################################################################
  #
  # These are the methods to manipulate the ProductionSteps table
  #

  types_addProductionStep = [dict]

  def export_addProductionStep(self, prodStep):
    """ Add a production step described by the prodStep dictionary.
        Required keys: name, description, longDescription, body, stepType,
        plugin, agentType, groupsize, inputquery, outputquery.
    """
    stepName = prodStep['name']
    stepDescription = prodStep['description']
    stepLongDescription = prodStep['longDescription']
    stepBody = prodStep['body']
    stepType = prodStep['stepType']
    stepPlugin = prodStep['plugin']
    stepAgentType = prodStep['agentType']
    stepGroupSize = prodStep['groupsize']
    stepInputQuery = prodStep['inputquery']
    stepOutputQuery = prodStep['outputquery']
    res = database.addProductionStep(stepName, stepDescription, stepLongDescription, stepBody, stepType, stepPlugin,
                                     stepAgentType, stepGroupSize, stepInputQuery, stepOutputQuery)
    if res['OK']:
      gLogger.info("Added production step %d" % res['Value'])
    return self._parseRes(res)

  types_getProductionStep = [int]

  def export_getProductionStep(self, stepID):
    """ Return the production step with the given ID. """
    res = database.getProductionStep(stepID)
    return self._parseRes(res)

  ####################################################################
  #
  # These are the methods for production logging manipulation
  #

  ####################################################################
  #
  # These are the methods used for web monitoring
  #

  ###########################################################################
# Module-level ProductionDB instance shared by all handler instances;
# created once by initializeProductionManagerHandler() at service start.
database = False


def initializeProductionManagerHandler(serviceInfo):
  """ Service initialization: instantiate the ProductionDB backend. """
  global database
  database = ProductionDB('ProductionDB', 'Production/ProductionDB')
  return S_OK()
class ProductionManagerHandler(ProductionManagerHandlerBase):
  """ Concrete handler wiring the module-level database into the base class. """

  def __init__(self, *args, **kargs):
    # The database is set before the base handler initializes the request.
    self.setDatabase(database)
    ProductionManagerHandlerBase.__init__(self, *args, **kargs)
| gpl-3.0 |
avatarnewyork/daily_harvest | packages/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    # Detects the Big5 (Traditional Chinese) encoding by pairing the Big5
    # coding state machine with a Big5 character-distribution analyser.

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        # reset() initializes the probing state inherited from the base class.
        self.reset()

    def get_charset_name(self):
        return "Big5"
| mit |
google-code-export/los-cocos | cocos/collision_model.py | 3 | 27719 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import operator as op
import math
import cocos.euclid as eu
###### interfaces, abstract base clases ######################################
# cshape reference interfase
class Cshape(object):
    """
    Represents an abstract geometric shape in the 2D space, and can
    answer questions about proximity or intersection with other shapes.

    Implementations are free to restrict the type of geometrical shapes
    that will accept, by example circles or axis aligned rectangles.
    """
    # NOTE: this is a documentation-only interface; concrete shapes
    # (CircleShape, AARectShape) duck-type it and the stubs below return None.

    def overlaps(self, other):
        """
        Returns True if overlapping other, False otherwise

        :rtype: bool
        """
        pass

    def distance(self, other):
        """
        Returns a float, distance from itself to other;
        Not necessarily euclidean distance.
        It is distances between boundaries.

        :rtype: float
        """
        pass

    def near_than(self, other, near_distance):
        """
        Returns a boolean, True if distance(self, other)<=near_distance

        :rtype: bool
        """
        pass

    def touches_point(self, x, y):
        """
        Returns True if the point (x,y) overlaps the shape, False otherwise

        :rtype: bool
        """
        pass

    def fits_in_box(self, packed_box):
        """
        Returns a boolean, True if the shape fully fits into the axis aligned
        rectangle defined by packed_box, False otherwise.

        :Parameters:
            `packed_box` : 4-tuple floats
                An axis aligned rectangle expressed as (minx, maxx, miny, maxy)

        :rtype: bool
        """
        pass

    def minmax(self):
        """
        Returns the smallest axis aligned rectangle that contains all shape points.
        The rectangle is expressed as a 4-tuple of floats (minx, maxx, miny, maxy)

        Such a rectangle is also know as the Axis Aligned Bounding Box for shape;
        AABB for short.

        :rtype: 4-tuple of floats
        """
        pass

    def copy(self):
        """
        Returns a copy of itself

        :rtype: Cshape
        """
        pass
# collision manager interfase
class CollisionManager(object):
    """
    Answers questions about proximity or collision with known objects.

    After instantiation or after calling its 'clear' method the instance
    don't knows any object.

    An object is made known to the CollisionManager instance by calling its
    'add' method with the object instance.

    Example questions are:

        - which known objects collides with <this object> ?
        - which known objects are near than 6.0 from <this object> ?

    Note that explicit objects in the question (call) don't need to be known to
    the collision manager answering the question.
    If the explicit object indeed is known, then it is omitted in the answer as a
    trivial case.

    There can be multiple CollisionManager instances in the same scope, and
    an object can be known to many collision managers at the same time.

    Objects that can be known or can be presented to a Collision Manager in
    a question must comply with:

        - obj has a member called cshape
        - obj.cshape supports the interface Cshape

    Such an object can be called 'a collidable' in the documentation, and when
    'obj' or 'other' is seen in the code you can assume it means collidable.

    As a limitation imposed by the current Cshapes implementations, all the
    collidables that interacts with a particular instance of CollisionManager
    must share the same concrete Cshape subclass: by example, all
    objects should have a CircleShape cshape, or all objects should have a
    AARectShape cshape.

    The known objects collective for each CollisionManager instance is
    manipulated by calling the methods

        - clean() \: forgets all objects and empties internal data structures
        - add(obj) \: remember obj as a known object
        - remove_tricky(obj) \: forgets obj

    When objects are made known to a collision manager, internal data structures
    are updated based on the obj.cshape value at the 'add' moment.
    In particular, the obj.cshape indirectly tells where in the internal
    structures certain info will be stored.
    Later, the internal data structures are used to accelerate answers.

    This means that modifying obj.cshape after an 'add' can produce a memory
    leak in the next 'remove_tricky', and that in the same situation some
    answers can be partially wrong.
    What type of wrong ? It can sometimes miss a collision with a know
    object that changed it cshape.

    It is user code responsibility to drive the know objects update when
    obj.cshape values changes.

    Common use patterns that are safe and efficient:

    When most of the known objects update cshape each frame

        You do::

            # updating collision info
            collision_manager.clear() # fast, no leaks even if changed cshapes
            for actor in moving_actors:
                collision_manager.add(actor)

            # game logic
            # do what you need, but defer changes in cshape to next block
            # by example
            for actor in moving_actors:
                actor.new_pos = actor.cshape.center + dt * vel
                #other logic that potentially needs collision info;
                #it will be accurate because you have not changed cshapes
                ...

            # update cshapes for next frame
            for actor in moving actors:
                actor.cshape.center = actor.new_pos

        Example actors for this case are player, enemies, soldiers.

    All of the known objects don't change cshapes

        - At level start you add all objects
        - When an actor reaches end of life use 'remove_tricky' to make it not known, no problem because his cshape has not changed

        Examples actors for this case are food, coins, trees, rocks.
    """
    # NOTE: interface class; all methods below are documentation-only stubs.

    def add(self, obj):
        """
        Makes obj a know entity
        """
        pass

    def remove_tricky(self, obj):
        """
        *(obj should have the same .cshape value that when added)*

        Makes collision manager forget about obj, thus no further query will
        return obj.

        obj is required to be a known object.
        """

    def clear(self):
        """
        Empties the known set
        """
        pass

    def they_collide(self, obj1, obj2):
        """
        Returns a boolean, True if obj1 overlaps objs2

        obj1, obj2 are not required to be known objects
        """
        pass

    def objs_colliding(self, obj):
        """
        Returns a container with known objects that overlaps obj,
        excluding obj itself

        obj is not required to be a known object
        """
        pass

    def iter_colliding(self, obj):
        """
        A lazy iterator over objects colliding with obj, allows to spare some
        CPU when the loop procesing the colissions breaks before exausting
        the collisions.

        obj is not required to be a known object

        Usage::

            for other in collision_manager.iter_colliding(obj):
                # process event 'obj touches other'
        """
        pass

    def any_near(self, obj, near_distance):
        """
        Returns None if no know object (except itself) is near than
        near_distance, else an arbitrary known object with distance
        less than near_distance

        obj is not required to be a known object
        """
        pass

    def objs_near(self, obj, near_distance):
        """
        Returns a container with the objects known by collision manager that
        are at distance to obj less or equal than near_distance, excluding
        itself.
        Notice that it includes the ones colliding with obj.

        obj is not required to be a known object
        """
        pass

    def objs_near_wdistance(self, obj, near_distance):
        """
        Returns a list with the (other, distance) pairs that with all the
        known objects at distance less or equal than near_distance to obj,
        except obj itself.
        Notice that it includes the ones colliding with obj.

        obj is not required to be a known object

        If the game logic wants the list ordered by ascending distances, use
        ranked_objs_near instead.
        """
        pass

    def ranked_objs_near(self, obj, near_distance):
        """
        Same as objs_near_wdistance but the list is ordered in increasing distance

        obj is not required to be a known object
        """
        pass

    def iter_all_collisions(self):
        """
        Iterator that exposes all collisions between known objects.

        At each step it will yield a pair (obj, other).

        If (obj1, obj2) is seen when consuming the iterator, then (obj2, obj1)
        will not be seen.
        In other worlds, 'obj1 collides with obj2' means (obj1, obj2) or
        (obj2, obj1) will appear in the iterator output but not both.
        """

    def knows(self, obj):
        """Returns True if obj was added to the collision manager, false otherwise

        Used for debug and testing.
        """
        pass

    def known_objs(self):
        """Reurns a set with all the objects known by the CollisionManager

        Used for debug and testing.
        """
        pass

    def objs_touching_point(self, x, y):
        """Returns a container with known objects touching point (x, y)

        Useful for mouse pick
        """
        pass

    def objs_into_box(self, minx, maxx, miny, maxy):
        """Returns a container with know objects that fully fits into the axis
        aligned rectangle defined by params

        Useful for elastic box selection
        """
        pass
###### Cshape implementations #################################################
class CircleShape(object):
    """
    Implements the Cshape interface using discs as the geometric shape.

    Distance is the euclidean distance between boundaries.
    Look at Cshape for other class and methods documentation.
    """
    def __init__(self, center, r):
        """
        :Parameters:
            `center` : euclid.Vector2
                disc center
            `r` : float
                disc radius
        """
        self.center = center
        self.r = r

    def overlaps(self, other):
        # Discs overlap when centers are closer than the summed radii.
        reach = self.r + other.r
        return abs(self.center - other.center) < reach

    def distance(self, other):
        # Gap between the two boundaries, clamped to zero when overlapping.
        gap = abs(self.center - other.center) - self.r - other.r
        return 0.0 if gap < 0.0 else gap

    def near_than(self, other, near_distance):
        threshold = self.r + other.r + near_distance
        return abs(self.center - other.center) <= threshold

    def touches_point(self, x, y):
        return abs(self.center - (x, y)) <= self.r

    def fits_in_box(self, packed_box):
        minx, maxx, miny, maxy = packed_box
        r = self.r
        inside_x = (minx + r) <= self.center[0] <= (maxx - r)
        inside_y = (miny + r) <= self.center[1] <= (maxy - r)
        return inside_x and inside_y

    def minmax(self):
        # Axis aligned bounding box of the disc.
        cx, cy = self.center[0], self.center[1]
        r = self.r
        return (cx - r, cx + r, cy - r, cy + r)

    def copy(self):
        return CircleShape(eu.Vector2(*self.center), self.r)
class AARectShape(object):
    """
    Implements the Cshape interface using axis-aligned rectangles.

    Distance is not the euclidean distance but the rectangular or max-min
    distance, max( min(x0 - x1), min(y0 - y1) : (xi, yi) in recti ).
    Good if actors don't rotate.
    Look at Cshape for other class and methods documentation.
    """
    def __init__(self, center, half_width, half_height):
        """
        :Parameters:
            `center` : euclid.Vector2
                rectangle center
            `half_width` : float
                half width of rectangle
            `half_height` : float
                half height of rectangle
        """
        self.center = center
        self.rx = half_width
        self.ry = half_height

    def overlaps(self, other):
        # Overlap iff the center separation is below the summed half-extents
        # on both axes.
        dx = abs(self.center[0] - other.center[0])
        dy = abs(self.center[1] - other.center[1])
        return dx < self.rx + other.rx and dy < self.ry + other.ry

    def distance(self, other):
        # Per-axis boundary gaps; the max-min distance is the larger one,
        # clamped to zero when the rectangles overlap.
        gx = abs(self.center[0] - other.center[0]) - self.rx - other.rx
        gy = abs(self.center[1] - other.center[1]) - self.ry - other.ry
        d = max((gx, gy))
        return 0.0 if d < 0.0 else d

    def near_than(self, other, near_distance):
        gx = abs(self.center[0] - other.center[0]) - self.rx - other.rx
        gy = abs(self.center[1] - other.center[1]) - self.ry - other.ry
        return gx < near_distance and gy < near_distance

    def touches_point(self, x, y):
        return abs(self.center[0] - x) < self.rx and abs(self.center[1] - y) < self.ry

    def fits_in_box(self, packed_box):
        minx, maxx, miny, maxy = packed_box
        return (minx + self.rx <= self.center[0] <= maxx - self.rx and
                miny + self.ry <= self.center[1] <= maxy - self.ry)

    def minmax(self):
        cx, cy = self.center[0], self.center[1]
        return (cx - self.rx, cx + self.rx, cy - self.ry, cy + self.ry)

    def copy(self):
        return AARectShape(eu.Vector2(*self.center), self.rx, self.ry)
###### CollisionManager implementations #######################################
class CollisionManagerBruteForce(object):
    """
    Implements the CollisionManager interface with the simplest code possible.

    Intended for reference and debugging; performance is very bad: every
    query is O(n) over the known set and iter_all_collisions is O(n**2).
    Look at CollisionManager for other class and methods documentation.
    """
    def __init__(self):
        self.objs = set()

    def add(self, obj):
        #? use weakref ? python 2.7 has weakset
        self.objs.add(obj)

    def remove_tricky(self, obj):
        self.objs.remove(obj)

    def clear(self):
        self.objs.clear()

    def they_collide(self, obj1, obj2):
        return obj1.cshape.overlaps(obj2.cshape)

    def objs_colliding(self, obj):
        f_overlaps = obj.cshape.overlaps
        return [other for other in self.objs if
                (other is not obj) and f_overlaps(other.cshape)]

    def iter_colliding(self, obj):
        f_overlaps = obj.cshape.overlaps
        for other in self.objs:
            if other is not obj and f_overlaps(other.cshape):
                yield other

    def any_near(self, obj, near_distance):
        f_near_than = obj.cshape.near_than
        for other in self.objs:
            if other is not obj and f_near_than(other.cshape, near_distance):
                return other
        return None

    def objs_near(self, obj, near_distance):
        f_near_than = obj.cshape.near_than
        return [other for other in self.objs if
                (other is not obj) and f_near_than(other.cshape, near_distance)]

    def objs_near_wdistance(self, obj, near_distance):
        f_distance = obj.cshape.distance
        res = []
        for other in self.objs:
            if other is obj:
                continue
            d = f_distance(other.cshape)
            if d <= near_distance:
                res.append((other, d))
        return res

    def ranked_objs_near(self, obj, near_distance):
        # BUGFIX: the original called the bare name 'objs_near_wdistance',
        # which does not exist at module level and raised NameError; the
        # method must be reached through self.
        tmp = self.objs_near_wdistance(obj, near_distance)
        tmp.sort(key=op.itemgetter(1))
        return tmp

    def iter_all_collisions(self):
        # O(n**2): each unordered pair is tested exactly once (j < i).
        for i, obj in enumerate(self.objs):
            f_overlaps = obj.cshape.overlaps
            for j, other in enumerate(self.objs):
                if j >= i:
                    break
                if f_overlaps(other.cshape):
                    yield (obj, other)

    def knows(self, obj):
        return obj in self.objs

    def known_objs(self):
        return self.objs

    def objs_touching_point(self, x, y):
        # Useful for mouse pick.
        return set(obj for obj in self.objs if obj.cshape.touches_point(x, y))

    def objs_into_box(self, minx, maxx, miny, maxy):
        # Useful for elastic box selection.
        packed_box = minx, maxx, miny, maxy
        return set(obj for obj in self.objs if obj.cshape.fits_in_box(packed_box))
class CollisionManagerGrid(object):
    """
    Implements the CollisionManager interface based on the scheme
    known as spatial hashing.

    The idea behind is to divide the space in rectangles with a given width and
    height, and have a table telling which objects overlaps each rectangle.

    Later, when the question 'which know objects has such and such spatial
    relation with <some object>' arrives, only the objects in rectangles
    overlaping <some object> (or nearby ones) needs to be examined for the
    condition.

    Look at CollisionManager for other class and methods documentation.
    """
    def __init__(self, xmin, xmax, ymin, ymax, cell_width, cell_height):
        """
        Cell width and height have impact on performance.
        For objects with same with, and with width==height, a good value
        is 1.25 * (object width).
        For mixed widths, a good guess can be
        ~ 1.25 * { width(object): all objects not exceptionlly big}

        :Parameters:
            `xmin` : float
                minimun x coordinate for a point in world
            `xmax` : float
                maximun x coordinate for a point in world
            `ymin` : float
                minimun y coordinate for a point in world
            `ymax` : float
                maximun y coordinate for a point in world
            `cell_width` : float
                width for the rectangles the space will be broken
            `cell_height` : float
                heigh for the rectangles the space will be broken
        """
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax
        self.cell_width = cell_width
        self.cell_height = cell_height
        cols = int(math.ceil((xmax - xmin) / cell_width))
        rows = int(math.ceil((ymax - ymin) / cell_height))
        self.cols = cols
        self.rows = rows
        numbuckets = cols*rows
        # buckets maps cell identifier -> objs that potentially overlaps the cell
        self.buckets = [set() for k in range(numbuckets)]

    def add(self, obj):
        # add to any bucket it overlaps
        # for the collision logic algorithm is fine if a number of buckets
        # that don't overlap are included; this allows to use a faster
        # 'buckets_for_objects' at the cost of potentially some extra buckets
        for cell_idx in self._iter_cells_for_aabb(obj.cshape.minmax()):
            self.buckets[cell_idx].add(obj)

    def remove_tricky(self, obj):
        # Relies on obj.cshape being unchanged since add(), so the very same
        # buckets are visited; otherwise stale entries remain (memory leak).
        for cell_idx in self._iter_cells_for_aabb(obj.cshape.minmax()):
            self.buckets[cell_idx].remove(obj)

    def clear(self):
        for bucket in self.buckets:
            bucket.clear()

    def they_collide(self, obj1, obj2):
        return obj1.cshape.overlaps(obj2.cshape)

    def objs_colliding(self, obj):
        aabb = obj.cshape.minmax()
        f_overlaps = obj.cshape.overlaps
        collides = set()
        # seed with obj itself so it is skipped by the 'not in collides' test
        collides.add(obj)
        # do brute force with others in all the buckets obj overlaps
        for cell_id in self._iter_cells_for_aabb(aabb):
            for other in self.buckets[cell_id]:
                if other not in collides and f_overlaps(other.cshape):
                    collides.add(other)
        collides.remove(obj)
        return collides

    def iter_colliding(self, obj):
        aabb = obj.cshape.minmax()
        f_overlaps = obj.cshape.overlaps
        collides = set()
        collides.add(obj)
        # do brute force with others in all the buckets obj overlaps;
        # the 'collides' set also prevents yielding an object twice when it
        # appears in several overlapped buckets
        for cell_id in self._iter_cells_for_aabb(aabb):
            for other in self.buckets[cell_id]:
                if (other not in collides) and f_overlaps(other.cshape):
                    collides.add(other)
                    yield other

    def any_near(self, obj, near_distance):
        # inflate the AABB by near_distance so all candidate buckets are seen
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        # do brute force with others in all the buckets inflated shape overlaps
        # NOTE(review): uses strict '<' while objs_near_wdistance uses '<=';
        # boundary-distance objects differ between the two -- confirm intended.
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if other is not obj and f_distance(other.cshape) < near_distance:
                    return other
        return None

    def objs_near(self, obj, near_distance):
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        collides = set()
        # do brute force with others in all the buckets inflated shape overlaps
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if (other not in collides and
                        (f_distance(other.cshape) < near_distance)):
                    collides.add(other)
        collides.remove(obj)
        return collides

    def objs_near_wdistance(self, obj, near_distance):
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        # dict keyed by object avoids computing the distance twice when the
        # object shows up in several buckets
        collides = {}
        collides[obj] = 0.0
        # do brute force with others in all the buckets inflated shape overlaps
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if other not in collides:
                    d = f_distance(other.cshape)
                    if d <= near_distance:
                        collides[other] = d
                        #yield (other, d)
        del collides[obj]
        return [ (other, collides[other]) for other in collides ]

    def ranked_objs_near(self, obj, near_distance):
        tmp = self.objs_near_wdistance(obj, near_distance)
        tmp.sort(key=op.itemgetter(1))
        return tmp

    def iter_all_collisions(self):
        # implemented using the fact: 'a collides b' iff (there is a bucket B
        # with a in B, b in B and 'a collides b')
        known_collisions = set()
        for bucket in self.buckets:
            for i, obj in enumerate(bucket):
                f_overlaps = obj.cshape.overlaps
                for j, other in enumerate(bucket):
                    if j >= i:
                        break
                    if f_overlaps(other.cshape):
                        # canonical (smaller id first) pair key so a pair seen
                        # in several buckets is yielded only once
                        if id(obj) < id(other):
                            coll_id = (id(obj), id(other))
                        else:
                            coll_id = (id(other), id(obj))
                        if not coll_id in known_collisions:
                            known_collisions.add(coll_id)
                            yield (obj, other)

    def knows(self, obj):
        for bucket in self.buckets:
            if obj in bucket:
                return True
        return False

    def known_objs(self):
        objs = set()
        for bucket in self.buckets:
            objs |= bucket
        return objs

    def objs_touching_point(self, x, y):
        # Useful for mouse pick.
        touching = set()
        for cell_id in self._iter_cells_for_aabb((x, x, y, y)):
            for obj in self.buckets[cell_id]:
                if obj.cshape.touches_point(x, y):
                    touching.add(obj)
        return touching

    def objs_into_box(self, minx, maxx, miny, maxy):
        # Useful for elastic box selection.
        into = set()
        buckets = self.buckets
        packed_box = (minx, maxx, miny, maxy)
        for cell_idx in self._iter_cells_for_aabb(packed_box):
            for obj in buckets[cell_idx]:
                if (obj not in into) and (obj.cshape.fits_in_box(packed_box)):
                    into.add(obj)
        return into

    def _iter_cells_for_aabb(self, aabb):
        # iterate all buckets overlapping the rectangle minmax
        minx, maxx, miny, maxy = aabb
        ix_lo = int(math.floor((minx - self.xmin) / self.cell_width))
        ix_sup = int(math.ceil((maxx - self.xmin) / self.cell_width))
        iy_lo = int(math.floor((miny - self.ymin) / self.cell_height))
        iy_sup = int(math.ceil((maxy - self.ymin) / self.cell_height))
        # but disregard cells ouside world, can come from near questions
        if ix_lo < 0:
            ix_lo = 0
        if ix_sup > self.cols:
            ix_sup = self.cols
        if iy_lo < 0:
            iy_lo = 0
        if iy_sup > self.rows:
            iy_sup = self.rows
        for iy in range(iy_lo, iy_sup):
            contrib_y = iy * self.cols
            for ix in range(ix_lo, ix_sup):
                # row-major linear cell index
                cell_id = ix + contrib_y
                yield cell_id
| bsd-3-clause |
ai-se/george | Models/usp05.py | 1 | 7552 | """
# The USP05 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
@attribute ObjType {FT,PJ,RQ}
@attribute IntComplx {5.0,2.0,1.0,4.0,3.0,3.5,2.5,4.5,NULL}
@attribute DataFile {18.0,9.0,7.0,12.0,2.0,5.0,4.0,3.0,1.0,11.0,0.0,75.0,13.0,6.0,8.0,NULL,32.0}
@attribute DataEn {94.0,240.0,15.0,90.0,314.0,1.0,4.0,3.0,2.0,6.0,0.0,20.0,60.0,30.0,5.0,17.0,10.0,7.0,45.0,48.0,12.0,83.0,150.0,36.0,186.0,9.0,11.0,52.0,25.0,14.0,8.0,NULL,50.0,13.0}
@attribute DataOut {NULL,0.0,1.0,2.0,4.0,20.0,5.0,50.0,12.0,76.0,6.0,69.0,200.0,34.0,108.0,9.0,3.0,8.0,7.0,10.0,18.0,16.0,17.0,13.0,14.0,11.0}
@attribute UFP {NULL,0.0,2.0,3.0,4.0,50.0,46.0,66.0,48.0,36.0,44.0,14.0,8.0,10.0,20.0,25.0,35.0,1.0,6.0,49.0,19.0,64.0,55.0,30.0,180.0,190.0,250.0,1085.0,510.0,210.0,1714.0,11.0,5.0,7.0,17.0,27.0,34.0,154.0,18.0,321.0,90.0,75.0,60.0,40.0,95.0,29.0,23.0,15.0,32.0,31.0,26.0,37.0,12.0,16.0,224.0,22.0,235.0,59.0,147.0,153.0,166.0,137.0,33.0,56.0,57.0,76.0,104.0,105.0}
@attribute AppExpr numeric
@attribute Effort numeric
Data:
"""
def usp05(weighFeature = False,
          split="median"):
    """Return the USP05 software-effort data set wrapped in a `data` table.

    Independent columns: ObjType, IntComplx, DataFile, DataEn, DataOut,
    UFP, AppExpr; the dependent column is 'effort' (lower is better).
    NOTE(review): column 5 (UFP) is listed in `ignores`, so it appears to
    be excluded from reasoning -- confirm against `lib.data`.
    """
    # Symbolic level shorthands (very-low .. extra-high); `_` marks unused slots.
    vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0;
    # Categorical codes for ObjType (feature / project / requirement); NULL -> 0.
    FT=0;PJ=1;RQ=2;NULL=0;
    return data(indep= [
        # 0..6
        'ObjType','IntComplx','DataFile','DataEn','DataOut','UFP','AppExpr'],
        less = ['effort'],
        _rows=[
        [FT,5,18,94,NULL,NULL,4,2.5],
        [FT,5,9,240,NULL,NULL,4,2.5],
        [FT,2,9,15,0,0,4,2],
        [FT,2,9,15,0,0,4,2],
        [FT,2,9,15,0,0,5,3.5],
        [FT,1,7,90,0,0,4,2],
        [FT,2,9,90,0,0,5,2],
        [FT,2,9,90,0,0,5,2],
        [FT,5,12,314,0,0,5,16],
        [FT,2,2,1,1,2,2,1],
        [FT,1,2,4,1,0,1,2],
        [FT,1,2,4,1,0,1,1],
        [FT,4,2,3,1,0,3,5],
        [FT,1,2,1,1,0,2,2],
        [FT,1,2,1,1,0,2,2],
        [FT,1,2,1,1,0,3,3],
        [FT,2,5,2,2,0,2,7],
        [FT,1,2,2,1,0,2,1],
        [FT,1,2,2,1,0,2,1],
        [FT,1,4,4,1,0,2,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,2,1,0,2,1],
        [FT,1,2,2,1,0,2,1],
        [FT,1,4,4,1,0,2,1],
        [FT,1,2,2,1,0,2,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,4,1,0,1,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,1,1,0,1,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,4,1,0,1,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,2,1,0,1,1],
        [FT,1,2,1,1,0,1,1],
        [FT,1,2,1,1,0,1,1],
        [FT,1,2,3,1,3,1,1],
        [FT,1,2,2,1,3,1,1],
        [FT,5,3,1,1,0,1,1],
        [FT,1,2,1,1,0,1,1],
        [FT,1,2,3,1,3,1,1],
        [FT,1,2,2,1,3,1,1],
        [FT,5,4,1,1,0,5,1],
        [FT,1,2,1,1,0,2,1],
        [FT,1,4,1,1,0,2,1],
        [FT,4,4,6,1,4,4,1],
        [FT,1,4,4,1,0,3,1],
        [FT,2,4,2,1,0,3,1],
        [FT,3,3,2,1,50,2,40],
        [FT,2,3,1,1,46,2,40],
        [FT,3,1,2,4,66,2,20],
        [FT,3,2,1,2,48,2,20],
        [FT,2,2,1,1,36,2,10],
        [FT,4,2,3,1,44,2,20],
        [FT,2,7,3,2,14,2,8],
        [FT,3,2,2,1,8,4,3],
        [FT,2,2,3,1,10,1,3],
        [FT,2,12,0,0,10,1,6],
        [FT,4,1,20,20,20,1,10],
        [FT,3,5,20,5,25,2,6],
        [FT,4,11,60,50,35,1,12],
        [FT,1,4,30,12,20,3,8],
        [FT,1,0,0,0,1,5,0.5],
        [FT,1,0,0,0,1,4,1],
        [FT,2,3,2,1,6,1,24],
        [FT,1,2,2,0,4,4,0.5],
        [FT,1,2,2,0,4,4,0.5],
        [FT,1,2,1,0,4,4,0.5],
        [FT,1,2,0,2,6,4,0.5],
        [FT,3,0,15,1,49,4,24],
        [FT,2,0,5,1,19,4,8],
        [FT,3,0,20,1,64,4,20],
        [FT,2,0,17,1,55,4,4],
        [FT,4,0,10,0,30,4,30],
        [FT,3,0,7,1,25,4,8],
        [FT,3,0,45,0,180,5,5],
        [PJ,4,75,48,76,190,4,75],
        [PJ,3,13,12,6,250,2,220],
        [PJ,3,7,83,69,1085,3,400],
        [PJ,3,12,150,200,510,2,100],
        [PJ,2,5,36,34,210,4,70],
        [PJ,3,12,186,108,1714,3,69],
        [RQ,3,5,4,2,10,5,2.5],
        [RQ,3,5,4,2,10,5,2.5],
        [RQ,3,4,0,9,10,5,2],
        [RQ,3,3,7,4,11,5,1.5],
        [RQ,2,3,3,2,4,5,2],
        [RQ,4,6,6,2,5,5,2.5],
        [RQ,3,4,4,4,2,5,2.5],
        [RQ,1,9,15,0,0,5,2],
        [RQ,1,9,15,0,0,5,1],
        [RQ,1,9,15,0,0,5,1],
        [RQ,1,9,15,0,0,5,0.5],
        [RQ,3,8,1,1,14,3,7],
        [RQ,3,8,4,1,14,3,5],
        [RQ,3,3,1,1,6,3,15],
        [RQ,3,2,3,1,4,2,2],
        [RQ,3,3,2,1,8,2,8],
        [RQ,1,2,1,1,7,1,2],
        [RQ,1,2,1,1,7,1,2],
        [RQ,4,5,9,1,8,3,11],
        [RQ,4,5,11,1,8,3,11],
        [RQ,2,3,2,6,7,2,5],
        [RQ,2,3,2,6,8,2,3],
        [RQ,3,4,1,4,7,2,3],
        [RQ,3,3,9,1,8,3,2],
        [RQ,3,3,11,1,5,3,2],
        [RQ,2,2,4,1,5,3,2],
        [RQ,3,2,4,1,5,2,2],
        [RQ,2,3,1,5,17,2,3],
        [RQ,5,4,10,3,27,5,20],
        [RQ,3,8,2,2,5,3,5],
        [RQ,1,1,1,1,0,1,1],
        [RQ,1,2,1,5,2,2,1],
        [RQ,1,1,1,8,0,1,1],
        [RQ,5,1,3,1,34,2,20],
        [RQ,2,2,1,1,36,2,10],
        [RQ,4,13,3,1,154,2,30],
        [RQ,2,1,2,0,18,2,10],
        [RQ,3.5,6,52,7,321,3.5,20],
        [RQ,2.5,3,4,1,14,1,15],
        [RQ,3.5,4,5,10,30,1,20],
        [RQ,3.5,2,3,1,14,1,20],
        [RQ,3.5,2,30,18,90,2,15],
        [RQ,4,2,25,16,75,1,15],
        [RQ,4.5,5,7,5,30,1,40],
        [RQ,2,2,3,2,10,1,3],
        [RQ,4,2,25,16,75,1,15],
        [RQ,3,2,3,1,14,1,20],
        [RQ,4,4,25,12,50,4,10],
        [RQ,2,2,20,10,60,2,6],
        [RQ,3,1,14,8,40,3,8],
        [RQ,3,1,8,10,35,3,8],
        [RQ,4,12,2,20,95,1,12],
        [RQ,2,2,4,10,30,2,10],
        [RQ,2,3,1,1,5,4,8],
        [RQ,1,0,0,0,1,4,2],
        [RQ,1,1,0,0,2,5,1],
        [RQ,1,0,0,0,1,5,1.5],
        [RQ,5,3,17,17,29,5,25],
        [RQ,5,3,17,17,29,5,9],
        [RQ,4,1,5,2,10,5,15],
        [RQ,3,3,17,17,23,5,2],
        [RQ,3,0,3,3,4,2,5],
        [RQ,5,2,2,1,4,5,45],
        [RQ,4,3,11,1,19,5,35],
        [RQ,5,3,4,4,14,5,50],
        [RQ,5,2,2,2,5,5,25],
        [RQ,5,1,3,3,10,5,35],
        [RQ,4,2,2,2,7,5,20],
        [RQ,3,3,9,4,20,5,25],
        [RQ,3,3,1,1,6,4,10],
        [RQ,2,3,2,1,6,4,33],
        [RQ,4,3,8,1,14,4,24],
        [RQ,4,3,9,1,15,4,36],
        [RQ,1,1,1,0,6,4,1],
        [RQ,1,1,2,0,4,4,1],
        [RQ,4,0,4,2,4,4,1],
        [RQ,3,2,4,10,32,4,2],
        [RQ,3,3,12,4,31,4,2],
        [RQ,5,4,9,6,26,4,2],
        [RQ,2,1,9,9,23,4,1],
        [RQ,1,1,9,9,37,4,1],
        [RQ,1,1,12,0,18,4,1],
        [RQ,2,1,1,0,20,4,1],
        [RQ,2,1,12,0,36,4,1],
        [RQ,3,2,1,0,4,4,1],
        [RQ,3,2,1,0,4,4,1],
        [RQ,2,2,10,0,12,4,1],
        [RQ,2,2,10,10,10,4,1],
        [RQ,3,1,12,12,10,4,1],
        [RQ,1,0,0,0,6,4,0.5],
        [RQ,1,0,0,12,8,4,0.5],
        # Row with missing attribute values (kept as NULL == 0).
        [RQ,NULL,NULL,NULL,NULL,NULL,4,8],
        [RQ,2,0,4,1,16,4,6],
        [RQ,2,0,5,1,19,4,6],
        [RQ,4,0,5,1,19,4,4],
        [RQ,2,0,1,1,7,4,1],
        [RQ,1,1,3,0,16,1,4],
        [RQ,2,0,1,0,3,4,6],
        [RQ,4,32,0,0,224,1,12],
        [RQ,3,NULL,NULL,NULL,NULL,1,6],
        [RQ,1,1,10,0,7,5,6],
        [RQ,2,0,6,1,22,4,4],
        [RQ,2,0,6,1,22,4,4],
        [RQ,2,3,50,1,235,3,7],
        [RQ,2,1,3,1,27,3,2],
        [RQ,3,3,6,1,59,3,3],
        [RQ,2,1,2,1,23,3,3],
        [RQ,2,3,13,13,147,3,4],
        [RQ,3,4,12,13,153,3,5],
        [RQ,4,4,14,14,166,3,6],
        [RQ,2,2,13,13,137,3,2],
        [RQ,3,2,2,1,33,3,6],
        [RQ,2,1,4,1,31,3,2],
        [RQ,1,1,4,4,46,3,1],
        [RQ,3,2,4,4,56,3,4],
        [RQ,4,3,3,3,57,3,4],
        [RQ,3,2,4,8,76,3,3],
        [RQ,1,2,1,1,29,3,2],
        [RQ,3,3,6,10,104,3,5],
        [RQ,2,1,0,8,50,3,3],
        [RQ,1,5,0,11,105,2,0.5]
        ],
        # COCOMO-style scale-factor tuning table (unused slots marked `_`).
        _tunings =[[
        #        vlow  low   nom   high  vhigh xhigh
        #scale factors:
        'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
        'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
        'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
        'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
        'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
        weighFeature = weighFeature,
        _split = split,
        _isCocomo = False,
        ignores=[5]
        )
"""
Demo code:
"""
# Demo entry point: print the parsed usp05 data table.
def _usp05(): print(usp05())
#if __name__ == '__main__': eval(todo('_nasa93()')) | mit |
keimlink/django-cms | cms/views.py | 2 | 7752 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import resolve, Resolver404, reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.http import urlquote
from django.utils.translation import get_language
from cms.apphook_pool import apphook_pool
from cms.appresolver import get_app_urls
from cms.cache.page import get_page_cache
from cms.page_rendering import _handle_no_page, render_page
from cms.utils import get_language_code, get_language_from_request, get_cms_setting
from cms.utils.i18n import (get_fallback_languages, force_language, get_public_languages,
get_redirect_on_fallback, get_language_list,
is_language_prefix_patterns_used)
from cms.utils.page_resolver import get_page_from_request
def details(request, slug):
    """
    The main view of the Django-CMS! Takes a request and a slug, renders the
    page.

    Flow: page cache -> page lookup -> language resolution -> language
    fallback/redirects -> apphook dispatch -> page-level redirect ->
    login check -> render.
    """
    # Serve from the page cache only for anonymous requests without the
    # toolbar; editors must always see a fresh render.
    if get_cms_setting("PAGE_CACHE") and (
        not hasattr(request, 'toolbar') or (
            not request.toolbar.edit_mode and
            not request.toolbar.show_toolbar and
            not request.user.is_authenticated()
        )
    ):
        cache_content = get_page_cache(request)
        if cache_content is not None:
            content, headers = cache_content
            response = HttpResponse(content)
            response._headers = headers
            return response
    # Get a Page model object from the request
    page = get_page_from_request(request, use_path=slug)
    if not page:
        return _handle_no_page(request, slug)
    # Language resolution precedence: explicit GET/POST parameter, then the
    # request's LANGUAGE_CODE, then the currently active translation; each
    # candidate must be configured for the page's site.
    current_language = request.GET.get('language', None)
    if not current_language:
        current_language = request.POST.get('language', None)
    if current_language:
        current_language = get_language_code(current_language)
        if current_language not in get_language_list(page.site_id):
            current_language = None
    if current_language is None:
        current_language = get_language_code(getattr(request, 'LANGUAGE_CODE', None))
        if current_language:
            current_language = get_language_code(current_language)
            if current_language not in get_language_list(page.site_id):
                current_language = None
    if current_language is None:
        current_language = get_language_code(get_language())
    # Check that the current page is available in the desired (current) language
    available_languages = []
    # this will return all languages in draft mode, and published only in live mode
    page_languages = list(page.get_published_languages())
    if hasattr(request, 'user') and request.user.is_staff:
        user_languages = get_language_list()
    else:
        user_languages = get_public_languages()
    for frontend_lang in user_languages:
        if frontend_lang in page_languages:
            available_languages.append(frontend_lang)
    # Check that the language is in FRONTEND_LANGUAGES:
    # own_urls holds every spelling of the current URL, used below to avoid
    # redirecting a request to itself.
    own_urls = [
        'http%s://%s%s' % ('s' if request.is_secure() else '', request.get_host(), request.path),
        '/%s' % request.path,
        request.path,
    ]
    if current_language not in user_languages:
        #are we on root?
        if not slug:
            #redirect to supported language
            languages = []
            for language in available_languages:
                languages.append((language, language))
            if languages:
                # get supported language
                new_language = get_language_from_request(request)
                if new_language in get_public_languages():
                    with force_language(new_language):
                        pages_root = reverse('pages-root')
                        if (hasattr(request, 'toolbar') and request.user.is_staff and request.toolbar.edit_mode):
                            request.toolbar.redirect_url = pages_root
                        elif pages_root not in own_urls:
                            return HttpResponseRedirect(pages_root)
            elif not hasattr(request, 'toolbar') or not request.toolbar.redirect_url:
                # NOTE(review): no `return` here -- the 404 raised/returned by
                # _handle_no_page is discarded and execution continues; confirm
                # whether this is intentional.
                _handle_no_page(request, slug)
        else:
            return _handle_no_page(request, slug)
    if current_language not in available_languages:
        # If we didn't find the required page in the requested (current)
        # language, let's try to find a fallback
        found = False
        for alt_lang in get_fallback_languages(current_language):
            if alt_lang in available_languages:
                if get_redirect_on_fallback(current_language) or slug == "":
                    with force_language(alt_lang):
                        path = page.get_absolute_url(language=alt_lang, fallback=True)
                        # In the case where the page is not available in the
                        # preferred language, *redirect* to the fallback page. This
                        # is a design decision (instead of rendering in place)).
                        if (hasattr(request, 'toolbar') and request.user.is_staff
                                and request.toolbar.edit_mode):
                            request.toolbar.redirect_url = path
                        elif path not in own_urls:
                            return HttpResponseRedirect(path)
                else:
                    found = True
        if not found and (not hasattr(request, 'toolbar') or not request.toolbar.redirect_url):
            # There is a page object we can't find a proper language to render it
            # NOTE(review): missing `return` as well -- see note above.
            _handle_no_page(request, slug)
    if apphook_pool.get_apphooks():
        # There are apphooks in the pool. Let's see if there is one for the
        # current page
        # since we always have a page at this point, applications_page_check is
        # pointless
        # page = applications_page_check(request, page, slug)
        # Check for apphooks! This time for real!
        app_urls = page.get_application_urls(current_language, False)
        skip_app = False
        if (not page.is_published(current_language) and hasattr(request, 'toolbar')
                and request.toolbar.edit_mode):
            skip_app = True
        if app_urls and not skip_app:
            app = apphook_pool.get_apphook(app_urls)
            pattern_list = []
            for urlpatterns in get_app_urls(app.urls):
                pattern_list += urlpatterns
            try:
                view, args, kwargs = resolve('/', tuple(pattern_list))
                return view(request, *args, **kwargs)
            except Resolver404:
                pass
    # Check if the page has a redirect url defined for this language.
    redirect_url = page.get_redirect(language=current_language)
    if redirect_url:
        if (is_language_prefix_patterns_used() and redirect_url[0] == "/"
                and not redirect_url.startswith('/%s/' % current_language)):
            # add language prefix to url
            redirect_url = "/%s/%s" % (current_language, redirect_url.lstrip("/"))
        # prevent redirect to self
        if hasattr(request, 'toolbar') and request.user.is_staff and request.toolbar.edit_mode:
            request.toolbar.redirect_url = redirect_url
        elif redirect_url not in own_urls:
            return HttpResponseRedirect(redirect_url)
    # permission checks
    if page.login_required and not request.user.is_authenticated():
        return redirect_to_login(urlquote(request.get_full_path()), settings.LOGIN_URL)
    if hasattr(request, 'toolbar'):
        request.toolbar.set_object(page)
    response = render_page(request, page, current_language=current_language, slug=slug)
    return response
| bsd-3-clause |
V-FEXrt/Pokemon-Spoof-Plus | CableClub/cable_club_trade_center.py | 1 | 1950 | from AI.team_manager import TeamManager
from cable_club_constants import TradeCenterState, Com
def reset():
    """Reset the trade-center state machine to its initial module-level state."""
    global tradeCenterState, counter, eat_byte, ate_byte, choice_byte
    tradeCenterState = TradeCenterState.CHOOSING_TRADE
    # NOTE(review): 416 appears to gate swallowing the first 'random' 0x60
    # byte in choosing_trade_process -- confirm against the link protocol.
    counter = 416
    eat_byte = False
    ate_byte = 0x0
    choice_byte = 0
# Initialise the module state on import.
reset()
def set_reset_callback(func):
    """Register *func* to be invoked after a trade is confirmed (see
    confirming_trade_process)."""
    global reset_to_init
    reset_to_init = func
def choosing_trade_process(byte):
    """Handle one link-cable byte while the player is choosing a trade slot.

    Returns the byte to echo back, possibly substituting our own slot
    choice (from TeamManager) for the player's.
    """
    global counter, tradeCenterState, eat_byte, choice_byte
    ## Eat 'random' 96 byte
    if byte == 96 and counter > 0:
        counter = 0
        return byte
    # Bytes 96..101 encode the selected trade slot (0..5).
    if byte >= 96 and byte <= 101:
        # TODO: 'seen first wait' solves this eating bytes problem better. Should use it instead
        if not eat_byte:
            # First occurrence: answer with our own offer index.
            choice_byte = TeamManager.trade_center.offerIndex(byte)
            eat_byte = True
            return choice_byte
        if eat_byte:
            # Second occurrence: selection complete, await confirmation.
            tradeCenterState = TradeCenterState.CONFIRMING_TRADE
            eat_byte = False
            return byte
    return byte
def confirming_trade_process(byte):
    """Handle one link-cable byte while the trade awaits confirmation.

    NOTE(review): 97 (0x61) appears to mean 'cancelled' and 98 (0x62)
    'confirmed'; the byte is remembered on first sight and acted upon on
    the following byte -- confirm against the link protocol.
    """
    global tradeCenterState, eat_byte, ate_byte, counter
    if byte == 97 or byte == 98:
        eat_byte = True
        ate_byte = byte
        return byte
    if eat_byte:
        eat_byte = False
        if ate_byte == 97:
            # Cancelled by partner
            tradeCenterState = TradeCenterState.CHOOSING_TRADE
            print "Trade cancelled by Player"
        if ate_byte == 98:
            # Confirmed by partner
            print "Trade confirmed by Player"
            reset_to_init()
            reset()
            TeamManager.trade_center.trade_confirmed()
    return byte
return byte
# Byte handlers indexed by TradeCenterState value.
functionSwitch = [choosing_trade_process, confirming_trade_process]
def trade_center_process_byte(byte):
    """Dispatch *byte* to the handler for the current trade-center state;
    unknown states pass the byte through unchanged."""
    if (tradeCenterState >= len(functionSwitch)):
        print "Warning: no function for Trade Center State"
        return byte
    return functionSwitch[tradeCenterState](byte)
| mit |
mateor/pants | tests/python/pants_test/help/test_scope_info_iterator.py | 11 | 2502 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.help.scope_info_iterator import ScopeInfoIterator
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.scope import ScopeInfo
from pants.subsystem.subsystem import Subsystem
from pants.subsystem.subsystem_client_mixin import SubsystemDependency
from pants.task.task import Task
class ScopeInfoIteratorTest(unittest.TestCase):
  """Checks that ScopeInfoIterator expands the requested scopes, including
  subsystem dependencies, in the expected order."""

  def test_iteration(self):
    self.maxDiff = None

    class Subsys1(Subsystem):
      options_scope = 'subsys1'

    class Subsys2(Subsystem):
      options_scope = 'subsys2'

      @classmethod
      def subsystem_dependencies(cls):
        return (SubsystemDependency(Subsys1, 'subsys2'),)

    class Goal1Task2(Task):
      options_scope = 'goal1.task12'

      @classmethod
      def subsystem_dependencies(cls):
        return (SubsystemDependency(Subsys1, 'goal1.task12'),)

    infos = [
      ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL, GlobalOptionsRegistrar),
      ScopeInfo('subsys2', ScopeInfo.SUBSYSTEM, Subsys2),
      ScopeInfo('subsys1.subsys2', ScopeInfo.SUBSYSTEM, Subsys1),
      ScopeInfo('goal1', ScopeInfo.INTERMEDIATE),
      ScopeInfo('goal1.task11', ScopeInfo.TASK),
      ScopeInfo('goal1.task12', ScopeInfo.TASK, Goal1Task2),
      ScopeInfo('subsys1.goal1.task12', ScopeInfo.SUBSYSTEM, Subsys1),
      ScopeInfo('goal2', ScopeInfo.INTERMEDIATE),
      ScopeInfo('goal2.task21', ScopeInfo.TASK),
      ScopeInfo('goal2.task22', ScopeInfo.TASK),
      ScopeInfo('goal3', ScopeInfo.INTERMEDIATE),
      ScopeInfo('goal3.task31', ScopeInfo.TASK),
      ScopeInfo('goal3.task32', ScopeInfo.TASK),
    ]

    scope_to_infos = dict((x.scope, x) for x in infos)

    it = ScopeInfoIterator(scope_to_infos)
    actual = list(it.iterate([GLOBAL_SCOPE, 'goal1', 'goal2.task21', 'goal3']))

    expected_scopes = [
      GLOBAL_SCOPE,
      'subsys2',
      'subsys1.subsys2',
      'goal1', 'goal1.task11', 'goal1.task12', 'subsys1.goal1.task12',
      'goal2.task21',
      'goal3', 'goal3.task31', 'goal3.task32',
    ]

    expected_scope_infos = [scope_to_infos[x] for x in expected_scopes]

    # Fix: assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(expected_scope_infos, actual)
| apache-2.0 |
ehashman/oh-mainline | vendor/packages/staticgenerator/staticgenerator/__init__.py | 17 | 7869 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Static file generator for Django."""
import stat
from django.utils.functional import Promise
from filesystem import FileSystem
from handlers import DummyHandler
class StaticGeneratorException(Exception):
    """Raised when StaticGenerator fails to render, write or delete a page."""
class StaticGenerator(object):
    """
    The StaticGenerator class is created for Django applications, like a blog,
    that are not updated per request.

    Usage is simple::

        from staticgenerator import quick_publish
        quick_publish('/', Post.objects.live(), FlatPage)

    The class accepts a list of 'resources' which can be any of the
    following: URL path (string), Model (class or instance), Manager, or
    QuerySet.

    As of v1.1, StaticGenerator includes file and path deletion::

        from staticgenerator import quick_delete
        quick_delete('/page-to-delete/')

    The most effective usage is to associate a StaticGenerator with a model's
    post_save and post_delete signal.

    The reason for having all the optional parameters is to reduce coupling
    with django in order for more effectively unit testing.
    """

    def __init__(self, *resources, **kw):
        # Resolve collaborators (possibly injected for tests), then flatten
        # the resources into a list of URL paths.
        self.parse_dependencies(kw)
        self.resources = self.extract_resources(resources)
        self.server_name = self.get_server_name()
        try:
            self.web_root = getattr(self.settings, 'WEB_ROOT')
        except AttributeError:
            raise StaticGeneratorException('You must specify WEB_ROOT in settings.py')

    def parse_dependencies(self, kw):
        """Pick each dependency from *kw*, falling back to Django defaults."""
        http_request = kw.get('http_request', None)
        model_base = kw.get('model_base', None)
        manager = kw.get('manager', None)
        model = kw.get('model', None)
        queryset = kw.get('queryset', None)
        settings = kw.get('settings', None)
        site = kw.get('site', None)
        fs = kw.get('fs', None)

        self.http_request = http_request
        if not http_request:
            from django.http import HttpRequest
            self.http_request = HttpRequest

        self.model_base = model_base
        if not model_base:
            from django.db.models.base import ModelBase
            self.model_base = ModelBase

        self.manager = manager
        if not manager:
            from django.db.models.manager import Manager
            self.manager = Manager

        self.model = model
        if not model:
            from django.db.models import Model
            self.model = Model

        self.queryset = queryset
        if not queryset:
            from django.db.models.query import QuerySet
            self.queryset = QuerySet

        self.settings = settings
        if not settings:
            from django.conf import settings
            self.settings = settings

        self.fs = fs
        if not fs:
            self.fs = FileSystem()

        self.site = site

    def extract_resources(self, resources):
        """Takes a list of resources, and gets paths by type"""
        extracted = []

        for resource in resources:

            # A URL string
            if isinstance(resource, (str, unicode, Promise)):
                extracted.append(str(resource))
                continue

            # A model instance; requires get_absolute_url method
            if isinstance(resource, self.model):
                extracted.append(resource.get_absolute_url())
                continue

            # If it's a Model, we get the base Manager
            if isinstance(resource, self.model_base):
                resource = resource._default_manager

            # If it's a Manager, we get the QuerySet
            if isinstance(resource, self.manager):
                resource = resource.all()

            # Append all paths from obj.get_absolute_url() to list
            if isinstance(resource, self.queryset):
                extracted += [obj.get_absolute_url() for obj in resource]

        return extracted

    def get_server_name(self):
        '''Tries to get the server name.
        First we look in the django settings.
        If it's not found we try to get it from the current Site.
        Otherwise, return "localhost".
        '''
        try:
            return getattr(self.settings, 'SERVER_NAME')
        except:
            pass

        try:
            if not self.site:
                from django.contrib.sites.models import Site
                self.site = Site
            return self.site.objects.get_current().domain
        except:
            print '*** Warning ***: Using "localhost" for domain name. Use django.contrib.sites or set settings.SERVER_NAME to disable this warning.'
            return 'localhost'

    def get_content_from_path(self, path):
        """
        Imitates a basic http request using DummyHandler to retrieve
        resulting output (HTML, XML, whatever)
        """
        request = self.http_request()
        request.path_info = path
        request.META.setdefault('SERVER_PORT', 80)
        request.META.setdefault('SERVER_NAME', self.server_name)

        handler = DummyHandler()
        try:
            response = handler(request)
        except Exception, err:
            raise StaticGeneratorException("The requested page(\"%s\") raised an exception. Static Generation failed. Error: %s" % (path, str(err)))

        if int(response.status_code) != 200:
            raise StaticGeneratorException("The requested page(\"%s\") returned http code %d. Static Generation failed." % (path, int(response.status_code)))

        return response.content

    def get_filename_from_path(self, path):
        """
        Returns (filename, directory)
        Creates index.html for path if necessary
        """
        if path.endswith('/'):
            path = '%sindex.html' % path

        filename = self.fs.join(self.web_root, path.lstrip('/')).encode('utf-8')
        return filename, self.fs.dirname(filename)

    def publish_from_path(self, path, content=None):
        """
        Gets filename and content for a path, attempts to create directory if
        necessary, writes to file.
        """
        filename, directory = self.get_filename_from_path(path)
        if not content:
            content = self.get_content_from_path(path)

        if not self.fs.exists(directory):
            try:
                self.fs.makedirs(directory)
            except:
                raise StaticGeneratorException('Could not create the directory: %s' % directory)

        try:
            # Write to a temp file then rename, so readers never observe a
            # partially-written page.
            f, tmpname = self.fs.tempfile(directory=directory)
            self.fs.write(f, content)
            self.fs.close(f)
            self.fs.chmod(tmpname, stat.S_IREAD | stat.S_IWRITE | stat.S_IWUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
            self.fs.rename(tmpname, filename)
        except:
            raise StaticGeneratorException('Could not create the file: %s' % filename)

    def delete_from_path(self, path):
        """Deletes file, attempts to delete directory"""
        filename, directory = self.get_filename_from_path(path)
        try:
            if self.fs.exists(filename):
                self.fs.remove(filename)
        except:
            raise StaticGeneratorException('Could not delete file: %s' % filename)

        try:
            self.fs.rmdir(directory)
        except OSError:
            # Will fail if a directory is not empty, in which case we don't
            # want to delete it anyway
            pass

    def do_all(self, func):
        # Apply *func* to every extracted resource path.
        return [func(path) for path in self.resources]

    def delete(self):
        return self.do_all(self.delete_from_path)

    def publish(self):
        return self.do_all(self.publish_from_path)
def quick_publish(*resources):
    """Convenience wrapper: publish all *resources* in one call."""
    generator = StaticGenerator(*resources)
    return generator.publish()
def quick_delete(*resources):
    """Convenience wrapper: delete the generated files for all *resources*."""
    generator = StaticGenerator(*resources)
    return generator.delete()
| agpl-3.0 |
srikk595/Multilingual-Search-System-for-tweets | partA/venv/lib/python2.7/site-packages/pip/vcs/bazaar.py | 280 | 4427 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
    """pip VCS backend for Bazaar (bzr) repositories."""

    name = 'bzr'
    dirname = '.bzr'
    repo_name = 'branch'
    schemes = (
        'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
        'bzr+lp',
    )

    def __init__(self, url=None, *args, **kwargs):
        super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
        # Register lp but do not expose as a scheme to support bzr+lp.
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(['lp'])
            urllib_parse.non_hierarchical.extend(['lp'])

    def export(self, location):
        """
        Export the Bazaar repository at the url to the destination location
        """
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        if os.path.exists(location):
            # Remove the location to make sure Bazaar can export it correctly
            rmtree(location)
        try:
            self.run_command(['export', location], cwd=temp_dir,
                             show_stdout=False)
        finally:
            rmtree(temp_dir)

    def switch(self, dest, url, rev_options):
        # Point the existing checkout at a new branch URL.
        self.run_command(['switch', url], cwd=dest)

    def update(self, dest, rev_options):
        # Pull the (possibly pinned) revision into an existing checkout.
        self.run_command(['pull', '-q'] + rev_options, cwd=dest)

    def obtain(self, dest):
        """Branch the repository into *dest*, honouring a pinned revision."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = ['-r', rev]
            rev_display = ' (to revision %s)' % rev
        else:
            rev_options = []
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.info(
                'Checking out %s%s to %s',
                url,
                rev_display,
                display_path(dest),
            )
            self.run_command(['branch', '-q'] + rev_options + [url, dest])

    def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
        url, rev = super(Bazaar, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'bzr+' + url
        return url, rev

    def get_url(self, location):
        """Return the remote URL of the checkout at *location*, or None."""
        urls = self.run_command(['info'], show_stdout=False, cwd=location)
        for line in urls.splitlines():
            line = line.strip()
            for x in ('checkout of branch: ',
                      'parent branch: '):
                if line.startswith(x):
                    repo = line.split(x)[1]
                    if self._is_local_repository(repo):
                        return path_to_url(repo)
                    return repo
        return None

    def get_revision(self, location):
        # `bzr revno` prints the current revision number on its last line.
        revision = self.run_command(
            ['revno'], show_stdout=False, cwd=location)
        return revision.splitlines()[-1]

    def get_tag_revs(self, location):
        """Return a mapping of revision -> tag for the checkout at *location*."""
        tags = self.run_command(
            ['tags'], show_stdout=False, cwd=location)
        tag_revs = []
        for line in tags.splitlines():
            tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
            if tags_match:
                tag = tags_match.group(1)
                rev = tags_match.group(2)
                tag_revs.append((rev.strip(), tag.strip()))
        return dict(tag_revs)

    def get_src_requirement(self, dist, location, find_tags):
        """Build a 'bzr+URL@rev#egg=name' requirement string for *dist*."""
        repo = self.get_url(location)
        if not repo:
            return None
        if not repo.lower().startswith('bzr:'):
            repo = 'bzr+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        current_rev = self.get_revision(location)
        tag_revs = self.get_tag_revs(location)

        if current_rev in tag_revs:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
        else:
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
| gpl-2.0 |
frdb194/django | tests/utils_tests/test_lazyobject.py | 268 | 8629 | from __future__ import unicode_literals
import copy
import pickle
import sys
from unittest import TestCase
from django.utils import six
from django.utils.functional import LazyObject, SimpleLazyObject, empty
class Foo(object):
    """A trivial object with a single attribute; equality compares only
    that attribute."""

    foo = 'bar'

    def __eq__(self, other):
        # Delegates entirely to 'foo'; comparing against an object without
        # a 'foo' attribute raises AttributeError, as before.
        return other.foo == self.foo
class LazyObjectTestCase(TestCase):
def lazy_wrap(self, wrapped_object):
"""
Wrap the given object into a LazyObject
"""
class AdHocLazyObject(LazyObject):
def _setup(self):
self._wrapped = wrapped_object
return AdHocLazyObject()
def test_getattr(self):
obj = self.lazy_wrap(Foo())
self.assertEqual(obj.foo, 'bar')
def test_setattr(self):
obj = self.lazy_wrap(Foo())
obj.foo = 'BAR'
obj.bar = 'baz'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_setattr2(self):
# Same as test_setattr but in reversed order
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
obj.foo = 'BAR'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_delattr(self):
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
self.assertEqual(obj.bar, 'baz')
del obj.bar
with self.assertRaises(AttributeError):
obj.bar
def test_cmp(self):
obj1 = self.lazy_wrap('foo')
obj2 = self.lazy_wrap('bar')
obj3 = self.lazy_wrap('foo')
self.assertEqual(obj1, 'foo')
self.assertEqual(obj1, obj3)
self.assertNotEqual(obj1, obj2)
self.assertNotEqual(obj1, 'bar')
def test_bytes(self):
obj = self.lazy_wrap(b'foo')
self.assertEqual(bytes(obj), b'foo')
def test_text(self):
obj = self.lazy_wrap('foo')
self.assertEqual(six.text_type(obj), 'foo')
def test_bool(self):
# Refs #21840
for f in [False, 0, (), {}, [], None, set()]:
self.assertFalse(self.lazy_wrap(f))
for t in [True, 1, (1,), {1: 2}, [1], object(), {1}]:
self.assertTrue(t)
def test_dir(self):
obj = self.lazy_wrap('foo')
self.assertEqual(dir(obj), dir('foo'))
def test_len(self):
for seq in ['asd', [1, 2, 3], {'a': 1, 'b': 2, 'c': 3}]:
obj = self.lazy_wrap(seq)
self.assertEqual(len(obj), 3)
def test_class(self):
self.assertIsInstance(self.lazy_wrap(42), int)
class Bar(Foo):
pass
self.assertIsInstance(self.lazy_wrap(Bar()), Foo)
def test_hash(self):
obj = self.lazy_wrap('foo')
d = {}
d[obj] = 'bar'
self.assertIn('foo', d)
self.assertEqual(d['foo'], 'bar')
def test_contains(self):
test_data = [
('c', 'abcde'),
(2, [1, 2, 3]),
('a', {'a': 1, 'b': 2, 'c': 3}),
(2, {1, 2, 3}),
]
for needle, haystack in test_data:
self.assertIn(needle, self.lazy_wrap(haystack))
# __contains__ doesn't work when the haystack is a string and the needle a LazyObject
for needle_haystack in test_data[1:]:
self.assertIn(self.lazy_wrap(needle), haystack)
self.assertIn(self.lazy_wrap(needle), self.lazy_wrap(haystack))
def test_getitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(obj_list[0], 1)
self.assertEqual(obj_list[-1], 3)
self.assertEqual(obj_list[1:2], [2])
self.assertEqual(obj_dict['b'], 2)
with self.assertRaises(IndexError):
obj_list[3]
with self.assertRaises(KeyError):
obj_dict['f']
def test_setitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
obj_list[0] = 100
self.assertEqual(obj_list, [100, 2, 3])
obj_list[1:2] = [200, 300, 400]
self.assertEqual(obj_list, [100, 200, 300, 400, 3])
obj_dict['a'] = 100
obj_dict['d'] = 400
self.assertEqual(obj_dict, {'a': 100, 'b': 2, 'c': 3, 'd': 400})
def test_delitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
del obj_list[-1]
del obj_dict['c']
self.assertEqual(obj_list, [1, 2])
self.assertEqual(obj_dict, {'a': 1, 'b': 2})
with self.assertRaises(IndexError):
del obj_list[3]
with self.assertRaises(KeyError):
del obj_dict['f']
def test_iter(self):
# Tests whether an object's custom `__iter__` method is being
# used when iterating over it.
class IterObject(object):
def __init__(self, values):
self.values = values
def __iter__(self):
return iter(self.values)
original_list = ['test', '123']
self.assertEqual(
list(self.lazy_wrap(IterObject(original_list))),
original_list
)
def test_pickle(self):
# See ticket #16563
obj = self.lazy_wrap(Foo())
pickled = pickle.dumps(obj)
unpickled = pickle.loads(pickled)
self.assertIsInstance(unpickled, Foo)
self.assertEqual(unpickled, obj)
self.assertEqual(unpickled.foo, obj.foo)
def test_deepcopy(self):
# Check that we *can* do deep copy, and that it returns the right
# objects.
l = [1, 2, 3]
obj = self.lazy_wrap(l)
len(l) # forces evaluation
obj2 = copy.deepcopy(obj)
self.assertIsInstance(obj2, list)
self.assertEqual(obj2, [1, 2, 3])
    def test_deepcopy_no_evaluation(self):
        # copying doesn't force evaluation
        l = [1, 2, 3]
        obj = self.lazy_wrap(l)
        obj2 = copy.deepcopy(obj)
        # Copying shouldn't force evaluation: both the source and the copy
        # must still hold the `empty` sentinel in _wrapped afterwards.
        self.assertIs(obj._wrapped, empty)
        self.assertIs(obj2._wrapped, empty)
class SimpleLazyObjectTestCase(LazyObjectTestCase):
    # By inheriting from LazyObjectTestCase and redefining the lazy_wrap()
    # method which all testcases use, we get to make sure all behaviors
    # tested in the parent testcase also apply to SimpleLazyObject.
    def lazy_wrap(self, wrapped_object):
        return SimpleLazyObject(lambda: wrapped_object)
    def test_repr(self):
        # First, for an unevaluated SimpleLazyObject
        obj = self.lazy_wrap(42)
        # __repr__ contains __repr__ of setup function and does not evaluate
        # the SimpleLazyObject
        six.assertRegex(self, repr(obj), '^<SimpleLazyObject:')
        self.assertIs(obj._wrapped, empty) # make sure evaluation hasn't been triggered
        self.assertEqual(obj, 42) # evaluate the lazy object
        self.assertIsInstance(obj._wrapped, int)
        self.assertEqual(repr(obj), '<SimpleLazyObject: 42>')
    def test_trace(self):
        # See ticket #19456
        # Regression test: having a trace function installed that touches
        # self.__class__ must not trigger premature evaluation or recursion
        # while constructing a SimpleLazyObject.
        old_trace_func = sys.gettrace()
        try:
            def trace_func(frame, event, arg):
                frame.f_locals['self'].__class__
                if old_trace_func is not None:
                    old_trace_func(frame, event, arg)
            sys.settrace(trace_func)
            self.lazy_wrap(None)
        finally:
            # always restore the previous trace function
            sys.settrace(old_trace_func)
    def test_none(self):
        # The setup function must be called exactly once even when it
        # returns None (i.e. None is memoized, not re-computed).
        i = [0]
        def f():
            i[0] += 1
            return None
        x = SimpleLazyObject(f)
        self.assertEqual(str(x), "None")
        self.assertEqual(i, [1])
        self.assertEqual(str(x), "None")
        self.assertEqual(i, [1])
    def test_dict(self):
        # See ticket #18447
        # dict protocol (getitem/setitem/contains/len/delitem) is proxied.
        lazydict = SimpleLazyObject(lambda: {'one': 1})
        self.assertEqual(lazydict['one'], 1)
        lazydict['one'] = -1
        self.assertEqual(lazydict['one'], -1)
        self.assertIn('one', lazydict)
        self.assertNotIn('two', lazydict)
        self.assertEqual(len(lazydict), 1)
        del lazydict['one']
        with self.assertRaises(KeyError):
            lazydict['one']
    def test_list_set(self):
        # membership and len() are proxied for lists and sets alike
        lazy_list = SimpleLazyObject(lambda: [1, 2, 3, 4, 5])
        lazy_set = SimpleLazyObject(lambda: {1, 2, 3, 4})
        self.assertIn(1, lazy_list)
        self.assertIn(1, lazy_set)
        self.assertNotIn(6, lazy_list)
        self.assertNotIn(6, lazy_set)
        self.assertEqual(len(lazy_list), 5)
        self.assertEqual(len(lazy_set), 4)
| bsd-3-clause |
jcpowermac/ansible | lib/ansible/modules/cloud/ovirt/ovirt_group_facts.py | 74 | 3548 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_group_facts
short_description: Retrieve facts about one or more oVirt/RHV groups
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV groups."
notes:
- "This module creates a new top-level C(ovirt_groups) fact, which
contains a list of groups."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search group X use following pattern: name=X"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all groups which names start with C(admin):
- ovirt_group_facts:
pattern: name=admin*
- debug:
var: ovirt_groups
'''
RETURN = '''
ovirt_groups:
    description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Entry point: look up oVirt/RHV groups and return them as facts."""
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)

    if module._name == 'ovirt_groups_facts':
        # Deprecation versions must be strings: a float would mangle
        # versions with a trailing zero (e.g. "2.10" -> 2.1).
        module.deprecate("The 'ovirt_groups_facts' module is being renamed 'ovirt_group_facts'", version='2.8')

    check_sdk(module)

    # Initialize up front so the finally clause below cannot hit an
    # unbound name if create_connection() (or params.pop) raises.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        groups_service = connection.system_service().groups_service()
        groups = groups_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_groups=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in groups
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out/close if a connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jorgebodega/PDL | library/test/yacc_badrule.py | 174 | 1525 | # -----------------------------------------------------------------------------
# yacc_badrule.py
#
# Syntax problems in the rule strings
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
    # NOTE: the rule string below is INTENTIONALLY malformed (it is missing
    # the ':' separator) -- this whole file exercises yacc's bad-rule
    # diagnostics, so do not "fix" the docstring.
    'statement NAME EQUALS expression'
    names[t[1]] = t[3]
def p_statement_expr(t):
    # NOTE: intentionally malformed rule (no ':' and no right-hand side);
    # yacc should report a syntax problem for this rule string.
    'statement'
    print(t[1])
def p_expression_binop(t):
    # NOTE: the second alternative below intentionally lacks the leading
    # '|' -- another deliberately bad rule string for yacc to flag.
    '''expression : expression PLUS expression
    expression MINUS expression
    | expression TIMES expression
    | expression DIVIDE expression'''
    if t[2] == '+' : t[0] = t[1] + t[3]
    elif t[2] == '-': t[0] = t[1] - t[3]
    elif t[2] == '*': t[0] = t[1] * t[3]
    elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
    # NOTE: 'expression:' (no space before the colon) is intentionally
    # malformed; yacc should complain about this rule's formatting.
    'expression: MINUS expression %prec UMINUS'
    t[0] = -t[2]
def p_expression_group(t):
    # a correctly formatted rule: parenthesized expression
    'expression : LPAREN expression RPAREN'
    t[0] = t[2]
def p_expression_number(t):
    # a correctly formatted rule: numeric literal
    'expression : NUMBER'
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Look the identifier up in the global name table; an unknown name
    # evaluates to 0 after printing a warning.
    if t[1] in names:
        t[0] = names[t[1]]
    else:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    # Called by yacc for a token that matches no grammar rule.
    print("Syntax error at '{0}'".format(t.value))
# Build the parser; with the malformed rules above, yacc emits its
# bad-rule diagnostics at this point (which is what this file tests).
yacc.yacc()
| gpl-3.0 |
axilleas/ansible-modules-core | utilities/helper/accelerate.py | 64 | 26967 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: accelerate
short_description: Enable accelerated mode on remote node
description:
- This modules launches an ephemeral I(accelerate) daemon on the remote node which
Ansible can use to communicate with nodes at high speed.
- The daemon listens on a configurable port for a configurable amount of time.
- Fireball mode is AES encrypted
version_added: "1.3"
options:
port:
description:
- TCP port for the socket connection
required: false
default: 5099
aliases: []
timeout:
description:
- The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed.
required: false
default: 300
aliases: []
minutes:
description:
- The I(accelerate) listener daemon is started on nodes and will stay around for
this number of minutes before turning itself off.
required: false
default: 30
ipv6:
description:
- The listener daemon on the remote host will bind to the ipv6 localhost socket
if this parameter is set to true.
required: false
default: false
multi_key:
description:
- When enabled, the daemon will open a local socket file which can be used by future daemon executions to
upload a new key to the already running daemon, so that multiple users can connect using different keys.
This access still requires an ssh connection as the uid for which the daemon is currently running.
required: false
default: no
version_added: "1.6"
notes:
- See the advanced playbooks chapter for more about using accelerated mode.
requirements: [ "python-keyczar" ]
author: James Cammarata
'''
EXAMPLES = '''
# To use accelerate mode, simply add "accelerate: true" to your play. The initial
# key exchange and starting up of the daemon will occur over SSH, but all commands and
# subsequent actions will be conducted over the raw socket connection using AES encryption
- hosts: devservers
accelerate: true
tasks:
- command: /usr/bin/anything
'''
import base64
import errno
import getpass
import json
import os
import os.path
import pwd
import signal
import socket
import struct
import sys
import syslog
import tempfile
import time
import traceback
import SocketServer
from datetime import datetime
from threading import Thread, Lock
# import module snippets
# we must import this here at the top so we can use get_module_path()
from ansible.module_utils.basic import *
# Tag this module's syslog output so daemon log lines are attributable.
syslog.openlog('ansible-%s' % os.path.basename(__file__))
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (100 bytes)
# 4 * (975/3) + 100 = 1400
# which leaves room for the TCP/IP header
# NOTE(review): the value below (10240) no longer matches the 1400-byte
# calculation above -- presumably raised later for throughput; confirm.
CHUNK_SIZE=10240
# FIXME: this all should be moved to module_common, as it's
# pretty much a copy from the callbacks/util code
DEBUG_LEVEL = 0

def log(msg, cap=0):
    """Send msg to syslog when the global verbosity is at least cap."""
    global DEBUG_LEVEL
    if DEBUG_LEVEL >= cap:
        syslog.syslog(syslog.LOG_NOTICE | syslog.LOG_DAEMON, msg)
def v(msg):
    # log at verbosity level 1
    log(msg, cap=1)
def vv(msg):
    # log at verbosity level 2
    log(msg, cap=2)
def vvv(msg):
    # log at verbosity level 3
    log(msg, cap=3)
def vvvv(msg):
    # log at verbosity level 4 (wire-level detail)
    log(msg, cap=4)
# keyczar supplies the AES keys that encrypt all accelerate traffic; its
# absence is reported to the controller from main() instead of failing here.
HAS_KEYCZAR = False
try:
    from keyczar.keys import AesKey
    HAS_KEYCZAR = True
except ImportError:
    pass
# Unix domain socket used by later module invocations to hand additional
# keys to an already-running daemon (multi_key mode).
SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket")
def get_pid_location(module):
    """
    Return the path to use for the accelerate daemon's pid file.

    Tries the common run-time directories first, falling back to the
    user's home directory, and fails the module if none is usable.
    """
    # 'candidate' instead of 'dir' to avoid shadowing the builtin.
    for candidate in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]:
        try:
            if os.path.isdir(candidate) and os.access(candidate, os.R_OK | os.W_OK):
                return os.path.join(candidate, '.accelerate.pid')
        except OSError:
            # Narrowed from a bare except: only filesystem/access errors
            # are expected here; skip the directory and keep looking.
            pass
    module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file")
# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move
# this into utils.module_common and probably should anyway
def daemonize_self(module, password, port, minutes, pid_file):
    # Detach the current process using the classic double-fork recipe so the
    # daemon is re-parented to init and has no controlling terminal.
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            vvv("exiting pid %s" % pid)
            # exit first parent -- this is what reports success back to the
            # controller over the original SSH connection
            module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid)))
    except OSError, e:
        log("fork #1 failed: %d (%s)" % (e.errno, e.strerror))
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(022)
    # do second fork (prevents the daemon from ever reacquiring a tty)
    try:
        pid = os.fork()
        if pid > 0:
            log("daemon pid %s, writing %s" % (pid, pid_file))
            # note: rebinds the pid_file *path* parameter to the file object
            pid_file = open(pid_file, "w")
            pid_file.write("%s" % pid)
            pid_file.close()
            vvv("pid file written")
            sys.exit(0)
    except OSError, e:
        log("fork #2 failed: %d (%s)" % (e.errno, e.strerror))
        sys.exit(1)
    # point the standard streams at /dev/null; all output goes to syslog
    dev_null = file('/dev/null','rw')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
    log("daemonizing successful")
class LocalSocketThread(Thread):
    # Background thread that listens on a Unix domain socket so later module
    # runs (as the same uid) can upload additional AES keys to a daemon that
    # is already running (multi_key mode).
    server = None
    terminated = False
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
        # the owning ThreadedTCPServer is passed in via kwargs['server']
        self.server = kwargs.get('server')
        Thread.__init__(self, group, target, name, args, kwargs, Verbose)
    def run(self):
        # Prepare the socket path: remove a stale socket, or create/secure
        # the containing directory (socket files derive their permissions
        # from the directory that contains them).
        try:
            if os.path.exists(SOCKET_FILE):
                os.remove(SOCKET_FILE)
            else:
                dir = os.path.dirname(SOCKET_FILE)
                if os.path.exists(dir):
                    if not os.path.isdir(dir):
                        log("The socket file path (%s) exists, but is not a directory. No local connections will be available" % dir)
                        return
                    else:
                        # make sure the directory is accessible only to this
                        # user, as socket files derive their permissions from
                        # the directory that contains them
                        os.chmod(dir, 0700)
                elif not os.path.exists(dir):
                    os.makedirs(dir, 0700)
        except OSError:
            pass
        self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.s.bind(SOCKET_FILE)
        self.s.listen(5)
        while not self.terminated:
            try:
                conn, addr = self.s.accept()
                vv("received local connection")
                # read one newline-terminated keyczar key blob
                data = ""
                while "\n" not in data:
                    data += conn.recv(2048)
                try:
                    new_key = AesKey.Read(data.strip())
                    # duplicate check: the key is "known" if it can decrypt
                    # a probe encrypted by any key already in the list
                    found = False
                    for key in self.server.key_list:
                        try:
                            new_key.Decrypt(key.Encrypt("foo"))
                            found = True
                            break
                        except:
                            pass
                    if not found:
                        vv("adding new key to the key list")
                        self.server.key_list.append(new_key)
                        conn.sendall("OK\n")
                    else:
                        vv("key already exists in the key list, ignoring")
                        conn.sendall("EXISTS\n")
                    # update the last event time so the server doesn't
                    # shutdown sooner than expected for new clients
                    try:
                        self.server.last_event_lock.acquire()
                        self.server.last_event = datetime.now()
                    finally:
                        self.server.last_event_lock.release()
                except Exception, e:
                    vv("key loaded locally was invalid, ignoring (%s)" % e)
                    conn.sendall("BADKEY\n")
                finally:
                    try:
                        conn.close()
                    except:
                        pass
            except:
                # accept() fails with an exception when terminate() shuts
                # the socket down; the loop condition then ends the thread
                pass
    def terminate(self):
        # Signal the accept loop to stop and tear the socket down.
        self.terminated = True
        self.s.shutdown(socket.SHUT_RDWR)
        self.s.close()
class ThreadWithReturnValue(Thread):
    # A Thread that captures the target's return value so join() can hand it
    # back to the caller. Relies on Python 2's name-mangled Thread
    # attributes (_Thread__target etc.); not portable to Python 3.
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs, Verbose)
        self._return = None
    def run(self):
        if self._Thread__target is not None:
            self._return = self._Thread__target(*self._Thread__args,
                                **self._Thread__kwargs)
    def join(self,timeout=None):
        # Returns the target's return value (None if it hasn't finished).
        Thread.join(self, timeout=timeout)
        return self._return
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
    """
    The accelerate daemon's listening server; each client connection is
    serviced on its own thread by ThreadedTCPRequestHandler.
    """
    # AES keys accepted for incoming connections (shared with handlers)
    key_list = []
    # timestamp of the most recent client activity; the idle-timeout alarm
    # in daemonize() reads this to decide when to shut the daemon down
    last_event = datetime.now()
    last_event_lock = Lock()
    def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False):
        self.module = module
        self.key_list.append(AesKey.Read(password))
        self.allow_reuse_address = True
        self.timeout = timeout
        if use_ipv6:
            self.address_family = socket.AF_INET6
        if self.module.params.get('multi_key', False):
            vv("starting thread to handle local connections for multiple keys")
            self.local_thread = LocalSocketThread(kwargs=dict(server=self))
            self.local_thread.start()
        SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
    def shutdown(self):
        # BUGFIX: self.local_thread only exists when multi_key mode started
        # the local socket thread above; without this guard, shutting down a
        # single-key daemon raised AttributeError.
        if hasattr(self, 'local_thread'):
            self.local_thread.terminate()
        self.running = False
        SocketServer.ThreadingTCPServer.shutdown(self)
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Handles one client connection. Wire format: every message is an
    # 8-byte big-endian length header followed by that many bytes of
    # AES-encrypted JSON.
    # the key to use for this connection
    active_key = None
    def send_data(self, data):
        # Touch the server's last-activity timestamp, then send a
        # length-prefixed frame.
        try:
            self.server.last_event_lock.acquire()
            self.server.last_event = datetime.now()
        finally:
            self.server.last_event_lock.release()
        packed_len = struct.pack('!Q', len(data))
        return self.request.sendall(packed_len + data)
    def recv_data(self):
        # Read one length-prefixed frame; returns None on EOF or error.
        header_len = 8 # size of a packed unsigned long long
        data = ""
        vvvv("in recv_data(), waiting for the header")
        while len(data) < header_len:
            try:
                d = self.request.recv(header_len - len(data))
                if not d:
                    vvv("received nothing, bailing out")
                    return None
                data += d
            except:
                # probably got a connection reset
                vvvv("exception received while waiting for recv(), returning None")
                return None
        vvvv("in recv_data(), got the header, unpacking")
        data_len = struct.unpack('!Q',data[:header_len])[0]
        data = data[header_len:]
        vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
        while len(data) < data_len:
            try:
                d = self.request.recv(data_len - len(data))
                if not d:
                    vvv("received nothing, bailing out")
                    return None
                data += d
                vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
            except:
                # probably got a connection reset
                vvvv("exception received while waiting for recv(), returning None")
                return None
        vvvv("received all of the data, returning")
        # record activity so the idle timer doesn't fire mid-transfer
        try:
            self.server.last_event_lock.acquire()
            self.server.last_event = datetime.now()
        finally:
            self.server.last_event_lock.release()
        return data
    def handle(self):
        # Main request loop: decrypt each frame, dispatch on its 'mode'
        # field (command/put/fetch/validate_user), and send the encrypted
        # JSON response back.
        try:
            while True:
                vvvv("waiting for data")
                data = self.recv_data()
                if not data:
                    vvvv("received nothing back from recv_data(), breaking out")
                    break
                vvvv("got data, decrypting")
                if not self.active_key:
                    # first frame: find which registered key decrypts it and
                    # pin that key to this connection
                    for key in self.server.key_list:
                        try:
                            data = key.Decrypt(data)
                            self.active_key = key
                            break
                        except:
                            pass
                    else:
                        vv("bad decrypt, exiting the connection handler")
                        return
                else:
                    try:
                        data = self.active_key.Decrypt(data)
                    except:
                        vv("bad decrypt, exiting the connection handler")
                        return
                vvvv("decryption done, loading json from the data")
                data = json.loads(data)
                mode = data['mode']
                response = {}
                last_pong = datetime.now()
                if mode == 'command':
                    vvvv("received a command request, running it")
                    # run the command on a worker thread so we can send
                    # keepalive 'pong' frames while it is still executing
                    twrv = ThreadWithReturnValue(target=self.command, args=(data,))
                    twrv.start()
                    response = None
                    while twrv.is_alive():
                        if (datetime.now() - last_pong).seconds >= 15:
                            last_pong = datetime.now()
                            vvvv("command still running, sending keepalive packet")
                            data2 = json.dumps(dict(pong=True))
                            data2 = self.active_key.Encrypt(data2)
                            self.send_data(data2)
                        time.sleep(0.1)
                    response = twrv._return
                    vvvv("thread is done, response from join was %s" % response)
                elif mode == 'put':
                    vvvv("received a put request, putting it")
                    response = self.put(data)
                elif mode == 'fetch':
                    vvvv("received a fetch request, getting it")
                    response = self.fetch(data)
                elif mode == 'validate_user':
                    vvvv("received a request to validate the user id")
                    response = self.validate_user(data)
                vvvv("response result is %s" % str(response))
                json_response = json.dumps(response)
                vvvv("dumped json is %s" % json_response)
                data2 = self.active_key.Encrypt(json_response)
                vvvv("sending the response back to the controller")
                self.send_data(data2)
                vvvv("done sending the response")
                if mode == 'validate_user' and response.get('rc') == 1:
                    # uid mismatch means the daemon belongs to someone else;
                    # shut it down rather than keep serving
                    vvvv("detected a uid mismatch, shutting down")
                    self.server.shutdown()
        except:
            tb = traceback.format_exc()
            log("encountered an unhandled exception in the handle() function")
            log("error was:\n%s" % tb)
            if self.active_key:
                data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function"))
                data2 = self.active_key.Encrypt(data2)
                self.send_data(data2)
    def validate_user(self, data):
        # Verify the daemon is running as the requested username; rc=0 on
        # match, rc=1 on mismatch (which triggers a shutdown in handle()).
        if 'username' not in data:
            return dict(failed=True, msg='No username specified')
        vvvv("validating we're running as %s" % data['username'])
        # get the current uid
        c_uid = os.getuid()
        try:
            # the target uid
            t_uid = pwd.getpwnam(data['username']).pw_uid
        except:
            vvvv("could not find user %s" % data['username'])
            return dict(failed=True, msg='could not find user %s' % data['username'])
        # and return rc=0 for success, rc=1 for failure
        if c_uid == t_uid:
            return dict(rc=0)
        else:
            return dict(rc=1)
    def command(self, data):
        # Execute a shell command on behalf of the controller and return
        # rc/stdout/stderr.
        if 'cmd' not in data:
            return dict(failed=True, msg='internal error: cmd is required')
        if 'tmp_path' not in data:
            return dict(failed=True, msg='internal error: tmp_path is required')
        vvvv("executing: %s" % data['cmd'])
        # an explicit executable implies the caller wants shell semantics
        use_unsafe_shell = False
        executable = data.get('executable')
        if executable:
            use_unsafe_shell = True
        rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell, close_fds=True)
        if stdout is None:
            stdout = ''
        if stderr is None:
            stderr = ''
        vvvv("got stdout: %s" % stdout)
        vvvv("got stderr: %s" % stderr)
        return dict(rc=rc, stdout=stdout, stderr=stderr)
    def fetch(self, data):
        # Stream a file to the controller in CHUNK_SIZE pieces, waiting for
        # an acknowledgement frame after each chunk.
        if 'in_path' not in data:
            return dict(failed=True, msg='internal error: in_path is required')
        try:
            fd = file(data['in_path'], 'rb')
            fstat = os.stat(data['in_path'])
            vvv("FETCH file is %d bytes" % fstat.st_size)
            while fd.tell() < fstat.st_size:
                data = fd.read(CHUNK_SIZE)
                last = False
                if fd.tell() >= fstat.st_size:
                    last = True
                data = dict(data=base64.b64encode(data), last=last)
                data = json.dumps(data)
                data = self.active_key.Encrypt(data)
                if self.send_data(data):
                    return dict(failed=True, stderr="failed to send data")
                response = self.recv_data()
                if not response:
                    log("failed to get a response, aborting")
                    # NOTE(review): self.host does not appear to be set
                    # anywhere on this handler; this %s likely raises -- but
                    # it is inside the except-protected path. Confirm.
                    return dict(failed=True, stderr="Failed to get a response from %s" % self.host)
                response = self.active_key.Decrypt(response)
                response = json.loads(response)
                if response.get('failed',False):
                    log("got a failed response from the master")
                    return dict(failed=True, stderr="Master reported failure, aborting transfer")
        except Exception, e:
            fd.close()
            tb = traceback.format_exc()
            log("failed to fetch the file: %s" % tb)
            return dict(failed=True, stderr="Could not fetch the file: %s" % str(e))
        fd.close()
        return dict()
    def put(self, data):
        # Receive a file from the controller chunk by chunk. If the target
        # user differs, write to a private temp file first and atomically
        # move it into place afterwards.
        if 'data' not in data:
            return dict(failed=True, msg='internal error: data is required')
        if 'out_path' not in data:
            return dict(failed=True, msg='internal error: out_path is required')
        final_path = None
        if 'user' in data and data.get('user') != getpass.getuser():
            vvv("the target user doesn't match this user, we'll move the file into place via sudo")
            tmp_path = os.path.expanduser('~/.ansible/tmp/')
            if not os.path.exists(tmp_path):
                try:
                    os.makedirs(tmp_path, 0700)
                except:
                    return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path)
            (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path)
            out_fd = os.fdopen(fd, 'w', 0)
            final_path = data['out_path']
        else:
            out_path = data['out_path']
            out_fd = open(out_path, 'w')
        try:
            bytes=0
            while True:
                out = base64.b64decode(data['data'])
                bytes += len(out)
                out_fd.write(out)
                # acknowledge the chunk so the sender transmits the next one
                response = json.dumps(dict())
                response = self.active_key.Encrypt(response)
                self.send_data(response)
                if data['last']:
                    break
                data = self.recv_data()
                if not data:
                    # NOTE(review): raising a string is invalid; in practice
                    # this raises TypeError, which the bare except below
                    # turns into the generic failure path. Confirm intent.
                    raise ""
                data = self.active_key.Decrypt(data)
                data = json.loads(data)
        except:
            out_fd.close()
            tb = traceback.format_exc()
            log("failed to put the file: %s" % tb)
            return dict(failed=True, stdout="Could not write the file")
        vvvv("wrote %d bytes" % bytes)
        out_fd.close()
        if final_path:
            vvv("moving %s to %s" % (out_path, final_path))
            self.server.module.atomic_move(out_path, final_path)
        return dict()
def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file):
    # Fork into the background, start the TCP server, and arrange a
    # SIGALRM-driven idle timer that shuts the daemon down after `minutes`
    # of inactivity.
    try:
        daemonize_self(module, password, port, minutes, pid_file)
        def timer_handler(signum, _):
            try:
                server.last_event_lock.acquire()
                td = datetime.now() - server.last_event
                # older python timedelta objects don't have total_seconds(),
                # so we use the formula from the docs to calculate it
                total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
                if total_seconds >= minutes * 60:
                    log("server has been idle longer than the timeout, shutting down")
                    server.running = False
                    server.shutdown()
                else:
                    # reschedule the check
                    vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60))
                    signal.alarm(30)
            except:
                pass
            finally:
                server.last_event_lock.release()
        signal.signal(signal.SIGALRM, timer_handler)
        signal.alarm(30)
        # retry server creation a few times in case the port is briefly busy
        tries = 5
        while tries > 0:
            try:
                if use_ipv6:
                    address = ("::", port)
                else:
                    address = ("0.0.0.0", port)
                server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6)
                server.allow_reuse_address = True
                break
            except Exception, e:
                vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e))
                tries -= 1
                time.sleep(0.2)
        if tries == 0:
            vv("Maximum number of attempts to create the TCP server reached, bailing out")
            raise Exception("max # of attempts to serve reached")
        # run the server in a separate thread to make signal handling work
        server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1))
        server_thread.start()
        server.running = True
        v("serving!")
        # the main thread just sleeps so SIGALRM can be delivered to it
        while server.running:
            time.sleep(1)
        # wait for the thread to exit fully
        server_thread.join()
        v("server thread terminated, exiting!")
        sys.exit(0)
    except Exception, e:
        tb = traceback.format_exc()
        log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb))
        sys.exit(0)
def main():
    # Module entry point: either hand a new key to an already-running
    # daemon (multi_key mode) or daemonize a fresh accelerate listener.
    global DEBUG_LEVEL
    module = AnsibleModule(
        argument_spec = dict(
            port=dict(required=False, default=5099),
            ipv6=dict(required=False, default=False, type='bool'),
            multi_key=dict(required=False, default=False, type='bool'),
            timeout=dict(required=False, default=300),
            password=dict(required=True),
            minutes=dict(required=False, default=30),
            debug=dict(required=False, default=0, type='int')
        ),
        supports_check_mode=True
    )
    # the password arrives base64-encoded; decode it back to the raw
    # keyczar key material
    password = base64.b64decode(module.params['password'])
    port = int(module.params['port'])
    timeout = int(module.params['timeout'])
    minutes = int(module.params['minutes'])
    debug = int(module.params['debug'])
    ipv6 = module.params['ipv6']
    multi_key = module.params['multi_key']
    if not HAS_KEYCZAR:
        module.fail_json(msg="keyczar is not installed (on the remote side)")
    DEBUG_LEVEL=debug
    pid_file = get_pid_location(module)
    daemon_pid = None
    daemon_running = False
    if os.path.exists(pid_file):
        try:
            daemon_pid = int(open(pid_file).read())
            try:
                # sending signal 0 doesn't do anything to the
                # process, other than tell the calling program
                # whether other signals can be sent
                os.kill(daemon_pid, 0)
            except OSError, e:
                if e.errno == errno.EPERM:
                    # no permissions means the pid is probably
                    # running, but as a different user, so fail
                    module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid)
            else:
                daemon_running = True
        except ValueError:
            # invalid pid file, unlink it - otherwise we don't care
            try:
                os.unlink(pid_file)
            except:
                pass
    if daemon_running and multi_key:
        # try to connect to the file socket for the daemon if it exists
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(SOCKET_FILE)
            # the daemon's LocalSocketThread reads one newline-terminated
            # key blob and answers OK/EXISTS/BADKEY
            s.sendall(password + '\n')
            data = ""
            while '\n' not in data:
                data += s.recv(2048)
            res = data.strip()
        except:
            module.fail_json(msg="failed to connect to the local socket file")
        finally:
            try:
                s.close()
            except:
                pass
        if res in ("OK", "EXISTS"):
            module.exit_json(msg="transferred new key to the existing daemon")
        else:
            module.fail_json(msg="could not transfer new key: %s" % data.strip())
    else:
        # try to start up the daemon
        daemonize(module, password, port, timeout, minutes, ipv6, pid_file)

main()
| gpl-3.0 |
mediawiki-utilities/python-mwcites | mwcites/extractors/tests/test_arxiv.py | 3 | 1419 | import pprint
from nose.tools import eq_
from .. import arxiv
from ...identifier import Identifier
INPUT_TEXT = """
This is a doi randomly placed in the text 10.0000/m1
Here's a typo that might be construed as a doi 10.60 people were there.
{{cite|...|arxiv=0706.0001v1|pmid=10559875}}
<ref>Halfaker, A., Geiger, R. S., Morgan, J. T., & Riedl, J. (2012).
The rise and decline of an open collaboration system: How Wikipedia’s
reaction to popularity is causing its decline.
American Behavioral Scientist,
0002764212469365 arxiv:0706.0002v1</ref>. Hats pants and banana
[http://arxiv.org/0706.0003]
[http://arxiv.org/abs/0706.0004v1]
[https://arxiv.org/abs/0706.0005v1]
[https://arxiv.org/abs/math.GT/0309001]
[https://arxiv.org/abs/-math.gs/0309002]
{{cite|...|arxiv=foobar.hats/0101003|issue=1656}}
http://www.google.com/sky/#latitude=3.362&longitude=160.1238441&zoom=
10.2387/234310.2347/39423
<!--
10.2387/234310.2347/39423-->
"""
EXPECTED = [
Identifier('arxiv', "0706.0001"),
Identifier('arxiv', "0706.0002"),
Identifier('arxiv', "0706.0003"),
Identifier('arxiv', "0706.0004"),
Identifier('arxiv', "0706.0005"),
Identifier('arxiv', "math.gt/0309001"),
Identifier('arxiv', "math.gs/0309002"),
Identifier('arxiv', "foobar.hats/0101003")
]
def test_extract():
    # Running the extractor over the fixture text must yield exactly the
    # expected identifiers, in order.
    extracted = list(arxiv.extract(INPUT_TEXT))
    pprint.pprint(extracted)
    pprint.pprint(EXPECTED)
    eq_(extracted, EXPECTED)
| mit |
crosswalk-project/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_none_script-manual.py | 30 | 2381 | def main(request, response):
response.headers.set("Content-Security-Policy", "default-src 'none'")
response.headers.set("X-Content-Security-Policy", "default-src 'none'")
response.headers.set("X-WebKit-CSP", "default-src 'none'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_none_script</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src 'none'"/>
<meta charset="utf-8"/>
<script>
function show_fail() {
document.getElementById("log").innerHTML = "FAIL";
}
</script>
</head>
<body onload="show_fail()">
<p>Test passes if text "PASS" appears below.</p>
<div id="log">PASS</div>
</body>
</html> """
| bsd-3-clause |
lzw120/django | build/lib/django/db/models/sql/subqueries.py | 87 | 8259 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'AggregateQuery']
class DeleteQuery(Query):
    """
    A restricted Query subclass used to issue DELETE statements; it is more
    constrained than a general query.
    """
    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        # Point the query at a single table, attach the WHERE tree, and run.
        self.tables = [table]
        self.where = where
        self.get_compiler(using).execute_sql(None)

    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.

        The pk list is processed in chunks, so more than one physical query
        may be executed when it contains a lot of values.
        """
        if not field:
            field = self.model._meta.pk
        for start in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            chunk = pk_list[start:start + GET_ITERATOR_CHUNK_SIZE]
            where = self.where_class()
            where.add(
                (Constraint(None, field.column, field), 'in', chunk), AND)
            self.do_query(self.model._meta.db_table, where, using=using)
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        # (field, model, value) triples to apply in the SET clause.
        self.values = []
        # Primary keys used to restrict related (ancestor-model) updates.
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            # Maps ancestor model -> list of (field, None, value) updates.
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        # Copy related_updates so the clone can be mutated independently.
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def update_batch(self, pk_list, values, using):
        # Apply `values` to the rows whose primary keys are in pk_list, in
        # chunks of GET_ITERATOR_CHUNK_SIZE so each "pk IN (...)" stays bounded.
        pk_field = self.model._meta.pk
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
                    pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]),
                    AND)
            self.get_compiler(using).execute_sql(None)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.

        Raises FieldError for relation fields other than foreign keys.
        """
        values_seq = []
        for name, val in values.iteritems():
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            if model:
                # Field lives on an ancestor model; defer to a separate query.
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        # Check that no Promise object passes to the query. Refs #10498.
        values_seq = [(value[0], value[1], force_unicode(value[2]))
                      if isinstance(value[2], Promise) else value
                      for value in values_seq]
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        try:
            self.related_updates[model].append((field, None, value))
        except KeyError:
            self.related_updates[model] = [(field, None, value)]

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in self.related_updates.iteritems():
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """
    Represents an "insert" SQL query.
    """
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        self.fields = []
        self.objs = []

    def clone(self, klass=None, **kwargs):
        # Shallow-copy the field/object lists so the clone is independent.
        state = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        state.update(kwargs)
        return super(InsertQuery, self).clone(klass, **state)

    def insert_values(self, fields, objs, raw=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.

        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        self.fields = fields
        # Check that no Promise object reaches the DB. Refs #10498.
        for obj in objs:
            for field in fields:
                current = getattr(obj, field.attname)
                if isinstance(current, Promise):
                    setattr(obj, field.attname, force_unicode(current))
        self.objs = objs
        self.raw = raw
class DateQuery(Query):
    """
    A DateQuery is a normal query, except that it specifically selects a single
    date field. This requires some special handling when converting the results
    back to Python objects, so we put it in a separate class.
    """
    compiler = 'SQLDateCompiler'

    def add_date_select(self, field_name, lookup_type, order='ASC'):
        """
        Converts the query into a date extraction query.

        field_name may span relations (separated by LOOKUP_SEP) and must
        resolve to a DateField. lookup_type is the date part to extract and
        order selects ascending ('ASC') or descending results.

        Raises FieldDoesNotExist if the field name cannot be resolved.
        """
        try:
            result = self.setup_joins(
                field_name.split(LOOKUP_SEP),
                self.get_meta(),
                self.get_initial_alias(),
                False
            )
        except FieldError:
            raise FieldDoesNotExist("%s has no field named '%s'" % (
                self.model._meta.object_name, field_name
            ))
        field = result[0]
        assert isinstance(field, DateField), "%r isn't a DateField." \
            % field.name
        alias = result[3][-1]
        select = Date((alias, field.column), lookup_type)
        self.select = [select]
        self.select_fields = [None]
        self.select_related = False  # See #7097.
        self.set_extra_mask([])
        self.distinct = True
        # Use a conditional expression instead of the error-prone
        # "cond and a or b" idiom.
        self.order_by = [1] if order == 'ASC' else [-1]
        if field.null:
            # Exclude NULL dates so they don't show up as None in the results.
            self.add_filter(("%s__isnull" % field_name, False))
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Compile the inner query once and keep its SQL and parameters so the
        # compiler can splice them into the FROM clause.
        compiled_sql, params = query.get_compiler(using).as_sql(with_col_aliases=True)
        self.subquery = compiled_sql
        self.sub_params = params
| bsd-3-clause |
crs4/omero.biobank | test/kb/test_individual.py | 1 | 2096 | # BEGIN_COPYRIGHT
# END_COPYRIGHT
import os, unittest, logging
logging.basicConfig(level=logging.ERROR)
from bl.vl.kb import KnowledgeBase as KB
from kb_object_creator import KBObjectCreator
OME_HOST = os.getenv("OME_HOST", "localhost")
OME_USER = os.getenv("OME_USER", "root")
OME_PASS = os.getenv("OME_PASS", "romeo")
class TestKB(KBObjectCreator):
    """Integration tests for individual/enrollment handling in the KB."""

    def __init__(self, name):
        super(TestKB, self).__init__(name)
        # Objects queued for deletion during tearDown.
        self.kill_list = []

    def setUp(self):
        self.kb = KB(driver='omero')(OME_HOST, OME_USER, OME_PASS)

    def tearDown(self):
        # Delete in reverse creation order to respect object dependencies.
        self.kill_list.reverse()
        for x in self.kill_list:
            self.kb.delete(x)
        self.kill_list = []

    def check_object(self, o, conf, otype):
        """
        Assert that o is an instance of otype and that each attribute named
        in conf matches the configured value.

        BUG FIX: this used to wrap everything in a bare "try/except: pass",
        which silently swallowed every assertion failure and made the checks
        no-ops. The handler has been removed so failures actually surface.
        """
        self.assertTrue(isinstance(o, otype))
        for k, v in conf.items():
            # FIXME this is omero specific...
            if hasattr(v, 'ome_obj'):
                self.assertEqual(getattr(o, k).id, v.id)
                self.assertEqual(type(getattr(o, k)), type(v))
            elif hasattr(v, '_id'):
                self.assertEqual(getattr(o, k)._id, v._id)
            else:
                self.assertEqual(getattr(o, k), v)

    def test_individual(self):
        conf, i = self.create_individual()
        self.kill_list.append(i.save())
        self.check_object(i, conf, self.kb.Individual)

    def test_enrollment(self):
        conf, e = self.create_enrollment()
        self.kill_list.append(e.save())
        self.check_object(e, conf, self.kb.Enrollment)

    def test_enrollment_ops(self):
        conf, e = self.create_enrollment()
        e.save()
        study = e.study
        xe = self.kb.get_enrollment(study, conf['studyCode'])
        self.assertIsNotNone(xe)
        self.assertEqual(xe.id, e.id)
        # Deleting the enrollment must make the lookup return None.
        self.kb.delete(e)
        self.assertEqual(self.kb.get_enrollment(study, conf['studyCode']), None)
def suite():
    """Build the test suite for the individual/enrollment KB tests."""
    # Use a distinct local name so the function name is not shadowed.
    tests = unittest.TestSuite()
    for case_name in ('test_individual', 'test_enrollment', 'test_enrollment_ops'):
        tests.addTest(TestKB(case_name))
    return tests
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
| gpl-2.0 |
adityacs/ansible | lib/ansible/plugins/action/pause.py | 79 | 7055 | # Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import termios
import time
import tty
from os import isatty
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleTimeoutExceeded(Exception):
    """Raised by the SIGALRM handler when the pause duration has elapsed."""
    pass
def timeout_handler(signum, frame):
    """SIGALRM handler: end the timed pause by raising AnsibleTimeoutExceeded."""
    raise AnsibleTimeoutExceeded
class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    # Valid kinds of pause arguments; '' stands for the bare prompted pause.
    PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
    # Run once for the whole play, not once per host.
    BYPASS_HOST_LOOP = True

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module

        Supports three modes: a timed pause ("seconds"/"minutes"), a custom
        prompt ("prompt"), or the default "press enter" prompt when no args
        are given. Returns a result dict including start/stop timestamps,
        the elapsed delta and any user input collected.
        '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        result.update(dict(
            changed = False,
            rc = 0,
            stderr = '',
            stdout = '',
            start = None,
            stop = None,
            delta = None,
        ))

        # Is 'args' empty, then this is the default prompted pause
        if self._task.args is None or len(self._task.args.keys()) == 0:
            prompt = "[%s]\nPress enter to continue:" % self._task.get_name().strip()

        # Are 'minutes' or 'seconds' keys that exist in 'args'?
        elif 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    seconds = int(self._task.args['seconds'])
                    duration_unit = 'seconds'

            except ValueError as e:
                result['failed'] = True
                result['msg'] = "non-integer value given for prompt duration:\n%s" % str(e)
                return result

        # Is 'prompt' a key in 'args'?
        elif 'prompt' in self._task.args:
            prompt = "[%s]\n%s:" % (self._task.get_name().strip(), self._task.args['prompt'])

        else:
            # I have no idea what you're trying to do. But it's so wrong.
            result['failed'] = True
            result['msg'] = "invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES)
            return result

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = str(datetime.datetime.now())
        result['user_input'] = ''

        fd = None
        old_settings = None
        try:
            if seconds is not None:
                # Clamp to a minimum of one second so the alarm always fires.
                if seconds < 1:
                    seconds = 1

                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)

                # show the prompt
                display.display("Pausing for %d seconds" % seconds)
                # NOTE(review): the trailing comma makes this statement a
                # one-element tuple; harmless but probably unintentional.
                display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
            else:
                display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            fd = None
            try:
                fd = self._connection._new_stdin.fileno()
            except ValueError:
                # someone is using a closed file descriptor as stdin
                pass
            if fd is not None:
                if isatty(fd):
                    old_settings = termios.tcgetattr(fd)
                    tty.setraw(fd)

                    # flush the buffer to make sure no previous key presses
                    # are read in below
                    termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
            # Read input one key at a time; ctrl+C becomes a KeyboardInterrupt,
            # carriage return ends a prompted pause, and a timed pause is ended
            # by the SIGALRM handler raising AnsibleTimeoutExceeded.
            while True:
                try:
                    if fd is not None:
                        key_pressed = self._connection._new_stdin.read(1)
                        if key_pressed == '\x03':
                            raise KeyboardInterrupt

                    if not seconds:
                        if fd is None or not isatty(fd):
                            display.warning("Not waiting from prompt as stdin is not interactive")
                            break
                        # read key presses and act accordingly
                        if key_pressed == '\r':
                            break
                        else:
                            result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    # Cancel any pending alarm before asking continue/abort.
                    if seconds is not None:
                        signal.alarm(0)
                    display.display("Press 'C' to continue the play or 'A' to abort \r"),
                    if self._c_or_a():
                        break
                    else:
                        raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin fd
            if not(None in (fd, old_settings)) and isatty(fd):
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

        duration = time.time() - start
        result['stop'] = str(datetime.datetime.now())
        result['delta'] = int(duration)

        # Report the elapsed time in the same unit the user specified.
        if duration_unit == 'minutes':
            duration = round(duration / 60.0, 2)
        else:
            duration = round(duration, 2)
        result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        return result

    def _c_or_a(self):
        """Block until the user presses 'c' (continue, True) or 'a' (abort, False)."""
        while True:
            key_pressed = self._connection._new_stdin.read(1)
            if key_pressed.lower() == 'a':
                return False
            elif key_pressed.lower() == 'c':
                return True
| gpl-3.0 |
wger-project/wger | wger/exercises/tests/test_categories.py | 1 | 3817 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WgerAccessTestCase,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase,
)
from wger.exercises.models import ExerciseCategory
class ExerciseCategoryRepresentationTestCase(WgerTestCase):
    """
    Test the representation of a model
    """

    def test_representation(self):
        """
        Test that the representation of an object is correct
        """
        category = ExerciseCategory.objects.get(pk=1)
        self.assertEqual("{0}".format(category), 'Category')
class CategoryOverviewTestCase(WgerAccessTestCase):
    """
    Test that only admins see the edit links
    """
    url = 'exercise:category:list'
    anonymous_fail = True
    user_success = 'admin'
    # BUG FIX: a missing comma after 'manager2' caused implicit string
    # concatenation ('manager2general_manager1'), which silently dropped
    # both users from the access check.
    user_fail = (
        'manager1',
        'manager2',
        'general_manager1',
        'manager3',
        'manager4',
        'test',
        'member1',
        'member2',
        'member3',
        'member4',
        'member5',
    )
class DeleteExerciseCategoryTestCase(WgerDeleteTestCase):
    """
    Exercise category delete test case
    """
    # Model under test and the named URL used to delete it.
    object_class = ExerciseCategory
    url = 'exercise:category:delete'
    # Primary key of the fixture category that gets deleted.
    pk = 4
    # Only admins may delete categories; regular users must be rejected.
    user_success = 'admin'
    user_fail = 'test'
class EditExerciseCategoryTestCase(WgerEditTestCase):
    """
    Tests editing an exercise category
    """
    # Model under test, named URL and fixture primary key.
    object_class = ExerciseCategory
    url = 'exercise:category:edit'
    pk = 3
    # Form data submitted by the edit test.
    data = {'name': 'A different name'}
class AddExerciseCategoryTestCase(WgerAddTestCase):
    """
    Tests adding an exercise category
    """
    # Model under test and the named URL used to create it.
    object_class = ExerciseCategory
    url = 'exercise:category:add'
    # Form data submitted by the add test.
    data = {'name': 'A new category'}
class ExerciseCategoryCacheTestCase(WgerTestCase):
    """
    Cache test case
    """

    def test_overview_cache_update(self):
        """
        Test that the template cache for the overview is correctly reset when
        performing certain operations
        """
        # Warm up the template-fragment cache by rendering the pages.
        self.client.get(reverse('exercise:exercise:overview'))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
        cache_key = make_template_fragment_key('exercise-overview', [2])
        overview_before = cache.get(cache_key)

        # Editing the category must invalidate the cached fragment.
        category = ExerciseCategory.objects.get(pk=2)
        category.name = 'Cool category'
        category.save()
        self.assertFalse(cache.get(cache_key))

        # Re-render the pages and verify a fresh fragment was stored.
        self.client.get(reverse('exercise:exercise:overview'))
        self.client.get(reverse('exercise:muscle:overview'))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
        overview_after = cache.get(cache_key)
        self.assertNotEqual(overview_before, overview_after)
class ExerciseCategoryApiTestCase(api_base_test.ApiBaseResourceTestCase):
    """
    Tests the exercise category overview resource
    """
    # Fixture primary key and model exposed by the API resource.
    pk = 2
    resource = ExerciseCategory
    # Categories are public; the resource requires no authentication.
    private_resource = False
| agpl-3.0 |
rgayon/plaso | plaso/winnt/known_folder_ids.py | 1 | 17463 | # -*- coding: utf-8 -*-
"""This file contains the Windows NT Known Folder identifier definitions."""
from __future__ import unicode_literals
# For now ignore the line too long errors.
# pylint: disable=line-too-long
# For now this mapping is copied from:
# https://code.google.com/p/libfwsi/wiki/KnownFolderIdentifiers
# TODO: store these definitions in a database or equivalent.
DESCRIPTIONS = {
'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': 'Account Pictures',
'00bcfc5a-ed94-4e48-96a1-3f6217f21990': 'Roaming Tiles',
'0139d44e-6afe-49f2-8690-3dafcae6ffb8': '(Common) Programs',
'0482af6c-08f1-4c34-8c90-e17ec98b1e17': 'Public Account Pictures',
'054fae61-4dd8-4787-80b6-090220c4b700': 'Game Explorer (Game Tasks)',
'0762d272-c50a-4bb0-a382-697dcd729b80': 'Users (User Profiles)',
'0ac0837c-bbf8-452a-850d-79d08e667ca7': 'Computer (My Computer)',
'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': 'History',
'0f214138-b1d3-4a90-bba9-27cbc0c5389a': 'Sync Setup',
'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': 'Sample Playlists',
'1777f761-68ad-4d8a-87bd-30b759fa33dd': 'Favorites',
'18989b1d-99b5-455b-841c-ab7c74e4ddfc': 'Videos (My Video)',
'190337d1-b8ca-4121-a639-6d472d16972a': 'Search Results (Search Home)',
'1a6fdba2-f42d-4358-a798-b74d745926c5': 'Recorded TV',
'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': 'System32 (System)',
'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': 'Libraries',
'1e87508d-89c2-42f0-8a7e-645a0f50ca58': 'Applications',
'2112ab0a-c86a-4ffe-a368-0de96e47012e': 'Music',
'2400183a-6185-49fb-a2d8-4a392a602ba3': 'Public Videos (Common Video)',
'24d89e24-2f19-4534-9dde-6a6671fbb8fe': 'One Drive Documents',
'289a9a43-be44-4057-a41b-587a76d7e7f9': 'Sync Results',
'2a00375e-224c-49de-b8d1-440df7ef3ddc': 'Localized Resources (Directory)',
'2b0f765d-c0e9-4171-908e-08a611b84ff6': 'Cookies',
'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': 'Original Images',
'3214fab5-9757-4298-bb61-92a9deaa44ff': 'Public Music (Common Music)',
'339719b5-8c47-4894-94c2-d8f77add44a6': 'One Drive Pictures',
'33e28130-4e1e-4676-835a-98395c3bc3bb': 'Pictures (My Pictures)',
'352481e8-33be-4251-ba85-6007caedcf9d': 'Internet Cache (Temporary Internet Files)',
'374de290-123f-4565-9164-39c4925e467b': 'Downloads',
'3d644c9b-1fb8-4f30-9b45-f670235f79c0': 'Public Downloads (Common Downloads)',
'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': 'Roaming Application Data (Roaming)',
'43668bf8-c14e-49b2-97c9-747784d784b7': 'Sync Center (Sync Manager)',
'48daf80b-e6cf-4f4e-b800-0e69d84ee384': 'Libraries',
'491e922f-5643-4af4-a7eb-4e7a138d8174': 'Videos',
'4bd8d571-6d19-48d3-be97-422220080e43': 'Music (My Music)',
'4bfefb45-347d-4006-a5be-ac0cb0567192': 'Conflicts',
'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': 'Saved Games',
'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': 'Internet (The Internet)',
'52528a6b-b9e3-4add-b60d-588c2dba842d': 'Homegroup',
'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': 'Quick Launch',
'56784854-c6cb-462b-8169-88e350acb882': 'Contacts',
'5b3749ad-b49f-49c1-83eb-15370fbd4882': 'Tree Properties',
'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': 'Programs',
'5ce4a5e9-e4eb-479d-b89f-130c02886155': 'Device Metadata Store',
'5e6c858f-0e22-4760-9afe-ea3317b67173': 'Profile (User\'s name)',
'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': 'Start Menu',
'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': 'Program Data',
'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': 'Common Files (x64)',
'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': 'Slide Shows (Photo Albums)',
'6d809377-6af0-444b-8957-a3773f02200e': 'Program Files (x64)',
'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': 'Network Connections',
'724ef170-a42d-4fef-9f26-b60e846fba4f': 'Administrative Tools',
'767e6811-49cb-4273-87c2-20f355e1085b': 'One Drive Camera Roll',
'76fc4e2d-d6ad-4519-a663-37bd56068185': 'Printers',
'7b0db17d-9cd2-4a93-9733-46cc89022e7c': 'Documents',
'7b396e54-9ec5-4300-be0a-2482ebae1a26': 'Default Gadgets (Sidebar Default Parts)',
'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': 'Program Files (x86)',
'7d1d3a04-debb-4115-95cf-2f29da2920da': 'Saved Searches (Searches)',
'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': 'Templates',
'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': '(Common) Startup',
'82a74aeb-aeb4-465c-a014-d097ee346d63': 'Control Panel',
'859ead94-2e85-48ad-a71a-0969cb56a6cd': 'Sample Videos',
'8983036c-27c0-404b-8f08-102d10dcfd74': 'Send To',
'8ad10c31-2adb-4296-a8f7-e4701232c972': 'Resources (Resources Directory)',
'905e63b6-c1bf-494e-b29c-65b732d3d21a': 'Program Files',
'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': 'Printer Shortcuts (PrintHood)',
'98ec0e18-2098-4d44-8644-66979315a281': 'Microsoft Office Outlook (MAPI)',
'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': 'User\'s name',
'9e3995ab-1f9c-4f13-b827-48b24b6c7174': 'User Pinned',
'9e52ab10-f80d-49df-acb8-4330f5687855': 'Temporary Burn Folder (CD Burning)',
'a302545d-deff-464b-abe8-61c8648d939b': 'Libraries',
'a305ce99-f527-492b-8b1a-7e76fa98d6e4': 'Installed Updates (Application Updates)',
'a3918781-e5f2-4890-b3d9-a7e54332328c': 'Application Shortcuts',
'a4115719-d62e-491d-aa7c-e74b8be3b067': '(Common) Start Menu',
'a520a1a4-1780-4ff6-bd18-167343c5af16': 'Local Application Data Low (Local Low)',
'a52bba46-e9e1-435f-b3d9-28daa648c0f6': 'One Drive',
'a63293e8-664e-48db-a079-df759e0509f7': 'Templates',
'a75d362e-50fc-4fb7-ac2c-a8beaa314493': 'Gadgets (Sidebar Parts)',
'a77f5d77-2e2b-44c3-a6a2-aba601054a51': 'Programs',
'a990ae9f-a03b-4e80-94bc-9912d7504104': 'Pictures',
'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': 'Roamed Tile Images',
'ab5fb87b-7ce2-4f83-915d-550846c9537b': 'Camera Roll',
'ae50c081-ebd2-438a-8655-8a092e34987a': 'Recent (Recent Items)',
'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': 'Sample Music',
'b4bfcc3a-db2c-424c-b029-7fe99a87c641': 'Desktop',
'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': 'Public Pictures (Common Pictures)',
'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': 'Recycle Bin (Bit Bucket)',
'b7bede81-df94-4682-a7d8-57a52620b86f': 'Screenshots',
'b94237e7-57ac-4347-9151-b08c6c32d1f7': '(Common) Templates',
'b97d20bb-f46a-4c97-ba10-5e3608430854': 'Startup',
'bcb5256f-79f6-4cee-b725-dc34e402fd46': 'Implicit Application Shortcuts',
'bcbd3057-ca5c-4622-b42d-bc56db0ae516': 'Programs',
'bd85e001-112e-431e-983b-7b15ac09fff1': 'Recorded TV',
'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': 'Links',
'c1bae2d0-10df-4334-bedd-7aa20b227a9d': '(Common) OEM Links',
'c4900540-2379-4c75-844b-64e6faf8716b': 'Sample Pictures',
'c4aa340d-f20f-4863-afef-f87ef2e6ba25': 'Public Desktop (Common Desktop)',
'c5abbf53-e17f-4121-8900-86626fc2c973': 'Network Shortcuts (NetHood)',
'c870044b-f49e-4126-a9c3-b52a1ff411e8': 'Ringtones',
'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': 'Games',
'd0384e7d-bac3-4797-8f14-cba229b392b5': '(Common) Administrative Tools',
'd20beec4-5ca8-4905-ae3b-bf251ea09b53': 'Network (Places)',
'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': 'System32 (x86)',
'd9dc8a3b-b784-432e-a781-5a1130a75963': 'History',
'de61d971-5ebc-4f02-a3a9-6c82895e5c04': 'Add New Programs (Get Programs)',
'de92c1c7-837f-4f69-a3bb-86e631204a23': 'Playlists',
'de974d24-d9c6-4d3e-bf91-f4455120b917': 'Common Files (x86)',
'debf2536-e1a8-4c59-b6a2-414586476aea': 'Game Explorer (Public Game Tasks)',
'df7266ac-9274-4867-8d55-3bd661de872d': 'Programs and Features (Change and Remove Programs)',
'dfdf76a2-c82a-4d63-906a-5644ac457385': 'Public',
'e555ab60-153b-4d17-9f04-a5fe99fc15ec': 'Ringtones',
'ed4824af-dce4-45a8-81e2-fc7965083634': 'Public Documents (Common Documents)',
'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': 'Offline Files (CSC)',
'f1b32785-6fba-4fcf-9d55-7b8e7f157091': 'Local Application Data',
'f38bf404-1d43-42f2-9305-67de0b28fc23': 'Windows',
'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': 'User\'s Files',
'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': 'Common Files',
'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': 'Fonts',
'fdd39ad0-238f-46af-adb4-6c85480369c7': 'Documents (Personal)',
}
PATHS = {
'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': '%APPDATA%\\Microsoft\\Windows\\AccountPictures',
'00bcfc5a-ed94-4e48-96a1-3f6217f21990': '%LOCALAPPDATA%\\Microsoft\\Windows\\RoamingTiles',
'0139d44e-6afe-49f2-8690-3dafcae6ffb8': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs',
'0482af6c-08f1-4c34-8c90-e17ec98b1e17': '%PUBLIC%\\AccountPictures',
'054fae61-4dd8-4787-80b6-090220c4b700': '%LOCALAPPDATA%\\Microsoft\\Windows\\GameExplorer',
'0762d272-c50a-4bb0-a382-697dcd729b80': '%SYSTEMDRIVE%\\Users',
'0ac0837c-bbf8-452a-850d-79d08e667ca7': '',
'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': '%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\History',
'0f214138-b1d3-4a90-bba9-27cbc0c5389a': '',
'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': '%PUBLIC%\\Music\\Sample Playlists',
'1777f761-68ad-4d8a-87bd-30b759fa33dd': '%USERPROFILE%\\Favorites',
'18989b1d-99b5-455b-841c-ab7c74e4ddfc': '%USERPROFILE%\\Videos',
'190337d1-b8ca-4121-a639-6d472d16972a': '',
'1a6fdba2-f42d-4358-a798-b74d745926c5': '%PUBLIC%\\RecordedTV.library-ms',
'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': '%WINDIR%\\System32',
'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': '%APPDATA%\\Microsoft\\Windows\\Libraries',
'1e87508d-89c2-42f0-8a7e-645a0f50ca58': '',
'2112ab0a-c86a-4ffe-a368-0de96e47012e': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Music.library-ms',
'2400183a-6185-49fb-a2d8-4a392a602ba3': '%PUBLIC%\\Videos',
'24d89e24-2f19-4534-9dde-6a6671fbb8fe': '%USERPROFILE%\\OneDrive\\Documents',
'289a9a43-be44-4057-a41b-587a76d7e7f9': '',
'2a00375e-224c-49de-b8d1-440df7ef3ddc': '%WINDIR%\\resources\\%CODEPAGE%',
'2b0f765d-c0e9-4171-908e-08a611b84ff6': '%APPDATA%\\Microsoft\\Windows\\Cookies',
'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': '%LOCALAPPDATA%\\Microsoft\\Windows Photo Gallery\\Original Images',
'3214fab5-9757-4298-bb61-92a9deaa44ff': '%PUBLIC%\\Music',
'339719b5-8c47-4894-94c2-d8f77add44a6': '%USERPROFILE%\\OneDrive\\Pictures',
'33e28130-4e1e-4676-835a-98395c3bc3bb': '%USERPROFILE%\\Pictures',
'352481e8-33be-4251-ba85-6007caedcf9d': '%LOCALAPPDATA%\\Microsoft\\Windows\\Temporary Internet Files',
'374de290-123f-4565-9164-39c4925e467b': '%USERPROFILE%\\Downloads',
'3d644c9b-1fb8-4f30-9b45-f670235f79c0': '%PUBLIC%\\Downloads',
'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': '%USERPROFILE%\\AppData\\Roaming',
'43668bf8-c14e-49b2-97c9-747784d784b7': '',
'48daf80b-e6cf-4f4e-b800-0e69d84ee384': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Libraries',
'491e922f-5643-4af4-a7eb-4e7a138d8174': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Videos.library-ms',
'4bd8d571-6d19-48d3-be97-422220080e43': '%USERPROFILE%\\Music',
'4bfefb45-347d-4006-a5be-ac0cb0567192': '',
'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': '%USERPROFILE%\\Saved Games',
'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': '',
'52528a6b-b9e3-4add-b60d-588c2dba842d': '',
'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch',
'56784854-c6cb-462b-8169-88e350acb882': '',
'5b3749ad-b49f-49c1-83eb-15370fbd4882': '',
'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': '%LOCALAPPDATA%\\Programs',
'5ce4a5e9-e4eb-479d-b89f-130c02886155': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\DeviceMetadataStore',
'5e6c858f-0e22-4760-9afe-ea3317b67173': '%SYSTEMDRIVE%\\Users\\%USERNAME%',
'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': '%APPDATA%\\Microsoft\\Windows\\Start Menu',
'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': '%SYSTEMDRIVE%\\ProgramData',
'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': '%PROGRAMFILES%\\Common Files',
'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': '%USERPROFILE%\\Pictures\\Slide Shows',
'6d809377-6af0-444b-8957-a3773f02200e': '%SYSTEMDRIVE%\\Program Files',
'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': '',
'724ef170-a42d-4fef-9f26-b60e846fba4f': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
'767e6811-49cb-4273-87c2-20f355e1085b': '%USERPROFILE%\\OneDrive\\Pictures\\Camera Roll',
'76fc4e2d-d6ad-4519-a663-37bd56068185': '',
'7b0db17d-9cd2-4a93-9733-46cc89022e7c': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Documents.library-ms',
'7b396e54-9ec5-4300-be0a-2482ebae1a26': '%PROGRAMFILES%\\Windows Sidebar\\Gadgets',
'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': '%PROGRAMFILES% (%SYSTEMDRIVE%\\Program Files)',
'7d1d3a04-debb-4115-95cf-2f29da2920da': '%USERPROFILE%\\Searches',
'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': '%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\Templates',
'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
'82a74aeb-aeb4-465c-a014-d097ee346d63': '',
'859ead94-2e85-48ad-a71a-0969cb56a6cd': '%PUBLIC%\\Videos\\Sample Videos',
'8983036c-27c0-404b-8f08-102d10dcfd74': '%APPDATA%\\Microsoft\\Windows\\SendTo',
'8ad10c31-2adb-4296-a8f7-e4701232c972': '%WINDIR%\\Resources',
'905e63b6-c1bf-494e-b29c-65b732d3d21a': '%SYSTEMDRIVE%\\Program Files',
'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': '%APPDATA%\\Microsoft\\Windows\\Printer Shortcuts',
'98ec0e18-2098-4d44-8644-66979315a281': '',
'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': '',
'9e3995ab-1f9c-4f13-b827-48b24b6c7174': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned',
'9e52ab10-f80d-49df-acb8-4330f5687855': '%LOCALAPPDATA%\\Microsoft\\Windows\\Burn\\Burn',
'a302545d-deff-464b-abe8-61c8648d939b': '',
'a305ce99-f527-492b-8b1a-7e76fa98d6e4': '',
'a3918781-e5f2-4890-b3d9-a7e54332328c': '%LOCALAPPDATA%\\Microsoft\\Windows\\Application Shortcuts',
'a4115719-d62e-491d-aa7c-e74b8be3b067': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu',
'a520a1a4-1780-4ff6-bd18-167343c5af16': '%USERPROFILE%\\AppData\\LocalLow',
'a52bba46-e9e1-435f-b3d9-28daa648c0f6': '%USERPROFILE%\\OneDrive',
'a63293e8-664e-48db-a079-df759e0509f7': '%APPDATA%\\Microsoft\\Windows\\Templates',
'a75d362e-50fc-4fb7-ac2c-a8beaa314493': '%LOCALAPPDATA%\\Microsoft\\Windows Sidebar\\Gadgets',
'a77f5d77-2e2b-44c3-a6a2-aba601054a51': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs',
'a990ae9f-a03b-4e80-94bc-9912d7504104': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Pictures.library-ms',
'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': '%LOCALAPPDATA%\\Microsoft\\Windows\\RoamedTileImages',
'ab5fb87b-7ce2-4f83-915d-550846c9537b': '%USERPROFILE%\\Pictures\\Camera Roll',
'ae50c081-ebd2-438a-8655-8a092e34987a': '%APPDATA%\\Microsoft\\Windows\\Recent',
'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': '%PUBLIC%\\Music\\Sample Music',
'b4bfcc3a-db2c-424c-b029-7fe99a87c641': '%USERPROFILE%\\Desktop',
'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': '%PUBLIC%\\Pictures',
'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': '',
'b7bede81-df94-4682-a7d8-57a52620b86f': '%USERPROFILE%\\Pictures\\Screenshots',
'b94237e7-57ac-4347-9151-b08c6c32d1f7': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Templates',
'b97d20bb-f46a-4c97-ba10-5e3608430854': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
'bcb5256f-79f6-4cee-b725-dc34e402fd46': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned\\ImplicitAppShortcuts',
'bcbd3057-ca5c-4622-b42d-bc56db0ae516': '%LOCALAPPDATA%\\Programs\\Common',
'bd85e001-112e-431e-983b-7b15ac09fff1': '',
'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': '%USERPROFILE%\\Links',
'c1bae2d0-10df-4334-bedd-7aa20b227a9d': '%ALLUSERSPROFILE%\\OEM Links',
'c4900540-2379-4c75-844b-64e6faf8716b': '%PUBLIC%\\Pictures\\Sample Pictures',
'c4aa340d-f20f-4863-afef-f87ef2e6ba25': '%PUBLIC%\\Desktop',
'c5abbf53-e17f-4121-8900-86626fc2c973': '%APPDATA%\\Microsoft\\Windows\\Network Shortcuts',
'c870044b-f49e-4126-a9c3-b52a1ff411e8': '%LOCALAPPDATA%\\Microsoft\\Windows\\Ringtones',
'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': '',
'd0384e7d-bac3-4797-8f14-cba229b392b5': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
'd20beec4-5ca8-4905-ae3b-bf251ea09b53': '',
'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': '%WINDIR%\\system32',
'd9dc8a3b-b784-432e-a781-5a1130a75963': '%LOCALAPPDATA%\\Microsoft\\Windows\\History',
'de61d971-5ebc-4f02-a3a9-6c82895e5c04': '',
'de92c1c7-837f-4f69-a3bb-86e631204a23': '%USERPROFILE%\\Music\\Playlists',
'de974d24-d9c6-4d3e-bf91-f4455120b917': '%PROGRAMFILES%\\Common Files',
'debf2536-e1a8-4c59-b6a2-414586476aea': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\GameExplorer',
'df7266ac-9274-4867-8d55-3bd661de872d': '',
'dfdf76a2-c82a-4d63-906a-5644ac457385': '%SYSTEMDRIVE%\\Users\\Public',
'e555ab60-153b-4d17-9f04-a5fe99fc15ec': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Ringtones',
'ed4824af-dce4-45a8-81e2-fc7965083634': '%PUBLIC%\\Documents',
'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': '',
'f1b32785-6fba-4fcf-9d55-7b8e7f157091': '%USERPROFILE%\\AppData\\Local',
'f38bf404-1d43-42f2-9305-67de0b28fc23': '%WINDIR%',
'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': '',
'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': '%PROGRAMFILES%\\Common Files',
'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': '%WINDIR%\\Fonts',
'fdd39ad0-238f-46af-adb4-6c85480369c7': '%USERPROFILE%\\Documents',
}
| apache-2.0 |
ke4roh/RPiNWR | tests/test_cache.py | 1 | 16156 | # -*- coding: utf-8 -*-
__author__ = 'ke4roh'
# Copyright © 2016 James E. Scarborough
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from RPiNWR.SAME import *
from RPiNWR.cache import *
from RPiNWR.VTEC import *
import pickle
import os
class TestCache(unittest.TestCase):
def test_buffer_for_radio_against_storm_system(self):
# Test to see that the correct events are reported in priority order as a storm progresses
# This test is a little long in this file, but it's somewhat readable.
alerts = [SAMEMessage("WXL58", x) for x in [
"-WXR-SVR-037183+0045-1232003-KRAH/NWS-",
"-WXR-SVR-037151+0030-1232003-KRAH/NWS-",
"-WXR-SVR-037037+0045-1232023-KRAH/NWS-",
"-WXR-SVR-037001-037151+0100-1232028-KRAH/NWS-",
"-WXR-SVR-037069-037077-037183+0045-1232045-KRAH/NWS-",
"-WXR-SVR-037001+0045-1232110-KRAH/NWS-",
"-WXR-SVR-037069-037181-037185+0045-1232116-KRAH/NWS-",
"-WXR-FFW-037125+0300-1232209-KRAH/NWS-",
"-WXR-SVA-037001-037037-037063-037069-037077-037085-037101-037105-037125-037135-037145-037151-037181-037183-037185+0600-1241854-KRAH/NWS-",
"-WXR-SVR-037001-037037-037151+0045-1242011-KRAH/NWS-",
"-WXR-SVR-037001-037037-037135+0100-1242044-KRAH/NWS-",
"-WXR-SVR-037037-037063-037135-037183+0045-1242120-KRAH/NWS-",
"-WXR-SVR-037183+0100-1242156-KRAH/NWS-",
"-WXR-TOR-037183+0015-1242204-KRAH/NWS-",
"-WXR-SVR-037101-037183+0100-1242235-KRAH/NWS-",
"-WXR-SVR-037151+0100-1242339-KRAH/NWS-",
"-WXR-SVR-037101+0100-1250011-KRAH/NWS-",
"-WXR-SVR-037125-037151+0100-1250029-KRAH/NWS-",
"-WXR-SVR-037085-037105-037183+0100-1250153-KRAH/NWS-",
"-WXR-SVR-037085-037101+0100-1250218-KRAH/NWS-"
]]
expected = """123 20:03 SVR --- SVR
123 20:08 SVR --- SVR
123 20:13 SVR --- SVR
123 20:18 SVR --- SVR
123 20:23 SVR --- SVR,SVR
123 20:28 SVR --- SVR,SVR,SVR
123 20:33 SVR --- SVR,SVR
123 20:38 SVR --- SVR,SVR
123 20:43 SVR --- SVR,SVR
123 20:48 SVR --- SVR,SVR
123 20:53 SVR --- SVR,SVR
123 20:58 SVR --- SVR,SVR
123 21:03 SVR --- SVR,SVR
123 21:08 SVR --- SVR
123 21:13 SVR --- SVR,SVR
123 21:18 SVR --- SVR,SVR,SVR
123 21:23 SVR --- SVR,SVR,SVR
123 21:28 SVR --- SVR,SVR
123 21:33 --- SVR,SVR
123 21:38 --- SVR,SVR
123 21:43 --- SVR,SVR
123 21:48 --- SVR,SVR
123 21:53 --- SVR,SVR
123 21:58 --- SVR
123 22:03 ---
123 22:08 ---
123 22:13 --- FFW
123 22:18 --- FFW
123 22:23 --- FFW
123 22:28 --- FFW
123 22:33 --- FFW
123 22:38 --- FFW
123 22:43 --- FFW
123 22:48 --- FFW
123 22:53 --- FFW
123 22:58 --- FFW
123 23:03 --- FFW
123 23:08 --- FFW
123 23:13 --- FFW
123 23:18 --- FFW
123 23:23 --- FFW
123 23:28 --- FFW
123 23:33 --- FFW
123 23:38 --- FFW
123 23:43 --- FFW
123 23:48 --- FFW
123 23:53 --- FFW
123 23:58 --- FFW
124 00:03 --- FFW
124 00:08 --- FFW
124 00:13 --- FFW
124 00:18 --- FFW
124 00:23 --- FFW
124 00:28 --- FFW
124 00:33 --- FFW
124 00:38 --- FFW
124 00:43 --- FFW
124 00:48 --- FFW
124 00:53 --- FFW
124 00:58 --- FFW
124 01:03 --- FFW
124 01:08 --- FFW
124 01:13 ---
124 01:18 ---
124 01:23 ---
124 01:28 ---
124 01:33 ---
124 01:38 ---
124 01:43 ---
124 01:48 ---
124 01:53 ---
124 01:58 ---
124 02:03 ---
124 02:08 ---
124 02:13 ---
124 02:18 ---
124 02:23 ---
124 02:28 ---
124 02:33 ---
124 02:38 ---
124 02:43 ---
124 02:48 ---
124 02:53 ---
124 02:58 ---
124 03:03 ---
124 03:08 ---
124 03:13 ---
124 03:18 ---
124 03:23 ---
124 03:28 ---
124 03:33 ---
124 03:38 ---
124 03:43 ---
124 03:48 ---
124 03:53 ---
124 03:58 ---
124 04:03 ---
124 04:08 ---
124 04:13 ---
124 04:18 ---
124 04:23 ---
124 04:28 ---
124 04:33 ---
124 04:38 ---
124 04:43 ---
124 04:48 ---
124 04:53 ---
124 04:58 ---
124 05:03 ---
124 05:08 ---
124 05:13 ---
124 05:18 ---
124 05:23 ---
124 05:28 ---
124 05:33 ---
124 05:38 ---
124 05:43 ---
124 05:48 ---
124 05:53 ---
124 05:58 ---
124 06:03 ---
124 06:08 ---
124 06:13 ---
124 06:18 ---
124 06:23 ---
124 06:28 ---
124 06:33 ---
124 06:38 ---
124 06:43 ---
124 06:48 ---
124 06:53 ---
124 06:58 ---
124 07:03 ---
124 07:08 ---
124 07:13 ---
124 07:18 ---
124 07:23 ---
124 07:28 ---
124 07:33 ---
124 07:38 ---
124 07:43 ---
124 07:48 ---
124 07:53 ---
124 07:58 ---
124 08:03 ---
124 08:08 ---
124 08:13 ---
124 08:18 ---
124 08:23 ---
124 08:28 ---
124 08:33 ---
124 08:38 ---
124 08:43 ---
124 08:48 ---
124 08:53 ---
124 08:58 ---
124 09:03 ---
124 09:08 ---
124 09:13 ---
124 09:18 ---
124 09:23 ---
124 09:28 ---
124 09:33 ---
124 09:38 ---
124 09:43 ---
124 09:48 ---
124 09:53 ---
124 09:58 ---
124 10:03 ---
124 10:08 ---
124 10:13 ---
124 10:18 ---
124 10:23 ---
124 10:28 ---
124 10:33 ---
124 10:38 ---
124 10:43 ---
124 10:48 ---
124 10:53 ---
124 10:58 ---
124 11:03 ---
124 11:08 ---
124 11:13 ---
124 11:18 ---
124 11:23 ---
124 11:28 ---
124 11:33 ---
124 11:38 ---
124 11:43 ---
124 11:48 ---
124 11:53 ---
124 11:58 ---
124 12:03 ---
124 12:08 ---
124 12:13 ---
124 12:18 ---
124 12:23 ---
124 12:28 ---
124 12:33 ---
124 12:38 ---
124 12:43 ---
124 12:48 ---
124 12:53 ---
124 12:58 ---
124 13:03 ---
124 13:08 ---
124 13:13 ---
124 13:18 ---
124 13:23 ---
124 13:28 ---
124 13:33 ---
124 13:38 ---
124 13:43 ---
124 13:48 ---
124 13:53 ---
124 13:58 ---
124 14:03 ---
124 14:08 ---
124 14:13 ---
124 14:18 ---
124 14:23 ---
124 14:28 ---
124 14:33 ---
124 14:38 ---
124 14:43 ---
124 14:48 ---
124 14:53 ---
124 14:58 ---
124 15:03 ---
124 15:08 ---
124 15:13 ---
124 15:18 ---
124 15:23 ---
124 15:28 ---
124 15:33 ---
124 15:38 ---
124 15:43 ---
124 15:48 ---
124 15:53 ---
124 15:58 ---
124 16:03 ---
124 16:08 ---
124 16:13 ---
124 16:18 ---
124 16:23 ---
124 16:28 ---
124 16:33 ---
124 16:38 ---
124 16:43 ---
124 16:48 ---
124 16:53 ---
124 16:58 ---
124 17:03 ---
124 17:08 ---
124 17:13 ---
124 17:18 ---
124 17:23 ---
124 17:28 ---
124 17:33 ---
124 17:38 ---
124 17:43 ---
124 17:48 ---
124 17:53 ---
124 17:58 ---
124 18:03 ---
124 18:08 ---
124 18:13 ---
124 18:18 ---
124 18:23 ---
124 18:28 ---
124 18:33 ---
124 18:38 ---
124 18:43 ---
124 18:48 ---
124 18:53 ---
124 18:58 SVA ---
124 19:03 SVA ---
124 19:08 SVA ---
124 19:13 SVA ---
124 19:18 SVA ---
124 19:23 SVA ---
124 19:28 SVA ---
124 19:33 SVA ---
124 19:38 SVA ---
124 19:43 SVA ---
124 19:48 SVA ---
124 19:53 SVA ---
124 19:58 SVA ---
124 20:03 SVA ---
124 20:08 SVA ---
124 20:13 SVA --- SVR
124 20:18 SVA --- SVR
124 20:23 SVA --- SVR
124 20:28 SVA --- SVR
124 20:33 SVA --- SVR
124 20:38 SVA --- SVR
124 20:43 SVA --- SVR
124 20:48 SVA --- SVR,SVR
124 20:53 SVA --- SVR,SVR
124 20:58 SVA --- SVR
124 21:03 SVA --- SVR
124 21:08 SVA --- SVR
124 21:13 SVA --- SVR
124 21:18 SVA --- SVR
124 21:23 SVR,SVA --- SVR
124 21:28 SVR,SVA --- SVR
124 21:33 SVR,SVA --- SVR
124 21:38 SVR,SVA --- SVR
124 21:43 SVR,SVA --- SVR
124 21:48 SVR,SVA ---
124 21:53 SVR,SVA ---
124 21:58 SVR,SVR,SVA ---
124 22:03 SVR,SVR,SVA ---
124 22:08 TOR,SVR,SVA ---
124 22:13 TOR,SVR,SVA ---
124 22:18 TOR,SVR,SVA ---
124 22:23 SVR,SVA ---
124 22:28 SVR,SVA ---
124 22:33 SVR,SVA ---
124 22:38 SVR,SVR,SVA ---
124 22:43 SVR,SVR,SVA ---
124 22:48 SVR,SVR,SVA ---
124 22:53 SVR,SVR,SVA ---
124 22:58 SVR,SVA ---
124 23:03 SVR,SVA ---
124 23:08 SVR,SVA ---
124 23:13 SVR,SVA ---
124 23:18 SVR,SVA ---
124 23:23 SVR,SVA ---
124 23:28 SVR,SVA ---
124 23:33 SVR,SVA ---
124 23:38 SVA ---
124 23:43 SVA --- SVR
124 23:48 SVA --- SVR
124 23:53 SVA --- SVR
124 23:58 SVA --- SVR
125 00:03 SVA --- SVR
125 00:08 SVA --- SVR
125 00:13 SVA --- SVR,SVR
125 00:18 SVA --- SVR,SVR
125 00:23 SVA --- SVR,SVR
125 00:28 SVA --- SVR,SVR
125 00:33 SVA --- SVR,SVR,SVR
125 00:38 SVA --- SVR,SVR,SVR
125 00:43 SVA --- SVR,SVR
125 00:48 SVA --- SVR,SVR
125 00:53 SVA --- SVR,SVR
125 00:58 --- SVR,SVR
125 01:03 --- SVR,SVR
125 01:08 --- SVR,SVR
125 01:13 --- SVR
125 01:18 --- SVR
125 01:23 --- SVR
125 01:28 --- SVR
125 01:33 ---
125 01:38 ---
125 01:43 ---
125 01:48 ---
125 01:53 SVR ---
125 01:58 SVR ---
125 02:03 SVR ---
125 02:08 SVR ---
125 02:13 SVR ---
125 02:18 SVR --- SVR
125 02:23 SVR --- SVR
125 02:28 SVR --- SVR
125 02:33 SVR --- SVR
125 02:38 SVR --- SVR
125 02:43 SVR --- SVR
125 02:48 SVR --- SVR
125 02:53 --- SVR
125 02:58 --- SVR
125 03:03 --- SVR
125 03:08 --- SVR
125 03:13 --- SVR
125 03:18 ---
125 03:23 ---
125 03:28 ---
125 03:33 ---
""".split("\n")
buf = MessageCache((35.73, -78.85), "037183", default_SAME_sort)
# Iterate through this storm system 5 minutes at a time
aix = 0
eix = 0
for t in range(int(alerts[0].get_start_time_sec()),
int(alerts[-1].get_start_time_sec() + alerts[-1].get_duration_sec() + 1000),
300):
while aix < len(alerts) and alerts[aix].get_start_time_sec() <= t:
buf.add_message(alerts[aix])
aix += 1
ptime = time.strftime("%j %H:%M ", time.gmtime(t))
here = buf.get_active_messages(when=t)
elsewhere = buf.get_active_messages(when=t, here=False)
stat = ptime + ",".join([x.get_event_type() for x in here]) \
+ " --- " + ",".join([x.get_event_type() for x in elsewhere])
self.assertEqual(expected[eix].strip(), stat.strip())
eix += 1
def test_net_alerts(self):
expected = """146 01:24 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:26 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:34 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:36 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:45 KGLD.TO.W.0029 --- KGLD.TO.A.0204
146 02:02 --- KGLD.TO.W.0029,KGLD.TO.A.0204
146 02:13 KGLD.TO.A.0206 --- KGLD.TO.W.0029,KGLD.TO.A.0204
146 02:17 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:33 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:33 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:46 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:04 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:13 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:16 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 03:39 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 03:50 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 04:05 KGLD.TO.A.0206 --- KGLD.TO.A.0204
146 04:33 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 04:55 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 04:56 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 05:09 KGLD.TO.A.0206 --- KGLD.SV.W.0094
146 05:10 KGLD.TO.A.0206 --- KGLD.SV.W.0094""".split("\n")
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
# https://mesonet.agron.iastate.edu/vtec/#2016-O-NEW-KGLD-TO-W-0029/USCOMP-N0Q-201605250145
buf = MessageCache((40.321909, -102.718192), "008125", default_VTEC_sort)
aix = eix = 0
for t in range(alerts[0][0], alerts[-1][0] + 2):
delta = False
while aix < len(alerts) and alerts[aix][0] <= t:
for v in alerts[aix][1].vtec:
buf.add_message(v)
aix += 1
delta = True
if delta:
here = buf.get_active_messages(when=t)
display_time = time.strftime("%j %H:%M ", time.gmtime(t))
try:
elsewhere = buf.get_active_messages(when=t, here=False)
except TypeError:
# TODO fix the comparator to handle null times
print([str(x) for x in filter(lambda m: m.is_effective(t), buf._MessageCache__messages.values())])
raise
line = display_time + ",".join([x.get_event_id() for x in here]) \
+ " --- " + ",".join([x.get_event_id() for x in elsewhere])
# print(line)
self.assertEqual(expected[eix], line)
eix += 1
self.assertIsNot(0, eix, 'need assertions')
def test_not_here_with_polygon(self):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
valerts = list(filter(lambda v: v.event_id == "KGLD.TO.W.0028", [item for sublist in [c.vtec for a, c in alerts]
for item in sublist]))
buf = EventMessageGroup()
buf.add_message(valerts[0])
self.assertTrue(buf.is_effective((40.321909, -102.718192), "008125", True, valerts[0].published))
self.assertFalse(buf.is_effective((40.321909, -102.718192), "008125", False, valerts[0].published))
def test_not_here_sans_polygon(self):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
valerts = list(filter(lambda v: v.event_id == "KGLD.TO.A.0206", [item for sublist in [c.vtec for a, c in alerts]
for item in sublist]))
buf = EventMessageGroup()
buf.add_message(valerts[0])
self.assertTrue(buf.is_effective((40.321909, -102.718192), "008125", True, valerts[0].published))
self.assertFalse(buf.is_effective((40.321909, -102.718192), "008125", False, valerts[0].published))
| gpl-3.0 |
Ttl/scikit-rf | skrf/media/tests/test_media.py | 6 | 3363 | import unittest
import os
import numpy as npy
from skrf.media import DefinedGammaZ0, Media
from skrf.network import Network
from skrf.frequency import Frequency
import skrf
class DefinedGammaZ0TestCase(unittest.TestCase):
def setUp(self):
self.files_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'qucs_prj'
)
self.dummy_media = DefinedGammaZ0(
frequency = Frequency(1,100,21,'ghz'),
gamma=1j,
z0 = 50 ,
)
def test_impedance_mismatch(self):
'''
'''
fname = os.path.join(self.files_dir,\
'impedanceMismatch,50to25.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.thru(z0=50)**\
self.dummy_media.thru(z0=25)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_resistor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'resistor,1ohm.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.resistor(1)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_capacitor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'capacitor,p01pF.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.capacitor(.01e-12)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_inductor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'inductor,p1nH.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.inductor(.1e-9)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_scalar_gamma_z0_media(self):
'''
test ability to create a Media from scalar quanties for gamma/z0
and change frequency resolution
'''
a = DefinedGammaZ0 (Frequency(1,10,101),gamma=1j,z0 = 50)
self.assertEqual(a.line(1),a.line(1))
# we should be able to re-sample the media
a.npoints = 21
self.assertEqual(len(a.gamma), len(a))
self.assertEqual(len(a.z0), len(a))
self.assertEqual(len(a.z0), len(a))
def test_vector_gamma_z0_media(self):
'''
test ability to create a Media from vector quanties for gamma/z0
'''
freq = Frequency(1,10,101)
a = DefinedGammaZ0(freq,
gamma = 1j*npy.ones(len(freq)) ,
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
| bsd-3-clause |
ondrokrc/gramps | gramps/plugins/lib/libhtmlconst.py | 2 | 4656 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
General constants used in different html enabled plugins
"""
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_CHARACTER_SETS = [
# First is used as default selection.
# As seen on the internet, ISO-xxx are listed as capital letters
[_('Unicode UTF-8 (recommended)'), 'UTF-8'],
['ISO-8859-1', 'ISO-8859-1' ],
['ISO-8859-2', 'ISO-8859-2' ],
['ISO-8859-3', 'ISO-8859-3' ],
['ISO-8859-4', 'ISO-8859-4' ],
['ISO-8859-5', 'ISO-8859-5' ],
['ISO-8859-6', 'ISO-8859-6' ],
['ISO-8859-7', 'ISO-8859-7' ],
['ISO-8859-8', 'ISO-8859-8' ],
['ISO-8859-9', 'ISO-8859-9' ],
['ISO-8859-10', 'ISO-8859-10' ],
['ISO-8859-13', 'ISO-8859-13' ],
['ISO-8859-14', 'ISO-8859-14' ],
['ISO-8859-15', 'ISO-8859-15' ],
['koi8_r', 'koi8_r', ],
]
_CC = [
'',
'<a rel="license" href="http://creativecommons.org/licenses/by/2.5/">'
'<img alt="Creative Commons License - By attribution" '
'title="Creative Commons License - By attribution" '
'src="%(gif_fname)s" /></a>',
'<a rel="license" href="http://creativecommons.org/licenses/by-nd/2.5/">'
'<img alt="Creative Commons License - By attribution, No derivations" '
'title="Creative Commons License - By attribution, No derivations" '
'src="%(gif_fname)s" /></a>',
'<a rel="license" href="http://creativecommons.org/licenses/by-sa/2.5/">'
'<img alt="Creative Commons License - By attribution, Share-alike" '
'title="Creative Commons License - By attribution, Share-alike" '
'src="%(gif_fname)s" /></a>',
'<a rel="license" href="http://creativecommons.org/licenses/by-nc/2.5/">'
'<img alt="Creative Commons License - By attribution, Non-commercial" '
'title="Creative Commons License - By attribution, Non-commercial" '
'src="%(gif_fname)s" /></a>',
'<a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/2.5/">'
'<img alt="Creative Commons License - By attribution, Non-commercial, '
'No derivations" '
'title="Creative Commons License - By attribution, Non-commercial, '
'No derivations" '
'src="%(gif_fname)s" /></a>',
'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/">'
'<img alt="Creative Commons License - By attribution, Non-commerical, '
'Share-alike" '
'title="Creative Commons License - By attribution, Non-commerical, '
'Share-alike" '
'src="%(gif_fname)s" /></a>'
]
_COPY_OPTIONS = [
_('Standard copyright'),
# This must match _CC
# translators, long strings, have a look at Web report dialogs
_('Creative Commons - By attribution'),
_('Creative Commons - By attribution, No derivations'),
_('Creative Commons - By attribution, Share-alike'),
_('Creative Commons - By attribution, Non-commercial'),
_('Creative Commons - By attribution, Non-commercial, No derivations'),
_('Creative Commons - By attribution, Non-commercial, Share-alike'),
_('No copyright notice'),
]
| gpl-2.0 |
kamcpp/tensorflow | tensorflow/contrib/learn/python/learn/estimators/head_test.py | 3 | 7722 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
class RegressionModelHeadTest(tf.test.TestCase):
# TODO(zakaria): test multilabel regresssion.
def testRegression(self):
head = head_lib._regression_head()
with tf.Graph().as_default(), tf.Session() as sess:
prediction = tf.constant([[1.], [1.], [3.]])
targets = tf.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops({}, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
def testRegressionWithWeights(self):
head = head_lib._regression_head(
weight_column_name="label_weight")
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
prediction = tf.constant([[1.], [1.], [3.]])
targets = tf.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(features, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
def testErrorInSparseTensorTarget(self):
head = head_lib._regression_head()
with tf.Graph().as_default():
prediction = tf.constant([[1.], [1.], [3.]])
targets = tf.SparseTensor(
indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
values=tf.constant([0., 1., 1.]),
shape=[3, 1])
with self.assertRaisesRegexp(
ValueError, "SparseTensor is not supported as a target"):
head.head_ops({}, targets, tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
class MultiClassModelHeadTest(tf.test.TestCase):
def testBinaryClassification(self):
head = head_lib._multi_class_head(n_classes=2)
with tf.Graph().as_default(), tf.Session() as sess:
logits = tf.constant([[1.], [1.]])
targets = tf.constant([[1.], [0.]])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops({}, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self.assertAlmostEqual(.81326163, sess.run(model_fn_ops.loss))
def testErrorInSparseTensorTarget(self):
head = head_lib._multi_class_head(n_classes=2)
with tf.Graph().as_default():
prediction = tf.constant([[1.], [1.], [3.]])
targets = tf.SparseTensor(
indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
values=tf.constant([0, 1, 1]),
shape=[3, 1])
with self.assertRaisesRegexp(
ValueError, "SparseTensor is not supported as a target"):
head.head_ops({}, targets, tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
def testBinaryClassificationWithWeights(self):
head = head_lib._multi_class_head(
n_classes=2, weight_column_name="label_weight")
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([[1.], [0.]])}
logits = tf.constant([[1.], [1.]])
targets = tf.constant([[1.], [0.]])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops(features, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self.assertAlmostEqual(.31326166 / 2, sess.run(model_fn_ops.loss))
def testMultiClass(self):
head = head_lib._multi_class_head(n_classes=3)
with tf.Graph().as_default(), tf.Session() as sess:
logits = tf.constant([[1., 0., 0.]])
targets = tf.constant([2])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops({}, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self.assertAlmostEqual(1.5514446, sess.run(model_fn_ops.loss))
def testMultiClassWithWeight(self):
head = head_lib._multi_class_head(
n_classes=3, weight_column_name="label_weight")
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([0.1])}
logits = tf.constant([[1., 0., 0.]])
targets = tf.constant([2])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops(features, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self.assertAlmostEqual(.15514446, sess.run(model_fn_ops.loss))
def testMultiClassWithInvalidNClass(self):
try:
head_lib._multi_class_head(n_classes=1)
self.fail("Softmax with no n_classes did not raise error.")
except ValueError:
# Expected
pass
class BinarySvmModelHeadTest(tf.test.TestCase):
def testBinarySVMDefaultWeights(self):
head = head_lib._binary_svm_head()
predictions = tf.constant([[-0.5], [1.2]])
targets = tf.constant([0, 1])
model_fn_ops = head.head_ops({}, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=predictions)
# Prediction for first example is in the right side of the hyperplane (i.e.,
# < 0) but it is within the [-1,1] margin. There is a 0.5 loss incurred by
# this example. The 2nd prediction is outside the margin so it incurs no
# loss at all. The overall (normalized) loss is therefore 0.5/(1+1) = 0.25.
with tf.Session() as sess:
self.assertAlmostEqual(0.25, sess.run(model_fn_ops.loss))
def testBinarySVMWithWeights(self):
head = head_lib._binary_svm_head(
weight_column_name="weights")
predictions = tf.constant([[-0.7], [0.2]])
targets = tf.constant([0, 1])
features = {"weights": tf.constant([2.0, 10.0])}
model_fn_ops = head.head_ops(features, targets,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=predictions)
# Prediction for both examples are in the right side of the hyperplane but
# within the margin. The (weighted) loss incurred is 2*0.3=0.6 and 10*0.8=8
# respectively. The overall (normalized) loss is therefore 8.6/12.
with tf.Session() as sess:
self.assertAlmostEqual(8.6 / 2, sess.run(model_fn_ops.loss), places=3)
def _noop_train_op(unused_loss):
return tf.no_op()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
soldag/home-assistant | homeassistant/components/http/ban.py | 5 | 6691 | """Ban logic for HTTP component."""
from collections import defaultdict
from datetime import datetime
from ipaddress import ip_address
import logging
from socket import gethostbyaddr, herror
from typing import List, Optional
from aiohttp.web import middleware
from aiohttp.web_exceptions import HTTPForbidden, HTTPUnauthorized
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import HTTP_BAD_REQUEST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.yaml import dump
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
KEY_BANNED_IPS = "ha_banned_ips"
KEY_FAILED_LOGIN_ATTEMPTS = "ha_failed_login_attempts"
KEY_LOGIN_THRESHOLD = "ha_login_threshold"
NOTIFICATION_ID_BAN = "ip-ban"
NOTIFICATION_ID_LOGIN = "http-login"
IP_BANS_FILE = "ip_bans.yaml"
ATTR_BANNED_AT = "banned_at"
SCHEMA_IP_BAN_ENTRY = vol.Schema(
{vol.Optional("banned_at"): vol.Any(None, cv.datetime)}
)
@callback
def setup_bans(hass, app, login_threshold):
"""Create IP Ban middleware for the app."""
app.middlewares.append(ban_middleware)
app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int)
app[KEY_LOGIN_THRESHOLD] = login_threshold
async def ban_startup(app):
"""Initialize bans when app starts up."""
app[KEY_BANNED_IPS] = await async_load_ip_bans_config(
hass, hass.config.path(IP_BANS_FILE)
)
app.on_startup.append(ban_startup)
@middleware
async def ban_middleware(request, handler):
"""IP Ban middleware."""
if KEY_BANNED_IPS not in request.app:
_LOGGER.error("IP Ban middleware loaded but banned IPs not loaded")
return await handler(request)
# Verify if IP is not banned
ip_address_ = ip_address(request.remote)
is_banned = any(
ip_ban.ip_address == ip_address_ for ip_ban in request.app[KEY_BANNED_IPS]
)
if is_banned:
raise HTTPForbidden()
try:
return await handler(request)
except HTTPUnauthorized:
await process_wrong_login(request)
raise
def log_invalid_auth(func):
"""Decorate function to handle invalid auth or failed login attempts."""
async def handle_req(view, request, *args, **kwargs):
"""Try to log failed login attempts if response status >= 400."""
resp = await func(view, request, *args, **kwargs)
if resp.status >= HTTP_BAD_REQUEST:
await process_wrong_login(request)
return resp
return handle_req
async def process_wrong_login(request):
"""Process a wrong login attempt.
Increase failed login attempts counter for remote IP address.
Add ip ban entry if failed login attempts exceeds threshold.
"""
hass = request.app["hass"]
remote_addr = ip_address(request.remote)
remote_host = request.remote
try:
remote_host, _, _ = await hass.async_add_executor_job(
gethostbyaddr, request.remote
)
except herror:
pass
msg = f"Login attempt or request with invalid authentication from {remote_host} ({remote_addr})"
user_agent = request.headers.get("user-agent")
if user_agent:
msg = f"{msg} ({user_agent})"
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Login attempt failed", NOTIFICATION_ID_LOGIN
)
# Check if ban middleware is loaded
if KEY_BANNED_IPS not in request.app or request.app[KEY_LOGIN_THRESHOLD] < 1:
return
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1
# Supervisor IP should never be banned
if (
"hassio" in hass.config.components
and hass.components.hassio.get_supervisor_ip() == str(remote_addr)
):
return
if (
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr]
>= request.app[KEY_LOGIN_THRESHOLD]
):
new_ban = IpBan(remote_addr)
request.app[KEY_BANNED_IPS].append(new_ban)
await hass.async_add_executor_job(
update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban
)
_LOGGER.warning("Banned IP %s for too many login attempts", remote_addr)
hass.components.persistent_notification.async_create(
f"Too many login attempts from {remote_addr}",
"Banning IP address",
NOTIFICATION_ID_BAN,
)
async def process_success_login(request):
"""Process a success login attempt.
Reset failed login attempts counter for remote IP address.
No release IP address from banned list function, it can only be done by
manual modify ip bans config file.
"""
remote_addr = ip_address(request.remote)
# Check if ban middleware is loaded
if KEY_BANNED_IPS not in request.app or request.app[KEY_LOGIN_THRESHOLD] < 1:
return
if (
remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS]
and request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0
):
_LOGGER.debug(
"Login success, reset failed login attempts counter from %s", remote_addr
)
request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)
class IpBan:
"""Represents banned IP address."""
def __init__(self, ip_ban: str, banned_at: Optional[datetime] = None) -> None:
"""Initialize IP Ban object."""
self.ip_address = ip_address(ip_ban)
self.banned_at = banned_at or datetime.utcnow()
async def async_load_ip_bans_config(hass: HomeAssistant, path: str) -> List[IpBan]:
"""Load list of banned IPs from config file."""
ip_list: List[IpBan] = []
try:
list_ = await hass.async_add_executor_job(load_yaml_config_file, path)
except FileNotFoundError:
return ip_list
except HomeAssistantError as err:
_LOGGER.error("Unable to load %s: %s", path, str(err))
return ip_list
for ip_ban, ip_info in list_.items():
try:
ip_info = SCHEMA_IP_BAN_ENTRY(ip_info)
ip_list.append(IpBan(ip_ban, ip_info["banned_at"]))
except vol.Invalid as err:
_LOGGER.error("Failed to load IP ban %s: %s", ip_info, err)
continue
return ip_list
def update_ip_bans_config(path: str, ip_ban: IpBan) -> None:
"""Update config file with new banned IP address."""
with open(path, "a") as out:
ip_ = {
str(ip_ban.ip_address): {
ATTR_BANNED_AT: ip_ban.banned_at.strftime("%Y-%m-%dT%H:%M:%S")
}
}
out.write("\n")
out.write(dump(ip_))
| apache-2.0 |
johnkit/vtk-dev | ThirdParty/Twisted/twisted/test/test_sip.py | 39 | 34356 | # -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol tests."""
from twisted.trial import unittest, util
from twisted.protocols import sip
from twisted.internet import defer, reactor, utils
from twisted.python.versions import Version
from twisted.test import proto_helpers
from twisted import cred
import twisted.cred.portal
import twisted.cred.checkers
from zope.interface import implements
# Canned SIP messages for the parser tests below; "\n" is rewritten to the
# "\r\n" line endings the wire format requires.
# request, prefixed by random CRLFs
request1 = "\n\r\n\n\r" + """\
INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 4
abcd""".replace("\n", "\r\n")
# request, no content-length
request2 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
1234""".replace("\n", "\r\n")
# request, with garbage after
request3 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 4
1234
lalalal""".replace("\n", "\r\n")
# three requests
request4 = """INVITE sip:foo SIP/2.0
From: mo
To: joe
Content-Length: 0
INVITE sip:loop SIP/2.0
From: foo
To: bar
Content-Length: 4
abcdINVITE sip:loop SIP/2.0
From: foo
To: bar
Content-Length: 4
1234""".replace("\n", "\r\n")
# response, no content
response1 = """SIP/2.0 200 OK
From: foo
To:bar
Content-Length: 0
""".replace("\n", "\r\n")
# short header version
request_short = """\
INVITE sip:foo SIP/2.0
f: mo
t: joe
l: 4
abcd""".replace("\n", "\r\n")
# request carrying a valueless rport Via parameter, as sent from behind a NAT
request_natted = """\
INVITE sip:foo SIP/2.0
Via: SIP/2.0/UDP 10.0.0.1:5060;rport
""".replace("\n", "\r\n")
# multiline headers (example from RFC 3261, the SIP specification).
response_multiline = """\
SIP/2.0 200 OK
Via: SIP/2.0/UDP server10.biloxi.com
;branch=z9hG4bKnashds8;received=192.0.2.3
Via: SIP/2.0/UDP bigbox3.site3.atlanta.com
;branch=z9hG4bK77ef4c2312983.1;received=192.0.2.2
Via: SIP/2.0/UDP pc33.atlanta.com
;branch=z9hG4bK776asdhds ;received=192.0.2.1
To: Bob <sip:bob@biloxi.com>;tag=a6c85cf
From: Alice <sip:alice@atlanta.com>;tag=1928301774
Call-ID: a84b4c76e66710@pc33.atlanta.com
CSeq: 314159 INVITE
Contact: <sip:bob@192.0.2.4>
Content-Type: application/sdp
Content-Length: 0
\n""".replace("\n", "\r\n")
class TestRealm:
    """Minimal cred realm: hands out a null IContact avatar for any user."""
    def requestAvatar(self, avatarId, mind, *interfaces):
        # Interface, avatar implementation, and a no-op logout callable.
        return sip.IContact, None, lambda: None
class MessageParsingTestCase(unittest.TestCase):
    """Tests for sip.MessagesParser, feeding each message in one chunk."""
    def setUp(self):
        # Parsed messages are collected here by the parser's callback.
        self.l = []
        self.parser = sip.MessagesParser(self.l.append)
    def feedMessage(self, message):
        self.parser.dataReceived(message)
        self.parser.dataDone()
    def validateMessage(self, m, method, uri, headers, body):
        """Validate Requests."""
        self.assertEqual(m.method, method)
        self.assertEqual(m.uri.toString(), uri)
        self.assertEqual(m.headers, headers)
        self.assertEqual(m.body, body)
        self.assertEqual(m.finished, 1)
    def testSimple(self):
        l = self.l
        self.feedMessage(request1)
        self.assertEqual(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")
    def testTwoMessages(self):
        l = self.l
        self.feedMessage(request1)
        self.feedMessage(request2)
        self.assertEqual(len(l), 2)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")
        self.validateMessage(l[1], "INVITE", "sip:foo",
                             {"from": ["mo"], "to": ["joe"]},
                             "1234")
    def testGarbage(self):
        # Trailing bytes beyond Content-Length are ignored.
        l = self.l
        self.feedMessage(request3)
        self.assertEqual(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "1234")
    def testThreeInOne(self):
        l = self.l
        self.feedMessage(request4)
        self.assertEqual(len(l), 3)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["0"]},
            "")
        self.validateMessage(
            l[1], "INVITE", "sip:loop",
            {"from": ["foo"], "to": ["bar"], "content-length": ["4"]},
            "abcd")
        self.validateMessage(
            l[2], "INVITE", "sip:loop",
            {"from": ["foo"], "to": ["bar"], "content-length": ["4"]},
            "1234")
    def testShort(self):
        # Compact header names (f/t/l) expand to their long forms.
        l = self.l
        self.feedMessage(request_short)
        self.assertEqual(len(l), 1)
        self.validateMessage(
            l[0], "INVITE", "sip:foo",
            {"from": ["mo"], "to": ["joe"], "content-length": ["4"]},
            "abcd")
    def testSimpleResponse(self):
        l = self.l
        self.feedMessage(response1)
        self.assertEqual(len(l), 1)
        m = l[0]
        self.assertEqual(m.code, 200)
        self.assertEqual(m.phrase, "OK")
        self.assertEqual(
            m.headers,
            {"from": ["foo"], "to": ["bar"], "content-length": ["0"]})
        self.assertEqual(m.body, "")
        self.assertEqual(m.finished, 1)
    def test_multiLine(self):
        """
        A header may be split across multiple lines. Subsequent lines begin
        with C{" "} or C{"\\t"}.
        """
        l = self.l
        self.feedMessage(response_multiline)
        self.assertEquals(len(l), 1)
        m = l[0]
        self.assertEquals(
            m.headers['via'][0],
            "SIP/2.0/UDP server10.biloxi.com;"
            "branch=z9hG4bKnashds8;received=192.0.2.3")
        self.assertEquals(
            m.headers['via'][1],
            "SIP/2.0/UDP bigbox3.site3.atlanta.com;"
            "branch=z9hG4bK77ef4c2312983.1;received=192.0.2.2")
        self.assertEquals(
            m.headers['via'][2],
            "SIP/2.0/UDP pc33.atlanta.com;"
            "branch=z9hG4bK776asdhds ;received=192.0.2.1")
class MessageParsingTestCase2(MessageParsingTestCase):
    """Same as base class, but feed data char by char."""
    def feedMessage(self, message):
        # One byte at a time exercises the parser's buffering/state machine.
        for c in message:
            self.parser.dataReceived(c)
        self.parser.dataDone()
class MakeMessageTestCase(unittest.TestCase):
    """Tests for serializing sip.Request / sip.Response via toString."""
    def testRequest(self):
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("foo", "bar")
        self.assertEqual(
            r.toString(),
            "INVITE sip:foo SIP/2.0\r\nFoo: bar\r\n\r\n")
    def testResponse(self):
        r = sip.Response(200, "OK")
        r.addHeader("foo", "bar")
        r.addHeader("Content-Length", "4")
        r.bodyDataReceived("1234")
        self.assertEqual(
            r.toString(),
            "SIP/2.0 200 OK\r\nFoo: bar\r\nContent-Length: 4\r\n\r\n1234")
    def testStatusCode(self):
        # Omitting the phrase picks the standard one for the code.
        r = sip.Response(200)
        self.assertEqual(r.toString(), "SIP/2.0 200 OK\r\n\r\n")
class ViaTestCase(unittest.TestCase):
    """Tests for sip.Via construction and sip.parseViaHeader round-trips."""
    def checkRoundtrip(self, v):
        # Serializing then re-parsing must reproduce the same string.
        s = v.toString()
        self.assertEqual(s, sip.parseViaHeader(s).toString())
    def testExtraWhitespace(self):
        v1 = sip.parseViaHeader('SIP/2.0/UDP 192.168.1.1:5060')
        v2 = sip.parseViaHeader('SIP/2.0/UDP 192.168.1.1:5060')
        self.assertEqual(v1.transport, v2.transport)
        self.assertEqual(v1.host, v2.host)
        self.assertEqual(v1.port, v2.port)
    def test_complex(self):
        """
        Test parsing a Via header with one of everything.
        """
        s = ("SIP/2.0/UDP first.example.com:4000;ttl=16;maddr=224.2.0.1"
             " ;branch=a7c6a8dlze (Example)")
        v = sip.parseViaHeader(s)
        self.assertEqual(v.transport, "UDP")
        self.assertEqual(v.host, "first.example.com")
        self.assertEqual(v.port, 4000)
        self.assertEqual(v.rport, None)
        self.assertEqual(v.rportValue, None)
        self.assertEqual(v.rportRequested, False)
        self.assertEqual(v.ttl, 16)
        self.assertEqual(v.maddr, "224.2.0.1")
        self.assertEqual(v.branch, "a7c6a8dlze")
        self.assertEqual(v.hidden, 0)
        self.assertEqual(v.toString(),
                         "SIP/2.0/UDP first.example.com:4000"
                         ";ttl=16;branch=a7c6a8dlze;maddr=224.2.0.1")
        self.checkRoundtrip(v)
    def test_simple(self):
        """
        Test parsing a simple Via header.
        """
        s = "SIP/2.0/UDP example.com;hidden"
        v = sip.parseViaHeader(s)
        self.assertEqual(v.transport, "UDP")
        self.assertEqual(v.host, "example.com")
        self.assertEqual(v.port, 5060)
        self.assertEqual(v.rport, None)
        self.assertEqual(v.rportValue, None)
        self.assertEqual(v.rportRequested, False)
        self.assertEqual(v.ttl, None)
        self.assertEqual(v.maddr, None)
        self.assertEqual(v.branch, None)
        self.assertEqual(v.hidden, True)
        self.assertEqual(v.toString(),
                         "SIP/2.0/UDP example.com:5060;hidden")
        self.checkRoundtrip(v)
    def testSimpler(self):
        v = sip.Via("example.com")
        self.checkRoundtrip(v)
    def test_deprecatedRPort(self):
        """
        Setting rport to True is deprecated, but still produces a Via header
        with the expected properties.
        """
        v = sip.Via("foo.bar", rport=True)
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedRPort])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            'rport=True is deprecated since Twisted 9.0.')
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport")
        self.assertEqual(v.rport, True)
        self.assertEqual(v.rportRequested, True)
        self.assertEqual(v.rportValue, None)
    def test_rport(self):
        """
        An rport setting of None should insert the parameter with no value.
        """
        v = sip.Via("foo.bar", rport=None)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport")
        self.assertEqual(v.rportRequested, True)
        self.assertEqual(v.rportValue, None)
    def test_rportValue(self):
        """
        An rport numeric setting should insert the parameter with the number
        value given.
        """
        v = sip.Via("foo.bar", rport=1)
        self.assertEqual(v.toString(), "SIP/2.0/UDP foo.bar:5060;rport=1")
        self.assertEqual(v.rportRequested, False)
        self.assertEqual(v.rportValue, 1)
        self.assertEqual(v.rport, 1)
    def testNAT(self):
        s = "SIP/2.0/UDP 10.0.0.1:5060;received=22.13.1.5;rport=12345"
        v = sip.parseViaHeader(s)
        self.assertEqual(v.transport, "UDP")
        self.assertEqual(v.host, "10.0.0.1")
        self.assertEqual(v.port, 5060)
        self.assertEqual(v.received, "22.13.1.5")
        self.assertEqual(v.rport, 12345)
        self.assertNotEquals(v.toString().find("rport=12345"), -1)
    def test_unknownParams(self):
        """
        Parsing and serializing Via headers with unknown parameters should work.
        """
        s = "SIP/2.0/UDP example.com:5060;branch=a12345b;bogus;pie=delicious"
        v = sip.parseViaHeader(s)
        self.assertEqual(v.toString(), s)
class URLTestCase(unittest.TestCase):
    """Tests for sip.parseURL and sip.URL serialization."""
    def testRoundtrip(self):
        for url in [
            "sip:j.doe@big.com",
            "sip:j.doe:secret@big.com;transport=tcp",
            "sip:j.doe@big.com?subject=project",
            "sip:example.com",
            ]:
            self.assertEqual(sip.parseURL(url).toString(), url)
    def testComplex(self):
        # One URL exercising every supported component at once.
        s = ("sip:user:pass@hosta:123;transport=udp;user=phone;method=foo;"
             "ttl=12;maddr=1.2.3.4;blah;goo=bar?a=b&c=d")
        url = sip.parseURL(s)
        for k, v in [("username", "user"), ("password", "pass"),
                     ("host", "hosta"), ("port", 123),
                     ("transport", "udp"), ("usertype", "phone"),
                     ("method", "foo"), ("ttl", 12),
                     ("maddr", "1.2.3.4"), ("other", ["blah", "goo=bar"]),
                     ("headers", {"a": "b", "c": "d"})]:
            self.assertEqual(getattr(url, k), v)
class ParseTestCase(unittest.TestCase):
    """Tests for sip.parseAddress name-addr parsing."""
    def testParseAddress(self):
        # (input, expected display name, expected URL, expected params)
        for address, name, urls, params in [
            ('"A. G. Bell" <sip:foo@example.com>',
             "A. G. Bell", "sip:foo@example.com", {}),
            ("Anon <sip:foo@example.com>", "Anon", "sip:foo@example.com", {}),
            ("sip:foo@example.com", "", "sip:foo@example.com", {}),
            ("<sip:foo@example.com>", "", "sip:foo@example.com", {}),
            ("foo <sip:foo@example.com>;tag=bar;foo=baz", "foo",
             "sip:foo@example.com", {"tag": "bar", "foo": "baz"}),
            ]:
            gname, gurl, gparams = sip.parseAddress(address)
            self.assertEqual(name, gname)
            self.assertEqual(gurl.toString(), urls)
            self.assertEqual(gparams, params)
class DummyLocator:
    """Locator that resolves every logical URL to server.com:5060."""
    implements(sip.ILocator)
    def getAddress(self, logicalURL):
        return defer.succeed(sip.URL("server.com", port=5060))
class FailingLocator:
    """Locator whose lookups always fail with LookupError."""
    implements(sip.ILocator)
    def getAddress(self, logicalURL):
        return defer.fail(LookupError())
class ProxyTestCase(unittest.TestCase):
    """Tests for sip.Proxy forwarding and Via header manipulation."""
    def setUp(self):
        self.proxy = sip.Proxy("127.0.0.1")
        self.proxy.locator = DummyLocator()
        # Outgoing (destination, message) pairs are captured here instead of
        # being written to a real transport.
        self.sent = []
        self.proxy.sendMessage = lambda dest, msg: self.sent.append((dest, msg))
    def testRequestForward(self):
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("via", sip.Via("1.2.3.5").toString())
        r.addHeader("foo", "bar")
        r.addHeader("to", "<sip:joe@server.com>")
        r.addHeader("contact", "<sip:joe@1.2.3.5>")
        self.proxy.datagramReceived(r.toString(), ("1.2.3.4", 5060))
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual(dest.port, 5060)
        self.assertEqual(dest.host, "server.com")
        self.assertEqual(m.uri.toString(), "sip:foo")
        self.assertEqual(m.method, "INVITE")
        # The proxy prepends its own Via before forwarding.
        self.assertEqual(m.headers["via"],
                         ["SIP/2.0/UDP 127.0.0.1:5060",
                          "SIP/2.0/UDP 1.2.3.4:5060",
                          "SIP/2.0/UDP 1.2.3.5:5060"])
    def testReceivedRequestForward(self):
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("foo", "bar")
        r.addHeader("to", "<sip:joe@server.com>")
        r.addHeader("contact", "<sip:joe@1.2.3.4>")
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        dest, m = self.sent[0]
        # Mismatched source address is recorded in a received= parameter.
        self.assertEqual(m.headers["via"],
                         ["SIP/2.0/UDP 127.0.0.1:5060",
                          "SIP/2.0/UDP 1.2.3.4:5060;received=1.1.1.1"])
    def testResponseWrongVia(self):
        # first via must match proxy's address
        r = sip.Response(200)
        r.addHeader("via", sip.Via("foo.com").toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEqual(len(self.sent), 0)
    def testResponseForward(self):
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        r.addHeader("via", sip.Via("client.com", port=1234).toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual((dest.host, dest.port), ("client.com", 1234))
        self.assertEqual(m.code, 200)
        self.assertEqual(m.headers["via"], ["SIP/2.0/UDP client.com:1234"])
    def testReceivedResponseForward(self):
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        r.addHeader(
            "via",
            sip.Via("10.0.0.1", received="client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        # Responses are routed to the received= address, not the Via host.
        self.assertEqual((dest.host, dest.port), ("client.com", 5060))
    def testResponseToUs(self):
        r = sip.Response(200)
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        l = []
        self.proxy.gotResponse = lambda *a: l.append(a)
        self.proxy.datagramReceived(r.toString(), ("1.1.1.1", 5060))
        self.assertEqual(len(l), 1)
        m, addr = l[0]
        self.assertEqual(len(m.headers.get("via", [])), 0)
        self.assertEqual(m.code, 200)
    def testLoop(self):
        # A request that already passed through this proxy is dropped.
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("via", sip.Via("127.0.0.1").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEqual(self.sent, [])
    def testCantForwardRequest(self):
        r = sip.Request("INVITE", "sip:foo")
        r.addHeader("via", sip.Via("1.2.3.4").toString())
        r.addHeader("to", "<sip:joe@server.com>")
        self.proxy.locator = FailingLocator()
        self.proxy.datagramReceived(r.toString(), ("1.2.3.4", 5060))
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual((dest.host, dest.port), ("1.2.3.4", 5060))
        self.assertEqual(m.code, 404)
        self.assertEqual(m.headers["via"], ["SIP/2.0/UDP 1.2.3.4:5060"])
    def testCantForwardResponse(self):
        pass
    #testCantForwardResponse.skip = "not implemented yet"
class RegistrationTestCase(unittest.TestCase):
    """Tests for sip.RegisterProxy REGISTER handling and authentication."""
    def setUp(self):
        self.proxy = sip.RegisterProxy(host="127.0.0.1")
        self.registry = sip.InMemoryRegistry("bell.example.com")
        self.proxy.registry = self.proxy.locator = self.registry
        # Outgoing (destination, message) pairs captured in place of a transport.
        self.sent = []
        self.proxy.sendMessage = lambda dest, msg: self.sent.append((dest, msg))
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))
    def tearDown(self):
        # Cancel pending expiry DelayedCalls so the reactor ends clean.
        for d, uri in self.registry.users.values():
            d.cancel()
        del self.proxy
    def register(self):
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
    def unregister(self):
        # Contact "*" with Expires: 0 removes the binding.
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "*")
        r.addHeader("via", sip.Via("client.com").toString())
        r.addHeader("expires", "0")
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
    def testRegister(self):
        self.register()
        dest, m = self.sent[0]
        self.assertEqual((dest.host, dest.port), ("client.com", 5060))
        self.assertEqual(m.code, 200)
        self.assertEqual(m.headers["via"], ["SIP/2.0/UDP client.com:5060"])
        self.assertEqual(m.headers["to"], ["sip:joe@bell.example.com"])
        self.assertEqual(m.headers["contact"], ["sip:joe@client.com:5060"])
        # Expiry is roughly one hour; allow for slow-reactor jitter.
        self.failUnless(
            int(m.headers["expires"][0]) in (3600, 3601, 3599, 3598))
        self.assertEqual(len(self.registry.users), 1)
        dc, uri = self.registry.users["joe"]
        self.assertEqual(uri.toString(), "sip:joe@client.com:5060")
        d = self.proxy.locator.getAddress(sip.URL(username="joe",
                                                  host="bell.example.com"))
        d.addCallback(lambda desturl : (desturl.host, desturl.port))
        d.addCallback(self.assertEqual, ('client.com', 5060))
        return d
    def testUnregister(self):
        self.register()
        self.unregister()
        dest, m = self.sent[1]
        self.assertEqual((dest.host, dest.port), ("client.com", 5060))
        self.assertEqual(m.code, 200)
        self.assertEqual(m.headers["via"], ["SIP/2.0/UDP client.com:5060"])
        self.assertEqual(m.headers["to"], ["sip:joe@bell.example.com"])
        self.assertEqual(m.headers["contact"], ["sip:joe@client.com:5060"])
        self.assertEqual(m.headers["expires"], ["0"])
        self.assertEqual(self.registry.users, {})
    def addPortal(self):
        # Installing a portal makes the proxy demand authentication.
        r = TestRealm()
        p = cred.portal.Portal(r)
        c = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
        c.addUser('userXname@127.0.0.1', 'passXword')
        p.registerChecker(c)
        self.proxy.portal = p
    def testFailedAuthentication(self):
        self.addPortal()
        self.register()
        self.assertEqual(len(self.registry.users), 0)
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual(m.code, 401)
    def test_basicAuthentication(self):
        """
        Test that registration with basic authentication suceeds.
        """
        self.addPortal()
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['basic'] = sip.BasicAuthorizer()
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_basicAuthentication])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.protocols.sip.BasicAuthorizer was deprecated in "
            "Twisted 9.0.0")
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        r.addHeader("authorization",
                    "Basic " + "userXname:passXword".encode('base64'))
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEqual(len(self.registry.users), 1)
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual(m.code, 200)
    def test_failedBasicAuthentication(self):
        """
        Failed registration with basic authentication results in an
        unauthorized error response.
        """
        self.addPortal()
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['basic'] = sip.BasicAuthorizer()
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_failedBasicAuthentication])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.protocols.sip.BasicAuthorizer was deprecated in "
            "Twisted 9.0.0")
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        r.addHeader(
            "authorization", "Basic " + "userXname:password".encode('base64'))
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEqual(len(self.registry.users), 0)
        self.assertEqual(len(self.sent), 1)
        dest, m = self.sent[0]
        self.assertEqual(m.code, 401)
    def testWrongDomainRegister(self):
        r = sip.Request("REGISTER", "sip:wrong.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEqual(len(self.sent), 0)
    def testWrongToDomainRegister(self):
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@foo.com")
        r.addHeader("contact", "sip:joe@client.com:1234")
        r.addHeader("via", sip.Via("client.com").toString())
        self.proxy.datagramReceived(r.toString(), ("client.com", 5060))
        self.assertEqual(len(self.sent), 0)
    def testWrongDomainLookup(self):
        self.register()
        url = sip.URL(username="joe", host="foo.com")
        d = self.proxy.locator.getAddress(url)
        self.assertFailure(d, LookupError)
        return d
    def testNoContactLookup(self):
        self.register()
        url = sip.URL(username="jane", host="bell.example.com")
        d = self.proxy.locator.getAddress(url)
        self.assertFailure(d, LookupError)
        return d
class Client(sip.Base):
    """SIP endpoint that records responses and fires a Deferred on the first."""
    def __init__(self):
        sip.Base.__init__(self)
        self.received = []
        self.deferred = defer.Deferred()
    def handle_response(self, response, addr):
        self.received.append(response)
        self.deferred.callback(self.received)
class LiveTest(unittest.TestCase):
    """End-to-end REGISTER tests over real UDP sockets on the loopback."""
    def setUp(self):
        self.proxy = sip.RegisterProxy(host="127.0.0.1")
        self.registry = sip.InMemoryRegistry("bell.example.com")
        self.proxy.registry = self.proxy.locator = self.registry
        # Port 0 lets the OS pick free ports for both ends.
        self.serverPort = reactor.listenUDP(
            0, self.proxy, interface="127.0.0.1")
        self.client = Client()
        self.clientPort = reactor.listenUDP(
            0, self.client, interface="127.0.0.1")
        self.serverAddress = (self.serverPort.getHost().host,
                              self.serverPort.getHost().port)
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))
    def tearDown(self):
        # Cancel expiry timers, then close both UDP ports.
        for d, uri in self.registry.users.values():
            d.cancel()
        d1 = defer.maybeDeferred(self.clientPort.stopListening)
        d2 = defer.maybeDeferred(self.serverPort.stopListening)
        return defer.gatherResults([d1, d2])
    def testRegister(self):
        p = self.clientPort.getHost().port
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@127.0.0.1:%d" % p)
        r.addHeader("via", sip.Via("127.0.0.1", port=p).toString())
        self.client.sendMessage(
            sip.URL(host="127.0.0.1", port=self.serverAddress[1]), r)
        d = self.client.deferred
        def check(received):
            self.assertEqual(len(received), 1)
            r = received[0]
            self.assertEqual(r.code, 200)
        d.addCallback(check)
        return d
    def test_amoralRPort(self):
        """
        rport is allowed without a value, apparently because server
        implementors might be too stupid to check the received port
        against 5060 and see if they're equal, and because client
        implementors might be too stupid to bind to port 5060, or set a
        value on the rport parameter they send if they bind to another
        port.
        """
        p = self.clientPort.getHost().port
        r = sip.Request("REGISTER", "sip:bell.example.com")
        r.addHeader("to", "sip:joe@bell.example.com")
        r.addHeader("contact", "sip:joe@127.0.0.1:%d" % p)
        r.addHeader("via", sip.Via("127.0.0.1", port=p, rport=True).toString())
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_amoralRPort])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(
            warnings[0]['message'],
            'rport=True is deprecated since Twisted 9.0.')
        self.assertEqual(
            warnings[0]['category'],
            DeprecationWarning)
        self.client.sendMessage(sip.URL(host="127.0.0.1",
                                        port=self.serverAddress[1]),
                                r)
        d = self.client.deferred
        def check(received):
            self.assertEqual(len(received), 1)
            r = received[0]
            self.assertEqual(r.code, 200)
        d.addCallback(check)
        return d
# Canned wire-format messages for AuthorizationTestCase's digest exchange.
# Initial REGISTER without credentials (leading newline is intentional).
registerRequest = """
REGISTER sip:intarweb.us SIP/2.0\r
Via: SIP/2.0/UDP 192.168.1.100:50609\r
From: <sip:exarkun@intarweb.us:50609>\r
To: <sip:exarkun@intarweb.us:50609>\r
Contact: "exarkun" <sip:exarkun@192.168.1.100:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9898 REGISTER\r
Expires: 500\r
User-Agent: X-Lite build 1061\r
Content-Length: 0\r
\r
"""
# 401 challenge the proxy is expected to emit (fixed nonce/opaque come from
# FakeDigestAuthorizer).
challengeResponse = """\
SIP/2.0 401 Unauthorized\r
Via: SIP/2.0/UDP 192.168.1.100:50609;received=127.0.0.1;rport=5632\r
To: <sip:exarkun@intarweb.us:50609>\r
From: <sip:exarkun@intarweb.us:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9898 REGISTER\r
WWW-Authenticate: Digest nonce="92956076410767313901322208775",opaque="1674186428",qop-options="auth",algorithm="MD5",realm="intarweb.us"\r
\r
"""
# Retried REGISTER carrying the digest response for the challenge above.
authRequest = """\
REGISTER sip:intarweb.us SIP/2.0\r
Via: SIP/2.0/UDP 192.168.1.100:50609\r
From: <sip:exarkun@intarweb.us:50609>\r
To: <sip:exarkun@intarweb.us:50609>\r
Contact: "exarkun" <sip:exarkun@192.168.1.100:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9899 REGISTER\r
Expires: 500\r
Authorization: Digest username="exarkun",realm="intarweb.us",nonce="92956076410767313901322208775",response="4a47980eea31694f997369214292374b",uri="sip:intarweb.us",algorithm=MD5,opaque="1674186428"\r
User-Agent: X-Lite build 1061\r
Content-Length: 0\r
\r
"""
# 200 OK the proxy should send once the credentials verify.
okResponse = """\
SIP/2.0 200 OK\r
Via: SIP/2.0/UDP 192.168.1.100:50609;received=127.0.0.1;rport=5632\r
To: <sip:exarkun@intarweb.us:50609>\r
From: <sip:exarkun@intarweb.us:50609>\r
Call-ID: 94E7E5DAF39111D791C6000393764646@intarweb.us\r
CSeq: 9899 REGISTER\r
Contact: sip:exarkun@127.0.0.1:5632\r
Expires: 3600\r
Content-Length: 0\r
\r
"""
class FakeDigestAuthorizer(sip.DigestAuthorizer):
    """Digest authorizer with fixed nonce/opaque so canned fixtures match."""
    def generateNonce(self):
        return '92956076410767313901322208775'
    def generateOpaque(self):
        return '1674186428'
class FakeRegistry(sip.InMemoryRegistry):
    """Make sure expiration is always seen to be 3600.
    Otherwise slow reactors fail tests incorrectly.
    """
    def _cbReg(self, reg):
        # Normalize secondsToExpiry, but fail loudly if it drifted outside
        # the plausible window for a 3600-second registration.
        if 3600 < reg.secondsToExpiry or reg.secondsToExpiry < 3598:
            raise RuntimeError(
                "bad seconds to expire: %s" % reg.secondsToExpiry)
        reg.secondsToExpiry = 3600
        return reg
    def getRegistrationInfo(self, uri):
        d = sip.InMemoryRegistry.getRegistrationInfo(self, uri)
        return d.addCallback(self._cbReg)
    def registerAddress(self, domainURL, logicalURL, physicalURL):
        d = sip.InMemoryRegistry.registerAddress(
            self, domainURL, logicalURL, physicalURL)
        return d.addCallback(self._cbReg)
class AuthorizationTestCase(unittest.TestCase):
    """Digest-authentication round trip against the canned fixtures above."""
    def setUp(self):
        self.proxy = sip.RegisterProxy(host="intarweb.us")
        self.proxy.authorizers = self.proxy.authorizers.copy()
        self.proxy.authorizers['digest'] = FakeDigestAuthorizer()
        self.registry = FakeRegistry("intarweb.us")
        self.proxy.registry = self.proxy.locator = self.registry
        self.transport = proto_helpers.FakeDatagramTransport()
        self.proxy.transport = self.transport
        r = TestRealm()
        p = cred.portal.Portal(r)
        c = cred.checkers.InMemoryUsernamePasswordDatabaseDontUse()
        c.addUser('exarkun@intarweb.us', 'password')
        p.registerChecker(c)
        self.proxy.portal = p
    setUp = utils.suppressWarnings(setUp,
        util.suppress(category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'))
    def tearDown(self):
        # Cancel expiry timers left by successful registrations.
        for d, uri in self.registry.users.values():
            d.cancel()
        del self.proxy
    def testChallenge(self):
        # Unauthenticated REGISTER draws the 401 challenge...
        self.proxy.datagramReceived(registerRequest, ("127.0.0.1", 5632))
        self.assertEqual(
            self.transport.written[-1],
            ((challengeResponse, ("127.0.0.1", 5632)))
        )
        self.transport.written = []
        # ...and the digest-bearing retry is accepted with a 200.
        self.proxy.datagramReceived(authRequest, ("127.0.0.1", 5632))
        self.assertEqual(
            self.transport.written[-1],
            ((okResponse, ("127.0.0.1", 5632)))
        )
    testChallenge.suppress = [
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestAuthorizer was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestedCredentials was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestCalcHA1 was deprecated'),
        util.suppress(
            category=DeprecationWarning,
            message=r'twisted.protocols.sip.DigestCalcResponse was deprecated')]
class DeprecationTests(unittest.TestCase):
    """
    Tests for deprecation of obsolete components of L{twisted.protocols.sip}.
    """
    # callDeprecated both invokes the API and asserts the deprecation version.
    def test_deprecatedDigestCalcHA1(self):
        """
        L{sip.DigestCalcHA1} is deprecated.
        """
        self.callDeprecated(Version("Twisted", 9, 0, 0),
                            sip.DigestCalcHA1, '', '', '', '', '', '')
    def test_deprecatedDigestCalcResponse(self):
        """
        L{sip.DigestCalcResponse} is deprecated.
        """
        self.callDeprecated(Version("Twisted", 9, 0, 0),
                            sip.DigestCalcResponse, '', '', '', '', '', '', '',
                            '')
    def test_deprecatedBasicAuthorizer(self):
        """
        L{sip.BasicAuthorizer} is deprecated.
        """
        self.callDeprecated(Version("Twisted", 9, 0, 0), sip.BasicAuthorizer)
    def test_deprecatedDigestAuthorizer(self):
        """
        L{sip.DigestAuthorizer} is deprecated.
        """
        self.callDeprecated(Version("Twisted", 9, 0, 0), sip.DigestAuthorizer)
    def test_deprecatedDigestedCredentials(self):
        """
        L{sip.DigestedCredentials} is deprecated.
        """
        self.callDeprecated(Version("Twisted", 9, 0, 0),
                            sip.DigestedCredentials, '', {}, {})
| bsd-3-clause |
RavenB/opensim | OpenSim/Region/OptionalModules/Avatar/Concierge/ConciergeServer.py | 44 | 5053 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) Contributors, http://opensimulator.org/
# See CONTRIBUTORS.TXT for a full list of copyright holders.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the OpenSim Project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import BaseHTTPServer
import optparse
import xml.etree.ElementTree as ET
import xml.parsers.expat
# enable debug level logging
logging.basicConfig(level = logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
# Command line options; populated in __main__ and read by ConciergeHandler.
options = None
# subclassed HTTPRequestHandler
# Subclassed request handler: answers HEAD with a canned 200 and validates
# POSTed XML, replying <success/> on success or a forced 500/<error> when the
# server was started with --fail.
class ConciergeHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def logRequest(self):
        """Log the incoming request line (method, client address, path)."""
        logging.info('[ConciergeHandler] %(command)s request: %(host)s:%(port)d --- %(path)s',
                     dict(command = self.command,
                          host = self.client_address[0],
                          port = self.client_address[1],
                          path = self.path))

    def logResponse(self, status):
        """Log the HTTP status code sent back for the current request."""
        logging.info('[ConciergeHandler] %(command)s returned %(status)d',
                     dict(command = self.command,
                          status = status))

    def do_HEAD(self):
        """Answer any HEAD request with an empty 200 text/html response."""
        self.logRequest()
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.logResponse(200)

    def dumpXml(self, xml):
        """Recursively log an ElementTree element: tag, attributes, children."""
        logging.debug('[ConciergeHandler] %s', xml.tag)
        for attr in xml.attrib:
            logging.debug('[ConciergeHandler] %s [%s] %s', xml.tag, attr, xml.attrib[attr])
        # Iterate children directly: Element.getchildren() is deprecated (and
        # removed in Python 3.9) while direct iteration is equivalent.
        for kid in list(xml):
            self.dumpXml(kid)

    def do_POST(self):
        """Parse the POSTed body as XML; reply 200/<success/> on success,
        500 on a parse failure or when --fail was given on the command line."""
        self.logRequest()
        for hdr in self.headers.headers:
            logging.debug('[ConciergeHandler] POST: header: %s', hdr.rstrip())
        length = int(self.headers.getheader('Content-Length'))
        content = self.rfile.read(length)
        self.rfile.close()
        logging.debug('[ConciergeHandler] POST: content: %s', content)
        try:
            postXml = ET.fromstring(content)
            self.dumpXml(postXml)
        except xml.parsers.expat.ExpatError as xmlError:
            logging.error('[ConciergeHandler] POST illformed:%s', xmlError)
            self.send_response(500)
            return
        # `options` is the module-level optparse result assigned in __main__.
        if not options.fail:
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len('<success/>'))
            self.end_headers()
            self.logResponse(200)
            self.wfile.write('<success/>')
            self.wfile.close()
        else:
            self.send_response(500)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len('<error>gotcha!</error>'))
            self.end_headers()
            self.wfile.write('<error>gotcha!</error>')
            self.wfile.close()
            self.logResponse(500)

    def log_request(self, code='-', size='-'):
        # BUGFIX: the override was missing ``self``, so the base class's
        # ``self.log_request(code)`` call silently bound the instance to
        # ``code``.  Match BaseHTTPRequestHandler.log_request's signature and
        # keep suppressing the default per-request stderr logging.
        pass
if __name__ == '__main__':
    logging.info('[ConciergeServer] Concierge Broker Test Server starting')
    parser = optparse.OptionParser()
    # BUGFIX: --port used to be parsed but ignored (the server was hard-wired
    # to 8080).  Honour the option, defaulting to the old port.
    parser.add_option('-p', '--port', dest = 'port', type = 'int', default = 8080,
                      help = 'port to listen on', metavar = 'PORT')
    parser.add_option('-f', '--fail', dest = 'fail', action = 'store_true', help = 'always fail POST requests')
    (options, args) = parser.parse_args()
    httpServer = BaseHTTPServer.HTTPServer(('', options.port), ConciergeHandler)
    try:
        httpServer.serve_forever()
    except KeyboardInterrupt:
        logging.info('[ConciergeServer] terminating')
    httpServer.server_close()
| bsd-3-clause |
zhangkun456/TeamTalk | win-client/3rdParty/src/json/devtools/licenseupdater.py | 177 | 3924 | """Updates the license text in source file.
"""
# An existing license is found if the file starts with the string below,
# and ends with the first blank line.
LICENSE_BEGIN = "// Copyright "
BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
""".replace('\r\n','\n')
def update_license( path, dry_run, show_diff ):
    """Update the license statement in the specified file.
    Parameters:
      path: path of the C++ source file to update.
      dry_run: if True, just print the path of the file that would be updated,
               but don't change it.
      show_diff: if True, print the path of the file that would be modified,
                 as well as the change made to the file.
    Returns True if the file was (or, in dry-run mode, would be) modified.
    """
    with open( path, 'rt' ) as fin:
        # Normalise to '\n' so the prepend/replace logic is line-ending
        # agnostic; remember the file's original newline style so it can be
        # restored verbatim on write.
        original_text = fin.read().replace('\r\n','\n')
        newline = fin.newlines and fin.newlines[0] or '\n'
    if not original_text.startswith( LICENSE_BEGIN ):
        # No existing license found => prepend it
        new_text = BRIEF_LICENSE + original_text
    else:
        # An existing license runs from the top of the file to the first blank
        # line; replace that whole span with the current BRIEF_LICENSE.
        # NOTE(review): raises ValueError if the file contains no blank line
        # at all -- presumably never the case in this code base; confirm.
        license_end_index = original_text.index( '\n\n' ) # search first blank line
        new_text = BRIEF_LICENSE + original_text[license_end_index+2:]
    if original_text != new_text:
        if not dry_run:
            # Binary mode so we control the newline bytes ourselves.
            with open( path, 'wb' ) as fout:
                fout.write( new_text.replace('\n', newline ) )
        print 'Updated', path
        if show_diff:
            import difflib
            print '\n'.join( difflib.unified_diff( original_text.split('\n'),
                                                   new_text.split('\n') ) )
        return True
    return False
def update_license_in_source_directories( source_dirs, dry_run, show_diff ):
    """Update the license text of every C++ source found under *source_dirs*.

    Directories are scanned recursively for ``*.h``, ``*.cpp`` and ``*.inl``
    files; build, dist and scons working directories are pruned from the scan.

    Parameters:
        source_dirs: list of directories to scan for C++ sources.
        dry_run: if True, only report the files that would be updated.
        show_diff: if True, also print the change made to each file.
    """
    from devtools import antglob
    prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
    include_patterns = '''**/*.h **/*.cpp **/*.inl'''
    for directory in source_dirs:
        for source_path in antglob.glob( directory,
                                         includes = include_patterns,
                                         prune_dirs = prune_dirs ):
            update_license( source_path, dry_run, show_diff )
def main():
    """Command-line entry point: parse options and update license headers."""
    usage = """%prog DIR [DIR2...]
Updates license text in sources of the project in source files found
in the directory specified on the command-line.
Example of call:
python devtools\licenseupdater.py include src -n --diff
=> Show change that would be made to the sources.
python devtools\licenseupdater.py include src
=> Update license statement on all sources in directories include/ and src/.
"""
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    # Options must appear before the directory arguments.
    parser.allow_interspersed_args = False
    parser.add_option('-n', '--dry-run', dest="dry_run", action='store_true', default=False,
        help="""Only show what files are updated, do not update the files""")
    parser.add_option('--diff', dest="show_diff", action='store_true', default=False,
        help="""On update, show change made to the file.""")
    # NOTE(review): this re-enables interspersed args right after disabling
    # them above, so the net effect is the optparse default -- confirm intent.
    parser.enable_interspersed_args()
    options, args = parser.parse_args()
    update_license_in_source_directories( args, options.dry_run, options.show_diff )
    print 'Done'
if __name__ == '__main__':
    import sys
    import os.path
    # Make the project root importable so "from devtools import antglob"
    # resolves when this script is run directly from the devtools/ directory.
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    main()
| apache-2.0 |
philanthropy-u/edx-platform | pavelib/prereqs.py | 1 | 11951 | """
Install Python and Node prerequisites.
"""
from __future__ import print_function
import hashlib
import os
import re
import sys
import subprocess
import io
from distutils import sysconfig
from paver.easy import BuildFailure, sh, task
from .utils.envs import Env
from .utils.timer import timed
PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache')
NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs"
NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.'
COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt'
# If you make any changes to this list you also need to make
# a corresponding change to circle.yml, which is how the python
# prerequisites are installed for builds on circleci.com
if 'TOXENV' in os.environ:
PYTHON_REQ_FILES = ['requirements/edx/testing.txt']
else:
PYTHON_REQ_FILES = ['requirements/edx/development.txt']
# Developers can have private requirements, for local copies of github repos,
# or favorite debugging tools, etc.
if 'TOXENV' in os.environ:
PRIVATE_REQS = 'requirements/philu/testing.txt'
else:
PRIVATE_REQS = 'requirements/philu/base.txt'
if os.path.exists(PRIVATE_REQS):
PYTHON_REQ_FILES.append(PRIVATE_REQS)
def str2bool(s):
    """Interpret *s* (any value) as a boolean.

    The value is stringified and lower-cased; 'yes', 'true', 't' and '1'
    count as True, everything else as False.
    """
    truthy_values = ('yes', 'true', 't', '1')
    return str(s).lower() in truthy_values
def no_prereq_install():
    """Return True when the NO_PREREQ_INSTALL environment flag is truthy."""
    env_flag = os.environ.get('NO_PREREQ_INSTALL', 'False')
    return str2bool(env_flag)
def no_python_uninstall():
    """Return True when NO_PYTHON_UNINSTALL disables the uninstall task."""
    env_flag = os.environ.get('NO_PYTHON_UNINSTALL', 'False')
    return str2bool(env_flag)
def create_prereqs_cache_dir():
    """Ensure the directory storing the prereqs hashes exists (idempotent)."""
    if os.path.isdir(PREREQS_STATE_DIR):
        return
    try:
        os.makedirs(PREREQS_STATE_DIR)
    except OSError:
        # A concurrent process may have created it between the check above
        # and the makedirs call; only propagate genuine failures.
        if not os.path.isdir(PREREQS_STATE_DIR):
            raise
def compute_fingerprint(path_list):
    """
    Hash the contents of all the files and directories in `path_list`.

    Files contribute their raw bytes; directories contribute the modification
    times of their first-level subdirectories.  Returns the hex digest.
    """
    digest = hashlib.sha1()
    for entry in path_list:
        if os.path.isdir(entry):
            # Directories: fold in the mtimes of first-level subdirectories,
            # in sorted order so the result is deterministic.
            for child in sorted(os.listdir(entry)):
                child_path = os.path.join(entry, child)
                if os.path.isdir(child_path):
                    digest.update(str(os.stat(child_path).st_mtime))
        if os.path.isfile(entry):
            # Files: fold in the full file contents.
            with io.open(entry, "rb") as file_handle:
                digest.update(file_handle.read())
    return digest.hexdigest()
def prereq_cache(cache_name, paths, install_func):
    """
    Conditionally execute `install_func()` only if the files/directories
    specified by `paths` have changed.
    If the code executes successfully (no exceptions are thrown), the cache
    is updated with the new hash.
    """
    # Retrieve the old hash
    cache_filename = cache_name.replace(" ", "_")
    cache_file_path = os.path.join(PREREQS_STATE_DIR, "{}.sha1".format(cache_filename))
    old_hash = None
    if os.path.isfile(cache_file_path):
        with io.open(cache_file_path, "rb") as cache_file:
            # NOTE(review): read in binary mode, so under Python 3 this would
            # be bytes compared against a str digest below (always unequal);
            # this code presumably runs under Python 2 -- confirm before
            # porting.
            old_hash = cache_file.read()
    # Compare the old hash to the new hash
    # If they do not match (either the cache hasn't been created, or the files have changed),
    # then execute the code within the block.
    new_hash = compute_fingerprint(paths)
    if new_hash != old_hash:
        install_func()
        # Update the cache with the new hash
        # If the code executed within the context fails (throws an exception),
        # then this step won't get executed.
        create_prereqs_cache_dir()
        with io.open(cache_file_path, "wb") as cache_file:
            # Since the pip requirement files are modified during the install
            # process, we need to store the hash generated AFTER the installation
            post_install_hash = compute_fingerprint(paths)
            cache_file.write(post_install_hash)
    else:
        print('{cache} unchanged, skipping...'.format(cache=cache_name))
def node_prereqs_installation():
    """
    Configures npm and installs Node prerequisites
    """
    # NPM installs hang sporadically. Log the installation process so that we
    # determine if any packages are chronic offenders.
    shard_str = os.getenv('SHARD', None)
    if shard_str:
        npm_log_file_path = '{}/npm-install.{}.log'.format(Env.GEN_LOG_DIR, shard_str)
    else:
        npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
    # NOTE(review): npm_log_file is never closed explicitly; it lives for the
    # duration of the subprocess calls below.
    npm_log_file = io.open(npm_log_file_path, 'wb')
    npm_command = 'npm install --verbose'.split()
    cb_error_text = "Subprocess return code: 1"
    # Error handling around a race condition that produces "cb() never called" error. This
    # evinces itself as `cb_error_text` and it ought to disappear when we upgrade
    # npm to 3 or higher. TODO: clean this up when we do that.
    try:
        # The implementation of Paver's `sh` function returns before the forked
        # actually returns. Using a Popen object so that we can ensure that
        # the forked process has returned
        proc = subprocess.Popen(npm_command, stderr=npm_log_file)
        proc.wait()
    # Python 2 except syntax; error_text is bound to the BuildFailure instance.
    except BuildFailure, error_text:
        if cb_error_text in error_text:
            # Retry exactly once on the known npm race.
            print("npm install error detected. Retrying...")
            proc = subprocess.Popen(npm_command, stderr=npm_log_file)
            proc.wait()
        else:
            raise BuildFailure(error_text)
    print("Successfully installed NPM packages. Log found at {}".format(
        npm_log_file_path
    ))
def python_prereqs_installation():
    """Install every Python requirements file, in the order listed."""
    for requirements_path in PYTHON_REQ_FILES:
        pip_install_req_file(requirements_path)
def pip_install_req_file(req_file):
    """Quietly pip-install every package listed in *req_file*."""
    base_command = 'pip install -q --disable-pip-version-check --exists-action w'
    sh('{} -r {}'.format(base_command, req_file))
@task
@timed
def install_node_prereqs():
    """Install Node prerequisites unless NO_PREREQ_INSTALL disables them."""
    if no_prereq_install():
        print(NO_PREREQ_MESSAGE)
    else:
        # Re-run npm install only when package.json has changed.
        prereq_cache("Node prereqs", ["package.json"], node_prereqs_installation)
# To add a package to the uninstall list, just add it to this list! No need
# to touch any other part of this file.
PACKAGES_TO_UNINSTALL = [
"South", # Because it interferes with Django 1.8 migrations.
"edxval", # Because it was bork-installed somehow.
"django-storages",
"django-oauth2-provider", # Because now it's called edx-django-oauth2-provider.
"edx-oauth2-provider", # Because it moved from github to pypi
"i18n-tools", # Because now it's called edx-i18n-tools
]
@task
@timed
def uninstall_python_packages():
    """
    Uninstall Python packages that need explicit uninstallation.
    Some Python packages that we no longer want need to be explicitly
    uninstalled, notably, South. Some other packages were once installed in
    ways that were resistant to being upgraded, like edxval. Also uninstall
    them.
    """
    if no_python_uninstall():
        print(NO_PYTHON_UNINSTALL_MESSAGE)
        return
    # So that we don't constantly uninstall things, use a hash of the packages
    # to be uninstalled. Check it, and skip this if we're up to date.
    # Hashing repr() of the list means any edit (even reordering) invalidates
    # the cached state, which is the intent.
    hasher = hashlib.sha1()
    hasher.update(repr(PACKAGES_TO_UNINSTALL))
    expected_version = hasher.hexdigest()
    state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1")
    create_prereqs_cache_dir()
    if os.path.isfile(state_file_path):
        with io.open(state_file_path) as state_file:
            version = state_file.read()
        if version == expected_version:
            print('Python uninstalls unchanged, skipping...')
            return
    # Run pip to find the packages we need to get rid of. Believe it or not,
    # edx-val is installed in a way that it is present twice, so we have a loop
    # to really really get rid of it.
    for _ in range(3):
        uninstalled = False
        frozen = sh("pip freeze", capture=True)
        for package_name in PACKAGES_TO_UNINSTALL:
            if package_in_frozen(package_name, frozen):
                # Uninstall the package
                sh("pip uninstall --disable-pip-version-check -y {}".format(package_name))
                uninstalled = True
        if not uninstalled:
            break
    else:
        # We tried three times and didn't manage to get rid of the pests.
        print("Couldn't uninstall unwanted Python packages!")
        return
    # Write our version.
    with io.open(state_file_path, "wb") as state_file:
        state_file.write(expected_version)
def package_in_frozen(package_name, frozen_output):
    """Return True if *package_name* appears in `pip freeze` output.

    Matches either "PACKAGE-NAME==" at the start of a line, or an editable
    install of the form "...#egg=package_name-version" (dashes replaced by
    underscores), case-insensitively.
    """
    escaped_name = re.escape(package_name)
    escaped_underscored = re.escape(package_name.replace("-", "_"))
    pattern = r"(?mi)^{0}==|#egg={1}-".format(escaped_name, escaped_underscored)
    return re.search(pattern, frozen_output) is not None
@task
@timed
def install_coverage_prereqs():
    """Install the Python packages needed to measure test coverage."""
    if no_prereq_install():
        print(NO_PREREQ_MESSAGE)
    else:
        pip_install_req_file(COVERAGE_REQ_FILE)
@task
@timed
def install_python_prereqs():
    """
    Installs Python prerequisites.
    """
    if no_prereq_install():
        print(NO_PREREQ_MESSAGE)
        return
    uninstall_python_packages()
    # Include all of the requirements files in the fingerprint.
    files_to_fingerprint = list(PYTHON_REQ_FILES)
    # Also fingerprint the directories where packages get installed:
    # ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages")
    files_to_fingerprint.append(sysconfig.get_python_lib())
    # In a virtualenv, "-e installs" get put in a src directory.
    src_dir = os.path.join(sys.prefix, "src")
    if os.path.isdir(src_dir):
        files_to_fingerprint.append(src_dir)
    # Also fingerprint this source file, so that if the logic for installations
    # changes, we will redo the installation.
    this_file = __file__
    if this_file.endswith(".pyc"):
        this_file = this_file[:-1]  # use the .py file instead of the .pyc
    files_to_fingerprint.append(this_file)
    # Only reinstall when something in the fingerprinted set changed.
    prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation)
@task
@timed
def install_prereqs():
    """Install Node and Python prerequisites, then log and warn as needed."""
    if no_prereq_install():
        print(NO_PREREQ_MESSAGE)
        return
    skip_npm_install = str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False'))
    if not skip_npm_install:
        install_node_prereqs()
    install_python_prereqs()
    log_installed_python_prereqs()
    print_devstack_warning()
def log_installed_python_prereqs():
    """Dump `pip freeze` output to a log file for debugging."""
    freeze_log = Env.GEN_LOG_DIR + "/pip_freeze.log"
    sh("pip freeze > {}".format(freeze_log))
def print_devstack_warning():
    """Print a CPU-usage warning banner for Mac users on Docker devstack."""
    if Env.USING_DOCKER:  # pragma: no cover
        print("********************************************************************************")
        print("* WARNING: Mac users should run this from both the lms and studio shells")
        print("* in docker devstack to avoid startup errors that kill your CPU.")
        print("* For more details, see:")
        print("* https://github.com/edx/devstack#docker-is-using-lots-of-cpu-time-when-it-should-be-idle")
        print("********************************************************************************")
| agpl-3.0 |
exploreodoo/datStruct | odoo/addons/purchase_analytic_plans/__openerp__.py | 260 | 1725 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Analytic Plans',
'version': '1.0',
'category': 'Purchase Management',
'description': """
The base module to manage analytic distribution and purchase orders.
====================================================================
Allows the user to maintain several analysis plans. These let you split a line
on a supplier purchase order into several accounts and analytic plans.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/purchase',
'depends': ['purchase', 'account_analytic_plans'],
'data': ['purchase_analytic_plans_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
shirishgoyal/rational_crowd | config/urls.py | 1 | 1607 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rational_crowd.api.router import router
# Root URLconf: home page, admin, auth (allauth), and the DRF API router,
# plus media file serving in development.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    # url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    # url(r'^users/', include('rational_crowd.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # REST endpoints registered on the project's DRF router.
    url(r'^api/', include(router.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
| mit |
LinusU/ansible | lib/ansible/plugins/filter/ipaddr.py | 149 | 18795 | # (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
if v.ip != v.network:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 1:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
if v.size > 1:
return str(v.netmask)
def _network_query(v):
if v.size > 1:
return str(v.network)
def _prefix_query(v):
return int(v.prefixlen)
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_unicast() and not v_ip.is_private() and \
not v_ip.is_loopback() and not v_ip.is_netmask() and \
not v_ip.is_hostmask():
return value
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _gateway_query,
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'gateway': _gateway_query,
'gw': _gateway_query,
'host': _host_query,
'host/prefix': _gateway_query,
'hostmask': _hostmask_query,
'hostnet': _gateway_query,
'int': _int_query,
'ip': _ip_query,
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'netmask': _netmask_query,
'network': _network_query,
'prefix': _prefix_query,
'private': _private_query,
'public': _public_query,
'revdns': _revdns_query,
'router': _gateway_query,
'size': _size_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value == True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
### ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
# This code checks if value maches the IP version the user wants, ie. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def ipwrap(value, query = ''):
    """Wrap IPv6 addresses in [ ] (e.g. for URLs); pass other values through.

    Accepts a single value or a list/tuple/generator; elements that are not
    recognised as IP addresses are kept unchanged rather than dropped.
    """
    try:
        if isinstance(value, (list, tuple, types.GeneratorType)):
            _ret = []
            for element in value:
                if ipaddr(element, query, version = False, alias = 'ipwrap'):
                    _ret.append(ipaddr(element, 'wrap'))
                else:
                    # Not an IP address: keep the element as-is.
                    _ret.append(element)
            return _ret
        else:
            _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
            if _ret:
                return ipaddr(_ret, 'wrap')
            else:
                return value
    except:
        # Any failure falls back to returning the raw input value.
        return value
def ipv4(value, query = ''):
    """Filter *value* as an IPv4 address/network; see ipaddr() for queries."""
    return ipaddr(value, query, version = 4, alias = 'ipv4')

def ipv6(value, query = ''):
    """Filter *value* as an IPv6 address/network; see ipaddr() for queries."""
    return ipaddr(value, query, version = 6, alias = 'ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
def ipsubnet(value, query = '', index = 'x'):
    ''' Manipulate IPv4/IPv6 subnets '''
    # Normalise the input: a bare address becomes a host CIDR, a network is
    # reduced to its subnet form; anything unparsable yields False.
    try:
        vtype = ipaddr(value, 'type')
        if vtype == 'address':
            v = ipaddr(value, 'cidr')
        elif vtype == 'network':
            v = ipaddr(value, 'subnet')
        value = netaddr.IPNetwork(v)
    except:
        return False
    if not query:
        return str(value)
    elif str(query).isdigit():
        vsize = ipaddr(v, 'size')
        query = int(query)
        try:
            # A numeric index was supplied: return one specific
            # subnet (for networks) or supernet (for single addresses).
            float(index)
            index = int(index)
            if vsize > 1:
                try:
                    return str(list(value.subnet(query))[index])
                except:
                    return False
            elif vsize == 1:
                try:
                    return str(value.supernet(query)[index])
                except:
                    return False
        except:
            # Default index 'x' is not numeric, so float() raises and we land
            # here: return the subnet count (networks) or the containing
            # supernet (single addresses).
            if vsize > 1:
                try:
                    return str(len(list(value.subnet(query))))
                except:
                    return False
            elif vsize == 1:
                try:
                    return str(value.supernet(query)[0])
                except:
                    return False
    return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
def nthhost(value, query=''):
    """Return the *query*-th host within the network described by *value*.

    Returns False for unparsable input, a missing query, a non-integer
    query, or an index outside the network's size.
    """
    try:
        vtype = ipaddr(value, 'type')
        if vtype == 'address':
            v = ipaddr(value, 'cidr')
        elif vtype == 'network':
            v = ipaddr(value, 'subnet')
        value = netaddr.IPNetwork(v)
    except:
        return False
    if not query:
        return False
    try:
        # The index must parse as an integer and fall inside the network.
        # (The previous version also computed an unused `vsize` here.)
        nth = int(query)
        if value.size > nth:
            return value[nth]
    except ValueError:
        return False
    return False
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query = ''):
    ''' Get the SLAAC address within given network '''
    try:
        vtype = ipaddr(value, 'type')
        if vtype == 'address':
            v = ipaddr(value, 'cidr')
        elif vtype == 'network':
            v = ipaddr(value, 'subnet')
        value = netaddr.IPNetwork(v)
        # BUGFIX: check the version on the parsed IPNetwork object.  `v` is a
        # *string* here, so the old `v.version` raised AttributeError, which
        # the bare except swallowed -- making the filter always return False.
        if value.version != 6:
            return False
    except:
        return False
    if not query:
        return False
    try:
        # Validate the MAC address and derive its EUI-64 identifier.
        mac = hwaddr(query, alias = 'slaac')
        eui = netaddr.EUI(mac)
    except:
        return False
    return eui.ipv6(value.network)
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query = '', alias = 'hwaddr'):
    ''' Check if string is a HW/MAC address and filter it '''
    # Extra positional arguments each query handler needs beyond the EUI.
    query_func_extra_args = {
        '': ('value',),
    }
    # Maps query keywords to dialect-formatting helpers defined above.
    query_func_map = {
        '': _empty_hwaddr_query,
        'bare': _bare_query,
        'bool': _bool_hwaddr_query,
        'cisco': _cisco_query,
        'eui48': _win_query,
        'linux': _linux_query,
        'pgsql': _postgresql_query,
        'postgresql': _postgresql_query,
        'psql': _postgresql_query,
        'unix': _unix_query,
        'win': _win_query,
    }
    try:
        v = netaddr.EUI(value)
    except:
        # Not a parsable hardware address: the 'bool' (and empty) queries
        # should just report falsiness; anything else is a user error.
        # NOTE(review): in that falsy path `v` stays unbound, so the lookup
        # below raises NameError instead of returning False -- confirm.
        if query and query != 'bool':
            raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
    extras = []
    for arg in query_func_extra_args.get(query, tuple()):
        extras.append(locals()[arg])
    try:
        return query_func_map[query](v, *extras)
    except KeyError:
        raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
    return False
def macaddr(value, query = ''):
    """Alias for hwaddr() that reports errors under the 'macaddr' name."""
    return hwaddr(value, query, alias = 'macaddr')

def _need_netaddr(f_name, *args, **kwargs):
    """Stand-in bound when python-netaddr is missing; always raises."""
    raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
            ' installed on the ansible controller'.format(f_name))
# ---- Ansible filters ----
class FilterModule(object):
    ''' IP address and network manipulation filters '''
    # Every filter this plugin can provide, keyed by its Jinja2 filter name.
    filter_map = {
        # IP addresses and networks
        'ipaddr': ipaddr,
        'ipwrap': ipwrap,
        'ipv4': ipv4,
        'ipv6': ipv6,
        'ipsubnet': ipsubnet,
        'nthhost': nthhost,
        'slaac': slaac,
        # MAC / HW addresses
        'hwaddr': hwaddr,
        'macaddr': macaddr
    }

    def filters(self):
        """Return the filter map; without netaddr, every entry becomes a
        stub that raises a helpful error when invoked."""
        if netaddr:
            return self.filter_map
        else:
            # Need to install python-netaddr for these filters to work
            return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 |
tomjelinek/pcs | pcs/cli/routing/qdevice.py | 3 | 1098 | from pcs import (
qdevice,
usage,
)
from pcs.cli.common.routing import create_router
# Command router for `pcs qdevice ...`: maps the first positional CLI
# argument to its implementation in the pcs.qdevice module.  The trailing
# list (["qdevice"]) is the command path used in usage/error reporting.
qdevice_cmd = create_router(
    {
        "help": lambda lib, argv, modifiers: usage.qdevice(argv),
        "status": qdevice.qdevice_status_cmd,
        "setup": qdevice.qdevice_setup_cmd,
        "destroy": qdevice.qdevice_destroy_cmd,
        "start": qdevice.qdevice_start_cmd,
        "stop": qdevice.qdevice_stop_cmd,
        "kill": qdevice.qdevice_kill_cmd,
        "enable": qdevice.qdevice_enable_cmd,
        "disable": qdevice.qdevice_disable_cmd,
        # following commands are internal use only, called from pcsd
        "sign-net-cert-request": qdevice.qdevice_sign_net_cert_request_cmd,
        # Nested router for `pcs qdevice net-client ...` subcommands.
        "net-client": create_router(
            {
                "setup": qdevice.qdevice_net_client_setup_cmd,
                "import-certificate": (
                    qdevice.qdevice_net_client_import_certificate_cmd
                ),
                "destroy": qdevice.qdevice_net_client_destroy,
            },
            ["qdevice", "net-client"],
        ),
    },
    ["qdevice"],
)
| gpl-2.0 |
kvar/ansible | test/units/modules/net_tools/nios/test_nios_ptr_record.py | 19 | 7158 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_ptr_record
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosPTRRecordModule(TestNiosModule):
    """Unit tests for the nios_ptr_record module's WAPI create/update/remove
    flows, with the Infoblox WAPI layer fully mocked out."""

    module = nios_ptr_record

    def setUp(self):
        super(TestNiosPTRRecordModule, self).setUp()
        # Replace the real module object with a mock so wapi.run() reads
        # params from self.module.params instead of parsing CLI arguments.
        self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_ptr_record.WapiModule')
        self.module.check_mode = False
        self.module.params = {'provider': None}
        self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_ptr_record.WapiModule')
        self.exec_command = self.mock_wapi.start()
        self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_ptr_record.WapiModule.run')
        self.mock_wapi_run.start()
        # NOTE(review): the same patcher is started twice here (line above and
        # below); one start() looks redundant -- confirm before cleaning up.
        self.load_config = self.mock_wapi_run.start()

    def tearDown(self):
        super(TestNiosPTRRecordModule, self).tearDown()
        # NOTE(review): self.mock_wapi_run is never stopped, so the
        # WapiModule.run patch leaks into subsequent tests.
        self.mock_wapi.stop()

    def _get_wapi(self, test_object):
        """Build a WapiModule whose get/create/update/delete are mocks and
        whose get_object returns the supplied `test_object` fixture."""
        wapi = api.WapiModule(self.module)
        wapi.get_object = Mock(name='get_object', return_value=test_object)
        wapi.create_object = Mock(name='create_object')
        wapi.update_object = Mock(name='update_object')
        wapi.delete_object = Mock(name='delete_object')
        return wapi

    def load_fixtures(self, commands=None):
        self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
        self.load_config.return_value = dict(diff=None, session='session')

    def test_nios_ptr_record_create(self):
        # No existing object (test_object=None) -> run() must create one.
        self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
                              'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None, 'view': 'default'}

        test_object = None
        test_spec = {
            "ipv4addr": {"ib_req": True},
            "ptrdname": {"ib_req": True},
            "comment": {},
            "extattrs": {},
            "view": {"ib_req": True}
        }
        wapi = self._get_wapi(test_object)
        print("WAPI: ", wapi)  # debug leftover; harmless under pytest
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        wapi.create_object.assert_called_once_with('testobject', {'ipv4addr': '10.36.241.14', 'ptrdname': 'ansible.test.com', 'view': 'default'})

    def test_nios_ptr_record_remove(self):
        # Existing object + state 'absent' -> run() must delete it by _ref.
        self.module.params = {'provider': None, 'state': 'absent', 'ptrdname': 'ansible.test.com',
                              'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None, 'view': 'default'}

        ref = "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default"

        test_object = [{
            "comment": "test comment",
            "_ref": ref,
            "ptrdname": "ansible.test.com",
            "ipv4addr": "10.36.241.14",
            "view": "default",
            "extattrs": {'Site': {'value': 'test'}}
        }]

        test_spec = {
            "ipv4addr": {"ib_req": True},
            "ptrdname": {"ib_req": True},
            "comment": {},
            "extattrs": {},
            "view": {"ib_req": True}
        }
        wapi = self._get_wapi(test_object)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        wapi.delete_object.assert_called_once_with(ref)

    def test_nios_ptr_record_update_comment(self):
        # Same record with a changed comment -> run() must update in place.
        self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
                              'ipv4addr': '10.36.241.14', 'comment': 'updated comment', 'extattrs': None, 'view': 'default'}

        test_object = [
            {
                "comment": "test comment",
                "_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
                "ptrdname": "ansible.test.com",
                "ipv4addr": "10.36.241.14",
                "extattrs": {},
                "view": "default"
            }
        ]

        test_spec = {
            "ipv4addr": {"ib_req": True},
            "ptrdname": {"ib_req": True},
            "comment": {},
            "extattrs": {},
            "view": {"ib_req": True}
        }
        wapi = self._get_wapi(test_object)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        # NOTE(review): Mock.called_once_with is NOT an assertion -- this line
        # silently creates a child mock and always "passes".  It was probably
        # meant to be assert_called_once_with(...) with the real expected
        # arguments; confirm the expected call signature before fixing.
        wapi.update_object.called_once_with(test_object)

    def test_nios_ptr_record_update_record_ptrdname(self):
        # Changed ptrdname for the same ipv4addr -> run() must update.
        self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.org',
                              'ipv4addr': '10.36.241.14', 'comment': 'comment', 'extattrs': None, 'view': 'default'}

        test_object = [
            {
                "comment": "test comment",
                "_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
                "ptrdname": "ansible.test.com",
                "ipv4addr": "10.36.241.14",
                "extattrs": {},
                "view": "default"
            }
        ]

        test_spec = {
            "ipv4addr": {"ib_req": True},
            "ptrdname": {"ib_req": True},
            "comment": {},
            "extattrs": {},
            "view": {"ib_req": True}
        }
        wapi = self._get_wapi(test_object)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        # NOTE(review): same non-assertion as above -- see
        # test_nios_ptr_record_update_comment.
        wapi.update_object.called_once_with(test_object)

    def test_nios_ptr6_record_create(self):
        # IPv6 variant of the create test (uses ipv6addr instead of ipv4addr).
        self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible6.test.com',
                              'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16', 'comment': None, 'extattrs': None, 'view': 'default'}

        test_object = None
        test_spec = {"ipv6addr": {"ib_req": True},
                     "ptrdname": {"ib_req": True},
                     "comment": {},
                     "extattrs": {},
                     "view": {"ib_req": True}}

        wapi = self._get_wapi(test_object)
        print("WAPI: ", wapi)  # debug leftover; harmless under pytest
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        wapi.create_object.assert_called_once_with('testobject', {'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16',
                                                                  'ptrdname': 'ansible6.test.com', 'view': 'default'})
| gpl-3.0 |
MWisBest/PyBot | Modules/requests/packages/idna/intranges.py | 154 | 1521 | """
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""
import bisect
def intranges_from_list(list_):
    """Represent a list of integers as a sequence of ranges:
    ((start_0, end_0), (start_1, end_1), ...), such that the original
    integers are exactly those x such that start_i <= x < end_i for some i.
    """
    ordered = sorted(list_)
    total = len(ordered)
    ranges = []
    run_start = 0  # index of the first element of the current run
    for idx, val in enumerate(ordered):
        # Keep extending the run while the next value is exactly one more.
        if idx + 1 < total and val == ordered[idx + 1] - 1:
            continue
        # Run ends here: emit it as a half-open [start, end) tuple.
        ranges.append((ordered[run_start], val + 1))
        run_start = idx + 1
    return tuple(ranges)
def intranges_contain(int_, ranges):
    """Determine if `int_` falls into one of the ranges in `ranges`."""
    probe = (int_, int_)
    idx = bisect.bisect_left(ranges, probe)
    # The range that could contain int_ is either the one just before the
    # insertion point (start < int_) ...
    if idx > 0:
        start, end = ranges[idx - 1]
        if start <= int_ < end:
            return True
    # ... or the one at the insertion point, when it starts exactly at int_.
    if idx < len(ranges) and ranges[idx][0] == int_:
        return True
    return False
| gpl-3.0 |
gamecredits-project/electrum-client | setup-release.py | 20 | 2827 | """
py2app/py2exe build script for Electrum
Usage (Mac OS X):
python setup.py py2app
Usage (Windows):
python setup.py py2exe
"""
from setuptools import setup
import os
import re
import shutil
import sys
from lib.util import print_error
from lib.version import ELECTRUM_VERSION as version
# NOTE: this build script is Python 2 only (a print statement and the
# file() builtin are used in the post-build section further down).
name = "Electrum"
mainscript = 'electrum'

# Minimum interpreter check; print_error comes from lib.util.
if sys.version_info[:3] < (2, 6, 0):
    print_error("Error: " + name + " requires Python version >= 2.6.0...")
    sys.exit(1)

if sys.platform == 'darwin':
    # py2app build: patch the bundle icon into Info.plist and bundle the
    # Qt4 GUI, local packages and icon resources.
    from plistlib import Plist
    plist = Plist.fromFile('Info.plist')
    plist.update(dict(CFBundleIconFile='electrum.icns'))
    # py2app wants the entry script to end in .py; work on a copy so the
    # original script is untouched (the copy is removed after the build).
    shutil.copy(mainscript, mainscript + '.py')
    mainscript += '.py'
    extra_options = dict(
        setup_requires=['py2app'],
        app=[mainscript],
        options=dict(py2app=dict(argv_emulation=False,
                                 includes=['PyQt4.QtCore', 'PyQt4.QtGui', 'PyQt4.QtWebKit', 'PyQt4.QtNetwork', 'sip'],
                                 packages=['lib', 'gui', 'plugins', 'packages'],
                                 iconfile='electrum.icns',
                                 plist=plist,
                                 resources=["icons"])),
    )
elif sys.platform == 'win32':
    # py2exe build for Windows.
    extra_options = dict(
        setup_requires=['py2exe'],
        app=[mainscript],
    )
else:
    extra_options = dict(
        # Normally unix-like platforms will use "setup.py install"
        # and install the main script as such
        scripts=[mainscript],
    )

setup(
    name=name,
    version=version,
    **extra_options
)
from distutils import dir_util

# Post-build fixups for the Mac .app bundle: locate Qt's qt_menu.nib
# (required for the menu bar in bundled Qt apps) and copy it into the
# bundle's Resources directory.
if sys.platform == 'darwin':
    # Remove the copied py file
    os.remove(mainscript)
    resource = "dist/" + name + ".app/Contents/Resources/"

    # Try to locate qt_menu
    # Let's try the port version first!
    if os.path.isfile("/opt/local/lib/Resources/qt_menu.nib"):
        qt_menu_location = "/opt/local/lib/Resources/qt_menu.nib"
    else:
        # No dice? Then let's try the brew version
        if os.path.exists("/usr/local/Cellar"):
            qt_menu_location = os.popen("find /usr/local/Cellar -name qt_menu.nib | tail -n 1").read()
        # no brew, check /opt/local
        else:
            qt_menu_location = os.popen("find /opt/local -name qt_menu.nib | tail -n 1").read()
        # popen output ends with a newline; strip it.
        qt_menu_location = re.sub('\n', '', qt_menu_location)

    if (len(qt_menu_location) == 0):
        print "Sorry couldn't find your qt_menu.nib this probably won't work"
    else:
        print "Found your qib: " + qt_menu_location

        # Need to include a copy of qt_menu.nib
        shutil.copytree(qt_menu_location, resource + "qt_menu.nib")
        # Need to touch qt.conf to avoid loading 2 sets of Qt libraries
        fname = resource + "qt.conf"
        # file() is the Python 2 open(); 'a' + utime == portable "touch".
        with file(fname, 'a'):
            os.utime(fname, None)
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/extra_modules/gdata/apps/adminsettings/service.py | 48 | 13680 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set domain admin settings.
AdminSettingsService: Set admin settings."""
__author__ = 'jlee@pbu.edu'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
class AdminSettingsService(gdata.apps.service.PropertyService):
    """Client for the Google Apps Admin Settings service."""

    def _serviceUrl(self, setting_id, domain=None):
        """Build the feed URI for `setting_id`, defaulting to self.domain."""
        if domain is None:
            domain = self.domain
        return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id)

    def genericGet(self, location):
        """Generic HTTP Get Wrapper.

        Args:
          location: relative uri to Get

        Returns:
          A dict containing the result of the get operation.

        Raises:
          gdata.apps.service.AppsForYourDomainException on request failure.
        """
        uri = self._serviceUrl(location)
        try:
            return self._GetProperties(uri)
        except gdata.service.RequestError as e:
            # FIX: was a bare `AppsForYourDomainException` (NameError); the
            # class is defined in gdata.apps.service.
            raise gdata.apps.service.AppsForYourDomainException(e.args[0])

    def GetDefaultLanguage(self):
        """Return the domain default language tag (see the email settings
        protocol guide for the list of valid tags)."""
        result = self.genericGet('general/defaultLanguage')
        return result['defaultLanguage']

    def UpdateDefaultLanguage(self, defaultLanguage):
        """Update the domain default language.

        Args:
          defaultLanguage: language tag to set (see the email settings
            protocol guide for valid values).

        Returns:
          A dict containing the result of the put operation.
        """
        uri = self._serviceUrl('general/defaultLanguage')
        properties = {'defaultLanguage': defaultLanguage}
        return self._PutProperties(uri, properties)

    def GetOrganizationName(self):
        """Return the organization name as a string."""
        result = self.genericGet('general/organizationName')
        return result['organizationName']

    def UpdateOrganizationName(self, organizationName):
        """Update the organization name; returns the put operation result dict."""
        uri = self._serviceUrl('general/organizationName')
        properties = {'organizationName': organizationName}
        return self._PutProperties(uri, properties)

    def GetMaximumNumberOfUsers(self):
        """Return the maximum number of users allowed, as an int."""
        result = self.genericGet('general/maximumNumberOfUsers')
        return int(result['maximumNumberOfUsers'])

    def GetCurrentNumberOfUsers(self):
        """Return the current number of users, as an int."""
        result = self.genericGet('general/currentNumberOfUsers')
        return int(result['currentNumberOfUsers'])

    def IsDomainVerified(self):
        """Return True if the domain is verified."""
        result = self.genericGet('accountInformation/isVerified')
        # The feed returns the string 'true'/'false'.
        return result['isVerified'] == 'true'

    def GetSupportPIN(self):
        """Return the Support PIN as a string."""
        result = self.genericGet('accountInformation/supportPIN')
        return result['supportPIN']

    def GetEdition(self):
        """Return the domain's edition (premier, education, partner)."""
        result = self.genericGet('accountInformation/edition')
        return result['edition']

    def GetCustomerPIN(self):
        """Return the customer PIN as a string."""
        result = self.genericGet('accountInformation/customerPIN')
        return result['customerPIN']

    def GetCreationTime(self):
        """Return the domain's creation time as a string."""
        result = self.genericGet('accountInformation/creationTime')
        return result['creationTime']

    def GetCountryCode(self):
        """Return the domain country code (ISO 3166-1 alpha-2) as a string."""
        result = self.genericGet('accountInformation/countryCode')
        return result['countryCode']

    def GetAdminSecondaryEmail(self):
        """Return the domain admin's secondary email address."""
        result = self.genericGet('accountInformation/adminSecondaryEmail')
        return result['adminSecondaryEmail']

    def UpdateAdminSecondaryEmail(self, adminSecondaryEmail):
        """Set the admin secondary email; returns the put operation result dict."""
        uri = self._serviceUrl('accountInformation/adminSecondaryEmail')
        properties = {'adminSecondaryEmail': adminSecondaryEmail}
        return self._PutProperties(uri, properties)

    def GetDomainLogo(self):
        """Fetch the domain logo as binary image data.

        This function does not make use of the Google Apps Admin Settings
        API; it does an HTTP Get of a url specific to the Google Apps
        domain.  It is included for completeness sake.
        """
        import urllib.request, urllib.parse, urllib.error
        url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif'
        response = urllib.request.urlopen(url)
        return response.read()

    def UpdateDomainLogo(self, logoImage):
        """Upload a custom domain logo.

        Args:
          logoImage: binary image data.

        Returns:
          A dict containing the result of the put operation.
        """
        from base64 import b64encode
        uri = self._serviceUrl('appearance/customLogo')
        # The API expects the image base64-encoded in the property payload.
        properties = {'logoImage': b64encode(logoImage)}
        return self._PutProperties(uri, properties)

    def GetCNAMEVerificationStatus(self):
        """Return CNAME verification status:
        a dict {recordName, verified, verifiedMethod}."""
        return self.genericGet('verification/cname')

    def UpdateCNAMEVerificationStatus(self, verified):
        """Update CNAME verification status.

        Args:
          verified: boolean, True will retry verification process.

        Returns:
          A dict containing the result of the put operation.
        """
        uri = self._serviceUrl('verification/cname')
        properties = self.GetCNAMEVerificationStatus()
        properties['verified'] = verified
        return self._PutProperties(uri, properties)

    def GetMXVerificationStatus(self):
        """Return MX verification status: a dict {verified, verifiedMethod}."""
        return self.genericGet('verification/mx')

    def UpdateMXVerificationStatus(self, verified):
        """Update MX verification status.

        Args:
          verified: boolean, True will retry verification process.

        Returns:
          A dict containing the result of the put operation.
        """
        uri = self._serviceUrl('verification/mx')
        properties = self.GetMXVerificationStatus()
        properties['verified'] = verified
        return self._PutProperties(uri, properties)

    def GetSSOSettings(self):
        """Return Single Sign-On settings: a dict {samlSignonUri,
        samlLogoutUri, changePasswordUri, enableSSO, ssoWhitelist,
        useDomainSpecificIssuer}."""
        return self.genericGet('sso/general')

    def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None,
                          samlLogoutUri=None, changePasswordUri=None,
                          ssoWhitelist=None, useDomainSpecificIssuer=None):
        """Update SSO Settings.

        Args:
          enableSSO: boolean, SSO Master on/off switch
          samlSignonUri: string, SSO Login Page
          samlLogoutUri: string, SSO Logout Page
          changePasswordUri: string, SSO Password Change Page
          ssoWhitelist: string, Range of IP Addresses which will see SSO
          useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('sso/general')

        # Get current settings, replace Nones with '' so the put payload is
        # always complete.
        properties = self.GetSSOSettings()
        if properties['samlSignonUri'] is None:
            properties['samlSignonUri'] = ''
        if properties['samlLogoutUri'] is None:
            properties['samlLogoutUri'] = ''
        if properties['changePasswordUri'] is None:
            properties['changePasswordUri'] = ''
        if properties['ssoWhitelist'] is None:
            properties['ssoWhitelist'] = ''

        # Update only the values we were passed.
        if enableSSO is not None:
            properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO)
        if samlSignonUri is not None:
            properties['samlSignonUri'] = samlSignonUri
        if samlLogoutUri is not None:
            properties['samlLogoutUri'] = samlLogoutUri
        if changePasswordUri is not None:
            properties['changePasswordUri'] = changePasswordUri
        if ssoWhitelist is not None:
            properties['ssoWhitelist'] = ssoWhitelist
        if useDomainSpecificIssuer is not None:
            properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer)

        return self._PutProperties(uri, properties)

    def GetSSOKey(self):
        """Return the SSO signing key:
        a dict {modulus, exponent, algorithm, format}."""
        return self.genericGet('sso/signingkey')

    def UpdateSSOKey(self, signingKey):
        """Upload a new SSO public signing key.

        Args:
          signingKey: string, public key to be uploaded.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('sso/signingkey')
        properties = {'signingKey': signingKey}
        return self._PutProperties(uri, properties)

    def IsUserMigrationEnabled(self):
        """Return True if user email migration is enabled."""
        result = self.genericGet('email/migration')
        return result['enableUserMigration'] == 'true'

    def UpdateUserMigrationStatus(self, enableUserMigration):
        """Enable/disable user email migration.

        Args:
          enableUserMigration: boolean.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('email/migration')
        # NOTE(review): unlike AddEmailRoute, the raw boolean is sent here
        # rather than gdata.apps.service._bool2str(...) -- confirm whether
        # the API accepts Python bools before changing.
        properties = {'enableUserMigration': enableUserMigration}
        return self._PutProperties(uri, properties)

    def GetOutboundGatewaySettings(self):
        """Return outbound gateway settings: a dict {smartHost, smtpMode};
        both values are None when no gateway is configured."""
        uri = self._serviceUrl('email/gateway')
        try:
            return self._GetProperties(uri)
        except gdata.service.RequestError as e:
            # FIX: was a bare `AppsForYourDomainException` (NameError).
            raise gdata.apps.service.AppsForYourDomainException(e.args[0])
        except TypeError:
            # If no outbound gateway is set, we get a TypeError;
            # catch it and return nothing...
            return {'smartHost': None, 'smtpMode': None}

    def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None):
        """Update outbound gateway settings.

        Args:
          smartHost: string, ip address or hostname of outbound gateway
          smtpMode: string, SMTP or SMTP_TLS

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('email/gateway')

        # Get current settings, replace Nones with ''.
        # FIX: was `GetOutboundGatewaySettings()` without `self.`, which
        # raised NameError at call time.
        properties = self.GetOutboundGatewaySettings()
        if properties['smartHost'] is None:
            properties['smartHost'] = ''
        if properties['smtpMode'] is None:
            properties['smtpMode'] = ''

        # If we were passed new values for smartHost or smtpMode, update them.
        if smartHost is not None:
            properties['smartHost'] = smartHost
        if smtpMode is not None:
            properties['smtpMode'] = smtpMode
        return self._PutProperties(uri, properties)

    def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled,
                      bounceNotifications, accountHandling):
        """Add a domain email route.

        Args:
          routeDestination: string, destination ip address or hostname
          routeRewriteTo: boolean, rewrite smtp envelope To:
          routeEnabled: boolean, enable/disable email routing
          bounceNotifications: boolean, send bounce notifications to sender
          accountHandling: string, which accounts to route: "allAccounts",
            "provisionedAccounts" or "unknownAccounts"

        Returns:
          A dict containing the result of the post operation.
        """
        uri = self._serviceUrl('emailrouting')
        properties = {}
        properties['routeDestination'] = routeDestination
        properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo)
        properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled)
        properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications)
        properties['accountHandling'] = accountHandling
        return self._PostProperties(uri, properties)
| apache-2.0 |
ortylp/scipy | scipy/sparse/csgraph/tests/test_reordering.py | 93 | 3457 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal
from scipy.sparse.csgraph import reverse_cuthill_mckee,\
maximum_bipartite_matching
from scipy.sparse import diags, csr_matrix, coo_matrix
def test_graph_reverse_cuthill_mckee():
    """RCM ordering of a fixed 8-node symmetric graph, with both the
    default int32 and explicit int64 index arrays."""
    adjacency = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
                          [0, 1, 1, 0, 0, 1, 0, 1],
                          [0, 1, 1, 0, 1, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [1, 0, 1, 0, 1, 0, 0, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
    graph = csr_matrix(adjacency)
    expected = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    assert_equal(reverse_cuthill_mckee(graph), expected)

    # Test int64 indices input: the ordering must be unchanged.
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), expected)
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM ordering of a 16-node symmetric graph supplied in COO form."""
    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
                     2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                     6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                     9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
                     12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                     14, 15, 15, 15, 15, 15])
    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                     7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
                     15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                     1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                     4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                     5, 7, 10, 13, 15])
    weights = np.ones(63, dtype=int)
    graph = coo_matrix((weights, (rows, cols))).tocsr()
    expected = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                         0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(reverse_cuthill_mckee(graph), expected)
def test_graph_maximum_bipartite_matching():
    # Strategy: randomly permute the rows and columns of an identity matrix,
    # then check that maximum_bipartite_matching recovers permutations that
    # restore a nonzero diagonal (i.e. a perfect matching).
    # NOTE(review): np.random is used unseeded, so this test is not
    # reproducible run-to-run; consider seeding before debugging failures.
    A = diags(np.ones(25), offsets=0, format='csc')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()

    # Randomly permute identity matrix
    B = Rmat*A*Cmat

    # Row permute
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C1 = Rmat*B

    # Column permute
    perm2 = maximum_bipartite_matching(B,perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
    C2 = B*Cmat

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)

    # Test int64 indices input
    B.indices = B.indices.astype('int64')
    B.indptr = B.indptr.astype('int64')
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C3 = Rmat*B
    assert_equal(any(C3.diagonal() == 0), False)
| bsd-3-clause |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/htmllib.py | 312 | 12869 | """HTML 2.0 parser.
See the HTML 2.0 specification:
http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
"""
from warnings import warnpy3k
warnpy3k("the htmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import sgmllib
from formatter import AS_IS
__all__ = ["HTMLParser", "HTMLParseError"]
class HTMLParseError(sgmllib.SGMLParseError):
    """Error raised when an HTML document can't be parsed."""
    # Subclass of sgmllib.SGMLParseError so callers can distinguish
    # HTML-level parse failures while still catching the SGML base class.
class HTMLParser(sgmllib.SGMLParser):
"""This is the basic HTML parser class.
It supports all entity names required by the XHTML 1.0 Recommendation.
It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2
elements.
"""
from htmlentitydefs import entitydefs
def __init__(self, formatter, verbose=0):
"""Creates an instance of the HTMLParser class.
The formatter parameter is the formatter instance associated with
the parser.
"""
sgmllib.SGMLParser.__init__(self, verbose)
self.formatter = formatter
def error(self, message):
raise HTMLParseError(message)
def reset(self):
sgmllib.SGMLParser.reset(self)
self.savedata = None
self.isindex = 0
self.title = None
self.base = None
self.anchor = None
self.anchorlist = []
self.nofill = 0
self.list_stack = []
# ------ Methods used internally; some may be overridden
# --- Formatter interface, taking care of 'savedata' mode;
# shouldn't need to be overridden
def handle_data(self, data):
if self.savedata is not None:
self.savedata = self.savedata + data
else:
if self.nofill:
self.formatter.add_literal_data(data)
else:
self.formatter.add_flowing_data(data)
# --- Hooks to save data; shouldn't need to be overridden
def save_bgn(self):
"""Begins saving character data in a buffer instead of sending it
to the formatter object.
Retrieve the stored data via the save_end() method. Use of the
save_bgn() / save_end() pair may not be nested.
"""
self.savedata = ''
def save_end(self):
"""Ends buffering character data and returns all data saved since
the preceding call to the save_bgn() method.
If the nofill flag is false, whitespace is collapsed to single
spaces. A call to this method without a preceding call to the
save_bgn() method will raise a TypeError exception.
"""
data = self.savedata
self.savedata = None
if not self.nofill:
data = ' '.join(data.split())
return data
# --- Hooks for anchors; should probably be overridden
def anchor_bgn(self, href, name, type):
"""This method is called at the start of an anchor region.
The arguments correspond to the attributes of the <A> tag with
the same names. The default implementation maintains a list of
hyperlinks (defined by the HREF attribute for <A> tags) within
the document. The list of hyperlinks is available as the data
attribute anchorlist.
"""
self.anchor = href
if self.anchor:
self.anchorlist.append(href)
def anchor_end(self):
"""This method is called at the end of an anchor region.
The default implementation adds a textual footnote marker using an
index into the list of hyperlinks created by the anchor_bgn()method.
"""
if self.anchor:
self.handle_data("[%d]" % len(self.anchorlist))
self.anchor = None
# --- Hook for images; should probably be overridden
def handle_image(self, src, alt, *args):
"""This method is called to handle images.
The default implementation simply passes the alt value to the
handle_data() method.
"""
self.handle_data(alt)
# --------- Top level elememts
def start_html(self, attrs): pass
def end_html(self): pass
def start_head(self, attrs): pass
def end_head(self): pass
def start_body(self, attrs): pass
def end_body(self): pass
# ------ Head elements
def start_title(self, attrs):
self.save_bgn()
def end_title(self):
self.title = self.save_end()
def do_base(self, attrs):
for a, v in attrs:
if a == 'href':
self.base = v
def do_isindex(self, attrs):
self.isindex = 1
def do_link(self, attrs):
pass
def do_meta(self, attrs):
pass
def do_nextid(self, attrs): # Deprecated
pass
# ------ Body elements
# --- Headings
def start_h1(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h1', 0, 1, 0))
def end_h1(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h2(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h2', 0, 1, 0))
def end_h2(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h3(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h3', 0, 1, 0))
def end_h3(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h4(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h4', 0, 1, 0))
def end_h4(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h5(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h5', 0, 1, 0))
def end_h5(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h6(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h6', 0, 1, 0))
def end_h6(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
# --- Block Structuring Elements

def do_p(self, attrs):
    # <p>: paragraph break with one blank line.
    self.formatter.end_paragraph(1)

def start_pre(self, attrs):
    # Preformatted text: teletype font, filling disabled.  nofill is a
    # counter so nested <pre>-like elements stack safely.
    self.formatter.end_paragraph(1)
    self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
    self.nofill = self.nofill + 1

def end_pre(self):
    self.formatter.end_paragraph(1)
    self.formatter.pop_font()
    self.nofill = max(0, self.nofill - 1)

def start_xmp(self, attrs):
    # <xmp> behaves like <pre>, but its content is literal SGML.
    self.start_pre(attrs)
    self.setliteral('xmp')          # Tell SGML parser

def end_xmp(self):
    self.end_pre()

def start_listing(self, attrs):
    self.start_pre(attrs)
    self.setliteral('listing')      # Tell SGML parser

def end_listing(self):
    self.end_pre()

def start_address(self, attrs):
    # <address>: italics, no blank line around the block.
    self.formatter.end_paragraph(0)
    self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))

def end_address(self):
    self.formatter.end_paragraph(0)
    self.formatter.pop_font()

def start_blockquote(self, attrs):
    self.formatter.end_paragraph(1)
    self.formatter.push_margin('blockquote')

def end_blockquote(self):
    self.formatter.end_paragraph(1)
    self.formatter.pop_margin()
# --- List Elements
#
# self.list_stack tracks open lists as mutable [tag, label, counter]
# triples; do_li reads the current label and bumps the per-list counter.

def start_ul(self, attrs):
    # Only force a blank line for an outermost list.
    self.formatter.end_paragraph(not self.list_stack)
    self.formatter.push_margin('ul')
    self.list_stack.append(['ul', '*', 0])

def end_ul(self):
    if self.list_stack:
        del self.list_stack[-1]
    self.formatter.end_paragraph(not self.list_stack)
    self.formatter.pop_margin()

def do_li(self, attrs):
    self.formatter.end_paragraph(0)
    if self.list_stack:
        [dummy, label, counter] = top = self.list_stack[-1]
        top[2] = counter = counter + 1
    else:
        # <li> outside any list: fall back to an unnumbered bullet.
        label, counter = '*', 0
    self.formatter.add_label_data(label, counter)

def start_ol(self, attrs):
    self.formatter.end_paragraph(not self.list_stack)
    self.formatter.push_margin('ol')
    # The 'type' attribute selects the numbering style; single characters
    # get a trailing dot (e.g. 'a' -> 'a.').
    label = '1.'
    for a, v in attrs:
        if a == 'type':
            if len(v) == 1:
                v = v + '.'
            label = v
    self.list_stack.append(['ol', label, 0])

def end_ol(self):
    if self.list_stack:
        del self.list_stack[-1]
    self.formatter.end_paragraph(not self.list_stack)
    self.formatter.pop_margin()

def start_menu(self, attrs):
    # <menu> and <dir> are rendered as plain unordered lists.
    self.start_ul(attrs)

def end_menu(self):
    self.end_ul()

def start_dir(self, attrs):
    self.start_ul(attrs)

def end_dir(self):
    self.end_ul()

def start_dl(self, attrs):
    self.formatter.end_paragraph(1)
    self.list_stack.append(['dl', '', 0])

def end_dl(self):
    self.ddpop(1)
    if self.list_stack:
        del self.list_stack[-1]

def do_dt(self, attrs):
    self.ddpop()

def do_dd(self, attrs):
    self.ddpop()
    self.formatter.push_margin('dd')
    self.list_stack.append(['dd', '', 0])

def ddpop(self, bl=0):
    # Close a pending <dd> entry, if one is open.
    self.formatter.end_paragraph(bl)
    if self.list_stack:
        if self.list_stack[-1][0] == 'dd':
            del self.list_stack[-1]
            self.formatter.pop_margin()
# --- Phrase Markup

# Idiomatic Elements: rendered via the typographic elements below --
# italics for cite/em/var, teletype for code/kbd/samp, bold for strong.
def start_cite(self, attrs): self.start_i(attrs)
def end_cite(self): self.end_i()
def start_code(self, attrs): self.start_tt(attrs)
def end_code(self): self.end_tt()
def start_em(self, attrs): self.start_i(attrs)
def end_em(self): self.end_i()
def start_kbd(self, attrs): self.start_tt(attrs)
def end_kbd(self): self.end_tt()
def start_samp(self, attrs): self.start_tt(attrs)
def end_samp(self): self.end_tt()
def start_strong(self, attrs): self.start_b(attrs)
def end_strong(self): self.end_b()
def start_var(self, attrs): self.start_i(attrs)
def end_var(self): self.end_i()

# Typographic Elements

def start_i(self, attrs):
    self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))

def end_i(self):
    self.formatter.pop_font()

def start_b(self, attrs):
    self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))

def end_b(self):
    self.formatter.pop_font()

def start_tt(self, attrs):
    self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))

def end_tt(self):
    self.formatter.pop_font()

def start_a(self, attrs):
    """Collect href/name/type from an <a> tag and open an anchor.

    Values are whitespace-stripped; 'type' is lowercased.  Missing
    attributes default to the empty string.
    """
    href = ''
    name = ''
    type = ''
    for attrname, value in attrs:
        value = value.strip()
        if attrname == 'href':
            href = value
        if attrname == 'name':
            name = value
        if attrname == 'type':
            type = value.lower()
    self.anchor_bgn(href, name, type)

def end_a(self):
    self.anchor_end()
# --- Line Break

def do_br(self, attrs):
    self.formatter.add_line_break()

# --- Horizontal Rule

def do_hr(self, attrs):
    self.formatter.add_hor_rule()

# --- Image

def do_img(self, attrs):
    """Extract <img> attributes and delegate to handle_image().

    width/height are parsed as integers; malformed values silently fall
    back to 0.  Missing alt text defaults to '(image)'.
    """
    align = ''
    alt = '(image)'
    ismap = ''
    src = ''
    width = 0
    height = 0
    for attrname, value in attrs:
        if attrname == 'align':
            align = value
        if attrname == 'alt':
            alt = value
        if attrname == 'ismap':
            ismap = value
        if attrname == 'src':
            src = value
        if attrname == 'width':
            try:
                width = int(value)
            except ValueError:
                pass
        if attrname == 'height':
            try:
                height = int(value)
            except ValueError:
                pass
    self.handle_image(src, alt, ismap, align, width, height)

# --- Really Old Unofficial Deprecated Stuff

def do_plaintext(self, attrs):
    # <plaintext>: everything up to EOF is literal text.
    self.start_pre(attrs)
    self.setnomoretags()            # Tell SGML parser

# --- Unhandled tags

def unknown_starttag(self, tag, attrs):
    # Silently ignore tags this parser does not know about.
    pass

def unknown_endtag(self, tag):
    pass
def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
p.close()
if __name__ == '__main__':
test()
| mit |
derekjchow/models | research/learned_optimizer/problems/datasets.py | 7 | 7404 | # Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate or load datasets for supervised learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
from sklearn.datasets import make_classification
MAX_SEED = 4294967295
class Dataset(namedtuple("Dataset", "data labels")):
    """Helper class for managing a supervised learning dataset.

    Args:
      data: an array of type float32 with N samples, each of which is the set
          of features for that sample. (Shape (N, D_i), where N is the number
          of samples and D_i is the number of features for that sample.)
      labels: an array of type int32 or int64 with N elements, indicating the
          class label for the corresponding set of features in data.
    """

    # Since this is an immutable object, we don't need to reserve slots.
    __slots__ = ()

    @property
    def size(self):
        """Dataset size (number of samples)."""
        return len(self.data)

    def batch_indices(self, num_batches, batch_size):
        """Creates indices of shuffled minibatches.

        Args:
          num_batches: the number of batches to generate
          batch_size: the size of each batch

        Returns:
          batch_indices: a list of minibatch indices, arranged so that the
              dataset is randomly shuffled.

        Raises:
          ValueError: if the data and labels have different lengths
        """
        if len(self.data) != len(self.labels):
            raise ValueError("Labels and data must have the same number of samples.")

        batch_indices = []

        # Follows logic in mnist.py to ensure we cover the entire dataset.
        index_in_epoch = 0
        dataset_size = len(self.data)
        dataset_indices = np.arange(dataset_size)
        np.random.shuffle(dataset_indices)

        for _ in range(num_batches):
            start = index_in_epoch
            index_in_epoch += batch_size
            if index_in_epoch > dataset_size:
                # Finished epoch, reshuffle.
                np.random.shuffle(dataset_indices)
                # Start next epoch.
                start = 0
                index_in_epoch = batch_size
            end = index_in_epoch
            batch_indices.append(dataset_indices[start:end].tolist())

        return batch_indices
def noisy_parity_class(n_samples,
                       n_classes=2,
                       n_context_ids=5,
                       noise_prob=0.25,
                       random_seed=None):
    """Returns a randomly generated sparse-to-sparse dataset.

    The label is a parity class of a set of context classes.

    Args:
      n_samples: number of samples (data points)
      n_classes: number of class labels (default: 2)
      n_context_ids: how many classes to take the parity of (default: 5).
      noise_prob: how often to corrupt the label (default: 0.25)
      random_seed: seed used for drawing the random data (default: None)

    Returns:
      dataset: A Dataset namedtuple containing the generated data and labels
    """
    np.random.seed(random_seed)
    x = np.random.randint(0, n_classes, [n_samples, n_context_ids])
    # Corrupt a Bernoulli(noise_prob) subset of labels by adding 1 mod n.
    noise = np.random.binomial(1, noise_prob, [n_samples])
    y = (np.sum(x, 1) + noise) % n_classes
    return Dataset(x.astype("float32"), y.astype("int32"))
def random(n_features, n_samples, n_classes=2, sep=1.0, random_seed=None):
    """Returns a randomly generated classification dataset.

    NOTE: the name shadows the stdlib 'random' module inside this module.

    Args:
      n_features: number of features (dependent variables)
      n_samples: number of samples (data points)
      n_classes: number of class labels (default: 2)
      sep: separation of the two classes, a higher value corresponds to
          an easier classification problem (default: 1.0)
      random_seed: seed used for drawing the random data (default: None)

    Returns:
      dataset: A Dataset namedtuple containing the generated data and labels
    """
    # Generate the problem data with scikit-learn; all features informative.
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=n_features,
                               n_redundant=0,
                               n_classes=n_classes,
                               class_sep=sep,
                               random_state=random_seed)
    return Dataset(x.astype("float32"), y.astype("int32"))
def random_binary(n_features, n_samples, random_seed=None):
    """Returns a randomly generated dataset of binary values.

    Args:
      n_features: number of features (dependent variables)
      n_samples: number of samples (data points)
      random_seed: seed used for drawing the random data (default: None)

    Returns:
      dataset: A Dataset namedtuple containing the generated data and labels
          (labels are all zero).
    """
    random_seed = (np.random.randint(MAX_SEED) if random_seed is None
                   else random_seed)
    np.random.seed(random_seed)
    x = np.random.randint(2, size=(n_samples, n_features))
    y = np.zeros((n_samples, 1))
    return Dataset(x.astype("float32"), y.astype("int32"))
def random_symmetric(n_features, n_samples, random_seed=None):
    """Returns a randomly generated dataset of values and their negatives.

    Args:
      n_features: number of features (dependent variables)
      n_samples: number of samples (data points); for odd values, the
          actual dataset holds n_samples - 1 samples (pairs only).
      random_seed: seed used for drawing the random data (default: None)

    Returns:
      dataset: A Dataset namedtuple containing the generated data and labels
          (labels are all zero).
    """
    random_seed = (np.random.randint(MAX_SEED) if random_seed is None
                   else random_seed)
    np.random.seed(random_seed)
    x1 = np.random.normal(size=(int(n_samples / 2), n_features))
    x = np.concatenate((x1, -x1), axis=0)
    # Bug fix: for odd n_samples the concatenation yields n_samples - 1
    # rows; size the labels to the actual data so that batch_indices()
    # does not raise a length-mismatch ValueError.
    y = np.zeros((len(x), 1))
    return Dataset(x.astype("float32"), y.astype("int32"))
def random_mlp(n_features, n_samples, random_seed=None, n_layers=6, width=20):
    """Returns a generated output of an MLP with random weights.

    Args:
      n_features: number of features (dependent variables)
      n_samples: number of samples (data points)
      random_seed: seed used for drawing the random data (default: None)
      n_layers: number of layers in random MLP
      width: width of the layers in random MLP

    Returns:
      dataset: A Dataset namedtuple containing the generated data and labels
    """
    random_seed = (np.random.randint(MAX_SEED) if random_seed is None
                   else random_seed)
    np.random.seed(random_seed)
    x = np.random.normal(size=(n_samples, n_features))
    y = x
    n_in = n_features
    # He-style scaling keeps activations from exploding/vanishing.
    scale_factor = np.sqrt(2.) / np.sqrt(n_features)
    for _ in range(n_layers):
        weights = np.random.normal(size=(n_in, width)) * scale_factor
        y = np.dot(y, weights).clip(min=0)   # linear layer + ReLU
        n_in = width
    # Binarize the first output unit into a {0, 1} class label.
    y = y[:, 0]
    y[y > 0] = 1
    return Dataset(x.astype("float32"), y.astype("int32"))
# Canonical zero-sample dataset, usable as a default placeholder.
EMPTY_DATASET = Dataset(np.array([], dtype="float32"),
                        np.array([], dtype="int32"))
| apache-2.0 |
mbbill/shadowsocks | shadowsocks/server.py | 273 | 4627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, \
asyncdns, manager
def main():
    """Start the shadowsocks server.

    Builds one TCP and one UDP relay per configured port, then runs the
    event loop -- optionally forked into config['workers'] processes on
    POSIX systems.  Exits via sys.exit() on fatal errors.
    """
    shell.check_python()
    config = shell.get_config(False)
    daemon.daemon_exec(config)

    # Normalize the config into the port -> password mapping form.
    if config['port_password']:
        if config['password']:
            logging.warn('warning: port_password should not be used with '
                         'server_port and password. server_port and password '
                         'will be ignored')
    else:
        config['port_password'] = {}
        server_port = config['server_port']
        if type(server_port) == list:
            for a_server_port in server_port:
                config['port_password'][a_server_port] = config['password']
        else:
            config['port_password'][str(server_port)] = config['password']

    if config.get('manager_address', 0):
        logging.info('entering manager mode')
        manager.run(config)
        return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    port_password = config['port_password']
    del config['port_password']
    for port, password in port_password.items():
        a_config = config.copy()
        a_config['server_port'] = int(port)
        a_config['password'] = password
        logging.info("starting server at %s:%d" %
                     (a_config['server'], int(port)))
        tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False))
        udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False))

    def run_server():
        # Graceful shutdown on SIGQUIT, immediate exit on SIGINT.
        def child_handler(signum, _):
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            list(map(lambda s: s.close(next_tick=True),
                     tcp_servers + udp_servers))
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
                      child_handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))
            # Drop privileges only after binding the listening sockets.
            daemon.set_user(config.get('user', None))
            loop.run()
        except Exception as e:
            shell.print_exception(e)
            sys.exit(1)

    if int(config['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(config['workers'])):
                r = os.fork()
                if r == 0:
                    logging.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:
                # Master process: forward termination signals to workers,
                # close its own copies of the sockets, then reap children.
                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may already exited
                            pass
                    sys.exit()
                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()

                for child in children:
                    os.waitpid(child, 0)
        else:
            logging.warn('worker is only available on Unix/Linux')
            run_server()
    else:
        run_server()


if __name__ == '__main__':
    main()
| apache-2.0 |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/idlelib/CodeContext.py | 128 | 8353 | """CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
# Keywords that open an indented block and should appear as context hints.
BLOCKOPENERS = set(["class", "def", "elif", "else", "except", "finally", "for",
                    "if", "try", "while", "with"])
UPDATEINTERVAL = 100      # millisec between context-pane refreshes
FONTUPDATEINTERVAL = 1000  # millisec between font-change checks

# Pre-compiled once; split a line into (leading whitespace, first word).
_SPACES_FIRSTWORD = re.compile(r"^(\s*)(\w*)")


def getspacesfirstword(s, c=_SPACES_FIRSTWORD):
    """Return (leading_whitespace, first_word) for source line *s*."""
    return c.match(s).groups()
class CodeContext:
    """IDLE extension: show the enclosing block-structure lines in a pane
    above the edit window (see the module docstring for details)."""

    menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]

    # User-configurable depth and colors, read once at class-creation time.
    context_depth = idleConf.GetOption("extensions", "CodeContext",
                                       "numlines", type="int", default=3)
    bgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "bgcolor", type="str", default="LightGray")
    fgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "fgcolor", type="str", default="Black")

    def __init__(self, editwin):
        self.editwin = editwin
        self.text = editwin.text
        self.textfont = self.text["font"]
        self.label = None
        # self.info is a list of (line number, indent level, line text, block
        # keyword) tuples providing the block structure associated with
        # self.topvisible (the linenumber of the line displayed at the top of
        # the edit window). self.info[0] is initialized as a 'dummy' line
        # which starts the toplevel 'block' of the module.
        self.info = [(0, -1, "", False)]
        self.topvisible = 1
        visible = idleConf.GetOption("extensions", "CodeContext",
                                     "visible", type="bool", default=False)
        if visible:
            self.toggle_code_context_event()
            self.editwin.setvar('<<toggle-code-context>>', True)
        # Start two update cycles, one for context lines, one for font
        # changes.
        self.text.after(UPDATEINTERVAL, self.timer_event)
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)

    def toggle_code_context_event(self, event=None):
        """Show or hide the context pane and persist the choice."""
        if not self.label:
            # Calculate the border width and horizontal padding required to
            # align the context with the text in the main Text widget.
            #
            # All values are passed through int(str(<value>)), since some
            # values may be pixel objects, which can't simply be added to
            # ints.
            widgets = self.editwin.text, self.editwin.text_frame

            # Calculate the required horizontal padding.
            padx = 0
            for widget in widgets:
                padx += int(str(widget.pack_info()['padx']))
                padx += int(str(widget.cget('padx')))

            # Calculate the required border width.
            border = 0
            for widget in widgets:
                border += int(str(widget.cget('border')))

            self.label = tkinter.Label(self.editwin.top,
                                       text="\n" * (self.context_depth - 1),
                                       anchor=W, justify=LEFT,
                                       font=self.textfont,
                                       bg=self.bgcolor, fg=self.fgcolor,
                                       width=1,  # don't request more than we get
                                       padx=padx, border=border,
                                       relief=SUNKEN)

            # Pack the label widget before and above the text_frame widget,
            # thus ensuring that it will appear directly above text_frame.
            self.label.pack(side=TOP, fill=X, expand=False,
                            before=self.editwin.text_frame)
        else:
            self.label.destroy()
            self.label = None
        idleConf.SetOption("extensions", "CodeContext", "visible",
                           str(self.label is not None))
        idleConf.SaveUserCfgFiles()

    def get_line_info(self, linenum):
        """Get the line indent value, text, and any block start keyword

        If the line does not start a block, the keyword value is False.
        The indentation of empty lines (or comment lines) is INFINITY.
        """
        text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
        spaces, firstword = getspacesfirstword(text)
        opener = firstword in BLOCKOPENERS and firstword
        if len(text) == len(spaces) or text[len(spaces)] == '#':
            indent = INFINITY
        else:
            indent = len(spaces)
        return indent, text, opener

    def get_context(self, new_topvisible, stopline=1, stopindent=0):
        """Get context lines, starting at new_topvisible and working
        backwards.

        Stop when stopline or stopindent is reached. Return a tuple of
        context data and the indent level at the top of the region
        inspected.
        """
        assert stopline > 0
        lines = []
        # The indentation level we are currently in:
        lastindent = INFINITY
        # For a line to be interesting, it must begin with a block opening
        # keyword, and have less indentation than lastindent.
        for linenum in range(new_topvisible, stopline - 1, -1):
            indent, text, opener = self.get_line_info(linenum)
            if indent < lastindent:
                lastindent = indent
                if opener in ("else", "elif"):
                    # We also show the if statement.
                    lastindent += 1
                if opener and linenum < new_topvisible and indent >= stopindent:
                    lines.append((linenum, indent, text, opener))
                if lastindent <= stopindent:
                    break
        lines.reverse()
        return lines, lastindent

    def update_code_context(self):
        """Update context information and lines visible in the context pane.
        """
        new_topvisible = int(self.text.index("@0,0").split('.')[0])
        if self.topvisible == new_topvisible:      # haven't scrolled
            return
        if self.topvisible < new_topvisible:       # scroll down
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.topvisible)
            # Retain only context info applicable to the region
            # between topvisible and new_topvisible:
            while self.info[-1][1] >= lastindent:
                del self.info[-1]
        elif self.topvisible > new_topvisible:     # scroll up
            stopindent = self.info[-1][1] + 1
            # Retain only context info associated
            # with lines above new_topvisible:
            while self.info[-1][0] >= new_topvisible:
                stopindent = self.info[-1][1]
                del self.info[-1]
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.info[-1][0] + 1,
                                                 stopindent)
        self.info.extend(lines)
        self.topvisible = new_topvisible
        # Empty lines in context pane:
        context_strings = [""] * max(0, self.context_depth - len(self.info))
        # followed by the context hint lines:
        context_strings += [x[2] for x in self.info[-self.context_depth:]]
        self.label["text"] = '\n'.join(context_strings)

    def timer_event(self):
        # Periodic refresh; only does work while the pane is visible.
        if self.label:
            self.update_code_context()
        self.text.after(UPDATEINTERVAL, self.timer_event)

    def font_timer_event(self):
        # Track editor font changes so the pane stays aligned.
        newtextfont = self.text["font"]
        if self.label and newtextfont != self.textfont:
            self.textfont = newtextfont
            self.label["font"] = self.textfont
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
| lgpl-3.0 |
krast/suse_xen | tools/tests/utests/ut_xend/ut_XendConfig.py | 42 | 4443 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 flonatel GmbH & Co. KG
#============================================================================
import os
import unittest
# This does not work because of a cyclic import loop
#from xen.xend.XendConfig import XendConfig
import xen.xend.XendDomain
class XendConfigUnitTest(unittest.TestCase):
    """Unit tests for XendConfig.handle_fileutils: how PV_kernel /
    PV_ramdisk values (plain paths, file: URIs, data: URIs) are treated.
    (Python 2 module: uses assert_/has_key/file().)"""

    def minimal_vmconf(self):
        # Smallest xapi config XendConfig will accept.
        return {
            'memory_dynamic_min': 64,
            'memory_dynamic_max': 128,
            'memory_static_max': 128,
        }

    def check_hf_01(self):
        "xend.XendConfig.handle_fileutils - PV_kernel/ramdisk not set"
        vmconf = self.minimal_vmconf()
        xc = xen.xend.XendConfig.XendConfig(xapi = vmconf)
        # No kernel/ramdisk given: the tmp-file flags must not appear.
        self.assert_(not xc.has_key('use_tmp_kernel'))
        self.assert_(not xc.has_key('use_tmp_ramdisk'))

    def check_hf_02(self):
        "xend.XendConfig.handle_fileutils - PV_kernel/ramdisk set to some path"
        vmconf = self.minimal_vmconf()
        vmconf['PV_kernel'] = '/some/where/under/the/rainbow-kernel'
        vmconf['PV_ramdisk'] = '/some/where/under/the/rainbow-ramdisk'
        xc = xen.xend.XendConfig.XendConfig(xapi = vmconf)
        # Plain paths: flags present but False (no temp files needed).
        self.assert_(xc.has_key('use_tmp_kernel'))
        self.assert_(xc.has_key('use_tmp_ramdisk'))
        self.assert_(not xc['use_tmp_kernel'])
        self.assert_(not xc['use_tmp_ramdisk'])

    def check_hf_03(self):
        "xend.XendConfig.handle_fileutils - PV_kernel/ramdisk using file: scheme"
        vmconf = self.minimal_vmconf()
        vmconf['PV_kernel'] = 'file:///some/where/under/the/rainbow-kernel'
        vmconf['PV_ramdisk'] = 'file:///some/where/under/the/rainbow-ramdisk'
        xc = xen.xend.XendConfig.XendConfig(xapi = vmconf)
        self.assert_(xc.has_key('use_tmp_kernel'))
        self.assert_(xc.has_key('use_tmp_ramdisk'))
        self.assert_(not xc['use_tmp_kernel'])
        self.assert_(not xc['use_tmp_ramdisk'])
        self.assert_('PV_kernel' in xc)
        self.assert_('PV_ramdisk' in xc)
        # The file: scheme is stripped down to a plain filesystem path.
        self.assertEqual("/some/where/under/the/rainbow-kernel",
                         xc['PV_kernel'])
        self.assertEqual("/some/where/under/the/rainbow-ramdisk",
                         xc['PV_ramdisk'])

    def check_hf_04(self):
        "xend.XendConfig.handle_fileutils - PV_kernel/ramdisk using data: scheme"
        vmconf = self.minimal_vmconf()
        vmconf['PV_kernel'] = 'data:application/octet-stream;base64,VGhpcyBpcyB0aGUga2VybmVsCg=='
        vmconf['PV_ramdisk'] = 'data:application/octet-stream;base64,TXkgZ3JlYXQgcmFtZGlzawo='
        xc = xen.xend.XendConfig.XendConfig(xapi = vmconf)
        self.assert_(xc.has_key('use_tmp_kernel'))
        self.assert_(xc.has_key('use_tmp_ramdisk'))
        self.assert_(xc['use_tmp_kernel'])
        self.assert_(xc['use_tmp_ramdisk'])
        self.assert_('PV_kernel' in xc)
        self.assert_('PV_ramdisk' in xc)
        # data: URIs are decoded into temporary boot files under xend's
        # run directory.
        self.assert_(xc['PV_kernel'].startswith(
            "/var/run/xend/boot/data_uri_file."))
        self.assert_(xc['PV_ramdisk'].startswith(
            "/var/run/xend/boot/data_uri_file."))
        f = file(xc['PV_kernel'])
        kc = f.read()
        f.close()
        f = file(xc['PV_ramdisk'])
        rc = f.read()
        f.close()
        os.unlink(xc['PV_kernel'])
        os.unlink(xc['PV_ramdisk'])
        self.assertEqual(kc, "This is the kernel\n")
        self.assertEqual(rc, "My great ramdisk\n")


def suite():
    """Collect every check_* method into one test suite."""
    return unittest.TestSuite(
        [unittest.makeSuite(XendConfigUnitTest, 'check_'), ])


if __name__ == "__main__":
    testresult = unittest.TextTestRunner(verbosity=3).run(suite())
| gpl-2.0 |
RedHatQE/cfme_tests | cfme/automate/dialogs/dialog_tab.py | 1 | 2309 | import attr
from navmazing import NavigateToAttribute
from widgetastic.widget import Text
from . import AddTabView
from . import TabForm
from .dialog_box import BoxCollection
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.modeling.base import parent_of_type
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
class EditTabView(TabForm):
    """View shown while a dialog tab is being edited."""

    @property
    def is_displayed(self):
        return (
            self.in_customization and
            self.title.text == "Editing Dialog {} [Tab Information]".format(self.tab_label)
        )


class DetailsTabView(TabForm):
    """Details view of a single, saved dialog tab."""

    title = Text("#explorer_title_text")

    @property
    def is_displayed(self):
        return (
            self.in_customization and self.service_dialogs.is_opened and
            self.title.text == 'Dialog "{}"'.format(self.context['object'].tab_label)
        )
@attr.s
class Tab(BaseEntity):
    """A class representing one Tab in the UI."""

    tab_label = attr.ib()
    tab_desc = attr.ib(default=None)

    _collections = {'boxes': BoxCollection}

    @property
    def boxes(self):
        return self.collections.boxes

    @property
    def tree_path(self):
        # This tab's accordion-tree path: the dialog's path plus our label.
        return self.parent.tree_path + [self.tab_label]

    @property
    def dialog(self):
        """ Returns parent object - Dialog"""
        # Imported lazily to avoid a circular import with service_dialogs.
        from .service_dialogs import Dialog
        return parent_of_type(self, Dialog)


@attr.s
class TabCollection(BaseCollection):
    """Collection of Tab entities under one dialog."""

    ENTITY = Tab

    @property
    def tree_path(self):
        return self.parent.tree_path

    def create(self, tab_label=None, tab_desc=None):
        """ Create tab method"""
        view = navigate_to(self, "Add")
        view.new_tab.click()
        view.edit_tab.click()
        view.fill({'tab_label': tab_label, 'tab_desc': tab_desc})
        view.save_button.click()
        return self.instantiate(tab_label=tab_label, tab_desc=tab_desc)
@navigator.register(TabCollection)
class Add(CFMENavigateStep):
    """Navigation step: open the 'add tab' editor from the dialog's Add view."""

    VIEW = AddTabView
    prerequisite = NavigateToAttribute('parent.parent', 'Add')

    def step(self, *args, **kwargs):
        self.prerequisite_view.create_tab.click()
| gpl-2.0 |
felliott/osf.io | scripts/migration/migrate_share_registration_data.py | 6 | 1325 | # TODO: Consider rewriting as management command
import logging
import sys
import django
from django.db import transaction
django.setup()
from osf.models import Registration
from scripts import utils as script_utils
from website import settings
from website.app import init_app
from api.share.utils import update_share
logger = logging.getLogger(__name__)
def migrate(dry_run):
    """Send every public, non-deleted registration to SHARE.

    Args:
        dry_run: when True, only log what would be sent.

    Raises:
        AssertionError: if SHARE connection settings are missing.
    """
    assert settings.SHARE_URL, 'SHARE_URL must be set to migrate.'
    assert settings.SHARE_API_TOKEN, 'SHARE_API_TOKEN must be set to migrate.'
    registrations = Registration.objects.filter(is_deleted=False, is_public=True)
    registrations_count = registrations.count()
    count = 0

    logger.info('Preparing to migrate {} registrations.'.format(registrations_count))
    # iterator() streams rows instead of loading the full queryset.
    for registration in registrations.iterator():
        count += 1
        logger.info('{}/{} - {}'.format(count, registrations_count, registration._id))
        if not dry_run:
            update_share(registration)
            logger.info('Registration {} was sent to SHARE.'.format(registration._id))


def main():
    """Entry point: parse '--dry', set up logging/app, run inside one txn."""
    dry_run = '--dry' in sys.argv
    if not dry_run:
        script_utils.add_file_logger(logger, __file__)
    init_app(set_backends=True, routes=False)
    with transaction.atomic():
        migrate(dry_run)


if __name__ == '__main__':
    main()
| apache-2.0 |
Codefans-fan/odoo | openerp/addons/base/res/res_currency.py | 15 | 14571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import time
import math
from openerp import api, fields as fields2
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_round, float_is_zero, float_compare
from openerp.tools.translate import _
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class res_currency(osv.osv):
def _current_rate(self, cr, uid, ids, name, arg, context=None):
    # Functional-field getter: latest known rate, raising when none exists.
    return self._get_current_rate(cr, uid, ids, context=context)

def _current_rate_silent(self, cr, uid, ids, name, arg, context=None):
    # Same as _current_rate but returns 0 instead of raising.
    return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context)

def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
    """Return {currency_id: rate} using the most recent rate whose date is
    <= context['date'] (today by default).

    :param raise_on_no_rate: when False, currencies without any applicable
        rate get 0 instead of raising.
    """
    if context is None:
        context = {}
    res = {}
    date = context.get('date') or time.strftime('%Y-%m-%d')
    for id in ids:
        cr.execute('SELECT rate FROM res_currency_rate '
                   'WHERE currency_id = %s '
                   'AND name <= %s '
                   'ORDER BY name desc LIMIT 1',
                   (id, date))
        if cr.rowcount:
            res[id] = cr.fetchone()[0]
        elif not raise_on_no_rate:
            res[id] = 0
        else:
            currency = self.browse(cr, uid, id, context=context)
            # Bug fix: interpolate AFTER translating; _("... %s ..." % x)
            # looks up the already-interpolated string in the translation
            # catalog and can never match.
            raise osv.except_osv(
                _('Error!'),
                _("No currency rate associated for currency '%s' for the given period") % currency.name)
    return res
_name = "res.currency"
_description = "Currency"

_columns = {
    # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
    'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"),
    'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."),
    'rate': fields.function(_current_rate, string='Current Rate', digits=(12, 6),
        help='The rate of the currency to the currency of rate 1.'),
    # Do not use for computation ! Same as rate field with silent failing
    'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12, 6),
        help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'),
    'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),
    'accuracy': fields.integer('Computational Accuracy'),
    'rounding': fields.float('Rounding Factor', digits=(12, 6)),
    'active': fields.boolean('Active'),
    'company_id': fields.many2one('res.company', 'Company'),
    'base': fields.boolean('Base'),
    'position': fields.selection([('after', 'After Amount'), ('before', 'Before Amount')], 'Symbol Position',
        help="Determines where the currency symbol should be placed after or before the amount."),
}

_defaults = {
    'active': 1,
    'position': 'after',
    'rounding': 0.01,
    'accuracy': 4,
    'company_id': False,
}

_sql_constraints = [
    # this constraint does not cover all cases due to SQL NULL handling for company_id,
    # so it is complemented with a unique index (see below). The constraint and index
    # share the same prefix so that IntegrityError triggered by the index will be caught
    # and reported to the user with the constraint's error message.
    ('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'),
]

_order = "name"
    def init(self, cr):
        # CONSTRAINT/UNIQUE INDEX on (name,company_id)
        # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
        # only support field names in constraint definitions, and we need a function here:
        # we need to special-case company_id to treat all NULL company_id as equal, otherwise
        # we would allow duplicate "global" currencies (all having company_id == NULL)
        cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""")
        if not cr.fetchone():
            # Index is missing: create it, mapping NULL company_id to -1 so
            # that two "global" currencies with the same name collide.
            cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
                          ON res_currency
                          (name, (COALESCE(company_id,-1)))""")
    # New-API computed field: date of the most recent rate of this currency.
    date = fields2.Date(compute='compute_date')
    @api.one
    @api.depends('rate_ids.name')
    def compute_date(self):
        # res.currency.rate is ordered "name desc" (see res_currency_rate
        # below), so the first related record carries the latest rate date;
        # an empty recordset yields False.
        self.date = self.rate_ids[:1].name
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
results = super(res_currency,self)\
.name_search(cr, user, name, args, operator=operator, context=context, limit=limit)
if not results:
name_match = CURRENCY_DISPLAY_PATTERN.match(name)
if name_match:
results = super(res_currency,self)\
.name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit)
return results
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')
return [(x['id'], tools.ustr(x['name'])) for x in reads]
@api.v8
def round(self, amount):
""" Return `amount` rounded according to currency `self`. """
return float_round(amount, precision_rounding=self.rounding)
@api.v7
def round(self, cr, uid, currency, amount):
"""Return ``amount`` rounded according to ``currency``'s
rounding rules.
:param Record currency: currency for which we are rounding
:param float amount: the amount to round
:return: rounded float
"""
return float_round(amount, precision_rounding=currency.rounding)
@api.v8
def compare_amounts(self, amount1, amount2):
""" Compare `amount1` and `amount2` after rounding them according to
`self`'s precision. An amount is considered lower/greater than
another amount if their rounded value is different. This is not the
same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision, so this
method would return 0. However 0.006 and 0.002 are considered
different (returns 1) because they respectively round to 0.01 and
0.0, even though 0.006-0.002 = 0.004 which would be considered zero
at 2 digits precision.
"""
return float_compare(amount1, amount2, precision_rounding=self.rounding)
@api.v7
def compare_amounts(self, cr, uid, currency, amount1, amount2):
"""Compare ``amount1`` and ``amount2`` after rounding them according to the
given currency's precision..
An amount is considered lower/greater than another amount if their rounded
value is different. This is not the same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0.
However 0.006 and 0.002 are considered different (returns 1) because
they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
:param Record currency: currency for which we are rounding
:param float amount1: first amount to compare
:param float amount2: second amount to compare
:return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
equal to, or greater than ``amount2``, according to
``currency``'s rounding.
"""
return float_compare(amount1, amount2, precision_rounding=currency.rounding)
@api.v8
def is_zero(self, amount):
""" Return true if `amount` is small enough to be treated as zero
according to currency `self`'s rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round
after computing the difference, while the latter will round before,
giving different results, e.g., 0.006 and 0.002 at 2 digits precision.
"""
return float_is_zero(amount, precision_rounding=self.rounding)
@api.v7
def is_zero(self, cr, uid, currency, amount):
"""Returns true if ``amount`` is small enough to be treated as
zero according to ``currency``'s rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param Record currency: currency for which we are rounding
:param float amount: amount to compare with currency's zero
"""
return float_is_zero(amount, precision_rounding=currency.rounding)
    def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
        # Return the multiplicative factor converting amounts expressed in
        # ``from_currency`` into ``to_currency``, based on both currencies'
        # current ``rate`` function field.
        if context is None:
            context = {}
        ctx = context.copy()
        # Re-browse so the ``rate`` function field is evaluated under ctx.
        # NOTE(review): ctx is a plain copy of context and nothing is added
        # to it before browsing — confirm whether a 'date' key was meant to
        # be injected here.
        from_currency = self.browse(cr, uid, from_currency.id, context=ctx)
        to_currency = self.browse(cr, uid, to_currency.id, context=ctx)
        if from_currency.rate == 0 or to_currency.rate == 0:
            # A zero rate makes the conversion meaningless (and a zero
            # from_currency rate would divide by zero below), so abort with
            # an explicit error naming the offending currency.
            date = context.get('date', time.strftime('%Y-%m-%d'))
            if from_currency.rate == 0:
                currency_symbol = from_currency.symbol
            else:
                currency_symbol = to_currency.symbol
            raise osv.except_osv(_('Error'), _('No rate found \n' \
                    'for the currency: %s \n' \
                    'at the date: %s') % (currency_symbol, date))
        return to_currency.rate/from_currency.rate
def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None):
if (to_currency.id == from_currency.id):
if round:
return self.round(cr, uid, to_currency, from_amount)
else:
return from_amount
else:
rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
if round:
return self.round(cr, uid, to_currency, from_amount * rate)
else:
return from_amount * rate
    @api.v7
    def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount,
                round=True, context=None):
        """Old-API entry point: convert ``from_amount`` between two currency ids."""
        context = context or {}
        # Tolerate a missing id on either side by converting to/from itself.
        if not from_currency_id:
            from_currency_id = to_currency_id
        if not to_currency_id:
            to_currency_id = from_currency_id
        # Browse both currencies in one call, then map each record back to
        # its role since the returned order is not relied upon.
        # NOTE(review): if both ids are identical this indexes xc[1] only
        # when the first check fails — confirm browse() returns one record
        # per requested id for duplicate ids.
        xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context)
        from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1]
        to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1]
        return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context)
@api.v8
def compute(self, from_amount, to_currency, round=True):
""" Convert `from_amount` from currency `self` to `to_currency`. """
assert self, "compute from unknown currency"
assert to_currency, "compute to unknown currency"
# apply conversion rate
if self == to_currency:
to_amount = from_amount
else:
to_amount = from_amount * self._get_conversion_rate(self, to_currency)
# apply rounding
return to_currency.round(to_amount) if round else to_amount
def get_format_currencies_js_function(self, cr, uid, context=None):
""" Returns a string that can be used to instanciate a javascript function that formats numbers as currencies.
That function expects the number as first parameter and the currency id as second parameter. In case of failure it returns undefined."""
function = ""
for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'rounding', 'position'], context=context):
digits = int(math.ceil(math.log10(1 / row['rounding'])))
symbol = row['symbol'] or row['name']
format_number_str = "openerp.web.format_value(arguments[0], {type: 'float', digits: [69," + str(digits) + "]}, 0.00)"
if row['position'] == 'after':
return_str = "return " + format_number_str + " + '\\xA0" + symbol + "';"
else:
return_str = "return '" + symbol + "\\xA0' + " + format_number_str + ";"
function += "if (arguments[1] === " + str(row['id']) + ") { " + return_str + " }"
return function
class res_currency_rate(osv.osv):
    _name = "res.currency.rate"
    _description = "Currency Rate"
    # One record gives the value of a currency (relative to the currency of
    # rate 1) starting at a given datetime.
    _columns = {
        'name': fields.datetime('Date', required=True, select=True),
        'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
    }
    _defaults = {
        # Default the rate date to the start of the current day.
        'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'),
    }
    # Most recent rates first; res.currency reads rate_ids[:1] as "latest".
    _order = "name desc"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Google/Drive/Parents/Delete.py | 5 | 4626 | # -*- coding: utf-8 -*-
###############################################################################
#
# Delete
# Removes a parent from a file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Delete(Choreography):
    # Choreo wrapper for Google Drive "Parents/Delete": removes a parent
    # from a file via the Temboo execution service.
    def __init__(self, temboo_session):
        """
        Create a new instance of the Delete Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Delete, self).__init__(temboo_session, '/Library/Google/Drive/Parents/Delete')
    def new_input_set(self):
        # Factory for the input container used to parameterize this Choreo.
        return DeleteInputSet()
    def _make_result_set(self, result, path):
        # Internal: wrap a raw execution result in this Choreo's ResultSet.
        return DeleteResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal: wrap an in-flight execution handle.
        return DeleteChoreographyExecution(session, exec_id, path)
class DeleteInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Delete
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply records a named input via the InputSet base class.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(DeleteInputSet, self)._set_input('AccessToken', value)
    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(DeleteInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(DeleteInputSet, self)._set_input('ClientSecret', value)
    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
        """
        super(DeleteInputSet, self)._set_input('FileID', value)
    def set_ParentID(self, value):
        """
        Set the value of the ParentID input for this Choreo. ((required, string) The ID of the parent.)
        """
        super(DeleteInputSet, self)._set_input('ParentID', value)
    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(DeleteInputSet, self)._set_input('RefreshToken', value)
class DeleteResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Delete Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``;
        # kept as-is because external callers may pass it by keyword.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)
    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class DeleteChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronously-run Delete Choreo.
    def _make_result_set(self, response, path):
        # Internal: wrap the raw response in this Choreo's ResultSet.
        return DeleteResultSet(response, path)
| gpl-2.0 |
markrawlingson/SickRage | autoProcessTV/lib/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    # Probes a byte stream for the Shift-JIS (Japanese) encoding by combining
    # a byte-level state machine with character-distribution and context
    # analyzers.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # Delegated: the context analyzer reports the detected variant name.
        return self._mContextAnalyzer.get_charset_name()
    def feed(self, aBuf):
        # Run each byte through the coding state machine; every time a
        # character completes (eStart), feed it to both analyzers.
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # Illegal byte sequence: this cannot be Shift-JIS.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # The first byte of this buffer completes a character
                    # begun in the previous feed() call: stitch it together
                    # with the bytes saved in _mLastChar.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the trailing byte in case a character straddles buffers.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Enough evidence gathered: short-circuit to a positive verdict.
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Overall confidence is the stronger of the two analyzers' scores.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| gpl-3.0 |
tiborsimko/invenio-records-rest | invenio_records_rest/sorter.py | 3 | 4470 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Sorter factories for REST API.
The default sorter factory allows you to define possible sort options in
the :data:`invenio_records_rest.config.RECORDS_REST_SORT_OPTIONS`
configuration variable. The sort options are defined per index alias
(e.g. ``records``). If more fine grained control is needed a custom sorter
factory can be provided to Records-REST instead.
See Elasticsearch Reference Manual for full details of sorting capabilities:
https://www.elastic.co/guide/en/elasticsearch/reference/2.4/search-request-sort.html
"""
from __future__ import absolute_import, print_function
import copy
import six
from flask import current_app, request
def geolocation_sort(field_name, argument, unit, mode=None,
                     distance_type=None):
    """Sort field factory for geo-location based sorting.

    :param argument: Name of URL query string field to parse pin location
        from. Multiple locations can be provided. Each location can be
        either a string "latitude,longitude" or a geohash.
    :param unit: Distance unit (e.g. km).
    :param mode: Sort mode (avg, min, max).
    :param distance_type: Distance calculation mode.
    :returns: Function that returns geolocation sort field.
    """
    def inner(asc):
        # Pin locations are read lazily from the current request, so the
        # factory itself can be configured once at application setup time.
        locations = request.values.getlist(argument, type=str)
        criteria = {
            field_name: locations,
            'order': 'asc' if asc else 'desc',
            'unit': unit,
        }
        if mode:
            criteria['mode'] = mode
        if distance_type:
            criteria['distance_type'] = distance_type
        return {'_geo_distance': criteria}
    return inner
def parse_sort_field(field_value):
    """Split a sort expression into its field name and direction.

    :param field_value: Field value (e.g. ``'key'`` or ``'-key'``).
    :returns: Tuple ``(field, ascending)``; a leading ``-`` means
        descending order.
    """
    ascending = not field_value.startswith('-')
    return (field_value if ascending else field_value[1:], ascending)
def reverse_order(order_value):
    """Return the opposite sort order of ``order_value``.

    :param order_value: Either the string ``asc`` or ``desc``.
    :returns: ``'asc'`` for ``'desc'``, ``'desc'`` for ``'asc'`` and
        ``None`` for any other value.
    """
    return {'desc': 'asc', 'asc': 'desc'}.get(order_value)
def eval_field(field, asc):
    """Evaluate a field for sorting purpose.

    :param field: Field definition (string, dict or callable).
    :param asc: ``True`` if order is ascending, ``False`` if descending.
    :returns: Dictionary with the sort field query.
    """
    if isinstance(field, dict):
        if asc:
            return field
        # Descending: flip the 'order' subkey of the (single) field entry
        # on a deep copy so the configured definition is never mutated.
        flipped = copy.deepcopy(field)
        key = list(flipped.keys())[0]
        flipped[key]['order'] = reverse_order(flipped[key]['order'])
        return flipped
    elif callable(field):
        return field(asc)
    else:
        # Plain string: combine its embedded direction with ``asc``.
        key, key_asc = parse_sort_field(field)
        ascending = key_asc if asc else not key_asc
        return {key: {'order': 'asc' if ascending else 'desc'}}
def default_sorter_factory(search, index):
    """Default sort query factory.

    :param search: Search query.
    :param index: Index to search in.
    :returns: Tuple of (query, URL arguments).
    """
    sort_arg_name = 'sort'
    urlfield = request.values.get(sort_arg_name, '', type=str)
    if not urlfield:
        # No explicit ?sort=... argument: fall back to the configured
        # default, which differs depending on whether a query is present.
        # cast to six.text_type to handle unicodes in Python 2
        has_query = request.values.get('q', type=six.text_type)
        default_kind = 'query' if has_query else 'noquery'
        defaults = current_app.config['RECORDS_REST_DEFAULT_SORT']
        urlfield = defaults.get(index, {}).get(default_kind, '')
    # Parse sort argument
    key, asc = parse_sort_field(urlfield)
    # Look up the configured sort options for this index/key.
    index_options = current_app.config['RECORDS_REST_SORT_OPTIONS'].get(index, {})
    sort_options = index_options.get(key)
    if sort_options is None:
        return (search, {})
    # Apply every configured sort field in order.
    sort_fields = [eval_field(f, asc) for f in sort_options['fields']]
    return (search.sort(*sort_fields), {sort_arg_name: urlfield})
| mit |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/django/contrib/comments/models.py | 125 | 7754 | from django.conf import settings
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
# Maximum length of a comment body; overridable via settings.COMMENT_MAX_LENGTH.
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class BaseCommentAbstractModel(models.Model):
    """
    An abstract base class that any custom comment models probably should
    subclass.
    """
    # Content-object fields: a generic foreign key built from a content type
    # plus a *textual* primary key, so comments can attach to any model
    # (including ones with non-integer primary keys).
    content_type = models.ForeignKey(ContentType,
            verbose_name=_('content type'),
            related_name="content_type_set_for_%(class)s")
    object_pk = models.TextField(_('object ID'))
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    # Metadata about the comment
    site = models.ForeignKey(Site)
    class Meta:
        abstract = True
    def get_content_object_url(self):
        """
        Get a URL suitable for redirecting to the content object.
        """
        return urlresolvers.reverse(
            "comments-url-redirect",
            args=(self.content_type_id, self.object_pk)
        )
@python_2_unicode_compatible
class Comment(BaseCommentAbstractModel):
    """
    A user comment about some object.
    """
    # Who posted this comment? If ``user`` is set then it was an authenticated
    # user; otherwise at least user_name should have been set and the comment
    # was posted by a non-authenticated user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
                    blank=True, null=True, related_name="%(class)s_comments")
    user_name = models.CharField(_("user's name"), max_length=50, blank=True)
    user_email = models.EmailField(_("user's email address"), blank=True)
    user_url = models.URLField(_("user's URL"), blank=True)
    comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)
    # Metadata about the comment
    submit_date = models.DateTimeField(_('date/time submitted'), default=None)
    ip_address = models.GenericIPAddressField(_('IP address'), unpack_ipv4=True, blank=True, null=True)
    is_public = models.BooleanField(_('is public'), default=True,
                help_text=_('Uncheck this box to make the comment effectively ' \
                            'disappear from the site.'))
    is_removed = models.BooleanField(_('is removed'), default=False,
                help_text=_('Check this box if the comment is inappropriate. ' \
                            'A "This comment has been removed" message will ' \
                            'be displayed instead.'))
    # Manager
    objects = CommentManager()
    class Meta:
        db_table = "django_comments"
        ordering = ('submit_date',)
        permissions = [("can_moderate", "Can moderate comments")]
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
    def __str__(self):
        return "%s: %s..." % (self.name, self.comment[:50])
    def save(self, *args, **kwargs):
        # Stamp the submission time on first save; default=None above lets
        # callers provide an explicit submit_date.
        if self.submit_date is None:
            self.submit_date = timezone.now()
        super(Comment, self).save(*args, **kwargs)
    def _get_userinfo(self):
        """
        Get a dictionary that pulls together information about the poster
        safely for both authenticated and non-authenticated comments.
        This dict will have ``name``, ``email``, and ``url`` fields.
        """
        # Computed lazily and memoized on the instance as ``_userinfo``.
        if not hasattr(self, "_userinfo"):
            userinfo = {
                "name": self.user_name,
                "email": self.user_email,
                "url": self.user_url
            }
            if self.user_id:
                u = self.user
                if u.email:
                    userinfo["email"] = u.email
                # If the user has a full name, use that for the user name.
                # However, a given user_name overrides the raw user.username,
                # so only use that if this comment has no associated name.
                if u.get_full_name():
                    userinfo["name"] = self.user.get_full_name()
                elif not self.user_name:
                    userinfo["name"] = u.get_username()
            self._userinfo = userinfo
        return self._userinfo
    userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)
    def _get_name(self):
        return self.userinfo["name"]
    def _set_name(self, val):
        # Name is immutable for comments tied to an authenticated user.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the name is read-only."))
        self.user_name = val
    name = property(_get_name, _set_name, doc="The name of the user who posted this comment")
    def _get_email(self):
        return self.userinfo["email"]
    def _set_email(self, val):
        # Email is immutable for comments tied to an authenticated user.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the email is read-only."))
        self.user_email = val
    email = property(_get_email, _set_email, doc="The email of the user who posted this comment")
    def _get_url(self):
        return self.userinfo["url"]
    def _set_url(self, val):
        self.user_url = val
    url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")
    def get_absolute_url(self, anchor_pattern="#c%(id)s"):
        # Redirect URL for the commented object plus a per-comment anchor.
        return self.get_content_object_url() + (anchor_pattern % self.__dict__)
    def get_as_text(self):
        """
        Return this comment as plain text. Useful for emails.
        """
        d = {
            'user': self.user or self.name,
            'date': self.submit_date,
            'comment': self.comment,
            'domain': self.site.domain,
            'url': self.get_absolute_url()
        }
        return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
@python_2_unicode_compatible
class CommentFlag(models.Model):
    """
    Records a flag on a comment. This is intentionally flexible; right now, a
    flag could be:
    * A "removal suggestion" -- where a user suggests a comment for (potential) removal.
    * A "moderator deletion" -- used when a moderator deletes a comment.
    You can (ab)use this model to add other flags, if needed. However, by
    design users are only allowed to flag a comment with a given flag once;
    if you want rating look elsewhere.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="comment_flags")
    comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
    # Free-form flag label, indexed for lookups; well-known values below.
    flag = models.CharField(_('flag'), max_length=30, db_index=True)
    flag_date = models.DateTimeField(_('date'), default=None)
    # Constants for flag types
    SUGGEST_REMOVAL = "removal suggestion"
    MODERATOR_DELETION = "moderator deletion"
    MODERATOR_APPROVAL = "moderator approval"
    class Meta:
        db_table = 'django_comment_flags'
        # One flag of a given type per user per comment.
        unique_together = [('user', 'comment', 'flag')]
        verbose_name = _('comment flag')
        verbose_name_plural = _('comment flags')
    def __str__(self):
        return "%s flag of comment ID %s by %s" % \
            (self.flag, self.comment_id, self.user.get_username())
    def save(self, *args, **kwargs):
        # Stamp the flag time on first save; default=None above lets
        # callers provide an explicit flag_date.
        if self.flag_date is None:
            self.flag_date = timezone.now()
        super(CommentFlag, self).save(*args, **kwargs)
| mit |
lochiiconnectivity/boto | tests/integration/gs/test_resumable_downloads.py | 110 | 16174 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests of resumable downloads.
"""
import errno
import os
import re
import boto
from boto.s3.resumable_download_handler import get_cur_file_size
from boto.s3.resumable_download_handler import ResumableDownloadHandler
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableDownloadException
from cb_test_harness import CallbackTestHarness
from tests.integration.gs.testcase import GSTestCase
# Object sizes used by the download tests below.
SMALL_KEY_SIZE = 2 * 1024 # 2 KB.
LARGE_KEY_SIZE = 500 * 1024 # 500 KB.
class ResumableDownloadTests(GSTestCase):
"""Resumable download test suite."""
    def make_small_key(self):
        # Upload a SMALL_KEY_SIZE object of random bytes to the test bucket;
        # return (expected content, key) so tests can verify downloads.
        small_src_key_as_string = os.urandom(SMALL_KEY_SIZE)
        small_src_key = self._MakeKey(data=small_src_key_as_string)
        return small_src_key_as_string, small_src_key
def make_tracker_file(self, tmpdir=None):
if not tmpdir:
tmpdir = self._MakeTempDir()
tracker_file = os.path.join(tmpdir, 'tracker')
return tracker_file
def make_dst_fp(self, tmpdir=None):
if not tmpdir:
tmpdir = self._MakeTempDir()
dst_file = os.path.join(tmpdir, 'dstfile')
return open(dst_file, 'w')
    def test_non_resumable_download(self):
        """
        Tests that non-resumable downloads work
        """
        dst_fp = self.make_dst_fp()
        small_src_key_as_string, small_src_key = self.make_small_key()
        # Plain download path: no res_download_handler involved.
        small_src_key.get_contents_to_file(dst_fp)
        # The whole object must have been written, and the stored content
        # must match what was uploaded.
        self.assertEqual(SMALL_KEY_SIZE,
                         get_cur_file_size(dst_fp))
        self.assertEqual(small_src_key_as_string,
                         small_src_key.get_contents_as_string())
    def test_download_without_persistent_tracker(self):
        """
        Tests a single resumable download, with no tracker persistence
        """
        # No tracker_file_name given: resume state lives only in memory.
        res_download_handler = ResumableDownloadHandler()
        dst_fp = self.make_dst_fp()
        small_src_key_as_string, small_src_key = self.make_small_key()
        small_src_key.get_contents_to_file(
            dst_fp, res_download_handler=res_download_handler)
        # Full size and content must match the uploaded object.
        self.assertEqual(SMALL_KEY_SIZE,
                         get_cur_file_size(dst_fp))
        self.assertEqual(small_src_key_as_string,
                         small_src_key.get_contents_as_string())
    def test_failed_download_with_persistent_tracker(self):
        """
        Tests that failed resumable download leaves a correct tracker file
        """
        harness = CallbackTestHarness()
        tmpdir = self._MakeTempDir()
        tracker_file_name = self.make_tracker_file(tmpdir)
        dst_fp = self.make_dst_fp(tmpdir)
        # num_retries=0 makes the first harness-injected failure fatal.
        res_download_handler = ResumableDownloadHandler(
            tracker_file_name=tracker_file_name, num_retries=0)
        small_src_key_as_string, small_src_key = self.make_small_key()
        try:
            small_src_key.get_contents_to_file(
                dst_fp, cb=harness.call,
                res_download_handler=res_download_handler)
            self.fail('Did not get expected ResumableDownloadException')
        except ResumableDownloadException, e:
            # We'll get a ResumableDownloadException at this point because
            # of CallbackTestHarness (above). Check that the tracker file was
            # created correctly.
            self.assertEqual(e.disposition,
                             ResumableTransferDisposition.ABORT_CUR_PROCESS)
            self.assertTrue(os.path.exists(tracker_file_name))
            f = open(tracker_file_name)
            # The tracker's first line records the source object's etag
            # (quotes stripped) so a later resume can detect changes.
            etag_line = f.readline()
            self.assertEquals(etag_line.rstrip('\n'), small_src_key.etag.strip('"\''))
    def test_retryable_exception_recovery(self):
        """
        Tests handling of a retryable exception
        """
        # Test one of the RETRYABLE_EXCEPTIONS.
        exception = ResumableDownloadHandler.RETRYABLE_EXCEPTIONS[0]
        harness = CallbackTestHarness(exception=exception)
        # NOTE(review): num_retries=1 assumes the harness raises only once —
        # confirm against cb_test_harness.CallbackTestHarness.
        res_download_handler = ResumableDownloadHandler(num_retries=1)
        dst_fp = self.make_dst_fp()
        small_src_key_as_string, small_src_key = self.make_small_key()
        small_src_key.get_contents_to_file(
            dst_fp, cb=harness.call,
            res_download_handler=res_download_handler)
        # Ensure downloaded object has correct content.
        self.assertEqual(SMALL_KEY_SIZE,
                         get_cur_file_size(dst_fp))
        self.assertEqual(small_src_key_as_string,
                         small_src_key.get_contents_as_string())
    def test_broken_pipe_recovery(self):
        """
        Tests handling of a Broken Pipe (which interacts with an httplib bug)
        """
        # Inject an EPIPE IOError through the progress callback and verify
        # the handler retries past it.
        exception = IOError(errno.EPIPE, "Broken pipe")
        harness = CallbackTestHarness(exception=exception)
        res_download_handler = ResumableDownloadHandler(num_retries=1)
        dst_fp = self.make_dst_fp()
        small_src_key_as_string, small_src_key = self.make_small_key()
        small_src_key.get_contents_to_file(
            dst_fp, cb=harness.call,
            res_download_handler=res_download_handler)
        # Ensure downloaded object has correct content.
        self.assertEqual(SMALL_KEY_SIZE,
                         get_cur_file_size(dst_fp))
        self.assertEqual(small_src_key_as_string,
                         small_src_key.get_contents_as_string())
def test_non_retryable_exception_handling(self):
"""
Tests resumable download that fails with a non-retryable exception
"""
harness = CallbackTestHarness(
exception=OSError(errno.EACCES, 'Permission denied'))
res_download_handler = ResumableDownloadHandler(num_retries=1)
dst_fp = self.make_dst_fp()
small_src_key_as_string, small_src_key = self.make_small_key()
try:
small_src_key.get_contents_to_file(
dst_fp, cb=harness.call,
res_download_handler=res_download_handler)
self.fail('Did not get expected OSError')
except OSError, e:
# Ensure the error was re-raised.
self.assertEqual(e.errno, 13)
def test_failed_and_restarted_download_with_persistent_tracker(self):
    """
    Tests resumable download that fails once and then completes,
    with tracker file
    """
    # Harness constructed with defaults — presumably it fails the transfer
    # once, which the handler (num_retries=1) then retries; confirm against
    # CallbackTestHarness's default behavior.
    harness = CallbackTestHarness()
    tmpdir = self._MakeTempDir()
    tracker_file_name = self.make_tracker_file(tmpdir)
    dst_fp = self.make_dst_fp(tmpdir)
    small_src_key_as_string, small_src_key = self.make_small_key()
    res_download_handler = ResumableDownloadHandler(
        tracker_file_name=tracker_file_name, num_retries=1)
    small_src_key.get_contents_to_file(
        dst_fp, cb=harness.call,
        res_download_handler=res_download_handler)
    # Ensure downloaded object has correct content.
    self.assertEqual(SMALL_KEY_SIZE,
                     get_cur_file_size(dst_fp))
    self.assertEqual(small_src_key_as_string,
                     small_src_key.get_contents_as_string())
    # Ensure tracker file deleted (the handler removes it on success).
    self.assertFalse(os.path.exists(tracker_file_name))
def test_multiple_in_process_failures_then_succeed(self):
    """
    Tests resumable download that fails twice in one process, then completes
    """
    # NOTE(review): despite the docstring, no CallbackTestHarness with
    # induced failures is wired in here, so this appears to exercise only
    # the plain success path — confirm against the upstream boto test.
    res_download_handler = ResumableDownloadHandler(num_retries=3)
    dst_fp = self.make_dst_fp()
    small_src_key_as_string, small_src_key = self.make_small_key()
    small_src_key.get_contents_to_file(
        dst_fp, res_download_handler=res_download_handler)
    # Ensure downloaded object has correct content.
    self.assertEqual(SMALL_KEY_SIZE,
                     get_cur_file_size(dst_fp))
    self.assertEqual(small_src_key_as_string,
                     small_src_key.get_contents_as_string())
def test_multiple_in_process_failures_then_succeed_with_tracker_file(self):
    """
    Tests resumable download that fails completely in one process,
    then when restarted completes, using a tracker file
    """
    # Set up test harness that causes more failures than a single
    # ResumableDownloadHandler instance will handle, writing enough data
    # before the first failure that some of it survives that process run.
    harness = CallbackTestHarness(
        fail_after_n_bytes=LARGE_KEY_SIZE/2, num_times_to_fail=2)
    larger_src_key_as_string = os.urandom(LARGE_KEY_SIZE)
    larger_src_key = self._MakeKey(data=larger_src_key_as_string)
    tmpdir = self._MakeTempDir()
    tracker_file_name = self.make_tracker_file(tmpdir)
    dst_fp = self.make_dst_fp(tmpdir)
    # num_retries=0: the first in-process failure aborts this "process run",
    # leaving the tracker file behind for the simulated restart below.
    res_download_handler = ResumableDownloadHandler(
        tracker_file_name=tracker_file_name, num_retries=0)
    try:
        larger_src_key.get_contents_to_file(
            dst_fp, cb=harness.call,
            res_download_handler=res_download_handler)
        self.fail('Did not get expected ResumableDownloadException')
    except ResumableDownloadException, e:
        self.assertEqual(e.disposition,
                         ResumableTransferDisposition.ABORT_CUR_PROCESS)
        # Ensure a tracker file survived.
        self.assertTrue(os.path.exists(tracker_file_name))
    # Try it one more time; this time should succeed, resuming from the
    # partial content recorded via the tracker file.
    larger_src_key.get_contents_to_file(
        dst_fp, cb=harness.call,
        res_download_handler=res_download_handler)
    self.assertEqual(LARGE_KEY_SIZE,
                     get_cur_file_size(dst_fp))
    self.assertEqual(larger_src_key_as_string,
                     larger_src_key.get_contents_as_string())
    self.assertFalse(os.path.exists(tracker_file_name))
    # Ensure some of the file was downloaded both before and after failure,
    # i.e. the second run really resumed rather than restarting from zero.
    self.assertTrue(
        len(harness.transferred_seq_before_first_failure) > 1 and
        len(harness.transferred_seq_after_first_failure) > 1)
def test_download_with_inital_partial_download_before_failure(self):
    """
    Tests resumable download that successfully downloads some content
    before it fails, then restarts and completes
    """
    # NOTE(review): "inital" in the method name is a typo ("initial"), but
    # renaming would change the test id, so it is left as-is.
    # Set up harness to fail download after several hundred KB so download
    # server will have saved something before we retry.
    harness = CallbackTestHarness(
        fail_after_n_bytes=LARGE_KEY_SIZE/2)
    larger_src_key_as_string = os.urandom(LARGE_KEY_SIZE)
    larger_src_key = self._MakeKey(data=larger_src_key_as_string)
    res_download_handler = ResumableDownloadHandler(num_retries=1)
    dst_fp = self.make_dst_fp()
    larger_src_key.get_contents_to_file(
        dst_fp, cb=harness.call,
        res_download_handler=res_download_handler)
    # Ensure downloaded object has correct content.
    self.assertEqual(LARGE_KEY_SIZE,
                     get_cur_file_size(dst_fp))
    self.assertEqual(larger_src_key_as_string,
                     larger_src_key.get_contents_as_string())
    # Ensure some of the file was downloaded both before and after failure,
    # proving the retry resumed instead of starting over.
    self.assertTrue(
        len(harness.transferred_seq_before_first_failure) > 1 and
        len(harness.transferred_seq_after_first_failure) > 1)
def test_zero_length_object_download(self):
    """
    Tests downloading a zero-length object (exercises boundary conditions).
    """
    download_handler = ResumableDownloadHandler()
    dest_fp = self.make_dst_fp()
    empty_key = self._MakeKey()
    empty_key.get_contents_to_file(dest_fp,
                                   res_download_handler=download_handler)
    # Nothing to transfer, so the destination file must remain empty.
    self.assertEqual(0, get_cur_file_size(dest_fp))
def test_download_with_invalid_tracker_etag(self):
    """
    Tests resumable download with a tracker file containing an invalid etag
    """
    tmp_dir = self._MakeTempDir()
    dst_fp = self.make_dst_fp(tmp_dir)
    small_src_key_as_string, small_src_key = self.make_small_key()
    invalid_etag_tracker_file_name = os.path.join(tmp_dir,
                                                  'invalid_etag_tracker')
    # Seed the tracker file with a value that is not a valid etag.
    f = open(invalid_etag_tracker_file_name, 'w')
    f.write('3.14159\n')
    f.close()
    res_download_handler = ResumableDownloadHandler(
        tracker_file_name=invalid_etag_tracker_file_name)
    # An error should be printed about the invalid tracker, but then the
    # handler should discard it and complete the download successfully.
    small_src_key.get_contents_to_file(
        dst_fp, res_download_handler=res_download_handler)
    self.assertEqual(SMALL_KEY_SIZE, get_cur_file_size(dst_fp))
    self.assertEqual(small_src_key_as_string,
                     small_src_key.get_contents_as_string())
def test_download_with_inconsistent_etag_in_tracker(self):
    """
    Tests resumable download with an inconsistent etag in tracker file
    """
    tmp_dir = self._MakeTempDir()
    dst_fp = self.make_dst_fp(tmp_dir)
    small_src_key_as_string, small_src_key = self.make_small_key()
    inconsistent_etag_tracker_file_name = os.path.join(tmp_dir,
                                                       'inconsistent_etag_tracker')
    # Write the key's etag reversed into the tracker so it is well-formed
    # but does not match the object (reversal via slicing replaces the
    # original char-by-char loop).
    f = open(inconsistent_etag_tracker_file_name, 'w')
    good_etag = small_src_key.etag.strip('"\'')
    f.write('%s\n' % good_etag[::-1])
    f.close()
    res_download_handler = ResumableDownloadHandler(
        tracker_file_name=inconsistent_etag_tracker_file_name)
    # An error should be printed about the expired tracker, but then the
    # handler should discard it and complete the download successfully.
    small_src_key.get_contents_to_file(
        dst_fp, res_download_handler=res_download_handler)
    self.assertEqual(SMALL_KEY_SIZE,
                     get_cur_file_size(dst_fp))
    self.assertEqual(small_src_key_as_string,
                     small_src_key.get_contents_as_string())
def test_download_with_unwritable_tracker_file(self):
"""
Tests resumable download with an unwritable tracker file
"""
# Make dir where tracker_file lives temporarily unwritable.
tmp_dir = self._MakeTempDir()
tracker_file_name = os.path.join(tmp_dir, 'tracker')
save_mod = os.stat(tmp_dir).st_mode
try:
os.chmod(tmp_dir, 0)
res_download_handler = ResumableDownloadHandler(
tracker_file_name=tracker_file_name)
except ResumableDownloadException, e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('Couldn\'t write URI tracker file'), -1)
finally:
# Restore original protection of dir where tracker_file lives.
os.chmod(tmp_dir, save_mod)
| mit |
jquacinella/flask-social | flask_social/providers/facebook.py | 3 | 1895 | # -*- coding: utf-8 -*-
"""
flask.ext.social.providers.facebook
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the Flask-Social facebook code
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import
import facebook
# OAuth2 provider description consumed by Flask-Social when registering the
# Facebook provider.  ``request_token_url`` is None because OAuth2 has no
# request-token step; the 'email' scope is requested so that
# get_connection_values() can read the user's email address.
config = {
    'id': 'facebook',
    'name': 'Facebook',
    'install': 'pip install facebook-sdk',
    'module': 'flask_social.providers.facebook',
    'base_url': 'https://graph.facebook.com/',
    'request_token_url': None,
    'access_token_url': '/oauth/access_token',
    'authorize_url': 'https://www.facebook.com/dialog/oauth',
    'request_token_params': {
        'scope': 'email'
    }
}
def get_api(connection, **kwargs):
    """Return a Graph API client authorized with the connection's token."""
    token = getattr(connection, 'access_token')
    return facebook.GraphAPI(token)
def get_provider_user_id(response, **kwargs):
    """Extract the Facebook user id from an OAuth response, or None."""
    # Guard clause: no response means the OAuth dance did not complete.
    if not response:
        return None
    graph = facebook.GraphAPI(response['access_token'])
    return graph.get_object("me")['id']
def get_connection_values(response, **kwargs):
    """Build the Flask-Social connection record from an OAuth response.

    Returns None when no response is available; otherwise queries the
    Graph API for the user's profile and assembles the connection dict.
    """
    if not response:
        return None
    token = response['access_token']
    profile = facebook.GraphAPI(token).get_object("me")
    uid = profile['id']
    return dict(
        provider_id=config['id'],
        provider_user_id=uid,
        access_token=token,
        secret=None,
        display_name=profile.get('username', None),
        full_name=profile.get('name', None),
        profile_url="http://facebook.com/profile.php?id=%s" % uid,
        image_url="http://graph.facebook.com/%s/picture" % uid,
        email=profile.get('email', '')
    )
def get_token_pair_from_response(response):
    """Normalize the OAuth response into an access_token/secret dict.

    OAuth2 has no token secret, so 'secret' is always None.
    """
    return {
        'access_token': response.get('access_token', None),
        'secret': None,
    }
| mit |
pyspace/test | pySPACE/missions/nodes/meta/same_input_layer.py | 1 | 22715 | """ Combine several other nodes together in parallel
This is useful to be combined with the
:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.
"""
import numpy
from pySPACE.environments.chains.node_chain import NodeChainFactory
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.prediction_vector import PredictionVector
# ensemble imports
import os
import fcntl
import fnmatch
import cPickle
import logging
from collections import defaultdict
from pySPACE.missions.nodes.meta.flow_node import FlowNode
from pySPACE.tools.filesystem import locate
class SameInputLayerNode(BaseNode):
    """ Encapsulates a set of other nodes that are executed in parallel in the flow.

    This node was a thin wrapper around MDP's SameInputLayer node
    but is now an own implementation.

    **Parameters**

    :enforce_unique_names:
        When combining time series channels or feature vectors,
        the node adds the index of the current node to the channel names or
        feature names as a prefix to enforce unique names.

        (*optional, default: True*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Same_Input_Layer
            parameters :
                 enforce_unique_names : True
                 nodes :
                    -
                        node : Time_Domain_Features
                        parameters :
                            moving_window_length : 1
                    -
                        node : STFT_Features
                        parameters :
                            frequency_band : [2.0, 8.0]
                            frequency_resolution : 1.0
    """
    def __init__(self, nodes, enforce_unique_names=True,
                 store=False, **kwargs):
        self.nodes = nodes  # needed to find out dimensions and trainability, ...
        super(SameInputLayerNode, self).__init__(**kwargs)
        # The sub-nodes are restored explicitly in reset(), so they must not
        # be part of the permanent state snapshot.
        self.permanent_state.pop("nodes")
        self.set_permanent_attributes(output_type=None,
                                      names=None,
                                      unique=enforce_unique_names)

    @staticmethod
    def node_from_yaml(layer_spec):
        """ Load the specs and initialize the layer nodes """
        # This node requires one parameter, namely a list of nodes
        assert("parameters" in layer_spec
               and "nodes" in layer_spec["parameters"]),\
            "SameInputLayerNode requires specification of a list of nodes!"
        # Create all nodes that are packed together in this layer
        layer_nodes = []
        for node_spec in layer_spec["parameters"]["nodes"]:
            node_obj = BaseNode.node_from_yaml(node_spec)
            layer_nodes.append(node_obj)
        layer_spec["parameters"].pop("nodes")
        # Create the node object
        node_obj = SameInputLayerNode(nodes=layer_nodes,
                                      **layer_spec["parameters"])
        return node_obj

    def reset(self):
        """ Also reset internal nodes """
        # Save the sub-nodes across the base-class reset (which wipes
        # non-permanent attributes) and restore them afterwards.
        nodes = self.nodes
        for node in nodes:
            node.reset()
        super(SameInputLayerNode, self).reset()
        self.nodes = nodes

    def register_input_node(self, input_node):
        """ All sub-nodes have the same input node """
        super(SameInputLayerNode, self).register_input_node(input_node)
        # Register the node as the input for all internal nodes
        for node in self.nodes:
            node.register_input_node(input_node)

    def _execute(self, data):
        """ Process the data through the internal nodes and merge the results

        All sub-nodes must return the same data type. FeatureVector and
        TimeSeries results are concatenated along the feature/channel axis;
        PredictionVector results are merged into flat label/prediction/
        predictor lists.
        """
        names = []
        result_array = None
        result_label = []
        result_predictor = []
        result_prediction = []
        # For all node-layers
        for node_index, node in enumerate(self.nodes):
            # Compute node's result
            node_result = node.execute(data)
            # Determine the output type of the node
            if self.output_type is None:
                self.output_type = type(node_result)
            else:
                assert (self.output_type == type(node_result)), \
                    "SameInputLayerNode requires that all of its layers return "\
                    "the same type. Types found: %s %s" \
                    % (self.output_type, type(node_result))
            # Merge the nodes' outputs depending on the type
            if self.output_type == FeatureVector:
                result_array = \
                    self.add_feature_vector(node_result, node_index,
                                            result_array, names)
            elif self.output_type == PredictionVector:
                if type(node_result.label) == list:
                    result_label.extend(node_result.label)
                else:
                    # a single classification is expected here
                    result_label.append(node_result.label)
                if type(node_result.prediction) == list:
                    result_prediction.extend(node_result.prediction)
                else:
                    result_prediction.append(node_result.prediction)
                if type(node_result.predictor) == list:
                    result_predictor.extend(node_result.predictor)
                else:
                    result_predictor.append(node_result.predictor)
            else:
                assert (self.output_type == TimeSeries), \
                    "SameInputLayerNode can not merge data of type %s." \
                    % self.output_type
                if self.names is None and not self.unique:
                    names.extend(node_result.channel_names)
                elif self.names is None and self.unique:
                    for name in node_result.channel_names:
                        names.append("%i_%s" % (node_index, name))
                # Bug fix: use 'is None' instead of '== None'. TimeSeries is
                # a numpy subclass, so '== None' would compare elementwise.
                if result_array is None:
                    result_array = node_result
                    if self.dtype is None:
                        self.dtype = node_result.dtype
                else:
                    result_array = numpy.concatenate((result_array,
                                                      node_result), axis=1)
        # Construct output with correct type and names
        if self.names is None:
            self.names = names
        if self.output_type == FeatureVector:
            return FeatureVector(result_array, self.names)
        elif self.output_type == PredictionVector:
            return PredictionVector(label=result_label,
                                    prediction=result_prediction,
                                    predictor=result_predictor)
        else:
            return TimeSeries(result_array, self.names,
                              node_result.sampling_frequency,
                              node_result.start_time, node_result.end_time,
                              node_result.name, node_result.marker_name)

    def add_feature_vector(self, data, index, result_array, names):
        """ Concatenate feature vectors, ensuring unique names """
        if self.names is None and self.unique:
            for name in data.feature_names:
                names.append("%i_%s" % (index, name))
        elif self.names is None and not self.unique:
            names.extend(data.feature_names)
        # Bug fix: 'is None' instead of '== None' (elementwise on numpy types)
        if result_array is None:
            result_array = data
        else:
            result_array = numpy.concatenate((result_array, data), axis=1)
        return result_array

    def is_trainable(self):
        """ Trainable if one subnode is trainable """
        for node in self.nodes:
            if node.is_trainable():
                return True
        return False

    def is_supervised(self):
        """ Supervised if one subnode requires supervised training """
        for node in self.nodes:
            if node.is_supervised():
                return True
        return False

    def _train(self, x, *args, **kwargs):
        """ Perform single training step by training the internal nodes """
        for node in self.nodes:
            if node.is_training():
                node.train(x, *args, **kwargs)

    def _stop_training(self):
        """ Stop training of all internal nodes that are still training """
        for node in self.nodes:
            if node.is_training():
                node.stop_training()

    def store_state(self, result_dir, index=None):
        """ Stores all nodes in subdirectories of *result_dir* """
        for i, node in enumerate(self.nodes):
            node_dir = os.path.join(
                result_dir,
                (self.__class__.__name__ + str(index).split("None")[0] + str(i)))
            node.store_state(node_dir, index=i)

    def _inc_train(self, data, label):
        """ Forward data to retrainable nodes

        So the single nodes do not need to buffer or *present_labels* does not
        have to be reimplemented.
        """
        for node in self.nodes:
            if node.is_retrainable():
                node._inc_train(data, label)

    def set_run_number(self, run_number):
        """ Informs all subnodes about the number of the current run """
        for node in self.nodes:
            node.set_run_number(run_number)
        super(SameInputLayerNode, self).set_run_number(run_number)
class EnsembleNotFoundException(Exception): pass
class ClassificationFlowsLoaderNode(BaseNode):
    """ Combine an ensemble of pretrained node chains

    This node loads all "pickled" flows whose file names match
    *ensemble_pattern* and are contained in the directory tree rooted at
    *ensemble_base_dir*. If the *flow_select_list* is not empty, only the
    flows with indices contained in flow_select_list are used. The index "-1"
    corresponds to "all flows".

    **Parameters**

    :ensemble_base_dir:
        The root directory under which the stored flow objects which constitute
        the ensemble are stored.

    :ensemble_pattern:
        Pickled flows must match the given pattern to be included into the
        ensemble.

    :flow_select_list:
        This optional parameter allows to select only a subset of the flows
        that are found in ensemble_base_dir. It must be a list of indices.
        Only the flows with the given index are included into the ensemble.
        If -1 is contained in the list, all flows are automatically added to
        the ensemble.

        .. note::
              The order of the flows in the ensemble is potentially random or at
              least hard to predict. Thus, this parameter should not be used
              to select a specific flow. In contrast, this parameter can be used
              to select a certain number of flows from the available flows
              (where it doesn't matter which ones). This can be useful for instance
              in benchmarking experiments when one is interested in
              the average performance of an ensemble of a certain size.

        (*optional, default: [-1]*)

    :cache_dir:
        If this argument is given, all results of all ensembles are remembered
        and stored in a persistent cache file in the given cache_dir. These
        cached results can be later reused without actually loading and
        executing the ensemble.

        (*optional, default: None*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Ensemble_Node
            parameters :
                ensemble_base_dir : "/tmp/" # <- insert suitable directory here
                ensemble_pattern : "flow*.pickle"
                flow_select_list : "eval(range(10))"

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2010/05/20
    """
    def __init__(self, ensemble_base_dir, ensemble_pattern,
                 flow_select_list=[-1], cache_dir=None, **kwargs):
        super(ClassificationFlowsLoaderNode, self).__init__(**kwargs)
        # Load all flow-pickle files that match the given ensemble_pattern
        # in the directory tree rooted in ensemble_base_dir
        flow_pathes = tuple(locate(ensemble_pattern, ensemble_base_dir))
        if -1 not in flow_select_list:
            # Select only flows for ensemble whose index is contained in
            # flow_select_list
            flow_pathes = tuple(flow_pathes[index] for index in flow_select_list)
        if len(flow_pathes) == 0:
            raise EnsembleNotFoundException(
                "No ensemble found in %s for pattern %s" %
                (ensemble_base_dir, ensemble_pattern))
        # Derive one short feature name per flow from its file name.
        self.feature_names = \
            map(lambda s: "_".join(s.split(os.sep)[-1].split('_')[0:2]),
                flow_pathes)
        self.set_permanent_attributes(ensemble=None,
                                      flow_pathes=flow_pathes,
                                      cache_dir=cache_dir,
                                      cache=None,
                                      cache_updated=False,
                                      store=True)  # always store cache

    def _load_cache(self):
        """ Load the per-flow result caches from disk (if present) """
        self.cache = defaultdict(dict)
        # Check if there are cached results for this ensemble
        for flow_path in self.flow_pathes:
            file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
                + "cache_%s" % hash(flow_path)
            if os.path.exists(file_path):
                # Load ensemble cache under an exclusive file lock, since
                # other processes may be writing the same cache file.
                self._log("Loading flow cache from %s" % file_path)
                lock_file = open(file_path + ".lock", 'w')
                fcntl.flock(lock_file, fcntl.LOCK_EX)
                self._log("Got exclusive lock on %s" % (file_path + ".lock"),
                          logging.INFO)
                cache_file = open(file_path, 'r')
                self.cache[flow_path] = cPickle.load(cache_file)
                cache_file.close()
                self._log("Release exclusive lock on %s" % (file_path + ".lock"),
                          logging.INFO)
                fcntl.flock(lock_file, fcntl.LOCK_UN)

    def _load_ensemble(self):
        """ Instantiate the stored flows and wrap them in a SameInputLayer """
        self._log("Loading ensemble")
        # Create a flow node for each flow pickle
        flow_nodes = [FlowNode(subflow_path=flow_path)
                      for flow_path in self.flow_pathes]
        # Create an SameInputLayer node that executes all flows independently
        # with the same input
        ensemble = SameInputLayerNode(flow_nodes, enforce_unique_names=True)
        # We can now set the input dim and output dim
        self.input_dim = ensemble.input_dim
        self.output_dim = ensemble.output_dim
        self.set_permanent_attributes(ensemble=ensemble)

    def _train(self, data, label):
        """ Trains the ensemble on the given data vector *data* """
        # Bug fix: 'is None' instead of '== None' (singleton comparison)
        if self.ensemble is None:
            # Load ensemble lazily on first use
            self._load_ensemble()
        return self.ensemble.train(data, label)

    def _execute(self, data):
        """ Execute the ensemble on *data*, using cached results if possible """
        # Compute data's hash
        data_hash = hash(tuple(data.flatten()))
        # Load ensemble's cache
        if self.cache is None:
            if self.cache_dir:
                self._load_cache()
            else:  # Caching disabled
                self.cache = defaultdict(dict)
        # Try to look up the result of each flow for the given data in the
        # cache; only flows with a cache miss require loading the ensemble.
        labels = []
        predictions = []
        for i, flow_path in enumerate(self.flow_pathes):
            if data_hash in self.cache[flow_path]:
                label, prediction = self.cache[flow_path][data_hash]
            else:
                self.cache_updated = True
                if self.ensemble is None:
                    # Load ensemble since data is not cached
                    self._load_ensemble()
                node_result = self.ensemble.nodes[i].execute(data)
                label = node_result.label
                prediction = node_result.prediction
                self.cache[flow_path][data_hash] = (label, prediction)
            labels.append(label)
            predictions.append(prediction)
        result = PredictionVector(label=labels,
                                  prediction=predictions,
                                  predictor=self)
        result.dim_names = self.feature_names
        return result

    def store_state(self, result_dir, index=None):
        """ Stores this node in the given directory *result_dir*

        Persists the per-flow result caches (if caching is enabled and the
        cache changed), merging with any cache already on disk under an
        exclusive file lock.
        """
        if self.cache_dir and self.cache_updated:
            if not os.path.exists(self.cache_dir + os.sep + "ensemble_cache"):
                os.makedirs(self.cache_dir + os.sep + "ensemble_cache")
            for flow_path in self.flow_pathes:
                file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
                    + "cache_%s" % hash(flow_path)
                # The update and create cases share the lock/write sequence;
                # only the merge-with-existing step differs.
                update_existing = os.path.exists(file_path)
                if update_existing:
                    self._log("Updating flow cache %s" % file_path)
                else:
                    self._log("Writing flow cache %s" % file_path)
                lock_file = open(file_path + ".lock", 'w')
                fcntl.flock(lock_file, fcntl.LOCK_EX)
                self._log("Got exclusive lock on %s" % (file_path + ".lock"),
                          logging.INFO)
                if update_existing:
                    # Merge entries written by other processes first.
                    cache_file = open(file_path, 'r')
                    self.cache[flow_path].update(cPickle.load(cache_file))
                    cache_file.close()
                cache_file = open(file_path, 'w')
                cPickle.dump(self.cache[flow_path], cache_file)
                cache_file.close()
                self._log("Release exclusive lock on %s" % (file_path + ".lock"),
                          logging.INFO)
                fcntl.flock(lock_file, fcntl.LOCK_UN)
class MultiClassLayerNode(SameInputLayerNode):
    """ Wrap the one vs. rest or one vs. one scheme around the given node

    The given class labels are forwarded to the internal nodes.
    During training, data is relabeled.
    Everything else is the same as in the base node.

    Though this scheme is most important for classification it permits
    other trainable algorithms to use this scheme.

    **Parameters**

    :class_labels:
        This is the complete list of expected class labels.
        It is needed to construct the necessary flows in the
        initialization stage.

    :node:
        Specification of the wrapped node for the used scheme

        As class labels, for the *1vR* scheme,
        this node has to use *REST* and *LABEL*.
        *LABEL* is replaced with the different `class_labels`.
        The other label should be *REST*.

        For the *1vs1* scheme *LABEL1* and *LABEL2* have to be used.

    :scheme:
        Either *1vR* (one vs. rest) or any other value, which selects the
        pairwise one vs. one scheme (default: "1vs1").

        .. note:: The one class approach is included by simply not giving
                  'REST' label to the classifier, but filtering it out.

        (*optional, default: '1vs1'*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : MultiClassLayer
            parameters :
                class_labels : ["Target", "Standard","Artifact"]
                scheme : 1vR
                node :
                    -
                        node : 1SVM
                        parameters :
                            class_labels : ["LABEL","REST"]
                            complexity : 1
    """
    @staticmethod
    def node_from_yaml(layer_spec):
        """ Load the specs and initialize the layer nodes """
        assert("parameters" in layer_spec
               and "class_labels" in layer_spec["parameters"]
               and "node" in layer_spec["parameters"]),\
            "Node requires specification of a node and classification labels!"
        # Any scheme value other than '1vR' selects the pairwise 1vs1 scheme.
        scheme = layer_spec["parameters"].pop("scheme","1vs1")
        # Create all nodes that are packed together in this layer
        layer_nodes = []
        node_spec = layer_spec["parameters"]["node"][0]
        classes = layer_spec["parameters"]["class_labels"]
        if scheme=='1vR':
            # One sub-node per class: LABEL is substituted by each class name.
            for label in layer_spec["parameters"]["class_labels"]:
                node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,{"LABEL":label}))
                layer_nodes.append(node_obj)
        else:
            # One sub-node per unordered class pair (LABEL1 vs LABEL2).
            n=len(classes)
            for i in range(n-1):
                for j in range(i+1,n):
                    replace_dict = {"LABEL1":classes[i],"LABEL2":classes[j]}
                    node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,replace_dict))
                    layer_nodes.append(node_obj)
        layer_spec["parameters"].pop("node")
        layer_spec["parameters"].pop("class_labels")
        # Create the node object
        node_obj = MultiClassLayerNode(nodes = layer_nodes,**layer_spec["parameters"])
        return node_obj
# Alias names under which these nodes can be referenced in YAML node-chain
# specifications (legacy naming), mapped to the implementing classes.
_NODE_MAPPING = {"Ensemble_Node": ClassificationFlowsLoaderNode,
                 "Same_Input_Layer": SameInputLayerNode,
                 }
| gpl-3.0 |
googleapis/python-game-servers | google/cloud/gaming_v1/services/realms_service/async_client.py | 1 | 28046 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.gaming_v1.services.realms_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import realms
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import RealmsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import RealmsServiceGrpcAsyncIOTransport
from .client import RealmsServiceClient
class RealmsServiceAsyncClient:
"""A realm is a grouping of game server clusters that are
considered interchangeable.
"""
_client: RealmsServiceClient
DEFAULT_ENDPOINT = RealmsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = RealmsServiceClient.DEFAULT_MTLS_ENDPOINT
realm_path = staticmethod(RealmsServiceClient.realm_path)
parse_realm_path = staticmethod(RealmsServiceClient.parse_realm_path)
common_billing_account_path = staticmethod(
RealmsServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
RealmsServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(RealmsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
RealmsServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
RealmsServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
RealmsServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(RealmsServiceClient.common_project_path)
parse_common_project_path = staticmethod(
RealmsServiceClient.parse_common_project_path
)
common_location_path = staticmethod(RealmsServiceClient.common_location_path)
parse_common_location_path = staticmethod(
RealmsServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
        info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        RealmsServiceAsyncClient: The constructed client.
    """
    # Delegate to the sync client's classmethod via __func__ so the async
    # class (not RealmsServiceClient) is bound as ``cls`` inside it.
    return RealmsServiceClient.from_service_account_info.__func__(RealmsServiceAsyncClient, info, *args, **kwargs)  # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
        file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        RealmsServiceAsyncClient: The constructed client.
    """
    # Delegate to the sync client's classmethod via __func__ so the async
    # class (not RealmsServiceClient) is bound as ``cls`` inside it.
    return RealmsServiceClient.from_service_account_file.__func__(RealmsServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

# Alias kept for backwards compatibility with earlier client versions.
from_service_account_json = from_service_account_file
@property
def transport(self) -> RealmsServiceTransport:
    """Returns the transport used by the client instance.

    Returns:
        RealmsServiceTransport: The transport used by the client instance.
    """
    # The async surface wraps a synchronous client; all RPCs go through
    # its transport.
    return self._client.transport

# Resolve the transport class through the sync client's metaclass helper,
# partially applied so it can be called without arguments.
get_transport_class = functools.partial(
    type(RealmsServiceClient).get_transport_class, type(RealmsServiceClient)
)
def __init__(
    self,
    *,
    credentials: ga_credentials.Credentials = None,
    transport: Union[str, RealmsServiceTransport] = "grpc_asyncio",
    client_options: ClientOptions = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the realms service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.RealmsServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (ClientOptions): Custom options for the client. It
            won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.

    Raises:
        google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # The async client is a thin wrapper: all endpoint/mTLS/credential
    # resolution is delegated to the wrapped synchronous client.
    self._client = RealmsServiceClient(
        credentials=credentials,
        transport=transport,
        client_options=client_options,
        client_info=client_info,
    )
async def list_realms(
self,
request: realms.ListRealmsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRealmsAsyncPager:
r"""Lists realms in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.ListRealmsRequest`):
The request object. Request message for
RealmsService.ListRealms.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.services.realms_service.pagers.ListRealmsAsyncPager:
Response message for
RealmsService.ListRealms.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.ListRealmsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_realms,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListRealmsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_realm(
self,
request: realms.GetRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.Realm:
r"""Gets details of a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.GetRealmRequest`):
The request object. Request message for
RealmsService.GetRealm.
name (:class:`str`):
Required. The name of the realm to retrieve. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.Realm:
A realm resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.GetRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_realm,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_realm(
self,
request: realms.CreateRealmRequest = None,
*,
parent: str = None,
realm: realms.Realm = None,
realm_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new realm in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.CreateRealmRequest`):
The request object. Request message for
RealmsService.CreateRealm.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm resource to be
created.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm_id (:class:`str`):
Required. The ID of the realm
resource to be created.
This corresponds to the ``realm_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, realm, realm_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.CreateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if realm is not None:
request.realm = realm
if realm_id is not None:
request.realm_id = realm_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def delete_realm(
self,
request: realms.DeleteRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.DeleteRealmRequest`):
The request object. Request message for
RealmsService.DeleteRealm.
name (:class:`str`):
Required. The name of the realm to delete. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.DeleteRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def update_realm(
self,
request: realms.UpdateRealmRequest = None,
*,
realm: realms.Realm = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Patches a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.UpdateRealmRequest`):
The request object. Request message for
RealmsService.UpdateRealm.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm to be updated. Only fields specified
in update_mask are updated.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
https: //developers.google.com/protocol-buffers //
/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([realm, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.UpdateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if realm is not None:
request.realm = realm
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def preview_realm_update(
self,
request: realms.PreviewRealmUpdateRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.PreviewRealmUpdateResponse:
r"""Previews patches to a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.PreviewRealmUpdateRequest`):
The request object. Request message for
RealmsService.PreviewRealmUpdate.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.PreviewRealmUpdateResponse:
Response message for
RealmsService.PreviewRealmUpdate.
"""
# Create or coerce a protobuf request object.
request = realms.PreviewRealmUpdateRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.preview_realm_update,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
# Stamp outgoing requests with this library's version, when it is installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-game-servers",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Not installed as a distribution (e.g. running from a source checkout):
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# NOTE(review): DEFAULT_CLIENT_INFO is referenced as a default argument value
# while the class body above executes, so in the canonical generated layout
# this try/except appears before the class definition — confirm ordering in
# the full file.
__all__ = ("RealmsServiceAsyncClient",)
| apache-2.0 |
Antiun/c2c-rd-addons | chricar_top/report/__init__.py | 4 | 1230 | # -*- coding: utf-8 -*-
# ChriCar Beteiligungs- und Beratungs- GmbH
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import report_webkit_html
import report_real_estate_location
| agpl-3.0 |
dgzzhb/GAOthello | board.py | 1 | 10425 | #!/usr/bin/env python
""" game.py Humberto Henrique Campos Pinheiro
Game logic.
"""
from config import WHITE, BLACK, EMPTY
from copy import deepcopy
class Board:
""" Rules of the game """
def __init__ ( self ):
self.board = [ [0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0] ]
self.board[3][4] = BLACK
self.board[4][3] = BLACK
self.board[3][3] = WHITE
self.board[4][4] = WHITE
self.valid_moves = []
def __getitem__ ( self, i, j):
return self.board[i][j]
def lookup ( self, row, column, color ):
""" Returns the possible positions that there exists at least one straight
(horizontal, vertical, or diagonal) line between the piece specified by (row,
column, color) and another piece of the same color.
"""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
if ( row < 0 or row > 7 or column < 0 or column > 7 ):
return places
# For each direction search for possible positions to put a piece.
# north
i = row - 1
if ( i >= 0 and self.board[i][column] == other ):
i = i - 1
while ( i >= 0 and self.board[i][column] == other ):
i = i - 1
if ( i >= 0 and self.board[i][column] == 0 ):
places = places + [( i, column)]
# northeast
i = row - 1
j = column + 1
if ( i >= 0 and j < 8 and self.board[i][j] == other ) :
i = i - 1
j = j + 1
while ( i >= 0 and j < 8 and self.board[i][j] == other ):
i = i - 1
j = j + 1
if ( i >= 0 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# east
j = column + 1
if ( j < 8 and self.board[row][j] == other ) :
j = j + 1
while ( j < 8 and self.board[row][j] == other ):
j = j + 1
if ( j < 8 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# southeast
i = row + 1
j = column + 1
if ( i < 8 and j < 8 and self.board[i][j] == other ) :
i = i + 1
j = j + 1
while ( i < 8 and j < 8 and self.board[i][j] == other ):
i = i + 1
j = j + 1
if ( i < 8 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# south
i = row + 1
if ( i < 8 and self.board[i][column] == other ):
i = i + 1
while ( i < 8 and self.board[i][column] == other ):
i = i + 1
if ( i < 8 and self.board[i][column] == 0 ):
places = places + [(i, column)]
# southwest
i = row + 1
j = column - 1
if ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
while ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
if ( i < 8 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# west
j = column - 1
if ( j >= 0 and self.board[row][j] == other ):
j = j - 1
while ( j >= 0 and self.board[row][j] == other ):
j = j - 1
if ( j >= 0 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# northwest
i = row - 1
j = column - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
while ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
return places
def get_valid_moves ( self, color ):
""" Get the avaiable positions to put a piece of the given color. For each
piece of the given color we search its neighbours, searching for pieces of the
other color to determine if is possible to make a move. This method must be
called before apply_move."""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
for i in range ( 8 ) :
for j in range ( 8 ) :
if self.board[i][j] == color :
places = places + self.lookup ( i, j, color )
places = list( set ( places ))
self.valid_moves = places
return places
def apply_move ( self, move, color ):
""" Determine if the move is correct and apply the changes in the game.
"""
if move in self.valid_moves:
self.board[move[0]][move[1]] = color
for i in range ( 1, 9 ):
self.flip ( i, move, color )
def flip ( self, direction, position, color ):
""" Flips (capturates) the pieces of the given color in the given direction
(1=North,2=Northeast...) from position. """
if direction == 1:
# north
row_inc = -1
col_inc = 0
elif direction == 2:
# northeast
row_inc = -1
col_inc = 1
elif direction == 3:
# east
row_inc = 0
col_inc = 1
elif direction == 4:
# southeast
row_inc = 1
col_inc = 1
elif direction == 5:
# south
row_inc = 1
col_inc = 0
elif direction == 6:
# southwest
row_inc = 1
col_inc = -1
elif direction == 7:
# west
row_inc = 0
col_inc = -1
elif direction == 8:
# northwest
row_inc = -1
col_inc = -1
places = [] # pieces to flip
i = position[0] + row_inc
j = position[1] + col_inc
if color == WHITE:
other = BLACK
else:
other = WHITE
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# assures there is at least one piece to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
while i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# search for more pieces to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == color:
# found a piece of the right color to flip the pieces between
for pos in places:
# flips
self.board[pos[0]][pos[1]] = color
def get_changes ( self ):
""" Return black and white counters. """
whites, blacks, empty = self.count_stones()
return ( self.board, blacks, whites )
def game_ended ( self ):
""" Is the game ended? """
# board full or wipeout
whites, blacks, empty = self.count_stones()
if whites == 0 or blacks == 0 or empty == 0:
return True
# no valid moves for both players
if self.get_valid_moves( BLACK ) == [] and self.get_valid_moves( WHITE ) == []:
return True
return False
def print_board ( self ):
for i in range ( 8 ):
print i, ' |',
for j in range ( 8 ):
if self.board[i][j] == BLACK:
print 'B',
elif self.board[i][j] == WHITE:
print 'W',
else:
print ' ',
print '|',
print
def count_stones( self ):
""" Returns the number of white pieces, black pieces and empty squares, in
this order.
"""
whites = 0
blacks = 0
empty = 0
for i in range( 8 ):
for j in range( 8 ):
if self.board[i][j] == WHITE:
whites += 1
elif self.board[i][j] == BLACK:
blacks += 1
else:
empty += 1
return whites, blacks, empty
def compare( self, otherBoard ):
""" Return a board containing only the squares that are empty in one of the boards
and not empty on the other.
"""
diffBoard = Board()
diffBoard.board[3][4] = 0
diffBoard.board[3][3] = 0
diffBoard.board[4][3] = 0
diffBoard.board[4][4] = 0
for i in range( 8 ):
for j in range( 8 ):
if otherBoard.board[i][j] != self.board[i][j]:
diffBoard.board[i][j] = otherBoard.board[i][j]
return otherBoard
def get_adjacent_count( self, color ):
""" Return how many empty squares there are on the board adjacent to the specified color."""
adjCount = 0
for x,y in [(a,b) for a in range( 8 ) for b in range( 8 ) if self.board[a][b] == color]:
for i,j in [(a,b) for a in [-1,0,1] for b in [-1,0,1]]:
if 0 <= x+i <= 7 and 0 <= y+j <= 7:
if self.board[x+i][y+j] == EMPTY:
adjCount += 1
return adjCount
def next_states( self, color ):
""" Given a player's color return all the boards resulting from moves that this player
cand do. It's implemented as an iterator.
"""
valid_moves = self.get_valid_moves( color )
for move in valid_moves:
newBoard = deepcopy( self )
newBoard.apply_move( move, color )
yield newBoard
| mit |
sbg/Mitty | mitty/simulation/sequencing/syntheticsequencer.py | 1 | 1955 | """A fully synthetic read model that allows us to produce single end or paired end reads with arbitrary
read and template lengths. It's read model format is as follows
{
'model_class': 'illumina',
'model_description': '',
'paired': True/False,
'read_length': 100,
'mean_template_length': 300,
'std_template_length': 100,
'bq_mat': [],
'cum_bq_mat': []
}
"""
import pickle
import numpy as np
def create_model(
    pkl,
    read_length=100, mean_template_length=500, std_template_length=100, max_tlen=1000,
    bq0=30, k=200, sigma=10,
    comment=''):
  """Create a synthetic Illumina-style read model and pickle it to `pkl`.

  :param pkl: path of the output pickle file
  :param read_length: fixed read length in bp
  :param mean_template_length: mean of the gaussian template length distribution
  :param std_template_length: std dev of the template length distribution
  :param max_tlen: template lengths are tabulated for 0 .. max_tlen - 1
  :param bq0: base quality scale factor (approached at the start of the read)
  :param k: steepness of the base-quality falloff along the read
  :param sigma: per-base gaussian spread of base quality
  :param comment: free text appended to the model description
  """
  # Bug fix: the description previously quoted the formula
  # "bq0 * exp(- b/l * k)", which does not match the computation below.
  description = """This is a synthetic read model that generates reads
with a length of {} bp, a template length of {} +/- {} bp.
The mean base quality follows the equation:
  {} * (1 - exp(- {} * x^2))
where x falls linearly from 1 at the first base of the read to 0 at the last.
The base quality for a given base in a given read is drawn from a gaussian with standard deviation {}
{}""".format(
    read_length,
    mean_template_length,
    std_template_length,
    bq0, k, sigma,
    comment)

  # Mean base quality per position: high at the read start, decaying to 0.
  bq = bq0 * (1 - np.exp(- k * np.linspace(1, 0, read_length) ** 2))

  # Per-position gaussian over the 94 printable Phred values, and its
  # normalized cumulative form (used for sampling).
  one_bq_mat = np.zeros((read_length, 94), dtype=float)
  for n in range(read_length):
    one_bq_mat[n, :] = np.exp(- 0.5 * ((np.arange(94) - bq[n]) / sigma) ** 2)
  one_cum_bq_mat = one_bq_mat.cumsum(axis=1) / one_bq_mat.sum(axis=1).clip(1)[:, None]

  # Gaussian template-length distribution, normalized, with its CDF.
  tlen_mat = np.exp(- 0.5 * ((np.arange(max_tlen) - mean_template_length) / std_template_length) ** 2)
  tlen_mat /= tlen_mat.sum()
  cum_tlen = tlen_mat.cumsum() / tlen_mat.sum()

  # Use a context manager so the output file is always closed (the previous
  # code passed an anonymous open() handle to pickle.dump and leaked it).
  with open(pkl, 'wb') as fp:
    pickle.dump({
      'model_class': 'illumina',
      'model_description': description,
      'min_mq': 0,
      'bq_mat': np.array((one_bq_mat, one_bq_mat)),
      'cum_bq_mat': np.array((one_cum_bq_mat, one_cum_bq_mat)),
      'tlen': tlen_mat,
      'cum_tlen': cum_tlen,
      'mean_rlen': read_length,
      'min_rlen': read_length,
      'max_rlen': read_length,
      'r_cnt': 1
    }, fp)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.