text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import os.path
import tempfile
import wordcount_lib
def _make_testfile(filename, data):
"Make a temp file containing the given data; return full path to file."
tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
testfile = os.path.join(tempdir, filename)
with open(testfile, 'wt') as fp:
fp.write(data)
return testfile
def test_consume_1():
    """consume() on a simple two-line file: counts chars, words and lines."""
    path = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(path)
    assert (chars, words, lines) == (10, 4, 2)
def test_consume_2():
    """consume() counts one word per line across five lines."""
    path = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(path)
    # char count includes the newline characters
    assert (chars, words, lines) == (12, 5, 5)
def test_consume_3():
    """Tricky case: leading and trailing whitespace on a single line."""
    path = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(path)
    # whitespace counts toward chars but must not create extra words
    assert (chars, words, lines) == (7, 3, 1)
def test_consume_4():
    """Tricky case: multiple words separated by single spaces, one line."""
    path = _make_testfile('sometext.txt', ' a b c d e')
    chars, words, lines = wordcount_lib.consume(path)
    # leading space is a char but not a word
    assert (chars, words, lines) == (10, 5, 1)
def test_daaaangerous_func():
    """daaaangerous() must raise ZeroDivisionError.

    Reaching the assert means the call returned normally, which is a
    failure.  The original put the message on a no-op ``assert True`` in
    the except clause, where it could never be shown; it belongs on the
    failing assert instead.
    """
    try:
        wordcount_lib.daaaangerous()
        assert False, 'Should throw ZeroDivisionError'
    except ZeroDivisionError:
        pass
|
{
"content_hash": "b52158dab9a13d5547761cff11190319",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 29.966666666666665,
"alnum_prop": 0.64293659621802,
"repo_name": "lifan0127/2016-pycon-tutorial-project",
"id": "711e5dadaa176e9f9b080ce781e7bf78b3afe16b",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_wordcount.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2992"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of ``funnel.marker.line``."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="funnel.marker.line", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before forwarding.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
{
"content_hash": "19c9c7ac166973019224ffedacb31fcc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.5944206008583691,
"repo_name": "plotly/python-api",
"id": "f517260ef7a8327ac46c434bf4169a388b4da38e",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/marker/line/_colorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
def starting_urls(apps, schema_editor):
    """Seed the url_checker Url table with the initial radio-stream list.

    Uses the historical model via ``apps.get_model`` as required inside a
    migration; ``schema_editor`` is unused but part of the RunPython
    signature.
    """
    Url = apps.get_model("url_checker", "url")
    stations = (
        ("mitre", "http://buecrplb01.cienradios.com.ar/Mitre790.aac"),
        ("continental", "http://1351.live.streamtheworld.com:80/CONTINENTAL_SC"),
        ("radio argentina", "http://wmserver3.aginet.com.ar:13574/;stream/1/"),
        ("los 40", "http://5133.live.streamtheworld.com:80/LOS40_ARGENTINA_SC"),
        ("la 100", "http://buecrplb01.cienradios.com.ar/la100.aac"),
        ("espn", "http://edge.espn.cdn.abacast.net/espn-deportesmp3-48"),
        ("imagina", "http://7309.live.streamtheworld.com:80/IMAGINA_ARGENTINA_SC"),
        ("nova", "http://buecrplb01.cienradios.com.ar/fm979.mp3"),
        ("sonic", "http://live.chicago.cdn.sonic.fm:8000/live128"),
        ("el mundo", "http://radiostream.elmundoradio.com:8332/;"),
    )
    for name, url in stations:
        Url(name=name, url=url).save()
class Migration(migrations.Migration):
    # Initial migration: creates the Url model (id, name, url) and then
    # seeds the table via the starting_urls data migration below.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Url',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('url', models.URLField()),
            ],
        ),
        # Populate the freshly created table with the initial station list.
        migrations.RunPython(starting_urls),
    ]
|
{
"content_hash": "3c7f2c703a3ca6f505f481278a589ba7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 114,
"avg_line_length": 43.34285714285714,
"alnum_prop": 0.6420566908371786,
"repo_name": "unbalancedparentheses/radioelm",
"id": "6abe7867f68c4e9a91b449cdbd015374ff20b788",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/url_checker/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60"
},
{
"name": "Elm",
"bytes": "4264"
},
{
"name": "Erlang",
"bytes": "15635"
},
{
"name": "HTML",
"bytes": "478"
},
{
"name": "JavaScript",
"bytes": "442"
},
{
"name": "Makefile",
"bytes": "2395"
},
{
"name": "Python",
"bytes": "6548"
}
],
"symlink_target": ""
}
|
'''
Stack Layout
============
.. only:: html
.. image:: images/stacklayout.gif
:align: right
.. only:: latex
.. image:: images/stacklayout.png
:align: right
.. versionadded:: 1.0.5
The :class:`StackLayout` arranges children vertically or horizontally, as many
as the layout can fit. The size of the individual children widgets do not
have to be uniform.
For example, to display widgets that get progressively larger in width::
root = StackLayout()
for i in range(25):
btn = Button(text=str(i), width=40 + i * 5, size_hint=(None, 0.15))
root.add_widget(btn)
.. image:: images/stacklayout_sizing.png
:align: left
'''
__all__ = ('StackLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty, \
ReferenceListProperty, VariableListProperty
class StackLayout(Layout):
    '''Stack layout class. See module documentation for more information.
    '''

    spacing = VariableListProperty([0, 0], length=2)
    '''Spacing between children: [spacing_horizontal, spacing_vertical].

    spacing also accepts a single argument form [spacing].

    :attr:`spacing` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
    '''

    padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and it's children: [padding_left,
    padding_top, padding_right, padding_bottom].

    padding also accepts a two argument form [padding_horizontal,
    padding_vertical] and a single argument form [padding].

    .. versionchanged:: 1.7.0
        Replaced the NumericProperty with a VariableListProperty.

    :attr:`padding` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to
    [0, 0, 0, 0].
    '''

    orientation = OptionProperty('lr-tb', options=(
        'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt',
        'bt-rl'))
    '''Orientation of the layout.

    :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'lr-tb'.

    Valid orientations are 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',
    'bt-lr', 'rl-bt' and 'bt-rl'.

    .. versionchanged:: 1.5.0
        :attr:`orientation` now correctly handles all valid combinations of
        'lr','rl','tb','bt'. Before this version only 'lr-tb' and
        'tb-lr' were supported, and 'tb-lr' was misnamed and placed
        widgets from bottom to top and from right to left (reversed compared
        to what was expected).

    .. note::

        'lr' means Left to Right.
        'rl' means Right to Left.
        'tb' means Top to Bottom.
        'bt' means Bottom to Top.
    '''

    minimum_width = NumericProperty(0)
    '''Minimum width needed to contain all children. It is automatically set
    by the layout.

    .. versionadded:: 1.0.8

    :attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''

    minimum_height = NumericProperty(0)
    '''Minimum height needed to contain all children. It is automatically set
    by the layout.

    .. versionadded:: 1.0.8

    :attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''

    minimum_size = ReferenceListProperty(minimum_width, minimum_height)
    '''Minimum size needed to contain all children. It is automatically set
    by the layout.

    .. versionadded:: 1.0.8

    :attr:`minimum_size` is a
    :class:`~kivy.properties.ReferenceListProperty` of
    (:attr:`minimum_width`, :attr:`minimum_height`) properties.
    '''

    def __init__(self, **kwargs):
        super(StackLayout, self).__init__(**kwargs)
        # Any change to a geometry-affecting property re-triggers layout.
        self.bind(
            padding=self._trigger_layout,
            spacing=self._trigger_layout,
            children=self._trigger_layout,
            orientation=self._trigger_layout,
            size=self._trigger_layout,
            pos=self._trigger_layout)

    def do_layout(self, *largs):
        # Arrange children along the "inner" (u) axis, wrapping into new
        # lines stacked along the "outer" (v) axis, per `orientation`.
        if not self.children:
            return

        # optimize layout by preventing looking at the same attribute in a loop
        selfpos = self.pos
        selfsize = self.size

        orientation = self.orientation.split('-')
        padding_left = self.padding[0]
        padding_top = self.padding[1]
        padding_right = self.padding[2]
        padding_bottom = self.padding[3]

        padding_x = padding_left + padding_right
        padding_y = padding_top + padding_bottom
        spacing_x, spacing_y = self.spacing

        # lc accumulates the children of the line currently being filled
        lc = []

        # Determine which direction and in what order to place the widgets
        posattr = [0] * 2
        posdelta = [0] * 2
        posstart = [0] * 2
        for i in (0, 1):
            # posattr[i] is the axis index: 0 = x, 1 = y
            posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))
            k = posattr[i]
            if orientation[i] == 'lr':
                # left to right
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_left
            elif orientation[i] == 'bt':
                # bottom to top
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_bottom
            elif orientation[i] == 'rl':
                # right to left
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_right
            else:
                # top to bottom
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_top

        innerattr, outerattr = posattr
        ustart, vstart = posstart
        deltau, deltav = posdelta
        del posattr, posdelta, posstart

        u = ustart  # inner loop position variable
        v = vstart  # outer loop position variable

        # space calculation, used for determining when a row or column is full
        if orientation[0] in ('lr', 'rl'):
            sv = padding_y  # size in v-direction, for minimum_size property
            su = padding_x  # size in h-direction
            spacing_u = spacing_x
            spacing_v = spacing_y
            padding_u = padding_x
            padding_v = padding_y
        else:
            sv = padding_x  # size in v-direction, for minimum_size property
            su = padding_y  # size in h-direction
            spacing_u = spacing_y
            spacing_v = spacing_x
            padding_u = padding_y
            padding_v = padding_x

        # space calculation, row height or column width, for arranging widgets
        lv = 0

        # urev/vrev: axes that run from high to low coordinates
        urev = (deltau < 0)
        vrev = (deltav < 0)
        # NOTE(review): `su` and `firstchild` are assigned but never read in
        # this method body — apparently dead; left in place.
        firstchild = self.children[0]
        sizes = []
        for c in reversed(self.children):
            if c.size_hint[outerattr]:
                c.size[outerattr] = max(
                    1, c.size_hint[outerattr] *
                    (selfsize[outerattr] - padding_v))

            # does the widget fit in the row/column?
            ccount = len(lc)
            totalsize = availsize = max(
                0, selfsize[innerattr] - padding_u - spacing_u * ccount)
            if not lc:
                if c.size_hint[innerattr]:
                    childsize = max(1, c.size_hint[innerattr] * totalsize)
                else:
                    childsize = max(0, c.size[innerattr])
                availsize = selfsize[innerattr] - padding_u - childsize
                testsizes = [childsize]
            else:
                # re-evaluate every child in the line: size_hint children
                # shrink as the shared space is re-divided
                testsizes = [0] * (ccount + 1)
                for i, child in enumerate(lc):
                    if availsize <= 0:
                        # no space left but we're trying to add another widget.
                        availsize = -1
                        break
                    if child.size_hint[innerattr]:
                        testsizes[i] = childsize = max(
                            1, child.size_hint[innerattr] * totalsize)
                    else:
                        testsizes[i] = childsize = max(0, child.size[innerattr])
                    availsize -= childsize
                if c.size_hint[innerattr]:
                    testsizes[-1] = max(1, c.size_hint[innerattr] * totalsize)
                else:
                    testsizes[-1] = max(0, c.size[innerattr])
                availsize -= testsizes[-1]

            if availsize >= 0 or not lc:
                # even if there's no space, we always add one widget to a row
                lc.append(c)
                sizes = testsizes
                lv = max(lv, c.size[outerattr])
                continue

            # apply the sizes
            for i, child in enumerate(lc):
                if child.size_hint[innerattr]:
                    child.size[innerattr] = sizes[i]

            # push the line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    # v position is actually the top/right side of the widget
                    # when going from high to low coordinate values,
                    # we need to subtract the height/width from the position.
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u

            # advance to the next line and start it with the child that
            # did not fit
            v += deltav * lv
            v += deltav * spacing_v
            lc = [c]
            lv = c.size[outerattr]
            if c.size_hint[innerattr]:
                sizes = [max(1, c.size_hint[innerattr] *
                             (selfsize[innerattr] - padding_u))]
            else:
                sizes = [max(0, c.size[innerattr])]
            u = ustart

        if lc:
            # apply the sizes
            for i, child in enumerate(lc):
                if child.size_hint[innerattr]:
                    child.size[innerattr] = sizes[i]

            # push the last (incomplete) line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u

        self.minimum_size[outerattr] = sv
|
{
"content_hash": "09703a7c3a9221e132938d9c2b632d84",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 80,
"avg_line_length": 33.78964401294498,
"alnum_prop": 0.5331864763911502,
"repo_name": "JohnHowland/kivy",
"id": "ea1f15fd9cb87b3d4046369161d98df61caf467e",
"size": "10441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy/uix/stacklayout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "327609"
},
{
"name": "CSS",
"bytes": "7237"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "JavaScript",
"bytes": "12900"
},
{
"name": "Makefile",
"bytes": "9565"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3685901"
},
{
"name": "Shell",
"bytes": "11731"
},
{
"name": "TeX",
"bytes": "4355"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from operator import or_
from six.moves import reduce
from sentry.models import ApiKey, AuditLogEntryEvent
from sentry.web.frontend.base import OrganizationView
# Scopes granted to a newly created organization API key.
DEFAULT_SCOPES = [
    'project:read',
    'event:read',
    'team:read',
    'org:read',
    'member:read',
]
class OrganizationApiKeysView(OrganizationView):
    """List, create and delete an organization's API keys."""

    required_scope = 'org:delete'

    def handle(self, request, organization):
        """Dispatch on the ``op`` POST parameter.

        ``newkey``    -- create a key with the default scopes, record an
                         audit entry and redirect to its settings page.
        ``removekey`` -- delete the key identified by ``kid`` (silently
                         ignoring unknown ids), record an audit entry and
                         redirect back to the list.
        Anything else -- render the key list.
        """
        if request.POST.get('op') == 'newkey':
            key = ApiKey.objects.create(
                organization=organization,
                scopes=reduce(or_, [getattr(ApiKey.scopes, s) for s in DEFAULT_SCOPES])
            )

            self.create_audit_entry(
                request,
                organization=organization,
                target_object=key.id,
                event=AuditLogEntryEvent.APIKEY_ADD,
                data=key.get_audit_log_data(),
            )

            redirect_uri = reverse('sentry-organization-api-key-settings', args=[
                organization.slug, key.id,
            ])
            return HttpResponseRedirect(redirect_uri)

        elif request.POST.get('op') == 'removekey':
            try:
                key = ApiKey.objects.get(
                    id=request.POST.get('kid'),
                    organization=organization,
                )
            except ApiKey.DoesNotExist:
                pass
            else:
                # Capture the audit payload and the primary key *before*
                # deleting: Django's Model.delete() sets the instance's pk
                # to None, so reading key.id afterwards (as the original
                # code did) records None as the audit target.
                audit_data = key.get_audit_log_data()
                key_id = key.id
                key.delete()

                self.create_audit_entry(
                    request,
                    organization=organization,
                    target_object=key_id,
                    event=AuditLogEntryEvent.APIKEY_REMOVE,
                    data=audit_data,
                )

            return HttpResponseRedirect(request.path)

        key_list = sorted(ApiKey.objects.filter(
            organization=organization,
        ), key=lambda x: x.label)

        context = {
            'key_list': key_list,
        }

        return self.respond('sentry/organization-api-keys.html', context)
|
{
"content_hash": "022a7176a92c6256962fb85f7e7ef28d",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 87,
"avg_line_length": 29.756756756756758,
"alnum_prop": 0.5540417801998183,
"repo_name": "fotinakis/sentry",
"id": "638484ea7aeb1226898a5269be832a370b33a9ee",
"size": "2202",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/organization_api_keys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222885"
},
{
"name": "HTML",
"bytes": "282398"
},
{
"name": "JavaScript",
"bytes": "927323"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5812"
},
{
"name": "Python",
"bytes": "11654397"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
'''OpenGL extension SGI.texture_color_table
This module customises the behaviour of the
OpenGL.raw.GL.SGI.texture_color_table to provide a more
Python-friendly API
Overview (from the spec)
This extension adds a color lookup table to the texture mechanism.
The table is applied to the filtered result of a texture lookup,
before that result is used in the texture environment equations.
The definition and application of the texture color table are
similar to those of the color tables defined in SGI_color_table,
though it is not necessary for that extension to be implemented.
Texture color tables can be used to expand luminance or intensity
textures to full RGBA, and also to linearize the results of color
space conversions implemented by multidimensional texture table
lookup.
This specification has been updated to define its interaction with
multitexture.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGI/texture_color_table.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGI.texture_color_table import *
from OpenGL.raw.GL.SGI.texture_color_table import _EXTENSION_NAME
def glInitTextureColorTableSGI():
    """Report whether the SGI_texture_color_table extension is available.

    Returns the result of the runtime extension query for
    ``_EXTENSION_NAME``.
    """
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION
|
{
"content_hash": "f3c75fa4f029517cb2de7efbe50fb8ed",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 71,
"avg_line_length": 36.707317073170735,
"alnum_prop": 0.7966777408637874,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "a917dbe39e8d333eebff9b6d75f2c7891d1ccb9b",
"size": "1505",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/SGI/texture_color_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
""" Representation of DNS A records """
from sqlalchemy import Column, ForeignKey, Index
from sqlalchemy.orm import (relation, backref, mapper, deferred, object_session,
validates)
from sqlalchemy.orm.attributes import instance_state
from sqlalchemy.sql import join, and_
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import Network, NetworkEnvironment, DnsRecord, Fqdn
from aquilon.aqdb.column_types import IPV4
_TN = 'a_record'       # table name for ARecord
_DTN = 'dynamic_stub'  # table name for DynamicStub
class ARecord(DnsRecord):
    # DNS "A" record: maps an FQDN to an IPv4 address allocated on a
    # specific network, with an optional reverse PTR override.
    __tablename__ = _TN
    _class_label = 'DNS Record'

    dns_record_id = Column(ForeignKey(DnsRecord.id, ondelete='CASCADE'),
                           primary_key=True)
    ip = Column(IPV4, nullable=False)
    network_id = Column(ForeignKey(Network.id), nullable=False)
    reverse_ptr_id = Column(ForeignKey(Fqdn.id, name='%s_reverse_ptr_fk' % _TN,
                                       ondelete='SET NULL'),
                            nullable=True, index=True)

    network = relation(Network, innerjoin=True,
                       backref=backref('dns_records', passive_deletes=True))
    reverse_ptr = relation(Fqdn, foreign_keys=reverse_ptr_id,
                           backref=backref('reverse_entries',
                                           passive_deletes=True))

    __table_args__ = (Index("%s_network_ip_idx" % _TN, network_id, ip),
                      {'info': {'unique_fields': ['fqdn'],
                                'extra_search_fields': ['ip', 'network',
                                                        'dns_environment']}})
    __mapper_args__ = {'polymorphic_identity': _TN}

    @property
    def is_unused(self):
        # True when neither address assignments nor service addresses
        # reference this record.
        if self.assignments:
            return False
        elif self.service_addresses:
            return False
        else:
            return True

    def __format__(self, format_spec):
        # The "a" spec renders as "fqdn [ip]"; anything else defers to the
        # base DnsRecord formatting.
        if format_spec != "a":
            return super(ARecord, self).__format__(format_spec)
        return "%s [%s]" % (self.fqdn, self.ip)

    def __init__(self, ip=None, network=None, fqdn=None, **kwargs):
        """Create an A record after validating the ip/network/fqdn triple.

        Raises ValueError for missing arguments or an IP outside the
        network, and ArgumentError when an identical (network, ip, fqdn)
        record already exists.
        """
        if not network:  # pragma: no cover
            raise ValueError("network argument is missing")
        if ip not in network.network:  # pragma: no cover
            raise ValueError("IP not inside network")
        if not fqdn:  # pragma: no cover
            raise ValueError("fqdn cannot be empty")

        # We can't share both the IP and the FQDN with an other A record. Only
        # do the query if the FQDN is already persistent
        if instance_state(fqdn).has_identity:
            session = object_session(fqdn)
            if not session:  # pragma: no cover
                raise ValueError("fqdn must be already part of a session")

            # Disable autoflush because self is not ready to be pushed to the
            # DB yet
            with session.no_autoflush:
                q = session.query(ARecord.id)
                q = q.filter_by(network=network)
                q = q.filter_by(ip=ip)
                q = q.filter_by(fqdn=fqdn)
                if q.all():  # pragma: no cover
                    raise ArgumentError("%s, ip %s already exists." %
                                        (self._get_class_label(), ip))

        super(ARecord, self).__init__(ip=ip, network=network, fqdn=fqdn,
                                      **kwargs)

    @validates('reverse_ptr')
    def _validate_reverse_ptr(self, key, value):
        # SQLAlchemy hook; delegates so subclasses can override the rule.
        return self.validate_reverse_ptr(key, value)

    def validate_reverse_ptr(self, key, value):  # pylint: disable=W0613
        # A reverse PTR must live in the same DNS environment as the FQDN.
        if value and self.fqdn.dns_environment != value.dns_environment:  # pragma: no cover
            raise ValueError("DNS environment mismatch: %s != %s" %
                             (self.fqdn.dns_environment, value.dns_environment))
        return value

    def check_grn_conflict(self, grn):
        # Service addresses derive their GRN from the device; setting one
        # directly on the record would conflict.
        super(ARecord, self).check_grn_conflict(grn)
        if self.service_addresses:
            raise ArgumentError("{0} is a service address. GRN should not be set "
                                "but derived from the device.".format(self))
# Create a secondary mapper to allow filtering DNS entries based on the DNS
# environment associated with the network the address is allocated from:
#
# +------------------+  (IP, network_id)  +--------+        +----+
# |Address allocation| ------------------ |A record| ------ |FQDN|
# +------------------+                    +--------+        +----+
#                                              |               |
#                                 (network_id) |               | (dns_environment_id)
#                                              |               |
#                                         +-------+   +-------------------+
#                                         |Network| - |Network environment|
#                                         +-------+   +-------------------+
#
# So, we want to be able to create joins between an allocated address (IP and
# network_id pair embedded into an object) and an A record, but we don't want
# to mix different DNS views together, so the join needs to be restricted to a
# single DNS environment. The mapper below implements the right side of the
# above graph (everything except the address allocation object) with the
# required filtering.
__j = join(ARecord, Fqdn, ARecord.fqdn_id == Fqdn.id)
__j = __j.join(NetworkEnvironment,
               Fqdn.dns_environment_id == NetworkEnvironment.dns_environment_id)
__j = __j.join(Network, and_(Network.id == ARecord.network_id,
                             Network.network_environment_id == NetworkEnvironment.id))

dns_fqdn_mapper = mapper(ARecord, __j,
                         # Only map the columns from the join which ARecord
                         # would normally have
                         include_properties=(DnsRecord.__table__.c.values() +
                                             ARecord.__table__.c.values()),
                         properties={
                             # Both DnsRecord and Fqdn have a column named 'id'.
                             # Tell the ORM that DnsRecord.fqdn_id and Fqdn.id are
                             # really the same thing due to the join condition
                             'fqdn_id': [__j.c.fqdn_id, __j.c.dns_record_fqdn_id],

                             # Usually these columns are not needed, so don't
                             # load them automatically
                             'creation_date': deferred(__j.c.dns_record_creation_date),
                             'comments': deferred(__j.c.dns_record_comments),

                             # Make sure FQDNs are eager loaded when using this
                             # mapper. This will cause fqdn being joined twice,
                             # but there's no way to tell SQLAlchemy that this
                             # information is already present in __j.
                             'fqdn': relation(Fqdn, lazy=False, innerjoin=True,
                                              primaryjoin=__j.c.dns_record_fqdn_id == Fqdn.id)
                         },
                         polymorphic_identity=_TN,
                         primary_key=__j.c.a_record_dns_record_id,
                         non_primary=True)
class DynamicStub(ARecord):
    """
    DynamicStub is a hack to handle stand alone DNS records for dynamic
    hosts prior to having a properly reworked set of tables for DNS
    information. It should not be used by anything other than to create host
    records for virtual machines using names similar to
    'dynamic-1-2-3-4.subdomain.ms.com'
    """
    __tablename__ = _DTN
    __mapper_args__ = {'polymorphic_identity': _DTN}
    _class_label = 'Dynamic Stub'

    dns_record_id = Column(ForeignKey(ARecord.dns_record_id,
                                      ondelete='CASCADE'),
                           primary_key=True)

    __table_args__ = ({'info': {'unique_fields': ['fqdn'],
                                'extra_search_fields': ['ip', 'network',
                                                        'dns_environment']}},)

    def validate_reverse_ptr(self, key, value):
        # Tighten the base-class rule: dynamic DHCP stubs must never carry
        # a reverse PTR override.
        super(DynamicStub, self).validate_reverse_ptr(key, value)
        if value:
            raise ValueError("The reverse PTR record cannot be set for "
                             "DNS records used for dynamic DHCP.")
        return value
# Convenience accessor on Network: the dynamic DHCP stub records for the
# network, ordered by IP; viewonly, so it is never used to persist changes.
Network.dynamic_stubs = relation(DynamicStub, order_by=[DynamicStub.ip],
                                 viewonly=True)
|
{
"content_hash": "de2cf0658e41b4c0584da172ea4e40a3",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 94,
"avg_line_length": 46.55913978494624,
"alnum_prop": 0.5196304849884527,
"repo_name": "guillaume-philippon/aquilon",
"id": "a7b42ee37c2c64e5d632dce94f196dfe04e7758f",
"size": "9368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/aquilon/aqdb/model/a_record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
from collections import defaultdict, OrderedDict
import math
import os
import cPickle
import copy
import numpy as np
import pysal as ps
from pysal.weights.util import get_ids
from analysis import NetworkG, NetworkK, NetworkF
import util
__all__ = ["Network", "PointPattern", "NetworkG", "NetworkK", "NetworkF" ]
class Network:
"""
Spatially constrained network representation and analytical functionality.
Parameters
-----------
in_shp : string
A topoligically correct input shapefile
Attributes
----------
in_shp : string
input shapefile name
adjacencylist : list
of lists storing node adjacency
nodes : dict
key are tuple of node coords and value is the node ID
edge_lengths : dict
key is a tuple of sorted node IDs representing an edge
value is the length
pointpatterns : dict
key is a string name of the pattern
value is a point pattern class instance
node_coords : dict
key is th node ID and value are the (x,y) coordinates
inverse to nodes
edges : list
of edges, where each edge is a sorted tuple of node IDs
node_list : list
node IDs
alldistances : dict
key is the node ID
value is a tuple with two elements, first is a list of the
shortest path distances, second is a dict with
the key being the id of the destination node and the value
is a list of the shortest path.
Examples
--------
Instantiate an instance of a network
>>> ntw = network.Network(ps.examples.get_path('geodanet/streets.shp'))
Snap point observations to the network with attribute information
>>> ntw.snapobservations(ps.examples.get_path('geodanet/crimes.shp'), 'crimes', attribute=True)
And without attribute information
>>> ntw.snapobservations(ps.examples.get_path('geodanet/schools.shp'), 'schools', attribute=False)
"""
def __init__(self, in_shp=None):
if in_shp:
self.in_shp = in_shp
self.adjacencylist = defaultdict(list)
self.nodes = {}
self.edge_lengths = {}
self.edges = []
self.pointpatterns = {}
self._extractnetwork()
self.node_coords = dict((value, key) for key, value in self.nodes.iteritems())
#This is a spatial representation of the network.
self.edges = sorted(self.edges)
#Extract the graph
self.extractgraph()
self.node_list = sorted(self.nodes.values())
def _extractnetwork(self):
    """
    Used internally, to extract a network from a polyline shapefile.

    Populates self.nodes, self.edges, self.edge_lengths and
    self.adjacencylist from self.in_shp.
    """
    nodecount = 0
    shps = ps.open(self.in_shp)
    for shp in shps:
        vertices = shp.vertices
        for i, v in enumerate(vertices[:-1]):
            # Assign (or reuse) an integer id per vertex coordinate.
            # Catch only KeyError: the original bare `except:` would have
            # hidden any unrelated error as well.
            try:
                vid = self.nodes[v]
            except KeyError:
                self.nodes[v] = vid = nodecount
                nodecount += 1
            try:
                nvid = self.nodes[vertices[i + 1]]
            except KeyError:
                self.nodes[vertices[i + 1]] = nvid = nodecount
                nodecount += 1

            self.adjacencylist[vid].append(nvid)
            self.adjacencylist[nvid].append(vid)

            # Sort the edges so that mono-directional keys can be stored.
            edgenodes = sorted([vid, nvid])
            edge = tuple(edgenodes)
            self.edges.append(edge)
            length = util.compute_length(v, vertices[i + 1])
            self.edge_lengths[edge] = length
def extractgraph(self):
    """
    Using the existing network representation, create a graph based
    representation, by removing all nodes with neighbor incidence of two.
    That is, we assume these nodes are bridges between nodes with higher
    incidence.
    """
    self.graphedges = []
    self.edge_to_graph = {}
    self.graph_lengths = {}

    # Find all nodes with cardinality 2
    segment_nodes = []
    for k, v in self.adjacencylist.iteritems():
        # len(v) == 1  # cul-de-sac
        # len(v) == 2  # bridge segment
        # len(v) > 2   # intersection
        if len(v) == 2:
            segment_nodes.append(k)

    # Start with a copy of the spatial representation and iteratively
    # remove edges deemed to be segments
    self.graphedges = copy.deepcopy(self.edges)
    self.graph_lengths = copy.deepcopy(self.edge_lengths)
    # Mapping all the edges contained within a single graph represented edge
    self.graph_to_edges = {}

    # Grow each chain of cardinality-2 nodes into a "bridge" (a maximal
    # run of segment nodes); segment_nodes shrinks as nodes are consumed.
    bridges = []
    for s in segment_nodes:
        bridge = [s]
        neighbors = self._yieldneighbor(s, segment_nodes, bridge)
        while neighbors:
            cnode = neighbors.pop()
            segment_nodes.remove(cnode)
            bridge.append(cnode)
            newneighbors = self._yieldneighbor(cnode, segment_nodes, bridge)
            neighbors += newneighbors
        bridges.append(bridge)

    for bridge in bridges:
        if len(bridge) == 1:
            # Single bridge node: replace its two incident edges with one
            # edge connecting the node's two neighbors.
            n = self.adjacencylist[bridge[0]]
            newedge = tuple(sorted([n[0], n[1]]))
            # Identify the edges to be removed
            e1 = tuple(sorted([bridge[0], n[0]]))
            e2 = tuple(sorted([bridge[0], n[1]]))

            # Remove from the graph
            self.graphedges.remove(e1)
            self.graphedges.remove(e2)

            # Remove from the edge lengths
            length_e1 = self.edge_lengths[e1]
            length_e2 = self.edge_lengths[e2]
            self.graph_lengths.pop(e1, None)
            self.graph_lengths.pop(e2, None)
            self.graph_lengths[newedge] = length_e1 + length_e2

            # Update the pointers
            self.graph_to_edges[e1] = newedge
            self.graph_to_edges[e2] = newedge
        else:
            # Multi-node bridge: collapse the whole chain into one edge
            # between the two endpoints outside the bridge.
            cumulative_length = 0
            startend = {}
            redundant = set([])
            for b in bridge:
                for n in self.adjacencylist[b]:
                    if n not in bridge:
                        startend[b] = n
                    else:
                        redundant.add(tuple(sorted([b, n])))

            newedge = tuple(sorted(startend.values()))
            for k, v in startend.iteritems():
                redundant.add(tuple(sorted([k, v])))

            for r in redundant:
                self.graphedges.remove(r)
                cumulative_length += self.edge_lengths[r]
                self.graph_lengths.pop(r, None)
                self.graph_to_edges[r] = newedge
            self.graph_lengths[newedge] = cumulative_length

            self.graphedges.append(newedge)
    self.graphedges = sorted(self.graphedges)
def _yieldneighbor(self, node, segment_nodes, bridge):
"""
Used internally, this method traverses a bridge segement
to find the source and destination nodes.
"""
n = []
for i in self.adjacencylist[node]:
if i in segment_nodes and i not in bridge:
n.append(i)
return n
def contiguityweights(self, graph=True, weightings=None):
    """
    Create a contiguity based W object

    Parameters
    ----------
    graph : boolean
            {True, False} controls whether the W is generated using the
            spatial representation or the graph representation
    weightings : dict
            of lists of weightings for each edge

    Returns
    -------
     : W
            A PySAL W Object representing the binary adjacency of the
            network

    Examples
    --------
    >>> w = ntw.contiguityweights(graph=False)

    Using the W object, access to ESDA functionality is provided. First,
    a vector of attributes is created for all edges with observations,
    then a standard call to Moran is made and the result placed into
    ``res``:

    >>> res = ps.esda.moran.Moran(y, ntw.w, permutations=99)
    """
    # OrderedDict preserves edge insertion order so the W's id order is
    # deterministic. (The original also created a throwaway plain dict
    # here that was immediately overwritten; removed.)
    neighbors = OrderedDict()

    if graph:
        edges = self.graphedges
    else:
        edges = self.edges

    # Accumulate per-edge weights only when weightings were supplied.
    if weightings:
        weights = {}
    else:
        weights = None

    for key in edges:
        neighbors[key] = []
        if weightings:
            weights[key] = []
        for neigh in edges:
            if key == neigh:
                continue
            # Two edges are contiguous when they share an end node.
            if key[0] == neigh[0] or key[0] == neigh[1] \
                    or key[1] == neigh[0] or key[1] == neigh[1]:
                neighbors[key].append(neigh)
                if weightings:
                    weights[key].append(weightings[neigh])
            # TODO: Add a break condition - everything is sorted, so we
            # know when we have stepped beyond a possible neighbor.
            # if key[1] > neigh[1]:  # NOT THIS
            #     break

    return ps.weights.W(neighbors, weights=weights)
def distancebandweights(self, threshold):
    """
    Create distance-based weights.

    Parameters
    ----------
    threshold : float
        Distance threshold value

    Returns
    -------
    : W
        A PySAL W Object where nodes are neighbors when their network
        distance is below *threshold*.
    """
    # bug fix: the original called `hasattr(self.alldistances)` (one
    # argument), which raised TypeError and was swallowed by a bare
    # except, recomputing the distance matrix on every call.
    if not hasattr(self, 'alldistances'):
        self.node_distance_matrix()
    neighbor_query = np.where(self.distancematrix < threshold)
    neighbors = defaultdict(list)
    for i, n in enumerate(neighbor_query[0]):
        neigh = neighbor_query[1][i]
        # a node is not its own neighbor
        if n != neigh:
            neighbors[n].append(neigh)
    return ps.weights.W(neighbors)
def snapobservations(self, shapefile, name, idvariable=None, attribute=None):
    """
    Snap a point-pattern shapefile to this network object. The resulting
    pattern is stored in ``self.pointpatterns[name]``.

    Parameters
    ----------
    shapefile : str
        The PATH to the shapefile
    name : str
        Name to be assigned to the point dataset
    idvariable : str
        Column name to be used as ID variable
    attribute : bool
        Defines whether attributes should be extracted
    """
    pattern = PointPattern(shapefile, idvariable=idvariable,
                           attribute=attribute)
    self.pointpatterns[name] = pattern
    self._snap_to_edge(pattern)
def compute_distance_to_nodes(self, x, y, edge):
    """
    Given an observation snapped onto a network edge, return its distance
    to each of the two nodes bounding that edge.

    Parameters
    ----------
    x : float
        x-coordinate of the snapped point
    y : float
        y-coordinate of the snapped point
    edge : tuple
        (node0, node1) representation of the network edge

    Returns
    -------
    d1 : float
        distance to node0 (the node with the lesser id)
    d2 : float
        distance to node1 (the node with the greater id)
    """
    snapped = (x, y)
    d1 = util.compute_length(snapped, self.node_coords[edge[0]])
    d2 = util.compute_length(snapped, self.node_coords[edge[1]])
    return d1, d2
def _snap_to_edge(self, pointpattern):
    """
    Used internally to snap point observations to network edges.

    For each point, project it orthogonally onto each edge (Okabe
    method); edges are then examined in increasing order of that
    orthogonal distance until the projection actually falls on the edge
    segment or an endpoint is provably nearest.

    Parameters
    ----------
    pointpattern : obj
        PySAL Point Pattern Object

    Returns
    -------
    (stored on *pointpattern*)
    obs_to_edge : dict
        with edge as key and {point id: (x, y)} as value
    dist_to_node : dict
        with point id as key and {node: distance} as value
    snapped_coordinates : dict
        with point id as key and snapped (x, y) as value
    """
    obs_to_edge = {}
    dist_to_node = {}
    pointpattern.snapped_coordinates = {}
    for pt_index, point in pointpattern.points.iteritems():
        x0 = point['coordinates'][0]
        y0 = point['coordinates'][1]
        d = {}        # orthogonal distance -> candidate index
        vectors = {}  # candidate index -> edge geometry + projection factor
        c = 0
        # Components of this for loop can be pre computed and cached,
        # like denom to distance =
        for edge in self.edges:
            xi = self.node_coords[edge[0]][0]
            yi = self.node_coords[edge[0]][1]
            xi1 = self.node_coords[edge[1]][0]
            yi1 = self.node_coords[edge[1]][1]
            # signed area-style numerator of the point-line distance
            num = ((yi1 - yi)*(x0-xi)-(xi1-xi)*(y0-yi))
            denom = ((yi1-yi)**2 + (xi1-xi)**2)
            k = num / float(denom)
            distance = abs(num) / math.sqrt(((yi1-yi)**2 + (xi1-xi)**2))
            vectors[c] = (xi, xi1, yi, yi1, k, edge)
            d[distance] = c
            c += 1
        # candidates ordered by ascending orthogonal distance
        min_dist = SortedEdges(sorted(d.items()))
        for dist, vector_id in min_dist.iteritems():
            value = vectors[vector_id]
            xi = value[0]
            xi1 = value[1]
            yi = value[2]
            yi1 = value[3]
            k = value[4]
            edge = value[5]
            # Okabe Method: foot of the perpendicular from (x0, y0)
            x = x0 - k * (yi1 - yi)
            y = y0 + k * (xi1 - xi)
            # Compute the distance from the new point to the nodes
            d1, d2 = self.compute_distance_to_nodes(x, y, edge)
            # NOTE(review): `and` binds tighter than `or`, so this reads
            # as  a or (b and c) or d  -- the intended in-segment test
            # looks like (a or b) and (c or d); confirm before relying
            # on it.
            if xi <= x <= xi1 or xi1 <= x <= xi and yi <= y <= yi1 or yi1 <= y <= yi:
                # print "{} intersections edge {} at {}".format(pt_index, edge, (x,y))
                # We are assuming undirected - this should never be true.
                if edge not in obs_to_edge.keys():
                    obs_to_edge[edge] = {pt_index: (x, y)}
                else:
                    obs_to_edge[edge][pt_index] = (x, y)
                dist_to_node[pt_index] = {edge[0]: d1, edge[1]: d2}
                pointpattern.snapped_coordinates[pt_index] = (x, y)
                break
            else:
                # either pi or pi+1 are the nearest point on that edge.
                # If this point is closer than the next distance, we can
                # break, the observation intersects the node with the
                # shorter distance.
                pi = (xi, yi)
                pi1 = (xi1, yi1)
                p0 = (x0, y0)
                # Maybe this call to ps.cg should go as well - as per
                # the call in the class above
                dist_pi = ps.cg.standalone.get_points_dist(p0, pi)
                dist_pi1 = ps.cg.standalone.get_points_dist(p0, pi1)
                if dist_pi < dist_pi1:
                    node_dist = dist_pi
                    (x, y) = pi
                else:
                    node_dist = dist_pi1
                    (x, y) = pi1
                d1, d2 = self.compute_distance_to_nodes(x, y, edge)
                # nearer than the next candidate edge's orthogonal
                # distance -> no later edge can beat this endpoint
                if node_dist < min_dist.next_key(dist):
                    if edge not in obs_to_edge.keys():
                        obs_to_edge[edge] = {pt_index: (x, y)}
                    else:
                        obs_to_edge[edge][pt_index] = (x, y)
                    dist_to_node[pt_index] = {edge[0]: d1, edge[1]: d2}
                    pointpattern.snapped_coordinates[pt_index] = (x, y)
                    break
    # invert: node id -> point ids snapped to an incident edge
    obs_to_node = defaultdict(list)
    for k, v in obs_to_edge.iteritems():
        keys = v.keys()
        obs_to_node[k[0]] = keys
        obs_to_node[k[1]] = keys
    pointpattern.obs_to_edge = obs_to_edge
    pointpattern.dist_to_node = dist_to_node
    pointpattern.obs_to_node = obs_to_node
def count_per_edge(self, obs_on_network, graph=True):
    """
    Compute the number of observations per network edge.

    Parameters
    ----------
    obs_on_network : dict
        of observations on the network:
        {(edge): {pt_id: (coords)}} or {edge: [(coord), (coord)]}
    graph : boolean
        If True, counts on bridge edges are aggregated onto their
        parent graph edge via self.graph_to_edges.

    Returns
    -------
    counts : dict
        {(edge): count}

    Example
    -------
    Note that this passes the obs_to_edge attribute of a point pattern
    snapped to the network.

    >>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge,
                                    graph=False)
    """
    counts = {}
    if graph:
        # .items() works on both Python 2 and 3 (iteritems is py2-only)
        for key, observations in obs_on_network.items():
            cnt = len(observations)
            # bridge edges roll up onto their parent graph edge
            if key in self.graph_to_edges:
                key = self.graph_to_edges[key]
            # bug fix: the bare `except:` here could hide real errors;
            # only a missing key is expected
            try:
                counts[key] += cnt
            except KeyError:
                counts[key] = cnt
    else:
        for key in obs_on_network:
            counts[key] = len(obs_on_network[key])
    return counts
def _newpoint_coords(self, edge, distance):
"""
Used internally to compute new point coordinates during snapping
"""
x1 = self.node_coords[edge[0]][0]
y1 = self.node_coords[edge[0]][1]
x2 = self.node_coords[edge[1]][0]
y2 = self.node_coords[edge[1]][1]
m = (y2 - y1) / (x2 - x1)
if x1 > x2:
x0 = x1 - distance / math.sqrt(1 + m**2)
elif x1 < x2:
x0 = x1 + distance / math.sqrt(1 + m**2)
y0 = m * (x0 - x1) + y1
return x0, y0
def simulate_observations(self, count, distribution='uniform'):
    """
    Generate a simulated point pattern on the network.

    Parameters
    ----------
    count : integer
        number of points to create, or the mean of the Poisson
        distribution when distribution='poisson'
    distribution : string
        {'uniform', 'poisson'} distribution of random points

    Returns
    -------
    simpts : SimulatedPointPattern
        snapped coordinates, node assignments, and node distances for
        the simulated points

    Raises
    ------
    ValueError
        if *distribution* is not one of the supported names

    Example
    -------
    >>> npts = ntw.pointpatterns['crimes'].npoints
    >>> sim = ntw.simulate_observations(npts)
    >>> sim
    <network.SimulatedPointPattern instance at 0x1133d8710>
    """
    simpts = SimulatedPointPattern()
    # Cumulative network length: each edge occupies an interval of
    # [0, totallength), so a uniform draw picks an edge with
    # probability proportional to its length.
    edges = []
    lengths = np.zeros(len(self.edge_lengths))
    for i, key in enumerate(self.edge_lengths):
        edges.append(key)
        lengths[i] = self.edge_lengths[key]
    stops = np.cumsum(lengths)
    totallength = stops[-1]
    # bug fix: the original used `distribution is 'uniform'`, an
    # identity comparison that only works by string-interning accident
    if distribution == 'uniform':
        nrandompts = np.random.uniform(0, totallength, size=(count,))
    elif distribution == 'poisson':
        nrandompts = np.random.uniform(
            0, totallength, size=(np.random.poisson(count),))
    else:
        # previously an unknown name fell through and raised NameError
        raise ValueError("distribution must be 'uniform' or 'poisson'")
    for i, r in enumerate(nrandompts):
        # first cumulative stop beyond r identifies the edge
        idx = np.where(r < stops)[0][0]
        assignment_edge = edges[idx]
        distance_from_start = stops[idx] - r
        # Populate the coordinates dict
        x0, y0 = self._newpoint_coords(assignment_edge, distance_from_start)
        simpts.snapped_coordinates[i] = (x0, y0)
        simpts.obs_to_node[assignment_edge[0]].append(i)
        simpts.obs_to_node[assignment_edge[1]].append(i)
        # Populate the distance to node
        simpts.dist_to_node[i] = {
            assignment_edge[0]: distance_from_start,
            assignment_edge[1]: self.edge_lengths[edges[idx]] - distance_from_start}
    simpts.points = simpts.snapped_coordinates
    simpts.npoints = len(simpts.points)
    return simpts
def enum_links_node(self, v0):
    """
    Return the edges (links) incident to a node.

    Parameters
    ----------
    v0 : int
        node id

    Returns
    -------
    links : list
        list of sorted edge tuples adjacent to the node
    """
    return [tuple(sorted([neighbor, v0]))
            for neighbor in self.adjacencylist[v0]]
def node_distance_matrix(self):
    """Compute all-pairs shortest-path distances between network nodes.

    Populates ``self.alldistances`` ({node: (distances, predecessor tree)})
    and ``self.distancematrix`` (an nnodes x nnodes ndarray).
    """
    self.alldistances = {}
    nnodes = len(self.node_list)
    self.distancematrix = np.empty((nnodes, nnodes))
    for node in self.node_list:
        # single-source Dijkstra over the whole network (n=inf: no cutoff)
        distance, pred = util.dijkstra(self, self.edge_lengths, node, n=float('inf'))
        pred = np.array(pred)
        tree = util.generatetree(pred)
        self.alldistances[node] = (distance, tree)
        self.distancematrix[node] = distance
def allneighbordistances(self, sourcepattern, destpattern=None):
    """
    Compute either all distances between i and j in a single point pattern
    or all distances between each i from a source pattern and all j
    from a destination pattern.

    Parameters
    ----------
    sourcepattern : str
        The key of a point pattern snapped to the network.
    destpattern : str
        (Optional) The key of a point pattern snapped to the network.

    Returns
    -------
    nearest : array (n,n)
        An array of shape (n,n) storing distances between all points;
        the diagonal is filled with np.nan.
    """
    if not hasattr(self, 'alldistances'):
        self.node_distance_matrix()
    src_indices = sourcepattern.points.keys()
    nsource_pts = len(src_indices)
    dist_to_node = sourcepattern.dist_to_node
    if destpattern == None:
        destpattern = sourcepattern
    dest_indices = destpattern.points.keys()
    ndest_pts = len(dest_indices)
    # copy so points can be removed as processed; symmetry is exploited
    # (each pair is computed once and mirrored)
    searchpts = copy.deepcopy(dest_indices)
    nearest = np.empty((nsource_pts, ndest_pts))
    nearest[:] = np.inf
    # the two nodes bounding the edge each point is snapped to
    searchnodes = {}
    for s in searchpts:
        e1, e2 = dist_to_node[s].keys()
        searchnodes[s] = (e1, e2)
    for p1 in src_indices:
        # Get the source nodes and dist to source nodes
        source1, source2 = searchnodes[p1]
        set1 = set(searchnodes[p1])
        # distance from node1 to p, distance from node2 to p
        sdist1, sdist2 = dist_to_node[p1].values()
        searchpts.remove(p1)
        for p2 in searchpts:
            dest1, dest2 = searchnodes[p2]
            set2 = set(searchnodes[p2])
            if set1 == set2:  # same edge
                # NOTE(review): uses the straight-line distance between
                # the two snapped coordinates -- valid only if the
                # shared edge is a straight segment; confirm.
                x1, y1 = sourcepattern.snapped_coordinates[p1]
                x2, y2 = destpattern.snapped_coordinates[p2]
                xd = x1-x2
                yd = y1-y2
                nearest[p1, p2] = np.sqrt(xd*xd + yd*yd)
                nearest[p2, p1] = nearest[p1, p2]
            else:
                ddist1, ddist2 = dist_to_node[p2].values()
                # node-to-node shortest paths between the four bounding nodes
                d11 = self.alldistances[source1][0][dest1]
                d21 = self.alldistances[source2][0][dest1]
                d12 = self.alldistances[source1][0][dest2]
                d22 = self.alldistances[source2][0][dest2]
                # find shortest distance from path passing through each of
                # two origin nodes to first destination node
                sd_1 = d11 + sdist1
                sd_21 = d21 + sdist2
                if sd_1 > sd_21:
                    sd_1 = sd_21
                # now add point to node one distance on destination edge
                len_1 = sd_1 + ddist1
                # repeat but now for paths entering at second node of
                # second edge
                sd_2 = d12 + sdist1
                sd_22 = d22 + sdist2
                b = 0  # flags which origin node won; currently unused
                if sd_2 > sd_22:
                    sd_2 = sd_22
                    b = 1
                len_2 = sd_2 + ddist2
                # now find shortest length path between the point 1 on
                # edge 1 and point 2 on edge 2, and assign symmetrically
                sp_12 = len_1
                if len_1 > len_2:
                    sp_12 = len_2
                nearest[p1, p2] = sp_12
                nearest[p2, p1] = sp_12
                # print p1, p2, sp_12
    # a point is not its own neighbor
    np.fill_diagonal(nearest, np.nan)
    return nearest
def nearestneighbordistances(self, sourcepattern, destpattern=None):
    """
    Compute the interpattern nearest neighbor distances or the
    intrapattern nearest neighbor distances between a source pattern
    and a destination pattern.

    Parameters
    ----------
    sourcepattern : str
        The key of a point pattern snapped to the network.
    destpattern : str
        (Optional) The key of a point pattern snapped to the network.

    Returns
    -------
    nearest : ndarray (n,2)
        column [:,0] contains the id of the nearest neighbor and
        column [:,1] contains the distance.
    """
    if sourcepattern not in self.pointpatterns.keys():
        raise KeyError("Available point patterns are {}".format(
            self.pointpatterns.keys()))
    if not hasattr(self, 'alldistances'):
        self.node_distance_matrix()
    pt_indices = self.pointpatterns[sourcepattern].points.keys()
    dist_to_node = self.pointpatterns[sourcepattern].dist_to_node
    nearest = np.zeros((len(pt_indices), 2), dtype=np.float32)
    nearest[:, 1] = np.inf
    if destpattern == None:
        destpattern = sourcepattern
    # copy so points can be removed as processed; each pair is visited
    # once and both points' running minima are updated
    searchpts = copy.deepcopy(pt_indices)
    # the two nodes bounding the edge each point is snapped to
    searchnodes = {}
    for s in searchpts:
        e1, e2 = dist_to_node[s].keys()
        searchnodes[s] = (e1, e2)
    for p1 in pt_indices:
        # Get the source nodes and dist to source nodes
        source1, source2 = searchnodes[p1]
        sdist1, sdist2 = dist_to_node[p1].values()
        searchpts.remove(p1)
        for p2 in searchpts:
            dest1, dest2 = searchnodes[p2]
            ddist1, ddist2 = dist_to_node[p2].values()
            # network distance via each (source node, destination node) pair
            source1_to_dest1 = sdist1 + self.alldistances[source1][0][dest1] + ddist1
            source1_to_dest2 = sdist1 + self.alldistances[source1][0][dest2] + ddist2
            source2_to_dest1 = sdist2 + self.alldistances[source2][0][dest1] + ddist1
            source2_to_dest2 = sdist2 + self.alldistances[source2][0][dest2] + ddist2
            if source1_to_dest1 < nearest[p1, 1]:
                nearest[p1, 0] = p2
                nearest[p1, 1] = source1_to_dest1
            if source1_to_dest1 < nearest[p2, 1]:
                nearest[p2, 0] = p1
                nearest[p2, 1] = source1_to_dest1
            if source1_to_dest2 < nearest[p1, 1]:
                nearest[p1, 0] = p2
                nearest[p1, 1] = source1_to_dest2
            # bug fix: this comparison previously tested source1_to_dest1
            # while assigning source1_to_dest2, so a worse distance could
            # overwrite p2's recorded nearest neighbor
            if source1_to_dest2 < nearest[p2, 1]:
                nearest[p2, 0] = p1
                nearest[p2, 1] = source1_to_dest2
            if source2_to_dest1 < nearest[p1, 1]:
                nearest[p1, 0] = p2
                nearest[p1, 1] = source2_to_dest1
            if source2_to_dest1 < nearest[p2, 1]:
                nearest[p2, 0] = p1
                nearest[p2, 1] = source2_to_dest1
            if source2_to_dest2 < nearest[p1, 1]:
                nearest[p1, 0] = p2
                nearest[p1, 1] = source2_to_dest2
            if source2_to_dest2 < nearest[p2, 1]:
                nearest[p2, 0] = p1
                nearest[p2, 1] = source2_to_dest2
    return nearest
def NetworkF(self, pointpattern, nsteps=10, permutations=99,
             threshold=0.2, distribution='uniform',
             lowerbound=None, upperbound=None):
    """
    Computes a network constrained F-Function.

    Parameters
    ----------
    pointpattern : object
        A PySAL point pattern object
    nsteps : int
        The number of steps at which the count of the nearest
        neighbors is computed
    permutations : int
        The number of permutations to perform (default 99)
    threshold : float
        The level at which significance is computed (default 0.2)
    distribution : str
        The distribution from which random points are sampled:
        'uniform' or 'poisson'
    lowerbound : float
        The lower bound at which the F-function is computed (default 0)
    upperbound : float
        The upper bound at which the F-function is computed.
        Defaults to the maximum observed nearest neighbor distance.

    Returns
    -------
    NetworkF : object
        A network F class instance
    """
    return NetworkF(self, pointpattern, nsteps=nsteps,
                    permutations=permutations, threshold=threshold,
                    distribution=distribution, lowerbound=lowerbound,
                    upperbound=upperbound)
def NetworkG(self, pointpattern, nsteps=10, permutations=99,
             threshold=0.5, distribution='uniform',
             lowerbound=None, upperbound=None):
    """
    Computes a network constrained G-Function.

    Parameters
    ----------
    pointpattern : object
        A PySAL point pattern object
    nsteps : int
        The number of steps at which the count of the nearest
        neighbors is computed
    permutations : int
        The number of permutations to perform (default 99)
    threshold : float
        The level at which significance is computed.
        0.5 would be 97.5% and 2.5%
    distribution : str
        The distribution from which random points are sampled:
        'uniform' or 'poisson'
    lowerbound : float
        The lower bound at which the G-function is computed (default 0)
    upperbound : float
        The upper bound at which the G-function is computed.
        Defaults to the maximum observed nearest neighbor distance.

    Returns
    -------
    NetworkG : object
        A network G class object
    """
    return NetworkG(self, pointpattern, nsteps=nsteps,
                    permutations=permutations, threshold=threshold,
                    distribution=distribution, lowerbound=lowerbound,
                    upperbound=upperbound)
def NetworkK(self, pointpattern, nsteps=10, permutations=99,
             threshold=0.5, distribution='uniform',
             lowerbound=None, upperbound=None):
    """
    Computes a network constrained K-Function.

    (The original docstring said "G-Function"; this method delegates to
    the NetworkK analysis class.)

    Parameters
    ----------
    pointpattern : object
        A PySAL point pattern object
    nsteps : int
        The number of steps at which the count of the nearest
        neighbors is computed
    permutations : int
        The number of permutations to perform (default 99)
    threshold : float
        The level at which significance is computed.
        0.5 would be 97.5% and 2.5%
    distribution : str
        The distribution from which random points are sampled:
        'uniform' or 'poisson'
    lowerbound : float
        The lower bound at which the K-function is computed (default 0)
    upperbound : float
        The upper bound at which the K-function is computed.
        Defaults to the maximum observed nearest neighbor distance.

    Returns
    -------
    NetworkK : object
        A network K class object
    """
    return NetworkK(self, pointpattern, nsteps=nsteps,
                    permutations=permutations, threshold=threshold,
                    distribution=distribution, lowerbound=lowerbound,
                    upperbound=upperbound)
def segment_edges(self, distance):
    """
    Segment all of the edges in the network at a fixed distance.

    Parameters
    ----------
    distance : float
        The distance at which edges are split

    Returns
    -------
    sn : object
        A new PySAL Network object with the segmented edges; any
        snapped point patterns are re-snapped to the new edges.

    Example
    -------
    >>> n200 = ntw.segment_edges(200.0)
    """
    sn = Network()
    # deep-copy all topology so the original network is untouched
    sn.adjacencylist = copy.deepcopy(self.adjacencylist)
    sn.edge_lengths = copy.deepcopy(self.edge_lengths)
    sn.edges = set(copy.deepcopy(self.edges))
    sn.node_coords = copy.deepcopy(self.node_coords)
    sn.node_list = copy.deepcopy(self.node_list)
    sn.nodes = copy.deepcopy(self.nodes)
    sn.pointpatterns = copy.deepcopy(self.pointpatterns)
    sn.in_shp = self.in_shp
    # new nodes get ids above the current maximum
    current_node_id = max(self.nodes.values())
    newedges = set()
    removeedges = set()
    for e in sn.edges:
        length = sn.edge_lengths[e]
        interval = distance
        totallength = 0
        currentstart = startnode = e[0]
        endnode = e[1]
        # If the edge will be segmented, remove the
        # current edge from the adjacency list
        if interval < length:
            sn.adjacencylist[e[0]].remove(e[1])
            sn.adjacencylist[e[1]].remove(e[0])
            sn.edge_lengths.pop(e, None)
            removeedges.add(e)
        else:
            # edge shorter than the interval: keep it intact
            continue
        while totallength < length:
            currentstop = current_node_id
            if totallength + interval > length:
                # final (short) segment: reuse the original end node
                currentstop = endnode
                interval = length - totallength
                totallength = length
            else:
                current_node_id += 1
                currentstop = current_node_id
                totallength += interval
            # Compute the new node coordinate
            newx, newy = self._newpoint_coords(e, totallength)
            # Update node_list
            if currentstop not in sn.node_list:
                sn.node_list.append(currentstop)
            # Update nodes and node_coords
            sn.node_coords[currentstop] = newx, newy
            sn.nodes[(newx, newy)] = currentstop
            # Update the adjacencylist
            sn.adjacencylist[currentstart].append(currentstop)
            sn.adjacencylist[currentstop].append(currentstart)
            # Add the new edge to the edge dict
            # Iterating over this, so we need to add after iterating
            newedges.add(tuple(sorted([currentstart, currentstop])))
            # Modify edge_lengths
            sn.edge_lengths[tuple(sorted([currentstart, currentstop]))] = interval
            # Increment the start to the stop
            currentstart = currentstop
    sn.edges.update(newedges)
    sn.edges.difference_update(removeedges)
    sn.edges = list(sn.edges)
    # Update the point pattern snapping
    for instance in sn.pointpatterns.itervalues():
        sn._snap_to_edge(instance)
    return sn
def savenetwork(self, filename):
    """
    Save this network to disk as a pickled binary file.

    Parameters
    ----------
    filename : str
        The filename where the network should be saved. This should be
        a full PATH or the file is saved wherever this method is
        called from.

    Example
    --------
    >>> ntw.savenetwork('mynetwork.pkl')
    """
    with open(filename, 'wb') as outfile:
        cPickle.dump(self, outfile, protocol=2)
@staticmethod
def loadnetwork(filename):
    """Load a pickled Network from *filename* and return it."""
    with open(filename, 'rb') as infile:
        network = cPickle.load(infile)
    return network
class PointPattern():
    """
    A stub point pattern class used to store a point pattern.

    This class is monkey patched with network specific attributes
    when the points are snapped to a network.

    In the future this class may be replaced with a generic point
    pattern class.

    Parameters
    ----------
    shapefile : string
        input shapefile
    idvariable : string
        field in the shapefile to use as an idvariable
    attribute : boolean
        {False, True} A flag to indicate whether all attributes
        are tagged to this class.

    Attributes
    ----------
    points : dict
        key is the point id; value is a dict with 'coordinates' and
        'properties' entries
    npoints : integer
        the number of points
    """
    def __init__(self, shapefile, idvariable=None, attribute=False):
        self.points = {}
        self.npoints = 0
        if idvariable:
            ids = get_ids(shapefile, idvariable)
        else:
            ids = None
        pts = ps.open(shapefile)
        # Get attributes if requested (idiom fix: `attribute == True`
        # replaced with a plain truthiness test)
        if attribute:
            dbname = os.path.splitext(shapefile)[0] + '.dbf'
            db = ps.open(dbname)
        else:
            db = None
        for i, pt in enumerate(pts):
            # key on the user-supplied id when available, else the index;
            # this collapses the original four-way branch duplication
            key = ids[i] if ids else i
            self.points[key] = {'coordinates': pt,
                                'properties': db[i] if db else None}
        pts.close()
        if db:
            db.close()
        self.npoints = len(self.points)
class SimulatedPointPattern():
    """
    Struct style class to mirror the Point Pattern Class.

    If the PointPattern class has methods, it might make sense to
    make this a child of that class.

    This class is not intended to be used by the external user.
    """
    def __init__(self):
        # all attributes start empty; simulate_observations fills them in
        self.npoints = 0
        self.obs_to_edge = {}
        self.obs_to_node = defaultdict(list)
        self.dist_to_node = {}
        self.snapped_coordinates = {}
class SortedEdges(OrderedDict):
    """An OrderedDict of snap candidates keyed by ascending distance,
    with lookup of the key following a given key.

    NOTE(review): this reaches into CPython 2's pure-Python OrderedDict
    internals (``_OrderedDict__map`` / ``_OrderedDict__root``); it will
    break on Python 3's C implementation -- confirm before porting.
    """
    def next_key(self, key):
        # the linked-list node for `key` is [prev, next, key]; follow `next`
        next = self._OrderedDict__map[key][1]
        if next is self._OrderedDict__root:
            raise ValueError("{!r} is the last key.".format(key))
        return next[2]
    def first_key(self):
        # iteration order is insertion (sorted) order, so the first
        # yielded key is the smallest distance
        for key in self: return key
        raise ValueError("No sorted edges remain.")
|
{
"content_hash": "1e2e6170513abd71fbe1308d18b30a78",
"timestamp": "",
"source": "github",
"line_count": 1167,
"max_line_length": 128,
"avg_line_length": 33.94515852613539,
"alnum_prop": 0.5221891250567982,
"repo_name": "darribas/pysal",
"id": "6ed98581c4dfaed113e2adfcc6d108c06f262229",
"size": "39614",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pysal/network/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10152"
},
{
"name": "Makefile",
"bytes": "408"
},
{
"name": "Python",
"bytes": "2549924"
}
],
"symlink_target": ""
}
|
from . import app
from datetime import date
from flask import url_for, current_app
from flask.ext.themes2 import render_theme_template, get_theme
from markdown import markdown
from smartypants import smartyPants
import bleach
import bs4
import codecs
import datetime
import jwt
import os
import os.path
import random
import re
import requests
import shutil
import unicodedata
import urllib
import hmac
import hashlib
# Widen bleach's sanitizer whitelist for foreign HTML
# (see clean_foreign_html below).
bleach.ALLOWED_TAGS += ['img', 'p', 'br', 'marquee', 'blink']
bleach.ALLOWED_ATTRIBUTES.update({
    'img': ['src', 'alt', 'title']
})

# Patterns recognizing silo profile/permalink URLs and in-text mentions.
TWITTER_PROFILE_RE = re.compile(r'https?://(?:www\.)?twitter\.com/(\w+)')
TWITTER_RE = re.compile(r'https?://(?:www\.|mobile\.)?twitter\.com/(\w+)/status(?:es)?/(\w+)')
FACEBOOK_PROFILE_RE = re.compile(r'https?://(?:www\.)?facebook\.com/([a-zA-Z0-9._-]+)')
FACEBOOK_RE = re.compile(r'https?://(?:www\.)?facebook\.com/([a-zA-Z0-9._-]+)/\w+/(\w+)')
YOUTUBE_RE = re.compile(r'https?://(?:www.)?youtube\.com/watch\?v=(\w+)')
INSTAGRAM_RE = re.compile(r'https?://instagram\.com/p/(\w+)')
# legacy [[Full Name|Display]] person references (see markdown_filter)
PEOPLE_RE = re.compile(r"\[\[([\w ]+)(?:\|([\w\-'. ]+))?\]\]")
# markdown links with relative (no leading /) targets
RELATIVE_PATH_RE = re.compile('\[([^\]]*)\]\(([^/)]+)\)')
# @username mentions not preceded by a word character
AT_USERNAME_RE = re.compile(r"""(?<!\w)@([a-zA-Z0-9_]+)(?=($|[\s,:;.'"]))""")
LINK_RE = re.compile(
    # optional schema
    r'\b([a-z]{3,9}://)?'
    # hostname and port
    r'((?:[a-z0-9\-]+\.)+[a-z]{2,4}(?::\d{2,6})?'
    # path
    r'(?:(?:/(?:[a-zA-Z0-9\-_~.;:$?&%#@()/=]*[a-zA-Z0-9\-_$?#/])?)|\b))'
)
def isoparse(s):
    """Parse a (naive, UTC) datetime string in ISO8601 format.

    Accepts 'YYYY-MM-DDTHH:MM:SS' or a bare 'YYYY-MM-DD' date; returns
    None for falsy input. Raises ValueError for any other format.
    """
    if s:
        try:
            return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            # bug fix: the bare `except:` here also swallowed unrelated
            # errors; only a format mismatch should trigger the fallback
            return datetime.datetime.strptime(s, '%Y-%m-%d')
def isoparse_with_tz(s):
    """Parse an ISO8601 datetime carrying a numeric timezone offset."""
    if not s:
        return s
    return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S%z')
def isoformat(date):
    """Serialize a date/datetime as ISO8601 without a timezone suffix.

    Aware datetimes are converted to UTC first; microseconds are
    dropped. Returns None for falsy input.
    """
    if not date:
        return None
    plain_date = (isinstance(date, datetime.date)
                  and not isinstance(date, datetime.datetime))
    if plain_date:
        return date.isoformat()
    if date.tzinfo:
        date = date.astimezone(datetime.timezone.utc).replace(tzinfo=None)
    return date.replace(microsecond=0).isoformat('T')
def isoformat_with_tz(date):
    """Serialize *date* in ISO8601 with its offset; naive values get UTC."""
    is_naive = hasattr(date, 'tzinfo') and not date.tzinfo
    if is_naive:
        date = date.replace(tzinfo=datetime.timezone.utc)
    return date.isoformat(sep='T')
def normalize_tag(tag):
    """Canonicalize a tag: NFKD-normalize, lowercase, and strip
    spaces, dashes, and underscores."""
    folded = unicodedata.normalize('NFKD', tag).lower()
    return re.sub(r'[ _\-]', '', folded)
def filter_empty_keys(data):
    """Recursively drop falsy entries from nested lists and dicts;
    any other value is returned unchanged."""
    if isinstance(data, list):
        filtered = (filter_empty_keys(item) for item in data)
        return [item for item in filtered if item]
    if isinstance(data, dict):
        filtered = ((key, filter_empty_keys(value))
                    for key, value in data.items())
        return {key: value for key, value in filtered if value}
    return data
def download_resource(url, path):
    """Fetch *url* (resolved against the configured site_url) and stream
    the response body to *path*, creating parent directories as needed.

    Raises requests.HTTPError on a non-2xx response.
    """
    from .models import get_settings
    app.logger.debug("downloading {} to {}".format(url, path))
    response = requests.get(urllib.parse.urljoin(get_settings().site_url, url),
                            stream=True, timeout=10)
    response.raise_for_status()
    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    with open(path, 'wb') as f:
        # stream in small chunks to avoid holding the file in memory
        for chunk in response.iter_content(512):
            f.write(chunk)
def urls_match(url1, url2):
    """True when two URLs share host and path (schema, query, and
    fragment are ignored)."""
    if url1 == url2:
        return True
    parsed1 = urllib.parse.urlparse(url1)
    parsed2 = urllib.parse.urlparse(url2)
    return (parsed1.netloc, parsed1.path) == (parsed2.netloc, parsed2.path)
def url_to_link(url, soup):
    """Build an <a> element (in *soup*'s document) for *url* whose link
    text is the schema-less form of the URL."""
    a = soup.new_tag('a', href=url)
    a.string = prettify_url(url)
    return a
def person_to_microcard(contact, nick, soup):
    """Build an h-card <a> element for a known contact, or a plain
    twitter.com link for an unknown @nick."""
    if contact:
        # NOTE(review): Flask's url_for takes the endpoint name plus
        # keyword values; passing `nick` positionally looks wrong --
        # confirm against the `contact_by_name` route.
        url = contact.url or url_for('contact_by_name', nick)
        a_tag = soup.new_tag('a', href=url)
        a_tag['class'] = ['microcard', 'h-card']
        image = contact.image
        if image:
            # small avatar served through the pilbox image proxy
            image = construct_imageproxy_url(image, 26)
            image_tag = soup.new_tag('img', src=image)
            a_tag.append(image_tag)
            a_tag.append(contact.name)
        else:
            a_tag.append('@' + contact.name)
    else:
        # unknown nick: assume it refers to a twitter handle
        a_tag = soup.new_tag('a', href='https://twitter.com/' + nick)
        a_tag.string = '@' + nick
    return a_tag
def autolink(plain, url_processor=url_to_link,
             person_processor=person_to_microcard):
    """Replace bare URLs and @names in an HTML document with <a> elements.

    Text already inside links, scripts, code blocks, or media elements
    is left untouched. Returns the serialized contents of <body>.
    """
    blacklist = ('a', 'script', 'pre', 'code', 'embed', 'object',
                 'audio', 'video')
    soup = bs4.BeautifulSoup(plain)

    def bs4_sub(regex, repl):
        """Process text elements in a BeautifulSoup document with a regex and
        replacement string.

        :param string regex: a regular expression whose matches will
            be replaced
        :param function repl: a function that takes a Match object, and
            returns text or a new HTML node
        """
        for txt in soup.find_all(text=True):
            # skip text nested under any blacklisted tag
            if any(p.name in blacklist for p in txt.parents):
                continue
            nodes = []
            start = 0
            for m in regex.finditer(txt):
                # keep the plain text before the match, then the new node
                nodes.append(txt[start:m.start()])
                nodes.append(repl(m))
                start = m.end()
            if not nodes:
                continue
            nodes.append(txt[start:])
            # splice the new node sequence in place of the text node
            parent = txt.parent
            ii = parent.contents.index(txt)
            txt.extract()
            for offset, node in enumerate(nodes):
                parent.insert(ii + offset, node)

    def link_repl(m):
        # default to http:// when the schema was omitted
        url = (m.group(1) or 'http://') + m.group(2)
        return url_processor(url, soup)

    def process_nick(m):
        from . import db
        from .models import Nick
        name = m.group(1)
        # case-insensitive nick lookup
        nick = Nick.query.filter(
            db.func.lower(Nick.name) == db.func.lower(name)).first()
        contact = nick and nick.contact
        processed = person_processor(contact, name, soup)
        if processed:
            return processed
        # leave the original @name text when no replacement was produced
        return m.group(0)

    if url_processor:
        bs4_sub(LINK_RE, link_repl)
    if person_processor:
        bs4_sub(AT_USERNAME_RE, process_nick)
    return ''.join(str(t) for t in soup.body.contents) if soup.body else ''
# single-character codes used in short-URL tags to encode the post type
TAG_TO_TYPE = {
    'n': 'note',
    'a': 'article',
    'r': 'reply',
    's': 'share',
    'l': 'like',
    'c': 'checkin',
    'p': 'photo',
    'b': 'bookmark',
}
# inverse mapping: post type -> short-URL code
TYPE_TO_TAG = {v: k for k, v in TAG_TO_TYPE.items()}
# epoch ordinal for NewBase60 short dates (days since 1970-01-01)
BASE_ORDINAL = date(1970, 1, 1).toordinal()
def parse_type(tag):
    """Decode the post type from a short-URL tag's first character
    (None when the code is unknown)."""
    type_enc = tag[0]
    return TAG_TO_TYPE.get(type_enc)
def parse_date(tag):
    """Decode the 3-character NewBase60 date (tag chars 1-3) to a date.

    Returns None when the encoded ordinal is zero or the characters are
    not valid NewBase60 (logged as a warning).
    """
    try:
        date_enc = tag[1:4]
        ordinal = base60_decode(date_enc)
        if ordinal:
            return date_from_ordinal(ordinal)
    except ValueError:
        app.logger.warn("Could not parse base60 date %s", tag)
def parse_index(tag):
    """Return the index portion of a short-URL tag (everything after
    the 1-char type code and 3-char date)."""
    return tag[4:]
def date_to_ordinal(date0):
    """Days elapsed since the 1970-01-01 epoch for *date0*."""
    return date0.toordinal() - BASE_ORDINAL
def date_from_ordinal(ordinal):
    """Inverse of date_to_ordinal: epoch-relative day count -> date."""
    return date.fromordinal(ordinal + BASE_ORDINAL)
def tag_for_post_type(post_type):
    """Short-URL code for *post_type* (None when the type is unknown)."""
    return TYPE_TO_TAG.get(post_type)
# use tantek's NewBase60 http://tantek.pbworks.com/w/page/19402946/NewBase60
RADIX = list("0123456789ABCDEFGHJKLMNPQRSTUVWXYZ_abcdefghijkmnopqrstuvwxyz")
def base60_encode(n):
    """Encode a non-negative integer in NewBase60 (returns '' for 0)."""
    base = len(RADIX)
    digits = []
    while n > 0:
        n, remainder = divmod(n, base)
        digits.append(RADIX[remainder])
    return ''.join(reversed(digits))
def base60_decode(s):
    """Decode a NewBase60 string to an integer.

    Raises ValueError for characters outside the NewBase60 alphabet.
    """
    base = len(RADIX)
    value = 0
    for ch in s:
        value = value * base + RADIX.index(ch)
    return value
def slugify(s, limit=256):
    """Turn *s* into a URL slug: lowercase, non-alphanumerics collapsed
    to single dashes. Slugs longer than *limit* are trimmed at the
    first dash after the limit (if any)."""
    normalized = unicodedata.normalize('NFKD', s).lower()
    slug = re.sub(r'[^a-z0-9]+', '-', normalized).strip('-')
    slug = re.sub(r'-+', '-', slug)
    if len(slug) > limit:
        cut = slug.find('-', limit)
        if cut != -1:
            slug = slug[:cut]
    return slug
def multiline_string_to_list(s):
    """Split a newline-delimited string into stripped, non-blank lines."""
    stripped = (line.strip() for line in s.split('\n'))
    return [line for line in stripped if line]
def image_root_path():
    """Directory under which image files are stored; falls back to the
    application root when IMAGE_ROOT_PATH is not configured."""
    return app.config.get('IMAGE_ROOT_PATH', app.root_path)
def proxy_all_images(html):
    """Rewrite every <img src="..."> in *html* to go through the pilbox
    image proxy (see construct_imageproxy_url)."""
    img_src = re.compile(r'(<img[^>]+src=")([^">]+)(")')

    def rewrite(match):
        return (match.group(1)
                + construct_imageproxy_url(match.group(2))
                + match.group(3))

    return img_src.sub(rewrite, html)
def construct_imageproxy_url(src, side=None):
    """Build a pilbox proxy URL for *src*, optionally resized to a
    square of *side* pixels.

    Returns *src* unchanged when no pilbox server is configured or the
    source is a data: URL (pilbox cannot resize those). When PILBOX_KEY
    is set, the query string is HMAC-SHA1 signed.
    """
    pilbox_url = app.config.get('PILBOX_URL')
    if not pilbox_url or src.lower().startswith('data:'):
        # cannot resize without pilbox; pilbox cannot resize "data:" urls
        app.logger.warn('No pilbox server configured')
        return src
    query = {'url': src}
    if side:
        query['w'] = side
        query['h'] = side
    else:
        query['op'] = 'noop'
    # bug fix: `qs` was previously only built inside the PILBOX_KEY
    # branch, so an unsigned configuration raised UnboundLocalError
    qs = urllib.parse.urlencode(query)
    pilbox_key = app.config.get('PILBOX_KEY')
    if pilbox_key:
        h = hmac.new(pilbox_key.encode(), qs.encode(), hashlib.sha1)
        qs += '&sig=' + h.hexdigest()
    return pilbox_url + '?' + qs
def markdown_filter(data, img_path=None, url_processor=url_to_link,
                    person_processor=person_to_microcard):
    """Render markdown *data* to HTML.

    Expands relative image paths against *img_path*, converts legacy
    [[Name]] people syntax to @names, autolinks bare URLs/@names, and
    applies smart typography. Returns '' for None input.
    """
    if data is None:
        return ''
    if img_path:
        # replace relative paths to images with absolute
        data = RELATIVE_PATH_RE.sub('[\g<1>](' + img_path + '/\g<2>)', data)
    data = convert_legacy_people_to_at_names(data)
    result = markdown(data, extensions=['codehilite', 'fenced_code'])
    if url_processor or person_processor:
        result = autolink(result, url_processor, person_processor)
    result = smartyPants(result)
    return result
def convert_legacy_people_to_at_names(data):
    """Rewrite legacy [[Full Name|Display]] person references as @nick,
    using the Contact table to find the canonical nick."""
    from .models import Contact

    def process_name(m):
        fullname = m.group(1)
        displayname = m.group(2)
        contact = Contact.query.filter_by(name=fullname).first()
        if contact and contact.nicks:
            return '@' + contact.nicks[0].name
        # NOTE(review): when the [[Name]] form has no |Display part,
        # group(2) is None and this concatenation raises -- confirm.
        return '@' + displayname
    data = PEOPLE_RE.sub(process_name, data)
    return data
def format_as_text(html, link_fn=None):
    """Reduce *html* to plain text: links become their href (or are
    handed to *link_fn*), images are dropped. Returns '' for None."""
    if html is None:
        return ''
    soup = bs4.BeautifulSoup(html)
    # replace links with the URL (or apply the caller's transform)
    for anchor in soup.find_all('a'):
        if link_fn:
            link_fn(anchor)
        else:
            anchor.replace_with(anchor.get('href') or '[link]')
    # and remove images
    for image in soup.find_all('img'):
        image.hidden = True
    return soup.get_text().strip()
def is_cached_current(original, cached):
    """True when *cached* exists and its mtime is at least as new as
    *original*'s, i.e. the processed copy is up to date."""
    if not os.path.exists(cached):
        return False
    return os.stat(cached).st_mtime >= os.stat(original).st_mtime
def prettify_url(url):
    """Return *url* with its schema (everything up to '//') removed."""
    if not url:
        return url
    _schema, sep, remainder = url.partition('//')
    return remainder if sep else url
def fetch_html(url):
    """Utility to fetch HTML from an external site.

    If the Content-Type header does not explicitly list a charset,
    Requests will assume a bad one, so we have to use
    `get_encodings_from_content` to find the meta charset or other
    indications in the actual response body.

    Return a requests.Response
    """
    response = requests.get(url, timeout=30)
    # bug fix: the success test was `status_code // 2 == 100`, which only
    # matches 200 and 201; dividing by 100 accepts the whole 2xx range
    if response.status_code // 100 == 2:
        # requests ignores <meta charset> when a Content-Type header
        # is provided, even if the header does not define a charset
        if 'charset' not in response.headers.get('content-type', ''):
            encodings = requests.utils.get_encodings_from_content(
                response.text)
            if encodings:
                response.encoding = encodings[0]
    else:
        app.logger.warn('failed to fetch url %s. got response %s.',
                        url, response)
    return response
def clean_foreign_html(html):
    """Sanitize HTML from an external source, stripping disallowed tags."""
    sanitized = bleach.clean(html, strip=True)
    return sanitized
def jwt_encode(obj):
    """Sign *obj* as a JWT with the app's SECRET_KEY.

    NOTE(review): this mutates the caller's dict by inserting a random
    'nonce' key before encoding — presumably so repeated encodings of
    the same payload produce distinct tokens; confirm callers expect
    the mutation.
    """
    obj['nonce'] = random.randint(1000000, 2 ** 31)
    return jwt.encode(obj, app.config['SECRET_KEY'])
def jwt_decode(s):
    """Verify and decode a JWT previously signed with the app's SECRET_KEY."""
    return jwt.decode(s, app.config['SECRET_KEY'])
def render_themed(template, **context):
    """Render *template* with the configured theme (DEFAULT_THEME, or 'plain')."""
    theme_ident = current_app.config.get('DEFAULT_THEME', 'plain')
    theme = get_theme(theme_ident)
    return render_theme_template(theme, template, **context)
|
{
"content_hash": "d66b48789615e1f89d8ec19197d9282a",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 94,
"avg_line_length": 28.435267857142858,
"alnum_prop": 0.5910197032734124,
"repo_name": "thedod/redwind",
"id": "2e254dfdf30af38372919be488c210a5dd44c9ec",
"size": "12739",
"binary": false,
"copies": "1",
"ref": "refs/heads/deployment",
"path": "redwind/util.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "18996"
},
{
"name": "JavaScript",
"bytes": "40212"
},
{
"name": "Python",
"bytes": "178084"
}
],
"symlink_target": ""
}
|
import stat
import sys
from pathlib import Path
from synthtool.sources import templates
# Directory of template fixtures used by these tests.
FIXTURES = Path(__file__).parent / "fixtures" / "node_templates" / "standard"
# The real node-library templates shipped with synthtool.
NODE_TEMPLATES = Path(__file__).parent.parent / "synthtool/gcp/templates/node_library"
def test_render():
    """A single template renders to a file named after the template."""
    renderer = templates.Templates(FIXTURES)
    rendered = renderer.render("example.j2", name="world")
    assert rendered.name == "example"
    assert rendered.read_text() == "Hello, world!\n"
def test_render_with_subdir():
    """subdir= places the rendered file beneath that subdirectory."""
    renderer = templates.Templates(FIXTURES)
    rendered = renderer.render("example.j2", name="world", subdir="foo/bar")
    assert rendered.match("**/foo/bar/example")
    assert rendered.name == "example"
    assert rendered.read_text() == "Hello, world!\n"
def test_render_group():
    """Rendering a group writes every template in the directory tree."""
    group = templates.TemplateGroup(FIXTURES / "group")
    out = group.render(var_a="hello", var_b="world")
    assert (out / "1.txt").read_text() == "hello\n"
    assert (out / "subdir" / "2.txt").read_text() == "world\n"
def test_render_group_with_subdir():
    """Group rendering honors subdir= for the whole output tree."""
    group = templates.TemplateGroup(FIXTURES / "group")
    out = group.render(subdir="foo/bar", var_a="hello", var_b="world")
    assert (out / "foo/bar" / "1.txt").read_text() == "hello\n"
    assert (out / "foo/bar" / "subdir" / "2.txt").read_text() == "world\n"
def test_render_preserve_mode():
    """
    Test that rendering templates correctly preserve file modes.
    """
    template_file = FIXTURES / "executable.j2"
    template_mode = template_file.stat().st_mode
    # Sanity-check the fixture carries the user-execute bit; Windows has
    # no Unix permission bits, so skip the check there.
    if sys.platform != "win32":
        assert template_mode & stat.S_IXUSR
    renderer = templates.Templates(FIXTURES)
    rendered = renderer.render("executable.j2", name="executable")
    assert rendered.stat().st_mode == template_mode
def test_release_quality_badge():
    """README badge and wording reflect the repo's release level."""
    renderer = templates.Templates(NODE_TEMPLATES)
    readme = renderer.render(
        "README.md", metadata={"repo": {"release_level": "preview"}, "samples": {}}
    ).read_text()
    assert "https://img.shields.io/badge/release%20level-preview-yellow.svg" in readme
    assert "This library is considered to be in **preview**" in readme
def test_syntax_highlighter():
    """nodejs repos get a JavaScript fenced code block for the quickstart."""
    renderer = templates.Templates(NODE_TEMPLATES)
    readme = renderer.render(
        "README.md",
        metadata={"repo": {"language": "nodejs"}, "quickstart": "const foo = 'bar'"},
    ).read_text()
    assert "```javascript" in readme
def test_hide_billing():
    """The billing section appears only when the repo requires billing."""
    renderer = templates.Templates(NODE_TEMPLATES)

    with_billing = renderer.render(
        "README.md", metadata={"repo": {"requires_billing": True, "api_id": "fooapi"}}
    ).read_text()
    assert "Enable billing for your project" in with_billing

    without_billing = renderer.render(
        "README.md", metadata={"repo": {"requires_billing": False}}
    ).read_text()
    assert "Enable billing for your project" not in without_billing
def test_slugify():
    """slugify lower-cases, hyphenates spaces, and drops punctuation."""
    cases = {
        "Foo Bar": "foo-bar",
        "ACL (Access Control)": "acl-access-control",
    }
    for raw, expected in cases.items():
        assert templates.slugify(raw) == expected
|
{
"content_hash": "49ff355ed8a879172981d9d196948805",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 86,
"avg_line_length": 30.979166666666668,
"alnum_prop": 0.6455951580363147,
"repo_name": "googleapis/synthtool",
"id": "0e555be22be78aa5604c2053ee4171c30387418e",
"size": "3550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "801"
},
{
"name": "Dockerfile",
"bytes": "13507"
},
{
"name": "HTML",
"bytes": "6091"
},
{
"name": "Java",
"bytes": "21963"
},
{
"name": "JavaScript",
"bytes": "3390"
},
{
"name": "Jinja",
"bytes": "85687"
},
{
"name": "Python",
"bytes": "396495"
},
{
"name": "Shell",
"bytes": "67707"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from sqlserver_ado.fields import (LegacyDateTimeField, LegacyDateField,
LegacyTimeField, DateField, DateTimeField, TimeField, DateTimeOffsetField)
@python_2_unicode_compatible
class LegacyDateTimeTable(models.Model):
    """Exercises LegacyDateTimeField against the LegacyDateTimeTable table."""

    val = LegacyDateTimeField()

    def __str__(self):
        # __str__ must return str; the field value is a date/time object,
        # not a string, so coerce it (no-op if it is already a str).
        return str(self.val)

    class Meta:
        db_table = 'LegacyDateTimeTable'
@python_2_unicode_compatible
class DateTimeLegacyDateTimeTable(models.Model):
    """Reads the LegacyDateTimeTable table through the new DateTimeField."""

    val = DateTimeField()

    def __str__(self):
        # Coerce: __str__ must return str, and datetime values are not strings.
        return str(self.val)

    class Meta:
        # Unmanaged: reuses the table created for LegacyDateTimeTable.
        managed = False
        db_table = 'LegacyDateTimeTable'
@python_2_unicode_compatible
class LegacyDateTable(models.Model):
    """Exercises LegacyDateField."""

    val = LegacyDateField()

    def __str__(self):
        # Coerce: __str__ must return str, and date values are not strings.
        return str(self.val)
@python_2_unicode_compatible
class LegacyTimeTable(models.Model):
    """Exercises LegacyTimeField."""

    val = LegacyTimeField()

    def __str__(self):
        # Coerce: __str__ must return str, and time values are not strings.
        return str(self.val)
@python_2_unicode_compatible
class DateTable(models.Model):
    """Exercises DateField."""

    val = DateField()

    def __str__(self):
        # Coerce: __str__ must return str, and date values are not strings.
        return str(self.val)
@python_2_unicode_compatible
class DateTimeTable(models.Model):
    """Exercises DateTimeField."""

    val = DateTimeField()

    def __str__(self):
        # Coerce: __str__ must return str, and datetime values are not strings.
        return str(self.val)
@python_2_unicode_compatible
class TimeTable(models.Model):
    """Exercises TimeField."""

    val = TimeField()

    def __str__(self):
        # Coerce: __str__ must return str, and time values are not strings.
        return str(self.val)
@python_2_unicode_compatible
class DateTimeOffsetTable(models.Model):
    """Exercises DateTimeOffsetField."""

    val = DateTimeOffsetField()

    def __str__(self):
        # Coerce: __str__ must return str, and datetime values are not strings.
        return str(self.val)
|
{
"content_hash": "690a9eaa462af4cccf99aa46b75825aa",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 23.426470588235293,
"alnum_prop": 0.6603892027620841,
"repo_name": "theoriginalgri/django-mssql",
"id": "de0133a793e946a85b5a15ab3197a18f7a66fb0d",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/1.7-python3",
"path": "tests/mssql_dates/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1900"
},
{
"name": "Python",
"bytes": "218939"
},
{
"name": "SQLPL",
"bytes": "900"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Delegate all packaging metadata to pbr (reads setup.cfg / git history).
setup(
    setup_requires=["pbr>=1.8"],
    pbr=True,
)
|
{
"content_hash": "1fb730006d7c739b2eeef2c00c250770",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6395348837209303,
"repo_name": "Marx314/python-ubersmithclient",
"id": "b2e43a5b397959ae7e47644f24fbc1c4e1c5607c",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16396"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class TestsConfig(AppConfig):
    """Django app configuration for the ``tests`` app."""
    name = 'tests'
|
{
"content_hash": "553469c02c45dd7e31108c1b4eba53e3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17,
"alnum_prop": 0.7411764705882353,
"repo_name": "ZeroCater/zc_common",
"id": "a56b400e349311bb9e10781ef3d7898761c32a75",
"size": "85",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "137464"
},
{
"name": "Shell",
"bytes": "866"
}
],
"symlink_target": ""
}
|
import urllib.request
from urllib.error import URLError
import html2text
import xml.etree.ElementTree as ET
#from time import strptime, mktime
#PUBDATEFORMAT = "%a, %d %b %Y %H:%M:%S %z"
def get_new_articles(source):
    """Fetch the RSS feed at *source* and return a list of
    (title, link, description, guid) tuples, one per <item>.

    Missing titles and guids fall back to the item's link; descriptions
    are converted from HTML to plain text. On a fetch error the reason
    is printed and an empty list is returned.
    """
    articles = []
    try:
        feed_xml = urllib.request.urlopen(source).read().decode("utf-8")
        channel = ET.fromstring(feed_xml).find("channel")
        for item in channel.findall("item"):
            link = item.find("link").text

            title_el = item.find("title")
            title = title_el.text if title_el is not None else None
            if title is None:
                print("found no title, will use link")
                title = link

            desc_el = item.find("description")
            description = (html2text.html2text(desc_el.text)
                           if desc_el is not None else None)

            guid_el = item.find("guid")
            guid = guid_el.text if guid_el is not None else None
            if guid is None:
                guid = link

            articles.append((title, link, description, guid))
    except URLError as e:
        print("Error:", e.reason)
    return articles
|
{
"content_hash": "424918ab16fa6012535019989d13edce",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 65,
"avg_line_length": 24.306122448979593,
"alnum_prop": 0.6607892527287993,
"repo_name": "SmBe19/RedditBots",
"id": "059859eb9e3dc85cbdff491229986149417ee2fa",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RSSBot/RSSReader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96038"
}
],
"symlink_target": ""
}
|
from rctk.widgets.control import Control, Attribute
from rctk.event import Changable, Submittable, Keypressable
class Text(Control, Changable, Submittable, Keypressable):
    """Text input control; display size is governed by rows/columns."""
    name = "text"
    value = Attribute("")
    rows = Attribute(1, Attribute.NUMBER)
    columns = Attribute(20, Attribute.NUMBER)
class Password(Text):
    """Text variant identified as "password" (input masking is presumably
    handled client-side by the toolkit — confirm)."""
    name = "password"
|
{
"content_hash": "02c834bd7aaffd0d7c80b98cdb875901",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 27,
"alnum_prop": 0.7293447293447294,
"repo_name": "rctk/rctk",
"id": "c73f00c03a5e9c6532ba7aa2eda653743faa65b3",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rctk/widgets/text.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "25764"
},
{
"name": "Python",
"bytes": "214120"
}
],
"symlink_target": ""
}
|
"""Support for Sonarr."""
from datetime import datetime
import logging
import time
from pytz import timezone
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_PORT,
CONF_SSL,
DATA_BYTES,
DATA_EXABYTES,
DATA_GIGABYTES,
DATA_KILOBYTES,
DATA_MEGABYTES,
DATA_PETABYTES,
DATA_TERABYTES,
DATA_YOTTABYTES,
DATA_ZETTABYTES,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Platform-specific configuration keys.
CONF_DAYS = "days"
CONF_INCLUDED = "include_paths"
CONF_UNIT = "unit"
CONF_URLBASE = "urlbase"

# Defaults applied when the user omits the corresponding option.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8989
DEFAULT_URLBASE = ""
DEFAULT_DAYS = "1"
DEFAULT_UNIT = DATA_GIGABYTES

# sensor type -> [friendly name, unit of measurement, icon]
SENSOR_TYPES = {
    "diskspace": ["Disk Space", DATA_GIGABYTES, "mdi:harddisk"],
    "queue": ["Queue", "Episodes", "mdi:download"],
    "upcoming": ["Upcoming", "Episodes", "mdi:television"],
    "wanted": ["Wanted", "Episodes", "mdi:television"],
    "series": ["Series", "Shows", "mdi:television"],
    "commands": ["Commands", "Commands", "mdi:code-braces"],
    "status": ["Status", "Status", "mdi:information"],
}

# Sonarr REST endpoints; formatted with (ssl-suffix, host, port, urlbase)
# and, for "upcoming", the (start, end) dates as well.
ENDPOINTS = {
    "diskspace": "http{0}://{1}:{2}/{3}api/diskspace",
    "queue": "http{0}://{1}:{2}/{3}api/queue",
    "upcoming": "http{0}://{1}:{2}/{3}api/calendar?start={4}&end={5}",
    "wanted": "http{0}://{1}:{2}/{3}api/wanted/missing",
    "series": "http{0}://{1}:{2}/{3}api/series",
    "commands": "http{0}://{1}:{2}/{3}api/command",
    "status": "http{0}://{1}:{2}/{3}api/system/status",
}

# Support to Yottabytes for the future, why not
# (index in this list is the power of 1024 relative to bytes; see to_unit).
BYTE_SIZES = [
    DATA_BYTES,
    DATA_KILOBYTES,
    DATA_MEGABYTES,
    DATA_GIGABYTES,
    DATA_TERABYTES,
    DATA_PETABYTES,
    DATA_EXABYTES,
    DATA_ZETTABYTES,
    DATA_YOTTABYTES,
]

# YAML configuration schema for this platform.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.string,
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_INCLUDED, default=[]): cv.ensure_list,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=["upcoming"]): vol.All(
            cv.ensure_list, [vol.In(list(SENSOR_TYPES))]
        ),
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_SSL, default=False): cv.boolean,
        vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): vol.In(BYTE_SIZES),
        vol.Optional(CONF_URLBASE, default=DEFAULT_URLBASE): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Sonarr platform."""
    sensors = [
        SonarrSensor(hass, config, sensor_type)
        for sensor_type in config.get(CONF_MONITORED_CONDITIONS)
    ]
    add_entities(sensors, True)
class SonarrSensor(Entity):
    """Implementation of the Sonarr sensor."""

    def __init__(self, hass, conf, sensor_type):
        """Create Sonarr entity."""
        self.conf = conf
        self.host = conf.get(CONF_HOST)
        self.port = conf.get(CONF_PORT)
        self.urlbase = conf.get(CONF_URLBASE)
        if self.urlbase:
            # Normalize to "base/" so the ENDPOINTS templates join cleanly.
            self.urlbase = "{}/".format(self.urlbase.strip("/"))
        self.apikey = conf.get(CONF_API_KEY)
        self.included = conf.get(CONF_INCLUDED)
        self.days = int(conf.get(CONF_DAYS))
        # "s" turns "http{0}" into "https" in the endpoint templates.
        self.ssl = "s" if conf.get(CONF_SSL) else ""
        self._state = None
        self.data = []
        self._tz = timezone(str(hass.config.time_zone))
        self.type = sensor_type
        self._name = SENSOR_TYPES[self.type][0]
        if self.type == "diskspace":
            # Disk space is reported in the user-selected byte unit.
            self._unit = conf.get(CONF_UNIT)
        else:
            self._unit = SENSOR_TYPES[self.type][1]
        self._icon = SENSOR_TYPES[self.type][2]
        self._available = False

    @property
    def name(self):
        """Return the name of the sensor."""
        return "{} {}".format("Sonarr", self._name)

    @property
    def state(self):
        """Return sensor state."""
        return self._state

    @property
    def available(self):
        """Return sensor availability."""
        return self._available

    @property
    def unit_of_measurement(self):
        """Return the unit of the sensor."""
        return self._unit

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor.

        The attribute layout depends on the sensor type; each branch
        summarizes the entries cached in ``self.data`` by ``update``.
        """
        attributes = {}
        if self.type == "upcoming":
            # show title -> "SxxEyy"
            for show in self.data:
                attributes[show["series"]["title"]] = "S{:02d}E{:02d}".format(
                    show["seasonNumber"], show["episodeNumber"]
                )
        elif self.type == "queue":
            # "title SxxEyy" -> download percentage
            for show in self.data:
                remaining = 1 if show["size"] == 0 else show["sizeleft"] / show["size"]
                attributes[
                    show["series"]["title"]
                    + " S{:02d}E{:02d}".format(
                        show["episode"]["seasonNumber"],
                        show["episode"]["episodeNumber"],
                    )
                ] = "{:.2f}%".format(100 * (1 - (remaining)))
        elif self.type == "wanted":
            # "title SxxEyy" -> air date
            for show in self.data:
                attributes[
                    show["series"]["title"]
                    + " S{:02d}E{:02d}".format(
                        show["seasonNumber"], show["episodeNumber"]
                    )
                ] = show["airDate"]
        elif self.type == "commands":
            # command name -> command state
            for command in self.data:
                attributes[command["name"]] = command["state"]
        elif self.type == "diskspace":
            # mount path -> "free/total unit (percent free)"
            for data in self.data:
                attributes[data["path"]] = "{:.2f}/{:.2f}{} ({:.2f}%)".format(
                    to_unit(data["freeSpace"], self._unit),
                    to_unit(data["totalSpace"], self._unit),
                    self._unit,
                    (
                        to_unit(data["freeSpace"], self._unit)
                        / to_unit(data["totalSpace"], self._unit)
                        * 100
                    ),
                )
        elif self.type == "series":
            # show title -> "downloaded/total Episodes"
            for show in self.data:
                if "episodeFileCount" not in show or "episodeCount" not in show:
                    attributes[show["title"]] = "N/A"
                else:
                    attributes[show["title"]] = "{}/{} Episodes".format(
                        show["episodeFileCount"], show["episodeCount"]
                    )
        elif self.type == "status":
            attributes = self.data
        return attributes

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return self._icon

    def update(self):
        """Update the data for the sensor.

        Fetches the endpoint matching ``self.type``, caches the parsed
        payload in ``self.data``, and derives ``self._state`` from it.
        """
        start = get_date(self._tz)
        end = get_date(self._tz, self.days)
        try:
            res = requests.get(
                ENDPOINTS[self.type].format(
                    self.ssl, self.host, self.port, self.urlbase, start, end
                ),
                headers={"X-Api-Key": self.apikey},
                timeout=10,
            )
        except OSError:
            _LOGGER.warning("Host %s is not available", self.host)
            self._available = False
            self._state = None
            return

        if res.status_code == 200:
            if self.type in ["upcoming", "queue", "series", "commands"]:
                if self.days == 1 and self.type == "upcoming":
                    # Sonarr API returns an empty array if start and end dates
                    # are the same, so we need to filter to just today
                    self.data = list(
                        filter(lambda x: x["airDate"] == str(start), res.json())
                    )
                else:
                    self.data = res.json()
                self._state = len(self.data)
            elif self.type == "wanted":
                # First request only yields the record count; re-request with
                # pageSize set so every wanted episode comes back in one page.
                data = res.json()
                res = requests.get(
                    "{}?pageSize={}".format(
                        ENDPOINTS[self.type].format(
                            self.ssl, self.host, self.port, self.urlbase
                        ),
                        data["totalRecords"],
                    ),
                    headers={"X-Api-Key": self.apikey},
                    timeout=10,
                )
                self.data = res.json()["records"]
                self._state = len(self.data)
            elif self.type == "diskspace":
                # If included paths are not provided, use all data
                if self.included == []:
                    self.data = res.json()
                else:
                    # Filter to only show lists that are included
                    self.data = list(
                        filter(lambda x: x["path"] in self.included, res.json())
                    )
                self._state = "{:.2f}".format(
                    to_unit(sum([data["freeSpace"] for data in self.data]), self._unit)
                )
            elif self.type == "status":
                self.data = res.json()
                self._state = self.data["version"]
            self._available = True
def get_date(zone, offset=0):
    """Get date based on timezone and offset of days."""
    seconds_per_day = 60 * 60 * 24
    moment = time.time() + seconds_per_day * offset
    return datetime.fromtimestamp(moment, tz=zone).date()
def to_unit(value, unit):
    """Convert a byte count *value* into *unit* (an entry of BYTE_SIZES)."""
    exponent = BYTE_SIZES.index(unit)
    return value / 1024 ** exponent
|
{
"content_hash": "f4b525b2912e1952d488958ae4fb3254",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 87,
"avg_line_length": 35.1021897810219,
"alnum_prop": 0.5236015803701394,
"repo_name": "postlund/home-assistant",
"id": "c0781b37603ab3e13e832c29d8f16ce9b02dc5c3",
"size": "9618",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sonarr/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
import argparse
import tarfile
import re
def extract_text(filein):
    """Read a UTF-16 file object, drop carriage returns, return UTF-8 bytes."""
    decoded = filein.read().decode("utf-16")
    return decoded.replace("\r", "").encode("utf-8")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--file', required=True, help='input repacked tgz file')
    parser.add_argument('-s', '--source', required=True, help='source language (e.g. fr)')
    parser.add_argument('-t', '--target', required=False, help='target language (e.g. en)')
    parser.add_argument('-c', '--category', required=False, default="dev", help='dev or test?')
    parser.add_argument('-o', '--output', required=False, help='output file (used for parallel data)')
    args = parser.parse_args()

    # Maps each utterance's base path to its source (and optional target) text.
    parallel_data = {}
    # Raw strings so \d is a regex digit class, not a deprecated string escape.
    source_pattern = r"(.*/mslt_" + args.category + "_" + args.source + r"_\d+/.+?).t2..*"
    with tarfile.open(args.file, "r:gz") as tar:
        for tarinfo in tar.getmembers():
            match = re.match(source_pattern, tarinfo.name.lower())
            if match != None:
                text = extract_text(tar.extractfile(tarinfo))
                parallel_data[match.group(1)] = {"src": text}
    if args.target:
        target_pattern = r"(.*/mslt_" + args.category + "_" + args.source + r"_\d+/.+?).t[3-9]." + args.target + ".*"
        with tarfile.open(args.file, "r:gz") as tar:
            for tarinfo in tar.getmembers():
                match = re.match(target_pattern, tarinfo.name.lower())
                if match != None:
                    text = extract_text(tar.extractfile(tarinfo))
                    parallel_data[match.group(1)]["tgt"] = text
        # Context managers ensure the outputs are closed even on error.
        with open(args.output + "." + args.source, "wb") as output_src, \
                open(args.output + "." + args.target, "wb") as output_tgt:
            for item in parallel_data.items():
                assert len(item[1]) == 2
                if len(item[1]["src"].strip()) > 0 and len(item[1]["tgt"].strip()) > 0:
                    output_src.write(item[1]["src"])
                    output_tgt.write(item[1]["tgt"])
    else:
        # BUG FIX: dict.itervalues() is Python 2 only and crashes on Python 3;
        # .values() behaves the same here on both.
        for value in parallel_data.values():
            print(value["src"].strip())
|
{
"content_hash": "d51630ffe4ff5c329f6a55a6138a18c2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 46.8936170212766,
"alnum_prop": 0.5707803992740472,
"repo_name": "xingniu/nlp-util",
"id": "110ea377d74423dbf66247064fa3cab63a8b5810",
"size": "2249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MSLT-extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30078"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
}
|
import locale
import os
import struct
from subprocess import Popen, PIPE
import sys
import threading
import time
from .util import six
# Import some platform-specific things at top level so they can be mocked for
# tests.
try:
import pty
except ImportError:
pty = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import termios
except ImportError:
termios = None
from .exceptions import (
UnexpectedExit, Failure, ThreadException, WatcherError,
)
from .platform import (
WINDOWS, pty_size, character_buffered, ready_for_reading, bytes_to_read,
)
from .util import has_fileno, isatty, ExceptionHandlingThread, encode_output
class Runner(object):
"""
Partially-abstract core command-running API.
This class is not usable by itself and must be subclassed, implementing a
number of methods such as `start`, `wait` and `returncode`. For a subclass
implementation example, see the source code for `.Local`.
"""
read_chunk_size = 1000
input_sleep = 0.01
    def __init__(self, context):
        """
        Create a new runner with a handle on some `.Context`.

        :param context:
            a `.Context` instance, used to transmit default options and provide
            access to other contextualized information (e.g. a remote-oriented
            `.Runner` might want a `.Context` subclass holding info about
            hostnames and ports.)

            .. note::
                The `.Context` given to `.Runner` instances **must** contain
                default config values for the `.Runner` class in question. At a
                minimum, this means values for each of the default
                `.Runner.run` keyword arguments such as ``echo`` and ``warn``.

        :raises exceptions.ValueError:
            if not all expected default values are found in ``context``.
        """
        #: The `.Context` given to the same-named argument of `__init__`.
        self.context = context
        #: A `threading.Event` signaling program completion.
        #:
        #: Typically set after `wait` returns. Some IO mechanisms rely on this
        #: to know when to exit an infinite read loop.
        self.program_finished = threading.Event()
        # I wish Sphinx would organize all class/instance attrs in the same
        # place. If I don't do this here, it goes 'class vars -> __init__
        # docstring -> instance vars' :( TODO: consider just merging class and
        # __init__ docstrings, though that's annoying too.
        #: How many bytes (at maximum) to read per iteration of stream reads.
        self.read_chunk_size = self.__class__.read_chunk_size
        # Ditto re: declaring this in 2 places for doc reasons.
        #: How many seconds to sleep on each iteration of the stdin read loop
        #: and other otherwise-fast loops.
        self.input_sleep = self.__class__.input_sleep
        #: Whether pty fallback warning has been emitted.
        self.warned_about_pty_fallback = False
        #: A list of `.StreamWatcher` instances for use by `respond`. Is filled
        #: in at runtime by `run`.
        self.watchers = []
    def run(self, command, **kwargs):
        """
        Execute ``command``, returning an instance of `Result`.

        .. note::
            All kwargs will default to the values found in this instance's
            `~.Runner.context` attribute, specifically in its configuration's
            ``run`` subtree (e.g. ``run.echo`` provides the default value for
            the ``echo`` keyword, etc). The base default values are described
            in the parameter list below.

        :param str command: The shell command to execute.

        :param str shell: Which shell binary to use. Default: ``/bin/bash``.

        :param bool warn:
            Whether to warn and continue, instead of raising
            `.UnexpectedExit`, when the executed command exits with a
            nonzero status. Default: ``False``.

            .. note::
                This setting has no effect on exceptions, which will still be
                raised, typically bundled in `.ThreadException` objects if they
                were raised by the IO worker threads.

                Similarly, `.WatcherError` exceptions raised by
                `.StreamWatcher` instances will also ignore this setting, and
                will usually be bundled inside `.Failure` objects (in order to
                preserve the execution context).

        :param hide:
            Allows the caller to disable ``run``'s default behavior of copying
            the subprocess' stdout and stderr to the controlling terminal.
            Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout
            stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or
            ``hide='both'`` (or ``True``) to hide both streams.

            The default value is ``None``, meaning to print everything;
            ``False`` will also disable hiding.

            .. note::
                Stdout and stderr are always captured and stored in the
                ``Result`` object, regardless of ``hide``'s value.

            .. note::
                ``hide=True`` will also override ``echo=True`` if both are
                given (either as kwargs or via config/CLI).

        :param bool pty:
            By default, ``run`` connects directly to the invoked process and
            reads its stdout/stderr streams. Some programs will buffer (or even
            behave) differently in this situation compared to using an actual
            terminal or pseudoterminal (pty). To use a pty instead of the
            default behavior, specify ``pty=True``.

            .. warning::
                Due to their nature, ptys have a single output stream, so the
                ability to tell stdout apart from stderr is **not possible**
                when ``pty=True``. As such, all output will appear on
                ``out_stream`` (see below) and be captured into the ``stdout``
                result attribute. ``err_stream`` and ``stderr`` will always be
                empty when ``pty=True``.

        :param bool fallback:
            Controls auto-fallback behavior re: problems offering a pty when
            ``pty=True``. Whether this has any effect depends on the specific
            `Runner` subclass being invoked. Default: ``True``.

        :param bool echo:
            Controls whether `.run` prints the command string to local stdout
            prior to executing it. Default: ``False``.

            .. note::
                ``hide=True`` will override ``echo=True`` if both are given.

        :param dict env:
            By default, subprocesses receive a copy of Invoke's own environment
            (i.e. ``os.environ``). Supply a dict here to update that child
            environment.

            For example, ``run('command', env={'PYTHONPATH':
            '/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env
            var, with the rest of the child's env looking identical to the
            parent.

            .. seealso:: ``replace_env`` for changing 'update' to 'replace'.

        :param bool replace_env:
            When ``True``, causes the subprocess to receive the dictionary
            given to ``env`` as its entire shell environment, instead of
            updating a copy of ``os.environ`` (which is the default behavior).
            Default: ``False``.

        :param str encoding:
            Override auto-detection of which encoding the subprocess is using
            for its stdout/stderr streams (which defaults to the return value
            of `default_encoding`).

        :param out_stream:
            A file-like stream object to which the subprocess' standard output
            should be written. If ``None`` (the default), ``sys.stdout`` will
            be used.

        :param err_stream:
            Same as ``out_stream``, except for standard error, and defaulting
            to ``sys.stderr``.

        :param in_stream:
            A file-like stream object to be used as the subprocess' standard
            input. If ``None`` (the default), ``sys.stdin`` will be used.

            If ``False``, will disable stdin mirroring entirely (though other
            functionality which writes to the subprocess' stdin, such as
            autoresponding, will still function.) Disabling stdin mirroring can
            help when ``sys.stdin`` is a misbehaving non-stream object, such as
            under test harnesses or headless command runners.

        :param watchers:
            A list of `.StreamWatcher` instances which will be used to scan the
            program's ``stdout`` or ``stderr`` and may write into its ``stdin``
            (typically ``str`` or ``bytes`` objects depending on Python
            version) in response to patterns or other heuristics.

            See :doc:`/concepts/watchers` for details on this functionality.

            Default: ``[]``.

        :param bool echo_stdin:
            Whether to write data from ``in_stream`` back to ``out_stream``.

            In other words, in normal interactive usage, this parameter
            controls whether Invoke mirrors what you type back to your
            terminal.

            By default (when ``None``), this behavior is triggered by the
            following:

                * Not using a pty to run the subcommand (i.e. ``pty=False``),
                  as ptys natively echo stdin to stdout on their own;
                * And when the controlling terminal of Invoke itself (as per
                  ``in_stream``) appears to be a valid terminal device or TTY.
                  (Specifically, when `~invoke.util.isatty` yields a ``True``
                  result when given ``in_stream``.)

                  .. note::
                      This property tends to be ``False`` when piping another
                      program's output into an Invoke session, or when running
                      Invoke within another program (e.g. running Invoke from
                      itself).

            If both of those properties are true, echoing will occur; if either
            is false, no echoing will be performed.

            When not ``None``, this parameter will override that auto-detection
            and force, or disable, echoing.

        :returns:
            `Result`, or a subclass thereof.

        :raises:
            `.UnexpectedExit`, if the command exited nonzero and
            ``warn`` was ``False``.

        :raises:
            `.Failure`, if the command didn't even exit cleanly, e.g. if a
            `.StreamWatcher` raised `.WatcherError`.

        :raises:
            `.ThreadException` (if the background I/O threads encountered
            exceptions other than `.WatcherError`).
        """
        try:
            return self._run_body(command, **kwargs)
        finally:
            # Ensure subclass cleanup hooks run even if _run_body raises.
            self.stop()
    def _run_body(self, command, **kwargs):
        """
        Execute ``command`` and return a `Result`; the core of `run`.

        Spawns the subprocess, drives the stdin/stdout/stderr worker
        threads, then reconciles exit status, captured output, and any
        thread/watcher exceptions into a `Result` or a raised error.
        """
        # Normalize kwargs w/ config
        opts, out_stream, err_stream, in_stream = self._run_opts(kwargs)
        shell = opts['shell']
        # Environment setup
        env = self.generate_env(opts['env'], opts['replace_env'])
        # Echo running command (bold white via ANSI escapes)
        if opts['echo']:
            print("\033[1;37m{}\033[0m".format(command))
        # Start executing the actual command (runs in background)
        self.start(command, shell, env)
        # Arrive at final encoding if neither config nor kwargs had one
        self.encoding = opts['encoding'] or self.default_encoding()
        # Set up IO thread parameters (format - body_func: {kwargs})
        stdout, stderr = [], []
        thread_args = {
            self.handle_stdout: {
                'buffer_': stdout,
                'hide': 'stdout' in opts['hide'],
                'output': out_stream,
            },
        }
        # After opt processing above, in_stream will be a real stream obj or
        # False, so we can truth-test it. We don't even create a stdin-handling
        # thread if it's False, meaning user indicated stdin is nonexistent or
        # problematic.
        if in_stream:
            thread_args[self.handle_stdin] = {
                'input_': in_stream,
                'output': out_stream,
                'echo': opts['echo_stdin'],
            }
        # A pty merges stderr into stdout, so a separate stderr worker only
        # exists in the no-pty case.
        if not self.using_pty:
            thread_args[self.handle_stderr] = {
                'buffer_': stderr,
                'hide': 'stderr' in opts['hide'],
                'output': err_stream,
            }
        # Kick off IO threads
        self.threads = {}
        exceptions = []
        for target, kwargs in six.iteritems(thread_args):
            t = ExceptionHandlingThread(target=target, kwargs=kwargs)
            self.threads[target] = t
            t.start()
        # Wait for completion, then tie things off & obtain result
        # And make sure we perform that tying off even if things asplode.
        exception = None
        while True:
            try:
                self.wait()
                break  # done waiting!
            # NOTE: we handle all this now instead of at
            # actual-exception-handling time because otherwise the stdout/err
            # reader threads may block until the subprocess exits.
            # TODO: honor other signals sent to our own process and transmit
            # them to the subprocess before handling 'normally'.
            except KeyboardInterrupt as e:
                self.send_interrupt(e)
                # NOTE: no break; we want to return to self.wait()
            except BaseException as e:  # Want to handle SystemExit etc still
                # Store exception for post-shutdown reraise
                exception = e
                # Break out of return-to-wait() loop - we want to shut down
                break
        # Inform stdin-mirroring worker to stop its eternal looping
        self.program_finished.set()
        # Join threads, setting a timeout if necessary
        for target, thread in six.iteritems(self.threads):
            thread.join(self._thread_timeout(target))
            e = thread.exception()
            if e is not None:
                exceptions.append(e)
        # If we got a main-thread exception while wait()ing, raise it now that
        # we've closed our worker threads.
        if exception is not None:
            raise exception
        # Strip out WatcherError from any thread exceptions; they are bundled
        # into Failure handling at the end.
        watcher_errors = []
        thread_exceptions = []
        for exception in exceptions:
            real = exception.value
            if isinstance(real, WatcherError):
                watcher_errors.append(real)
            else:
                thread_exceptions.append(exception)
        # If any exceptions appeared inside the threads, raise them now as an
        # aggregate exception object.
        if thread_exceptions:
            raise ThreadException(thread_exceptions)
        # At this point, we had enough success that we want to be returning or
        # raising detailed info about our execution; so we generate a Result.
        stdout = ''.join(stdout)
        stderr = ''.join(stderr)
        if WINDOWS:
            # "Universal newlines" - replace all standard forms of
            # newline with \n. This is not technically Windows related
            # (\r as newline is an old Mac convention) but we only apply
            # the translation for Windows as that's the only platform
            # it is likely to matter for these days.
            stdout = stdout.replace("\r\n", "\n").replace("\r", "\n")
            stderr = stderr.replace("\r\n", "\n").replace("\r", "\n")
        # Get return/exit code, unless there were WatcherErrors to handle.
        # NOTE: In that case, returncode() may block waiting on the process
        # (which may be waiting for user input). Since most WatcherError
        # situations lack a useful exit code anyways, skipping this doesn't
        # really hurt any.
        exited = None if watcher_errors else self.returncode()
        # Obtain actual result
        result = self.generate_result(
            command=command,
            shell=shell,
            env=env,
            stdout=stdout,
            stderr=stderr,
            exited=exited,
            pty=self.using_pty,
            hide=opts['hide'],
            encoding=self.encoding,
        )
        # Any presence of WatcherError from the threads indicates a watcher was
        # upset and aborted execution; make a generic Failure out of it and
        # raise that.
        if watcher_errors:
            # TODO: ambiguity exists if we somehow get WatcherError in *both*
            # threads...as unlikely as that would normally be.
            raise Failure(result, reason=watcher_errors[0])
        if not (result or opts['warn']):
            raise UnexpectedExit(result)
        return result
def _run_opts(self, kwargs):
    """
    Unify `run` kwargs with config options to arrive at local options.

    :returns:
        Four-tuple of ``(opts_dict, stdout_stream, stderr_stream,
        stdin_stream)``.
    """
    # Layer per-call kwargs over the config's run settings. A kwarg value
    # of None means "defer to config"; each consumed key is popped so
    # leftovers can be detected below.
    opts = {}
    for key, config_value in six.iteritems(self.context.config.run):
        override = kwargs.pop(key, None)
        opts[key] = config_value if override is None else override
    # Anything still present in kwargs was not a recognized option;
    # mimic the TypeError a normal function signature would raise.
    if kwargs:
        bad_key = list(kwargs.keys())[0]
        raise TypeError(
            "run() got an unexpected keyword argument '{}'".format(bad_key)
        )
    # hide=True implies not echoing the command itself either.
    if opts['hide'] is True:
        opts['echo'] = False
    # Normalize the various accepted 'hide' spellings into a tuple of
    # stream names.
    opts['hide'] = normalize_hide(opts['hide'])
    # Fall back to the process-global streams when none were supplied.
    out_stream = sys.stdout if opts['out_stream'] is None else opts['out_stream']
    err_stream = sys.stderr if opts['err_stream'] is None else opts['err_stream']
    in_stream = sys.stdin if opts['in_stream'] is None else opts['in_stream']
    # Decide on pty use & honor any runtime watchers.
    self.using_pty = self.should_use_pty(opts['pty'], opts['fallback'])
    if opts['watchers']:
        self.watchers = opts['watchers']
    return opts, out_stream, err_stream, in_stream
def _thread_timeout(self, target):
# Add a timeout to out/err thread joins when it looks like they're not
# dead but their counterpart is dead; this indicates issue #351 (fixed
# by #432) where the subproc may hang because its stdout (or stderr) is
# no longer being consumed by the dead thread (and a pipe is filling
# up.) In that case, the non-dead thread is likely to block forever on
# a `recv` unless we add this timeout.
if target == self.handle_stdin:
return None
opposite = self.handle_stderr
if target == self.handle_stderr:
opposite = self.handle_stdout
if opposite in self.threads and self.threads[opposite].is_dead:
return 1
return None
def generate_result(self, **kwargs):
    """
    Create & return a suitable `Result` instance from the given ``kwargs``.

    Subclasses may wish to override this in order to manipulate things or
    generate a `Result` subclass (e.g. ones containing additional metadata
    besides the default).

    :param kwargs: Forwarded verbatim to the `Result` constructor.
    :returns: A `Result` instance.
    """
    return Result(**kwargs)
def read_proc_output(self, reader):
    """
    Iteratively read & decode bytes from a subprocess' out/err stream.

    :param reader:
        A literal reader function/partial, wrapping the actual stream
        object in question, which takes a number of bytes to read, and
        returns that many bytes (or ``None``).

        ``reader`` should be a reference to either `read_proc_stdout` or
        `read_proc_stderr`, which perform the actual, platform/library
        specific read calls.

    :returns:
        A generator yielding Unicode strings, each the decoded result of
        reading up to `read_chunk_size` bytes from the stream.
    """
    # NOTE: "read until you get nothing back" is preferable over "wait for
    # an out-of-band 'process done' signal", because the latter can race
    # with data still sitting unread in the stream.
    for raw_chunk in iter(lambda: reader(self.read_chunk_size), None):
        if not raw_chunk:
            break
        yield self.decode(raw_chunk)
def write_our_output(self, stream, string):
    """
    Write ``string`` to ``stream``, flushing immediately.

    The flush ensures real terminal streams don't sit on buffered data.

    :param stream:
        A file-like stream object, mapping to the ``out_stream`` or
        ``err_stream`` parameters of `run`.
    :param string: A Unicode string object.

    :returns: ``None``.
    """
    encoded = encode_output(string, self.encoding)
    stream.write(encoded)
    stream.flush()
def _handle_output(self, buffer_, hide, output, reader):
# TODO: store un-decoded/raw bytes somewhere as well...
for data in self.read_proc_output(reader):
# Echo to local stdout if necessary
# TODO: should we rephrase this as "if you want to hide, give me a
# dummy output stream, e.g. something like /dev/null"? Otherwise, a
# combo of 'hide=stdout' + 'here is an explicit out_stream' means
# out_stream is never written to, and that seems...odd.
if not hide:
self.write_our_output(stream=output, string=data)
# Store in shared buffer so main thread can do things with the
# result after execution completes.
# NOTE: this is threadsafe insofar as no reading occurs until after
# the thread is join()'d.
buffer_.append(data)
# Run our specific buffer through the autoresponder framework
self.respond(buffer_)
def handle_stdout(self, buffer_, hide, output):
    """
    Read process' stdout, storing into a buffer & printing/parsing.

    Intended for use as a thread target. Only terminates when all stdout
    from the subprocess has been read.

    :param buffer_: The capture buffer shared with the main thread.
    :param bool hide: Whether or not to replay data into ``output``.
    :param output:
        Output stream (file-like object) to write data into when not
        hiding.

    :returns: ``None``.
    """
    # Delegate to the shared loop, pointed at our stdout reader.
    self._handle_output(buffer_, hide, output, reader=self.read_proc_stdout)
def handle_stderr(self, buffer_, hide, output):
    """
    Read process' stderr, storing into a buffer & printing/parsing.

    Identical to `handle_stdout` except for the stream read from; see its
    docstring for API details.
    """
    # Same shared loop, pointed at the stderr reader instead.
    self._handle_output(buffer_, hide, output, reader=self.read_proc_stderr)
def read_our_stdin(self, input_):
    """
    Read & decode bytes from a local stdin stream.

    :param input_:
        Actual stream object to read from. Maps to ``in_stream`` in `run`,
        so will often be ``sys.stdin``, but might be any stream-like
        object.

    :returns:
        A Unicode string (possibly empty, if the pipe has closed/reached
        EOF), or ``None`` if stdin wasn't ready for reading yet.
    """
    # TODO: consider moving the character_buffered contextmanager call in
    # here? Downside is it would be flipping those switches for every byte
    # read instead of once per session, which could be costly (?).
    if not ready_for_reading(input_):
        return None
    data = input_.read(bytes_to_read(input_))
    # Real terminal streams usually hand us bytes; file-like stand-ins may
    # already yield str. Only decode in the former case.
    if data and isinstance(data, six.binary_type):
        # TODO: will decoding 1 byte at a time break multibyte
        # character encodings? How to square interactivity with that?
        data = self.decode(data)
    return data
def handle_stdin(self, input_, output, echo):
    """
    Read local stdin, copying into process' stdin as necessary.

    Intended for use as a thread target.

    .. note::
        Because real terminal stdin streams have no well-defined "end", if
        such a stream is detected (based on existence of a callable
        ``.fileno()``) this method will wait until `program_finished` is
        set, before terminating.

        When the stream doesn't appear to be from a terminal, the same
        semantics as `handle_stdout` are used - the stream is simply
        ``read()`` from until it returns an empty value.

    :param input_: Stream (file-like object) from which to read.
    :param output: Stream (file-like object) to which echoing may occur.
    :param bool echo:
        User override option for stdin-stdout echoing; ``None`` means
        "decide via `should_echo_stdin` on first read".

    :returns: ``None``.
    """
    # TODO: reinstate lock/whatever thread logic from fab v1 which prevents
    # reading from stdin while other parts of the code are prompting for
    # runtime passwords? (search for 'input_enabled')
    # TODO: fabric#1339 is strongly related to this, if it's not literally
    # exposing some regression in Fabric 1.x itself.
    with character_buffered(input_):
        while True:
            # data is: nonempty str (got input), empty value (EOF), or
            # None (stream not ready yet; see read_our_stdin).
            data = self.read_our_stdin(input_)
            if data:
                # Mirror what we just read to process' stdin.
                # We perform an encode so Python 3 gets bytes (streams +
                # str's in Python 3 == no bueno) but skip the decode step,
                # since there's presumably no need (nobody's interacting
                # with this data programmatically).
                self.write_proc_stdin(data)
                # Also echo it back to local stdout (or whatever
                # out_stream is set to) when necessary.
                if echo is None:
                    echo = self.should_echo_stdin(input_, output)
                if echo:
                    self.write_our_output(stream=output, string=data)
            # Empty string/char/byte != None. Can't just use 'else' here.
            elif data is not None:
                # When reading from file-like objects that aren't "real"
                # terminal streams, an empty byte signals EOF.
                break
            # Dual all-done signals: program being executed is done
            # running, *and* we don't seem to be reading anything out of
            # stdin. (NOTE: If we only test the former, we may encounter
            # race conditions re: unread stdin.)
            if self.program_finished.is_set() and not data:
                break
            # Take a nap so we're not chewing CPU.
            time.sleep(self.input_sleep)
def should_echo_stdin(self, input_, output):
    """
    Determine whether data read from ``input_`` should echo to ``output``.

    Used by `handle_stdin`; tests attributes of ``input_`` and ``output``.

    :param input_: Input stream (file-like object).
    :param output: Output stream (file-like object).

    :returns: A ``bool``.
    """
    # A pty echoes for us, so never double up in that case; otherwise,
    # only echo when stdin is a real terminal.
    if self.using_pty:
        return False
    return isatty(input_)
def respond(self, buffer_):
    """
    Write to the program's stdin in response to patterns in ``buffer_``.

    The patterns and responses are driven by the `.StreamWatcher` instances
    from the ``watchers`` kwarg of `run` - see :doc:`/concepts/watchers`
    for a conceptual overview.

    :param buffer_:
        The capture buffer for this thread's particular IO stream.

    :returns: ``None``.
    """
    # Flatten the buffer into one string so watchers can scan the entire
    # stream seen so far (iterative pattern matching), not just the newest
    # chunk. NOTE: ''.join is "efficient enough" for now; revisit with
    # StringIO/cStringIO if that stops being true.
    stream_text = u''.join(buffer_)
    for watcher in self.watchers:
        for reply in watcher.submit(stream_text):
            self.write_proc_stdin(reply)
def generate_env(self, env, replace_env):
    """
    Return a suitable environment dict based on user input & behavior.

    :param dict env: Dict supplying overrides or full env, depending.
    :param bool replace_env:
        Whether ``env`` updates, or is used in place of, the value of
        `os.environ`.

    :returns: A dictionary of shell environment vars.
    """
    if replace_env:
        # Caller supplied a complete environment; use it verbatim.
        return env
    # Otherwise treat env as overrides layered onto our own environment.
    merged = dict(os.environ)
    merged.update(env)
    return merged
def should_use_pty(self, pty, fallback):
    """
    Should execution attempt to use a pseudo-terminal?

    :param bool pty:
        Whether the user explicitly asked for a pty.
    :param bool fallback:
        Whether falling back to non-pty execution should be allowed, in
        situations where ``pty=True`` but a pty could not be allocated.
    """
    # The base implementation never falls back, so 'fallback' is unused
    # here; subclasses (e.g. Local) consult it.
    return pty
@property
def has_dead_threads(self):
    """
    Detect whether any IO threads appear to have terminated unexpectedly.

    Used during process-completion waiting (in `wait`) to ensure we don't
    deadlock our child process if our IO processing threads have
    errored/died.

    :returns:
        ``True`` if any threads appear to have terminated with an
        exception, ``False`` otherwise.
    """
    for worker in self.threads.values():
        if worker.is_dead:
            return True
    return False
def wait(self):
    """
    Block until the running command appears to have exited.

    Also exits early if our IO threads have died, so we don't deadlock on
    a subprocess whose pipes nobody is draining anymore.

    :returns: ``None``.
    """
    # Poll-and-sleep instead of a blocking OS wait, so the dead-thread
    # escape hatch gets a chance to fire.
    while not (self.process_is_finished or self.has_dead_threads):
        time.sleep(self.input_sleep)
def write_proc_stdin(self, data):
    """
    Write encoded ``data`` to the running process' stdin.

    :param data: A Unicode string.

    :returns: ``None``.
    """
    # Encode here so subclasses' _write_proc_stdin implementations only
    # ever deal in bytes.
    payload = data.encode(self.encoding)
    self._write_proc_stdin(payload)
def decode(self, data):
    """
    Decode some ``data`` bytes, returning Unicode.

    Invalid byte sequences are replaced rather than raised, so stray
    binary in a subprocess' output can't kill the IO threads.
    """
    return data.decode(self.encoding, errors='replace')
@property
def process_is_finished(self):
    """
    Determine whether our subprocess has terminated.

    .. note::
        The implementation of this method should be nonblocking, as it is
        used within a query/poll loop.

    :returns:
        ``True`` if the subprocess has finished running, ``False``
        otherwise.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def start(self, command, shell, env):
    """
    Initiate execution of ``command`` (via ``shell``, with ``env``).

    Typically this means use of a forked subprocess or requesting start of
    execution on a remote system.

    In most cases, this method will also set subclass-specific member
    variables used in other methods such as `wait` and/or `returncode`.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def read_proc_stdout(self, num_bytes):
    """
    Read ``num_bytes`` from the running process' stdout stream.

    :param int num_bytes: Number of bytes to read at maximum.

    :returns: A string/bytes object.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def read_proc_stderr(self, num_bytes):
    """
    Read ``num_bytes`` from the running process' stderr stream.

    :param int num_bytes: Number of bytes to read at maximum.

    :returns: A string/bytes object.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def _write_proc_stdin(self, data):
    """
    Write ``data`` to running process' stdin.

    This should never be called directly; it's for subclasses to implement.
    See `write_proc_stdin` for the public API call.

    :param data: Already-encoded byte data suitable for writing.

    :returns: ``None``.

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def default_encoding(self):
    """
    Return a string naming the expected encoding of subprocess streams.

    This return value should be suitable for use by encode/decode methods.
    """
    # TODO: probably wants to be 2 methods, one for local and one for
    # subprocess. For now, good enough to assume both are the same.
    #
    # do_setlocale=False avoids mutating global locale state, but on
    # Python 2 under Unix it can report the wrong value; prefer the
    # default locale's encoding there when one is set. (See #274.)
    encoding = locale.getpreferredencoding(False)
    if six.PY2 and not WINDOWS:
        default = locale.getdefaultlocale()[1]
        if default is not None:
            encoding = default
    return encoding
def send_interrupt(self, interrupt):
    """
    Submit an interrupt signal to the running subprocess.

    By default this writes an ETX character (``"\\x03"``, i.e. Ctrl-C) to
    the subprocess' stdin pipe, which suits nearly all implementations; it
    remains a public method so the behavior can be augmented or replaced.

    :param interrupt:
        The locally-sourced ``KeyboardInterrupt`` causing the method call.

    :returns: ``None``.
    """
    self.write_proc_stdin(u'\x03')
def returncode(self):
    """
    Return the numeric return/exit code resulting from command execution.

    :returns: `int`

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
def stop(self):
    """
    Perform final cleanup, if necessary.

    This method is called within a ``finally`` clause inside the main `run`
    method. Depending on the subclass, it may be a no-op, or it may do
    things such as close network connections or open files.

    :returns: ``None``

    :raises NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
class Local(Runner):
    """
    Execute a command on the local system in a subprocess.

    .. note::
        When Invoke itself is executed without a controlling terminal (e.g.
        when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
        present a handle on our PTY to local subprocesses. In such situations,
        `Local` will fallback to behaving as if ``pty=False`` (on the theory
        that degraded execution is better than none at all) as well as printing
        a warning to stderr.

        To disable this behavior, say ``fallback=False``.
    """
    def __init__(self, context):
        super(Local, self).__init__(context)
        # Bookkeeping var for pty use case
        self.status = None

    def should_use_pty(self, pty=False, fallback=True):
        # See class docstring: honor pty=True only when we can actually
        # hand a terminal fd to the child; otherwise fall back (once, with
        # a warning) unless fallback=False.
        use_pty = False
        if pty:
            use_pty = True
            # TODO: pass in & test in_stream, not sys.stdin
            if not has_fileno(sys.stdin) and fallback:
                if not self.warned_about_pty_fallback:
                    sys.stderr.write("WARNING: stdin has no fileno; falling back to non-pty execution!\n")  # noqa
                    self.warned_about_pty_fallback = True
                use_pty = False
        return use_pty

    def read_proc_stdout(self, num_bytes):
        # Obtain useful read-some-bytes function
        if self.using_pty:
            # Need to handle spurious OSErrors on some Linux platforms.
            try:
                data = os.read(self.parent_fd, num_bytes)
            except OSError as e:
                # Only eat this specific OSError so we don't hide others
                if "Input/output error" not in str(e):
                    raise
                # The bad OSErrors happen after all expected output has
                # appeared, so we return a falsey value, which triggers the
                # "end of output" logic in code using reader functions.
                data = None
        else:
            data = os.read(self.process.stdout.fileno(), num_bytes)
        return data

    def read_proc_stderr(self, num_bytes):
        # NOTE: when using a pty, this will never be called.
        # TODO: do we ever get those OSErrors on stderr? Feels like we could?
        return os.read(self.process.stderr.fileno(), num_bytes)

    def _write_proc_stdin(self, data):
        # NOTE: parent_fd from os.fork() is a read/write pipe attached to our
        # forked process' stdout/stdin, respectively.
        fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
        # Try to write, ignoring broken pipes if encountered (implies child
        # process exited before the process piping stdin to us finished;
        # there's nothing we can do about that!)
        try:
            return os.write(fd, data)
        except OSError as e:
            if 'Broken pipe' not in str(e):
                raise

    def start(self, command, shell, env):
        if self.using_pty:
            if pty is None:  # Encountered ImportError
                sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!")  # noqa
            cols, rows = pty_size()
            self.pid, self.parent_fd = pty.fork()
            # If we're the child process, load up the actual command in a
            # shell, just as subprocess does; this replaces our process - whose
            # pipes are all hooked up to the PTY - with the "real" one.
            if self.pid == 0:
                # TODO: both pty.spawn() and pexpect.spawn() do a lot of
                # setup/teardown involving tty.setraw, getrlimit, signal.
                # Ostensibly we'll want some of that eventually, but if
                # possible write tests - integration-level if necessary -
                # before adding it!
                #
                # Set pty window size based on what our own controlling
                # terminal's window size appears to be.
                # TODO: make subroutine?
                winsize = struct.pack('HHHH', rows, cols, 0, 0)
                fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
                # Use execve for bare-minimum "exec w/ variable # args + env"
                # behavior. No need for the 'p' (use PATH to find executable)
                # for now.
                # TODO: see if subprocess is using equivalent of execvp...
                os.execve(shell, [shell, '-c', command], env)
        else:
            self.process = Popen(
                command,
                shell=True,
                executable=shell,
                env=env,
                stdout=PIPE,
                stderr=PIPE,
                stdin=PIPE,
            )

    @property
    def process_is_finished(self):
        if self.using_pty:
            # NOTE:
            # https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
            # implies that Linux "requires" use of the blocking, non-WNOHANG
            # version of this call. Our testing doesn't verify this, however,
            # so...
            # NOTE: It does appear to be totally blocking on Windows, so our
            # issue #351 may be totally unsolvable there. Unclear.
            pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
            # waitpid returns pid 0 while the child is still running.
            return pid_val != 0
        else:
            return self.process.poll() is not None

    def returncode(self):
        if self.using_pty:
            # No subprocess.returncode available; use WIFEXITED/WIFSIGNALED to
            # determine which of WEXITSTATUS / WTERMSIG to use.
            # TODO: is it safe to just say "call all WEXITSTATUS/WTERMSIG and
            # return whichever one of them is nondefault"? Probably not?
            # NOTE: doing this in an arbitrary order should be safe since only
            # one of the WIF* methods ought to ever return True.
            code = None
            if os.WIFEXITED(self.status):
                code = os.WEXITSTATUS(self.status)
            elif os.WIFSIGNALED(self.status):
                code = os.WTERMSIG(self.status)
                # Match subprocess.returncode by turning signals into negative
                # 'exit code' integers.
                code = -1 * code
            return code
            # TODO: do we care about WIFSTOPPED? Maybe someday?
        else:
            return self.process.returncode

    def stop(self):
        # No explicit close-out required (so far).
        pass
class Result(object):
    """
    A container for information about the result of a command execution.

    All params are exposed as attributes of the same name and type.

    :param str stdout:
        The subprocess' standard output.
    :param str stderr:
        Same as ``stdout`` but containing standard error (unless the process
        was invoked via a pty, in which case it will be empty; see
        `.Runner.run`.)
    :param str encoding:
        The string encoding used by the local shell environment.
    :param str command:
        The command which was executed.
    :param str shell:
        The shell binary used for execution.
    :param dict env:
        The shell environment used for execution. (Default is the empty dict,
        ``{}``, not ``None`` as displayed in the signature.)
    :param int exited:
        An integer representing the subprocess' exit/return code.
    :param bool pty:
        A boolean describing whether the subprocess was invoked with a pty or
        not; see `.Runner.run`.
    :param tuple hide:
        A tuple of stream names (none, one or both of ``('stdout', 'stderr')``)
        which were hidden from the user when the generating command executed;
        this is a normalized value derived from the ``hide`` parameter of
        `.Runner.run`.

    .. note::
        `Result` objects' truth evaluation is equivalent to their `.ok`
        attribute's value, so ``if run("cmd"): ...`` works - though remember
        Zen of Python #2 (explicit is better than implicit).
    """
    # TODO: inherit from namedtuple instead? heh (or: use attrs from pypi)
    def __init__(
        self,
        stdout="",
        stderr="",
        encoding=None,
        command="",
        shell="",
        env=None,
        exited=0,
        pty=False,
        hide=tuple(),
    ):
        self.stdout = stdout
        self.stderr = stderr
        self.encoding = encoding
        self.command = command
        self.shell = shell
        # Normalize the None sentinel into a fresh empty dict (avoids the
        # shared-mutable-default trap).
        self.env = {} if env is None else env
        self.exited = exited
        self.pty = pty
        self.hide = hide

    @property
    def return_code(self):
        """
        An alias for ``.exited``.
        """
        return self.exited

    def __nonzero__(self):
        # Python 2 truthiness hook; mirrors .ok.
        return self.ok

    def __bool__(self):
        # Python 3 truthiness hook; defer to the Python 2 spelling so
        # subclasses overriding __nonzero__ stay consistent.
        return self.__nonzero__()

    def __str__(self):
        # Header line: exit status, or a note that a watcher aborted us
        # (in which case exited is None).
        if self.exited is not None:
            header = "Command exited with status {}.".format(self.exited)
        else:
            header = "Command was not fully executed due to watcher error."
        sections = [header]
        # One section per captured stream, or a "(no X)" placeholder.
        for name in ('stdout', 'stderr'):
            captured = getattr(self, name)
            if captured:
                sections.append(
                    u"=== {} ===\n{}\n".format(name, captured.rstrip())
                )
            else:
                sections.append(u"(no {})".format(name))
        return u"\n".join(sections)

    def __repr__(self):
        # TODO: more? e.g. len of stdout/err? (how to represent cleanly in a
        # 'x=y' format like this?)
        return "<Result cmd={!r} exited={}>".format(self.command, self.exited)

    @property
    def ok(self):
        """
        A boolean equivalent to ``exited == 0``.
        """
        return self.exited == 0

    @property
    def failed(self):
        """
        The inverse of ``ok``.

        I.e., ``True`` if the program exited with a nonzero return code, and
        ``False`` otherwise.
        """
        return not self.ok
def normalize_hide(val):
    """
    Normalize a ``hide`` value into a tuple of stream names to hide.

    Accepts ``None``/``False`` (hide nothing), ``True``/``'both'`` (hide
    both streams), ``'out'``/``'stdout'`` and ``'err'``/``'stderr'``.

    :raises ValueError: if ``val`` is not one of the accepted values.
    """
    hide_vals = (None, False, 'out', 'stdout', 'err', 'stderr', 'both', True)
    if val not in hide_vals:
        err = "'hide' got {!r} which is not in {!r}"
        raise ValueError(err.format(val, hide_vals))
    # Map the shorthand/boolean spellings onto canonical stream-name
    # tuples; 'stdout'/'stderr' themselves fall through to the default.
    aliases = {
        None: (),
        False: (),
        True: ('stdout', 'stderr'),
        'both': ('stdout', 'stderr'),
        'out': ('stdout',),
        'err': ('stderr',),
    }
    return aliases.get(val, (val,))
|
{
"content_hash": "aa99017230d1bcd60f2a596203cc1a9c",
"timestamp": "",
"source": "github",
"line_count": 1174,
"max_line_length": 132,
"avg_line_length": 40.057069846678026,
"alnum_prop": 0.5860250494396836,
"repo_name": "mkusz/invoke",
"id": "72786702fc8083d7af048b13b7c5a9b872cdc651",
"size": "47052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invoke/runners.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "876"
},
{
"name": "Python",
"bytes": "553234"
},
{
"name": "Shell",
"bytes": "2763"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add ``OrderItem.product_legacy_id`` and relax
    ``OrderItem.product`` to be nullable."""

    def forwards(self, orm):
        # Adding field 'OrderItem.product_legacy_id'
        db.add_column(u'checkout_orderitem', 'product_legacy_id',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Changing field 'OrderItem.product' (now nullable)
        db.alter_column(u'checkout_orderitem', 'product_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['products.Product'], null=True))

    def backwards(self, orm):
        # Deleting field 'OrderItem.product_legacy_id'
        db.delete_column(u'checkout_orderitem', 'product_legacy_id')

        # Changing field 'OrderItem.product' (back to non-nullable)
        db.alter_column(u'checkout_orderitem', 'product_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['products.Product']))

    # Frozen ORM state auto-generated by South; do not edit by hand.
    models = {
        u'checkout.order': {
            'Meta': {'object_name': 'Order'},
            'address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['users.Address']"}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'delivery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shops.Delivery']", 'null': 'True', 'blank': 'True'}),
            'delivery_cost': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'net': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
            'payment_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['products.Product']", 'through': u"orm['checkout.OrderItem']", 'symmetrical': 'False'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['shops.Shop']"}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['users.User']"})
        },
        u'checkout.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['checkout.Order']"}),
            'per_item': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['products.Product']", 'null': 'True', 'blank': 'True'}),
            'product_legacy_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'products.category': {
            'Meta': {'object_name': 'Category', 'db_table': "'products_categories'"},
            'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complementary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_standalone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'legacy_id': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['products.Category']"}),
            'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        u'products.product': {
            'Meta': {'object_name': 'Product', 'db_table': "'products_items'"},
            'categories': ('mptt.fields.TreeManyToManyField', [], {'related_name': "'products'", 'symmetrical': 'False', 'to': u"orm['products.Category']"}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dimensions': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'franchisee_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'franchisee_price': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_wholesale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'pack_amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'price': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'sku': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'weight': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'wholesale_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'wholesale_legacy_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'wholesale_price': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'})
        },
        u'shops.city': {
            'Meta': {'object_name': 'City', 'db_table': "'shops_cities'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'shops.delivery': {
            'Meta': {'object_name': 'Delivery'},
            'caption': ('django.db.models.fields.TextField', [], {}),
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'shops.shop': {
            'Meta': {'object_name': 'Shop'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shops'", 'to': u"orm['shops.City']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'phones': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'worktime': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'users.address': {
            'Meta': {'object_name': 'Address'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'flat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'house': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original_string': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
            'receiver_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['users.User']"})
        },
        u'users.user': {
            'Meta': {'object_name': 'User'},
            'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['checkout']
|
{
"content_hash": "9f9ad90267ca13fb6556264aa79dfccf",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 181,
"avg_line_length": 80.57534246575342,
"alnum_prop": 0.5572934376062564,
"repo_name": "Lisaveta-K/lisaveta-k.github.io",
"id": "1e7ebc095b2588a359ae6830b5f7b925a350c1f0",
"size": "11788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_site/tomat/apps/checkout/migrations/0003_order_item_legacy_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159937"
},
{
"name": "HTML",
"bytes": "275262"
},
{
"name": "JavaScript",
"bytes": "34638"
},
{
"name": "Python",
"bytes": "664204"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import collections
import os
import time
import math
import json
import six
import random
import gc
import numpy as np
if six.PY2:
import cPickle as pickle
else:
import pickle
import tensorflow as tf
import sentencepiece as spm
from prepro_utils import preprocess_text, encode_ids, encode_pieces, printable_text
import function_builder
import model_utils
import squad_utils
from data_utils import SEP_ID, CLS_ID, VOCAB_SIZE
# SentencePiece's word-boundary marker character (U+2581).
SPIECE_UNDERLINE = u'▁'

# Segment ids used to tag token types in the packed P-[SEP]-Q-[SEP]-[CLS]
# input sequence (see convert_examples_to_features, which appends these).
SEG_ID_P = 0    # paragraph (context) tokens
SEG_ID_Q = 1    # question tokens
SEG_ID_CLS = 2  # the trailing [CLS] token
SEG_ID_PAD = 3  # padding positions
# Command-line flag definitions. `FLAGS` (bound at the end of this block) is
# read throughout the module.

# Preprocessing
flags.DEFINE_bool("do_prepro", default=False,
                  help="Perform preprocessing only.")
flags.DEFINE_integer("num_proc", default=1,
                     help="Number of preprocessing processes.")
flags.DEFINE_integer("proc_id", default=0,
                     help="Process id for preprocessing.")

# Model
flags.DEFINE_string("model_config_path", default=None,
                    help="Model config path.")
flags.DEFINE_float("dropout", default=0.1,
                   help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.1,
                   help="Attention dropout rate.")
flags.DEFINE_integer("clamp_len", default=-1,
                     help="Clamp length.")
flags.DEFINE_string("summary_type", default="last",
                    help="Method used to summarize a sequence into a vector.")
flags.DEFINE_bool("use_bfloat16", default=False,
                  help="Whether to use bfloat16.")

# Parameter initialization
flags.DEFINE_enum("init", default="normal",
                  enum_values=["normal", "uniform"],
                  help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02,
                   help="Initialization std when init is normal.")
flags.DEFINE_float("init_range", default=0.1,
                   help="Initialization std when init is uniform.")

# I/O paths
flags.DEFINE_bool("overwrite_data", default=False,
                  help="If False, will use cached data if available.")
flags.DEFINE_string("init_checkpoint", default=None,
                    help="checkpoint path for initializing the model. "
                    "Could be a pretrained model or a finetuned model.")
flags.DEFINE_bool("init_global_vars", default=False,
                  help="If true, init all global vars. If false, init "
                  "trainable vars only.")
flags.DEFINE_string("output_dir", default="",
                    help="Output dir for TF records.")
flags.DEFINE_string("predict_dir", default="",
                    help="Dir for predictions.")
flags.DEFINE_string("spiece_model_file", default="",
                    help="Sentence Piece model path.")
flags.DEFINE_string("model_dir", default="",
                    help="Directory for saving the finetuned model.")
flags.DEFINE_string("train_file", default="",
                    help="Path of train file.")
flags.DEFINE_string("predict_file", default="",
                    help="Path of prediction file.")

# Data preprocessing config
flags.DEFINE_integer("max_seq_length",
                     default=512, help="Max sequence length")
flags.DEFINE_integer("max_query_length",
                     default=64, help="Max query length")
flags.DEFINE_integer("doc_stride",
                     default=128, help="Doc stride")
flags.DEFINE_integer("max_answer_length",
                     default=64, help="Max answer length")
flags.DEFINE_bool("uncased", default=False, help="Use uncased data.")

# TPUs and machines
flags.DEFINE_bool("use_tpu", default=False, help="whether to use TPU.")
flags.DEFINE_integer("num_hosts", default=1, help="How many TPU hosts.")
flags.DEFINE_integer("num_core_per_host", default=8,
                     help="8 for TPU v2 and v3-8, 16 for larger TPU v3 pod. In the context "
                     "of GPU training, it refers to the number of GPUs used.")
flags.DEFINE_string("tpu_job_name", default=None, help="TPU worker job name.")
flags.DEFINE_string("tpu", default=None, help="TPU name.")
flags.DEFINE_string("tpu_zone", default=None, help="TPU zone.")
flags.DEFINE_string("gcp_project", default=None, help="gcp project.")
flags.DEFINE_string("master", default=None, help="master")
flags.DEFINE_integer("iterations", default=1000,
                     help="number of iterations per TPU training loop.")

# Training
flags.DEFINE_bool("do_train", default=True, help="whether to do training")
flags.DEFINE_integer("train_batch_size", default=48,
                     help="batch size for training")
flags.DEFINE_integer("train_steps", default=8000,
                     help="Number of training steps")
flags.DEFINE_integer("warmup_steps", default=0, help="number of warmup steps")
flags.DEFINE_integer("save_steps", default=None,
                     help="Save the model for every save_steps. "
                     "If None, not to save any model.")
flags.DEFINE_integer("max_save", default=5,
                     help="Max number of checkpoints to save. "
                     "Use 0 to save all.")
flags.DEFINE_integer("shuffle_buffer", default=2048,
                     help="Buffer size used for shuffle.")

# Optimization
flags.DEFINE_float("learning_rate", default=3e-5, help="initial learning rate")
flags.DEFINE_float("min_lr_ratio", default=0.0,
                   help="min lr ratio for cos decay.")
flags.DEFINE_float("clip", default=1.0, help="Gradient clipping")
flags.DEFINE_float("weight_decay", default=0.00, help="Weight decay rate")
flags.DEFINE_float("adam_epsilon", default=1e-6, help="Adam epsilon")
flags.DEFINE_string("decay_method", default="poly", help="poly or cos")
flags.DEFINE_float("lr_layer_decay_rate", default=0.75,
                   help="Top layer: lr[L] = FLAGS.learning_rate."
                   "Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.")

# Eval / Prediction
flags.DEFINE_bool("do_predict", default=False, help="whether to do predict")
flags.DEFINE_integer("predict_batch_size", default=32,
                     help="batch size for prediction")
flags.DEFINE_integer("n_best_size", default=5,
                     help="n best size for predictions")
flags.DEFINE_integer("start_n_top", default=5, help="Beam size for span start.")
flags.DEFINE_integer("end_n_top", default=5, help="Beam size for span end.")
flags.DEFINE_string("target_eval_key", default="best_f1",
                    help="Use has_ans_f1 for Model I.")

FLAGS = flags.FLAGS
class SquadExample(object):
  """A single training/test example for simple sequence classification.

  For examples without an answer, the start and end position are -1.

  Attributes:
    qas_id: unique question/answer id string.
    question_text: the question string.
    paragraph_text: the raw context paragraph string.
    orig_answer_text: the original answer text, or None for eval examples.
    start_position: character start offset of the answer in `paragraph_text`
      (-1 for impossible training examples, None for eval examples).
    is_impossible: True if the question has no answer in the paragraph.
  """

  def __init__(self,
               qas_id,
               question_text,
               paragraph_text,
               orig_answer_text=None,
               start_position=None,
               is_impossible=False):
    self.qas_id = qas_id
    self.question_text = question_text
    self.paragraph_text = paragraph_text
    self.orig_answer_text = orig_answer_text
    self.start_position = start_position
    self.is_impossible = is_impossible

  def __str__(self):
    return self.__repr__()

  def __repr__(self):
    s = ""
    s += "qas_id: %s" % (printable_text(self.qas_id))
    s += ", question_text: %s" % (
        printable_text(self.question_text))
    s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text))
    # Bug fix: the original gated both clauses on `if self.start_position:`,
    # which (a) dropped a legitimate answer at character offset 0 and
    # (b) only ever printed `is_impossible` when start_position was truthy.
    if self.start_position is not None:
      s += ", start_position: %d" % (self.start_position)
    if self.is_impossible:
      s += ", is_impossible: %r" % (self.is_impossible)
    return s
class InputFeatures(object):
  """A single set of features of data.

  Pure data holder: every constructor argument is stored verbatim as an
  attribute of the same name. The training-only fields (`start_position`,
  `end_position`, `is_impossible`) default to None for eval features.
  """

  def __init__(self,
               unique_id,
               example_index,
               doc_span_index,
               tok_start_to_orig_index,
               tok_end_to_orig_index,
               token_is_max_context,
               input_ids,
               input_mask,
               p_mask,
               segment_ids,
               paragraph_len,
               cls_index,
               start_position=None,
               end_position=None,
               is_impossible=None):
    # Copy each argument onto the instance under the same name.
    kwargs = dict(locals())
    del kwargs["self"]
    for field_name, field_value in kwargs.items():
      setattr(self, field_name, field_value)
def read_squad_examples(input_file, is_training):
  """Read a SQuAD json file into a list of SquadExample objects.

  Args:
    input_file: path to a SQuAD-format JSON file.
    is_training: if True, answer text/offsets and `is_impossible` are read;
      a question with neither exactly one answer nor `is_impossible` raises.

  Returns:
    A list of SquadExample, one per question.
  """
  with tf.gfile.Open(input_file, "r") as reader:
    input_data = json.load(reader)["data"]

  examples = []
  for entry in input_data:
    for paragraph in entry["paragraphs"]:
      context = paragraph["context"]
      for qa in paragraph["qas"]:
        answer_text = None
        answer_start = None
        impossible = False
        if is_training:
          impossible = qa["is_impossible"]
          if len(qa["answers"]) != 1 and not impossible:
            raise ValueError(
                "For training, each question should have exactly 1 answer.")
          if impossible:
            answer_start = -1
            answer_text = ""
          else:
            first_answer = qa["answers"][0]
            answer_text = first_answer["text"]
            answer_start = first_answer["answer_start"]
        examples.append(SquadExample(
            qas_id=qa["id"],
            question_text=qa["question"],
            paragraph_text=context,
            orig_answer_text=answer_text,
            start_position=answer_start,
            is_impossible=impossible))
  return examples
def _convert_index(index, pos, M=None, is_start=True):
if index[pos] is not None:
return index[pos]
N = len(index)
rear = pos
while rear < N - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if M is not None and index[front] < M - 1:
if is_start:
return index[front] + 1
else:
return M - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
def convert_examples_to_features(examples, sp_model, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn):
  """Loads a data file into a list of `InputBatch`s.

  For each SquadExample: tokenize with SentencePiece, align the piece
  tokens back to the original paragraph characters via a banded LCS,
  split long paragraphs into overlapping doc spans, and emit one
  InputFeatures per span through `output_fn(feature)`.

  Args:
    examples: list of SquadExample.
    sp_model: a loaded SentencePiece processor.
    max_seq_length: total packed sequence length (P + SEP + Q + SEP + CLS,
      then padded).
    doc_stride: stride between consecutive doc spans of a long paragraph.
    max_query_length: question is truncated to this many pieces.
    is_training: if True, answer positions are computed and attached.
    output_fn: callback invoked once per produced InputFeatures.
  """
  cnt_pos, cnt_neg = 0, 0  # counts of answerable / impossible spans emitted
  unique_id = 1000000000
  # f is the (reusable) LCS dynamic-programming table; grown on demand.
  max_N, max_M = 1024, 1024
  f = np.zeros((max_N, max_M), dtype=np.float32)

  for (example_index, example) in enumerate(examples):

    if example_index % 100 == 0:
      tf.logging.info('Converting {}/{} pos {} neg {}'.format(
          example_index, len(examples), cnt_pos, cnt_neg))

    query_tokens = encode_ids(
        sp_model,
        preprocess_text(example.question_text, lower=FLAGS.uncased))

    if len(query_tokens) > max_query_length:
      query_tokens = query_tokens[0:max_query_length]

    paragraph_text = example.paragraph_text
    para_tokens = encode_pieces(
        sp_model,
        preprocess_text(example.paragraph_text, lower=FLAGS.uncased))

    # Character-level <-> token-level index maps over the concatenated
    # piece text (before the SPIECE_UNDERLINE -> space replacement below).
    chartok_to_tok_index = []
    tok_start_to_chartok_index = []
    tok_end_to_chartok_index = []
    char_cnt = 0
    for i, token in enumerate(para_tokens):
      chartok_to_tok_index.extend([i] * len(token))
      tok_start_to_chartok_index.append(char_cnt)
      char_cnt += len(token)
      tok_end_to_chartok_index.append(char_cnt - 1)

    tok_cat_text = ''.join(para_tokens).replace(SPIECE_UNDERLINE, ' ')
    N, M = len(paragraph_text), len(tok_cat_text)

    # Grow the shared DP table if this paragraph exceeds the current size.
    if N > max_N or M > max_M:
      max_N = max(N, max_N)
      max_M = max(M, max_M)
      f = np.zeros((max_N, max_M), dtype=np.float32)
      gc.collect()

    # g records the backtracking choice at each (i, j): 0 = skip original
    # char, 1 = skip tokenized char, 2 = matched pair.
    g = {}

    def _lcs_match(max_dist):
      """Fill f/g with a band-limited LCS between original and piece text."""
      f.fill(0)
      g.clear()

      ### longest common sub sequence
      # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
      for i in range(N):

        # note(zhiliny):
        # unlike standard LCS, this is specifically optimized for the setting
        # because the mismatch between sentence pieces and original text will
        # be small
        for j in range(i - max_dist, i + max_dist):
          if j >= M or j < 0: continue

          if i > 0:
            g[(i, j)] = 0
            f[i, j] = f[i - 1, j]

          if j > 0 and f[i, j - 1] > f[i, j]:
            g[(i, j)] = 1
            f[i, j] = f[i, j - 1]

          f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
          if (preprocess_text(paragraph_text[i], lower=FLAGS.uncased,
              remove_space=False)
              == tok_cat_text[j]
              and f_prev + 1 > f[i, j]):
            g[(i, j)] = 2
            f[i, j] = f_prev + 1

    # Widen the band and retry once if fewer than 80% of the original
    # characters were matched.
    max_dist = abs(N - M) + 5
    for _ in range(2):
      _lcs_match(max_dist)
      if f[N - 1, M - 1] > 0.8 * N: break
      max_dist *= 2

    # Backtrack through g to build the char-level alignment in both
    # directions.
    orig_to_chartok_index = [None] * N
    chartok_to_orig_index = [None] * M
    i, j = N - 1, M - 1
    while i >= 0 and j >= 0:
      if (i, j) not in g: break
      if g[(i, j)] == 2:
        orig_to_chartok_index[i] = j
        chartok_to_orig_index[j] = i
        i, j = i - 1, j - 1
      elif g[(i, j)] == 1:
        j = j - 1
      else:
        i = i - 1

    # Give up on this example if the alignment failed.
    if all(v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N:
      print('MISMATCH DETECTED!')
      continue

    # Per-token original-character boundaries, via the char alignment.
    tok_start_to_orig_index = []
    tok_end_to_orig_index = []
    for i in range(len(para_tokens)):
      start_chartok_pos = tok_start_to_chartok_index[i]
      end_chartok_pos = tok_end_to_chartok_index[i]
      start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos,
                                      N, is_start=True)
      end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos,
                                    N, is_start=False)

      tok_start_to_orig_index.append(start_orig_pos)
      tok_end_to_orig_index.append(end_orig_pos)

    if not is_training:
      tok_start_position = tok_end_position = None

    if is_training and example.is_impossible:
      tok_start_position = -1
      tok_end_position = -1

    if is_training and not example.is_impossible:
      # Map the character-level answer span to token positions.
      start_position = example.start_position
      end_position = start_position + len(example.orig_answer_text) - 1

      start_chartok_pos = _convert_index(orig_to_chartok_index, start_position,
                                         is_start=True)
      tok_start_position = chartok_to_tok_index[start_chartok_pos]

      end_chartok_pos = _convert_index(orig_to_chartok_index, end_position,
                                       is_start=False)
      tok_end_position = chartok_to_tok_index[end_chartok_pos]
      assert tok_start_position <= tok_end_position

    def _piece_to_id(x):
      # Python 2 needs bytes for PieceToId.
      if six.PY2 and isinstance(x, unicode):
        x = x.encode('utf-8')
      return sp_model.PieceToId(x)

    all_doc_tokens = list(map(_piece_to_id, para_tokens))

    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of the up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
      length = len(all_doc_tokens) - start_offset
      if length > max_tokens_for_doc:
        length = max_tokens_for_doc
      doc_spans.append(_DocSpan(start=start_offset, length=length))
      if start_offset + length == len(all_doc_tokens):
        break
      start_offset += min(length, doc_stride)

    # Emit one InputFeatures per doc span.
    for (doc_span_index, doc_span) in enumerate(doc_spans):
      tokens = []
      token_is_max_context = {}
      segment_ids = []
      # p_mask: 0 for positions the answer may point to (paragraph tokens
      # and CLS), 1 everywhere else.
      p_mask = []
      cur_tok_start_to_orig_index = []
      cur_tok_end_to_orig_index = []

      for i in range(doc_span.length):
        split_token_index = doc_span.start + i

        cur_tok_start_to_orig_index.append(
            tok_start_to_orig_index[split_token_index])
        cur_tok_end_to_orig_index.append(
            tok_end_to_orig_index[split_token_index])

        is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                               split_token_index)
        token_is_max_context[len(tokens)] = is_max_context
        tokens.append(all_doc_tokens[split_token_index])
        segment_ids.append(SEG_ID_P)
        p_mask.append(0)

      paragraph_len = len(tokens)

      tokens.append(SEP_ID)
      segment_ids.append(SEG_ID_P)
      p_mask.append(1)

      # note(zhiliny): we put P before Q
      # because during pretraining, B is always shorter than A
      for token in query_tokens:
        tokens.append(token)
        segment_ids.append(SEG_ID_Q)
        p_mask.append(1)
      tokens.append(SEP_ID)
      segment_ids.append(SEG_ID_Q)
      p_mask.append(1)

      # [CLS] goes last; its index is used as the "no answer" target.
      cls_index = len(segment_ids)
      tokens.append(CLS_ID)
      segment_ids.append(SEG_ID_CLS)
      p_mask.append(0)

      input_ids = tokens

      # The mask has 0 for real tokens and 1 for padding tokens. Only real
      # tokens are attended to.
      input_mask = [0] * len(input_ids)

      # Zero-pad up to the sequence length.
      while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(1)
        segment_ids.append(SEG_ID_PAD)
        p_mask.append(1)

      assert len(input_ids) == max_seq_length
      assert len(input_mask) == max_seq_length
      assert len(segment_ids) == max_seq_length
      assert len(p_mask) == max_seq_length

      span_is_impossible = example.is_impossible
      start_position = None
      end_position = None
      if is_training and not span_is_impossible:
        # For training, if our document chunk does not contain an annotation
        # we throw it out, since there is nothing to predict.
        doc_start = doc_span.start
        doc_end = doc_span.start + doc_span.length - 1
        out_of_span = False
        if not (tok_start_position >= doc_start and
                tok_end_position <= doc_end):
          out_of_span = True
        if out_of_span:
          # continue
          start_position = 0
          end_position = 0
          span_is_impossible = True
        else:
          # note(zhiliny): we put P before Q, so doc_offset should be zero.
          # doc_offset = len(query_tokens) + 2
          doc_offset = 0
          start_position = tok_start_position - doc_start + doc_offset
          end_position = tok_end_position - doc_start + doc_offset

      if is_training and span_is_impossible:
        start_position = cls_index
        end_position = cls_index

      # Log the first few features for manual inspection.
      if example_index < 20:
        tf.logging.info("*** Example ***")
        tf.logging.info("unique_id: %s" % (unique_id))
        tf.logging.info("example_index: %s" % (example_index))
        tf.logging.info("doc_span_index: %s" % (doc_span_index))
        tf.logging.info("tok_start_to_orig_index: %s" % " ".join(
            [str(x) for x in cur_tok_start_to_orig_index]))
        tf.logging.info("tok_end_to_orig_index: %s" % " ".join(
            [str(x) for x in cur_tok_end_to_orig_index]))
        tf.logging.info("token_is_max_context: %s" % " ".join([
            "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
        ]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info(
            "input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))

        if is_training and span_is_impossible:
          tf.logging.info("impossible example span")

        if is_training and not span_is_impossible:
          pieces = [sp_model.IdToPiece(token) for token in
                    tokens[start_position: (end_position + 1)]]
          answer_text = sp_model.DecodePieces(pieces)
          tf.logging.info("start_position: %d" % (start_position))
          tf.logging.info("end_position: %d" % (end_position))
          tf.logging.info(
              "answer: %s" % (printable_text(answer_text)))

      # note(zhiliny): With multi processing,
      # the example_index is actually the index within the current process
      # therefore we use example_index=None to avoid being used in the future.
      # The current code does not use example_index of training data.
      if is_training:
        feat_example_index = None
      else:
        feat_example_index = example_index

      feature = InputFeatures(
          unique_id=unique_id,
          example_index=feat_example_index,
          doc_span_index=doc_span_index,
          tok_start_to_orig_index=cur_tok_start_to_orig_index,
          tok_end_to_orig_index=cur_tok_end_to_orig_index,
          token_is_max_context=token_is_max_context,
          input_ids=input_ids,
          input_mask=input_mask,
          p_mask=p_mask,
          segment_ids=segment_ids,
          paragraph_len=paragraph_len,
          cls_index=cls_index,
          start_position=start_position,
          end_position=end_position,
          is_impossible=span_is_impossible)

      # Run callback
      output_fn(feature)

      unique_id += 1
      if span_is_impossible:
        cnt_neg += 1
      else:
        cnt_pos += 1

  tf.logging.info("Total number of instances: {} = pos {} neg {}".format(
      cnt_pos + cnt_neg, cnt_pos, cnt_neg))
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class FeatureWriter(object):
  """Writes InputFeature to TF example file."""

  def __init__(self, filename, is_training):
    self.filename = filename
    self.is_training = is_training
    self.num_features = 0
    self._writer = tf.python_io.TFRecordWriter(filename)

  def process_feature(self, feature):
    """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
    self.num_features += 1

    def _int64_feature(values):
      return tf.train.Feature(
          int64_list=tf.train.Int64List(value=list(values)))

    def _float_feature(values):
      return tf.train.Feature(
          float_list=tf.train.FloatList(value=list(values)))

    features = collections.OrderedDict()
    features["unique_ids"] = _int64_feature([feature.unique_id])
    features["input_ids"] = _int64_feature(feature.input_ids)
    features["input_mask"] = _float_feature(feature.input_mask)
    features["p_mask"] = _float_feature(feature.p_mask)
    features["segment_ids"] = _int64_feature(feature.segment_ids)
    features["cls_index"] = _int64_feature([feature.cls_index])

    if self.is_training:
      # Label fields are only present on training records.
      features["start_positions"] = _int64_feature([feature.start_position])
      features["end_positions"] = _int64_feature([feature.end_position])
      features["is_impossible"] = _float_feature(
          [1 if feature.is_impossible else 0])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    self._writer.write(tf_example.SerializeToString())

  def close(self):
    """Flush and close the underlying TFRecord writer."""
    self._writer.close()
# Raw per-feature model output: top-k start/end beam log-probs and indices
# plus the answerability (CLS) logit, keyed by the feature's unique_id.
RawResult = collections.namedtuple("RawResult",
    ["unique_id", "start_top_log_probs", "start_top_index",
     "end_top_log_probs", "end_top_index", "cls_logits"])

# Candidate answer span before text extraction, used inside write_predictions.
_PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "PrelimPrediction",
    ["feature_index", "start_index", "end_index",
     "start_log_prob", "end_log_prob"])

# Final n-best candidate with its extracted answer text.
_NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, output_prediction_file,
                      output_nbest_file,
                      output_null_log_odds_file, orig_data):
  """Write final predictions to the json file and log-odds of null if needed.

  Combines the per-feature beam outputs in `all_results` into per-example
  n-best answer spans, writes three JSON files (predictions, n-best lists,
  null-score differences), and returns the evaluation dict produced by
  squad_utils.find_all_best_thresh_v2.
  """
  tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
  # tf.logging.info("Writing nbest to: %s" % (output_nbest_file))

  # Group features by example, and index results by feature unique_id.
  example_index_to_features = collections.defaultdict(list)
  for feature in all_features:
    example_index_to_features[feature.example_index].append(feature)

  unique_id_to_result = {}
  for result in all_results:
    unique_id_to_result[result.unique_id] = result

  all_predictions = collections.OrderedDict()
  all_nbest_json = collections.OrderedDict()
  scores_diff_json = collections.OrderedDict()

  for (example_index, example) in enumerate(all_examples):
    features = example_index_to_features[example_index]

    prelim_predictions = []
    # keep track of the minimum score of null start+end of position 0
    score_null = 1000000  # large and positive

    for (feature_index, feature) in enumerate(features):
      result = unique_id_to_result[feature.unique_id]

      cur_null_score = result.cls_logits

      # if we could have irrelevant answers, get the min score of irrelevant
      score_null = min(score_null, cur_null_score)

      # Cross every top-k start with its top-k ends (flattened layout:
      # end beam j for start beam i lives at i * end_n_top + j).
      for i in range(FLAGS.start_n_top):
        for j in range(FLAGS.end_n_top):
          start_log_prob = result.start_top_log_probs[i]
          start_index = result.start_top_index[i]

          j_index = i * FLAGS.end_n_top + j

          end_log_prob = result.end_top_log_probs[j_index]
          end_index = result.end_top_index[j_index]

          # We could hypothetically create invalid predictions, e.g., predict
          # that the start of the span is in the question. We throw out all
          # invalid predictions.
          if start_index >= feature.paragraph_len - 1:
            continue
          if end_index >= feature.paragraph_len - 1:
            continue

          if not feature.token_is_max_context.get(start_index, False):
            continue
          if end_index < start_index:
            continue

          length = end_index - start_index + 1
          if length > max_answer_length:
            continue

          prelim_predictions.append(
              _PrelimPrediction(
                  feature_index=feature_index,
                  start_index=start_index,
                  end_index=end_index,
                  start_log_prob=start_log_prob,
                  end_log_prob=end_log_prob))

    # Rank candidates by total span log-prob, best first.
    prelim_predictions = sorted(
        prelim_predictions,
        key=lambda x: (x.start_log_prob + x.end_log_prob),
        reverse=True)

    seen_predictions = {}
    nbest = []
    for pred in prelim_predictions:
      if len(nbest) >= n_best_size:
        break
      feature = features[pred.feature_index]

      # Recover the answer text directly from the original paragraph via
      # the token -> original-character alignment.
      tok_start_to_orig_index = feature.tok_start_to_orig_index
      tok_end_to_orig_index = feature.tok_end_to_orig_index
      start_orig_pos = tok_start_to_orig_index[pred.start_index]
      end_orig_pos = tok_end_to_orig_index[pred.end_index]

      paragraph_text = example.paragraph_text
      final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()

      if final_text in seen_predictions:
        continue

      seen_predictions[final_text] = True

      nbest.append(
          _NbestPrediction(
              text=final_text,
              start_log_prob=pred.start_log_prob,
              end_log_prob=pred.end_log_prob))

    # In very rare edge cases we could have no valid predictions. So we
    # just create a nonce prediction in this case to avoid failure.
    if not nbest:
      nbest.append(
          _NbestPrediction(text="", start_log_prob=-1e6,
                           end_log_prob=-1e6))

    total_scores = []
    best_non_null_entry = None
    for entry in nbest:
      total_scores.append(entry.start_log_prob + entry.end_log_prob)
      if not best_non_null_entry:
        best_non_null_entry = entry

    probs = _compute_softmax(total_scores)

    nbest_json = []
    for (i, entry) in enumerate(nbest):
      output = collections.OrderedDict()
      output["text"] = entry.text
      output["probability"] = probs[i]
      output["start_log_prob"] = entry.start_log_prob
      output["end_log_prob"] = entry.end_log_prob
      nbest_json.append(output)

    assert len(nbest_json) >= 1
    assert best_non_null_entry is not None

    score_diff = score_null
    scores_diff_json[example.qas_id] = score_diff
    # note(zhiliny): always predict best_non_null_entry
    # and the evaluation script will search for the best threshold
    all_predictions[example.qas_id] = best_non_null_entry.text

    all_nbest_json[example.qas_id] = nbest_json

  with tf.gfile.GFile(output_prediction_file, "w") as writer:
    writer.write(json.dumps(all_predictions, indent=4) + "\n")

  with tf.gfile.GFile(output_nbest_file, "w") as writer:
    writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

  with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
    writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

  # Score against the original data and search the best null threshold.
  qid_to_has_ans = squad_utils.make_qid_to_has_ans(orig_data)
  has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
  no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
  exact_raw, f1_raw = squad_utils.get_raw_scores(orig_data, all_predictions)
  out_eval = {}

  squad_utils.find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw,
                                      scores_diff_json, qid_to_has_ans)

  return out_eval
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def input_fn_builder(input_glob, seq_length, is_training, drop_remainder,
                     num_hosts, num_threads=8):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_glob: glob pattern matching the TFRecord input files.
    seq_length: fixed length of the packed input sequences.
    is_training: enables repeat + shuffling and the label fields.
    drop_remainder: whether to drop the last partial batch.
    num_hosts: number of TPU hosts to shard files across.
    num_threads: parallelism for file interleaving and batching.
  """
  name_to_features = {
      "unique_ids": tf.FixedLenFeature([], tf.int64),
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.float32),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "cls_index": tf.FixedLenFeature([], tf.int64),
      "p_mask": tf.FixedLenFeature([seq_length], tf.float32)
  }

  if is_training:
    name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
    name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
    name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.float32)

  tf.logging.info("Input tfrecord file glob {}".format(input_glob))
  global_input_paths = tf.gfile.Glob(input_glob)
  tf.logging.info("Find {} input paths {}".format(
      len(global_input_paths), global_input_paths))

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.cast(t, tf.int32)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    # On TPU the per-core batch size comes from the estimator params.
    if FLAGS.use_tpu:
      batch_size = params["batch_size"]
    elif is_training:
      batch_size = FLAGS.train_batch_size
    else:
      batch_size = FLAGS.predict_batch_size

    # Split tfrecords across hosts
    if num_hosts > 1:
      host_id = params["context"].current_host
      num_files = len(global_input_paths)
      if num_files >= num_hosts:
        num_files_per_host = (num_files + num_hosts - 1) // num_hosts
        my_start_file_id = host_id * num_files_per_host
        my_end_file_id = min((host_id + 1) * num_files_per_host, num_files)
        input_paths = global_input_paths[my_start_file_id: my_end_file_id]
      # NOTE(review): if num_files < num_hosts, `input_paths` is never
      # assigned on this branch and the log/use below would raise — looks
      # like a latent bug for that configuration; confirm before relying
      # on multi-host runs with fewer files than hosts.
      tf.logging.info("Host {} handles {} files".format(host_id,
                                                        len(input_paths)))
    else:
      input_paths = global_input_paths

    if len(input_paths) == 1:
      d = tf.data.TFRecordDataset(input_paths[0])
      # For training, we want a lot of parallel reading and shuffling.
      # For eval, we want no shuffling and parallel reading doesn't matter.
      if is_training:
        d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)
        d = d.repeat()
    else:
      d = tf.data.Dataset.from_tensor_slices(input_paths)
      # file level shuffle
      d = d.shuffle(len(input_paths)).repeat()

      # `cycle_length` is the number of parallel files that get read.
      cycle_length = min(num_threads, len(input_paths))

      d = d.apply(
          tf.contrib.data.parallel_interleave(
              tf.data.TFRecordDataset,
              sloppy=is_training,
              cycle_length=cycle_length))

      if is_training:
        # sample level shuffle
        d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            num_parallel_batches=num_threads,
            drop_remainder=drop_remainder))

    d = d.prefetch(1024)

    return d

  return input_fn
def get_model_fn():
  """Returns a `model_fn` closure for (TPU)Estimator.

  The returned function builds the QA model via
  function_builder.get_qa_outputs, handles PREDICT mode by exporting the
  beam outputs, and otherwise constructs the span + answerability loss and
  the training op.
  """
  def model_fn(features, labels, mode, params):
    #### Training or Evaluation
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    #### Get loss from inputs
    outputs = function_builder.get_qa_outputs(FLAGS, features, is_training)

    #### Check model parameters
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
    tf.logging.info('#params: {}'.format(num_params))

    scaffold_fn = None

    #### Evaluation mode
    if mode == tf.estimator.ModeKeys.PREDICT:
      if FLAGS.init_checkpoint:
        tf.logging.info("init_checkpoint not being used in predict mode.")

      predictions = {
          "unique_ids": features["unique_ids"],
          "start_top_index": outputs["start_top_index"],
          "start_top_log_probs": outputs["start_top_log_probs"],
          "end_top_index": outputs["end_top_index"],
          "end_top_log_probs": outputs["end_top_log_probs"],
          "cls_logits": outputs["cls_logits"]
      }

      if FLAGS.use_tpu:
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
      else:
        output_spec = tf.estimator.EstimatorSpec(
            mode=mode, predictions=predictions)
      return output_spec

    ### Compute loss
    seq_length = tf.shape(features["input_ids"])[1]
    def compute_loss(log_probs, positions):
      # Cross-entropy of the gold position against the predicted
      # log-probabilities, averaged over the batch.
      one_hot_positions = tf.one_hot(
          positions, depth=seq_length, dtype=tf.float32)

      loss = - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)
      loss = tf.reduce_mean(loss)
      return loss

    start_loss = compute_loss(
        outputs["start_log_probs"], features["start_positions"])
    end_loss = compute_loss(
        outputs["end_log_probs"], features["end_positions"])

    total_loss = (start_loss + end_loss) * 0.5

    # Answerability (is_impossible) head: sigmoid cross-entropy on the
    # CLS logit.
    cls_logits = outputs["cls_logits"]
    is_impossible = tf.reshape(features["is_impossible"], [-1])
    regression_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=is_impossible, logits=cls_logits)
    regression_loss = tf.reduce_mean(regression_loss)

    # note(zhiliny): by default multiply the loss by 0.5 so that the scale is
    # comparable to start_loss and end_loss
    total_loss += regression_loss * 0.5

    #### Configuring the optimizer
    train_op, learning_rate, _ = model_utils.get_train_op(FLAGS, total_loss)

    monitor_dict = {}
    monitor_dict["lr"] = learning_rate

    #### load pretrained models
    scaffold_fn = model_utils.init_from_checkpoint(FLAGS)

    #### Constructing training TPUEstimatorSpec with new cache.
    if FLAGS.use_tpu:
      host_call = function_builder.construct_scalar_host_call(
          monitor_dict=monitor_dict,
          model_dir=FLAGS.model_dir,
          prefix="train/",
          reduce_fn=tf.reduce_mean)

      train_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode, loss=total_loss, train_op=train_op, host_call=host_call,
          scaffold_fn=scaffold_fn)
    else:
      train_spec = tf.estimator.EstimatorSpec(
          mode=mode, loss=total_loss, train_op=train_op)

    return train_spec

  return model_fn
def _get_spm_basename():
  """Return the basename of the SentencePiece model file (used to tag data files)."""
  return os.path.basename(FLAGS.spiece_model_file)
def preprocess():
  """Convert this worker's shard of SQuAD training examples into a TFRecord file."""
  sp_model = spm.SentencePieceProcessor()
  sp_model.Load(FLAGS.spiece_model_file)
  spm_basename = _get_spm_basename()
  # The output path encodes shard id plus sequence/query lengths so training
  # can later glob for matching records.
  train_rec_file = os.path.join(
      FLAGS.output_dir,
      "{}.{}.slen-{}.qlen-{}.train.tf_record".format(
          spm_basename, FLAGS.proc_id, FLAGS.max_seq_length,
          FLAGS.max_query_length))
  tf.logging.info("Read examples from {}".format(FLAGS.train_file))
  train_examples = read_squad_examples(FLAGS.train_file, is_training=True)
  # Shard examples across workers: this process takes every num_proc-th one.
  train_examples = train_examples[FLAGS.proc_id::FLAGS.num_proc]
  # Pre-shuffle the input to avoid having to make a very large shuffle
  # buffer in the `input_fn`.
  random.shuffle(train_examples)
  tf.logging.info("Write to {}".format(train_rec_file))
  train_writer = FeatureWriter(
      filename=train_rec_file,
      is_training=True)
  convert_examples_to_features(
      examples=train_examples,
      sp_model=sp_model,
      max_seq_length=FLAGS.max_seq_length,
      doc_stride=FLAGS.doc_stride,
      max_query_length=FLAGS.max_query_length,
      is_training=True,
      output_fn=train_writer.process_feature)
  train_writer.close()
def main(_):
  """Entry point: preprocess data, train, and/or run prediction per FLAGS."""
  tf.logging.set_verbosity(tf.logging.INFO)
  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)
  # Preprocessing mode writes TFRecords and exits without building a model.
  if FLAGS.do_prepro:
    preprocess()
    return
  #### Validate flags
  if FLAGS.save_steps is not None:
    FLAGS.iterations = min(FLAGS.iterations, FLAGS.save_steps)
  if not FLAGS.do_train and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train` and `do_predict` must be True.")
  if FLAGS.do_predict and not tf.gfile.Exists(FLAGS.predict_dir):
    tf.gfile.MakeDirs(FLAGS.predict_dir)
  sp_model = spm.SentencePieceProcessor()
  sp_model.Load(FLAGS.spiece_model_file)
  ### TPU Configuration
  run_config = model_utils.configure_tpu(FLAGS)
  model_fn = get_model_fn()
  spm_basename = _get_spm_basename()
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  if FLAGS.use_tpu:
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)
  else:
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        config=run_config)
  if FLAGS.do_train:
    # Glob over all per-worker preprocessing shards (see `preprocess`).
    train_rec_glob = os.path.join(
        FLAGS.output_dir,
        "{}.*.slen-{}.qlen-{}.train.tf_record".format(
        spm_basename, FLAGS.max_seq_length,
        FLAGS.max_query_length))
    train_input_fn = input_fn_builder(
        input_glob=train_rec_glob,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True,
        num_hosts=FLAGS.num_hosts)
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)
  if FLAGS.do_predict:
    eval_examples = read_squad_examples(FLAGS.predict_file, is_training=False)
    # Keep the original JSON around for answer-text scoring.
    with tf.gfile.Open(FLAGS.predict_file) as f:
      orig_data = json.load(f)["data"]
    eval_rec_file = os.path.join(
        FLAGS.output_dir,
        "{}.slen-{}.qlen-{}.eval.tf_record".format(
            spm_basename, FLAGS.max_seq_length, FLAGS.max_query_length))
    eval_feature_file = os.path.join(
        FLAGS.output_dir,
        "{}.slen-{}.qlen-{}.eval.features.pkl".format(
            spm_basename, FLAGS.max_seq_length, FLAGS.max_query_length))
    # Reuse cached eval records/features unless overwriting was requested.
    if tf.gfile.Exists(eval_rec_file) and tf.gfile.Exists(
        eval_feature_file) and not FLAGS.overwrite_data:
      tf.logging.info("Loading eval features from {}".format(eval_feature_file))
      with tf.gfile.Open(eval_feature_file, 'rb') as fin:
        eval_features = pickle.load(fin)
    else:
      eval_writer = FeatureWriter(filename=eval_rec_file, is_training=False)
      eval_features = []
      def append_feature(feature):
        # Keep features both in memory (for span reconstruction) and on disk.
        eval_features.append(feature)
        eval_writer.process_feature(feature)
      convert_examples_to_features(
          examples=eval_examples,
          sp_model=sp_model,
          max_seq_length=FLAGS.max_seq_length,
          doc_stride=FLAGS.doc_stride,
          max_query_length=FLAGS.max_query_length,
          is_training=False,
          output_fn=append_feature)
      eval_writer.close()
      with tf.gfile.Open(eval_feature_file, 'wb') as fout:
        pickle.dump(eval_features, fout)
    eval_input_fn = input_fn_builder(
        input_glob=eval_rec_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=False,
        num_hosts=1)
    cur_results = []
    for result in estimator.predict(
        input_fn=eval_input_fn,
        yield_single_examples=True):
      if len(cur_results) % 1000 == 0:
        tf.logging.info("Processing example: %d" % (len(cur_results)))
      # Flatten the per-example arrays into plain Python lists/floats.
      unique_id = int(result["unique_ids"])
      start_top_log_probs = (
          [float(x) for x in result["start_top_log_probs"].flat])
      start_top_index = [int(x) for x in result["start_top_index"].flat]
      end_top_log_probs = (
          [float(x) for x in result["end_top_log_probs"].flat])
      end_top_index = [int(x) for x in result["end_top_index"].flat]
      cls_logits = float(result["cls_logits"].flat[0])
      cur_results.append(
          RawResult(
              unique_id=unique_id,
              start_top_log_probs=start_top_log_probs,
              start_top_index=start_top_index,
              end_top_log_probs=end_top_log_probs,
              end_top_index=end_top_index,
              cls_logits=cls_logits))
    output_prediction_file = os.path.join(
        FLAGS.predict_dir, "predictions.json")
    output_nbest_file = os.path.join(
        FLAGS.predict_dir, "nbest_predictions.json")
    output_null_log_odds_file = os.path.join(
        FLAGS.predict_dir, "null_odds.json")
    ret = write_predictions(eval_examples, eval_features, cur_results,
                            FLAGS.n_best_size, FLAGS.max_answer_length,
                            output_prediction_file,
                            output_nbest_file,
                            output_null_log_odds_file,
                            orig_data)
    # Log current result
    tf.logging.info("=" * 80)
    log_str = "Result | "
    for key, val in ret.items():
      log_str += "{} {} | ".format(key, val)
    tf.logging.info(log_str)
    tf.logging.info("=" * 80)
# Script entry point: tf.app.run() parses flags and then calls main().
if __name__ == "__main__":
  tf.app.run()
|
{
"content_hash": "6b7ab5e43fe21676418b22d75b20b369",
"timestamp": "",
"source": "github",
"line_count": 1309,
"max_line_length": 83,
"avg_line_length": 34.99923605805959,
"alnum_prop": 0.617169424193478,
"repo_name": "zihangdai/xlnet",
"id": "156fa4396e149186ab0a1873750e851796558f6e",
"size": "45831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_squad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7106"
},
{
"name": "Python",
"bytes": "373704"
},
{
"name": "Shell",
"bytes": "4770"
}
],
"symlink_target": ""
}
|
"""
sphinx.locale
~~~~~~~~~~~~~
Locale utilities.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import gettext
from six import PY3, text_type
from six.moves import UserString
class _TranslationProxy(UserString, object):
    """
    Class for proxy strings from gettext translations. This is a helper for the
    lazy_* functions from this module.

    The proxy implementation attempts to be as complete as possible, so that
    the lazy objects should mostly work as expected, for example for sorting.

    This inherits from UserString because some docutils versions use UserString
    for their Text nodes, which then checks its argument for being either a
    basestring or UserString, otherwise calls str() -- not unicode() -- on it.

    This also inherits from object to make the __new__ method work.
    """
    __slots__ = ('_func', '_args')

    def __new__(cls, func, *args):
        if not args:
            # not called with "function" and "arguments", but a plain string
            return text_type(func)
        return object.__new__(cls)

    def __getnewargs__(self):
        return (self._func,) + self._args

    def __init__(self, func, *args):
        self._func = func
        self._args = args

    # The proxied value: re-evaluated on every access so that the active
    # translation catalog at call time is used.
    data = property(lambda x: x._func(*x._args))

    # replace function from UserString; it instantiates a self.__class__
    # for the encoding result
    def encode(self, encoding=None, errors=None):
        if encoding:
            if errors:
                return self.data.encode(encoding, errors)
            else:
                return self.data.encode(encoding)
        else:
            return self.data.encode()

    def __contains__(self, key):
        return key in self.data

    def __bool__(self):
        return bool(self.data)
    __nonzero__ = __bool__  # for python2 compatibility

    def __dir__(self):
        return dir(text_type)

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def __str__(self):
        return str(self.data)

    def __unicode__(self):
        return text_type(self.data)

    def __add__(self, other):
        return self.data + other

    def __radd__(self, other):
        return other + self.data

    def __mod__(self, other):
        return self.data % other

    def __rmod__(self, other):
        return other % self.data

    def __mul__(self, other):
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __lt__(self, other):
        return self.data < other

    def __le__(self, other):
        return self.data <= other

    def __eq__(self, other):
        return self.data == other

    def __ne__(self, other):
        return self.data != other

    def __gt__(self, other):
        return self.data > other

    def __ge__(self, other):
        return self.data >= other

    def __getattr__(self, name):
        if name == '__members__':
            return self.__dir__()
        return getattr(self.data, name)

    def __getstate__(self):
        return self._func, self._args

    def __setstate__(self, tup):
        self._func, self._args = tup

    def __getitem__(self, key):
        return self.data[key]

    def __copy__(self):
        return self

    def __repr__(self):
        try:
            return 'i' + repr(text_type(self.data))
        except Exception:
            # repr() must never raise (the proxied function can fail before
            # translations are initialized); narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit are no longer swallowed here.
            return '<%s broken>' % self.__class__.__name__
def mygettext(string):
    """Translate *string* through the module-level ``_``.

    Used instead of ``_`` when creating TranslationProxies, because ``_`` is
    not bound yet at that time.
    """
    return _(string)
def lazy_gettext(string):
    """A lazy version of `gettext`: return a proxy translated on access."""
    return _TranslationProxy(mygettext, string)


# Conventional short alias used throughout Sphinx.
l_ = lazy_gettext
# Human-readable, lazily-translated titles for docutils admonition nodes.
admonitionlabels = {
    'attention': l_('Attention'),
    'caution': l_('Caution'),
    'danger': l_('Danger'),
    'error': l_('Error'),
    'hint': l_('Hint'),
    'important': l_('Important'),
    'note': l_('Note'),
    'seealso': l_('See also'),
    'tip': l_('Tip'),
    'warning': l_('Warning'),
}
# Format strings for the versionadded/versionchanged/deprecated directives;
# %s is filled with the version number.
versionlabels = {
    'versionadded': l_('New in version %s'),
    'versionchanged': l_('Changed in version %s'),
    'deprecated': l_('Deprecated since version %s'),
}
# XXX Python specific
# Labels for the second part of paired ("pair:" style) index entries.
pairindextypes = {
    'module': l_('module'),
    'keyword': l_('keyword'),
    'operator': l_('operator'),
    'object': l_('object'),
    'exception': l_('exception'),
    'statement': l_('statement'),
    'builtin': l_('built-in function'),
}
# Maps catalog name -> gettext translator object; populated by init().
translators = {}
if PY3:
    def _(message):
        """Translate *message* with the 'sphinx' catalog (init() must run first)."""
        return translators['sphinx'].gettext(message)
else:
    def _(message):
        """Translate *message* with the 'sphinx' catalog (init() must run first)."""
        return translators['sphinx'].ugettext(message)
def init(locale_dirs, language, catalog='sphinx'):
    """Look for message catalogs in `locale_dirs` and *ensure* that there is at
    least a NullTranslations catalog set in `translators`. If called multiple
    times or if several ``.mo`` files are found, their contents are merged
    together (thus making ``init`` reentrable).
    """
    global translators
    merged = translators.get(catalog)
    # A cached NullTranslations marks a previously failed lookup; retry fresh.
    if isinstance(merged, gettext.NullTranslations):
        merged = None
    found = True
    # Load and merge every catalog found along locale_dirs (a None entry
    # means the system's default locale path).
    for locale_dir in locale_dirs:
        try:
            catalog_trans = gettext.translation(catalog, localedir=locale_dir,
                                                languages=[language])
            if merged is None:
                merged = catalog_trans
            else:
                merged._catalog.update(catalog_trans._catalog)
        except Exception:
            # Language couldn't be found in the specified path
            pass
    # guarantee translators[catalog] exists
    if merged is None:
        merged = gettext.NullTranslations()
        found = False
    translators[catalog] = merged
    # On Python 2, expose a unicode-returning gettext under the usual name.
    if hasattr(merged, 'ugettext'):
        merged.gettext = merged.ugettext
    return merged, found
|
{
"content_hash": "997536b7dcb3b1a0cce310ed9f9edff1",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 80,
"avg_line_length": 27.060606060606062,
"alnum_prop": 0.5872660374340105,
"repo_name": "axbaretto/beam",
"id": "d6ce7329bec527cd9be812b0afa045d3c6ea60ea",
"size": "6275",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/locale/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from __future__ import division
import logging
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # Seemingly unused, but for 3D plots
from matplotlib.colors import ColorConverter
from shapely.geometry import box, Polygon
from descartes.patch import PolygonPatch
from cops_and_robots.helpers.visualizations import plot_multisurface
# The following are class methods of Softmax
def plot(self, class_=None, show_plot=True, plot_3D=True, plot_probs=True,
         plot_dominant_classes=True, plot_poly=False, plot_normals=False,
         plot_subclasses=False, plot_legend=True, fig=None, ax=None,
         title='Softmax Classification',
         **kwargs):
    """Display the class and/or PDF plots of the Softmax distribution.

    The class plot shows only the critical classes (those that have the
    greatest probability at any given state).

    Parameters
    ----------
    class_ : str or int, optional
        Plot only this class. Defaults to `None` (all classes).
    show_plot : bool, optional
        Call ``plt.show()`` after drawing. Defaults to `True`.
    plot_3D : bool, optional
        Use a 3D surface for 2D state spaces. Defaults to `True`.
    plot_probs : bool, optional
        Plot the probability densities. Defaults to `True`.
    plot_dominant_classes : bool, optional
        Plot the critical classes. Defaults to `True`.
    plot_poly : bool, optional
        Plot the polygon from which the boundaries are formed. Defaults to
        `False`.
    plot_normals : bool, optional
        Plot the class boundary lines derived from the normals. Defaults
        to `False`.
    plot_subclasses : bool, optional
        Use subclass probabilities instead of merged classes. Defaults to
        `False`.
    plot_legend : bool, optional
        Add a class-color legend. Defaults to `True`.
    fig, ax : optional
        Existing matplotlib figure/axes to draw into.
    title : str, optional
        Figure title.
    **kwargs
        Keyword arguments for ``plot_dominant_classes``.

    Returns
    -------
    list of matplotlib axes, or None if no plot type was selected.
    """
    # Define probabilities lazily
    if not hasattr(self, 'probs') and not plot_subclasses:
        self.probability()
    if not hasattr(self, 'subclass_probs') and plot_subclasses:
        self.probability(find_subclass_probs=True)

    # Plotting attributes
    self.plot_3D = plot_3D
    self.plot_subclasses = plot_subclasses

    # Case 1: dominant classes and probabilities side by side.
    if plot_dominant_classes and plot_probs and class_ is None:
        if fig is None:
            self.fig = plt.figure(figsize=(14, 8))
        else:
            self.fig = fig
        bbox_size = (-1.3, -0.175, 2.2, -0.075)
        if ax is None:
            ax1 = self.fig.add_subplot(1, 2, 1)
            if plot_3D and self.state.shape[1] > 1:
                ax2 = self.fig.add_subplot(1, 2, 2, projection='3d')
            else:
                ax2 = self.fig.add_subplot(1, 2, 2)
        else:
            ax1 = ax[0]
            ax2 = ax[1]
        self._plot_dominant_classes(ax1)
        self._plot_probs(ax2)
        axes = [ax1, ax2]
    # Case 2: dominant classes only.
    elif plot_dominant_classes and class_ is None:
        if fig is None:
            self.fig = plt.figure(figsize=(8, 8))
        else:
            self.fig = fig
        if ax is None:
            ax1 = self.fig.add_subplot(111)
        else:
            ax1 = ax
        bbox_size = (0, -0.175, 1, -0.075)
        self._plot_dominant_classes(ax=ax1, **kwargs)
        axes = [ax1]
    # Case 3: probabilities only (possibly for a single class).
    elif plot_probs:
        if fig is None:
            self.fig = plt.figure(figsize=(8, 8))
        else:
            self.fig = fig
        if class_ is not None:
            if ax is None:
                if plot_3D and self.state.shape[1] > 1:
                    ax = self.fig.add_subplot(1, 1, 1, projection='3d')
                else:
                    ax = self.fig.add_subplot(1, 1, 1)
            self.classes[class_].plot(ax=ax, **kwargs)
            axes = [self.fig.gca()]
        else:
            if plot_3D and self.state.shape[1] > 1 and ax is None:
                ax1 = self.fig.add_subplot(111, projection='3d')
            elif ax is None:
                ax1 = self.fig.add_subplot(111)
            else:
                ax1 = ax
            self._plot_probs(ax1, **kwargs)
            axes = [ax1]
        bbox_size = (0, -0.15, 1, -0.05)

    if plot_legend:
        # Create Proxy artists for legend labels (skip duplicate labels).
        proxy = [None] * self.num_classes
        for i in range(self.num_classes):
            if self.class_labels[i] not in self.class_labels[:i]:
                proxy_label = self.class_labels[i]
            else:
                proxy_label = "_nolegend_"
            proxy[i] = plt.Rectangle((0, 0), 1, 1, fc=self.class_colors[i],
                                     alpha=0.6, label=proxy_label,)
        plt.legend(handles=proxy, loc='lower center', mode='expand', ncol=5,
                   bbox_to_anchor=(0, 1.0 ,1, 0), borderaxespad=0.)

    plt.suptitle(title, fontsize=16)

    # Plot polygon
    if self.poly is not None and plot_poly and plot_dominant_classes:
        try:
            for poly in self.polys:
                patch = PolygonPatch(poly, facecolor='none', zorder=2,
                                     linewidth=3, edgecolor='black',)
                ax1.add_patch(patch)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt); falls back to the single polygon when
            # `self.polys` is absent or unusable.
            patch = PolygonPatch(self.poly, facecolor='none', zorder=2,
                                 linewidth=3, edgecolor='black',)
            ax1.add_patch(patch)

    # Plot normals
    # <>TODO fix crashing issue with vertical normals
    if self.normals is not None and plot_normals and plot_dominant_classes:
        t = np.arange(self.bounds[0], self.bounds[2] + 1)
        for i, normal in enumerate(self.normals):
            if abs(normal[1]) < 0.0001:
                # near-vertical boundary: draw a vertical line instead
                ax1.axvline(self.offsets[i], ls='--', lw=3, c='black')
            else:
                slope = normal[0]
                y = slope * t - self.offsets[i]
                ax1.plot(t, y, ls='--', lw=3, c='black')

    if show_plot:
        plt.show()
    try:
        return axes
    except UnboundLocalError:
        logging.warn('No axes to return.')
def _plot_probs(self, ax=None, class_=None):
    """Dispatch probability plotting by the dimensionality of the state space.

    Picks a 1D, 2D or 3D plotting routine, creating an axis (3D-projected
    when appropriate) if none was supplied, then shrinks the axis to leave
    room for a legend below it.
    """
    if self.state.shape[1] == 1:
        axis = self.fig.gca() if ax is None else ax
        self._plot_probs_1D(axis, class_)
    elif self.state.ndim == 2:
        if ax is not None:
            axis = ax
        elif self.plot_3D:
            axis = self.fig.gca(projection='3d')
        else:
            axis = self.fig.gca()
        self._plot_probs_2D(axis, class_)
    elif self.state.ndim == 3:
        axis = self.fig.gca(projection='3d') if ax is None else ax
        self._plot_probs_3D(axis, class_)
    else:
        raise ValueError('The state vector must be able to be represented '
                         'in 1D, 2D or 3D to be plotted.')

    # Shrink the axis height by 10% from the bottom.
    pos = axis.get_position()
    axis.set_position([pos.x0, pos.y0 + pos.height * 0.1,
                       pos.width, pos.height * 0.9])
def _plot_probs_1D(self, ax, class_):
    """Plot 1D class probability curves (with filled areas) on *ax*.

    If *class_* is a name it is resolved to an id, falling back to the
    subclasses when no merged class matches.
    """
    if type(class_) is str:
        try:
            class_ = self.classes[class_].id
        except KeyError:
            logging.debug('Couldn\'t find class {}. Looking in subclasses.'
                          .format(class_))
            # Fixed: the original `except e:` clause referenced an undefined
            # name and could never run; the subclass lookup failure is now
            # handled with a nested try so it is actually caught and logged.
            try:
                class_ = self.subclasses[class_].id
            except KeyError as e:
                logging.error('Couldn\'t find {} as a class or subclass.'
                              .format(class_))
                raise e
            if not hasattr(self, 'subclass_probs'):
                self.probability(find_subclass_probs=True)
            self.plot_subclasses = True

    if self.plot_subclasses:
        Z = self.subclass_probs[:]
    else:
        Z = self.probs[:]

    if class_ is not None:
        ax.plot(self.X[0, :], Z[:,class_], color=self.class_colors[class_])
        ax.fill_between(self.X[0, :], 0, Z[:,class_], color=self.class_colors[class_],
                        alpha=0.4)
    else:
        for i in range(self.num_classes):
            ax.plot(self.X[0, :], Z[:,i], color=self.class_colors[i])
            ax.fill_between(self.X[0, :], 0, Z[:,i], color=self.class_colors[i],
                            alpha=0.4)

    ax.set_xlim(self.bounds[0], self.bounds[2])
    ax.set_ylim(0, 1)
    ax.set_xlabel('x')
    ax.set_ylabel('Probability P(D=i|X)')
    ax.set_title('Class Probabilities')
def _plot_probs_2D(self, ax, class_):
    """Plot class probability surfaces (3D) or contours (2D) on *ax*.

    If *class_* is given (name or id) only that class is drawn; otherwise
    all classes are plotted. Uses subclass probabilities when
    ``self.plot_subclasses`` is set.
    """
    if class_ is not None:
        if type(class_) is str:
            try:
                class_ = self.classes[class_].id
            except KeyError:
                logging.debug('Couldn\'t find class {}. Looking in subclasses.'
                              .format(class_))
                # Fixed: the original `except e:` clause referenced an
                # undefined name (NameError) and also could not catch a
                # failure inside this handler; use a nested try instead.
                try:
                    class_ = self.subclasses[class_].id
                except KeyError as e:
                    logging.error('Couldn\'t find {} as a class or subclass.'
                                  .format(class_))
                    raise e
                if not hasattr(self, 'subclass_probs'):
                    self.probability(find_subclass_probs=True)
                self.plot_subclasses = True

        if self.plot_subclasses:
            Z = self.subclass_probs[:, class_].reshape(self.X.shape[0], self.X.shape[1])
        else:
            Z = self.probs[:, class_].reshape(self.X.shape[0], self.X.shape[1])
    elif self.plot_subclasses:
        Z = self.subclass_probs.reshape(self.X.shape[0], self.X.shape[1],
                                        self.num_subclasses)
    else:
        Z = self.probs.reshape(self.X.shape[0], self.X.shape[1],
                               self.num_classes)

    if self.plot_3D:
        if class_ is not None:
            ax.plot_surface(self.X, self.Y, Z, cstride=2, rstride=2,
                            linewidth=0, antialiased=False,
                            cmap=plt.get_cmap(self.class_cmaps[class_]))
        else:
            #<>TODO: replace with mlab mayavi plotting
            plot_multisurface(self.X, self.Y, Z, ax, cstride=4, rstride=4)

        ax.set_xlim(self.bounds[0], self.bounds[2])
        ax.set_ylim(self.bounds[1], self.bounds[3])
        ax.set_zlabel('P(D=i|X)')
    else:
        levels = np.linspace(0, np.max(Z), 50)
        # <>TODO: Fix contourf plotting for multiple classes
        for i in range(self.num_classes):
            ax.contourf(self.X, self.Y, Z[:,:,i], levels=levels,
                        cmap=plt.get_cmap(self.class_cmaps[i]), alpha=0.8)

    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_title('Class Likelihoods')
def _plot_probs_3D(self, ax, class_):
    # Placeholder: probability plotting for 3D state spaces is not implemented.
    pass
def _plot_dominant_classes(self, ax=None, plot_poly=False, **kwargs):
    """Plot only the critical classes.

    Critical classes are those with the highest probability at a given
    state vector `x`. Dispatches on the dimensionality of the state space,
    then shrinks the axis to leave room for a legend below it.
    """
    # <>TODO: Fix the checking of the state vector to allow, say, 1 x y x^2
    if ax is None:
        ax = self.fig.gca()

    if self.state.shape[1] == 1:
        self._plot_dominant_classes_1D(ax, **kwargs)
    elif self.state.ndim == 2:
        self._plot_dominant_classes_2D(ax, **kwargs)
    elif self.state.ndim == 3:
        self._plot_dominant_classes_3D(ax, **kwargs)
    else:
        raise ValueError('The state vector must be able to be represented '
                         'in 1D, 2D or 3D to be plotted.')

    # Shrink the axis height by 10% from the bottom.
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0 + pos.height * 0.1,
                     pos.width, pos.height * 0.9])
def _plot_dominant_classes_1D(self, ax, **kwargs):
    """Plot the 1D dominant (critical) classes as colored bars on *ax*.

    Each discretized state is colored by the class with the highest
    probability there. (Removed dead locals `res`, `fake_X`, `fake_Y`:
    they were computed via np.mgrid but never used.)
    """
    if self.plot_subclasses:
        probs = self.subclass_probs
    else:
        probs = self.probs

    # Color each state by its most probable class.
    max_pdf_indices = np.argmax(probs, axis=1)
    max_colors = np.take(self.class_colors, max_pdf_indices)
    cc = ColorConverter()
    max_colors_rgb = np.array([cc.to_rgb(_) for _ in max_colors])

    ax.bar(self.X.T, np.ones_like(self.X.T), color=max_colors_rgb,
           linewidth=0, alpha=1)
    ax.set_xlim(self.bounds[0], self.bounds[2])
    ax.set_ylim(0, 0.01)
    ax.set_yticks([])
    ax.set_xlabel('x')
    ax.set_title('Critical Classes')
def _plot_dominant_classes_2D(self, ax, **kwargs):
    """Plot the 2D dominant (critical) classes as a colored scatter on *ax*.

    Each grid point is colored by its most probable class. (Removed the
    leftover debug call ``np.set_printoptions(threshold=np.nan)``: it
    mutated global NumPy print state and raises ValueError on NumPy >= 1.14,
    where `threshold` must be numeric.)
    """
    if self.plot_subclasses:
        probs = self.subclass_probs
    else:
        probs = self.probs

    # <>TODO: come up with more elegant solution than scatter plot
    # Identify colors of critical classes for each state
    max_pdf_indices = np.argmax(probs, axis=1)
    max_colors = np.take(self.class_colors, max_pdf_indices)
    cc = ColorConverter()
    max_colors_rgb = np.array([cc.to_rgb(_) for _ in max_colors])

    ax.scatter(self.X, self.Y, c=max_colors_rgb, marker='s', s=50,
               linewidths=0, alpha=1)
    ax.set_xlim(self.bounds[0], self.bounds[2])
    ax.set_ylim(self.bounds[1], self.bounds[3])
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2$')
    ax.set_title('Critical Classes')
def _plot_dominant_classes_3D(self, **kwargs):
pass
|
{
"content_hash": "cc52366b0e9fafe580a299475eb5a75a",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 88,
"avg_line_length": 36.11494252873563,
"alnum_prop": 0.5568905155951623,
"repo_name": "COHRINT/cops_and_robots",
"id": "230fbd01667e2e19b74c84059e1cb5da3de382c5",
"size": "12568",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/cops_and_robots/fusion/softmax/_visualization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3882"
},
{
"name": "CSS",
"bytes": "4701"
},
{
"name": "JavaScript",
"bytes": "217197"
},
{
"name": "Jupyter Notebook",
"bytes": "8190659"
},
{
"name": "Makefile",
"bytes": "6844"
},
{
"name": "Matlab",
"bytes": "12537"
},
{
"name": "PHP",
"bytes": "42478"
},
{
"name": "Python",
"bytes": "711182"
},
{
"name": "Shell",
"bytes": "3376"
}
],
"symlink_target": ""
}
|
class Solution:
    def jump(self, nums: [int]) -> int:
        """Minimum number of jumps needed to reach the last index.

        Greedy level-by-level scan: `boundary` is the farthest index
        reachable with the current number of jumps, `farthest` the farthest
        reachable with one more. Crossing `boundary` costs a jump; an index
        beyond `farthest` is unreachable, in which case 0 is returned.
        """
        jumps = 0
        boundary = 0
        farthest = 0
        for index, step in enumerate(nums):
            if index > farthest:
                # gap in reachability: the last index can never be reached
                return 0
            if index > boundary:
                boundary = farthest
                jumps += 1
            farthest = max(farthest, index + step)
        return jumps
|
{
"content_hash": "d0cc7d02b6f0956d291d2ff547b13e18",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.4517766497461929,
"repo_name": "BigEgg/LeetCode",
"id": "9e0dc4f0e40b6db9ff269c56080ca9d587329cdf",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/LeetCode/_001_050/_045_JumpGame2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6405"
},
{
"name": "C#",
"bytes": "460435"
},
{
"name": "C++",
"bytes": "49261"
},
{
"name": "HTML",
"bytes": "1371"
},
{
"name": "Java",
"bytes": "22777"
},
{
"name": "JavaScript",
"bytes": "41935"
},
{
"name": "Python",
"bytes": "167427"
},
{
"name": "Smalltalk",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from . import util
import mimetypes
from datetime import datetime
import os
class Attachment(models.Model):
    """
    A file attachment associated with a post; a post may have many of them.
    """

    filehash = models.CharField(max_length=256, editable=False)
    filename = models.CharField(max_length=256)
    mimetype = models.CharField(max_length=256, default='text/plain')
    width = models.IntegerField(default=0)
    height = models.IntegerField(default=0)
    banned = models.BooleanField(default=False)

    def path(self):
        """On-disk name of this attachment: <hash>.<original extension>."""
        extension = self.filename.split('.')[-1]
        return '{}.{}'.format(self.filehash, extension)

    def thumb(self, root=settings.MEDIA_URL):
        """URL (or relative path, with root='') of the thumbnail image."""
        return '{}thumb-{}.jpg'.format(root, self.path())

    def source(self, root=settings.MEDIA_URL):
        """URL (or relative path, with root='') of the full-size file."""
        return '{}{}'.format(root, self.path())

    def remove(self):
        """Unlink thumbnail and source from disk, then delete this row."""
        for relpath in (self.thumb(''), self.source('')):
            os.unlink(os.path.join(settings.MEDIA_ROOT, relpath))
        self.delete()
class Newsgroup(models.Model):
    """
    A newsgroup -- synonym for a board.
    """

    name = models.CharField(max_length=256, primary_key=True, editable=False)
    posts_per_page = models.IntegerField(default=10)
    max_pages = models.IntegerField(default=10)
    banned = models.BooleanField(default=False)

    def get_absolute_url(self):
        """Front page URL for this board; the 'ctl' group maps to the mod log.

        Board names carry a 9-character prefix which is stripped before
        building the URL.
        """
        if self.name != 'ctl':
            return reverse('frontend:board-front', args=[self.name[9:]])
        return reverse('frontend:modlog')
class Post(models.Model):
    """
    A post made anywhere on the boards.
    """

    msgid = models.CharField(max_length=256, primary_key=True, editable=False)
    posthash = models.CharField(max_length=256, editable=False)
    reference = models.CharField(max_length=256, default='')
    message = models.TextField(default='')
    subject = models.CharField(max_length=256, default='None')
    name = models.CharField(max_length=256, default='Anonymous')
    pubkey = models.CharField(max_length=64, default='')
    signature = models.CharField(max_length=64, default='')
    newsgroup = models.ForeignKey(Newsgroup)
    attachments = models.ManyToManyField(Attachment)
    posted = models.IntegerField(default=0)
    placeholder = models.BooleanField(default=False)
    last_bumped = models.IntegerField(default=0)

    def has_attachment(self, filehash):
        """
        Return True when one of our attachments' hashes occurs in *filehash*.
        """
        return any(att.filehash in filehash
                   for att in self.attachments.all())

    def get_all_replies(self):
        """
        All replies to this thread in chronological order (OPs only).
        """
        if self.is_op():
            return Post.objects.filter(reference=self.msgid).order_by('posted')

    def get_board_replies(self, truncate=5):
        """
        Replies to this thread, truncated to the most recent *truncate*.
        """
        replies = self.get_all_replies()
        total = len(replies)
        if total > truncate:
            return replies[total - truncate:]
        return replies

    def is_op(self):
        """True when this post starts a thread (no or self reference)."""
        return self.reference in ('', self.msgid)

    def shorthash(self):
        """First 10 characters of the post hash, for display."""
        return self.posthash[:10]

    def postdate(self):
        """Posting time as a datetime (posted is a unix timestamp)."""
        return datetime.fromtimestamp(self.posted)

    def get_absolute_url(self):
        """
        URL of the containing thread; replies get a fragment to their post.
        """
        if self.is_op():
            op = util.hashid(self.msgid)
            return reverse('frontend:thread', args=[op])
        op = util.hashid(self.reference)
        frag = util.hashid(self.msgid)
        return reverse('frontend:thread', args=[op]) + '#{}'.format(frag)

    def bump(self, last):
        """
        Bump the thread to time *last* (OPs only).
        """
        if self.is_op():
            self.last_bumped = last

    def remove(self):
        """
        Remove this post and all of its attachments.
        """
        for att in self.attachments.all():
            att.remove()
        self.delete()
class ModPriv(models.Model):
    """
    A record granting moderation privileges on certain boards or globally.
    Lower `level` values mean more power.
    """

    # absolute power :^DDDDDDD (does not exist)
    GOD = 0
    # node admin
    ADMIN = 1
    # can ban, delete and edit posts
    MOD = 2
    # can only delete
    JANITOR = 3
    # lowest access level that still allows login
    LOWEST = JANITOR

    # board this privilege applies to, or 'all' for global scope
    board = models.CharField(max_length=128, default='all')
    # privilege level granted (see constants above)
    level = models.IntegerField(default=3)
    # public key of the moderation user
    pubkey = models.CharField(max_length=256, editable=False)

    @staticmethod
    def has_access(level, pubkey, board_name=None):
        """True when *pubkey* holds a privilege <= *level*, globally or
        on *board_name* (checked in that order)."""
        scopes = ['all']
        if board_name:
            scopes.append(board_name)
        for scope in scopes:
            for priv in ModPriv.objects.filter(pubkey=pubkey, board=scope):
                if priv.level <= level:
                    return True
        # no access allowed
        return False

    @staticmethod
    def try_delete(pubkey, post):
        """
        Try deleting *post*; return True when it was deleted, else False.
        """
        if not ModPriv.has_access(ModPriv.JANITOR, pubkey, post.newsgroup.name):
            return False
        post.remove()
        return True

    @staticmethod
    def try_edit(pubkey, post, newbody):
        """
        Try replacing *post*'s body with *newbody*; return True on success.
        """
        if not ModPriv.has_access(ModPriv.MOD, pubkey, post.newsgroup.name):
            return False
        post.message = newbody
        post.save()
        return True
|
{
"content_hash": "c47f832c1acf80906c104edc8a74f7ec",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 81,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.5947452229299363,
"repo_name": "majestrate/nntpchan",
"id": "19ded9eb9efcaeb2ec18c676a763686a8143986e",
"size": "6281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/frontends/django/nntpchan/nntpchan/frontend/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "85402"
},
{
"name": "CSS",
"bytes": "45454"
},
{
"name": "Emacs Lisp",
"bytes": "506"
},
{
"name": "Go",
"bytes": "732796"
},
{
"name": "HTML",
"bytes": "231849"
},
{
"name": "JavaScript",
"bytes": "124038"
},
{
"name": "Lua",
"bytes": "1641"
},
{
"name": "Makefile",
"bytes": "5325"
},
{
"name": "PHP",
"bytes": "3674"
},
{
"name": "Python",
"bytes": "48997"
},
{
"name": "Shell",
"bytes": "737"
},
{
"name": "VCL",
"bytes": "2010"
}
],
"symlink_target": ""
}
|
import requests
from requests.exceptions import HTTPError
#from requests_toolbelt.utils import dump
#def print_raw_http(response):
# data = dump.dump_all(response, request_prefix=b'', response_prefix=b'')
# print('\n' * 2 + data.decode('utf-8'))
class Request(object):
    """HTTP helper wrapping a persistent ``requests.Session``.

    The session persists cookies and connection pooling across calls.
    The get/put/post/options helpers raise ``requests.HTTPError`` on
    non-2xx responses and unwrap the two JSON response envelopes this
    API uses: ``{'success': true, 'data': ...}`` and
    ``{'meta': {'code': 200}, 'data': ...}``.
    """

    def __init__(self):
        self.session = requests.Session()

    def _request(self, url, method='GET', params=None, headers=None,
                 stream=False, raw=False):
        """Issue *method* against *url* and return the decoded payload.

        params  -- query-string parameters (GET) or JSON body (PUT/POST)
        stream  -- for GET only: return the raw streaming Response object
        raw     -- return the full JSON body instead of unwrapping 'data'

        Raises HTTPError on transport failure or when the JSON envelope
        does not signal success.  Returns None for OPTIONS, and for a
        success envelope that carries no 'data' key.
        """
        # BUG FIX: the defaults were mutable dicts ({}) shared across
        # every call to this method; use None sentinels instead.
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        if method == 'GET':
            r = self.session.get(url, params=params, headers=headers,
                                 stream=stream)
            r.raise_for_status()
            if stream is True:
                # Caller wants the streaming response, not a parsed body.
                return r
        elif method == 'PUT':
            r = self.session.put(url, json=params, headers=headers)
            r.raise_for_status()
        elif method == 'POST':
            r = self.session.post(url, json=params, headers=headers)
            r.raise_for_status()
        elif method == 'OPTIONS':
            r = self.session.options(url, headers=headers)
            r.raise_for_status()
            # OPTIONS carries no body we care about.
            return
        body = r.json()
        if raw:
            return body
        # Accept either envelope style; anything else is a failure.
        if ((body.get('success') is True)
                or ('meta' in body and body['meta']['code'] == 200)):
            if 'data' in body:
                return body['data']
            # Success envelope without a payload: implicitly None.
        else:
            raise HTTPError('Request ({0} {1}) failed: {2}'.format(
                method, url, body), response=r)

    def get(self, url, params=None, headers=None, stream=False, raw=False):
        return self._request(url, 'GET', params=params, headers=headers,
                             stream=stream, raw=raw)

    def put(self, url, params=None, headers=None, raw=False):
        return self._request(url, 'PUT', params=params, headers=headers,
                             raw=raw)

    def post(self, url, params=None, headers=None, raw=False):
        return self._request(url, 'POST', params=params, headers=headers,
                             raw=raw)

    def options(self, url, headers=None, raw=False):
        return self._request(url, 'OPTIONS', headers=headers, raw=raw)
|
{
"content_hash": "b1c613ce4c41322133e9a0d9a18824b1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 115,
"avg_line_length": 37.28358208955224,
"alnum_prop": 0.5800640512409928,
"repo_name": "jeffreydwalter/arlo",
"id": "f80afa3475b9f530b734c30756af9802570e7d48",
"size": "3085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "137031"
}
],
"symlink_target": ""
}
|
# Panda3D demo scene: load a race-track model, light it, and apply a
# bloom post-processing filter.
import direct.directbase.DirectStart
from pandac.PandaModules import *
from direct.filter.CommonFilters import CommonFilters
import sys
# Quit on ESC; detach the camera from the default mouse controls and
# enter Panda's free-fly ("out of body experience") viewing mode.
base.accept("escape", sys.exit)
base.disableMouse()
base.oobe()
# 'v'/'V' toggles the offscreen-buffer viewer so the intermediate render
# targets of the filter pipeline can be inspected; dock it bottom-left
# in a horizontal strip.
base.accept("v", base.bufferViewer.toggleEnable)
base.accept("V", base.bufferViewer.toggleEnable)
base.bufferViewer.setPosition("llcorner")
base.bufferViewer.setLayout("hline")
# Load the track model and place it at the scene-graph origin.
track = loader.loadModel("track.bam")
track.reparentTo(render)
track.setPos(Point3(0,0,0))
# One warm directional "sun" (pitched 60 degrees down) plus a dim
# ambient fill, both parented to the track and applied to it.
dlight = DirectionalLight('dlight')
alight = AmbientLight('alight')
dlnp = track.attachNewNode(dlight)
alnp = track.attachNewNode(alight)
dlight.setColor(Vec4(0.8, 0.7, 0.4, 1))
alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
dlnp.setHpr(0, -60, 0)
track.setLight(dlnp)
track.setLight(alnp)
# Let Panda auto-generate shaders for the lights, then add bloom.
render.setShaderAuto()
filters = CommonFilters(base.win, base.cam)
#filters.setCartoonInk(separation=50)
filters.setBloom(blend=(0.1,0.1,0.9,0), desat=-0.5, intensity=2.0)
# Enter the Panda3D main loop (never returns).
run()
|
{
"content_hash": "77a8a5e44fd1aefb4886d4b6c83ff461",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 26.742857142857144,
"alnum_prop": 0.7649572649572649,
"repo_name": "mdinacci/rtw",
"id": "c20e671e30d79cd20c6a0a5cbf7b042d2ebf7106",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "track-editor/res/models/ink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "577524"
},
{
"name": "Shell",
"bytes": "1031"
}
],
"symlink_target": ""
}
|
class Address(object):
    """A street address consisting of street, district and city.

    The country is fixed to "Vietnam"; this model only represents
    domestic addresses.
    """

    def __init__(self, street_address, district, city):
        self.street_address = street_address
        self.district = district
        self.city = city
        self.country = "Vietnam"

    def get_street_address(self):
        """Return the street portion of the address."""
        return self.street_address

    def get_district(self):
        """Return the district."""
        return self.district

    def get_city(self):
        """Return the city."""
        return self.city

    def get_country(self):
        """Return the country (always "Vietnam")."""
        return self.country

    def __str__(self):
        # Format: "<street> <district>, <city>" -- no comma between
        # street and district, matching the original layout.
        street_and_district = self.street_address + ' ' + self.district
        return street_and_district + ', ' + self.city
|
{
"content_hash": "6eadbbf17150b469379965f36ef67d80",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 25,
"alnum_prop": 0.6018181818181818,
"repo_name": "earlwlkr/POICrawler",
"id": "8cae604b8ca8a8dfaa05f9e3ff31fc8cbc6591de",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "POICrawler/address.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16774"
}
],
"symlink_target": ""
}
|
import subprocess
import time
from progressbar import ProgressBar
import make_mfcc_data
def cmd(cmd):
    """Run *cmd* through the shell and return its stdout as text with
    trailing whitespace stripped.

    NOTE(review): shell=True executes the string via the shell; that is
    only safe for the fixed, internally-built commands this script uses.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # BUG FIX: on Python 3, communicate() returns bytes, which breaks the
    # callers below ('voice2/' + label is str + bytes).  Decode so the
    # function always returns text; 'replace' avoids crashing on the odd
    # non-UTF-8 byte in filenames.
    return stdout.decode('utf-8', 'replace').rstrip()
# Driver: walk every label directory under voice2/, convert each .wav to
# MFCC features via make_mfcc_data, and append one whitespace-separated
# feature row per file to doc/recog.txt.
dirs = cmd("ls voice2")
labels = dirs.splitlines()
# Create the output directory if missing.
# NOTE(review): 'doc' is looked up in the voice2/ listing, not in the
# current directory -- presumably meant to test whether doc/ exists;
# confirm against the repo layout.
if 'doc' not in labels:
    cmd("mkdir doc")
recog = open('doc/recog.txt', 'w')
progress_bar = ProgressBar(len(labels))
for class_no, label in enumerate(labels):
    progress_bar.update(class_no+1)
    # Brief pause so the progress bar repaints visibly.
    time.sleep(0.01)
    work_dir = 'voice2/' + label
    voice_files = cmd('ls ' + work_dir + '/*.wav')
    voices = voice_files.splitlines()
    for index, voice in enumerate(voices):
        ceps = make_mfcc_data.convert_to_mfcc(voice)
        # Skip files that could not be converted.
        if ceps is None:
            continue
        # Write the first frame of coefficients, space separated, then
        # terminate the row -- one output line per voice file.
        for data in ceps[0]:
            recog.write('%s ' % data)
        recog.write('\n')
recog.close()
|
{
"content_hash": "bdaa3640fe24f12805a60538686905ad",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 21.209302325581394,
"alnum_prop": 0.6370614035087719,
"repo_name": "piruty-joy/voice_actor_recog",
"id": "c812e6345ce23046dfab6e30c21976fdde2c7039",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_recog_data.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5382"
},
{
"name": "Shell",
"bytes": "908"
}
],
"symlink_target": ""
}
|
<<<<<<< HEAD
<<<<<<< HEAD
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
    """Check that misusing the threading API raises RuntimeError, and that
    exceptions escaping a thread's run() are reported (or silenced) sanely."""

    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)

    def test_joining_current_thread(self):
        # A thread may not join itself -- that would deadlock forever.
        # (Fixed: dropped a stray trailing semicolon.)
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)

    def test_joining_inactive_thread(self):
        # join() is only legal once the thread has been started.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag must be set before start(), never afterwards.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)

    def test_releasing_unacquired_lock(self):
        # Releasing a lock that was never acquired is an error.
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)

    @unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
                         'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
                import threading
                def recurse():
                    return recurse()
                def outer():
                    try:
                        recurse()
                    except RuntimeError:
                        pass
                w = threading.Thread(target=outer)
                w.start()
                w.join()
                print('end of main thread')
                """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)

    def test_print_exception(self):
        # An exception escaping run() must produce a traceback on stderr
        # (and nothing on stdout).
        script = r"""if True:
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_1(self):
        # sys.stderr is cleared only AFTER the thread has started; the
        # traceback is still expected to be reported on the real stderr.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_2(self):
        # sys.stderr is None BEFORE the thread is even created: nothing can
        # be printed, but the interpreter must not complain either.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""
    def setUp(self):
        BaseTestCase.setUp(self)
        # Each callback invocation records its (args, kwargs) snapshot here
        # and then signals the event so the test can synchronize on it.
        self.callback_args = []
        self.callback_event = threading.Event()
    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate the first timer's argument containers; this must not leak
        # into timers created afterwards.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        # Both invocations must have observed pristine (empty) args/kwargs.
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
    def _callback_spy(self, *args, **kwargs):
        # Record copies of the received arguments and wake the waiting test.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
class LockTests(lock_tests.LockTests):
    # Run the generic lock tests from test.lock_tests against threading.Lock.
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    # Exercise the pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # Exercise the C RLock implementation (skipped when it is unavailable).
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    # Run the generic event tests against threading.Event.
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    # Run the generic condition-variable tests against threading.Condition.
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    # Run the generic semaphore tests against threading.Semaphore.
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    # Run the generic bounded-semaphore tests against threading.BoundedSemaphore.
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    # Run the generic barrier tests against threading.Barrier.
    barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
# ======= (unresolved merge conflict separator -- a second, duplicate copy of
# this test module follows below; resolve the merge and keep a single copy)
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter shared between worker threads."""

    def __init__(self):
        # Counting starts from zero.
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used by ThreadTests.test_various_ops.

    Increments/decrements a shared running-thread counter around a random
    sub-millisecond sleep, asserting (via the owning test case) that at most
    three workers run at once -- the bound enforced by the shared semaphore.
    """
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase    # TestCase to report assertions through
        self.sema = sema            # bounds the number of concurrent workers
        self.mutex = mutex          # protects the shared nrunning counter
        self.nrunning = nrunning    # Counter of currently-running workers
    def run(self):
        # Random delay (< 100 usec) so workers overlap unpredictably.
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore bound must cap concurrency at 3.
                self.testcase.assertTrue(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                # The counter must never go negative.
                self.testcase.assertTrue(self.nrunning.get() >= 0)
            if verbose:
                print('%s is finished. %d tasks are running' %
                      (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """TestCase base that cleans up leftover threads and child processes."""
    def setUp(self):
        # Snapshot the threading state (see test.support.threading_setup)
        # so tearDown can detect and wait out leaked threads.
        self._threads = test.support.threading_setup()
    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        # Also reap children spawned by fork()-based tests.
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
    """Check that misusing the threading API raises RuntimeError, and that
    exceptions escaping a thread's run() are reported (or silenced) sanely."""

    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)

    def test_joining_current_thread(self):
        # A thread may not join itself -- that would deadlock forever.
        # (Fixed: dropped a stray trailing semicolon.)
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)

    def test_joining_inactive_thread(self):
        # join() is only legal once the thread has been started.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag must be set before start(), never afterwards.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)

    def test_releasing_unacquired_lock(self):
        # Releasing a lock that was never acquired is an error.
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)

    @unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
                         'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
                import threading
                def recurse():
                    return recurse()
                def outer():
                    try:
                        recurse()
                    except RuntimeError:
                        pass
                w = threading.Thread(target=outer)
                w.start()
                w.join()
                print('end of main thread')
                """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)

    def test_print_exception(self):
        # An exception escaping run() must produce a traceback on stderr
        # (and nothing on stdout).
        script = r"""if True:
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_1(self):
        # sys.stderr is cleared only AFTER the thread has started; the
        # traceback is still expected to be reported on the real stderr.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_2(self):
        # sys.stderr is None BEFORE the thread is even created: nothing can
        # be printed, but the interpreter must not complain either.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""
    def setUp(self):
        BaseTestCase.setUp(self)
        # Each callback invocation records its (args, kwargs) snapshot here
        # and then signals the event so the test can synchronize on it.
        self.callback_args = []
        self.callback_event = threading.Event()
    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate the first timer's argument containers; this must not leak
        # into timers created afterwards.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        # Both invocations must have observed pristine (empty) args/kwargs.
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
    def _callback_spy(self, *args, **kwargs):
        # Record copies of the received arguments and wake the waiting test.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
class LockTests(lock_tests.LockTests):
    # Run the generic lock tests from test.lock_tests against threading.Lock.
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    # Exercise the pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # Exercise the C RLock implementation (skipped when it is unavailable).
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    # Run the generic event tests against threading.Event.
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
{
"content_hash": "73bff6500397b661639eebf810578f63",
"timestamp": "",
"source": "github",
"line_count": 3312,
"max_line_length": 89,
"avg_line_length": 35.5024154589372,
"alnum_prop": 0.5524561164784324,
"repo_name": "ArcherSys/ArcherSys",
"id": "f7368019ab44fc70ae4941f30d9528d63251d967",
"size": "117584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_threading.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import logging
from collections import OrderedDict
from io import BytesIO
from django import forms
from django.contrib.staticfiles import finders
from django.core.files import File
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from pretix.base.ticketoutput import BaseTicketOutput
from pretix.control.forms import ExtFileField
logger = logging.getLogger('pretix.plugins.ticketoutputpdf')
class PdfTicketOutput(BaseTicketOutput):
    """Ticket output plugin that renders one PDF page per order position,
    drawing text and a QR code onto a configurable background PDF."""
    identifier = 'pdf'
    verbose_name = _('PDF output')
    download_button_text = _('Download PDF')
    download_button_icon = 'fa-print'

    def generate(self, request, order):
        """Render the order's positions to PDF and return them as an
        inline HttpResponse.

        For every order position a page is drawn with reportlab (event
        name, product name, price, QR code, ticket code — each with
        per-setting position/size in millimetres), then each page is
        merged on top of the configured background PDF with PyPDF2.
        """
        # Heavy imports are kept local so the plugin loads without
        # reportlab/PyPDF2 being imported at module import time.
        from reportlab.graphics.shapes import Drawing
        from reportlab.pdfgen import canvas
        from reportlab.lib import pagesizes, units
        from reportlab.graphics.barcode.qr import QrCodeWidget
        from reportlab.graphics import renderPDF
        from PyPDF2 import PdfFileWriter, PdfFileReader

        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'inline; filename="order%s%s.pdf"' % (request.event.slug, order.code)

        # NOTE(review): this reads the 'pagesize' setting, but
        # settings_form_fields below declares the field as 'paper_size' —
        # one of the two looks wrong; confirm which key is persisted.
        pagesize = self.settings.get('pagesize', default='A4')
        if hasattr(pagesizes, pagesize):
            # Resolve e.g. 'A4' to the reportlab pagesizes.A4 tuple.
            pagesize = getattr(pagesizes, pagesize)
        else:
            pagesize = pagesizes.A4
        orientation = self.settings.get('orientation', default='portrait')
        if hasattr(pagesizes, orientation):
            # pagesizes.portrait/landscape are functions transforming a size.
            pagesize = getattr(pagesizes, orientation)(pagesize)

        # Background: uploaded file if configured, bundled default otherwise.
        fname = self.settings.get('background', as_type=File)
        if isinstance(fname, File):
            fname = fname.name
        else:
            fname = finders.find('pretixpresale/pdf/ticket_default_a4.pdf')

        buffer = BytesIO()
        p = canvas.Canvas(buffer, pagesize=pagesize)
        for op in order.positions.all().select_related('item', 'variation'):
            # Each element below is optional: a size setting of 0 (falsy)
            # suppresses that element entirely.
            event_s = self.settings.get('event_s', default=22, as_type=float)
            if event_s:
                p.setFont("Helvetica", event_s)
                event_x = self.settings.get('event_x', default=15, as_type=float)
                event_y = self.settings.get('event_y', default=235, as_type=float)
                p.drawString(event_x * units.mm, event_y * units.mm, str(request.event.name))
            name_s = self.settings.get('name_s', default=17, as_type=float)
            if name_s:
                p.setFont("Helvetica", name_s)
                name_x = self.settings.get('name_x', default=15, as_type=float)
                name_y = self.settings.get('name_y', default=220, as_type=float)
                item = str(op.item.name)
                if op.variation:
                    item += " – " + str(op.variation)
                p.drawString(name_x * units.mm, name_y * units.mm, item)
            price_s = self.settings.get('price_s', default=17, as_type=float)
            if price_s:
                p.setFont("Helvetica", price_s)
                price_x = self.settings.get('price_x', default=15, as_type=float)
                price_y = self.settings.get('price_y', default=210, as_type=float)
                p.drawString(price_x * units.mm, price_y * units.mm, "%s %s" % (str(op.price), request.event.currency))
            qr_s = self.settings.get('qr_s', default=80, as_type=float)
            if qr_s:
                reqs = qr_s * units.mm
                # The QR payload is the position's identity (scanned at entry).
                qrw = QrCodeWidget(op.identity, barLevel='H')
                b = qrw.getBounds()
                w = b[2] - b[0]
                h = b[3] - b[1]
                # Scale the widget's native bounds to the requested size.
                d = Drawing(reqs, reqs, transform=[reqs / w, 0, 0, reqs / h, 0, 0])
                d.add(qrw)
                qr_x = self.settings.get('qr_x', default=10, as_type=float)
                qr_y = self.settings.get('qr_y', default=130, as_type=float)
                renderPDF.draw(d, p, qr_x * units.mm, qr_y * units.mm)
            code_s = self.settings.get('code_s', default=11, as_type=float)
            if code_s:
                p.setFont("Helvetica", code_s)
                code_x = self.settings.get('code_x', default=15, as_type=float)
                code_y = self.settings.get('code_y', default=130, as_type=float)
                p.drawString(code_x * units.mm, code_y * units.mm, op.identity)
            p.showPage()
        p.save()

        # Merge each rendered page on top of a fresh copy of the background.
        buffer.seek(0)
        new_pdf = PdfFileReader(buffer)
        output = PdfFileWriter()
        for page in new_pdf.pages:
            # NOTE(review): the background file is re-opened per page and
            # never explicitly closed — presumably harmless for short
            # requests, but worth confirming/cleaning up.
            bg_pdf = PdfFileReader(open(fname, "rb"))
            bg_page = bg_pdf.getPage(0)
            bg_page.mergePage(page)
            output.addPage(bg_page)

        output.write(response)
        return response

    @property
    def settings_form_fields(self) -> dict:
        """Plugin settings form: paper size/orientation, background PDF,
        and per-element position (mm) and size fields."""
        return OrderedDict(
            list(super().settings_form_fields.items()) + [
                ('paper_size',
                 forms.ChoiceField(
                     label=_('Paper size'),
                     choices=(
                         ('A4', 'A4'),
                         ('A5', 'A5'),
                         ('B4', 'B4'),
                         ('B5', 'B5'),
                         ('letter', 'Letter'),
                         ('legal', 'Legal'),
                     ),
                     required=False
                 )),
                ('orientation',
                 forms.ChoiceField(
                     label=_('Paper orientation'),
                     choices=(
                         ('portrait', _('Portrait')),
                         ('landscape', _('Landscape')),
                     ),
                     required=False
                 )),
                ('background',
                 ExtFileField(
                     label=_('Background PDF'),
                     ext_whitelist=(".pdf", ),
                     required=False
                 )),
                ('qr_x', forms.FloatField(label=_('QR-Code x position (mm)'), required=False)),
                ('qr_y', forms.FloatField(label=_('QR-Code y position (mm)'), required=False)),
                ('qr_s', forms.FloatField(label=_('QR-Code size (mm)'), required=False)),
                ('code_x', forms.FloatField(label=_('Ticket code x position (mm)'), required=False)),
                ('code_y', forms.FloatField(label=_('Ticket code y position (mm)'), required=False)),
                ('code_s', forms.FloatField(label=_('Ticket code size (mm)'), required=False)),
                ('name_x', forms.FloatField(label=_('Product name x position (mm)'), required=False)),
                ('name_y', forms.FloatField(label=_('Product name y position (mm)'), required=False)),
                ('name_s', forms.FloatField(label=_('Product name size (mm)'), required=False)),
                ('price_x', forms.FloatField(label=_('Price x position (mm)'), required=False)),
                ('price_y', forms.FloatField(label=_('Price y position (mm)'), required=False)),
                ('price_s', forms.FloatField(label=_('Price size (mm)'), required=False)),
                ('event_x', forms.FloatField(label=_('Event name x position (mm)'), required=False)),
                ('event_y', forms.FloatField(label=_('Event name y position (mm)'), required=False)),
                ('event_s', forms.FloatField(label=_('Event name size (mm)'), required=False)),
            ]
        )
|
{
"content_hash": "c1ab5e65e6ea25cb6bb7a91d5525136a",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 119,
"avg_line_length": 45.732919254658384,
"alnum_prop": 0.5370093711802254,
"repo_name": "awg24/pretix",
"id": "256c1784a22db4a1c03645661b5784ee5a283e0d",
"size": "7365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pretix/plugins/ticketoutputpdf/ticketoutput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43664"
},
{
"name": "HTML",
"bytes": "167660"
},
{
"name": "JavaScript",
"bytes": "24712"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "643853"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
}
|
"""
For handling the config file data.
"""
import json
import logging
import os
import yaml
from box import Box
from .util import deepmerge
log = logging.getLogger(__name__)
class Config(Box):
    """
    A Box subclass that understands our config files: JSON/YAML loading,
    'conf.d'-style directories, and dotted-key access.
    """

    def __init__(self, *args, **kwargs):
        # Box option: recursively convert nested dicts into Boxes.
        kwargs['box_it_up'] = True
        super().__init__(*args, **kwargs)

    def get(self, key, default=None):
        """
        Like :py:meth:`dict.get`, but *key* may use "dotted notation"
        to descend into nested dictionaries::

            >>> c.set('foo.bar', 1)
            >>> c.get('foo.bar')
            1

        Args:
            key (str): dictionary key
            default: return this if the key is not found

        Returns:
            value: whatever's there, else the default
        """
        if '.' not in key:
            return super().get(key, default)
        head, remainder = key.split('.', 1)
        found = super().get(head, default)
        if isinstance(found, (dict, Config)):
            return found.get(remainder, default)
        # Non-dict leaf hit partway through the path: keep the original
        # semantics (truthy value wins, falsy falls back to the default).
        return found if found else default

    def set(self, key, value):
        """
        Store *value* under *key*; "dotted notation" creates/updates the
        nested structure::

            >>> c.set('foo.bar', 1)
            >>> c.get('foo.bar')
            1

        Args:
            key (str): where to store the value
            value: data to store.
        """
        if '.' in key:
            # Build the nested dict for the path and merge it in.
            self.merge_dict(dict_from_dotted_key(key, value))
        else:
            self[key] = value

    def load(self, cfgfile, encoding='utf-8'):
        """
        Load configuration from a file, or from a 'conf.d' directory.
        """
        loader = self.load_dir if os.path.isdir(cfgfile) else self.load_file
        loader(cfgfile, encoding)

    def load_file(self, cfgfile, encoding='utf-8'):
        """Read one JSON or YAML config file and merge its contents in."""
        if not os.path.isfile(cfgfile):
            raise ValueError(f"Cannot read config file '{cfgfile}'")
        log.debug("Loading config file %s", cfgfile)
        with open(cfgfile, 'r', encoding=encoding) as fp:
            raw = fp.read()
        if cfgfile.endswith('.json'):
            parsed = json.loads(raw)
        elif cfgfile.endswith(('.yaml', '.yml')):
            parsed = yaml.safe_load(raw)
        else:
            raise ValueError("unknown file format")
        self.merge_dict(parsed)

    def load_dir(self, cfgdir, encoding='utf-8'):
        """
        Load every '.yaml' file in *cfgdir*, 'conf.d' style, merging them
        in alphanumeric order. Dot files and other extensions are ignored.

        Args:
            cfgdir (str): directory path
            encoding (str): File encoding option for open()
        """
        candidates = (
            name for name in sorted(os.listdir(cfgdir))
            if name.endswith('.yaml') and not name.startswith('.')
        )
        for name in candidates:
            self.load_file(os.path.join(cfgdir, name), encoding=encoding)

    def merge_dict(self, data):
        """
        Deep-merge a plain dictionary into this config.
        """
        if not isinstance(data, dict):
            raise TypeError("Argument 'data' must be of type 'dict'")
        self.update(deepmerge(self.to_dict(), data))
def dict_from_dotted_key(key, value):
    """
    Build a nested dict from a dotted key::

        >>> dict_from_dotted_key('foo.bar.baz', 1)
        {'foo': {'bar': {'baz': 1}}}

    Args:
        key (str): dotted key
        value: value

    Returns:
        dict
    """
    if '.' not in key:
        return {key: value}
    parts = key.split('.')
    # Walk down, creating one level per path component.
    result = cursor = {}
    for part in parts[:-1]:
        cursor[part] = {}
        cursor = cursor[part]
    cursor[parts[-1]] = value
    return result
|
{
"content_hash": "b2a469c8076960ed6fd674614ea5d4fa",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 73,
"avg_line_length": 23.421965317919074,
"alnum_prop": 0.5165350444225074,
"repo_name": "jwplayer/rssalertbot",
"id": "a64f1a7e7f6fbfbb85641e0bf409887386e25d00",
"size": "4052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rssalertbot/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1037"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "57574"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
}
|
import asyncio
import mimetypes
import os
from . import hdrs
from .helpers import create_future
from .web_reqrep import StreamResponse
class FileSender:
    """A helper that can be used to send files.

    Uses the os.sendfile() zero-copy system call when available; otherwise
    (or when AIOHTTP_NOSENDFILE is set) falls back to chunked userspace
    copying.
    """

    def __init__(self, *, resp_factory=StreamResponse, chunk_size=256*1024):
        self._response_factory = resp_factory
        self._chunk_size = chunk_size
        # Environment escape hatch to disable the sendfile() fast path.
        if bool(os.environ.get("AIOHTTP_NOSENDFILE")):
            self._sendfile = self._sendfile_fallback

    def _sendfile_cb(self, fut, out_fd, in_fd, offset,
                     count, loop, registered):
        # Event-loop writer callback driving a non-blocking sendfile();
        # re-registers itself until `count` bytes have been sent.
        if registered:
            loop.remove_writer(out_fd)
        if fut.cancelled():
            return
        try:
            n = os.sendfile(out_fd, in_fd, offset, count)
            if n == 0:  # EOF reached
                n = count
        except (BlockingIOError, InterruptedError):
            n = 0
        except Exception as exc:
            fut.set_exception(exc)
            return

        if n < count:
            loop.add_writer(out_fd, self._sendfile_cb, fut, out_fd, in_fd,
                            offset + n, count - n, loop, True)
        else:
            fut.set_result(None)

    @asyncio.coroutine
    def _sendfile_system(self, request, resp, fobj, count):
        # Write count bytes of fobj to resp using
        # the os.sendfile system call.
        #
        # request should be a aiohttp.web.Request instance.
        #
        # resp should be a aiohttp.web.StreamResponse instance.
        #
        # fobj should be an open file object.
        #
        # count should be an integer > 0.

        transport = request.transport

        # sendfile() cannot write through TLS; fall back to chunked copy.
        if transport.get_extra_info("sslcontext"):
            yield from self._sendfile_fallback(request, resp, fobj, count)
            return

        def _send_headers(resp_impl):
            # Durty hack required for
            # https://github.com/KeepSafe/aiohttp/issues/1093
            # don't send headers in sendfile mode
            pass

        resp._send_headers = _send_headers

        @asyncio.coroutine
        def write_eof():
            # Durty hack required for
            # https://github.com/KeepSafe/aiohttp/issues/1177
            # do nothing in write_eof
            pass

        resp.write_eof = write_eof

        resp_impl = yield from resp.prepare(request)

        loop = request.app.loop

        # See https://github.com/KeepSafe/aiohttp/issues/958 for details
        # send headers
        headers = ['HTTP/{0.major}.{0.minor} 200 OK\r\n'.format(
            request.version)]
        for hdr, val in resp.headers.items():
            headers.append('{}: {}\r\n'.format(hdr, val))
        headers.append('\r\n')

        # Duplicate the socket so we can drive it directly, bypassing the
        # transport's buffering.
        out_socket = transport.get_extra_info("socket").dup()
        out_socket.setblocking(False)
        out_fd = out_socket.fileno()
        in_fd = fobj.fileno()
        bheaders = ''.join(headers).encode('utf-8')
        headers_length = len(bheaders)
        resp_impl.headers_length = headers_length
        resp_impl.output_length = headers_length + count

        try:
            yield from loop.sock_sendall(out_socket, bheaders)
            fut = create_future(loop)
            self._sendfile_cb(fut, out_fd, in_fd, 0, count, loop, False)

            yield from fut
        finally:
            out_socket.close()

    @asyncio.coroutine
    def _sendfile_fallback(self, request, resp, fobj, count):
        # Mimic the _sendfile_system() method, but without using the
        # os.sendfile() system call. This should be used on systems
        # that don't support the os.sendfile().

        # To avoid blocking the event loop & to keep memory usage low,
        # fobj is transferred in chunks controlled by the
        # constructor's chunk_size argument.

        yield from resp.prepare(request)

        chunk_size = self._chunk_size
        while count > 0:
            # BUGFIX: the previous implementation read fobj.read(count)
            # after the first chunk, slurping the entire remainder into
            # memory (and then spinning on empty writes). Cap every read
            # at chunk_size so large files stream with bounded memory.
            chunk = fobj.read(min(chunk_size, count))
            if not chunk:
                # Unexpected EOF (file shorter than `count`): stop cleanly.
                break
            resp.write(chunk)
            yield from resp.drain()
            count -= len(chunk)

    if hasattr(os, "sendfile"):  # pragma: no cover
        _sendfile = _sendfile_system
    else:  # pragma: no cover
        _sendfile = _sendfile_fallback

    @asyncio.coroutine
    def send(self, request, filepath):
        """Send filepath to client using request."""
        st = filepath.stat()

        modsince = request.if_modified_since
        if modsince is not None and st.st_mtime <= modsince.timestamp():
            from .web_exceptions import HTTPNotModified
            raise HTTPNotModified()

        ct, encoding = mimetypes.guess_type(str(filepath))
        if not ct:
            ct = 'application/octet-stream'

        resp = self._response_factory()
        resp.content_type = ct
        if encoding:
            resp.headers[hdrs.CONTENT_ENCODING] = encoding
        resp.last_modified = st.st_mtime

        file_size = st.st_size

        resp.content_length = file_size
        # Cork the socket while the body streams, then restore nodelay.
        resp.set_tcp_cork(True)
        try:
            with filepath.open('rb') as f:
                yield from self._sendfile(request, resp, f, file_size)
        finally:
            resp.set_tcp_nodelay(True)

        return resp
|
{
"content_hash": "823cbc77b737dad4bd3a098853f0a30d",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 76,
"avg_line_length": 31.321428571428573,
"alnum_prop": 0.572215887495249,
"repo_name": "jettify/aiohttp",
"id": "c2768deeb309c81790d4ac0e29370e9cbed2a393",
"size": "5262",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "aiohttp/file_sender.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4885"
},
{
"name": "Makefile",
"bytes": "2376"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1029499"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
}
|
from ..encoding import wif_to_secret_exponent
from ..convention import tx_fee
from .Spendable import Spendable
from .Tx import Tx
from .TxOut import TxOut, standard_tx_out_script
from .pay_to import build_hash160_lookup
from ..networks import wif_prefix_for_netcode
class SecretExponentMissing(Exception):
    """Raised when a transaction input could not be signed (no matching
    secret exponent / WIF was supplied)."""
    pass
class LazySecretExponentDB(object):
    """
    Converting secret exponents to public pairs is slow in pure Python,
    so this lookup table consumes WIFs lazily, caching each conversion,
    which helps when many secret exponents are supplied but few are used.
    """

    def __init__(self, wif_iterable, secret_exponent_db_cache, netcode='BTC'):
        self.wif_iterable = iter(wif_iterable)
        self.secret_exponent_db_cache = secret_exponent_db_cache
        self.netcode = netcode

    def get(self, v):
        """Return the cached entry for *v*, consuming WIFs until it is
        found; return None once the WIF supply is exhausted."""
        cache = self.secret_exponent_db_cache
        if v in cache:
            return cache[v]
        for wif in self.wif_iterable:
            exponent = wif_to_secret_exponent(
                wif, allowable_wif_prefixes=wif_prefix_for_netcode(self.netcode))
            cache.update(build_hash160_lookup([exponent]))
            if v in cache:
                return cache[v]
        # Nothing left to try; remember that the iterable is spent.
        self.wif_iterable = []
        return None
def create_tx(spendables, payables, fee="standard", lock_time=0, version=1):
    """
    The easiest way to create an unsigned transaction.

    All coin values are in satoshis.

    spendables:
        a list of Spendable objects, which act as inputs. Each entry may
        also be a Spendable.as_text or Spendable.as_dict if you prefer a
        non-object-based input (which might be easier for airgapped
        transactions, for example).
    payables:
        a list where each entry is a bitcoin address, or a tuple of
        (bitcoin address, coin_value). If the coin_value is missing or
        zero, this address is thrown into the "split pool". Funds not
        explicitly claimed by the fee or a bitcoin address are shared as
        equally as possible among the split pool. [Minor point: if the
        amount to be split does not divide evenly, some of the earlier
        bitcoin addresses will get an extra satoshi.]
    fee:
        a value, or "standard" for it to be calculated.
    version:
        the version to use in the transaction. Normally 1.
    lock_time:
        the lock_time to use in the transaction. Normally 0.

    Returns the unsigned Tx transaction. Note that unspents are set, so the
    transaction can be immediately signed.

    Example:

    tx = create_tx(
        spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"),
        ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"],
        fee=0)

    This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH
    to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might
    take a while to confirm, possibly never).
    """
    def _as_spendable(s):
        # Accept a Spendable, its dict form, or its text form.
        if isinstance(s, Spendable):
            return s
        if hasattr(s, "keys"):
            return Spendable.from_dict(s)
        return Spendable.from_text(s)

    coins = [_as_spendable(s) for s in spendables]
    txs_in = [coin.tx_in() for coin in coins]

    txs_out = []
    for payable in payables:
        # Either (address, value) or a bare address (value defaults to 0,
        # which marks it as a split-pool recipient).
        if len(payable) == 2:
            bitcoin_address, coin_value = payable
        else:
            bitcoin_address, coin_value = payable, 0
        txs_out.append(TxOut(coin_value, standard_tx_out_script(bitcoin_address)))

    tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out, lock_time=lock_time)
    tx.set_unspents(coins)
    distribute_from_split_pool(tx, fee)
    return tx
def distribute_from_split_pool(tx, fee):
    """
    Share unclaimed funds among the zero-valued outputs of *tx*.

    TxOut entries with a coin value of zero form the "split pool". Funds
    not explicitly claimed by the fee or by other TxOut items are split as
    equally as possible among the pool; if the amount does not divide
    evenly, earlier TxOut items receive one extra satoshi.

    tx:
        the transaction
    fee:
        the reserved fee set aside

    Returns the number of split-pool outputs.
    """
    # calculate fees
    if fee == 'standard':
        # TODO: improve this
        # 1: the tx is not fully built out, so it will actually be larger than implied at this point
        # 2: recommended_fee_for_tx gives estimates that are too high
        fee = tx_fee.recommended_fee_for_tx(tx)

    pool = [tx_out for tx_out in tx.txs_out if tx_out.coin_value == 0]
    if not pool:
        return 0

    available = sum(spendable.coin_value for spendable in tx.txs_in_as_spendable())
    claimed = fee + sum(tx_out.coin_value for tx_out in tx.txs_out)
    remaining = available - claimed
    if remaining < 0:
        raise ValueError("insufficient inputs for outputs")
    share, extra_count = divmod(remaining, len(pool))
    if share < 1:
        raise ValueError("not enough to pay nonzero amounts to at least one of the unspecified outputs")
    for tx_out in pool:
        # The first `extra_count` outputs absorb the indivisible remainder.
        tx_out.coin_value = share + (1 if extra_count > 0 else 0)
        extra_count -= 1
    return len(pool)
def sign_tx(tx, wifs=None, secret_exponent_db=None, **kwargs):
    """
    This function provides a convenience method to sign a transaction.

    The transaction must have "unspents" set by, for example,
    calling tx.unspents_from_db.

    wifs:
        the list of WIFs required to sign this transaction.
    secret_exponent_db:
        an optional dictionary (or any object with a .get method) that contains
        a bitcoin address => (secret_exponent, public_pair, is_compressed)
        tuple. This will be built automatically lazily with the list of WIFs.
        You can pass in an empty dictionary and as WIFs are processed, they
        will be cached here. If you have multiple transactions to sign, each with
        the same WIF list, passing a cache dictionary in may speed things up a bit.

    Returns the signed Tx transaction, or raises an exception.

    At least one of "wifs" and "secret_exponent_db" must be included for there
    to be any hope of signing the transaction.

    Example:

    sign_tx(wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"])
    """
    # BUGFIX: defaults were the mutable literals wifs=[] and
    # secret_exponent_db={}; the shared default dict silently cached
    # key material across unrelated calls. Use None sentinels instead.
    if wifs is None:
        wifs = []
    if secret_exponent_db is None:
        secret_exponent_db = {}
    tx.sign(LazySecretExponentDB(wifs, secret_exponent_db), **kwargs)
def create_signed_tx(spendables, payables, wifs=None, fee="standard",
                     lock_time=0, version=1, secret_exponent_db=None, **kwargs):
    """
    This function provides an easy way to create and sign a transaction.

    All coin values are in satoshis.

    spendables, payables, fee, lock_time, version are as in create_tx, above.
    wifs, secret_exponent_db are as in sign_tx, above.

    Returns the signed Tx transaction, or raises an exception
    (SecretExponentMissing if any input could not be signed).

    At least one of "wifs" and "secret_exponent_db" must be included for there
    to be any hope of signing the transaction.

    Example:

    tx = create_signed_tx(
        spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"),
        ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"],
        wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"],
        fee=0)

    This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH
    to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might
    take a while to confirm, possibly never).
    """
    # BUGFIX: defaults were the mutable literals wifs=[] and
    # secret_exponent_db={} (shared across calls). Normalize None here so
    # this works regardless of sign_tx's own defaults.
    tx = create_tx(spendables, payables, fee=fee, lock_time=lock_time, version=version)
    sign_tx(tx,
            wifs=wifs if wifs is not None else [],
            secret_exponent_db=secret_exponent_db if secret_exponent_db is not None else {},
            **kwargs)
    # Verify every input actually got a valid signature.
    # (The loop variable was previously misleadingly named tx_out.)
    for idx, _tx_in in enumerate(tx.txs_in):
        if not tx.is_signature_ok(idx):
            raise SecretExponentMissing("failed to sign spendable for %s" %
                                        tx.unspents[idx].bitcoin_address())
    return tx
|
{
"content_hash": "efe58f937fce38c4fcf509dc198b1a4a",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 118,
"avg_line_length": 39.42307692307692,
"alnum_prop": 0.6726829268292683,
"repo_name": "shayanb/pycoin",
"id": "94e9a00a3142a2ce16dbce7e30c345b2c1e99aff",
"size": "8201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycoin/tx/tx_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "454923"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
import pylab as pl
import numpy as np
import fourpeaks as fF
class ga:
    """A basic genetic algorithm over binary strings.

    Individuals are rows of a (populationSize x stringLength) 0/1 matrix.
    Selection is fitness-proportional; crossover is single-point or
    uniform; elitism and tournament replacement are optional.
    """

    def __init__(self, stringLength, fitnessFunction, nEpochs, populationSize=100, mutationProb=-1, crossover='un', nElite=4, tournament=True):
        """ Constructor"""
        self.stringLength = stringLength

        # Population size should be even
        if np.mod(populationSize, 2) == 0:
            self.populationSize = populationSize
        else:
            self.populationSize = populationSize + 1

        if mutationProb < 0:
            # BUGFIX: was 1/stringLength, which is integer division on
            # Python 2 and yielded 0 — silently disabling mutation.
            self.mutationProb = 1.0 / stringLength
        else:
            self.mutationProb = mutationProb

        self.nEpochs = nEpochs
        # Name of the fitness function module (eval'd inside runGA).
        self.fitnessFunction = fitnessFunction
        self.crossover = crossover
        self.nElite = nElite
        # Keep the historical (misspelt) attribute name for compatibility;
        # note it cannot be called 'tournament' without shadowing the method.
        self.tournment = tournament

        # Random initial population of 0/1 strings.
        self.population = np.random.rand(self.populationSize, self.stringLength)
        self.population = np.where(self.population < 0.5, 0, 1)

    def runGA(self, plotfig):
        """The basic loop"""
        pl.ion()
        bestfit = np.zeros(self.nEpochs)
        for i in range(self.nEpochs):
            # Compute fitness of the population
            fitness = eval(self.fitnessFunction)(self.population)

            # Pick parents -- can do in order since they are randomised
            newPopulation = self.fps(self.population, fitness)

            # Apply the genetic operators
            if self.crossover == 'sp':
                newPopulation = self.spCrossover(newPopulation)
            elif self.crossover == 'un':
                newPopulation = self.uniformCrossover(newPopulation)
            newPopulation = self.mutate(newPopulation)

            # Apply elitism and tournaments if using
            if self.nElite > 0:
                newPopulation = self.elitism(self.population, newPopulation, fitness)

            # BUGFIX: the flag lives in self.tournment; the original tested
            # self.tournament, which is the *method* object and therefore
            # always truthy, so tournament=False was silently ignored.
            if self.tournment:
                newPopulation = self.tournament(self.population, newPopulation, fitness, self.fitnessFunction)

            self.population = newPopulation
            bestfit[i] = fitness.max()

            if np.mod(i, 100) == 0:
                # print() call for Python 3 compatibility
                print(i, fitness.max())
        pl.plot(bestfit, 'kx-')

    def fps(self, population, fitness):
        """Fitness-proportional selection with shuffling."""
        # Scale fitness by total fitness
        fitness = fitness / np.sum(fitness)
        fitness = 10 * fitness / fitness.max()

        # Put repeated copies of each string in according to fitness
        # Deal with strings with very low fitness
        j = 0
        while np.round(fitness[j]) < 1:
            j = j + 1
        # int(...) needed: modern NumPy rejects float shapes.
        newPopulation = np.kron(np.ones((int(np.round(fitness[j])), 1)), population[j, :])

        # Add multiple copies of strings into the newPopulation
        for i in range(j + 1, self.populationSize):
            if np.round(fitness[i]) >= 1:
                newPopulation = np.concatenate((newPopulation, np.kron(np.ones((int(np.round(fitness[i])), 1)), population[i, :])), axis=0)

        # Shuffle the order (note that there are still too many)
        # list(...) needed: np.random.shuffle cannot shuffle a Py3 range.
        indices = list(range(np.shape(newPopulation)[0]))
        np.random.shuffle(indices)
        newPopulation = newPopulation[indices[:self.populationSize], :]
        return newPopulation

    def spCrossover(self, population):
        """Single point crossover of consecutive pairs."""
        newPopulation = np.zeros(np.shape(population))
        crossoverPoint = np.random.randint(0, self.stringLength, self.populationSize)
        for i in range(0, self.populationSize, 2):
            newPopulation[i, :crossoverPoint[i]] = population[i, :crossoverPoint[i]]
            newPopulation[i + 1, :crossoverPoint[i]] = population[i + 1, :crossoverPoint[i]]
            newPopulation[i, crossoverPoint[i]:] = population[i + 1, crossoverPoint[i]:]
            newPopulation[i + 1, crossoverPoint[i]:] = population[i, crossoverPoint[i]:]
        return newPopulation

    def uniformCrossover(self, population):
        """Uniform crossover: each bit taken from either parent at random."""
        newPopulation = np.zeros(np.shape(population))
        which = np.random.rand(self.populationSize, self.stringLength)
        which1 = which >= 0.5
        for i in range(0, self.populationSize, 2):
            newPopulation[i, :] = population[i, :] * which1[i, :] + population[i + 1, :] * (1 - which1[i, :])
            newPopulation[i + 1, :] = population[i, :] * (1 - which1[i, :]) + population[i + 1, :] * which1[i, :]
        return newPopulation

    def mutate(self, population):
        """Flip each bit independently with probability mutationProb."""
        whereMutate = np.random.rand(np.shape(population)[0], np.shape(population)[1])
        population[np.where(whereMutate < self.mutationProb)] = 1 - population[np.where(whereMutate < self.mutationProb)]
        return population

    def elitism(self, oldPopulation, population, fitness):
        """Copy the nElite fittest old strings over random new ones."""
        best = np.argsort(fitness)
        best = np.squeeze(oldPopulation[best[-self.nElite:], :])
        # list(...) needed: np.random.shuffle cannot shuffle a Py3 range.
        indices = list(range(np.shape(population)[0]))
        np.random.shuffle(indices)
        population = population[indices, :]
        population[0:self.nElite, :] = best
        return population

    def tournament(self, oldPopulation, population, fitness, fitnessFunction):
        """Pairwise tournament between old and new strings; the two best
        of each foursome survive."""
        newFitness = eval(self.fitnessFunction)(population)
        for i in range(0, np.shape(population)[0], 2):
            # axis=0: the arrays are 1-D; old NumPy silently ignored the
            # original axis=1, modern NumPy raises on it.
            f = np.concatenate((fitness[i:i + 2], newFitness[i:i + 2]), axis=0)
            indices = np.argsort(f)
            if indices[-1] < 2 and indices[-2] < 2:
                population[i, :] = oldPopulation[i, :]
                population[i + 1, :] = oldPopulation[i + 1, :]
            elif indices[-1] < 2:
                if indices[0] >= 2:
                    population[i + indices[0] - 2, :] = oldPopulation[i + indices[-1]]
                else:
                    population[i + indices[1] - 2, :] = oldPopulation[i + indices[-1]]
            elif indices[-2] < 2:
                if indices[0] >= 2:
                    population[i + indices[0] - 2, :] = oldPopulation[i + indices[-2]]
                else:
                    population[i + indices[1] - 2, :] = oldPopulation[i + indices[-2]]
        return population
|
{
"content_hash": "66dd0b43b4f42d69bf9e6e1a1e221441",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 132,
"avg_line_length": 34.32214765100671,
"alnum_prop": 0.7094251075479077,
"repo_name": "Anderson-Lab/anderson-lab.github.io",
"id": "4bfc51bb5fdf3feb57eee70c633d52e36a6a701d",
"size": "5622",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "csc_466_2021_spring/MLCode/Ch10/ga.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "79604"
},
{
"name": "JavaScript",
"bytes": "53016"
},
{
"name": "Jupyter Notebook",
"bytes": "64098"
},
{
"name": "Python",
"bytes": "557510"
},
{
"name": "Ruby",
"bytes": "681"
},
{
"name": "SCSS",
"bytes": "64925"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
from .leaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
LayerGroup, FeatureGroup,
DrawControl
)
from .notebook import initialize_notebook, get_static_path
|
{
"content_hash": "84914bf5866026f8f7c87596396e891d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.7007874015748031,
"repo_name": "erdc-cm/leafletwidget",
"id": "8d1452d01df58689dfb76df7ff08cc48f4116931",
"size": "254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leafletwidget/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15770"
},
{
"name": "Python",
"bytes": "20629"
}
],
"symlink_target": ""
}
|
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.aws_hook import AwsHook
from airflow.providers.amazon.aws.operators.sagemaker_base import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerModelOperator(SageMakerBaseOperator):
    """
    Create a SageMaker model.

    This operator returns The ARN of the model created in Amazon SageMaker

    :param config: The configuration necessary to create a model.

        For details of the configuration parameter see :py:meth:`SageMaker.Client.create_model`
    :type config: dict
    :param aws_conn_id: The AWS connection ID to use.
    :type aws_conn_id: str
    """

    @apply_defaults
    def __init__(self,
                 config,
                 *args, **kwargs):
        super().__init__(config=config,
                         *args, **kwargs)
        self.config = config

    def expand_role(self):
        """Replace a role name in the config with its full ARN, if present."""
        if 'ExecutionRoleArn' not in self.config:
            return
        hook = AwsHook(self.aws_conn_id)
        self.config['ExecutionRoleArn'] = hook.expand_role(self.config['ExecutionRoleArn'])

    def execute(self, context):
        """Create the model and return its description, or fail loudly."""
        self.preprocess_config()

        self.log.info('Creating SageMaker Model %s.', self.config['ModelName'])
        response = self.hook.create_model(self.config)
        status_code = response['ResponseMetadata']['HTTPStatusCode']
        if status_code != 200:
            raise AirflowException('Sagemaker model creation failed: %s' % response)
        return {'Model': self.hook.describe_model(self.config['ModelName'])}
|
{
"content_hash": "67639c5fda05e7ccf1ab235dba4e038f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 95,
"avg_line_length": 34.270833333333336,
"alnum_prop": 0.6370820668693009,
"repo_name": "spektom/incubator-airflow",
"id": "57b110042d3280aaac4cdfbc990d018c4d7b2cc3",
"size": "2433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/operators/sagemaker_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9768581"
},
{
"name": "Shell",
"bytes": "221415"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListGlobalOperations(Choreography):
    """Temboo Choreo wrapper for listing Google Compute Engine global
    operations."""

    _CHOREO_PATH = '/Library/Google/ComputeEngine/GlobalOperations/ListGlobalOperations'

    def __init__(self, temboo_session):
        """
        Create a new instance of the ListGlobalOperations Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ListGlobalOperations, self).__init__(temboo_session, self._CHOREO_PATH)

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return ListGlobalOperationsInputSet()

    def _make_result_set(self, result, path):
        """Wrap raw execution output in this Choreo's result set type."""
        return ListGlobalOperationsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution handle for this Choreo."""
        return ListGlobalOperationsChoreographyExecution(session, exec_id, path)
class ListGlobalOperationsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ListGlobalOperations
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def _set(self, name, value):
        # Single funnel for every input; keeps each setter to one line.
        super(ListGlobalOperationsInputSet, self)._set_input(name, value)

    def set_AccessToken(self, value):
        """((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)"""
        self._set('AccessToken', value)

    def set_ClientID(self, value):
        """((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)"""
        self._set('ClientID', value)

    def set_ClientSecret(self, value):
        """((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)"""
        self._set('ClientSecret', value)

    def set_Fields(self, value):
        """((optional, string) Comma-seperated list of fields you want to include in the response.)"""
        self._set('Fields', value)

    def set_Filter(self, value):
        """((optional, string) A filter expression for narrowing results in the form: {field_name} {comparison_string} {literal_string} (e.g. name eq operation-1234). Comparison strings can be eq (equals) or ne (not equals).)"""
        self._set('Filter', value)

    def set_MaxResults(self, value):
        """((optional, integer) The maximum number of results to return.)"""
        self._set('MaxResults', value)

    def set_PageToken(self, value):
        """((optional, string) The "nextPageToken" found in the response which is used to page through results.)"""
        self._set('PageToken', value)

    def set_Project(self, value):
        """((required, string) The ID of a Google Compute project.)"""
        self._set('Project', value)

    def set_RefreshToken(self, value):
        """((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)"""
        self._set('RefreshToken', value)
class ListGlobalOperationsResultSet(ResultSet):
    """
    A ResultSet tailored to the values returned by the ListGlobalOperations
    Choreo; used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Parse *str* (a JSON document) into Python objects."""
        # NOTE: the parameter name shadows the builtin ``str``; it is kept
        # unchanged for backward compatibility with keyword callers.
        return json.loads(str)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)
class ListGlobalOperationsChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronously-run ListGlobalOperations Choreo.
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result set.
        return ListGlobalOperationsResultSet(response, path)
|
{
"content_hash": "5cbae5488f22da5d80d9038a17e17eee",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 273,
"avg_line_length": 50.584158415841586,
"alnum_prop": 0.7020943433157174,
"repo_name": "jordanemedlock/psychtruths",
"id": "2d6ba1474c9f0623695ea8feb2a287157df45e71",
"size": "6021",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Google/ComputeEngine/GlobalOperations/ListGlobalOperations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
import datetime as dt
import argparse
import psycopg2 as psy
import simplejson as json
import yaml
import re
# other database functions
import setup_db
# peyotl setup
from peyotl.api.phylesystem_api import PhylesystemAPI
from peyotl.manip import iter_trees
from peyotl import gen_otu_dict, iter_node
from peyotl.phylesystem.phylesystem_umbrella import Phylesystem
from peyotl.nexson_syntax import get_nexml_el
from peyotl import get_logger
_LOG = get_logger()
# creates an empty file once phylesystem loaded
# used during ansible deployment to determine whether data loaded
def create_status_file():
    """Create an empty marker file '.phylesystem' in the current directory.

    Ansible deployment checks for this file to decide whether the
    phylesystem data has already been loaded.  Failures are reported but
    not raised.
    """
    try:
        # 'w+' creates/truncates; the file is deliberately left empty.
        with open('.phylesystem', 'w+'):
            pass
    except IOError as e:
        # BUG FIX: the Python-2-only "except IOError as (errno, strerror)"
        # tuple-unpacking syntax is a SyntaxError on Python 3; this form
        # (and the parenthesized print) works on both versions.
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
def create_phylesystem_obj():
    """Return the phylesystem object behind a locally-backed PhylesystemAPI."""
    api_wrapper = PhylesystemAPI(get_from='local')
    return api_wrapper.phylesystem_obj
# Either convert a string to unicode, or returns an
# already-unicode string. Used for curator names.
def to_unicode(text):
    """Return *text* as decoded (unicode) text.

    Byte strings are decoded as UTF-8; anything else (already-decoded
    text, or non-string values such as ints) is returned unchanged.  This
    matches the old ``unicode(text, 'utf-8')`` / ``except TypeError``
    behaviour while working on both Python 2 and Python 3 — the original
    relied on the Python-2-only ``unicode`` builtin.  Used for curator
    names.
    """
    if isinstance(text, bytes):
        return text.decode('utf-8')
    return text
# iterate over curators, adding curators to curator table and the
# who-curated-what relationship to study-curator-map
def insert_curators(connection,cursor,config_obj,study_id,curators):
    # Insert each curator into the curator table (if not already present)
    # and record the curator<->study relationship in the mapping table.
    # Commits once after all curators for the study are processed.
    _LOG.debug(u'Loading {n} curators for study {s}'.format(
        n=len(curators),
        s=study_id)
        )
    try:
        CURATORTABLE = config_obj.get('database_tables','curatortable')
        CURATORSTUDYTABLE = config_obj.get('database_tables','curatorstudytable')
        for name in curators:
            name = to_unicode(name)
            _LOG.debug(u'Loading curator {c}'.format(c=name))
            # check to make sure this curator name doesn't exist already
            sqlstring = ('SELECT id FROM {tablename} '
                'WHERE name=%s;'
                .format(tablename=CURATORTABLE)
                )
            # NOTE(review): data=(name) is just `name`, not a tuple; the
            # execute call below wraps it as (data,), so binding still works.
            data=(name)
            cursor.execute(sqlstring,(data,))
            curator_id = cursor.fetchone()
            if curator_id is None:
                # insert the curator, returning the serial id, which
                # we will need shortly
                sqlstring = ('INSERT INTO {tablename} (name) '
                    'VALUES (%s) RETURNING id;'
                    .format(tablename=CURATORTABLE)
                    )
                data = (name)
                _LOG.debug('SQL: {p}'.format(p=cursor.mogrify(sqlstring,(data,))))
                cursor.execute(sqlstring,(data,))
                curator_id = cursor.fetchone()
            # now insert the curator - study mapping
            # NOTE(review): curator_id here is the raw fetchone() row (a
            # 1-tuple), not a bare int; psycopg2 adapts it when bound below
            # — confirm this stores the intended value.
            sqlstring = ('INSERT INTO {tablename} (curator_id,study_id) '
                'VALUES (%s,%s);'
                .format(tablename=CURATORSTUDYTABLE)
                )
            data = (curator_id,study_id)
            _LOG.debug(u'SQL: {p}'.format(p=cursor.mogrify(sqlstring,(data))))
            cursor.execute(sqlstring,data)
        connection.commit()
    except psy.ProgrammingError, ex:
        # Python 2 except syntax; the DB error is swallowed after printing,
        # so a failed insert does not abort the overall load.
        print 'Error inserting curator'
# load the nexson properties into a table
def load_properties(connection,cursor,prop_table,study_props,tree_props):
    """Insert study- and tree-level nexson property names into *prop_table*.

    Each property is stored as (property, prefix, type): *prefix* is the
    leading '^' or '@' stripped from the name (or None), and *type* is
    'study' or 'tree'.  The original duplicated the whole loop body for
    the two property sets; a single private helper keeps both code paths
    identical (and gives tree properties the same debug logging).
    """
    _insert_property_rows(connection, cursor, prop_table, study_props, 'study')
    _insert_property_rows(connection, cursor, prop_table, tree_props, 'tree')

def _insert_property_rows(connection, cursor, prop_table, props, prop_type):
    """Insert one (property, prefix, type) row per property; commit at the end."""
    for p in props:
        prefix = None
        # remove the '^' or '@' at the start of the property
        # should be true for all properties, but check just in case
        if p.startswith('^') or p.startswith('@'):
            prefix = p[0]
            p = p[1:]
        sqlstring = ("INSERT INTO {t} (property,prefix,type) "
            "VALUES (%s,%s,%s);"
            .format(t=prop_table)
            )
        data = (p,prefix,prop_type)
        _LOG.debug(u'SQL: {s}'.format(s=cursor.mogrify(sqlstring,(data))))
        cursor.execute(sqlstring,data)
    connection.commit()
# iterate over phylesystem nexsons and import
def load_nexsons(connection,cursor,phy,config_obj,nstudies=None):
    """Import studies (plus their trees and curators) from phylesystem.

    Iterates over every nexson study object in *phy*, inserting rows into
    the study, tree and curator tables, then loads the accumulated
    study/tree property names into the property table.  If *nstudies* is
    given, stops after that many studies.
    """
    counter = 0
    # accumulate every property key seen, for the property table at the end
    study_properties = set()
    tree_properties = set()
    for study_id, studyobj in phy.iter_study_objs():
        nexml = get_nexml_el(studyobj)
        #print 'STUDY: ',study_id
        study_properties.update(nexml.keys())
        # study data for study table
        STUDYTABLE = config_obj.get('database_tables','studytable')
        # NOTE(review): 'year' is read but never used below — confirm intended.
        year = nexml.get('^ot:studyYear')
        proposedTrees = nexml.get('^ot:candidateTreeForSynthesis')
        if proposedTrees is None:
            proposedTrees = []
        # must insert study before trees
        sqlstring = ("INSERT INTO {tablename} (id) "
            "VALUES (%s);"
            .format(tablename=STUDYTABLE)
            )
        data = (study_id,)
        #print '  SQL: ',cursor.mogrify(sqlstring)
        cursor.execute(sqlstring,data)
        connection.commit()
        # update with treebase id, if exists
        datadeposit = nexml.get('^ot:dataDeposit')
        if (datadeposit):
            url = datadeposit['@href']
            # assumes TreeBASE deposit URLs end in "TB2:<id>"
            pattern = re.compile(u'.+TB2:(.+)$')
            matchobj = re.match(pattern,url)
            if (matchobj):
                tb_id = matchobj.group(1)
                sqlstring = ("UPDATE {tablename} "
                    "SET treebase_id=%s "
                    "WHERE id=%s;"
                    .format(tablename=STUDYTABLE)
                    )
                data = (tb_id,study_id)
                #print '  SQL: ',cursor.mogrify(sqlstring,data)
                cursor.execute(sqlstring,data)
                connection.commit()
        # get curator(s), noting that ot:curators might be a
        # string or a list
        c = nexml.get('^ot:curatorName')
        #print ' ot:curatorName: ',c
        curators=[]
        if (isinstance(c,basestring)):
            curators.append(c)
        else:
            curators=c
        # remove duplicates
        curators = list(set(curators))
        insert_curators(connection,cursor,config_obj,study_id,curators)
        # iterate over trees and insert tree data
        # note that OTU data done separately as COPY
        # due to size of table (see script <scriptname>)
        TREETABLE = config_obj.get('database_tables','treetable')
        ntrees = 0
        try:
            for trees_group_id, tree_id, tree in iter_trees(studyobj):
                #print ' tree :' ,tree_id
                _LOG.debug(u'{i} Loading tree {t} for study {s}'.format(
                    i=counter,
                    s=study_id,
                    t=tree_id)
                    )
                ntrees += 1
                proposedForSynth = False
                tree_properties.update(tree.keys())
                if (tree_id in proposedTrees):
                    proposedForSynth = True
                treejson = json.dumps(tree)
                # count tips: only leaf nodes carry an @otu reference
                ntips = 0
                for node_id, node in iter_node(tree):
                    oid = node.get('@otu')
                    # no @otu property on internal nodes
                    if oid is not None:
                        ntips+=1
                sqlstring = ("INSERT INTO {tablename} "
                    "(tree_id,study_id,ntips,proposed,data) "
                    "VALUES (%s,%s,%s,%s,%s);"
                    .format(tablename=TREETABLE)
                    )
                data = (tree_id,study_id,ntips,proposedForSynth,treejson)
                #print ' SQL: ',cursor.mogrify(sqlstring,data)
                cursor.execute(sqlstring,data)
                connection.commit()
        except psy.Error as e:
            # best-effort: report the DB error and continue with this study
            print e.pgerror
        # now that we have added the tree info, update the study record
        # with the json data (minus the tree info) and ntrees
        del nexml['treesById']
        studyjson = json.dumps(nexml)
        sqlstring = ("UPDATE {tablename} "
            "SET data=%s,ntrees=%s "
            "WHERE id=%s;"
            .format(tablename=STUDYTABLE)
            )
        data = (studyjson,ntrees,study_id)
        cursor.execute(sqlstring,data)
        connection.commit()
        counter+=1
        if (counter%500 == 0):
            print "loaded {n} studies".format(n=counter)
        if (nstudies and counter>=nstudies):
            print "finished inserting",nstudies,"studies"
            break
    # add the searchable properties that are not in the nexsons
    tree_properties.add("ot:ottId")
    tree_properties.add("ot:ottTaxonName")
    tree_properties.add("ot:studyId")
    tree_properties.add("ntips")
    study_properties.add("ntrees")
    study_properties.add("treebaseId")
    # and load the tree and study properties into the property table
    PROPERTYTABLE = config_obj.get('database_tables','propertytable')
    load_properties(
        connection,
        cursor,
        PROPERTYTABLE,
        study_properties,
        tree_properties)
if __name__ == "__main__":
# get command line argument (nstudies to import)
parser = argparse.ArgumentParser(description='load nexsons into postgres')
parser.add_argument('configfile',
help='path to the development.ini file'
)
parser.add_argument('-n',
dest='nstudies',
type=int,
help='load only n studies; if absent, load all studies'
)
args = parser.parse_args()
# read config variables
config_obj = setup_db.read_config(args.configfile)
connection, cursor = setup_db.connect(config_obj)
# test that tables exist
# and clear data, except taxonomy table
try:
tabledict = dict(config_obj.items('database_tables'))
for table in tabledict:
# skip the taxonomy table, which does note get loaded here
if table == "otttable":
continue
name = tabledict[table]
if setup_db.table_exists(cursor,name):
setup_db.clear_single_table(connection,cursor,name)
else:
raise psy.ProgrammingError("Table {t} does not exist".format(t=name))
# setup_db.clear_gin_index(connection,cursor)
# print "done clearing tables and index"
print "done clearing tables"
except psy.Error as e:
print e.pgerror
sys.exit(1)
# data import
starttime = dt.datetime.now()
try:
# TODO: catch peyotl-specific exceptions
phy = create_phylesystem_obj()
print "loading nexsons"
if (args.nstudies):
load_nexsons(connection,cursor,phy,config_obj,args.nstudies)
else:
load_nexsons(connection,cursor,phy,config_obj)
endtime = dt.datetime.now()
print "Load time: ",endtime - starttime
# print "indexing JSONB columns in tree and study table"
# setup_db.index_json_columns(connection,cursor,config_obj)
create_status_file()
except psy.Error as e:
print e.pgerror
sys.exit(1)
connection.close()
endtime = dt.datetime.now()
print "Total load + index time: ",endtime - starttime
|
{
"content_hash": "aa3f7334f390dced0ec6b9e43c844bce",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 85,
"avg_line_length": 36.65814696485623,
"alnum_prop": 0.5741676834582534,
"repo_name": "OpenTreeOfLife/ottreeindex",
"id": "3530b01dda1af9d5c8085bfc7b2133778bb99143",
"size": "11760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "otindex/scripts/load_nexson.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "98726"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
}
|
'''
Created on Apr 30, 2012
@author: h87966
'''
from final.user.user_datastore_appengine import UserAppengineDataStore
class UserDataStoreFactory():
    '''
    Factory for getting a UserDataStore implementation
    '''
    # Maps backend keys to ready-made datastore instances.
    # NOTE(review): instances are created at class-definition time and this
    # mutable class attribute is shared by all factories — confirm intended.
    storage_implementations = {'appengine':UserAppengineDataStore()}
    def __init__(self, storage_impl='appengine'):
        '''
        Constructor
        '''
        # Raises KeyError for an unregistered storage_impl key.
        self.storage = self.storage_implementations[storage_impl]
    def set_storage(self, user_storage):
        # Replace the selected backend with a caller-supplied datastore.
        self.storage = user_storage
    def get_storage(self):
        # Return the currently selected datastore backend.
        return self.storage
|
{
"content_hash": "4d6765bddfeeeb60bf0c73bd07067b4b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 23.5,
"alnum_prop": 0.6382978723404256,
"repo_name": "cdoremus/udacity-python_web_development-cs253",
"id": "869269151ba59e533b1dd46a2fb9b1c79105bf04",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/final/user/user_datastore_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15273"
},
{
"name": "Python",
"bytes": "233912"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import pandas as pd
import kfp.v2.dsl
@pytest.fixture(autouse=True)
def patch_kfp_component_decorator(monkeypatch):
    """
    Autouse fixture applied to every test (not once per session, as the
    previous docstring incorrectly claimed): patches `kfp.v2.dsl.component`
    with a no-op decorator so KFP does not transform the Python functions
    under test.

    Args:
        monkeypatch: Built-in pytest fixture used to patch the decorator
            `@component` in `kfp.v2.dsl`.

    Returns:
        None
    """

    def primitive_decorator(*args, **kwargs):
        """
        Stand-in for @component: accepts any decorator arguments and
        returns the wrapped function unchanged.

        Args:
            Accepts any arguments

        Returns:
            func: A decorator which simply returns the input function unchanged.
        """
        return lambda func: func

    # patch the KFP decorator
    monkeypatch.setattr(kfp.v2.dsl, "component", primitive_decorator)
@pytest.fixture(autouse=True)
def mock_kfp_artifact(monkeypatch):
    """
    Autouse fixture applied to every test (not once per session, as the
    previous docstring incorrectly claimed). It mocks the Artifact object
    (and thus any derived classes such as Dataset, Model, etc.) to return
    the URI as the path.

    Unit tests set the URI of artifacts, however, KFP components use
    Artifact.path to retrieve paths to files. If a URI doesn't start with
    gs:// or minio:// or s3://, the path will be None. This behaviour is
    avoided by mocking the Artifact._get_path method.

    Args:
        monkeypatch: Built-in pytest fixture used to patch
            `kfp.v2.dsl.Artifact._get_path`.

    Returns:
        None
    """

    def _get_path(self):
        """
        Returns:
            str: The URI of the Artifact, used verbatim as its path.
        """
        # simply return the URI
        return self.uri

    # mock the _get_path method of Artifact which is used by the property path
    monkeypatch.setattr(kfp.v2.dsl.Artifact, "_get_path", _get_path)
@pytest.fixture(scope="session")
def make_csv_file():
    """
    Session-scoped factory fixture for writing random CSV files.

    Returns:
        function: callable that writes a CSV with one label column and
        N feature columns filled with random values.
    """

    def _make_csv_file(n_features, n_rows, output_path):
        """
        Write a CSV with columns ["label", "feature0", ..., "feature<N-1>"].

        Args:
            n_features (int): number of feature columns
            n_rows (int): number of rows
            output_path (str): destination path for the CSV

        Returns:
            None
        """
        header = ["label"] + [f"feature{idx}" for idx in range(n_features)]
        values = np.random.rand(n_rows, n_features + 1)
        frame = pd.DataFrame(values, columns=header)
        frame.to_csv(output_path, index=False)

    return _make_csv_file
|
{
"content_hash": "6fa5938af3987fe4c88d501894b617fe",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 28.3,
"alnum_prop": 0.6279151943462897,
"repo_name": "GoogleCloudPlatform/vertex-pipelines-end-to-end-samples",
"id": "c10a2504aeaef4ea1a8b399902b14b5f3917d2d2",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/kfp_components/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "844"
},
{
"name": "HCL",
"bytes": "36931"
},
{
"name": "Jupyter Notebook",
"bytes": "34258"
},
{
"name": "Makefile",
"bytes": "1897"
},
{
"name": "Python",
"bytes": "263544"
},
{
"name": "Shell",
"bytes": "2524"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
from os.path import join, dirname
import pstats
import pytest
import six
from asv import benchmarks
from asv import config
from asv import environment
from asv import util
from . import tools
BENCHMARK_DIR = join(dirname(__file__), 'benchmark')
INVALID_BENCHMARK_DIR = join(
dirname(__file__), 'benchmark.invalid')
ASV_CONF_JSON = {
'project': 'asv'
}
def test_find_benchmarks(tmpdir):
    # End-to-end check of benchmark discovery, regex filtering, and a full
    # run: timings, memory, tracking, parameterized benchmarks, caching,
    # profiling and captured stderr.
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)
    shutil.copytree(BENCHMARK_DIR, 'benchmark')
    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)
    envs = list(environment.get_environments(conf, None))
    # a single regex selects only the matching benchmark names
    b = benchmarks.Benchmarks(conf, envs, regex='secondary')
    assert len(b) == 3
    b = benchmarks.Benchmarks(conf, envs, regex='example')
    assert len(b) == 22
    b = benchmarks.Benchmarks(conf, envs, regex='time_example_benchmark_1')
    assert len(b) == 2
    # a list of regexes selects the union of the matches
    b = benchmarks.Benchmarks(conf, envs, regex=['time_example_benchmark_1',
                                                 'some regexp that does not match anything'])
    assert len(b) == 2
    # no regex discovers the whole suite
    b = benchmarks.Benchmarks(conf, envs)
    assert len(b) == 26
    b = benchmarks.Benchmarks(conf, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)
    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times[
        'subdir.time_subdir.time_foo']['result'] is not None
    assert times[
        'mem_examples.mem_list']['result'] > 1000
    assert times[
        'time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times[
        'time_secondary.track_value']
    assert 'stderr' in times[
        'time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0
    # parameterized benchmarks report their parameter grid alongside results
    assert times['params_examples.track_param']['result']['params'] == [["<class 'benchmark.params_examples.ClassOne'>",
                                                                         "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]
    assert times['params_examples.mem_param']['result']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2*2
    assert times['params_examples.ParamSuite.track_value']['result']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result']['result'] == [1+0, 2+0, 3+0]
    assert isinstance(times['params_examples.TuningTest.time_it']['result']['result'][0], float)
    # skipped parameter combinations come back as NaN
    assert isinstance(times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result']['result'][2])
    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20
    # class-level setup_cache results are shared between benchmarks in a class
    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == 500
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == 500
    assert times['cache_examples.track_cache_foo']['result'] == 42
    assert times['cache_examples.track_cache_bar']['result'] == 12
    assert times['cache_examples.track_my_cache_foo']['result'] == 0
    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] == None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']
    # the collected profile data must be a loadable pstats dump
    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)
    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected
    # Calibration of iterations should not rerun setup
    expected = ['setup']*2
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() == expected
def test_invalid_benchmark_tree(tmpdir):
    """Benchmarks() must raise UserError for a malformed benchmark suite."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    conf_dict = dict(ASV_CONF_JSON)
    conf_dict['benchmark_dir'] = INVALID_BENCHMARK_DIR
    conf_dict['env_dir'] = "env"
    conf_dict['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(conf_dict)

    envs = list(environment.get_environments(conf, None))
    with pytest.raises(util.UserError):
        benchmarks.Benchmarks(conf, envs)
def test_table_formatting():
    # Exercises benchmarks._format_benchmark_result for the three shapes:
    # no params, one parameter axis, and two parameter axes (wide + narrow).
    # No params: the result renders as a bare list literal.
    benchmark = {'params': [], 'param_names': [], 'unit': 's'}
    result = []
    expected = ["[]"]
    assert benchmarks._format_benchmark_result(result, benchmark) == expected
    # One axis: one row per parameter value, with human-readable units.
    benchmark = {'params': [['a', 'b', 'c']], 'param_names': ['param1'], "unit": "seconds"}
    result = [1e-6, 2e-6, 3e-6]
    expected = ("======== ========\n"
                " param1 \n"
                "-------- --------\n"
                " a 1.00\u03bcs \n"
                " b 2.00\u03bcs \n"
                " c 3.00\u03bcs \n"
                "======== ========")
    table = "\n".join(benchmarks._format_benchmark_result(result, benchmark, max_width=80))
    assert table == expected
    # Two axes, wide layout: None renders as "failed", NaN as "n/a".
    benchmark = {'params': [["'a'", "'b'", "'c'"], ["[1]", "[2]"]], 'param_names': ['param1', 'param2'], "unit": "seconds"}
    result = [1, 2, None, 4, 5, float('nan')]
    expected = ("======== ======== =======\n"
                "-- param2 \n"
                "-------- ----------------\n"
                " param1 [1] [2] \n"
                "======== ======== =======\n"
                " a 1.00s 2.00s \n"
                " b failed 4.00s \n"
                " c 5.00s n/a \n"
                "======== ======== =======")
    table = "\n".join(benchmarks._format_benchmark_result(result, benchmark, max_width=80))
    assert table == expected
    # Two axes, max_width=0 forces the narrow one-row-per-combination layout.
    expected = ("======== ======== ========\n"
                " param1 param2 \n"
                "-------- -------- --------\n"
                " a [1] 1.00s \n"
                " a [2] 2.00s \n"
                " b [1] failed \n"
                " b [2] 4.00s \n"
                " c [1] 5.00s \n"
                " c [2] n/a \n"
                "======== ======== ========")
    table = "\n".join(benchmarks._format_benchmark_result(result, benchmark, max_width=0))
    assert table == expected
def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)
    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass
    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        # The generated benchmark itself asserts that a sibling-of-suite
        # module cannot be imported; importing it would raise.
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass
def track_this():
    return 0
""")
    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")
    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)
    envs = list(environment.get_environments(conf, None))
    # discovery succeeds (and finds exactly the one benchmark) only if the
    # poison module above the suite was NOT imported
    b = benchmarks.Benchmarks(conf, envs, regex='track_this')
    assert len(b) == 1
|
{
"content_hash": "630d8a0a31162cebaf9e53378a857f21",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 123,
"avg_line_length": 36.55605381165919,
"alnum_prop": 0.568081452404318,
"repo_name": "mdboom/asv",
"id": "5d98448db69acfdc1f45a31e47f3ded08b168e67",
"size": "8241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_benchmarks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2261"
},
{
"name": "HTML",
"bytes": "6741"
},
{
"name": "JavaScript",
"bytes": "61984"
},
{
"name": "Python",
"bytes": "290150"
},
{
"name": "Shell",
"bytes": "414"
}
],
"symlink_target": ""
}
|
from .fakedb import List, Todo, list
from .app import App
from .model import Root
@App.json(model=Root)
def view_root(self, request):
    """Expose the API root: a link to the todo collection."""
    return {
        "collection": request.link(list),
    }
@App.json(model=List)
def view_list(self, request):
    """Render every todo in the collection through its own JSON view."""
    rendered = []
    for item in self.get_all():
        rendered.append(request.view(item))
    return {"todos": rendered}
@App.json(model=List, request_method="POST")
def add_todo(self, request):
    """Create a todo from the request body; answer 201 with its JSON view."""
    body = request.json
    new_todo = Todo(body["title"], body["completed"])
    self.add(new_todo)

    @request.after
    def set_created(response):
        # Signal resource creation to the client.
        response.status = 201

    return request.view(new_todo)
@App.json(model=Todo)
def view_todo(self, request):
    """JSON representation of a single todo item."""
    return {
        "@id": request.link(self),
        "title": self.title,
        "completed": self.completed,
    }
@App.json(model=Todo, request_method="PUT")
def change_todo(self, request):
    """Overwrite the todo's title and completion state from the body."""
    body = request.json
    self.title = body["title"]
    self.completed = body["completed"]
    return request.view(self)
@App.json(model=Todo, request_method="DELETE")
def delete_todo(self, request):
    # Remove this todo from the module-level `list` collection (imported
    # from fakedb — it shadows the builtin ``list``), then return the
    # collection view so the client sees the updated set of todos.
    list.remove(self.id)
    return request.view(list)
|
{
"content_hash": "1e29f960d59838298d50494f36e142ba",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 88,
"avg_line_length": 24.266666666666666,
"alnum_prop": 0.6813186813186813,
"repo_name": "morepath/morepath_cerebral_todomvc",
"id": "f6c7bb1a379fe89ff58ce211e224cb0c3351cbe3",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "219"
},
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "JavaScript",
"bytes": "1141130"
},
{
"name": "Python",
"bytes": "6697"
}
],
"symlink_target": ""
}
|
# Example from the numpy.random.standard_t docs: a one-sample t-test of
# daily energy intake (kJ) against the recommended 7725 kJ, estimating the
# p-value from draws of a Student's t distribution.
# BUG FIX: numpy was used without being imported.
import numpy as np

intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \
                   7515, 8230, 8770])

# Does their energy intake deviate systematically from the recommended
# value of 7725 kJ?

# We have 10 degrees of freedom, so is the sample mean within 95% of the
# recommended value?

s = np.random.standard_t(10, size=100000)
np.mean(intake)
# 6753.636363636364

intake.std(ddof=1)
# 1142.1232221373727

# Calculate the t statistic, setting the ddof parameter to the unbiased
# value so the divisor in the standard deviation will be degrees of
# freedom, N-1.

t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
import matplotlib.pyplot as plt
# NOTE(review): `normed=True` was removed in recent matplotlib; use
# `density=True` if this example targets matplotlib >= 3.1.
h = plt.hist(s, bins=100, normed=True)

# For a one-sided t-test, how far out in the distribution does the t
# statistic appear?
# BUG FIX: this line was a doctest prompt (">>> ...") pasted into a .py
# file, which is a SyntaxError; compute the p-value as a statement instead.
p = np.sum(s < t) / float(len(s))
# 0.0090699999999999999  #random

# The p-value is about 0.009: if the true mean intake were 7725 kJ, a
# sample mean this far below it would occur only ~0.9% of the time, so we
# reject the null hypothesis at the usual 5% level.  (The original
# comment's claim that the null hypothesis had "a probability of about 99%
# of being true" misread the p-value.)
|
{
"content_hash": "d915e2b6a2df2e889475b0d003108917",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 31.64516129032258,
"alnum_prop": 0.7135575942915392,
"repo_name": "leesavide/pythonista-docs",
"id": "7b5b7a955667e5fd845d1fe38d7c3505f86e9f57",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Documentation/numpy/reference/generated/numpy-random-standard_t-1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84392"
},
{
"name": "HTML",
"bytes": "70040156"
},
{
"name": "JavaScript",
"bytes": "89777"
},
{
"name": "Python",
"bytes": "884325"
}
],
"symlink_target": ""
}
|
"""Utilities for debugging garbage collection, leaked memory, etc."""
import gc
def get_objects_by_typename():
    """Collect garbage, then bucket every GC-tracked object by type name.

    Returns a dict mapping type name -> list of live objects of that type.
    """
    gc.collect()
    by_name = {}
    for live_obj in gc.get_objects():
        by_name.setdefault(type(live_obj).__name__, []).append(live_obj)
    return by_name
def get_objects_by_livecount(obj_dict=None):
    """Return [(count, type_name), ...] sorted ascending by live-object count.

    If *obj_dict* is missing (or empty/falsy) it is built from the current
    GC-tracked objects; otherwise gc.collect() is still run so the caller's
    mapping is interpreted against a post-collection heap.
    """
    if not obj_dict:
        obj_dict = get_objects_by_typename()
    else:
        gc.collect()
    # BUG FIX: dict.iteritems() is Python-2-only; items() is equivalent
    # here (we only iterate the pairs) and works on both Python 2 and 3.
    lst = [(len(val), k) for (k, val) in obj_dict.items()]
    lst.sort()
    return lst
def get_referrers(obj, path=None):
    """Return the referrers of *obj*, optionally walking *path* edges.

    Each entry in *path* is an index into the previous referrer list; that
    referrer's own referrers are fetched next.
    """
    referrers = gc.get_referrers(obj)
    for edge in (path or []):
        referrers = gc.get_referrers(referrers[edge])
    return referrers
def count_object_by_name(name):
    """Return how many live objects have type name *name* (0 if none)."""
    for count, type_name in get_objects_by_livecount():
        if type_name == name:
            return count
    return 0
|
{
"content_hash": "322b9224901b9712b1e2519d246970e8",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 69,
"avg_line_length": 24.157894736842106,
"alnum_prop": 0.5762527233115469,
"repo_name": "celiafish/VisTrails",
"id": "172130c446b5b99392ef578271a1a22ff5835118",
"size": "2799",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/core/utils/gcutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from builtins import object
class VarImpl(object):
    """Implement the Var function used within the DEPS file."""

    def __init__(self, local_scope, custom_scope):
        """Remember the DEPS-local scope and the caller-supplied overrides."""
        self._local_scope = local_scope
        self._custom_scope = custom_scope

    def Lookup(self, var_name):
        """Resolve Var(var_name): overrides first, then the DEPS 'vars' dict."""
        if var_name in self._custom_scope:
            return self._custom_scope[var_name]
        deps_vars = self._local_scope.get('vars', {})
        if var_name in deps_vars:
            return deps_vars[var_name]
        raise Exception('Var is not defined: %s' % var_name)
def GetDepsContent(deps_path, text=None, return_dict=False, git_url=None):
    """Read a DEPS file and return all the sections.

    Args:
        deps_path: Path to a DEPS file, or falsy to use *text* instead.
        text: DEPS file contents, used when *deps_path* is falsy.
        return_dict: If True, return the whole local scope dict instead of
            the (deps, hooks, vars, recursion) tuple.
        git_url: Base git URL; when given, exposed to Var() lookups as
            'git_url' and 'webkit_url'.

    Returns:
        local_scope dict, or a (deps, hooks, vars, recursion) tuple.
    """
    if deps_path:
        # BUG FIX: the file handle was never closed, and mode 'rU' was
        # removed in Python 3.11.  A with-statement and plain 'r'
        # (universal newlines are the default text-mode behaviour on
        # Python 3) fix both.
        with open(deps_path, 'r') as deps_file:
            content = deps_file.read()
    else:
        content = text
    custom_scope = {}
    local_scope = {}
    if git_url:
        custom_scope['git_url'] = git_url
        custom_scope['webkit_url'] = git_url+"/chromium/blink.git"
    var = VarImpl(local_scope, custom_scope)
    global_scope = {
        'Var': var.Lookup,
        'deps': {},
        'hooks': [],
    }
    # SECURITY NOTE: this exec()s the DEPS content.  DEPS files are trusted
    # checkout inputs, but never route untrusted data through this function.
    exec(content, global_scope, local_scope)
    local_scope.setdefault('deps', {})
    local_scope.setdefault('hooks', [])
    local_scope.setdefault('vars', {})
    local_scope.setdefault('recursion', None)
    if return_dict:
        return local_scope
    return (local_scope['deps'],
            local_scope['hooks'], local_scope['vars'],
            local_scope['recursion'])
|
{
"content_hash": "c8070e9736df78643fa1ae9652bde57e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 30.53191489361702,
"alnum_prop": 0.6390243902439025,
"repo_name": "ric2b/Vivaldi-browser",
"id": "b3c30afdbe2f249417981a31a59465dab6daee27",
"size": "1620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/read_deps_file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from oslo_utils import excutils
import six
from sahara import conductor as c
from sahara import context
from sahara.plugins import base as plugin_base
from sahara.service import api
from sahara.service.health import verification_base
from sahara.service import quotas
from sahara.utils import cluster as c_u
from sahara.utils import general as g
from sahara.utils.notification import sender
conductor = c.API
# Cluster ops
def get_clusters(**kwargs):
    """Return all clusters matching *kwargs*, with regex-based searching."""
    ctx = context.ctx()
    return conductor.cluster_get_all(ctx, regex_search=True, **kwargs)
def get_cluster(id, show_progress=False):
    """Fetch one cluster by id, optionally including provisioning progress."""
    ctx = context.ctx()
    return conductor.cluster_get(ctx, id, show_progress)
def scale_cluster(id, data):
    """Resize existing node groups and/or add new ones to a cluster.

    :param id: cluster id.
    :param data: dict with optional 'resize_node_groups' (entries with
        'name', 'count' and optionally 'instances' for existing groups) and
        'add_node_groups' (templates for brand-new groups).
    :returns: the cluster object after validation; the actual scaling runs
        asynchronously via api.OPS.provision_scaled_cluster.
    """
    context.set_current_cluster_id(id)
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])
    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    # maps node_group_id -> specific instances requested for that group
    node_group_instance_map = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})
        if 'instances' in ng:
            node_group_instance_map.update({ng_id: ng['instances']})
    # new groups are persisted with count 0; their desired counts come back
    # in `additional` keyed by the freshly created node-group ids
    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    # re-read the cluster so it includes the node groups just added
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)
    try:
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_VALIDATING)
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.recommend_configs(cluster, scaling=True)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        # roll back: drop the empty groups created above, restore Active
        # status with the failure reason, then re-raise for the caller
        with excutils.save_and_reraise_exception():
            c_u.clean_cluster_from_empty_ng(cluster)
            c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e))
    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)
    # groups that are not being resized keep their current counts
    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count
    api.OPS.provision_scaled_cluster(id, to_be_enlarged,
                                     node_group_instance_map)
    return cluster
def create_cluster(values):
    """Create a single cluster from the given request values."""
    plugin_name = values['plugin_name']
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    return _cluster_create(values, plugin)
def create_multiple_clusters(values):
    """Create ``values['count']`` identically configured clusters.

    Each cluster gets a zero-padded numeric suffix appended to the base name.
    Returns ``{'clusters': [<wrapped cluster dict>, ...]}``.
    """
    total = values['count']
    plugin = plugin_base.PLUGINS.get_plugin(values['plugin_name'])
    created = []
    for index in range(1, total + 1):
        request = values.copy()
        request['name'] = get_multiple_cluster_name(
            total, values['name'], index)
        created.append(_cluster_create(request, plugin).to_wrapped_dict())
    return {'clusters': created}
def _cluster_create(values, plugin):
    """Persist a new cluster, validate it, and kick off async provisioning.

    On validation failure the cluster is moved to the Error state (with the
    exception text as the reason) and the exception is re-raised.
    """
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    context.set_current_cluster_id(cluster.id)
    sender.status_notify(cluster.id, cluster.name, "New",
                         "create")
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # validating cluster
    try:
        plugin.recommend_configs(cluster)
        cluster = c_u.change_cluster_status(
            cluster, c_u.CLUSTER_STATUS_VALIDATING)
        plugin.validate(cluster)
        quotas.check_cluster(cluster)
    except Exception as e:
        # mark the cluster as Error (keeping the reason) and re-raise
        with excutils.save_and_reraise_exception():
            c_u.change_cluster_status(
                cluster, c_u.CLUSTER_STATUS_ERROR, six.text_type(e))

    api.OPS.provision_cluster(cluster.id)
    return cluster
def get_multiple_cluster_name(num_of_clusters, name, counter):
    """Build a numbered cluster name like ``base-07``.

    The counter is zero-padded to the width of the total cluster count so
    names sort lexicographically.
    """
    width = len(str(num_of_clusters))
    return '{0}-{1:0{2}d}'.format(name, counter, width)
def _add_ports_for_auto_sg(ctx, cluster, plugin):
    """Record the plugin's open ports on node groups using auto security groups."""
    for group in cluster.node_groups:
        if not group.auto_security_group:
            continue
        open_ports = {'open_ports': plugin.get_open_ports(group)}
        conductor.node_group_update(ctx, group, open_ports)
def terminate_cluster(id, force=False):
    """Asynchronously delete a cluster, notifying listeners of the change.

    Does nothing when the status transition to Deleting yields no cluster.
    """
    context.set_current_cluster_id(id)
    cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING)
    if cluster is None:
        return
    api.OPS.terminate_cluster(id, force)
    sender.status_notify(
        cluster.id, cluster.name, cluster.status, "delete")
def update_cluster(id, values):
    """Update cluster fields, handling keypair refresh and verification flags.

    The 'update_keypair' flag is consumed here and never stored on the
    cluster itself.
    """
    if values.pop("update_keypair", False):
        api.OPS.update_keypair(id)
    if verification_base.update_verification_required(values):
        api.OPS.handle_verification(id, values)
        return conductor.cluster_get(context.ctx(), id)
    return conductor.cluster_update(context.ctx(), id, values)
def construct_ngs_for_scaling(cluster, additional_node_groups):
    """Persist new node groups with zero instances.

    Returns a mapping of the freshly created node-group ids to the instance
    counts that were originally requested for them.
    """
    ctx = context.ctx()
    counts_by_id = {}
    for group in additional_node_groups:
        requested = group['count']
        # store the group empty; actual instances are added during scaling
        group['count'] = 0
        new_id = conductor.node_group_add(ctx, cluster, group)
        counts_by_id[new_id] = requested
    return counts_by_id
|
{
"content_hash": "34e0ec638c1d67fbde9ce2572afaad4d",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 75,
"avg_line_length": 33.642857142857146,
"alnum_prop": 0.6456121726822364,
"repo_name": "openstack/sahara",
"id": "6e6db0264e5c59483847c0e349c9e5dccc82c87b",
"size": "6235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/service/api/v2/clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "2197746"
},
{
"name": "Shell",
"bytes": "37893"
}
],
"symlink_target": ""
}
|
from gppylib.mainUtils import *
import os, sys, time, signal
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gplog
from gppylib.commands import base
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands.gp import SEGMENT_STOP_TIMEOUT_DEFAULT
from gppylib.commands import pg
from gppylib.db import catalog
from gppylib.db import dbconn
from gppylib import pgconf
from gppylib.gpcoverage import GpCoverage
from gppylib.commands.gp import is_pid_postmaster
description = ("""
This utility is NOT SUPPORTED and is for internal-use only.
stops a set of one or more segment databases.
""")
logger = gplog.get_default_logger()
#-------------------------------------------------------------------------
class SegStopStatus:
    """Result record describing one segment stop attempt."""

    def __init__(self, datadir, stopped, reason):
        # Segment data directory, whether it actually stopped, and why.
        self.datadir = datadir
        self.stopped = stopped
        self.reason = reason

    def __str__(self):
        return "STATUS--DIR:{0}--STOPPED:{1}--REASON:{2}".format(
            self.datadir, self.stopped, self.reason)
class SegStop(base.Command):
    """Stop one Greenplum segment instance on the local host.

    Wraps gp.SegmentStop and, if the normal shutdown does not succeed,
    escalates to signal-based termination of the postmaster process.
    """

    def __init__(self, name, db, mode, timeout):
        # `db` is a colon-delimited segment descriptor whose first two
        # fields are the data directory and port (see get_datadir_and_port).
        self.name = name
        self.db = db
        self.mode = mode
        self.timeout = timeout
        self.result = None
        self.port = None
        self.datadir = None
        self.logger = logger
        base.Command.__init__(self, name=self.name, cmdStr='Stop an individual segment on the host', ctxt=None, remoteHost=None)

    def get_datadir_and_port(self):
        # First two colon-separated fields of the descriptor.
        return self.db.split(':')[0:2]

    def get_results(self):
        # SegStopStatus set by run(), or None if run() has not completed.
        return self.result

    def run(self):
        """Attempt the shutdown; on failure escalate to signals.

        Always stores (and returns) a SegStopStatus in self.result.
        """
        try:
            self.datadir, self.port = self.get_datadir_and_port()
            cmd = gp.SegmentStop('segment shutdown', self.datadir, mode=self.mode, timeout=self.timeout)
            cmd.run()
            results = cmd.get_results()
            is_shutdown = False
            if results.rc == 0:
                # verify the segment really reports itself as shut down
                cmd = gp.SegmentIsShutDown('check if shutdown', self.datadir)
                cmd.run()
                if cmd.is_shutdown():
                    status = SegStopStatus(self.datadir, True, "Shutdown Succeeded")
                    self.result = status
                    is_shutdown = True
                # MPP-16171
                #
                elif self.mode == 'immediate':
                    # immediate mode does not wait, so a clean rc is accepted
                    status = SegStopStatus(self.datadir, True, "Shutdown Immediate")
                    self.result = status
                    is_shutdown = True
            # read pid and datadir from /tmp/.s.PGSQL.<port>.lock file
            name = "failed segment '%s'" % self.db
            (succeeded, mypid, file_datadir) = pg.ReadPostmasterTempFile.local(name,self.port).getResults()
            if not is_shutdown:
                if succeeded and file_datadir == self.datadir:
                    # now try to terminate the process, first trying with
                    # SIGTERM and working our way up to SIGABRT sleeping
                    # in between to give the process a moment to exit
                    #
                    unix.kill_sequence(mypid)
                    if not unix.check_pid(mypid):
                        # process is gone; clean up its stale lock file
                        lockfile = "/tmp/.s.PGSQL.%s" % self.port
                        if os.path.exists(lockfile):
                            self.logger.info("Clearing segment instance lock files")
                            os.remove(lockfile)
                status = SegStopStatus(self.datadir, True, "Forceful termination success: rc: %d stdout: %s stderr: %s." % (results.rc,results.stdout,results.stderr))
                try:
                    # last resort: SIGKILL any remaining segment processes
                    unix.kill_9_segment_processes(self.datadir, self.port, mypid)
                    # NOTE(review): "termnation" typo below is in a runtime
                    # message; left untouched to preserve output verbatim.
                    if unix.check_pid(mypid) and mypid != -1:
                        status = SegStopStatus(self.datadir, False, "Failed forceful termnation: rc: %d stdout: %s stderr: %s." % (results.rc,results.stdout,results.stderr))
                    self.result = status
                except Exception as e:
                    logger.error('Failed forceful termination of segment %s: (%s)' % (self.datadir, str(e)))
                    self.result = SegStopStatus(self.datadir,False,'Failed forceful termination of segment! (%s)' % str(e))
            return self.result
        except Exception as e:
            logger.exception(e)
            self.result = SegStopStatus(self.datadir,False,'Shutdown failed! %s' % str(e))
            return self.result
#-------------------------------------------------------------------------
class GpSegStop:
    """Driver that stops a list of local segment databases in parallel."""

    ######
    def __init__(self,dblist,mode,gpversion,timeout=SEGMENT_STOP_TIMEOUT_DEFAULT):
        self.dblist=dblist
        self.mode=mode
        self.expected_gpversion=gpversion
        self.timeout=timeout
        self.gphome=os.path.abspath(os.pardir)
        self.actual_gpversion=gp.GpVersion.local('local GP software version check',self.gphome)
        # Refuse to run if the installed software differs from what the
        # caller expects -- mixed versions are unsafe.
        if self.actual_gpversion != self.expected_gpversion:
            raise Exception("Local Software Version does not match what is expected.\n"
                            "The local software version is: '%s'\n"
                            "But we were expecting it to be: '%s'\n"
                            "Please review and correct" % (self.actual_gpversion,self.expected_gpversion))
        self.logger = logger
        self.pool = None

    ######
    def run(self):
        """Run one SegStop per segment via a worker pool.

        Returns 0 when every segment stopped, 1 otherwise.
        """
        results = []
        failures = []
        self.logger.info("Issuing shutdown commands to local segments...")
        self.pool = base.WorkerPool()
        for db in self.dblist:
            cmd = SegStop('segment shutdown', db=db, mode=self.mode, timeout=self.timeout)
            self.pool.addCommand(cmd)
        self.pool.join()
        failed = False
        for cmd in self.pool.getCompletedItems():
            result = cmd.get_results()
            if not result.stopped:
                failed = True
            results.append(result)
        #Log the results!
        status = '\nCOMMAND RESULTS\n'
        for result in results:
            status += str(result) + "\n"
        self.logger.info(status)
        return 1 if failed else 0

    ######
    def cleanup(self):
        # Halt any workers still running (e.g. on early exit or signal).
        if self.pool:
            self.pool.haltWork()

    @staticmethod
    def createParser():
        """Build the command-line option parser for this tool."""
        parser = OptParser(option_class=OptChecker,
                    description=' '.join(description.split()),
                    version='%prog version $Revision: #12 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)

        parser.add_option("-D","--db",dest="dblist", action="append", type="string")
        parser.add_option("-V", "--gp-version", dest="gpversion",metavar="GP_VERSION",
                          help="expected software version")
        parser.add_option("-m", "--mode", dest="mode",metavar="<MODE>",
                          help="how to shutdown. modes are smart,fast, or immediate")
        parser.add_option("-t", "--timeout", dest="timeout", type="int", default=SEGMENT_STOP_TIMEOUT_DEFAULT,
                          help="seconds to wait")
        return parser

    @staticmethod
    def createProgram(options, args):
        # Factory used by simple_main to build the program from parsed options.
        return GpSegStop(options.dblist,options.mode,options.gpversion,options.timeout)
#-------------------------------------------------------------------------
# Script entry point: parse CLI options and stop the requested local segments.
if __name__ == '__main__':
    mainOptions = { 'setNonuserOnToolLogger':True}
    simple_main( GpSegStop.createParser, GpSegStop.createProgram, mainOptions)
|
{
"content_hash": "faed1f64b827215f5d0fb488417f1ea3",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 169,
"avg_line_length": 39.11794871794872,
"alnum_prop": 0.5697430519140011,
"repo_name": "foyzur/gpdb",
"id": "604a59788b4e97c4e950f2c5480f9942fc9d553f",
"size": "7830",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gpMgmt/sbin/gpsegstop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11532"
},
{
"name": "C",
"bytes": "34176888"
},
{
"name": "C++",
"bytes": "4798415"
},
{
"name": "CMake",
"bytes": "28254"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Cucumber",
"bytes": "896816"
},
{
"name": "DTrace",
"bytes": "1154"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "Groff",
"bytes": "601878"
},
{
"name": "HTML",
"bytes": "340701"
},
{
"name": "Java",
"bytes": "943457"
},
{
"name": "Lex",
"bytes": "202575"
},
{
"name": "M4",
"bytes": "94554"
},
{
"name": "Makefile",
"bytes": "463264"
},
{
"name": "Objective-C",
"bytes": "7388"
},
{
"name": "PLSQL",
"bytes": "174787"
},
{
"name": "PLpgSQL",
"bytes": "47986854"
},
{
"name": "Perl",
"bytes": "778165"
},
{
"name": "Python",
"bytes": "5461340"
},
{
"name": "Ruby",
"bytes": "3283"
},
{
"name": "SQLPL",
"bytes": "122363"
},
{
"name": "Shell",
"bytes": "471931"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "471668"
}
],
"symlink_target": ""
}
|
'''
Video Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of a VideoBase with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`
This player is the preferred player, using Gstreamer 1.0, and works on both
Python 2 and 3.
'''
from kivy.lib.gstplayer import GstPlayer, get_gst_version
from kivy.graphics.texture import Texture
from kivy.core.video import VideoBase
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.compat import PY2
from threading import Lock
from functools import partial
from os.path import realpath
from weakref import ref
# ``pathname2url`` moved between Python 2 and 3; import the right location.
if PY2:
    from urllib import pathname2url
else:
    from urllib.request import pathname2url

# Log the Gstreamer version once at import time so it shows up in Kivy's log.
Logger.info('VideoGstplayer: Using Gstreamer {}'.format(
    '.'.join(map(str, get_gst_version()))))
def _on_gstplayer_buffer(video, width, height, data):
video = video()
# if we still receive the video but no more player, remove it.
if not video:
return
with video._buffer_lock:
video._buffer = (width, height, data)
def _on_gstplayer_message(mtype, message):
    """Forward a GstPlayer message to the Kivy logger at the matching level."""
    log_for_type = {
        'error': Logger.error,
        'warning': Logger.warning,
        'info': Logger.info,
    }
    log = log_for_type.get(mtype)
    # Unknown message types are silently ignored.
    if log is not None:
        log('VideoGstplayer: {}'.format(message))
class VideoGstplayer(VideoBase):
    """VideoBase implementation backed by kivy.lib.gstplayer (Gstreamer 1.0)."""

    def __init__(self, **kwargs):
        self.player = None
        # Latest decoded frame as (width, height, rgb_bytes); written from
        # the Gstreamer callback thread, hence the lock.
        self._buffer = None
        self._buffer_lock = Lock()
        super(VideoGstplayer, self).__init__(**kwargs)

    def _on_gst_eos_sync(self):
        # Called from the Gstreamer thread; bounce end-of-stream handling
        # onto the Kivy main thread via the Clock.
        Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Create the underlying GstPlayer for the current filename."""
        Logger.debug('VideoGstplayer: Load <{}>'.format(self._filename))
        uri = self._get_uri()
        # A weakref keeps the player callback from holding this object alive.
        wk_self = ref(self)
        self.player_callback = partial(_on_gstplayer_buffer, wk_self)
        self.player = GstPlayer(uri, self.player_callback,
                                self._on_gst_eos_sync, _on_gstplayer_message)
        self.player.load()

    def unload(self):
        """Tear down the player and discard any pending frame/texture."""
        if self.player:
            self.player.unload()
            self.player = None
        with self._buffer_lock:
            self._buffer = None
        self._texture = None

    def stop(self):
        super(VideoGstplayer, self).stop()
        self.player.stop()

    def pause(self):
        super(VideoGstplayer, self).pause()
        self.player.pause()

    def play(self):
        super(VideoGstplayer, self).play()
        # Re-apply the volume in case it changed while no player existed.
        self.player.set_volume(self.volume)
        self.player.play()

    def seek(self, percent):
        # percent is a 0..1 position fraction passed through to GstPlayer.
        self.player.seek(percent)

    def _get_position(self):
        return self.player.get_position()

    def _get_duration(self):
        return self.player.get_duration()

    def _set_volume(self, value):
        self._volume = value
        if self.player:
            self.player.set_volume(self._volume)

    def _update(self, dt):
        # Consume the most recent frame (if any) and publish it as a texture.
        buf = None
        with self._buffer_lock:
            buf = self._buffer
            self._buffer = None
        if buf is not None:
            self._update_texture(buf)
            self.dispatch('on_frame')

    def _update_texture(self, buf):
        """Copy an RGB frame into the (lazily created) texture."""
        width, height, data = buf

        # texture is not allocated yet, create it first
        if not self._texture:
            self._texture = Texture.create(size=(width, height),
                                           colorfmt='rgb')
            self._texture.flip_vertical()
            self.dispatch('on_load')

        if self._texture:
            self._texture.blit_buffer(
                data, size=(width, height), colorfmt='rgb')

    def _get_uri(self):
        # Turn a bare filesystem path into a file: URI; anything already
        # containing '://' is passed through untouched.
        uri = self.filename
        if not uri:
            return
        if not '://' in uri:
            uri = 'file:' + pathname2url(realpath(uri))
        return uri
|
{
"content_hash": "21f25db818ba60e1e5f4a0f9836a8812",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 28.350746268656717,
"alnum_prop": 0.5991050276388523,
"repo_name": "andnovar/kivy",
"id": "8ecd008b35d698f3f5faa1a536543bb4222cc3d4",
"size": "3799",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "kivy/core/video/video_gstplayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "329293"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4201"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3570798"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import shutil
from .common import BASE_DIR
try:
from jinja2 import Environment
from jinja2 import FileSystemLoader
HAS_JINJA = True
except ImportError:
HAS_JINJA = False
if HAS_JINJA:
JINJA_ENV = Environment(
loader=FileSystemLoader(BASE_DIR + '/devtools/stubs')
)
def module_file_present(module):
    """Return True (and print a notice) when the module's source file exists."""
    path = '{0}/ansible_collections/f5networks/f5_modules/plugins/modules/{1}.py'.format(
        BASE_DIR, module
    )
    exists = os.path.exists(path)
    if exists:
        print('Module file "{0}" exists'.format(path))
    return exists
def module_file_absent(module):
    """Return True when the module's source file does not exist."""
    return not module_file_present(module)
def stub_roles_dirs(module):
    """Create the integration-test role skeleton directories for *module*."""
    for subdir in ('defaults', 'tasks', 'meta'):
        path = '{0}/test/integration/targets/{1}/{2}'.format(
            BASE_DIR, module, subdir
        )
        if not os.path.exists(path):
            os.makedirs(path)
def stub_roles_yaml_files(module):
    """Create the YAML files that make up the module's integration role."""
    role_root = '{0}/test/integration/targets/{1}'.format(BASE_DIR, module)
    # Empty main.yaml for role defaults and tasks.
    for section in ('defaults', 'tasks'):
        touch('{0}/{1}/main.yaml'.format(role_root, section))
    # Empty setup/teardown task files.
    for task_name in ('setup.yaml', 'teardown.yaml'):
        touch('{0}/tasks/{1}'.format(role_root, task_name))
    # Render the main task list from its template.
    main_tasks = '{0}/tasks/main.yaml'.format(role_root)
    rendered = JINJA_ENV.get_template('test_integration_targets_main.yaml').render()
    with open(main_tasks, 'w') as fh:
        fh.write(rendered)
    stub_meta_main_file(module)
def stub_meta_main_file(module):
    """Render the role's meta/main.yml from its template.

    The previous implementation touch()ed the file first, but the file is
    immediately rewritten below, so that redundant step is removed. Writing
    through a ``with`` block guarantees the handle is closed on error.
    """
    main_meta = '{0}/test/integration/targets/{1}/meta/main.yml'.format(BASE_DIR, module)
    template = JINJA_ENV.get_template('test_meta_main.yml')
    content = template.render()
    with open(main_meta, 'w') as fh:
        fh.write(content)
def stub_playbook_file(module):
    """Render the module's integration-test playbook from its template.

    Uses a ``with`` block so the file handle is closed even if the write
    fails (the original explicit open/close leaked the handle on error).
    """
    playbook_file = '{0}/test/integration/{1}.yaml'.format(BASE_DIR, module)
    template = JINJA_ENV.get_template('playbooks_module.yaml')
    content = template.render(module=module)
    with open(playbook_file, 'w') as fh:
        fh.write(content)
def stub_library_file(module, extension):
    """Render the new module's python source file from its template.

    Uses a ``with`` block so the file handle is closed even if the write
    fails (the original explicit open/close leaked the handle on error).
    """
    library_file = '{0}/ansible_collections/f5networks/f5_modules/plugins/modules/{1}{2}'.format(
        BASE_DIR, module, extension
    )
    template = JINJA_ENV.get_template('library_module.py')
    content = template.render(module=module)
    with open(library_file, 'w') as fh:
        fh.write(content)
def touch(name, times=None):
    """Create *name* if missing and update its timestamps (like unix touch).

    *times* is forwarded to os.utime: an (atime, mtime) tuple, or None to
    use the current time.
    """
    handle = open(name, 'a')
    try:
        os.utime(name, times)
    finally:
        handle.close()
def stub_unit_test_file(module, extension):
    """Render the module's unit-test file from its template.

    Creates the unit-test directory if needed. Uses a ``with`` block so the
    file handle is closed even if the write fails (the original explicit
    open/close leaked the handle on error).
    """
    test_dir_path = '{0}/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/'.format(BASE_DIR)
    if not os.path.exists(test_dir_path):
        os.makedirs(test_dir_path)
    test_file = '{0}/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_{1}{2}'.format(
        BASE_DIR, module, extension
    )
    template = JINJA_ENV.get_template('tests_unit_module.py')
    content = template.render(module=module)
    with open(test_file, 'w') as fh:
        fh.write(content)
def unstub_meta_yml_files(module):
    """Delete the role's meta/main.yml if present."""
    target = '{0}/test/integration/targets/{1}/meta/main.yml'.format(
        BASE_DIR, module
    )
    if os.path.exists(target):
        os.remove(target)
def unstub_roles_yaml_files(module):
    """Remove the YAML files created when the role was stubbed."""
    role_root = '{0}/test/integration/targets/{1}'.format(BASE_DIR, module)
    for section in ('defaults', 'tasks'):
        main_yaml = '{0}/{1}/main.yaml'.format(role_root, section)
        if os.path.exists(main_yaml):
            os.remove(main_yaml)
    unstub_meta_yml_files(module)
    for task_name in ('setup.yaml', 'teardown.yaml'):
        task_yaml = '{0}/tasks/{1}'.format(role_root, task_name)
        if os.path.exists(task_yaml):
            os.remove(task_yaml)
    # tasks/main.yaml was already removed above, so this is normally a no-op;
    # kept so the removal set matches the stub side exactly.
    leftover = '{0}/tasks/main.yaml'.format(role_root)
    if os.path.exists(leftover):
        os.remove(leftover)
def unstub_roles_dirs(module):
    """Delete the role's skeleton directories and everything inside them."""
    for subdir in ('defaults', 'tasks', 'meta'):
        path = '{0}/test/integration/targets/{1}/{2}'.format(
            BASE_DIR, module, subdir
        )
        if os.path.exists(path):
            shutil.rmtree(path)
def unstub_playbook_file(module):
    """Delete the module's integration-test playbook if present."""
    target = '{0}/test/integration/{1}.yaml'.format(BASE_DIR, module)
    if os.path.exists(target):
        os.remove(target)
def unstub_library_file(module, extension):
    """Delete the module's stubbed source file if present."""
    target = '{0}/ansible_collections/f5networks/f5_modules/plugins/modules/{1}{2}'.format(
        BASE_DIR, module, extension
    )
    if os.path.exists(target):
        os.remove(target)
def unstub_unit_test_file(module, extension):
    """Delete the module's stubbed unit-test file.

    NOTE(review): this also creates the unit-test directory when missing,
    which is surprising for an "unstub" operation, but that behavior is
    preserved here.
    """
    unit_dir = '{0}/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/'.format(BASE_DIR)
    if not os.path.exists(unit_dir):
        os.makedirs(unit_dir)
    target = '{0}test_{1}{2}'.format(unit_dir, module, extension)
    if os.path.exists(target):
        os.remove(target)
|
{
"content_hash": "0a4db7ee4c5d2a8087c6bd53e666ec1d",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 125,
"avg_line_length": 32.323699421965316,
"alnum_prop": 0.6623748211731044,
"repo_name": "F5Networks/f5-ansible-modules",
"id": "f6c4acdb7b86cc113f0c9aa5724021575927faa3",
"size": "5767",
"binary": false,
"copies": "2",
"ref": "refs/heads/doc-update",
"path": "tasks/lib/stubber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "345682"
}
],
"symlink_target": ""
}
|
"""Hello, Python 3!
Welcome to Python version 3, a very fun language
to use and learn!
Here we have a simple "Hello World!" program. All
you have to do is print, and you have output. Try
running it now, either by clicking *Run*, or
pressing *Shift-Enter*.
What happened? This tutorial contains a *Python 3
interpreter*. It starts at the top of your program
(or _script_) and does what you tell it to until
it reaches the bottom. Here, we have told it to
do exactly one thing: **print** a **string** (text
surrounded by quotation marks) to the output
window, and it has.
The word |print| is a special function in Python 3.
It instructs the interpreter to output what you
tell it to. In this tutorial, we capture that
output in the window below the code so that you
can easily see it.
We will get very comfortable with this as the tutorial goes on. Meanwhile, let's talk about the tutorial itself:
- The Table of Contents is above, marked *TOC*.
- *Page-Up* and *Page-Down* keys can be used to navigate.
- Code can be run with the *Run* button or *Shift-Enter*.
- Check out the options in the *Run* menu (the arrow). Among other things,
you can see what you have changed from the original slide. The tutorial
will try to remember those changes for a long time.
Exercises
- Try removing each of the quotation marks in
turn. What happens?
- Change the string to say hello specifically to you.
- Print 'Hello, Python!' using two strings instead
of one, like this: |print('Hello', 'Python!')|.
What did |print| do for you automatically?
"""
# The tutorial's one executable statement: print a greeting to the output.
print('Hello, Python!')
|
{
"content_hash": "7021d8f00d5531ff48260a28d1aa41c0",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 112,
"avg_line_length": 32.30612244897959,
"alnum_prop": 0.7359444093493367,
"repo_name": "shiblon/pytour",
"id": "2a79ca2ee7b1c23adc0095403b0f59a46cf635fe",
"size": "1596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3/tutorials/hello.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relaxes Plugin.max_gpu_limit so it
    # may be NULL/blank (default stays 0). Do not edit the operations below.

    dependencies = [
        ('plugins', '0037_auto_20200518_1959'),
    ]

    operations = [
        migrations.AlterField(
            model_name='plugin',
            name='max_gpu_limit',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
|
{
"content_hash": "727edec432a4eb281784152f80b7927f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 22.875,
"alnum_prop": 0.5846994535519126,
"repo_name": "FNNDSC/ChRIS_ultron_backEnd",
"id": "07b77377c7c1d89a6b586b3a539f19bdfb9c1224",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chris_backend/plugins/migrations/0038_auto_20200610_1922.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3051"
},
{
"name": "HTML",
"bytes": "2839"
},
{
"name": "JavaScript",
"bytes": "262"
},
{
"name": "Python",
"bytes": "978019"
},
{
"name": "Shell",
"bytes": "74679"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
import requests
import re
class Scrapper:
    """Fetch a web page, returning its visible text and outbound links."""

    def __init__(self):
        pass

    def visible(self, element):
        """Return True when a BeautifulSoup text node is user-visible text."""
        try:
            # Skip text inside non-content tags and HTML comments.
            if element.parent.name in ['style', 'script', '[document]', 'head', 'title', 'div', 'input']:
                return False
            elif re.match('<!--.*-->', str(element)):
                return False
        except Exception as e:
            return False
        return True

    def scrap(self, url, scrap_limit):
        """
        Scrap the url given and return page text and up to
        scrap_limit links to other pages from that page.
        Returns (None, None) when the page cannot be fetched.
        """
        try:
            print('"' + str(url) + '"')
            if url[:4] != 'http':
                url = 'http://' + url
            proxies = {
                'http': None,
                # BUG FIX: this key was misspelled 'htts', so the HTTPS
                # proxy override was silently never applied.
                'https': None
            }
            r = requests.get(url, proxies=proxies)
        except Exception as e:
            print(e)
            return None, None
        data = r.text
        soup = BeautifulSoup(data, "html.parser")
        texts = soup.findAll(text=True)
        visible_text = filter(self.visible, texts)
        links_list = []
        for link in soup.find_all('a'):
            if len(links_list) >= scrap_limit:
                break
            links_list.append(link.get('href'))
        return visible_text, links_list
|
{
"content_hash": "a37eeef4317b92b4332b545c3ec668fb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 105,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.4805194805194805,
"repo_name": "Saket-Komawar/Forex",
"id": "11efecbf8293c951399b2a71cc235901c81429aa",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solo/scrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "80026"
},
{
"name": "HTML",
"bytes": "27162"
},
{
"name": "JavaScript",
"bytes": "177035"
},
{
"name": "Python",
"bytes": "39153"
},
{
"name": "R",
"bytes": "323"
}
],
"symlink_target": ""
}
|
"""
Django settings for radioslackservice project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'p)5m&to5!zzqnt3s47yhshrc_5*zf*hu&c$8!$hj(ym-i)%zb3'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'playlist',
    'rtmbot',
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'radioslackservice.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'radioslackservice.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'

# REST framework
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    # NOTE(review): empty list means DRF falls back to its defaults.
    'DEFAULT_PERMISSION_CLASSES': [
    ]
}
|
{
"content_hash": "a01783f3cc7c1fab17b8bf87f66d70db",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 71,
"avg_line_length": 25.893805309734514,
"alnum_prop": 0.6913875598086124,
"repo_name": "schatten/radioslack",
"id": "5fb8e2343725666ae4cf3ee2a2696bad2cb8c2e9",
"size": "2926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radioslackservice/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14835"
}
],
"symlink_target": ""
}
|
"""
Get a user by id
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["objtype"] = u'user'
kwargs["id"] = 1
print "...CALLING: handler.get with args: {}".format(kwargs)
response = handler.get(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: print of response:"
print response
# call the export_obj() method to convert response to JSON and store it in out
export_kwargs = {}
export_kwargs['obj'] = response
export_kwargs['export_format'] = 'json'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: print the objects returned in JSON format:"
print out
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get with args: {'objtype': u'user', 'id': 1}
...OUTPUT: Type of response: <class 'taniumpy.object_types.user_list.UserList'>
...OUTPUT: print of response:
UserList, len: 1
...CALLING: handler.export_obj() with args {'export_format': 'json', 'obj': <taniumpy.object_types.user_list.UserList object at 0x109c03410>}
...OUTPUT: print the objects returned in JSON format:
{
"_type": "users",
"user": [
{
"_type": "user",
"deleted_flag": 0,
"group_id": 0,
"id": 1,
"last_login": "2015-09-14T20:10:14",
"local_admin_flag": 1,
"name": "Administrator",
"permissions": {
"_type": "permissions",
"permission": [
"admin",
..trimmed for brevity..
'''
'''STDERR from running this:
'''
|
{
"content_hash": "de9204b94bfd49307c1f3e74d296a031",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 198,
"avg_line_length": 32.75409836065574,
"alnum_prop": 0.6936936936936937,
"repo_name": "tanium/pytan",
"id": "c238acba5f043682e00ab89ab8cb78cb9c073623",
"size": "4018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXAMPLES/PYTAN_API/get_user_by_id.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
}
|
"""
fnordstalk.py
Copyright 2012 Stephen Holiday
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import getopt
import re
import sys
from stalk import Stalk
import datetime
import time
help_message = '''
fnordstalk
===============
Monitor your beanstalk instance and report to fnordmetric.
Other Options:
-h This help message
-u, --username [username] set your username
-p, --password [password] set your password
-i, --ignore [regular exp] ignore files that match this regex
'''
class Usage(Exception):
    """Raised for command-line usage errors; carries the message in .msg.

    The message is also forwarded to the Exception base class so that
    str(exc) and generic exception logging show it (previously the base
    constructor was never called and str(exc) was empty).
    """
    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h:v", [
"help",
"beanstalk_host=",
"beanstalk_port=",
"redis_host=",
'redis_port=',
'redis_db=',
'generate_config'
])
except getopt.error, msg:
raise Usage(msg)
beanstalk_host = "localhost"
beanstalk_port = 11300
redis_host = "localhost"
redis_port = 6379
redis_db = 0
generate_config = False
# option processing
for option, value in opts:
if option == "-v":
verbose = True
if option in ("-h", "--help"):
raise Usage(help_message)
if option in ("--beanstalk_host"):
beanstalk_host = value
if option in ("--beanstalk_port"):
try:
beanstalk_port = int(value)
except:
Usage("port must be an integer")
if option in ("--redis_host"):
redis_host = value
if option in ("--redis_port"):
try:
redis_port = int(value)
except:
Usage("port must be an integer")
if option in ("--redis_db"):
try:
redis_db = int(value)
except:
Usage("db must be an integer")
if option in ("--generate_config"):
generate_config = True
# Start the actual work
stalk = Stalk(beanstalk_host, beanstalk_port,
redis_host, redis_port, redis_db)
if generate_config:
print stalk.generate_config()
else:
while True:
stalk.send_stats_global()
for tube in stalk.tubes():
stalk.send_stats_tube(tube)
print '%s\tSent to beanstalk'%(datetime.datetime.now().isoformat())
# TODO(sholiday): much better ways to do this...
time.sleep(60)
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "3ac23b988137cbe66da5c0ff05348be2",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 83,
"avg_line_length": 30.46551724137931,
"alnum_prop": 0.5274476513865308,
"repo_name": "sholiday/fnordstalk",
"id": "5fafbec111507cfe400fd90848f02ff6cd2a279a",
"size": "3574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fnordstalk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7066"
}
],
"symlink_target": ""
}
|
from common import connector
from common import urlutil
from common import environment
from common import xmlutil
from services.locations.domain.common.country_subdivisions import countrysubdivision
from services.locations.domain.common.country_subdivisions import countrysubdivisions
import xml.etree.ElementTree as ET
SANDBOX_URL = 'https://sandbox.api.mastercard.com/merchants/v1/countrysubdivision?Format=XML'
PRODUCTION_URL = 'https://api.mastercard.com/merchants/v1/countrysubdivision?Format=XML'
class CountrySubdivisionMerchantLocationService(connector.Connector):
    """Fetch MasterCard country-subdivision data for merchant locations.

    Issues a signed GET request against the sandbox or production endpoint
    (depending on the configured environment) and converts the XML payload
    into CountrySubdivisions domain objects.
    """

    def __init__(self, consumer_key, private_key, environment):
        super().__init__(consumer_key, private_key)
        self.environment = environment

    def get_country_subdivisions(self, options):
        """Request subdivisions per *options* and return a CountrySubdivisions."""
        raw_xml = self.do_request(self.get_url(options), 'GET')
        return self.generate_return_object(ET.fromstring(raw_xml))

    def get_url(self, options):
        """Build the request URL with Details/Country query parameters."""
        endpoint = (PRODUCTION_URL
                    if self.environment == environment.Environment.PRODUCTION
                    else SANDBOX_URL)
        endpoint = urlutil.UrlUtil.add_query_parameter(endpoint, 'Details', options.details)
        endpoint = urlutil.UrlUtil.add_query_parameter(endpoint, 'Country', options.country)
        return endpoint

    def generate_return_object(self, xml_response):
        """Map <CountrySubdivision> elements to domain objects."""
        checker = xmlutil.XMLUtil()
        subdivisions = [
            countrysubdivision.CountrySubdivision(
                checker.verify_not_none(node.find('Name')),
                checker.verify_not_none(node.find('Code')))
            for node in xml_response.findall('CountrySubdivision')
        ]
        return countrysubdivisions.CountrySubdivisions(subdivisions)
|
{
"content_hash": "47298dc6eb7d7e0a7190ec1ac128e978",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 96,
"avg_line_length": 47.707317073170735,
"alnum_prop": 0.7229038854805726,
"repo_name": "M4gn4tor/mastercard-api-python",
"id": "2f261e0ab98be015e84a27b53e3f3a924133314c",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Services/locations/merchants/services/countrysubdivisionmerchantlocationservice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "229234"
}
],
"symlink_target": ""
}
|
# coding: utf-8
import os
import datetime
from babel.dates import format_datetime
from babel.core import Locale, UnknownLocaleError
from bottle import default_app, route, view, static_file, TEMPLATE_PATH, request, BaseTemplate, debug, hook
from peewee import IntegrityError, DoesNotExist, fn
from bottle_utils.i18n import I18NPlugin
from bottle_utils.i18n import lazy_gettext as _
from input_number import is_valid_number, parse_numbers, get_fingerprint
from models import BaseModel, Number, Place
from configuration import LANGS, MIN_COUNT, MAX_DAYS, DEFAULT_LOCALE, DISPLAY_SIZE
# NOTE(review): debug(True) enables Bottle's debug mode -- disable in production.
debug(True)
# store database outside of repository so it is not overwritten by git pull
MOD_PATH = os.path.dirname(os.path.abspath(__file__))
DB_PATH = os.path.abspath(os.path.join(MOD_PATH, '../', '../', "lagesonr.db"))
# Single module-wide peewee model wrapper bound to the SQLite file above.
model = BaseModel(database=DB_PATH)
def get_valid_locale(l):
    """Return *l* if Babel accepts it as a locale, else DEFAULT_LOCALE.

    Babel's Locale.parse raises ValueError (not only UnknownLocaleError)
    for malformed identifiers; previously such input escaped the handler
    and crashed the caller, so both are now treated as "fall back".
    """
    try:
        Locale.parse(l)
        return l
    except (UnknownLocaleError, ValueError):
        return DEFAULT_LOCALE
# set as global variable available in all templates (to be able to call e.g. request.locale)
BaseTemplate.defaults['request'] = request
# Template helper: format a datetime in the short style of the request's locale.
BaseTemplate.defaults['locale_datetime'] = lambda d: format_datetime(d, format="short", locale=get_valid_locale(request.locale))
@hook('before_request')
def _connect_db():
    """Open the peewee database connection for the incoming request."""
    model.connect()
@hook('after_request')
def _close_db():
    """Close the per-request database connection."""
    model.disconnect()
@hook('before_request')
def _check_locale():
    """Fall back to the Accept-Language header when no locale is set yet."""
    if request.environ.get('LOCALE'):
        return
    accept_language = request.get_header('Accept-Language')
    if not accept_language:
        return
    # Drop any ";q=..." weight from each entry; as before, the q-value
    # ordering itself is not honoured (fine tuning may still be needed).
    accepted = [entry.split(';')[0].strip()
                for entry in accept_language.split(',')]
    negotiated = Locale.negotiate(accepted, [l[0] for l in LANGS])
    if negotiated:
        request.environ['LOCALE'] = str(negotiated)
@route('/')
@view('views/query_page')
def index():
    """Landing page: the number-query form in its empty initial state."""
    return {
        'result': 'NewNumber',
        'invalid_input': '',
        'timestamps': '',
    }
@route('/enter')
@view('views/start_page')
def enter():
    """Render the number-entry form with an empty result list."""
    return {'entered': []}
@route('/enter', method='POST')
@view('views/start_page')
def do_enter():
    """Store the submitted numbers and report per-number outcomes.

    Tokens rejected by is_valid_number() are silently skipped; duplicates
    are reported via the 'erruniquenumber' translation message.
    """
    numbers = set(parse_numbers(request.forms.get('numbers', '')))
    timestamp = datetime.datetime.now()
    # presumably a client fingerprint standing in for a login -- see get_fingerprint
    usr_hash = get_fingerprint(request)
    result_num = []
    # TODO make place variable, depending on current request
    q = Place.select().where(Place.place == 'LAGESO')
    lageso = q.get() if q.count() == 1 else None
    if not numbers:
        result_num.append(_('novalidnumbers'))
    else:
        for num in numbers:
            if is_valid_number(num):
                try:
                    n = Number.create(number=num.upper(), time=timestamp, place=lageso, fingerprint=usr_hash)
                    result_num.append(n.number)
                except IntegrityError:
                    # A uniqueness constraint fired; report the existing entry.
                    try:
                        n = Number.get(Number.number == num.upper())
                        # FIXME Why ain't there any value placeholder in translation string?
                        result_num.append(_(u'erruniquenumber') + ': {}'.format(n.number))
                    except DoesNotExist:
                        result_num.append(u'Something weired happend with {}'.format(num))
    # FIXME result_num is horrible, as it contains success and failures, indistinguishable
    return {'entered': result_num, 'timestamp': timestamp.strftime('%x %X')}
@route('/query')
@view('views/query_page')
def query():
    """Render the query form without any result yet."""
    return {'result': None}
@route('/query', method='POST')
@view('views/query_page')
def do_query():
    """Look up all recorded timestamps for a single submitted number."""
    user_input = request.forms.get('number', '')
    numbers = parse_numbers(user_input)
    number = None
    timestamps = []
    invalid_input = None
    if numbers:
        # FIXME WTF? Allow and parse a list and than pick one & silently drop the others?
        number = numbers[0]
        # NOTE(review): `**` looks like peewee's case-insensitive match
        # operator -- confirm against the peewee version in use.
        qry = Number.select(Number.time).where(Number.number ** number).order_by(Number.time)
        timestamps = [n.time for n in qry]
    else:
        invalid_input = user_input
    context = {
        'result': number or invalid_input,
        'invalid_input': invalid_input,
        'timestamps': timestamps
    }
    return context
@route('/about')
@view('views/about')
def about():
    """Return page with information about this project"""
    # No context needed; the template is static.
    pass
@route('/impressum')
@view('views/impressum')
def impressum():
    """Return page with contact information"""
    # No context needed; the template is static.
    pass
@route('/static/<filename:path>', no_i18n=True)
def send_static(filename):
    """Serve an asset from this module's local ``static`` directory."""
    return static_file(filename, root=os.path.join(MOD_PATH, 'static'))
@route('/favicon.ico', no_i18n=True)
def send_favicon():
    """Serve the favicon (a PNG from the static directory).

    Renamed from ``send_static``: the duplicate definition shadowed the
    static-file handler defined just above. Bottle binds handlers at
    decoration time, so routing still worked, but the module-level name
    ``send_static`` silently pointed at the wrong function.
    """
    return static_file("favicon.png", root=os.path.join(MOD_PATH, 'static'))
# Numbers to be shown there:
# All numbers that have been entered 3 or more times and where the last time of entry is not older than X minutes.
# For the "last time of entry age" it would be great to collect stats how long numbers are displayed in average.
# Until the stats are actually being collected, we should use 15 minutes as an "inactive" time setting.
# There should also be a link "history" where those numbers are then listed with a note "last seen".
@route('/display')
@view('views/display')
def display():
    """List up to DISPLAY_SIZE numbers entered at least MIN_COUNT times
    within the last MAX_DAYS days, sorted by number."""
    # Midnight of (today - MAX_DAYS): the oldest entry still displayed.
    oldest_to_be_shown = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=MAX_DAYS),
                                                   datetime.datetime.min.time())
    # TODO optimize query, so we don't need to iterate manually, e.g. by selecing only count > min_count!
    # TODO make Place variable and part of WHERE
    numbers = Number.select(Number.number, Number.time, fn.Count(Number.number).alias('count')).\
        where(Number.time >= oldest_to_be_shown).group_by(Number.number).order_by(Number.time.desc(), Number.number)
    # filter numbers entered often enough
    # format numbers for later output
    display_output = sorted([{'num': n.number, 'count': int(n.count)}
                             for n in numbers if int(n.count) >= MIN_COUNT][:DISPLAY_SIZE], key=lambda n: n['num'])
    since = format_datetime(oldest_to_be_shown, 'short', locale=request.locale)
    return {'numbers': display_output,
            'since': since,
            'min_count': MIN_COUNT
            }
@route('/pm-start')
@view('static/pm-start.html')
def pm_start():
    """Serve the static pm-start page.

    Renamed from ``enter``: the original name shadowed the ``enter``
    handler defined earlier in this module. Bottle registers routes at
    decoration time, so routing was unaffected, but the module attribute
    ``enter`` silently pointed at the wrong handler.
    """
    return {'entered': []}
# Templates are also looked up in this module's own directory.
TEMPLATE_PATH.append(MOD_PATH)
app = default_app()
# Wrap the Bottle app so every route is served with i18n support,
# translations coming from the module-local 'locales' directory.
application = I18NPlugin(app, langs=LANGS, default_locale=DEFAULT_LOCALE,
                         domain='messages',
                         locale_dir=os.path.join(MOD_PATH, 'locales'))
|
{
"content_hash": "4914f07ceb2ce542b5b2c1abcef6ccb8",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 128,
"avg_line_length": 32.09049773755656,
"alnum_prop": 0.6400169204737732,
"repo_name": "christophmeissner/lagesonum",
"id": "01f36ac245374648be8fb048a418a84949d2994f",
"size": "7094",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lagesonum/bottle_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4757"
},
{
"name": "Python",
"bytes": "21937"
},
{
"name": "Smarty",
"bytes": "11006"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from captcha_api.views import CaptchaAPIView, CaptchaAPIJSONPView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Captcha verification endpoints (plain and JSONP variants).
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10;
# this module only works on older Django versions.
urlpatterns = patterns('',
    url(r'^captcha_api/verify$', CaptchaAPIView.as_view()),
    url(r'^captcha_api/jsonp_verify$', CaptchaAPIJSONPView.as_view()),
)
|
{
"content_hash": "362485b43e0fee316ae8b38c78f9aea1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 26.266666666666666,
"alnum_prop": 0.7461928934010152,
"repo_name": "quekshuy/shopify-easy-captcha",
"id": "58fcf8282d6c69585fcb407ae72c535923e6d9f3",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy_captcha/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8969"
}
],
"symlink_target": ""
}
|
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
    """Base class for the spatial aggregates defined in this module.

    The concrete SQL function name is resolved per database backend in
    as_sql() via ``connection.ops.spatial_aggregate_name``.
    """
    # Filled in lazily by as_sql() from the backend's spatial function map.
    function = None
    # Truthy ('2D'/'3D') on extent aggregates; also drives the Oracle template.
    is_extent = False
    def as_sql(self, compiler, connection):
        # this will be called again in parent, but it's needed now - before
        # we get the spatial_aggregate_name
        connection.ops.check_expression_support(self)
        self.function = connection.ops.spatial_aggregate_name(self.name)
        return super().as_sql(compiler, connection)
    def as_oracle(self, compiler, connection):
        # Oracle wraps non-extent aggregates in SDOAGGRTYPE with a tolerance
        # (default 0.05 unless the instance already carries one).
        if not hasattr(self, 'tolerance'):
            self.tolerance = 0.05
        self.extra['tolerance'] = self.tolerance
        if not self.is_extent:
            self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
        return self.as_sql(compiler, connection)
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Reject aggregation over anything that is not a geometry field early.
        c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        for expr in c.get_source_expressions():
            if not hasattr(expr.field, 'geom_type'):
                raise ValueError('Geospatial aggregates only allowed on geometry fields.')
        return c
class Collect(GeoAggregate):
    """Spatial aggregate mapped to the backend's 'Collect' function."""
    name = 'Collect'
class Extent(GeoAggregate):
    """2D extent aggregate; its result is exposed through an ExtentField."""
    name = 'Extent'
    is_extent = '2D'
    def __init__(self, expression, **extra):
        super().__init__(expression, output_field=ExtentField(), **extra)
    def convert_value(self, value, expression, connection, context):
        # Let the backend translate its raw extent value, honouring any
        # SRID transform recorded in the query context.
        return connection.ops.convert_extent(value, context.get('transformed_srid'))
class Extent3D(GeoAggregate):
    """3D extent aggregate; its result is exposed through an ExtentField."""
    name = 'Extent3D'
    is_extent = '3D'
    def __init__(self, expression, **extra):
        super().__init__(expression, output_field=ExtentField(), **extra)
    def convert_value(self, value, expression, connection, context):
        # 3D counterpart of Extent.convert_value.
        return connection.ops.convert_extent3d(value, context.get('transformed_srid'))
class MakeLine(GeoAggregate):
    """Spatial aggregate mapped to the backend's 'MakeLine' function."""
    name = 'MakeLine'
class Union(GeoAggregate):
    """Spatial aggregate mapped to the backend's 'Union' function."""
    name = 'Union'
|
{
"content_hash": "528441754242cef0f795722c98702b92",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 108,
"avg_line_length": 33.753846153846155,
"alnum_prop": 0.6649954421148587,
"repo_name": "intgr/django",
"id": "95dce944c5afaee04eca1288d3b0718baffa9fe1",
"size": "2194",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/models/aggregates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182977"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11824885"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import logging
import os
import pandas as pd
from .sampleRetraction import sampleRetraction
logger = logging.getLogger(__name__)
class patientRetraction(sampleRetraction):
    """Patient-retraction file handler; reuses sampleRetraction's logic
    and only overrides the file-type identifier."""
    # Identifier used to distinguish this file type from sampleRetraction.
    _fileType = "patientRetraction"
|
{
"content_hash": "171538677eff9f9096249714669fafa4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 46,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.7924528301886793,
"repo_name": "thomasyu888/Genie",
"id": "5d16849a364ca2136c0d49572c2e4b8c92480325",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genie/patientRetraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "841335"
},
{
"name": "Perl",
"bytes": "38214"
},
{
"name": "Python",
"bytes": "160066"
},
{
"name": "R",
"bytes": "93771"
},
{
"name": "Shell",
"bytes": "11666"
}
],
"symlink_target": ""
}
|
import nltk
from nltk.corpus import cess_esp
from nltk import UnigramTagger as ut
from pickle import dump
def main():
    """Train a Spanish unigram POS tagger on cess_esp and pickle it to disk."""
    # Read the corpus into a list,
    # each entry in the list is one sentence.
    cess_sents = cess_esp.tagged_sents()
    # Train the unigram tagger
    uni_tag = ut(cess_sents)
    # Use a context manager so the file handle is closed even if dump() raises
    # (the original left the file open on failure). -1 = highest pickle protocol.
    with open('uni_tag.pkl', 'wb') as output:
        dump(uni_tag, output, -1)
if __name__ == "__main__":
main()
|
{
"content_hash": "78451e9e2bf8b5f432d8cf5d825e930d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 45,
"avg_line_length": 22,
"alnum_prop": 0.6272727272727273,
"repo_name": "juditacs/dsl",
"id": "ef596d7acde93173d2e67901d83ce843e73b737d",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/Training_uni_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78772"
},
{
"name": "Shell",
"bytes": "18499"
}
],
"symlink_target": ""
}
|
from unittest import TestCase, mock
from collections import namedtuple
from data_vault import VaultEnvironment, get_synced_params, get_connected_params
from helper import KeeperApiHelper
from keepercommander import api, generator
vault_env = VaultEnvironment()
PasswordStrength = namedtuple('PasswordStrength', 'length caps lower digits symbols')
class TestPasswordGenerator(TestCase):
    """Unit tests for generator.KeeperPasswordGenerator."""

    @staticmethod
    def get_password_strength(password):  # type: (str) -> PasswordStrength
        """Tally caps/lower/digit/symbol characters of *password*."""
        caps = sum(1 for ch in password if ch.isalpha() and ch.isupper())
        lower = sum(1 for ch in password if ch.isalpha() and not ch.isupper())
        digits = sum(1 for ch in password if not ch.isalpha() and ch.isdigit())
        symbols = len(password) - caps - lower - digits
        return PasswordStrength(length=len(password), caps=caps, lower=lower,
                                digits=digits, symbols=symbols)

    def test_generator_exclude(self):
        """symbols=0 with negative other weights must yield no symbols."""
        gen = generator.KeeperPasswordGenerator(length=20, caps=-2, lower=-2, digits=-2, symbols=0)
        self.assertEqual(gen.category_map[4][0], 14)
        strength = TestPasswordGenerator.get_password_strength(gen.generate())
        self.assertEqual(strength.length, 20)
        self.assertEqual(strength.symbols, 0)

    def test_generator_fail(self):
        """All-zero character categories cannot produce a password."""
        with self.assertRaises(Exception):
            generator.KeeperPasswordGenerator(length=20, caps=0, lower=0, digits=0, symbols=0)
class TestSearch(TestCase):
    """Tests for api.search_* helpers and the account-transfer consent flow."""
    def setUp(self):
        # Route every server call through the canned KeeperApiHelper responses.
        self.communicate_mock = mock.patch('keepercommander.api.communicate').start()
        self.communicate_mock.side_effect = KeeperApiHelper.communicate_command
    def tearDown(self):
        # Undo all patches started in setUp.
        mock.patch.stopall()
    def test_search_records(self):
        """Empty/substring patterns match everything; exact narrows; junk matches none."""
        params = get_synced_params()
        records = api.search_records(params, '')
        self.assertEqual(len(records), len(params.record_cache))
        records = api.search_records(params, 'RECORD')
        self.assertEqual(len(records), len(params.record_cache))
        records = api.search_records(params, 'Record 1')
        self.assertEqual(len(records), 1)
        records = api.search_records(params, 'INVALID')
        self.assertEqual(len(records), 0)
    def test_search_shared_folders(self):
        """Same matching behaviour for shared folders."""
        params = get_synced_params()
        sfs = api.search_shared_folders(params, '')
        self.assertEqual(len(sfs), len(params.shared_folder_cache))
        sfs = api.search_shared_folders(params, 'folder')
        self.assertEqual(len(sfs), len(params.shared_folder_cache))
        sfs = api.search_shared_folders(params, '1')
        self.assertEqual(len(sfs), 1)
        sfs = api.search_shared_folders(params, 'INVALID')
        self.assertEqual(len(sfs), 0)
    def test_search_teams(self):
        """Team search; NOTE(review): the last three asserts call
        search_shared_folders, not search_teams -- looks like a copy/paste
        slip worth confirming."""
        params = get_synced_params()
        teams = api.search_teams(params, '')
        self.assertEqual(len(teams), len(params.team_cache))
        teams = api.search_shared_folders(params, 'team')
        self.assertEqual(len(teams), len(params.shared_folder_cache))
        teams = api.search_shared_folders(params, '1')
        self.assertEqual(len(teams), 1)
        teams = api.search_shared_folders(params, 'INVALID')
        self.assertEqual(len(teams), 0)
    def test_accept_account_transfer_consent(self):
        """Typing 'accept' triggers a share_account call and returns True."""
        params = get_connected_params()
        params.settings = {
            'must_perform_account_share_by': '1632370067000',
            'share_account_to': [{
                'role_id': 123456789,
                'public_key': vault_env.encoded_public_key
            }]
        }
        with mock.patch('builtins.print'), mock.patch('builtins.input', return_value='accept'):
            KeeperApiHelper.communicate_expect(['share_account'])
            self.assertTrue(api.accept_account_transfer_consent(params))
            self.assertTrue(KeeperApiHelper.is_expect_empty())
    def test_decline_account_transfer_consent(self):
        """Typing 'decline' returns False without any share_account call."""
        params = get_connected_params()
        params.settings = {
            'must_perform_account_share_by': '1632370067000',
            'share_account_to': [{
                'role_id': 123456789,
                'public_key': vault_env.encoded_public_key
            }]
        }
        with mock.patch('builtins.print'), mock.patch('builtins.input', return_value='decline'):
            self.assertFalse(api.accept_account_transfer_consent(params))
|
{
"content_hash": "05b835242823cda49e1cbd67772dd4a6",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 102,
"avg_line_length": 35.928,
"alnum_prop": 0.6261411712313516,
"repo_name": "Keeper-Security/Commander",
"id": "9bc47a179a7578371a4ff293a59eacf8d7fae63b",
"size": "4491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit-tests/test_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2274231"
},
{
"name": "Shell",
"bytes": "3388"
}
],
"symlink_target": ""
}
|
from . import test_hr_attendance_constraints
from . import test_hr_attendance_process
from . import test_kiosk_tour
|
{
"content_hash": "6d897edcb0d85f063e5e26da95739fe7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.8017241379310345,
"repo_name": "it-projects-llc/misc-addons",
"id": "fb3ee35974d73df98839aef16633f8108816b36e",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/13.0",
"path": "base_attendance/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14551"
},
{
"name": "HTML",
"bytes": "130934"
},
{
"name": "JavaScript",
"bytes": "407608"
},
{
"name": "Python",
"bytes": "414883"
}
],
"symlink_target": ""
}
|
import binascii
import hmac
import os
import random
import string
__all__ = ['constant_time_compare', 'random_bytes', 'random_string']
# Rebind the module name `random` to a SystemRandom instance so every
# `random.choice` below draws from the OS entropy source.
random = random.SystemRandom()
ASCII_ALPHANUMERIC = string.ascii_letters + string.digits
# URL-safe base64 alphabet (RFC 4648 variant with '-' and '_').
BASE64_ALPHABET = ASCII_ALPHANUMERIC + '-_'
# Lowercase hexadecimal digits.
HEX = string.digits + 'abcdef'
def random_bytes(n=16, as_hex=True):
    """Generate ``n`` cryptographically random bytes.

    When ``as_hex`` is true (the default), each raw byte is expanded to
    two hex digits, so the returned byte string has length ``2 * n``.

    >>> len(random_bytes()) == 32
    True
    >>> len(random_bytes(10, as_hex=True)) == 20
    True
    >>> len(random_bytes(7, as_hex=False)) == 7
    True
    >>> random_bytes().__class__ is bytes
    True
    >>> random_bytes(as_hex=False).__class__ is bytes
    True
    """
    raw = os.urandom(n)
    return binascii.hexlify(raw) if as_hex else raw
def random_string(n=32, alphabet=BASE64_ALPHABET, encoding='ascii') -> str:
    """Return a random string of length ``n`` drawn from ``alphabet``.

    Defaults to 32 characters of the URL-safe base64 alphabet. If the
    alphabet is a byte string, the result is decoded with ``encoding``.

    >>> len(random_string()) == 32
    True
    >>> len(random_string(7, ASCII_ALPHANUMERIC)) == 7
    True
    >>> random_string().__class__ is str
    True
    >>> 'g' not in random_string(alphabet=HEX)
    True
    """
    sample = alphabet[0]
    picks = [random.choice(alphabet) for _ in range(n)]
    if isinstance(sample, str):
        return ''.join(picks)
    if isinstance(sample, bytes):
        return b''.join(picks).decode(encoding)
    raise TypeError('Expected str or bytes; got %s' % sample.__class__)
def constant_time_compare(a, b):
    """Compare two bytes or str objects in constant time.

    ``a`` and ``b`` must both be bytes OR both be ASCII-only strings.
    Returns ``False`` on a length mismatch, mismatched types, or a
    string containing non-ASCII characters.

    See :func:`hmac.compare_digest` for more details.
    """
    if len(a) == len(b):
        try:
            return hmac.compare_digest(a, b)
        except TypeError:
            # Mixed types or non-ASCII str input.
            return False
    return False
|
{
"content_hash": "5bbd7ac6d1edad6160180464d2a68772",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 75,
"avg_line_length": 25.587628865979383,
"alnum_prop": 0.6192586623690572,
"repo_name": "TangledWeb/tangled",
"id": "a4a1fa544cc9a4dad47c62ae93edc1116f79ed2e",
"size": "2482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tangled/util/random.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "195"
},
{
"name": "Python",
"bytes": "88868"
}
],
"symlink_target": ""
}
|
"""Tests for contrib.seq2seq.python.seq2seq.basic_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class BasicDecoderTest(test.TestCase):
  def _testStepWithTrainingHelper(self, use_output_layer):
    """Run one decoder step with a TrainingHelper and verify shapes,
    dtypes, finished flags, and (optionally) the projection layer."""
    sequence_length = [3, 4, 3, 1, 0]
    batch_size = 5
    max_time = 8
    input_depth = 7
    cell_depth = 10
    output_layer_depth = 3
    with self.session(use_gpu=True) as sess:
      inputs = np.random.randn(batch_size, max_time,
                               input_depth).astype(np.float32)
      cell = rnn_cell.LSTMCell(cell_depth)
      helper = helper_py.TrainingHelper(
          inputs, sequence_length, time_major=False)
      if use_output_layer:
        # Project cell outputs to a smaller depth with a bias-free Dense.
        output_layer = layers_core.Dense(output_layer_depth, use_bias=False)
        expected_output_depth = output_layer_depth
      else:
        output_layer = None
        expected_output_depth = cell_depth
      my_decoder = basic_decoder.BasicDecoder(
          cell=cell,
          helper=helper,
          initial_state=cell.zero_state(
              dtype=dtypes.float32, batch_size=batch_size),
          output_layer=output_layer)
      # Static output size/dtype metadata must reflect the projection.
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(expected_output_depth,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      # One decode step at time 0.
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, expected_output_depth),
                       step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      if use_output_layer:
        # The output layer was accessed
        self.assertEqual(len(output_layer.variables), 1)
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      # finished flags follow sequence_length = [3, 4, 3, 1, 0].
      self.assertAllEqual([False, False, False, False, True],
                          sess_results["first_finished"])
      self.assertAllEqual([False, False, False, True, True],
                          sess_results["step_finished"])
      self.assertEqual(output_dtype.sample_id,
                       sess_results["step_outputs"].sample_id.dtype)
      # TrainingHelper samples greedily: argmax over the rnn output.
      self.assertAllEqual(
          np.argmax(sess_results["step_outputs"].rnn_output, -1),
          sess_results["step_outputs"].sample_id)
  def testStepWithTrainingHelperNoOutputLayer(self):
    """Runs the TrainingHelper step test without an output projection."""
    self._testStepWithTrainingHelper(use_output_layer=False)
  def testStepWithTrainingHelperWithOutputLayer(self):
    """Runs the TrainingHelper step test with a Dense output projection."""
    self._testStepWithTrainingHelper(use_output_layer=True)
  def testStepWithGreedyEmbeddingHelper(self):
    """Checks one decode step driven by GreedyEmbeddingHelper.

    The helper samples the argmax id from the logits, feeds back the
    embedding of that id as the next input, and marks a sequence finished
    once the sampled id equals ``end_token``.
    """
    batch_size = 5
    vocabulary_size = 7
    cell_depth = vocabulary_size  # cell's logits must match vocabulary size
    input_depth = 10
    start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
    end_token = 1
    with self.session(use_gpu=True) as sess:
      embeddings = np.random.randn(vocabulary_size,
                                   input_depth).astype(np.float32)
      cell = rnn_cell.LSTMCell(vocabulary_size)
      helper = helper_py.GreedyEmbeddingHelper(embeddings, start_tokens,
                                               end_token)
      my_decoder = basic_decoder.BasicDecoder(
          cell=cell,
          helper=helper,
          initial_state=cell.zero_state(
              dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      # Static metadata: rnn_output has cell_depth units, sample_id is scalar.
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(expected_output_depth
                                           if False else cell_depth,
                                           tensor_shape.TensorShape([])),
          output_size) if False else self.assertEqual(
          basic_decoder.BasicDecoderOutput(cell_depth,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      # Greedy sampling is argmax over the logits; next inputs are the
      # embedding rows of the sampled ids.
      expected_sample_ids = np.argmax(
          sess_results["step_outputs"].rnn_output, -1)
      expected_step_finished = (expected_sample_ids == end_token)
      expected_step_next_inputs = embeddings[expected_sample_ids]
      self.assertAllEqual([False, False, False, False, False],
                          sess_results["first_finished"])
      self.assertAllEqual(expected_step_finished, sess_results["step_finished"])
      self.assertEqual(output_dtype.sample_id,
                       sess_results["step_outputs"].sample_id.dtype)
      self.assertAllEqual(expected_sample_ids,
                          sess_results["step_outputs"].sample_id)
      self.assertAllEqual(expected_step_next_inputs,
                          sess_results["step_next_inputs"])
  def testStepWithSampleEmbeddingHelper(self):
    """Checks one decode step driven by SampleEmbeddingHelper.

    The helper samples an id from the logits (seeded for determinism) and
    feeds back the embedding of the sampled id; the expected values below
    are derived from the ids the run actually produced.
    """
    batch_size = 5
    vocabulary_size = 7
    cell_depth = vocabulary_size  # cell's logits must match vocabulary size
    input_depth = 10
    np.random.seed(0)
    start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
    end_token = 1
    with self.session(use_gpu=True) as sess:
      with variable_scope.variable_scope(
          "testStepWithSampleEmbeddingHelper",
          initializer=init_ops.constant_initializer(0.01)):
        embeddings = np.random.randn(vocabulary_size,
                                     input_depth).astype(np.float32)
        cell = rnn_cell.LSTMCell(vocabulary_size)
        helper = helper_py.SampleEmbeddingHelper(embeddings, start_tokens,
                                                 end_token, seed=0)
        my_decoder = basic_decoder.BasicDecoder(
            cell=cell,
            helper=helper,
            initial_state=cell.zero_state(
                dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(cell_depth,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      # Whatever ids were sampled, finishing and next inputs must follow them.
      sample_ids = sess_results["step_outputs"].sample_id
      self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
      expected_step_finished = (sample_ids == end_token)
      expected_step_next_inputs = embeddings[sample_ids]
      self.assertAllEqual(expected_step_finished,
                          sess_results["step_finished"])
      self.assertAllEqual(expected_step_next_inputs,
                          sess_results["step_next_inputs"])
  def testStepWithScheduledEmbeddingTrainingHelper(self):
    """Checks one decode step with ScheduledEmbeddingTrainingHelper.

    With sampling probability 0.5, each batch entry either samples an id
    (sample_id > -1, next input is that id's embedding) or keeps teacher
    forcing (sample_id == -1, next input is the time-1 ground-truth input).
    """
    sequence_length = [3, 4, 3, 1, 0]
    batch_size = 5
    max_time = 8
    input_depth = 7
    vocabulary_size = 10
    with self.session(use_gpu=True) as sess:
      inputs = np.random.randn(
          batch_size, max_time, input_depth).astype(np.float32)
      embeddings = np.random.randn(
          vocabulary_size, input_depth).astype(np.float32)
      half = constant_op.constant(0.5)
      cell = rnn_cell.LSTMCell(vocabulary_size)
      helper = helper_py.ScheduledEmbeddingTrainingHelper(
          inputs=inputs,
          sequence_length=sequence_length,
          embedding=embeddings,
          sampling_probability=half,
          time_major=False)
      my_decoder = basic_decoder.BasicDecoder(
          cell=cell,
          helper=helper,
          initial_state=cell.zero_state(
              dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(vocabulary_size,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, vocabulary_size),
                       step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, vocabulary_size),
                       first_state[0].get_shape())
      self.assertEqual((batch_size, vocabulary_size),
                       first_state[1].get_shape())
      self.assertEqual((batch_size, vocabulary_size),
                       step_state[0].get_shape())
      self.assertEqual((batch_size, vocabulary_size),
                       step_state[1].get_shape())
      self.assertEqual((batch_size, input_depth),
                       step_next_inputs.get_shape())
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      self.assertAllEqual([False, False, False, False, True],
                          sess_results["first_finished"])
      self.assertAllEqual([False, False, False, True, True],
                          sess_results["step_finished"])
      sample_ids = sess_results["step_outputs"].sample_id
      self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
      # sample_id == -1 marks entries that did NOT sample (teacher forcing).
      batch_where_not_sampling = np.where(sample_ids == -1)
      batch_where_sampling = np.where(sample_ids > -1)
      self.assertAllClose(
          sess_results["step_next_inputs"][batch_where_sampling],
          embeddings[sample_ids[batch_where_sampling]])
      self.assertAllClose(
          sess_results["step_next_inputs"][batch_where_not_sampling],
          np.squeeze(inputs[batch_where_not_sampling, 1]))
  def _testStepWithScheduledOutputTrainingHelper(
      self, sampling_probability, use_next_inputs_fn, use_auxiliary_inputs):
    """Shared body for the ScheduledOutputTrainingHelper step tests.

    Args:
      sampling_probability: Probability of feeding back the cell output
        (possibly transformed) instead of the ground-truth input.
      use_next_inputs_fn: If True, transform sampled outputs with a
        deterministic one-hot-of-argmax function before feeding them back.
      use_auxiliary_inputs: If True, concatenate per-timestep auxiliary
        inputs onto whatever is fed back.
    """
    sequence_length = [3, 4, 3, 1, 0]
    batch_size = 5
    max_time = 8
    input_depth = 7
    cell_depth = input_depth
    if use_auxiliary_inputs:
      auxiliary_input_depth = 4
      auxiliary_inputs = np.random.randn(
          batch_size, max_time, auxiliary_input_depth).astype(np.float32)
    else:
      auxiliary_inputs = None
    with self.session(use_gpu=True) as sess:
      inputs = np.random.randn(batch_size, max_time,
                               input_depth).astype(np.float32)
      cell = rnn_cell.LSTMCell(cell_depth)
      sampling_probability = constant_op.constant(sampling_probability)
      if use_next_inputs_fn:
        def next_inputs_fn(outputs):
          # Use deterministic function for test.
          samples = math_ops.argmax(outputs, axis=1)
          return array_ops.one_hot(samples, cell_depth, dtype=dtypes.float32)
      else:
        next_inputs_fn = None
      helper = helper_py.ScheduledOutputTrainingHelper(
          inputs=inputs,
          sequence_length=sequence_length,
          sampling_probability=sampling_probability,
          time_major=False,
          next_inputs_fn=next_inputs_fn,
          auxiliary_inputs=auxiliary_inputs)
      my_decoder = basic_decoder.BasicDecoder(
          cell=cell,
          helper=helper,
          initial_state=cell.zero_state(
              dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(cell_depth,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      if use_next_inputs_fn:
        output_after_next_inputs_fn = next_inputs_fn(step_outputs.rnn_output)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      sess.run(variables.global_variables_initializer())
      fetches = {
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      }
      if use_next_inputs_fn:
        fetches["output_after_next_inputs_fn"] = output_after_next_inputs_fn
      sess_results = sess.run(fetches)
      self.assertAllEqual([False, False, False, False, True],
                          sess_results["first_finished"])
      self.assertAllEqual([False, False, False, True, True],
                          sess_results["step_finished"])
      sample_ids = sess_results["step_outputs"].sample_id
      self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
      # Here sample_id is a 0/1 flag: truthy entries sampled the output.
      batch_where_not_sampling = np.where(np.logical_not(sample_ids))
      batch_where_sampling = np.where(sample_ids)
      auxiliary_inputs_to_concat = (
          auxiliary_inputs[:, 1] if use_auxiliary_inputs else
          np.array([]).reshape(batch_size, 0).astype(np.float32))
      expected_next_sampling_inputs = np.concatenate(
          (sess_results["output_after_next_inputs_fn"][batch_where_sampling]
           if use_next_inputs_fn else
           sess_results["step_outputs"].rnn_output[batch_where_sampling],
           auxiliary_inputs_to_concat[batch_where_sampling]),
          axis=-1)
      self.assertAllClose(
          sess_results["step_next_inputs"][batch_where_sampling],
          expected_next_sampling_inputs)
      self.assertAllClose(
          sess_results["step_next_inputs"][batch_where_not_sampling],
          np.concatenate(
              (np.squeeze(inputs[batch_where_not_sampling, 1], axis=0),
               auxiliary_inputs_to_concat[batch_where_not_sampling]),
              axis=-1))
  def testStepWithScheduledOutputTrainingHelperWithoutNextInputsFnOrAuxInputs(
      self):
    """Scheduled-output step test: plain feedback, no auxiliary inputs."""
    self._testStepWithScheduledOutputTrainingHelper(
        sampling_probability=0.5, use_next_inputs_fn=False,
        use_auxiliary_inputs=False)
  def testStepWithScheduledOutputTrainingHelperWithNextInputsFn(self):
    """Scheduled-output step test with a next_inputs_fn transform."""
    self._testStepWithScheduledOutputTrainingHelper(
        sampling_probability=0.5, use_next_inputs_fn=True,
        use_auxiliary_inputs=False)
  def testStepWithScheduledOutputTrainingHelperWithAuxiliaryInputs(self):
    """Scheduled-output step test with auxiliary inputs concatenated."""
    self._testStepWithScheduledOutputTrainingHelper(
        sampling_probability=0.5, use_next_inputs_fn=False,
        use_auxiliary_inputs=True)
  def testStepWithScheduledOutputTrainingHelperWithNextInputsFnAndAuxInputs(
      self):
    """Scheduled-output step test with both transform and auxiliary inputs."""
    self._testStepWithScheduledOutputTrainingHelper(
        sampling_probability=0.5, use_next_inputs_fn=True,
        use_auxiliary_inputs=True)
  def testStepWithScheduledOutputTrainingHelperWithNoSampling(self):
    """Scheduled-output step test with sampling disabled (probability 0)."""
    self._testStepWithScheduledOutputTrainingHelper(
        sampling_probability=0.0, use_next_inputs_fn=True,
        use_auxiliary_inputs=True)
  def testStepWithInferenceHelperCategorical(self):
    """Checks one decode step with InferenceHelper and categorical sampling.

    sample_fn draws a single id per batch entry; the next input is the
    one-hot encoding of that id, and decoding finishes when the id equals
    ``end_token``.
    """
    batch_size = 5
    vocabulary_size = 7
    cell_depth = vocabulary_size
    start_token = 0
    end_token = 6
    start_inputs = array_ops.one_hot(
        np.ones(batch_size) * start_token,
        vocabulary_size)
    # The sample function samples categorically from the logits.
    sample_fn = lambda x: categorical.Categorical(logits=x).sample()
    # The next inputs are a one-hot encoding of the sampled labels.
    next_inputs_fn = (
        lambda x: array_ops.one_hot(x, vocabulary_size, dtype=dtypes.float32))
    end_fn = lambda sample_ids: math_ops.equal(sample_ids, end_token)
    with self.session(use_gpu=True) as sess:
      with variable_scope.variable_scope(
          "testStepWithInferenceHelper",
          initializer=init_ops.constant_initializer(0.01)):
        cell = rnn_cell.LSTMCell(vocabulary_size)
        helper = helper_py.InferenceHelper(
            sample_fn, sample_shape=(), sample_dtype=dtypes.int32,
            start_inputs=start_inputs, end_fn=end_fn,
            next_inputs_fn=next_inputs_fn)
        my_decoder = basic_decoder.BasicDecoder(
            cell=cell,
            helper=helper,
            initial_state=cell.zero_state(
                dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(cell_depth,
                                           tensor_shape.TensorShape([])),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
      self.assertEqual((batch_size,), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      sample_ids = sess_results["step_outputs"].sample_id
      self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
      expected_step_finished = (sample_ids == end_token)
      # Reconstruct the expected one-hot next inputs from the sampled ids.
      expected_step_next_inputs = np.zeros((batch_size, vocabulary_size))
      expected_step_next_inputs[np.arange(batch_size), sample_ids] = 1.0
      self.assertAllEqual(expected_step_finished,
                          sess_results["step_finished"])
      self.assertAllEqual(expected_step_next_inputs,
                          sess_results["step_next_inputs"])
  def testStepWithInferenceHelperMultilabel(self):
    """Checks one decode step with InferenceHelper and multilabel sampling.

    sample_fn draws an independent Bernoulli per vocabulary entry, so the
    sample is a boolean vector; the next input is that vector cast to
    float, and decoding finishes when the ``end_token`` position is True.
    """
    batch_size = 5
    vocabulary_size = 7
    cell_depth = vocabulary_size
    start_token = 0
    end_token = 6
    start_inputs = array_ops.one_hot(
        np.ones(batch_size) * start_token,
        vocabulary_size)
    # The sample function samples independent bernoullis from the logits.
    sample_fn = (
        lambda x: bernoulli.Bernoulli(logits=x, dtype=dtypes.bool).sample())
    # The next inputs are the sampled multi-hot labels cast to float.
    next_inputs_fn = math_ops.to_float
    end_fn = lambda sample_ids: sample_ids[:, end_token]
    with self.session(use_gpu=True) as sess:
      with variable_scope.variable_scope(
          "testStepWithInferenceHelper",
          initializer=init_ops.constant_initializer(0.01)):
        cell = rnn_cell.LSTMCell(vocabulary_size)
        helper = helper_py.InferenceHelper(
            sample_fn, sample_shape=[cell_depth], sample_dtype=dtypes.bool,
            start_inputs=start_inputs, end_fn=end_fn,
            next_inputs_fn=next_inputs_fn)
        my_decoder = basic_decoder.BasicDecoder(
            cell=cell,
            helper=helper,
            initial_state=cell.zero_state(
                dtype=dtypes.float32, batch_size=batch_size))
      output_size = my_decoder.output_size
      output_dtype = my_decoder.output_dtype
      # sample_id is now a full vector per batch entry, hence cell_depth.
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(cell_depth, cell_depth),
          output_size)
      self.assertEqual(
          basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.bool),
          output_dtype)
      (first_finished, first_inputs, first_state) = my_decoder.initialize()
      (step_outputs, step_state, step_next_inputs,
       step_finished) = my_decoder.step(
           constant_op.constant(0), first_inputs, first_state)
      batch_size_t = my_decoder.batch_size
      self.assertTrue(isinstance(first_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(step_state, rnn_cell.LSTMStateTuple))
      self.assertTrue(
          isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
      self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_outputs[1].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
      self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          "batch_size": batch_size_t,
          "first_finished": first_finished,
          "first_inputs": first_inputs,
          "first_state": first_state,
          "step_outputs": step_outputs,
          "step_state": step_state,
          "step_next_inputs": step_next_inputs,
          "step_finished": step_finished
      })
      sample_ids = sess_results["step_outputs"].sample_id
      self.assertEqual(output_dtype.sample_id, sample_ids.dtype)
      expected_step_finished = sample_ids[:, end_token]
      expected_step_next_inputs = sample_ids.astype(np.float32)
      self.assertAllEqual(expected_step_finished,
                          sess_results["step_finished"])
      self.assertAllEqual(expected_step_next_inputs,
                          sess_results["step_next_inputs"])
# Run the full test suite when this file is executed as a script.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "84bea6c95ee530ef4a3af114fbd28cca",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 80,
"avg_line_length": 43.519817073170735,
"alnum_prop": 0.6371151353812743,
"repo_name": "brchiu/tensorflow",
"id": "b7f9f3fb090356a1c8d2bfb5044712ff93e267ce",
"size": "29238",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/seq2seq/python/kernel_tests/basic_decoder_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``surface.colorbar.tickformat`` property."""

    def __init__(
        self, plotly_name="tickformat", parent_name="surface.colorbar", **kwargs
    ):
        # Default the edit type to "calc" unless the caller supplied one.
        kwargs.setdefault("edit_type", "calc")
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "a8a0cac2dd6b8738fd8afebf96c4b3b7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 33,
"alnum_prop": 0.6153846153846154,
"repo_name": "plotly/plotly.py",
"id": "0f2bce4c26468ef3b3ba9a42284061cbe1493077",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/surface/colorbar/_tickformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_combined04.xlsx')
        # Chart XML elements whose defaults differ between writers.
        self.ignore_elements = {
            'xl/charts/chart1.xml': [
                '<c:dispBlanksAs', '<c:tickLblPos', '<c:crosses'
            ]
        }

    def test_create_file(self):
        """Compare a combined column/line chart with the Excel original."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Primary (column) chart and the line chart overlaid onto it.
        primary_chart = workbook.add_chart({'type': 'column'})
        overlay_chart = workbook.add_chart({'type': 'line'})

        columns = ([2, 7, 3, 6, 2], [20, 25, 10, 10, 20])
        worksheet.write_column('A1', columns[0])
        worksheet.write_column('B1', columns[1])

        primary_chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        overlay_chart.add_series({'values': '=Sheet1!$B$1:$B$5', 'y2_axis': 1})

        # Merge the line chart into the column chart before inserting.
        primary_chart.combine(overlay_chart)
        worksheet.insert_chart('E9', primary_chart)
        workbook.close()

        self.assertExcelEqual()
|
{
"content_hash": "4f6e099083a302f0c7c172438984dbd2",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 27.170212765957448,
"alnum_prop": 0.5379796397807362,
"repo_name": "jmcnamara/XlsxWriter",
"id": "a22a3dfc448df1d118afde60c045b2c05f477859",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_chart_combined04.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
# Optional response hook: called with (pipeline response, deserialized model,
# response headers) and may return a transformed result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Generated clients rely on service-side validation only.
_SERIALIZER.client_side_validation = False
def build_add_update_policy_for_tenant_request(**kwargs: Any) -> HttpRequest:
    """Build the PUT request that creates or updates the default tenant policy.

    Recognized keyword arguments: ``headers``, ``params``, ``api_version``,
    ``content_type`` and ``template_url``; anything else is forwarded to
    :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-10-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # URL and query string.
    url = kwargs.pop("template_url", "/providers/Microsoft.Subscription/policies/default")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers; Content-Type only when a body content type was supplied.
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_get_policy_for_tenant_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for the default subscription tenant policy."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-10-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # URL, query string and headers.
    url = kwargs.pop("template_url", "/providers/Microsoft.Subscription/policies/default")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_policy_for_tenant_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request that lists subscription tenant policies."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-10-01"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # URL, query string and headers.
    url = kwargs.pop("template_url", "/providers/Microsoft.Subscription/policies")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class SubscriptionPolicyOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.subscription.SubscriptionClient`'s
:attr:`subscription_policy` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @overload
    def add_update_policy_for_tenant(
        self, body: _models.PutTenantPolicyRequestProperties, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.GetTenantPolicyResponse:
        """Create or Update Subscription tenant policy for user's tenant.

        Overload taking a deserialized model body.

        :param body: The tenant policy settings to apply. Required.
        :type body: ~azure.mgmt.subscription.models.PutTenantPolicyRequestProperties
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetTenantPolicyResponse or the result of cls(response)
        :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def add_update_policy_for_tenant(
        self, body: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.GetTenantPolicyResponse:
        """Create or Update Subscription tenant policy for user's tenant.

        Overload taking a raw binary/stream body.

        :param body: The tenant policy settings as a byte stream. Required.
        :type body: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetTenantPolicyResponse or the result of cls(response)
        :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def add_update_policy_for_tenant(
        self, body: Union[_models.PutTenantPolicyRequestProperties, IO], **kwargs: Any
    ) -> _models.GetTenantPolicyResponse:
        """Create or Update Subscription tenant policy for user's tenant.

        :param body: Is either a model type or a IO type. Required.
        :type body: ~azure.mgmt.subscription.models.PutTenantPolicyRequestProperties or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetTenantPolicyResponse or the result of cls(response)
        :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map auth and conflict statuses to azure.core exceptions; callers may
        # extend/override via the "error_map" keyword.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GetTenantPolicyResponse]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Streams/bytes are sent verbatim; model bodies are serialized to JSON.
        if isinstance(body, (IO, bytes)):
            _content = body
        else:
            _json = self._serialize.body(body, "PutTenantPolicyRequestProperties")
        request = build_add_update_policy_for_tenant_request(
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.add_update_policy_for_tenant.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("GetTenantPolicyResponse", pipeline_response)
        if cls:
            # Hand raw response + deserialized model to the caller's hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    add_update_policy_for_tenant.metadata = {"url": "/providers/Microsoft.Subscription/policies/default"}  # type: ignore
@distributed_trace
def get_policy_for_tenant(self, **kwargs: Any) -> _models.GetTenantPolicyResponse:
    """Get the subscription tenant policy for the user's tenant.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: GetTenantPolicyResponse or the result of cls(response)
    :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map auth/not-found/conflict statuses to azure-core exception types;
    # callers may extend or override the mapping via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Per-call overrides fall back to the client-configured API version.
    api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.GetTenantPolicyResponse]

    request = build_get_policy_for_tenant_request(
        api_version=api_version,
        template_url=self.get_policy_for_tenant.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Any status other than 200 is surfaced as an ARM-formatted error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize("GetTenantPolicyResponse", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get_policy_for_tenant.metadata = {"url": "/providers/Microsoft.Subscription/policies/default"}  # type: ignore
@distributed_trace
def list_policy_for_tenant(self, **kwargs: Any) -> Iterable["_models.GetTenantPolicyResponse"]:
    """Get the subscription tenant policy for the user's tenant.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either GetTenantPolicyResponse or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.subscription.models.GetTenantPolicyResponse]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.GetTenantPolicyListResponse]

    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page uses the templated URL; later pages follow nextLink verbatim.
        if not next_link:
            request = build_list_policy_for_tenant_request(
                api_version=api_version,
                template_url=self.list_policy_for_tenant.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            request = HttpRequest("GET", next_link)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Turn one page response into (next_link, iterator of items) for ItemPaged.
        deserialized = self._deserialize("GetTenantPolicyListResponse", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_policy_for_tenant.metadata = {"url": "/providers/Microsoft.Subscription/policies"}  # type: ignore
|
{
"content_hash": "5f5f0f266130861ed97b295c73b99e4c",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 121,
"avg_line_length": 45.03785488958991,
"alnum_prop": 0.6628143167332072,
"repo_name": "Azure/azure-sdk-for-python",
"id": "92259a98793d7caa8f24419d42cb3d63cf75b7f7",
"size": "14777",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/subscription/azure-mgmt-subscription/azure/mgmt/subscription/operations/_subscription_policy_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponse, JsonResponse
from wagtail.utils.pagination import paginate
from wagtail.wagtailcore.models import Site
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.utils import PermissionPolicyChecker, permission_denied
from wagtail.wagtailsearch.backends import get_search_backends
from wagtail.wagtailimages.models import get_image_model, Filter
from wagtail.wagtailimages.forms import get_image_form, URLGeneratorForm
from wagtail.wagtailimages.permissions import permission_policy
from wagtail.wagtailimages.utils import generate_signature
from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
permission_checker = PermissionPolicyChecker(permission_policy)
@permission_checker.require_any('add', 'change', 'delete')
@vary_on_headers('X-Requested-With')
def index(request):
    """List images the user may change/delete, with search and pagination."""
    Image = get_image_model()

    # Only images the user holds change/delete rights on, newest first.
    images = permission_policy.instances_user_has_any_permission_for(
        request.user, ['change', 'delete']
    ).order_by('-created_at')

    # Apply the search box if a query was submitted.
    query_string = None
    if 'q' not in request.GET:
        form = SearchForm(placeholder=_("Search images"))
    else:
        form = SearchForm(request.GET, placeholder=_("Search images"))
        if form.is_valid():
            query_string = form.cleaned_data['q']
            images = images.search(query_string)

    paginator, images = paginate(request, images)

    context = {
        'images': images,
        'query_string': query_string,
        'is_searching': bool(query_string),
    }
    if request.is_ajax():
        # AJAX requests only need the results partial.
        return render(request, 'wagtailimages/images/results.html', context)

    context.update({
        'search_form': form,
        'popular_tags': Image.popular_tags(),
        'user_can_add': permission_policy.user_has_permission(request.user, 'add'),
    })
    return render(request, 'wagtailimages/images/index.html', context)
@permission_checker.require('change')
def edit(request, image_id):
    """Edit an image's metadata and optionally replace its source file."""
    Image = get_image_model()
    ImageForm = get_image_form(Image)

    image = get_object_or_404(Image, id=image_id)

    # Per-instance permission check on top of the decorator's blanket check.
    if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
        return permission_denied(request)

    if request.POST:
        # Keep a handle on the pre-save file so it can be cleaned up below.
        original_file = image.file
        form = ImageForm(request.POST, request.FILES, instance=image)
        if form.is_valid():
            if 'file' in form.changed_data:
                # if providing a new image file, delete the old one and all renditions.
                # NB Doing this via original_file.delete() clears the file field,
                # which definitely isn't what we want...
                original_file.storage.delete(original_file.name)
                image.renditions.all().delete()

                # Set new image file size
                image.file_size = image.file.size

            form.save()

            # Reindex the image to make sure all tags are indexed
            for backend in get_search_backends():
                backend.add(image)

            messages.success(request, _("Image '{0}' updated.").format(image.title), buttons=[
                messages.button(reverse('wagtailimages:edit', args=(image.id,)), _('Edit again'))
            ])
            return redirect('wagtailimages:index')
        else:
            messages.error(request, _("The image could not be saved due to errors."))
    else:
        form = ImageForm(instance=image)

    # Check if we should enable the frontend url generator
    # (only possible when the wagtailimages_serve URL route is installed).
    try:
        reverse('wagtailimages_serve', args=('foo', '1', 'bar'))
        url_generator_enabled = True
    except NoReverseMatch:
        url_generator_enabled = False

    if image.is_stored_locally():
        # Give error if image file doesn't exist
        if not os.path.isfile(image.file.path):
            messages.error(request, _(
                "The source image file could not be found. Please change the source or delete the image."
            ).format(image.title), buttons=[
                messages.button(reverse('wagtailimages:delete', args=(image.id,)), _('Delete'))
            ])

    return render(request, "wagtailimages/images/edit.html", {
        'image': image,
        'form': form,
        'url_generator_enabled': url_generator_enabled,
        'filesize': image.get_file_size(),
        'user_can_delete': permission_policy.user_has_permission_for_instance(
            request.user, 'delete', image
        ),
    })
def url_generator(request, image_id):
    """Render the signed-URL generator UI for a single image."""
    image = get_object_or_404(get_image_model(), id=image_id)

    if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
        return permission_denied(request)

    # Seed the form with the image's natural dimensions.
    initial = {
        'filter_method': 'original',
        'width': image.width,
        'height': image.height,
    }
    return render(request, "wagtailimages/images/url_generator.html", {
        'image': image,
        'form': URLGeneratorForm(initial=initial),
    })
def generate_url(request, image_id, filter_spec):
    """JSON endpoint: return a signed serve URL (plus preview URL) for an image."""
    # Get the image
    Image = get_image_model()
    try:
        image = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
        return JsonResponse({
            'error': "Cannot find image."
        }, status=404)

    # Check if this user has edit permission on this image
    if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
        return JsonResponse({
            'error': "You do not have permission to generate a URL for this image."
        }, status=403)

    # Parse the filter spec to make sure its valid
    try:
        Filter(spec=filter_spec).operations
    except InvalidFilterSpecError:
        return JsonResponse({
            'error': "Invalid filter spec."
        }, status=400)

    # Generate url (HMAC signature binds the image id to the filter spec)
    signature = generate_signature(image_id, filter_spec)
    url = reverse('wagtailimages_serve', args=(signature, image_id, filter_spec))

    # Get site root url; fall back to the first site when no default is flagged.
    try:
        site_root_url = Site.objects.get(is_default_site=True).root_url
    except Site.DoesNotExist:
        site_root_url = Site.objects.first().root_url

    # Generate preview url
    preview_url = reverse('wagtailimages:preview', args=(image_id, filter_spec))

    return JsonResponse({'url': site_root_url + url, 'preview_url': preview_url}, status=200)
def preview(request, image_id, filter_spec):
    """Render the image through *filter_spec*; respond 400 on a bad spec."""
    image = get_object_or_404(get_image_model(), id=image_id)

    try:
        response, image_format = Filter(spec=filter_spec).run(image, HttpResponse())
    except InvalidFilterSpecError:
        return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)

    response['Content-Type'] = 'image/' + image_format
    return response
@permission_checker.require('delete')
def delete(request, image_id):
    """Confirm (GET) or perform (POST) deletion of an image."""
    image = get_object_or_404(get_image_model(), id=image_id)

    if not permission_policy.user_has_permission_for_instance(request.user, 'delete', image):
        return permission_denied(request)

    # Non-POST requests get the confirmation page.
    if not request.POST:
        return render(request, "wagtailimages/images/confirm_delete.html", {
            'image': image,
        })

    image.delete()
    messages.success(request, _("Image '{0}' deleted.").format(image.title))
    return redirect('wagtailimages:index')
@permission_checker.require('add')
def add(request):
    """Upload a new image, reindexing it for search on success."""
    ImageModel = get_image_model()
    ImageForm = get_image_form(ImageModel)

    if not request.POST:
        # Plain GET: show an empty upload form.
        return render(request, "wagtailimages/images/add.html", {
            'form': ImageForm(),
        })

    image = ImageModel(uploaded_by_user=request.user)
    form = ImageForm(request.POST, request.FILES, instance=image)
    if not form.is_valid():
        messages.error(request, _("The image could not be created due to errors."))
        return render(request, "wagtailimages/images/add.html", {
            'form': form,
        })

    # Record the uploaded file's size before saving.
    image.file_size = image.file.size
    form.save()

    # Reindex so tags are searchable immediately.
    for backend in get_search_backends():
        backend.add(image)

    messages.success(request, _("Image '{0}' added.").format(image.title), buttons=[
        messages.button(reverse('wagtailimages:edit', args=(image.id,)), _('Edit'))
    ])
    return redirect('wagtailimages:index')
def usage(request, image_id):
    """Show a paginated list of objects that reference this image."""
    image = get_object_or_404(get_image_model(), id=image_id)
    paginator, usage_page = paginate(request, image.get_usage())

    return render(request, "wagtailimages/images/usage.html", {
        'image': image,
        'used_by': usage_page
    })
|
{
"content_hash": "9d340180e7108a6444601dbc35b3f4a7",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 105,
"avg_line_length": 35.06923076923077,
"alnum_prop": 0.642904145645975,
"repo_name": "JoshBarr/wagtail",
"id": "62a462a878d71af262a82d9f980c5fd02ae00488",
"size": "9118",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/wagtailimages/views/images.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152464"
},
{
"name": "HTML",
"bytes": "252508"
},
{
"name": "JavaScript",
"bytes": "94840"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1839807"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
}
|
import logging
from collections import defaultdict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils.memoized import memoized
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class LaunchImage(tables.LinkAction):
    """Row action linking to the instance-launch form, preseeded with the image."""
    name = "launch_image"
    verbose_name = _("Launch")
    url = "horizon:project:instances:launch"
    classes = ("btn-launch", "ajax-modal")

    def get_link_url(self, datum):
        # Preselect this image as the boot source in the launch workflow.
        params = urlencode({"source_type": "image_id",
                            "source_id": self.table.get_object_id(datum)})
        return "?".join([reverse(self.url), params])
class DeleteImage(tables.DeleteAction):
    """Delete action; only permitted for images the current project owns."""
    data_type_singular = _("Image")
    data_type_plural = _("Images")

    def allowed(self, request, image=None):
        # Table-level (bulk) invocation passes no image: allow the button
        # to appear and rely on per-row checks for actual rows.
        if not image:
            return True
        return image.owner == request.user.tenant_id

    def delete(self, request, obj_id):
        api.glance.image_delete(request, obj_id)
class CreateImage(tables.LinkAction):
    """Table action opening the image-creation modal."""
    name = "create"
    verbose_name = _("Create Image")
    attrs = {"data-toggle": "modal"}
    url = "horizon:project:images_and_snapshots:images:create"
    classes = ("ajax-modal", "btn-create")
class EditImage(tables.LinkAction):
    """Row action to edit an active image owned by the current project."""
    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:project:images_and_snapshots:images:update"
    classes = ("ajax-modal", "btn-edit")

    def allowed(self, request, image=None):
        # We don't have bulk editing, so if there isn't an image that's
        # authorized, don't allow the action.
        if not image:
            return False
        return image.status == "active" and image.owner == request.user.tenant_id
def filter_tenants():
    # Extra tenant filter buttons from settings ([] when the setting is absent).
    return getattr(settings, 'IMAGES_LIST_FILTER_TENANTS', [])
@memoized
def filter_tenant_ids():
    """Return the tenant ids configured in IMAGES_LIST_FILTER_TENANTS, as a list.

    A concrete list (not a lazy ``map`` object) is required here: @memoized
    caches the return value, and on Python 3 a cached map iterator would be
    exhausted after its first consumer, silently failing every later
    membership test.
    """
    return [ft['tenant'] for ft in filter_tenants()]
class OwnerFilter(tables.FixedFilterAction):
    # Fixed filter buttons for the images table:
    # project / configured tenants / shared-with-me / public.
    def get_fixed_buttons(self):
        def make_dict(text, tenant, icon):
            # Shorthand for a filter-button description dict.
            return dict(text=text, value=tenant, icon=icon)

        buttons = [make_dict('Project', 'project', 'icon-home')]
        # One extra button per tenant configured in IMAGES_LIST_FILTER_TENANTS;
        # its filter value is the tenant id itself.
        for button_dict in filter_tenants():
            new_dict = button_dict.copy()
            new_dict['value'] = new_dict['tenant']
            buttons.append(new_dict)
        buttons.append(make_dict('Shared with Me', 'shared', 'icon-share'))
        buttons.append(make_dict('Public', 'public', 'icon-fire'))
        return buttons

    def categorize(self, table, images):
        # Bucket images by filter category; one image may land in several buckets.
        user_tenant_id = table.request.user.tenant_id
        tenants = defaultdict(list)
        for im in images:
            categories = get_image_categories(im, user_tenant_id)
            for category in categories:
                tenants[category].append(im)
        return tenants
def get_image_categories(im, user_tenant_id):
    """Classify *im* into owner-filter categories; an image can match several."""
    categories = ['public'] if im.is_public else []

    # Ownership buckets are mutually exclusive: own project, a configured
    # filter tenant, or (for non-public images) "shared with me".
    if im.owner == user_tenant_id:
        categories.append('project')
    elif im.owner in filter_tenant_ids():
        categories.append(im.owner)
    elif not im.is_public:
        categories.append('shared')

    return categories
def get_image_type(image):
    """Image-type label from glance properties, defaulting to "Image"."""
    properties = getattr(image, "properties", {})
    return properties.get("image_type", _("Image"))
def get_format(image):
    """Return the image's disk format in upper case.

    Glance can report the format as ``None``, which would raise if
    ``.upper()`` were called unconditionally, hence the guard (``None`` is
    returned unchanged in that case).  The local no longer shadows the
    ``format`` builtin, and the stale comment that referenced
    ``container_format`` now matches the attribute actually read.
    """
    disk_format = getattr(image, "disk_format", "")
    if disk_format is None:
        return None
    return disk_format.upper()
class UpdateRow(tables.Row):
    # Row that refreshes itself over AJAX while the image status is transient.
    ajax = True

    def get_data(self, request, image_id):
        # Fetch the latest image record for an AJAX row refresh.
        image = api.glance.image_get(request, image_id)
        return image

    def load_cells(self, image=None):
        super(UpdateRow, self).load_cells(image)
        # Tag the row with the image category for client-side filtering.
        image = self.datum
        my_tenant_id = self.table.request.user.tenant_id
        image_categories = get_image_categories(image, my_tenant_id)
        for category in image_categories:
            self.classes.append('category-' + category)
class ImagesTable(tables.DataTable):
    # NOTE(review): per the horizon status_choices convention, the second
    # tuple element presumably marks terminal states (True = success,
    # False = failure, None = still in progress) -- confirm against
    # horizon.tables docs for this release.
    STATUS_CHOICES = (
        ("active", True),
        ("saving", None),
        ("queued", None),
        ("pending_delete", None),
        ("killed", False),
        ("deleted", False),
    )
    name = tables.Column("name",
                         link=("horizon:project:images_and_snapshots:"
                               "images:detail"),
                         verbose_name=_("Image Name"))
    image_type = tables.Column(get_image_type,
                               verbose_name=_("Type"),
                               filters=(filters.title,))
    status = tables.Column("status",
                           filters=(filters.title,),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)
    public = tables.Column("is_public",
                           verbose_name=_("Public"),
                           empty_value=False,
                           filters=(filters.yesno, filters.capfirst))
    disk_format = tables.Column(get_format, verbose_name=_("Format"))

    class Meta:
        name = "images"
        row_class = UpdateRow
        status_columns = ["status"]
        verbose_name = _("Images")
        # Hide the image_type column. Done this way so subclasses still get
        # all the columns by default.
        columns = ["name", "status", "public", "disk_format"]
        table_actions = (OwnerFilter, CreateImage, DeleteImage,)
        row_actions = (LaunchImage, EditImage, DeleteImage,)
        pagination_param = "image_marker"
|
{
"content_hash": "d82f7b2193aaf3045378149f18237f00",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 75,
"avg_line_length": 33.25414364640884,
"alnum_prop": 0.6050839009802292,
"repo_name": "Horace1117/MKTCloud",
"id": "36bd67ebe171692d5448c511c27d47bb487c4821",
"size": "6669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/images_and_snapshots/images/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47811"
},
{
"name": "HTML",
"bytes": "182268"
},
{
"name": "JavaScript",
"bytes": "379783"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "1721669"
},
{
"name": "Shell",
"bytes": "14710"
}
],
"symlink_target": ""
}
|
from phi import utils
from tensorbuilder import TensorBuilder
@TensorBuilder.RegisterMethod("tb")
def rnn_placeholders_from_state(self, zero_state, name="rnn_state"):
    """Mirror an RNN zero-state structure with feed placeholders.

    Recurses through (possibly nested) state tuples, producing a tuple of the
    same shape whose leaves are tf.placeholder tensors matching each state
    tensor's dtype and static shape.
    """
    # BUG FIX: this module referenced ``tf`` without ever importing
    # tensorflow (only phi.utils and TensorBuilder are imported at the top
    # of the file), so every call raised NameError.
    import tensorflow as tf

    if isinstance(zero_state, tuple):
        return tuple([self.rnn_placeholders_from_state(substate, name=name) for substate in zero_state])
    else:
        return tf.placeholder(zero_state.dtype, shape=zero_state.get_shape(), name=name)
@TensorBuilder.RegisterMethod("tb")
def rnn_state_feed_dict(self, placeholders, values):
    # Pair each flattened placeholder with the matching flattened state value.
    # NOTE(review): placeholders go through utils.flatten while values use
    # utils.flatten_list -- confirm both produce the same order and length,
    # otherwise the zip silently truncates/misaligns the feed dict.
    return dict(zip(utils.flatten(placeholders), utils.flatten_list(values)))
|
{
"content_hash": "153b57832b73ed6caa218e0c35b281a4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 104,
"avg_line_length": 41.357142857142854,
"alnum_prop": 0.7495682210708118,
"repo_name": "cgarciae/tensorbuilder",
"id": "ef343f40eda779a07b3aa8bab7e1247ed5a42a25",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorbuilder/patches/rnn_utilities_patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19472"
},
{
"name": "Shell",
"bytes": "920"
}
],
"symlink_target": ""
}
|
import pytest
import sncosmo
"""Test all of the builtins code.
This file is designed to test that all of the code that loads builtins works,
and only tests a single example for each kind of builtin. A separate file
(test_download_builtins.py) actually makes sure that all of the builtins can be
downloaded and loaded, but that is slow and requires a lot of downloading so it
isn't included as part of the standard test suite. If you add new builtins
they should be picked up automatically by that file.
You should only add a new test to this file if you created a new loader
function.
"""
@pytest.mark.might_download
def test_builtins_bessell():
    # Smoke test: locally bundled Bessell bandpass loader.
    sncosmo.get_bandpass('bessellb')
@pytest.mark.might_download
def test_builtins_remote_aa():
    # Smoke test: remote bandpass loader (wavelengths in Angstroms).
    sncosmo.get_bandpass('standard::u')
@pytest.mark.might_download
def test_builtins_remote_nm():
    # Smoke test: remote bandpass loader (wavelengths in nanometers).
    sncosmo.get_bandpass('kepler')
@pytest.mark.might_download
def test_builtins_remote_um():
    # Smoke test: remote bandpass loader (wavelengths in microns).
    sncosmo.get_bandpass('f070w')
@pytest.mark.might_download
def test_builtins_remote_wfc3():
    # Smoke test: HST WFC3 bandpass loader.
    sncosmo.get_bandpass('f098m')
@pytest.mark.might_download
def test_builtins_tophat_um():
    # Smoke test: top-hat bandpass loader (micron-based).
    sncosmo.get_bandpass('f1065c')
@pytest.mark.might_download
def test_builtins_megacampsf():
    # Smoke test: radially-dependent MegaCam bandpass loader (radius arg).
    sncosmo.get_bandpass('megacampsf::u', 0.)
@pytest.mark.might_download
def test_builtins_timeseries_ascii():
    # Smoke test: ASCII time-series source loader.
    sncosmo.get_source('nugent-sn1a')
@pytest.mark.might_download
def test_builtins_timeseries_fits():
    # Smoke test: FITS time-series source loader.
    sncosmo.get_source('hsiao')
@pytest.mark.might_download
def test_builtins_timeseries_fits_local():
    # Smoke test: locally bundled FITS time-series source loader.
    sncosmo.get_source('hsiao-subsampled')
@pytest.mark.might_download
def test_builtins_salt2model():
    # Smoke test: SALT2 model source loader.
    sncosmo.get_source('salt2')
@pytest.mark.might_download
def test_builtins_salt3model():
    # Smoke test: SALT3 model source loader.
    sncosmo.get_source('salt3')
@pytest.mark.might_download
def test_builtins_2011fe():
    # Smoke test: SNfactory SN 2011fe source loader.
    sncosmo.get_source('snf-2011fe')
@pytest.mark.might_download
def test_builtins_mlcs2k2():
    # Smoke test: MLCS2k2 source loader.
    sncosmo.get_source('mlcs2k2')
@pytest.mark.might_download
def test_builtins_snemo():
    # Smoke test: SNEMO source loader.
    sncosmo.get_source('snemo2')
@pytest.mark.might_download
def test_builtins_sugar():
    # Smoke test: SUGAR source loader.
    sncosmo.get_source('sugar')
@pytest.mark.might_download
def test_builtins_magsys_ab():
    # Smoke test: AB magnitude-system loader.
    sncosmo.get_magsystem('ab')
@pytest.mark.might_download
def test_builtins_magsys_fits():
    # Smoke test: FITS-spectrum magnitude-system loader.
    sncosmo.get_magsystem('vega')
@pytest.mark.might_download
def test_builtins_magsys_csp():
    # Smoke test: CSP magnitude-system loader.
    sncosmo.get_magsystem('csp')
@pytest.mark.might_download
def test_builtins_magsys_ab_b12():
    # Smoke test: offset-AB (Betoule et al. 2012) magnitude-system loader.
    sncosmo.get_magsystem('ab-b12')
@pytest.mark.might_download
def test_builtins_magsys_jla():
    # Smoke test: JLA magnitude-system loader.
    sncosmo.get_magsystem('jla1')
|
{
"content_hash": "10f92e356a794a84c208fa6e46a5d066",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7424242424242424,
"repo_name": "sncosmo/sncosmo",
"id": "a2b59cc361ec06cd27dcf0d0b36da37f3a2ef53c",
"size": "2640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sncosmo/tests/test_builtins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "8876"
},
{
"name": "Cython",
"bytes": "16363"
},
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "428095"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
}
|
import unittest
from td_query.td_query_base import td_query_instance as instance
from tools.settings import *
class TestExample(unittest.TestCase):
    """Skeleton test case demonstrating the unittest fixture hooks."""

    @classmethod
    def setUpClass(cls):
        # Runs once, before any test in this class.
        print("**************************************** setUpClass ****************************************")

    @classmethod
    def tearDownClass(cls):
        # Runs once, after every test in this class has finished.
        print("************************************** tearDownClass ***************************************")

    def setUp(self):
        # Runs before each individual test method.
        print("****** setUp *******")

    def tearDown(self):
        # Runs after each individual test method.
        print("***** tearDown *****")

    def _example(self):
        # NOTE: the leading underscore means unittest will NOT collect this
        # as a test; rename to test_example to enable it.
        print("This is a test example.")
        print(TIMEZONE)
        df = instance.query_td_sample()
        print(df)
|
{
"content_hash": "5343c492b3342a8504e8ccd52312765a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 109,
"avg_line_length": 27.037037037037038,
"alnum_prop": 0.4684931506849315,
"repo_name": "Ernestyj/PyStudy",
"id": "77b0f34f2567e578ecac69149a4c4382177bcfa8",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyProjectTemplate/scheduler/td_query/test/test_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "17511"
},
{
"name": "HTML",
"bytes": "192730"
},
{
"name": "Python",
"bytes": "333151"
}
],
"symlink_target": ""
}
|
__version__ = "PyExpression v1.0.2"
__author__ = "Daniel Sage"
# TODO: Add exponents
# TODO: Create basic equation solver w/ this class
class Part:
    """One binary operation in an expression tree.

    ``Part(1, 2, "+")`` represents ``1 + 2``.  Operands are coerced with
    float(), so they may themselves be Parts (or Expressions) -- nesting a
    Part resolves it immediately through its __float__, which is how
    multi-part expressions are composed.
    """

    def __init__(self, num1, num2, op):
        # float() collapses nested Parts here via their __float__.
        self.num1 = float(num1)
        self.num2 = float(num2)
        self.op = op

    def __float__(self):
        # Numeric view of this part: just solve it.
        return self.solve()

    def __str__(self):
        # "(num1 op num2)" rendering for debugging.
        return "(" + str(self.num1) + " " + str(self.op) + " " + str(self.num2) + ")"

    def solve(self):
        """Return the float value of this part.

        :raises ValueError: for an unsupported operator.  (Previously an
            unknown operator silently returned None, which surfaced later as
            a confusing TypeError in the caller's arithmetic.)
        """
        if self.op == "+":
            return self.num1 + self.num2
        if self.op == "-":
            return self.num1 - self.num2
        if self.op == "*":
            return self.num1 * self.num2
        if self.op == "/":
            return self.num1 / self.num2
        raise ValueError("unsupported operator: %r" % (self.op,))
class Expression:
    # Expression class that solves string expressions containing +, -, *, and /
    # in PEMDAS order (parentheses first, then * and /, then + and -).

    def __init__(self, text):
        # init with text and empty arrays, parse text into readable parts
        self.text = text
        self.solved = None       # cached result of the last solve()
        self.chars = []          # token list: number strings / op chars / Part objects
        self.needUpdate = True   # True while self.solved is stale
        self.beginParse(True)

    def beginParse(self, doParse):
        # Reset token state; optionally re-tokenize and re-parse immediately.
        self.needUpdate = True
        self.chars = []
        if doParse:
            self.parse()

    def set(self, newExp):
        # Swap in new expression text WITHOUT parsing it (beginParse(False));
        # the caller must invoke parse() before solve() will work.
        self.needUpdate = True
        self.text = newExp
        self.beginParse(False)

    def parse(self):
        # make the text into an array and then parse into parts
        self.makeChars()
        self.parsePara()
        self.parseOne()
        self.parseTwo()

    def makeChars(self):
        # makes array of every character in self.text
        # use buildingItem to create more-than-one-long numbers
        self.chars = []
        buildingItem = ""
        for i in range(0, len(self.text)):
            # b is True when the current character is an operator or paren.
            b = self.text[i] == "+" or self.text[i] == "-" or self.text[i] == "*" or self.text[i] == "/" or self.text[i] == "(" or self.text[i] == ")"
            if not b and self.text[i] != " ":
                buildingItem += self.text[i]
            else:
                self.chars.append(buildingItem)
                if self.text[i] != " ":
                    self.chars.append(self.text[i])
                buildingItem = ""
        self.chars.append(buildingItem)
        # Remove the empty strings left behind by consecutive delimiters.
        i = 0
        while i != len(self.chars):
            if self.chars[i] == "":
                self.chars.pop(i)
                i -= 1
            i += 1

    def parsePara(self):
        """
        Heart of the nesting evaluation
        :return: None
        """
        # Find the first "(", locate its matching ")" by depth counting,
        # evaluate the slice between them as a sub-Expression, and splice the
        # numeric result back into the token list.
        # NOTE(review): only ONE parenthesised group is collapsed per parse()
        # because of the trailing break -- confirm how inputs with several
        # sibling groups, e.g. "(1)+(2)", are expected to behave.
        for i in range(0, len(self.chars)):
            begin = 0
            end = 0
            if (self.chars[i] == "("):
                begin = i
                d = 1  # paren depth; 0 again means we found the matching ")"
                for x in range(i+1, len(self.chars)):
                    if (self.chars[x] == "("):
                        d += 1
                    if (self.chars[x] == ")"):
                        d -= 1
                    if d == 0:
                        end = x
                        break
                if (end != 0):
                    # Re-assemble the inner tokens into a string and solve it.
                    building = ""
                    for i in range(begin+1, end):
                        building += self.chars[i]
                    newExp = Expression(building)
                    # Pop "(" plus all inner tokens except the last one...
                    for i in range(0, end-begin-1):
                        self.chars.pop(begin)
                    # ...then the ")", leaving one slot for the result.
                    self.chars.pop(begin+1)
                    self.chars[begin] = newExp.solve()
                    break

    def parseOne(self):
        # First precedence pass: collapse * and / left-to-right into Parts.
        i = -1
        while i < len(self.chars)-1:
            i += 1
            if self.chars[i] == "*" or self.chars[i] == "/":
                next_token = Part(self.chars[i-1], self.chars[i+1], self.chars[i])
                # i = 1
                # [1, *, 3, +, 5]
                # [1, *, +, 5] (Pop i+1)
                # [1, +, 5] (Pop i)
                # [1*3, +, 5] (Set i-1)
                self.chars.pop(i+1)
                self.chars.pop(i)
                self.chars[i-1] = next_token
                i -= 1

    def parseTwo(self):
        # Second precedence pass: collapse + and - left-to-right into Parts.
        i = -1
        while i < len(self.chars) - 1:
            i += 1
            if self.chars[i] == "+" or self.chars[i] == "-":
                next_token = Part(self.chars[i - 1], self.chars[i + 1], self.chars[i])
                self.chars.pop(i + 1)
                self.chars.pop(i)
                self.chars[i - 1] = next_token
                i -= 1

    def stringChars(self):
        # Comma-joined debug rendering of the token list.
        building = ""
        for i in range(0, len(self.chars) - 1):
            building += str(self.chars[i])
            building += ", "
        building += str(self.chars[len(self.chars) - 1])
        return building

    def __str__(self):
        return "Chars: " + self.stringChars()

    def solve(self):
        # NOTE(review): after a full parse, chars[0] is expected to be a Part;
        # a bare single-number input (e.g. "5") leaves a plain string here and
        # .solve() would raise AttributeError -- confirm intended behaviour.
        if self.needUpdate:  # Using this we save time if we want to solve again and the exp hasn't been changed
            self.solved = self.chars[0].solve()
            self.needUpdate = False
        return self.solved

    def __float__(self):
        return self.solve()
if __name__ == "__main__":
    # Interactive smoke test: compare this parser's answer with Python's eval().
    # SECURITY NOTE: eval() executes arbitrary user input; acceptable only for
    # this local demo, never for untrusted input.
    expr = input("Enter an equation to solve: ")
    myExp = Expression(expr)
    print(myExp)
    evalSolve = eval(expr)
    expSolve = myExp.solve()
    print("Solved w/ eval:", evalSolve)
    print("Solved: ", expSolve)
    print("Match: ", evalSolve == expSolve)
|
{
"content_hash": "459d7ce3ebf18b7f372c9f3c0b688169",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 150,
"avg_line_length": 28.251184834123222,
"alnum_prop": 0.4742492870323771,
"repo_name": "sagedanielr/PyExpression",
"id": "748e62964e1b675d03d26699106958185f29f5db",
"size": "5961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyExpression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6277"
}
],
"symlink_target": ""
}
|
import sys

from .api import PrettyDir

__author__ = 'laike9m <laike9m@gmail.com>'

# Replace this module object in sys.modules with the PrettyDir class itself,
# so that ``import pdir; pdir(obj)`` calls PrettyDir directly.
sys.modules[__name__] = PrettyDir  # type: ignore
|
{
"content_hash": "d1240c3b83bd3f92d913cbb08b415bc5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.6917293233082706,
"repo_name": "laike9m/pdir2",
"id": "8e8185621156de62a3a820c575c73d14e7c6dbf9",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdir/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "537"
},
{
"name": "Python",
"bytes": "67568"
}
],
"symlink_target": ""
}
|
import csv
import datetime
import hashlib
import importlib
import logging
import md5
import os
import time

from celery.task import task
from django.conf import settings
from django.utils.encoding import smart_str

from django_bulk_export.constants import *
from django_bulk_export.models import *
@task
def execute(task_name, params, url, cache_func, user_id, attachment_filename=''):
    """
    Wrapper for the actual export tasks.

    Resolves ``task_name`` to a callable, runs it with ``params``, and writes
    the returned rows to a cached CSV file, recording progress in a
    TaskAuthentication log entry.

    :param task_name: dotted path of the function producing the row data
    :param params: argument object forwarded unchanged to the task function
    :param url: request URL used to derive the cache file name
    :param cache_func: optional dotted path of a custom cache-naming function
    :param user_id: id of the requesting user (keys the task log)
    :param attachment_filename: optional download filename stored on the log
    :return: path of the CSV file, or the task's error dict on failure
    """
    task_log = get_or_create_tasklog(user_id, execute.request.id, attachment_filename)
    task_log.update_fields(status=TASK_RUNNING)
    path = get_file_path(url, params, cache_func)

    if os.path.isfile(path) and not is_file_expired(path):
        # A fresh export already exists on disk -- reuse it instead of
        # re-running the (potentially expensive) task function.
        task_log.update_fields(status=TASK_SUCCESSFUL, filepath=path,
                               completion_date=datetime.datetime.now())
        return path

    task_func = get_task_func(task_name)
    logging.debug("Executing task : %s" % task_name)
    content_data = task_func(params)

    if isinstance(content_data, dict):  # Task failed due to some error
        task_log.update_fields(status=TASK_FAILED)
        return content_data

    dir_name = os.path.dirname(path)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)

    # BUG FIX: the CSV file handle was previously opened and never closed;
    # a context manager guarantees it is flushed and released before the
    # task log is marked successful.
    with open(path, 'wb') as csv_file:
        writer = csv.writer(csv_file)
        for cdata in content_data:
            writer.writerow([smart_str(cd) if cd else None for cd in cdata])

    task_log.update_fields(status=TASK_SUCCESSFUL, filepath=path,
                           completion_date=datetime.datetime.now())
    return path
def get_task_func(task_name):
    """Resolve a dotted-path name (e.g. ``pkg.mod.func``) to the callable.

    The final path component is looked up as an attribute of the module
    named by the preceding components.
    """
    module_path, _, attr_name = task_name.rpartition(".")
    module = __import__(module_path, fromlist=[attr_name])
    return getattr(module, attr_name)
def get_file_path(url, params, cache_func):
    """Build the CSV cache-file path for an export request.

    When ``cache_func`` is given it is the dotted path of a function that
    derives the file name from ``(url, params)``; otherwise a timestamped
    md5 of the url is used.
    TODO: Need to fix the file name computation.
    """
    if cache_func:
        module_path, _, attr_name = cache_func.rpartition(".")
        naming_module = __import__(module_path, fromlist=[attr_name])
        naming_func = getattr(naming_module, attr_name)
        filename = naming_func(url, params)
    else:
        filename = get_default_cache_name(url)
    return os.path.join(BULK_EXPORT_DIR, "%s.csv" % filename)
def get_default_cache_name(url):
    """Return a default cache-file name: md5 hex digest of url + timestamp.

    Uses :mod:`hashlib` instead of the deprecated ``md5`` module (removed
    in Python 3); ``hashlib.md5`` produces the identical digest.
    """
    import hashlib
    stamp = datetime.datetime.now().strftime("%m %d %Y %H %M %S")
    # assumes url is ascii/utf-8 text — TODO confirm against callers
    return hashlib.md5((url + stamp).encode("utf-8")).hexdigest()
def is_file_expired(path):
    """Return True when the file at ``path`` is older than the export TTL.

    A file counts as expired when its mtime is more than
    ``settings.BULKEXPORT_EXPIRES`` seconds in the past, and also when it
    cannot be stat'ed at all (e.g. it does not exist).
    """
    try:
        file_timestamp = os.path.getmtime(path)
        now_timestamp = time.mktime(datetime.datetime.now().timetuple())
        if settings.BULKEXPORT_EXPIRES > (now_timestamp - file_timestamp):
            return False
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; any stat/config failure still
        # means "expired".
        pass
    return True
def get_or_create_tasklog(user_id, task_id, attachment_filename=None):
    """Fetch (or create) the TaskAuthentication log row for a user's task."""
    extra_fields = {}
    if attachment_filename:
        extra_fields['attachment_filename'] = attachment_filename
    record, _created = TaskAuthentication.objects.get_or_create(
        user_id=user_id, task_id=task_id, defaults=extra_fields)
    return record
def get_user_id(request):
    """Return the primary key of the request's user as a string."""
    return "{0}".format(request.user.pk)
|
{
"content_hash": "5f8eca5e01525ea5826fcb4095538fed",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 113,
"avg_line_length": 34.5,
"alnum_prop": 0.64563796533106,
"repo_name": "Thinktiv/django-bulk-export",
"id": "bec9407a4fc1f206ff6180dbb54f1bd75ec123f2",
"size": "3545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_bulk_export/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "13868"
},
{
"name": "Python",
"bytes": "17616"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals, absolute_import
from agstools._helpers import create_argument_groups, execute_args, format_output_path
from ._helpers import open_map_document
def create_parser_save_copy(parser):
    """Register the ``copy`` sub-command on *parser*.

    The sub-command saves a copy of an ArcGIS map document, optionally in a
    different output version, dispatching to :func:`save_a_copy`.
    """
    copy_parser = parser.add_parser("copy", add_help = False,
        help = "Saves a copy of an ArcGIS map document, optionally in a different output version.")
    copy_parser.set_defaults(func = execute_args, lib_func = save_a_copy)

    required, optional, _flags = create_argument_groups(copy_parser)
    required.add_argument("-m", "--mxd", required = True,
        help = "File path to the map document (*.mxd) to copy.")
    required.add_argument("-o", "--output", required = True,
        help = "The path on which to save the copy of the map document.")
    optional.add_argument("-v", "--version", type=str, choices=["8.3", "9.0", "9.2", "9.3", "10.0", "10.1", "10.3"],
        help = "The output version number for the saved copy.")
def save_a_copy(mxd, output_path, version = None):
    """Save a copy of the map document *mxd* at *output_path*.

    :param mxd: file path of the source map document (*.mxd).
    :param output_path: destination path (normalized by format_output_path).
    :param version: optional ArcGIS output version string; when None the
        document is saved in its current version.
    """
    # arcpy is imported lazily: it is only available inside an ArcGIS
    # Python environment.
    import arcpy

    output_path = format_output_path(output_path)

    print("Opening map document: {0}".format(mxd))
    # Renamed from 'map' so the builtin is not shadowed.
    map_document = open_map_document(mxd)

    print("Saving a copy to: {0}".format(output_path))
    if version is None:  # BUG FIX: identity test, not '== None' (PEP 8)
        map_document.saveACopy(output_path)
    else:
        map_document.saveACopy(output_path, version)

    print("Done.")
|
{
"content_hash": "4c786813371faa59b9d9bda049e88a36",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 117,
"avg_line_length": 38.611111111111114,
"alnum_prop": 0.658273381294964,
"repo_name": "DavidWhittingham/agstools",
"id": "d7bf7948908c5859cb52ef18a94158708a120c65",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "agstools/arcpyext/_copy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "930"
},
{
"name": "Python",
"bytes": "80630"
}
],
"symlink_target": ""
}
|
'''
Created on Oct 20, 2015
@author: ahmadjaved.se@gmail.com
'''
from math import ceil
from sqlalchemy import func
from .exceptions import PageNotAnInteger, EmptyPage
class Paginator(object):
    """
    Paginate a SQLAlchemy query, fetching one page of rows at a time.

    Instead of loading every matching record at once, this class fetches a
    fixed number of records (``per_page_limit``) per page, keeping memory
    usage bounded for large result sets.

    The total record count is obtained with an optimized ``COUNT`` query
    derived from ``query_set`` (its ORDER BY is stripped), unless a
    separate ``optional_count_query_set`` is supplied.

    ..usage::

        >>> from sqlalchemy_paginator import Paginator
        >>> query = session.query(MyModel)
        >>> paginator = Paginator(query, 5)
        >>> for page in paginator:
        >>>     print page.number       # page number of the current page
        >>>     print page.object_list  # records of the current page

        >>> page = paginator.page(page_number)
        >>> print page.paginator.count        # total number of records
        >>> print page.paginator.total_pages  # total number of pages
        >>> print page.paginator.pages_range  # page numbers as a list
        >>> print page.start_index            # index of first object on page
        >>> print page.end_index              # index of last object on page
        >>> if page.has_previous():
        >>>     print page.previous_page_number
        >>> if page.has_next():
        >>>     print page.next_page_number
    """
    def __init__(self, query_set, per_page_limit, optional_count_query_set=None,
                 allow_empty_first_page=True):
        """
        :param query_set: SQLAlchemy query used to fetch page data.
        :type query_set: SQLAlchemy query object.
        :param per_page_limit: required number of records in a page.
        :type per_page_limit: int.
        :param optional_count_query_set: optional SQLAlchemy query used only
                                         to compute the total record count;
                                         when omitted an optimized copy of
                                         ``query_set`` is used instead.
        :type optional_count_query_set: SQLAlchemy query object.
        :param allow_empty_first_page: when True (default) requesting page 1
                                       of an empty result set yields an
                                       empty page instead of raising
                                       EmptyPage.
        :type allow_empty_first_page: bool.
        """
        self.query_set = query_set
        self.per_page_limit = per_page_limit
        self.optional_count_query_set = optional_count_query_set
        self.allow_empty_first_page = allow_empty_first_page
        # Lazily-computed caches for count / total_pages.
        self.__total_pages = self.__count = None
        self.__iter_page = 1
    def __iter__(self):
        """Reset the iteration cursor and return the iterator object."""
        self.__iter_page = 1
        return self
    def next(self):
        """Return the next Page; raise StopIteration past the last page."""
        if self.__iter_page > self.total_pages:
            raise StopIteration
        page = self.page(self.__iter_page)
        self.__iter_page += 1
        return page
    # BUG FIX: Python 3 drives iteration through __next__, not next().
    # Alias it so ``for page in paginator`` works on both Python 2 and 3.
    __next__ = next
    def validate_page_number(self, page_number):
        """
        Validate ``page_number`` and return it as an int.

        A valid page number is an integer >= 1 that does not exceed
        ``total_pages`` (page 1 is always accepted when
        ``allow_empty_first_page`` is set).

        :param page_number: requested page number.
        :type page_number: int.
        :return: the validated page number.
        :rtype: int.
        ..warning::
            This function can raise the following exceptions
            - PageNotAnInteger
            - EmptyPage
        """
        try:
            page_number = int(page_number)
        except ValueError:
            raise PageNotAnInteger('That page number is not an integer')
        if page_number < 1:
            raise EmptyPage('That page number is less than 1')
        if page_number > self.total_pages:
            if page_number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page_number
    def page(self, page_number):
        """
        Return a :class:`Page` holding the records of ``page_number``.

        :param page_number: requested page number.
        :type page_number: int.
        :return: Page object containing the records of the page.
        :rtype: Page.
        ..seealso::
            - Page class
            - Paginator.validate_page_number()
        ..warning::
            This function can raise the following exceptions
            - PageNotAnInteger
            - EmptyPage
        """
        page_number = self.validate_page_number(page_number)
        offset = (page_number - 1) * self.per_page_limit
        return Page(self.query_set.offset(offset).limit(self.per_page_limit).all(),
                    page_number, self)
    def __get_count(self):
        """
        Return (and cache) the total number of objects across all pages.

        :return: total number of records against the given query.
        :rtype: int.
        ..info::
            Uses ``optional_count_query_set`` when provided, otherwise an
            ORDER-BY-free copy of ``query_set`` wrapped in ``COUNT``.
        """
        if self.__count is None:
            if self.optional_count_query_set is None:
                # Dropping ORDER BY lets the database run a cheaper COUNT.
                self.optional_count_query_set = self.query_set.order_by(None)
            count_query = self.optional_count_query_set.statement.with_only_columns([func.count()])
            self.__count = self.optional_count_query_set.session.execute(count_query).scalar()
        return self.__count
    count = property(__get_count)
    def __get_total_pages(self):
        """
        Return (and cache) the total number of pages.

        :return: total number of pages against the given query.
        :rtype: int.
        ..info::
            When the record count is zero and ``allow_empty_first_page``
            is set, 1 is returned instead of 0.
        """
        if self.__total_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self.__total_pages = 0
            else:
                hits = max(1, self.count)
                self.__total_pages = int(ceil(hits / float(self.per_page_limit)))
        return self.__total_pages
    total_pages = property(__get_total_pages)
    def __pages_range(self):
        """
        Return the page numbers as a list, e.g. ``[1, 2, 3]``.

        BUG FIX: materialized with ``list()`` so Python 3 callers get the
        list the docs promise rather than a lazy ``range`` object
        (Python 2's ``range`` already returned a list).
        """
        return list(range(1, self.total_pages + 1))
    pages_range = property(__pages_range)
class Page(object):
    """One page of results produced by :class:`Paginator`.

    Mirrors Django's ``Page`` class: holds the page's object list, its
    1-based page number and a back-reference to the owning paginator.
    """
    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator
    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.total_pages)
    def has_next(self):
        """True when at least one page follows this one."""
        return self.number < self.paginator.total_pages
    def has_previous(self):
        """True when at least one page precedes this one."""
        return self.number > 1
    def has_other_pages(self):
        """True when this is not the only page."""
        return self.has_previous() or self.has_next()
    @property
    def next_page_number(self):
        return self.number + 1
    @property
    def previous_page_number(self):
        return self.number - 1
    @property
    def start_index(self):
        """1-based index of the first object on this page, relative to all
        objects in the paginator (0 when there are no objects)."""
        if self.paginator.count == 0:
            return 0
        return (self.number - 1) * self.paginator.per_page_limit + 1
    @property
    def end_index(self):
        """1-based index of the last object on this page, relative to all
        objects found."""
        if self.number == self.paginator.total_pages:
            # The last page may be only partially filled.
            return self.paginator.count
        return self.number * self.paginator.per_page_limit
|
{
"content_hash": "c734059beba9bab326fefe59f1aaa34e",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 99,
"avg_line_length": 39.6425855513308,
"alnum_prop": 0.5857471705352004,
"repo_name": "prikevs/pagination-sqlalchemy",
"id": "6a20c468d02715a30a673507c2ae82bd9575d2b8",
"size": "10426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypaginator/paginator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14833"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
# Immutable name/value pair emitted by metric collectors.
Metric = collections.namedtuple('Metric', ('name', 'value'))
|
{
"content_hash": "79ebf829e9a98b7fb736f8f820a8c9d7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.7407407407407407,
"repo_name": "ucarion/git-code-debt",
"id": "2ea0ac4f6c9215bd554d3606c122d598711b5390",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git_code_debt/metric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "HTML",
"bytes": "1617"
},
{
"name": "JavaScript",
"bytes": "5161"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Mako",
"bytes": "5510"
},
{
"name": "Python",
"bytes": "135268"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
from google.ads.googleads.v12.enums.types import conversion_action_category
from google.ads.googleads.v12.enums.types import (
conversion_value_rule_set_status,
)
from google.ads.googleads.v12.enums.types import value_rule_set_attachment_type
from google.ads.googleads.v12.enums.types import value_rule_set_dimension
__protobuf__ = proto.module(
package="google.ads.googleads.v12.resources",
marshal="google.ads.googleads.v12",
manifest={"ConversionValueRuleSet",},
)
# NOTE: auto-generated proto-plus message wrapper; field numbers below must
# match the .proto definition — do not renumber or edit by hand.
class ConversionValueRuleSet(proto.Message):
    r"""A conversion value rule set
    Attributes:
        resource_name (str):
            Immutable. The resource name of the conversion value rule
            set. Conversion value rule set resource names have the form:
            ``customers/{customer_id}/conversionValueRuleSets/{conversion_value_rule_set_id}``
        id (int):
            Output only. The ID of the conversion value
            rule set.
        conversion_value_rules (Sequence[str]):
            Resource names of rules within the rule set.
        dimensions (Sequence[google.ads.googleads.v12.enums.types.ValueRuleSetDimensionEnum.ValueRuleSetDimension]):
            Defines dimensions for Value Rule conditions.
            The condition types of value rules within this
            value rule set must be of these dimensions. The
            first entry in this list is the primary
            dimension of the included value rules. When
            using value rule primary dimension segmentation,
            conversion values will be segmented into the
            values adjusted by value rules and the original
            values, if some value rules apply.
        owner_customer (str):
            Output only. The resource name of the conversion value rule
            set's owner customer. When the value rule set is inherited
            from a manager customer, owner_customer will be the resource
            name of the manager whereas the customer in the
            resource_name will be of the requesting serving customer.
            \*\* Read-only \*\*
        attachment_type (google.ads.googleads.v12.enums.types.ValueRuleSetAttachmentTypeEnum.ValueRuleSetAttachmentType):
            Immutable. Defines the scope where the
            conversion value rule set is attached.
        campaign (str):
            The resource name of the campaign when the
            conversion value rule set is attached to a
            campaign.
        status (google.ads.googleads.v12.enums.types.ConversionValueRuleSetStatusEnum.ConversionValueRuleSetStatus):
            Output only. The status of the conversion value rule set.
            \*\* Read-only \*\*
        conversion_action_categories (Sequence[google.ads.googleads.v12.enums.types.ConversionActionCategoryEnum.ConversionActionCategory]):
            Immutable. The conversion action categories
            of the conversion value rule set.
    """
    # Proto field wiring (field numbers are wire-format identifiers).
    resource_name = proto.Field(proto.STRING, number=1,)
    id = proto.Field(proto.INT64, number=2,)
    conversion_value_rules = proto.RepeatedField(proto.STRING, number=3,)
    dimensions = proto.RepeatedField(
        proto.ENUM,
        number=4,
        enum=value_rule_set_dimension.ValueRuleSetDimensionEnum.ValueRuleSetDimension,
    )
    owner_customer = proto.Field(proto.STRING, number=5,)
    attachment_type = proto.Field(
        proto.ENUM,
        number=6,
        enum=value_rule_set_attachment_type.ValueRuleSetAttachmentTypeEnum.ValueRuleSetAttachmentType,
    )
    campaign = proto.Field(proto.STRING, number=7,)
    status = proto.Field(
        proto.ENUM,
        number=8,
        enum=conversion_value_rule_set_status.ConversionValueRuleSetStatusEnum.ConversionValueRuleSetStatus,
    )
    conversion_action_categories = proto.RepeatedField(
        proto.ENUM,
        number=9,
        enum=conversion_action_category.ConversionActionCategoryEnum.ConversionActionCategory,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
|
{
"content_hash": "2bbd437058f5cc11d11bd61b32aa3382",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 140,
"avg_line_length": 44.2967032967033,
"alnum_prop": 0.6871743984123047,
"repo_name": "googleads/google-ads-python",
"id": "2c0b0894bef6592f4af9a29a43e84b5c8c32dd4e",
"size": "4631",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/resources/types/conversion_value_rule_set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
import CommonModule
import TimestampModule
import SnapshotModule
import TargetsModule
def Bundle(num_of_ecus=1):
    '''Director signs fresh top-level timestamp, snapshot, and targets metadata.'''
    # The top-level targets role signs a target, together with custom
    # metadata, for each ECU.  It uses a single signing key and has no
    # delegations whatsoever.
    targets_size = TargetsModule.TargetsMetadata(
        custom=True,
        num_of_keys=1,
        num_of_targets=num_of_ecus,
        num_of_delegations=0,
        num_of_keys_per_delegation=0,
        num_of_paths_per_delegation=0,
        num_of_roles_per_delegation=0)
    bundle = (targets_size
              + SnapshotModule.SnapshotMetadata()
              + TimestampModule.TimestampMetadata())
    CommonModule.log('# of ECUs', num_of_ecus, unit=' ECUs')
    CommonModule.log('Bundle', bundle)
    return bundle
if __name__ == '__main__':
    # Worst-case scenario.
    # Presumably times the ISO-TP-framed transfer of a 100-ECU Director
    # bundle — confirm semantics of CommonModule.time/iso_tp_overhead.
    CommonModule.time(CommonModule.iso_tp_overhead(Bundle(num_of_ecus=100)))
|
{
"content_hash": "4770b5e5a6a55ee4c5fcfca10055d748",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 36.3,
"alnum_prop": 0.6262626262626263,
"repo_name": "trishankkarthik/sizeof-uptane",
"id": "eda2137275022de11d2ac131c143a334d487e44a",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DirectorRepository.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20348"
}
],
"symlink_target": ""
}
|
import os
import configobj
# Start with an empty config so interactive interpreters can import this
# module; it is replaced below when a config file is found.
config = configobj.ConfigObj()

# Candidate config locations, highest priority first.
_candidates = (os.environ.get("MACHETE_CONFIG", ""),
               "machete.cfg",
               "/etc/machete.cfg")
for _path in _candidates:
    if os.path.isfile(_path):
        config = configobj.ConfigObj(_path)
        break
else:
    # The for/else fires only when no candidate file existed.
    raise Exception("Config file not found")
|
{
"content_hash": "9638725507f61458ff2f941bdcdab97e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 22.76923076923077,
"alnum_prop": 0.6655405405405406,
"repo_name": "rustyrazorblade/machete",
"id": "f58392c5b7de58dfaf6ae36d78d9a7d34f2e075d",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machete/base/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "231"
},
{
"name": "Groovy",
"bytes": "59"
},
{
"name": "JavaScript",
"bytes": "62646"
},
{
"name": "Python",
"bytes": "39017"
},
{
"name": "Ruby",
"bytes": "1143"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make the Service output columns nullable
    and give the JSON columns an empty-dict default."""
    dependencies = [
        ('service_builder', '0004_auto_20180402_1354'),
    ]
    operations = [
        # NOTE(review): default={} on JSONField shares one mutable dict
        # across rows — Django normally recommends default=dict; kept as-is
        # because generated migrations must not be edited retroactively.
        migrations.AlterField(
            model_name='service',
            name='arguments',
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
        ),
        migrations.AlterField(
            model_name='service',
            name='output_css',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='output_html',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='output_js',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='queries',
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
        ),
    ]
|
{
"content_hash": "35f6fe980865f5c0496a4b880ce3f71d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5656108597285068,
"repo_name": "dipapaspyros/bdo_platform",
"id": "c268d548007ff62ff8165f3c63e754a2ce17fc63",
"size": "1176",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "service_builder/migrations/0005_auto_20180402_1356.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176900"
},
{
"name": "HTML",
"bytes": "69066"
},
{
"name": "JavaScript",
"bytes": "10644123"
},
{
"name": "Python",
"bytes": "195457"
},
{
"name": "XSLT",
"bytes": "1521"
}
],
"symlink_target": ""
}
|
import netaddr
import six
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base_routers as base
from neutron.tests.api import clients
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class RoutersTest(base.BaseRouterTest):
    @classmethod
    def resource_setup(cls):
        # Class-wide setup: skip the whole test class when the 'router'
        # network extension is not enabled in the target cloud.
        super(RoutersTest, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            msg = "router extension not enabled."
            raise cls.skipException(msg)
        admin_manager = clients.AdminManager()
        cls.identity_admin_client = admin_manager.identity_client
        # Pick the tenant CIDR that matches the IP version under test.
        cls.tenant_cidr = (CONF.network.tenant_network_cidr
                           if cls._ip_version == 4 else
                           CONF.network.tenant_network_v6_cidr)
    @test.attr(type='smoke')
    @test.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
    def test_create_show_list_update_delete_router(self):
        """CRUD smoke test: create, show, list and update a router
        (deletion happens via the registered cleanup)."""
        # Create a router
        # NOTE(salv-orlando): Do not invoke self.create_router
        # as we need to check the response code
        name = data_utils.rand_name('router-')
        create_body = self.client.create_router(
            name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False)
        self.addCleanup(self._delete_router, create_body['router']['id'])
        self.assertEqual(create_body['router']['name'], name)
        self.assertEqual(
            create_body['router']['external_gateway_info']['network_id'],
            CONF.network.public_network_id)
        self.assertFalse(create_body['router']['admin_state_up'])
        # Show details of the created router
        show_body = self.client.show_router(create_body['router']['id'])
        self.assertEqual(show_body['router']['name'], name)
        self.assertEqual(
            show_body['router']['external_gateway_info']['network_id'],
            CONF.network.public_network_id)
        self.assertFalse(show_body['router']['admin_state_up'])
        # List routers and verify if created router is there in response
        list_body = self.client.list_routers()
        routers_list = list()
        for router in list_body['routers']:
            routers_list.append(router['id'])
        self.assertIn(create_body['router']['id'], routers_list)
        # Update the name of router and verify if it is updated
        updated_name = 'updated ' + name
        update_body = self.client.update_router(create_body['router']['id'],
                                                name=updated_name)
        self.assertEqual(update_body['router']['name'], updated_name)
        # Re-show to confirm the rename persisted server-side.
        show_body = self.client.show_router(
            create_body['router']['id'])
        self.assertEqual(show_body['router']['name'], updated_name)
    @test.attr(type='smoke')
    @test.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
    def test_create_router_setting_tenant_id(self):
        # Test creating router from admin user setting tenant_id.
        # A fresh tenant is created first so the router can be attributed
        # to a tenant other than the admin's own.
        test_tenant = data_utils.rand_name('test_tenant_')
        test_description = data_utils.rand_name('desc_')
        tenant = self.identity_admin_client.create_tenant(
            name=test_tenant, description=test_description)
        tenant_id = tenant['id']
        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
        name = data_utils.rand_name('router-')
        create_body = self.admin_client.create_router(name,
                                                      tenant_id=tenant_id)
        self.addCleanup(self.admin_client.delete_router,
                        create_body['router']['id'])
        # The router must belong to the explicitly supplied tenant.
        self.assertEqual(tenant_id, create_body['router']['tenant_id'])
@test.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_create_router_with_default_snat_value(self):
# Create a router with default snat rule
name = data_utils.rand_name('router')
router = self._create_router(
name, external_network_id=CONF.network.public_network_id)
self._verify_router_gateway(
router['id'], {'network_id': CONF.network.public_network_id,
'enable_snat': True})
    @test.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_create_router_with_snat_explicit(self):
        """Create routers with enable_snat explicitly False and True and
        verify the setting is honored."""
        name = data_utils.rand_name('snat-router')
        # Create a router enabling snat attributes
        enable_snat_states = [False, True]
        for enable_snat in enable_snat_states:
            external_gateway_info = {
                'network_id': CONF.network.public_network_id,
                'enable_snat': enable_snat}
            create_body = self.admin_client.create_router(
                name, external_gateway_info=external_gateway_info)
            self.addCleanup(self.admin_client.delete_router,
                            create_body['router']['id'])
            # Verify snat attributes after router creation
            self._verify_router_gateway(create_body['router']['id'],
                                        exp_ext_gw_info=external_gateway_info)
    @test.attr(type='smoke')
    @test.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
    def test_add_remove_router_interface_with_subnet_id(self):
        """Attach a router interface by subnet id and verify the created
        port belongs to the router (removal happens via cleanup)."""
        network = self.create_network()
        subnet = self.create_subnet(network)
        router = self._create_router(data_utils.rand_name('router-'))
        # Add router interface with subnet id
        interface = self.client.add_router_interface_with_subnet_id(
            router['id'], subnet['id'])
        self.addCleanup(self._remove_router_interface_with_subnet_id,
                        router['id'], subnet['id'])
        self.assertIn('subnet_id', interface.keys())
        self.assertIn('port_id', interface.keys())
        # Verify router id is equal to device id in port details
        show_port_body = self.client.show_port(
            interface['port_id'])
        self.assertEqual(show_port_body['port']['device_id'],
                         router['id'])
    @test.attr(type='smoke')
    @test.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
    def test_add_remove_router_interface_with_port_id(self):
        """Attach a router interface using a pre-created port and verify
        the port's device_id becomes the router's id."""
        network = self.create_network()
        self.create_subnet(network)
        router = self._create_router(data_utils.rand_name('router-'))
        port_body = self.client.create_port(
            network_id=network['id'])
        # add router interface to port created above
        interface = self.client.add_router_interface_with_port_id(
            router['id'], port_body['port']['id'])
        self.addCleanup(self._remove_router_interface_with_port_id,
                        router['id'], port_body['port']['id'])
        self.assertIn('subnet_id', interface.keys())
        self.assertIn('port_id', interface.keys())
        # Verify router id is equal to device id in port details
        show_port_body = self.client.show_port(
            interface['port_id'])
        self.assertEqual(show_port_body['port']['device_id'],
                         router['id'])
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
show_body = self.admin_client.show_router(router_id)
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
for k, v in six.iteritems(exp_ext_gw_info):
self.assertEqual(v, actual_ext_gw_info[k])
    def _verify_gateway_port(self, router_id):
        # Exactly one gateway port must exist for the router on the public
        # network, carrying at least one fixed IP on a public subnet.
        list_body = self.admin_client.list_ports(
            network_id=CONF.network.public_network_id,
            device_id=router_id)
        self.assertEqual(len(list_body['ports']), 1)
        gw_port = list_body['ports'][0]
        fixed_ips = gw_port['fixed_ips']
        self.assertGreaterEqual(len(fixed_ips), 1)
        public_net_body = self.admin_client.show_network(
            CONF.network.public_network_id)
        # NOTE(review): only the first public subnet is checked against the
        # port's fixed IPs.
        public_subnet_id = public_net_body['network']['subnets'][0]
        self.assertIn(public_subnet_id,
                      [x['subnet_id'] for x in fixed_ips])
@test.attr(type='smoke')
@test.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
def test_update_router_set_gateway(self):
router = self._create_router(data_utils.rand_name('router-'))
self.client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id})
# Verify operation - router
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id})
self._verify_gateway_port(router['id'])
@test.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_with_snat_explicit(self):
router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_gateway_port(router['id'])
@test.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_without_snat(self):
router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@test.attr(type='smoke')
@test.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
def test_update_router_unset_gateway(self):
router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.client.update_router(router['id'], external_gateway_info={})
self._verify_router_gateway(router['id'])
# No gateway port expected
list_body = self.admin_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router['id'])
self.assertFalse(list_body['ports'])
    @test.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_update_router_reset_gateway_without_snat(self):
        # The router is created WITH a gateway, then the gateway is
        # re-applied ("reset") with enable_snat=False and verified.
        router = self._create_router(
            data_utils.rand_name('router-'),
            external_network_id=CONF.network.public_network_id)
        self.admin_client.update_router_with_snat_gw_info(
            router['id'],
            external_gateway_info={
                'network_id': CONF.network.public_network_id,
                'enable_snat': False})
        self._verify_router_gateway(
            router['id'],
            {'network_id': CONF.network.public_network_id,
             'enable_snat': False})
        self._verify_gateway_port(router['id'])
    @test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
    @test.requires_ext(extension='extraroute', service='network')
    @test.attr(type='smoke')
    def test_update_extra_route(self):
        """Set one extra route on a router and verify it in both the
        update response and a subsequent show."""
        # NOTE(review): fixtures are stored on self (legacy style) but are
        # only used within this test method.
        self.network = self.create_network()
        self.name = self.network['name']
        self.subnet = self.create_subnet(self.network)
        # Add router interface with subnet id
        self.router = self._create_router(
            data_utils.rand_name('router-'), True)
        self.create_router_interface(self.router['id'], self.subnet['id'])
        self.addCleanup(
            self._delete_extra_routes,
            self.router['id'])
        # Update router extra route, second ip of the range is
        # used as next hop
        cidr = netaddr.IPNetwork(self.subnet['cidr'])
        next_hop = str(cidr[2])
        destination = str(self.subnet['cidr'])
        extra_route = self.client.update_extra_routes(self.router['id'],
                                                      next_hop, destination)
        self.assertEqual(1, len(extra_route['router']['routes']))
        self.assertEqual(destination,
                         extra_route['router']['routes'][0]['destination'])
        self.assertEqual(next_hop,
                         extra_route['router']['routes'][0]['nexthop'])
        # Confirm the route persisted server-side.
        show_body = self.client.show_router(self.router['id'])
        self.assertEqual(destination,
                         show_body['router']['routes'][0]['destination'])
        self.assertEqual(next_hop,
                         show_body['router']['routes'][0]['nexthop'])
    def _delete_extra_routes(self, router_id):
        # Cleanup helper: clears all extra routes set on the router.
        self.client.delete_extra_routes(router_id)
@test.attr(type='smoke')
@test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
def test_update_router_admin_state(self):
self.router = self._create_router(data_utils.rand_name('router-'))
self.assertFalse(self.router['admin_state_up'])
# Update router admin state
update_body = self.client.update_router(self.router['id'],
admin_state_up=True)
self.assertTrue(update_body['router']['admin_state_up'])
show_body = self.client.show_router(self.router['id'])
self.assertTrue(show_body['router']['admin_state_up'])
@test.attr(type='smoke')
@test.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
def test_add_multiple_router_interfaces(self):
network01 = self.create_network(
network_name=data_utils.rand_name('router-network01-'))
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
router = self._create_router(data_utils.rand_name('router-'))
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
interface01['port_id'])
interface02 = self._add_router_interface_with_subnet_id(router['id'],
subnet02['id'])
self._verify_router_interface(router['id'], subnet02['id'],
interface02['port_id'])
def _verify_router_interface(self, router_id, subnet_id, port_id):
show_port_body = self.client.show_port(port_id)
interface_port = show_port_body['port']
self.assertEqual(router_id, interface_port['device_id'])
self.assertEqual(subnet_id,
interface_port['fixed_ips'][0]['subnet_id'])
@test.attr(type='smoke')
@test.idempotent_id('01f185d1-d1a6-4cf9-abf7-e0e1384c169c')
def test_network_attached_with_two_routers(self):
network = self.create_network(data_utils.rand_name('network1'))
self.create_subnet(network)
port1 = self.create_port(network)
port2 = self.create_port(network)
router1 = self._create_router(data_utils.rand_name('router1'))
router2 = self._create_router(data_utils.rand_name('router2'))
self.client.add_router_interface_with_port_id(
router1['id'], port1['id'])
self.client.add_router_interface_with_port_id(
router2['id'], port2['id'])
self.addCleanup(self.client.remove_router_interface_with_port_id,
router1['id'], port1['id'])
self.addCleanup(self.client.remove_router_interface_with_port_id,
router2['id'], port2['id'])
body = self.client.show_port(port1['id'])
port_show1 = body['port']
body = self.client.show_port(port2['id'])
port_show2 = body['port']
self.assertEqual(port_show1['network_id'], network['id'])
self.assertEqual(port_show2['network_id'], network['id'])
self.assertEqual(port_show1['device_id'], router1['id'])
self.assertEqual(port_show2['device_id'], router2['id'])
class RoutersIpV6Test(RoutersTest):
    # Re-run the entire RoutersTest suite against IPv6 subnets/addresses.
    _ip_version = 6
class DvrRoutersTest(base.BaseRouterTest):
    """Distributed Virtual Router (DVR) API tests."""

    @classmethod
    def skip_checks(cls):
        """Skip the whole class unless the DVR extension is available."""
        super(DvrRoutersTest, cls).skip_checks()
        if not test.is_extension_enabled('dvr', 'network'):
            msg = "DVR extension not enabled."
            raise cls.skipException(msg)

    @test.attr(type='smoke')
    @test.idempotent_id('141297aa-3424-455d-aa8d-f2d95731e00a')
    def test_create_distributed_router(self):
        """A router created with distributed=True reports distributed."""
        router_name = data_utils.rand_name('router')
        body = self.admin_client.create_router(
            router_name, distributed=True)
        self.addCleanup(self._delete_router,
                        body['router']['id'],
                        self.admin_client)
        self.assertTrue(body['router']['distributed'])

    @test.attr(type='smoke')
    @test.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729')
    def test_convert_centralized_router(self):
        """Converting a legacy router to distributed is admin-visible only."""
        router = self._create_router(data_utils.rand_name('router'))
        self.assertNotIn('distributed', router)
        updated = self.admin_client.update_router(router['id'],
                                                  distributed=True)
        self.assertTrue(updated['router']['distributed'])
        # Admins see the distributed flag ...
        admin_view = self.admin_client.show_router(router['id'])
        self.assertTrue(admin_view['router']['distributed'])
        # ... while regular tenants do not.
        tenant_view = self.client.show_router(router['id'])
        self.assertNotIn('distributed', tenant_view['router'])
|
{
"content_hash": "70ecd60dfaeb40a6ca44ec351a5c35c2",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 79,
"avg_line_length": 47.48854961832061,
"alnum_prop": 0.6044044365857579,
"repo_name": "apporc/neutron",
"id": "064157e47cd880296b12f1ded6654aead61d72ed",
"size": "19299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/api/test_routers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7630810"
},
{
"name": "Shell",
"bytes": "13134"
}
],
"symlink_target": ""
}
|
from simple_rl.abstraction.AbstractionWrapperClass import AbstractionWrapper
from simple_rl.abstraction.AbstractValueIterationClass import AbstractValueIteration
from simple_rl.abstraction.state_abs.StateAbstractionClass import StateAbstraction
from simple_rl.abstraction.state_abs.ProbStateAbstractionClass import ProbStateAbstraction
from simple_rl.abstraction.action_abs.ActionAbstractionClass import ActionAbstraction
from simple_rl.abstraction.action_abs.InListPredicateClass import InListPredicate
from simple_rl.abstraction.action_abs.OptionClass import Option
from simple_rl.abstraction.action_abs.PolicyClass import Policy
from simple_rl.abstraction.action_abs.PolicyFromDictClass import PolicyFromDict
from simple_rl.abstraction.action_abs.PredicateClass import Predicate
from simple_rl.abstraction.features.FeatureWrapperClass import FeatureWrapper
from simple_rl.abstraction.features.TileCodingClass import TileCoding
from simple_rl.abstraction.features.BucketCodingClass import BucketCoding
from simple_rl.abstraction.features.RBFCodingClass import RBFCoding
# Scripts.
from simple_rl.abstraction.state_abs import sa_helpers, indicator_funcs
from simple_rl.abstraction.action_abs import aa_helpers
from simple_rl.abstraction.state_action_abstr_mdp import abstr_mdp_funcs
|
{
"content_hash": "dd73f7e94e504b7e0599e8de1bfc5ce3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 90,
"avg_line_length": 64.3,
"alnum_prop": 0.8779160186625194,
"repo_name": "david-abel/simple_rl",
"id": "8d8468d3133caf523ff1fa602bcf91f86657194f",
"size": "1297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_rl/abstraction/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "196326"
},
{
"name": "Python",
"bytes": "433150"
}
],
"symlink_target": ""
}
|
import os
import shutil
from nose.tools import (assert_equal, assert_true)
from qipipe.interfaces.copy import Copy
from ... import ROOT
from ...helpers.logging import logger
FIXTURE = os.path.join(ROOT, 'fixtures', 'interfaces', 'copy')
"""The test fixture directory."""

SOURCE = os.path.join(FIXTURE, 'small.txt')
"""The test fixture file."""

RESULTS = os.path.join(ROOT, 'results', 'interfaces', 'copy')
"""The test results directory."""

TARGET = os.path.join(RESULTS, 'target')
"""The test target area where the work data is copied."""
class TestCopy(object):
    """Copy interface unit tests."""

    def setUp(self):
        # Always start from a clean results area (second arg: ignore errors).
        shutil.rmtree(RESULTS, True)

    def tearDown(self):
        # Results are deliberately kept on disk for post-run inspection:
        # shutil.rmtree(RESULTS, True)
        pass

    def test_copy_file(self):
        """Copying a file places it under the destination directory."""
        outcome = Copy(in_file=SOURCE, dest=TARGET).run()
        expected = os.path.join(TARGET, 'small.txt')
        assert_equal(outcome.outputs.out_file, expected,
                     "Copy target file name incorrect: %s" %
                     outcome.outputs.out_file)
        assert_true(os.path.exists(expected),
                    "Copy target file does not exist: %s" % expected)

    def test_copy_file_with_output_filename(self):
        """out_base_name renames the copied file."""
        outcome = Copy(in_file=SOURCE, dest=TARGET,
                       out_base_name='target.txt').run()
        expected = os.path.join(TARGET, 'target.txt')
        assert_equal(outcome.outputs.out_file, expected,
                     "Copy target file name incorrect: %s" %
                     outcome.outputs.out_file)
        assert_true(os.path.exists(expected),
                    "Copy target file does not exist: %s" % expected)

    def test_copy_directory(self):
        """Copying a directory recreates it under the destination."""
        outcome = Copy(in_file=FIXTURE, dest=TARGET).run()
        _, base_name = os.path.split(FIXTURE)
        expected_dir = os.path.join(TARGET, base_name)
        assert_equal(outcome.outputs.out_file, expected_dir,
                     "Copy target directory name incorrect: %s" %
                     outcome.outputs.out_file)
        expected_file = os.path.join(expected_dir, 'small.txt')
        assert_true(os.path.exists(expected_file),
                    "Copy target directory content is missing: %s" %
                    expected_file)
if __name__ == "__main__":
    # Support running this test module directly via the nose runner.
    import nose
    nose.main(defaultTest=__name__)
|
{
"content_hash": "79fdcb337c4a44c13a4970c02d873dac",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 32.98684210526316,
"alnum_prop": 0.5827682489030714,
"repo_name": "ohsu-qin/qipipe",
"id": "c9bfdb1eb6dde99a6219d4636ae666c1be7ed8f2",
"size": "2507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/interfaces/test_copy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "528376"
}
],
"symlink_target": ""
}
|
"""
logbook.base
~~~~~~~~~~~~
Base implementation for logbook.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import thread
import threading
import traceback
from itertools import chain
from weakref import ref as weakref
from datetime import datetime
from logbook.helpers import to_safe_json, parse_iso8601, cached_property, \
F, _py3
try:
from logbook._speedups import group_reflected_property, \
ContextStackManager, StackedObject
except ImportError:
from logbook._fallback import group_reflected_property, \
ContextStackManager, StackedObject
# make sure to sync these up with _speedups.pyx
CRITICAL = 6
ERROR = 5
WARNING = 4
NOTICE = 3
INFO = 2
DEBUG = 1
NOTSET = 0

#: numeric level -> canonical upper-case name
_level_names = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    NOTICE: 'NOTICE',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET'
}
#: inverse mapping, name -> numeric level
_reverse_level_names = dict((name, value)
                            for (value, name) in _level_names.items())
#: sentinel object used to detect missing arguments
_missing = object()
# on python 3 we can safely assume that frame filenames will be in
# unicode, on Python 2 we have to apply a trick.
if _py3:
    def _convert_frame_filename(fn):
        # Frame filenames are already text on Python 3.
        return fn
else:
    def _convert_frame_filename(fn):
        # NOTE(review): this decodes only when *fn* is ALREADY unicode,
        # which looks inverted — on Python 2 frame filenames are byte
        # strings and would pass through undecoded.  Confirm intent.
        if isinstance(fn, unicode):
            fn = fn.decode(sys.getfilesystemencoding() or 'utf-8',
                           'replace')
        return fn
def level_name_property():
    """Returns a property that reflects the level as name from
    the internal level attribute.
    """
    def getter(self):
        return get_level_name(self.level)

    def setter(self, value):
        self.level = lookup_level(value)

    return property(getter, setter, doc='The level as unicode string')
def lookup_level(level):
    """Return the integer representation of a logging level."""
    # Numeric levels pass straight through unchanged.
    if isinstance(level, (int, long)):
        return level
    if level not in _reverse_level_names:
        raise LookupError('unknown level name %s' % level)
    return _reverse_level_names[level]
def get_level_name(level):
    """Return the textual representation of logging level 'level'."""
    name = _level_names.get(level, _missing)
    if name is _missing:
        raise LookupError('unknown level')
    return name
class ExtraDict(dict):
    """A dictionary which returns ``u''`` on missing keys."""

    if sys.version_info[:2] >= (2, 5):
        # dict.__getitem__ calls __missing__ for absent keys on 2.5+.
        def __missing__(self, key):
            return u''
    else:
        # Older interpreters lack __missing__ support, so override
        # __getitem__ directly.
        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                return u''

    def copy(self):
        # Preserve the subclass (dict.copy would return a plain dict).
        return type(self)(self)

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, dict.__repr__(self))
class _ExceptionCatcher(object):
"""Helper for exception caught blocks."""
def __init__(self, logger, args, kwargs):
self.logger = logger
self.args = args
self.kwargs = kwargs
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
kwargs = self.kwargs.copy()
kwargs['exc_info'] = (exc_type, exc_value, tb)
self.logger.exception(*self.args, **kwargs)
return True
class ContextObject(StackedObject):
    """An object that can be bound to a context.  It is managed by the
    :class:`ContextStackManager`"""

    #: Subclasses must create one shared :class:`ContextStackManager`
    #: instance on this attribute; all of their subclasses then reuse it.
    stack_manager = None

    def push_application(self):
        """Pushes the context object to the application stack."""
        self.stack_manager.push_application(self)

    def pop_application(self):
        """Pops the context object from the stack."""
        previous = self.stack_manager.pop_application()
        assert previous is self, 'popped unexpected object'

    def push_thread(self):
        """Pushes the context object to the thread stack."""
        self.stack_manager.push_thread(self)

    def pop_thread(self):
        """Pops the context object from the stack."""
        previous = self.stack_manager.pop_thread()
        assert previous is self, 'popped unexpected object'
class NestedSetup(StackedObject):
    """A nested setup can be used to configure multiple handlers
    and processors at once.
    """

    def __init__(self, objects=None):
        # Normalize to a list; ``None`` means an empty setup.
        self.objects = list(objects or ())

    def push_application(self):
        for member in self.objects:
            member.push_application()

    def pop_application(self):
        # Tear down in the reverse order of setup.
        for member in reversed(self.objects):
            member.pop_application()

    def push_thread(self):
        for member in self.objects:
            member.push_thread()

    def pop_thread(self):
        for member in reversed(self.objects):
            member.pop_thread()
class Processor(ContextObject):
    """Can be pushed to a stack to inject additional information into
    a log record as necessary::

        def inject_ip(record):
            record.extra['ip'] = '127.0.0.1'

        with Processor(inject_ip):
            ...
    """
    stack_manager = ContextStackManager()

    def __init__(self, callback=None):
        #: the callback that was passed to the constructor
        self.callback = callback

    def process(self, record):
        """Called with the log record that should be overridden.  The default
        implementation calls :attr:`callback` if it is not `None`.
        """
        hook = self.callback
        if hook is not None:
            hook(record)
class _InheritedType(object):
    """Type of the :data:`Inherit` marker singleton."""
    __slots__ = ()

    def __repr__(self):
        return 'Inherit'

    def __reduce__(self):
        # Pickle by global name so unpickling yields the singleton.
        return 'Inherit'


#: Marker telling :meth:`Flags.get_flag` to keep searching further
#: down the stack.
Inherit = _InheritedType()
class Flags(ContextObject):
    """Allows flags to be pushed on a flag stack.  Currently two flags
    are available:

    `errors`
        Can be set to override the current error behaviour.  This value is
        used when logging calls fail.  The default behaviour is spitting
        out the stacktrace to stderr but this can be overridden:

        ===================  ==========================================
        ``'silent'``         fail silently
        ``'raise'``          raise a catchable exception
        ``'print'``          print the stacktrace to stderr (default)
        ===================  ==========================================

    `introspection`
        Can be used to disable frame introspection.  This can give a
        speedup on production systems if you are using a JIT compiled
        Python interpreter such as pypy.  The default is `True`.

        Note that the default setup of some of the handler (mail for
        instance) includes frame dependent information which will
        not be available when introspection is disabled.

    Example usage::

        with Flags(errors='silent'):
            ...
    """
    stack_manager = ContextStackManager()

    def __init__(self, **flags):
        # Store each flag directly as an instance attribute so that
        # get_flag() can probe objects with plain getattr().
        self.__dict__.update(flags)

    @staticmethod
    def get_flag(flag, default=None):
        """Looks up the current value of a specific flag."""
        for ctx in Flags.stack_manager.iter_context_objects():
            value = getattr(ctx, flag, Inherit)
            if value is not Inherit:
                return value
        return default
def _create_log_record(cls, dict):
    """Extra function for reduce because on Python 3 unbound methods
    can no longer be pickled.
    """
    # ``dict`` shadows the builtin here; the name is part of the
    # pickle-facing signature produced by LogRecord.__reduce_ex__.
    return cls.from_dict(dict)
class LogRecord(object):
    """A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args
    """
    #: attributes derived from the interpreter frame / exc_info that must
    #: be materialized (:meth:`pull_information`) before the record closes.
    _pullable_information = frozenset((
        'func_name', 'module', 'filename', 'lineno', 'process_name', 'thread',
        'thread_name', 'formatted_exception', 'message', 'exception_name',
        'exception_message'
    ))
    #: attributes reset to `None` by :meth:`close` because they keep
    #: interpreter frames (and their memory) alive.
    _noned_on_close = frozenset(('exc_info', 'frame', 'calling_frame'))

    #: can be overridden by a handler to not close the record.  This could
    #: lead to memory leaks so it should be used carefully.
    keep_open = False

    #: the time of the log record creation as :class:`datetime.datetime`
    #: object.  This information is unavailable until the record was
    #: heavy initialized.
    time = None

    #: a flag that is `True` if the log record is heavy initialized which
    #: is not the case by default.
    heavy_initialized = False

    #: a flag that is `True` when heavy initialization is no longer possible
    late = False

    #: a flag that is `True` when all the information was pulled from the
    #: information that becomes unavailable on close.
    information_pulled = False

    def __init__(self, channel, level, msg, args=None, kwargs=None,
                 exc_info=None, extra=None, frame=None, dispatcher=None):
        #: the name of the logger that created it or any other textual
        #: channel description.  This is a descriptive name and can be
        #: used for filtering.
        self.channel = channel
        #: The message of the log record as new-style format string.
        self.msg = msg
        #: the positional arguments for the format string.
        self.args = args or ()
        #: the keyword arguments for the format string.
        self.kwargs = kwargs or {}
        #: the level of the log record as integer.
        self.level = level
        #: optional exception information.  If set, this is a tuple in the
        #: form ``(exc_type, exc_value, tb)`` as returned by
        #: :func:`sys.exc_info`.
        self.exc_info = exc_info
        #: optional extra information as dictionary.  This is the place
        #: where custom log processors can attach custom context sensitive
        #: data.
        self.extra = ExtraDict(extra or ())
        #: If available, optionally the interpreter frame that pulled the
        #: heavy init.  This usually points to somewhere in the dispatcher.
        #: Might not be available for all calls and is removed when the log
        #: record is closed.
        self.frame = frame
        #: the PID of the current process (filled in by heavy_init)
        self.process = None
        # Only a weak reference to the dispatcher is kept so that records
        # do not keep loggers alive (see the ``dispatcher`` property).
        if dispatcher is not None:
            dispatcher = weakref(dispatcher)
        self._dispatcher = dispatcher

    def heavy_init(self):
        """Does the heavy initialization that could be expensive.  This must
        not be called from a higher stack level than when the log record was
        created and the later the initialization happens, the more off the
        date information will be for example.

        This is internally used by the record dispatching system and usually
        something not to worry about.
        """
        if self.heavy_initialized:
            return
        assert not self.late, 'heavy init is no longer possible'
        self.heavy_initialized = True
        self.process = os.getpid()
        self.time = datetime.utcnow()
        # sys._getframe(1) is the caller's frame; frame introspection can
        # be disabled entirely via the ``introspection`` flag for speed.
        if self.frame is None and Flags.get_flag('introspection', True):
            self.frame = sys._getframe(1)

    def pull_information(self):
        """A helper function that pulls all frame-related information into
        the object so that this information is available after the log
        record was closed.
        """
        if self.information_pulled:
            return
        # due to how cached_property is implemented, the attribute access
        # has the side effect of caching the attribute on the instance of
        # the class.
        for key in self._pullable_information:
            getattr(self, key)
        self.information_pulled = True

    def close(self):
        """Closes the log record.  This will set the frame and calling
        frame to `None` and frame-related information will no longer be
        available unless it was pulled in first (:meth:`pull_information`).
        This makes a log record safe for pickling and will clean up
        memory that might be still referenced by the frames.
        """
        for key in self._noned_on_close:
            setattr(self, key, None)
        self.late = True

    def __reduce_ex__(self, protocol):
        # Pickle via the module-level _create_log_record helper because
        # unbound methods cannot be pickled on Python 3.
        return _create_log_record, (type(self), self.to_dict())

    def to_dict(self, json_safe=False):
        """Exports the log record into a dictionary without the information
        that cannot be safely serialized like interpreter frames and
        tracebacks.
        """
        self.pull_information()
        rv = {}
        for key, value in self.__dict__.iteritems():
            if key[:1] != '_' and key not in self._noned_on_close:
                rv[key] = value
        # the extra dict is exported as regular dict
        rv['extra'] = dict(rv['extra'])
        if json_safe:
            return to_safe_json(rv)
        return rv

    @classmethod
    def from_dict(cls, d):
        """Creates a log record from an exported dictionary.  This also
        supports JSON exported dictionaries.
        """
        rv = object.__new__(cls)
        rv.update_from_dict(d)
        return rv

    def update_from_dict(self, d):
        """Like the :meth:`from_dict` classmethod, but will update the
        instance in place.  Helpful for constructors.
        """
        self.__dict__.update(d)
        for key in self._noned_on_close:
            setattr(self, key, None)
        # NOTE(review): these underscore-prefixed names do not match the
        # attributes used elsewhere in this class (``information_pulled``
        # and ``_dispatcher``) — looks like a latent typo; confirm before
        # relying on deserialized records skipping pull_information().
        self._information_pulled = True
        self._channel = None
        if isinstance(self.time, basestring):
            self.time = parse_iso8601(self.time)
        return self

    @cached_property
    def message(self):
        """The formatted message."""
        if not (self.args or self.kwargs):
            return self.msg
        try:
            return F(self.msg).format(*self.args, **self.kwargs)
        except Exception, e:
            # this obviously will not give a proper error message if the
            # information was not pulled and the log record no longer has
            # access to the frame.  But there is not much we can do about
            # that.
            errormsg = F('Could not format message with provided '
                         'arguments: {err}\n msg=\'{msg}\'\n '
                         'args={args} \n kwargs={kwargs}.\n'
                         'Happened in file {file}, line {lineno}').format(
                err=e, msg=self.msg, args=self.args,
                kwargs=self.kwargs, file=self.filename,
                lineno=self.lineno
            )
            if not _py3:
                errormsg = errormsg.encode('utf-8')
            raise TypeError(errormsg)

    level_name = level_name_property()

    @cached_property
    def calling_frame(self):
        """The frame in which the record has been created.  This only
        exists for as long the log record is not closed.
        """
        # Walk outward past any frames belonging to this module so the
        # reported location is the user's call site.
        frm = self.frame
        globs = globals()
        while frm is not None and frm.f_globals is globs:
            frm = frm.f_back
        return frm

    @cached_property
    def func_name(self):
        """The name of the function that triggered the log call if
        available.  Requires a frame or that :meth:`pull_information`
        was called before.
        """
        cf = self.calling_frame
        if cf is not None:
            return cf.f_code.co_name

    @cached_property
    def module(self):
        """The name of the module that triggered the log call if
        available.  Requires a frame or that :meth:`pull_information`
        was called before.
        """
        cf = self.calling_frame
        if cf is not None:
            return cf.f_globals.get('__name__')

    @cached_property
    def filename(self):
        """The filename of the module in which the record has been created.
        Requires a frame or that :meth:`pull_information` was called before.
        """
        cf = self.calling_frame
        if cf is not None:
            fn = cf.f_code.co_filename
            # Synthetic filenames such as '<stdin>' are kept verbatim.
            if fn[:1] == '<' and fn[-1:] == '>':
                return fn
            return _convert_frame_filename(os.path.abspath(fn))

    @cached_property
    def lineno(self):
        """The line number of the file in which the record has been created.
        Requires a frame or that :meth:`pull_information` was called before.
        """
        cf = self.calling_frame
        if cf is not None:
            return cf.f_lineno

    @cached_property
    def thread(self):
        """The ident of the thread.  This is evaluated late and means that
        if the log record is passed to another thread, :meth:`pull_information`
        was called in the old thread.
        """
        return thread.get_ident()

    @cached_property
    def thread_name(self):
        """The name of the thread.  This is evaluated late and means that
        if the log record is passed to another thread, :meth:`pull_information`
        was called in the old thread.
        """
        return threading.currentThread().getName()

    @cached_property
    def process_name(self):
        """The name of the process in which the record has been created."""
        # Errors may occur if multiprocessing has not finished loading
        # yet - e.g. if a custom import hook causes third-party code
        # to run when multiprocessing calls import. See issue 8200
        # for an example
        mp = sys.modules.get('multiprocessing')
        if mp is not None:  # pragma: no cover
            try:
                return mp.current_process().name
            except Exception:
                pass

    @cached_property
    def formatted_exception(self):
        """The formatted exception which caused this record to be created
        in case there was any.
        """
        if self.exc_info is not None:
            rv = ''.join(traceback.format_exception(*self.exc_info))
            if not _py3:
                rv = rv.decode('utf-8', 'replace')
            return rv.rstrip()

    @cached_property
    def exception_name(self):
        """The name of the exception."""
        if self.exc_info is not None:
            cls = self.exc_info[0]
            return unicode(cls.__module__ + '.' + cls.__name__)

    @property
    def exception_shortname(self):
        """An abbreviated exception name (no import path)"""
        # NOTE(review): raises AttributeError when there is no exception
        # (exception_name is None in that case) — confirm callers guard.
        return self.exception_name.rsplit('.')[-1]

    @cached_property
    def exception_message(self):
        """The message of the exception."""
        if self.exc_info is not None:
            val = self.exc_info[1]
            try:
                return unicode(val)
            except UnicodeError:
                return str(val).decode('utf-8', 'replace')

    @property
    def dispatcher(self):
        """The dispatcher that created the log record.  Might not exist because
        a log record does not have to be created from a logger or other
        dispatcher to be handled by logbook.  If this is set, it will point to
        an object that implements the :class:`~logbook.base.RecordDispatcher`
        interface.
        """
        # Dereference the weak reference; returns None once the
        # dispatcher has been garbage collected.
        if self._dispatcher is not None:
            return self._dispatcher()
class LoggerMixin(object):
    """This mixin class defines and implements the "usual" logger
    interface (i.e. the descriptive logging functions).

    Classes using this mixin have to implement a :meth:`!handle` method which
    takes a :class:`~logbook.LogRecord` and passes it along.
    """

    #: The name of the minimium logging level required for records to be
    #: created.
    level_name = level_name_property()

    def debug(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.DEBUG`.
        """
        if not self.disabled and DEBUG >= self.level:
            self._log(DEBUG, args, kwargs)

    def info(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.INFO`.
        """
        if not self.disabled and INFO >= self.level:
            self._log(INFO, args, kwargs)

    def warn(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.WARNING`.  This function has an alias
        named :meth:`warning`.
        """
        if not self.disabled and WARNING >= self.level:
            self._log(WARNING, args, kwargs)

    def warning(self, *args, **kwargs):
        """Alias for :meth:`warn`."""
        return self.warn(*args, **kwargs)

    def notice(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.NOTICE`.
        """
        if not self.disabled and NOTICE >= self.level:
            self._log(NOTICE, args, kwargs)

    def error(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.ERROR`.
        """
        if not self.disabled and ERROR >= self.level:
            self._log(ERROR, args, kwargs)

    def exception(self, *args, **kwargs):
        """Works exactly like :meth:`error` just that the message
        is optional and exception information is recorded.
        """
        if self.disabled or ERROR < self.level:
            return
        if not args:
            args = ('Uncaught exception occurred',)
        if 'exc_info' not in kwargs:
            # Capture the active exception once and reuse the validated
            # tuple; the previous code called sys.exc_info() a second
            # time in the setdefault, discarding the checked value.
            exc_info = sys.exc_info()
            assert exc_info[0] is not None, 'no exception occurred'
            kwargs.setdefault('exc_info', exc_info)
        return self.error(*args, **kwargs)

    def critical(self, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to :data:`~logbook.CRITICAL`.
        """
        if not self.disabled and CRITICAL >= self.level:
            self._log(CRITICAL, args, kwargs)

    def log(self, level, *args, **kwargs):
        """Logs a :class:`~logbook.LogRecord` with the level set
        to the `level` parameter.  Because custom levels are not
        supported by logbook, this method is mainly used to avoid
        the use of reflection (e.g.: :func:`getattr`) for programmatic
        logging.
        """
        level = lookup_level(level)
        if level >= self.level:
            self._log(level, args, kwargs)

    def catch_exceptions(self, *args, **kwargs):
        """A context manager that catches exceptions and calls
        :meth:`exception` for exceptions caught that way.  Example::

            with logger.catch_exceptions():
                execute_code_that_might_fail()
        """
        if not args:
            args = ('Uncaught exception occurred',)
        return _ExceptionCatcher(self, args, kwargs)

    def _log(self, level, args, kwargs):
        # Central record-creation helper shared by all level methods.
        # exc_info/extra are popped so they do not leak into the format
        # kwargs of the record.
        exc_info = kwargs.pop('exc_info', None)
        extra = kwargs.pop('extra', None)
        self.make_record_and_handle(level, args[0], args[1:], kwargs,
                                    exc_info, extra)
class RecordDispatcher(object):
    """A record dispatcher is the internal base class that implements
    the logic used by the :class:`~logbook.Logger`.
    """

    #: If this is set to `True` the dispatcher information will be suppressed
    #: for log records emitted from this logger.
    suppress_dispatcher = False

    def __init__(self, name=None, level=NOTSET):
        #: the name of the record dispatcher
        self.name = name
        #: list of handlers specific for this record dispatcher
        self.handlers = []
        #: optionally the name of the group this logger belongs to
        self.group = None
        #: the level of the record dispatcher as integer
        self.level = level

    # Both attributes fall back to the group's setting when not set on
    # the dispatcher itself.
    disabled = group_reflected_property('disabled', False)
    level = group_reflected_property('level', NOTSET, fallback=NOTSET)

    def handle(self, record):
        """Call the handlers for the specified record.  This is
        invoked automatically when a record should be handled.
        The default implementation checks if the dispatcher is disabled
        and if the record level is greater than the level of the
        record dispatcher.  In that case it will call the handlers
        (:meth:`call_handlers`).
        """
        if not self.disabled and record.level >= self.level:
            self.call_handlers(record)

    def make_record_and_handle(self, level, msg, args, kwargs, exc_info,
                               extra):
        """Creates a record from some given arguments and heads it
        over to the handling system.
        """
        # The channel information can be useful for some use cases which is
        # why we keep it on there.  The log record however internally will
        # only store a weak reference to the channel, so it might disappear
        # from one instruction to the other.  It will also disappear when
        # a log record is transmitted to another process etc.
        channel = None
        if not self.suppress_dispatcher:
            channel = self
        record = LogRecord(self.name, level, msg, args, kwargs, exc_info,
                           extra, None, channel)
        # after handling the log record is closed which will remove some
        # references that would require a GC run on cpython.  This includes
        # the current stack frame, exception information.  However there are
        # some use cases in keeping the records open for a little longer.
        # For example the test handler keeps log records open until the
        # test handler is closed to allow assertions based on stack frames
        # and exception information.
        try:
            self.handle(record)
        finally:
            record.late = True
            if not record.keep_open:
                record.close()

    def call_handlers(self, record):
        """Pass a record to all relevant handlers in the following
        order:

        -   per-dispatcher handlers are handled first
        -   afterwards all the current context handlers in the
            order they were pushed

        Before the first handler is invoked, the record is processed
        (:meth:`process_record`).
        """
        # for performance reasons records are only heavy initialized
        # and processed if at least one of the handlers has a higher
        # level than the record and that handler is not a black hole.
        record_initialized = False

        # Both logger attached handlers as well as context specific
        # handlers are handled one after another.  The latter also
        # include global handlers.
        # NOTE(review): ``Handler`` is not defined in the visible part of
        # this module — presumably defined/imported later; confirm.
        for handler in chain(self.handlers,
                             Handler.stack_manager.iter_context_objects()):
            # skip records that this handler is not interested in based
            # on the record and handler level or in case this method was
            # overridden on some custom logic.
            if not handler.should_handle(record):
                continue

            # if this is a blackhole handler, don't even try to
            # do further processing, stop right away.  Technically
            # speaking this is not 100% correct because if the handler
            # is bubbling we shouldn't apply this logic, but then we
            # won't enter this branch anyways.  The result is that a
            # bubbling blackhole handler will never have this shortcut
            # applied and do the heavy init at one point.  This is fine
            # however because a bubbling blackhole handler is not very
            # useful in general.
            if handler.blackhole:
                break

            # we are about to handle the record.  If it was not yet
            # processed by context-specific record processors we
            # have to do that now and remember that we processed
            # the record already.
            if not record_initialized:
                record.heavy_init()
                self.process_record(record)
                record_initialized = True

            # a filter can still veto the handling of the record.  This
            # however is already operating on an initialized and processed
            # record.  The impact is that filters are slower than the
            # handler's should_handle function in case there is no default
            # handler that would handle the record (delayed init).
            if handler.filter is not None \
               and not handler.filter(record, handler):
                continue

            # handle the record.  If the record was handled and
            # the record is not bubbling we can abort now.
            if handler.handle(record) and not handler.bubble:
                break

    def process_record(self, record):
        """Processes the record with all context specific processors.  This
        can be overridden to also inject additional information as necessary
        that can be provided by this record dispatcher.
        """
        # Group-level processing runs before stack-based processors.
        if self.group is not None:
            self.group.process_record(record)
        for processor in Processor.stack_manager.iter_context_objects():
            processor.process(record)
class Logger(RecordDispatcher, LoggerMixin):
    """Instances of the Logger class represent a single logging channel.

    A "logging channel" indicates an area of an application.  Exactly
    how an "area" is defined is up to the application developer.

    Names used by logbook should be descriptive and are intended for user
    display, not for filtering.  Filtering should happen based on the
    context information instead.

    A logger internally is a subclass of a
    :class:`~logbook.base.RecordDispatcher` that implements the actual
    logic.  If you want to implement a custom logger class, have a look
    at the interface of that class as well.  The convenience logging
    methods are presumably contributed by :class:`LoggerMixin` -- see
    that mixin for the public logging API.
    """
class LoggerGroup(object):
    """Represents a set of loggers that share configuration.

    A group cannot emit log records on its own; it exists so that the
    ``disabled`` flag and log ``level`` of all member loggers can be
    controlled in one place.

    Every logger in the group also calls the group's
    :meth:`process_record`, which by default forwards the record to the
    :attr:`processor` callback if one is set.
    """

    def __init__(self, loggers=None, level=NOTSET, processor=None):
        #: all loggers that belong to this group.  Manipulate this list
        #: only through :meth:`add_logger` and :meth:`remove_logger`.
        self.loggers = []
        for member in (loggers or ()):
            self.add_logger(member)
        #: the group level; member loggers use it unless they override
        #: the setting themselves.
        self.level = lookup_level(level)
        #: disables all loggers in the group unless they override the
        #: setting.
        self.disabled = False
        #: optional callback executed to process the log records of all
        #: loggers in the group.
        self.processor = processor

    def add_logger(self, logger):
        """Attach *logger* to this group."""
        assert logger.group is None, 'Logger already belongs to a group'
        logger.group = self
        self.loggers.append(logger)

    def remove_logger(self, logger):
        """Detach *logger* from this group."""
        self.loggers.remove(logger)
        logger.group = None

    def process_record(self, record):
        """Like :meth:`Logger.process_record` but for all loggers in the
        group.  By default this forwards to the :attr:`processor`
        callback if it's not `None`.
        """
        if self.processor is not None:
            self.processor(record)
#: Module-level dispatcher backing :func:`dispatch_record`.
_default_dispatcher = RecordDispatcher()
def dispatch_record(record):
    """Passes a record on to the handlers on the stack. This is useful when
    log records are created programmatically and already have all the
    information attached and should be dispatched independent of a logger.

    :param record: a fully populated log record to hand to the handlers
                   on the context stacks.
    """
    _default_dispatcher.call_handlers(record)
# at that point we are save to import handler
from logbook.handlers import Handler
|
{
"content_hash": "15ed3e0e01ee28c96564d3e5ba6ea4c8",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 79,
"avg_line_length": 36.0456570155902,
"alnum_prop": 0.6108931384967098,
"repo_name": "zacharyvoase/logbook",
"id": "241940ec1773e8a9728c82093e9ecfb4d9ef85ab",
"size": "32393",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "logbook/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Component to interface with various media players.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player/
"""
import hashlib
import logging
import os
import requests
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
STATE_OFF, STATE_UNKNOWN, STATE_PLAYING, STATE_IDLE,
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_SET,
SERVICE_VOLUME_MUTE, SERVICE_TOGGLE, SERVICE_MEDIA_STOP,
SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK)
_LOGGER = logging.getLogger(__name__)
# Component identity and setup configuration.
DOMAIN = 'media_player'
DEPENDENCIES = ['http']
# Poll interval handed to EntityComponent (presumably seconds -- confirm
# against EntityComponent documentation).
SCAN_INTERVAL = 10
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# URL template for the image proxy served by MediaPlayerImageView:
# {0}=entity_id, {1}=access token, {2}=cache-busting hash of the image URL.
ENTITY_IMAGE_URL = '/api/media_player_proxy/{0}?token={1}&cache={2}'
# Services that take extra arguments and therefore get dedicated handlers
# in setup() (everything in SERVICE_TO_METHOD below shares one handler).
SERVICE_PLAY_MEDIA = 'play_media'
SERVICE_SELECT_SOURCE = 'select_source'
SERVICE_CLEAR_PLAYLIST = 'clear_playlist'
# Names of service-call data fields and entity state attributes.
ATTR_MEDIA_VOLUME_LEVEL = 'volume_level'
ATTR_MEDIA_VOLUME_MUTED = 'is_volume_muted'
ATTR_MEDIA_SEEK_POSITION = 'seek_position'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_CONTENT_TYPE = 'media_content_type'
ATTR_MEDIA_DURATION = 'media_duration'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM_NAME = 'media_album_name'
ATTR_MEDIA_ALBUM_ARTIST = 'media_album_artist'
ATTR_MEDIA_TRACK = 'media_track'
ATTR_MEDIA_SERIES_TITLE = 'media_series_title'
ATTR_MEDIA_SEASON = 'media_season'
ATTR_MEDIA_EPISODE = 'media_episode'
ATTR_MEDIA_CHANNEL = 'media_channel'
ATTR_MEDIA_PLAYLIST = 'media_playlist'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_SUPPORTED_MEDIA_COMMANDS = 'supported_media_commands'
ATTR_INPUT_SOURCE = 'source'
ATTR_INPUT_SOURCE_LIST = 'source_list'
ATTR_MEDIA_ENQUEUE = 'enqueue'
# Values reported by MediaPlayerDevice.media_content_type.  Note that the
# video type is reported as the string 'movie'.
MEDIA_TYPE_MUSIC = 'music'
MEDIA_TYPE_TVSHOW = 'tvshow'
MEDIA_TYPE_VIDEO = 'movie'
MEDIA_TYPE_EPISODE = 'episode'
MEDIA_TYPE_CHANNEL = 'channel'
MEDIA_TYPE_PLAYLIST = 'playlist'
# Bit flags combined into MediaPlayerDevice.supported_media_commands
# (bit value 64 is unassigned here).
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
# simple services that only take entity_id(s) as optional argument
SERVICE_TO_METHOD = {
    SERVICE_TURN_ON: 'turn_on',
    SERVICE_TURN_OFF: 'turn_off',
    SERVICE_TOGGLE: 'toggle',
    SERVICE_VOLUME_UP: 'volume_up',
    SERVICE_VOLUME_DOWN: 'volume_down',
    SERVICE_MEDIA_PLAY_PAUSE: 'media_play_pause',
    SERVICE_MEDIA_PLAY: 'media_play',
    SERVICE_MEDIA_PAUSE: 'media_pause',
    SERVICE_MEDIA_STOP: 'media_stop',
    SERVICE_MEDIA_NEXT_TRACK: 'media_next_track',
    SERVICE_MEDIA_PREVIOUS_TRACK: 'media_previous_track',
    SERVICE_CLEAR_PLAYLIST: 'clear_playlist'
}
# Entity properties exported as state attributes (see
# MediaPlayerDevice.state_attributes); attribute name == property name.
ATTR_TO_PROPERTY = [
    ATTR_MEDIA_VOLUME_LEVEL,
    ATTR_MEDIA_VOLUME_MUTED,
    ATTR_MEDIA_CONTENT_ID,
    ATTR_MEDIA_CONTENT_TYPE,
    ATTR_MEDIA_DURATION,
    ATTR_MEDIA_TITLE,
    ATTR_MEDIA_ARTIST,
    ATTR_MEDIA_ALBUM_NAME,
    ATTR_MEDIA_ALBUM_ARTIST,
    ATTR_MEDIA_TRACK,
    ATTR_MEDIA_SERIES_TITLE,
    ATTR_MEDIA_SEASON,
    ATTR_MEDIA_EPISODE,
    ATTR_MEDIA_CHANNEL,
    ATTR_MEDIA_PLAYLIST,
    ATTR_APP_ID,
    ATTR_APP_NAME,
    ATTR_SUPPORTED_MEDIA_COMMANDS,
    ATTR_INPUT_SOURCE,
    ATTR_INPUT_SOURCE_LIST,
]
# Service call validation schemas
MEDIA_PLAYER_SCHEMA = vol.Schema({
    ATTR_ENTITY_ID: cv.entity_ids,
})
MEDIA_PLAYER_MUTE_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean,
})
MEDIA_PLAYER_SET_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float,
})
MEDIA_PLAYER_MEDIA_SEEK_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_SEEK_POSITION):
        vol.All(vol.Coerce(float), vol.Range(min=0)),
})
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
    vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
    ATTR_MEDIA_ENQUEUE: cv.boolean,
})
MEDIA_PLAYER_SELECT_SOURCE_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_INPUT_SOURCE): cv.string,
})
def is_on(hass, entity_id=None):
    """
    Return true if specified media player entity_id is on.

    Check all media player if no entity_id specified.
    """
    if entity_id:
        candidates = [entity_id]
    else:
        candidates = hass.states.entity_ids(DOMAIN)
    for candidate in candidates:
        if not hass.states.is_state(candidate, STATE_OFF):
            return True
    return False
def turn_on(hass, entity_id=None):
    """Turn on a specific media player, or all of them if no id given."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_ON, payload)
def turn_off(hass, entity_id=None):
    """Turn off a specific media player, or all of them if no id given."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, payload)
def toggle(hass, entity_id=None):
    """Toggle a specific media player, or all of them if no id given."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TOGGLE, payload)
def volume_up(hass, entity_id=None):
    """Send the volume-up command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_VOLUME_UP, payload)
def volume_down(hass, entity_id=None):
    """Send the volume-down command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, payload)
def mute_volume(hass, mute, entity_id=None):
    """Ask one or all media players to mute (True) / unmute (False)."""
    service_data = {ATTR_MEDIA_VOLUME_MUTED: mute}
    service_data.update({ATTR_ENTITY_ID: entity_id} if entity_id else {})
    hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE, service_data)
def set_volume_level(hass, volume, entity_id=None):
    """Ask one or all media players to change volume to *volume* (0..1)."""
    service_data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
    service_data.update({ATTR_ENTITY_ID: entity_id} if entity_id else {})
    hass.services.call(DOMAIN, SERVICE_VOLUME_SET, service_data)
def media_play_pause(hass, entity_id=None):
    """Send the play/pause toggle command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, payload)
def media_play(hass, entity_id=None):
    """Send the media player the command for play.

    Targets the given entity_id, or every media player when omitted.
    (The previous docstring said "play/pause", which belongs to the
    media_play_pause service; this one sends an unconditional play.)
    """
    data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
    hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY, data)
def media_pause(hass, entity_id=None):
    """Send the pause command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, payload)
def media_stop(hass, entity_id=None):
    """Send the stop command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_STOP, payload)
def media_next_track(hass, entity_id=None):
    """Send the next-track command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, payload)
def media_previous_track(hass, entity_id=None):
    """Send the previous-track command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, payload)
def media_seek(hass, position, entity_id=None):
    """Ask one or all media players to seek to *position* in the media."""
    service_data = {ATTR_MEDIA_SEEK_POSITION: position}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_SEEK, service_data)
def play_media(hass, media_type, media_id, entity_id=None, enqueue=None):
    """Ask one or all media players to play *media_id* of *media_type*."""
    service_data = {ATTR_MEDIA_CONTENT_TYPE: media_type,
                    ATTR_MEDIA_CONTENT_ID: media_id}
    # Optional fields are only attached when truthy, matching the
    # service schema where both are optional.
    for key, value in ((ATTR_ENTITY_ID, entity_id),
                       (ATTR_MEDIA_ENQUEUE, enqueue)):
        if value:
            service_data[key] = value
    hass.services.call(DOMAIN, SERVICE_PLAY_MEDIA, service_data)
def select_source(hass, source, entity_id=None):
    """Ask one or all media players to switch to the given input source."""
    if entity_id:
        service_data = {ATTR_INPUT_SOURCE: source,
                        ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_INPUT_SOURCE: source}
    hass.services.call(DOMAIN, SERVICE_SELECT_SOURCE, service_data)
def clear_playlist(hass, entity_id=None):
    """Send the clear-playlist command to one or all media players."""
    payload = {}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_CLEAR_PLAYLIST, payload)
def setup(hass, config):
    """Track states and offer events for media_players.

    Creates the entity component, registers the album-art proxy view and
    wires up all media_player services.  Returns True on success so the
    component loader keeps the component active.
    """
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    # Serve entity album art over HTTP (see MediaPlayerImageView below).
    hass.wsgi.register_view(MediaPlayerImageView(hass, component.entities))
    component.setup(config)
    # Human-readable service descriptions shipped next to this module.
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    def media_player_service_handler(service):
        """Map services to methods on MediaPlayerDevice."""
        # Shared handler: the service name selects the device method.
        method = SERVICE_TO_METHOD[service.service]
        for player in component.extract_from_service(service):
            getattr(player, method)()
            # Push the updated state immediately for polled players.
            if player.should_poll:
                player.update_ha_state(True)
    # All simple (entity_id-only) services share the dispatcher above.
    for service in SERVICE_TO_METHOD:
        hass.services.register(DOMAIN, service, media_player_service_handler,
                               descriptions.get(service),
                               schema=MEDIA_PLAYER_SCHEMA)
    # The remaining services carry extra data and get dedicated handlers.
    def volume_set_service(service):
        """Set specified volume on the media player."""
        volume = service.data.get(ATTR_MEDIA_VOLUME_LEVEL)
        for player in component.extract_from_service(service):
            player.set_volume_level(volume)
            if player.should_poll:
                player.update_ha_state(True)
    hass.services.register(DOMAIN, SERVICE_VOLUME_SET, volume_set_service,
                           descriptions.get(SERVICE_VOLUME_SET),
                           schema=MEDIA_PLAYER_SET_VOLUME_SCHEMA)
    def volume_mute_service(service):
        """Mute (true) or unmute (false) the media player."""
        mute = service.data.get(ATTR_MEDIA_VOLUME_MUTED)
        for player in component.extract_from_service(service):
            player.mute_volume(mute)
            if player.should_poll:
                player.update_ha_state(True)
    hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE, volume_mute_service,
                           descriptions.get(SERVICE_VOLUME_MUTE),
                           schema=MEDIA_PLAYER_MUTE_VOLUME_SCHEMA)
    def media_seek_service(service):
        """Seek to a position."""
        position = service.data.get(ATTR_MEDIA_SEEK_POSITION)
        for player in component.extract_from_service(service):
            player.media_seek(position)
            if player.should_poll:
                player.update_ha_state(True)
    hass.services.register(DOMAIN, SERVICE_MEDIA_SEEK, media_seek_service,
                           descriptions.get(SERVICE_MEDIA_SEEK),
                           schema=MEDIA_PLAYER_MEDIA_SEEK_SCHEMA)
    def select_source_service(service):
        """Change input to selected source."""
        input_source = service.data.get(ATTR_INPUT_SOURCE)
        for player in component.extract_from_service(service):
            player.select_source(input_source)
            if player.should_poll:
                player.update_ha_state(True)
    hass.services.register(DOMAIN, SERVICE_SELECT_SOURCE,
                           select_source_service,
                           descriptions.get(SERVICE_SELECT_SOURCE),
                           schema=MEDIA_PLAYER_SELECT_SOURCE_SCHEMA)
    def play_media_service(service):
        """Play specified media_id on the media player."""
        media_type = service.data.get(ATTR_MEDIA_CONTENT_TYPE)
        media_id = service.data.get(ATTR_MEDIA_CONTENT_ID)
        enqueue = service.data.get(ATTR_MEDIA_ENQUEUE)
        kwargs = {
            ATTR_MEDIA_ENQUEUE: enqueue,
        }
        for player in component.extract_from_service(service):
            player.play_media(media_type, media_id, **kwargs)
            if player.should_poll:
                player.update_ha_state(True)
    hass.services.register(DOMAIN, SERVICE_PLAY_MEDIA, play_media_service,
                           descriptions.get(SERVICE_PLAY_MEDIA),
                           schema=MEDIA_PLAYER_PLAY_MEDIA_SCHEMA)
    return True
class MediaPlayerDevice(Entity):
    """ABC for media player devices.

    Platforms override the properties below to report state and the
    command methods to accept control.  The ``support_*`` helpers and the
    generic ``toggle``/``volume_up``/``volume_down``/``media_play_pause``
    implementations need not be overridden.
    """

    # pylint: disable=too-many-public-methods,no-self-use

    # Implement these for your media player
    @property
    def state(self):
        """State of the player."""
        return STATE_UNKNOWN

    @property
    def access_token(self):
        """Access token for this media player.

        NOTE(review): id(self) is predictable, not a cryptographic
        secret; the image proxy accepts it as authentication -- consider
        a random token.
        """
        return str(id(self))

    @property
    def volume_level(self):
        """Volume level of the media player (0..1), or None if unknown."""
        return None

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return None

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return None

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return None

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return None

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return None

    @property
    def media_title(self):
        """Title of current playing media."""
        return None

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return None

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return None

    @property
    def media_album_artist(self):
        """Album artist of current playing media, music track only."""
        return None

    @property
    def media_track(self):
        """Track number of current playing media, music track only."""
        return None

    @property
    def media_series_title(self):
        """Title of series of current playing media, TV show only."""
        return None

    @property
    def media_season(self):
        """Season of current playing media, TV show only."""
        return None

    @property
    def media_episode(self):
        """Episode of current playing media, TV show only."""
        return None

    @property
    def media_channel(self):
        """Channel currently playing."""
        return None

    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        return None

    @property
    def app_id(self):
        """ID of the current running app."""
        return None

    @property
    def app_name(self):
        """Name of the current running app."""
        return None

    @property
    def source(self):
        """Name of the current input source."""
        return None

    @property
    def source_list(self):
        """List of available input sources."""
        return None

    @property
    def supported_media_commands(self):
        """Flag media commands that are supported (SUPPORT_* bitmask)."""
        return 0

    def turn_on(self):
        """Turn the media player on."""
        raise NotImplementedError()

    def turn_off(self):
        """Turn the media player off."""
        raise NotImplementedError()

    def mute_volume(self, mute):
        """Mute the volume."""
        raise NotImplementedError()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        raise NotImplementedError()

    def media_play(self):
        """Send play command."""
        raise NotImplementedError()

    def media_pause(self):
        """Send pause command."""
        raise NotImplementedError()

    def media_stop(self):
        """Send stop command."""
        raise NotImplementedError()

    def media_previous_track(self):
        """Send previous track command."""
        raise NotImplementedError()

    def media_next_track(self):
        """Send next track command."""
        raise NotImplementedError()

    def media_seek(self, position):
        """Send seek command."""
        raise NotImplementedError()

    def play_media(self, media_type, media_id):
        """Play a piece of media."""
        raise NotImplementedError()

    def select_source(self, source):
        """Select input source."""
        raise NotImplementedError()

    def clear_playlist(self):
        """Clear players playlist."""
        raise NotImplementedError()

    # No need to overwrite these.
    @property
    def support_pause(self):
        """Boolean if pause is supported."""
        return bool(self.supported_media_commands & SUPPORT_PAUSE)

    @property
    def support_stop(self):
        """Boolean if stop is supported."""
        return bool(self.supported_media_commands & SUPPORT_STOP)

    @property
    def support_seek(self):
        """Boolean if seek is supported."""
        return bool(self.supported_media_commands & SUPPORT_SEEK)

    @property
    def support_volume_set(self):
        """Boolean if setting volume is supported."""
        return bool(self.supported_media_commands & SUPPORT_VOLUME_SET)

    @property
    def support_volume_mute(self):
        """Boolean if muting volume is supported."""
        return bool(self.supported_media_commands & SUPPORT_VOLUME_MUTE)

    @property
    def support_previous_track(self):
        """Boolean if previous track command supported."""
        return bool(self.supported_media_commands & SUPPORT_PREVIOUS_TRACK)

    @property
    def support_next_track(self):
        """Boolean if next track command supported."""
        return bool(self.supported_media_commands & SUPPORT_NEXT_TRACK)

    @property
    def support_play_media(self):
        """Boolean if play media command supported."""
        return bool(self.supported_media_commands & SUPPORT_PLAY_MEDIA)

    @property
    def support_select_source(self):
        """Boolean if select source command supported."""
        return bool(self.supported_media_commands & SUPPORT_SELECT_SOURCE)

    @property
    def support_clear_playlist(self):
        """Boolean if clear playlist command supported."""
        return bool(self.supported_media_commands & SUPPORT_CLEAR_PLAYLIST)

    def toggle(self):
        """Toggle the power on the media player."""
        if self.state in (STATE_OFF, STATE_IDLE):
            self.turn_on()
        else:
            self.turn_off()

    def volume_up(self):
        """Turn volume up for media player.

        No-op when the platform does not report a volume level (the
        default volume_level is None, which previously raised TypeError
        when compared against 1 on Python 3).
        """
        if self.volume_level is not None and self.volume_level < 1:
            self.set_volume_level(min(1, self.volume_level + .1))

    def volume_down(self):
        """Turn volume down for media player.

        No-op when the platform does not report a volume level.
        """
        if self.volume_level is not None and self.volume_level > 0:
            self.set_volume_level(max(0, self.volume_level - .1))

    def media_play_pause(self):
        """Play or pause the media player."""
        if self.state == STATE_PLAYING:
            self.media_pause()
        else:
            self.media_play()

    @property
    def entity_picture(self):
        """Return image of the media playing."""
        if self.state == STATE_OFF:
            return None
        url = self.media_image_url
        if url is None:
            return None
        # The short md5 digest of the image URL acts as a cache buster so
        # the frontend refetches art when the track changes.
        return ENTITY_IMAGE_URL.format(
            self.entity_id, self.access_token,
            hashlib.md5(url.encode('utf-8')).hexdigest()[:5])

    @property
    def state_attributes(self):
        """Return the state attributes."""
        if self.state == STATE_OFF:
            return {
                ATTR_SUPPORTED_MEDIA_COMMANDS: self.supported_media_commands,
            }
        # Read each exported property once; skip the ones a platform
        # does not provide (None).
        state_attr = {}
        for attr in ATTR_TO_PROPERTY:
            value = getattr(self, attr)
            if value is not None:
                state_attr[attr] = value
        return state_attr
class MediaPlayerImageView(HomeAssistantView):
    """Media player view to serve an image.

    Proxies the entity's media_image_url so the frontend never talks to
    the media device directly.  Authentication is either the normal API
    auth or the per-entity access token passed as ?token=.
    """
    # Auth is checked manually in get() so the token query parameter works.
    requires_auth = False
    url = "/api/media_player_proxy/<entity(domain=media_player):entity_id>"
    name = "api:media_player:image"
    def __init__(self, hass, entities):
        """Initialize a media player view.

        entities: the EntityComponent mapping of entity_id -> device.
        """
        super().__init__(hass)
        self.entities = entities
    def get(self, request, entity_id):
        """Start a get request."""
        player = self.entities.get(entity_id)
        if player is None:
            return self.Response(status=404)
        authenticated = (request.authenticated or
                         request.args.get('token') == player.access_token)
        if not authenticated:
            return self.Response(status=401)
        image_url = player.media_image_url
        if image_url:
            # NOTE(review): no timeout is passed to requests.get and the
            # upstream status code is not checked -- a slow or failing
            # media device can stall this request; confirm and harden.
            response = requests.get(image_url)
        else:
            response = None
        # requests.get never returns None, so the 500 below only fires
        # when the player reports no image URL.
        if response is None:
            return self.Response(status=500)
        return self.Response(response)
|
{
"content_hash": "1a71ccf40c7ce6a92147488add5f94ea",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 79,
"avg_line_length": 30.738028169014086,
"alnum_prop": 0.6441532258064516,
"repo_name": "Smart-Torvy/torvy-home-assistant",
"id": "a3a6274a89eac932d40938677b056b102ae2b445",
"size": "21824",
"binary": false,
"copies": "4",
"ref": "refs/heads/torvy",
"path": "homeassistant/components/media_player/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1373149"
},
{
"name": "Python",
"bytes": "3734212"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
from Child import Child
from Node import Node # noqa: I201
TYPE_NODES = [
    # simple-type-identifier -> identifier generic-argument-clause?
    Node('SimpleTypeIdentifier', kind='Type',
         children=[
             Child('Name', kind='Token',
                   token_choices=[
                       'IdentifierToken',
                       'CapitalSelfToken',
                       'AnyToken',
                   ]),
             Child('GenericArgumentClause', kind='GenericArgumentClause',
                   is_optional=True),
         ]),

    # member-type-identifier -> type '.' identifier generic-argument-clause?
    Node('MemberTypeIdentifier', kind='Type',
         children=[
             Child('BaseType', kind='Type'),
             Child('Period', kind='Token',
                   token_choices=[
                       'PeriodToken',
                       'PrefixPeriodToken',
                   ]),
             Child('Name', kind='Token',
                   token_choices=[
                       'IdentifierToken',
                       'CapitalSelfToken',
                       'AnyToken',
                   ]),
             Child('GenericArgumentClause', kind='GenericArgumentClause',
                   is_optional=True),
         ]),

    # class-restriction-type -> 'class'
    Node('ClassRestrictionType', kind='Type',
         children=[
             Child('ClassKeyword', kind='ClassToken'),
         ]),

    # array-type -> '[' type ']'
    Node('ArrayType', kind='Type',
         children=[
             Child('LeftSquareBracket', kind='LeftSquareBracketToken'),
             Child('ElementType', kind='Type'),
             Child('RightSquareBracket', kind='RightSquareBracketToken'),
         ]),

    # dictionary-type -> '[' type ':' type ']'
    Node('DictionaryType', kind='Type',
         children=[
             Child('LeftSquareBracket', kind='LeftSquareBracketToken'),
             Child('KeyType', kind='Type'),
             Child('Colon', kind='ColonToken'),
             Child('ValueType', kind='Type'),
             Child('RightSquareBracket', kind='RightSquareBracketToken'),
         ]),

    # metatype-type -> type '.' 'Type'
    #                | type '.' 'Protocol'
    Node('MetatypeType', kind='Type',
         children=[
             Child('BaseType', kind='Type'),
             Child('Period', kind='PeriodToken'),
             Child('TypeOrProtocol', kind='IdentifierToken',
                   text_choices=[
                       'Type',
                       'Protocol',
                   ]),
         ]),

    # optional-type -> type '?'
    Node('OptionalType', kind='Type',
         children=[
             Child('WrappedType', kind='Type'),
             Child('QuestionMark', kind='PostfixQuestionMarkToken'),
         ]),

    # implicitly-unwrapped-optional-type -> type '!'
    Node('ImplicitlyUnwrappedOptionalType', kind='Type',
         children=[
             Child('WrappedType', kind='Type'),
             Child('ExclamationMark', kind='ExclamationMarkToken'),
         ]),

    # composition-type-element -> type '&'
    # (the ampersand is optional so the last element of a composition can
    # reuse the same node)
    Node('CompositionTypeElement', kind='Syntax',
         children=[
             Child('Type', kind='Type'),
             Child('Ampersand', kind='Token',
                   text_choices=['&'],
                   is_optional=True),
         ]),

    # composition-type-element-list -> composition-type-element
    #   composition-type-element-list?
    Node('CompositionTypeElementList', kind='SyntaxCollection',
         element='CompositionTypeElement'),

    # composition-type -> composition-type-element-list
    Node('CompositionType', kind='Type',
         children=[
             Child('Elements', kind='CompositionTypeElementList'),
         ]),

    # tuple-type-element -> identifier? ':'? type-annotation ','?
    # (also models inout, a second name, variadic '...', and a default
    # initializer so the node can represent function parameter lists)
    Node('TupleTypeElement', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('InOut', kind='InOutToken',
                   is_optional=True),
             Child('Name', kind='Token',
                   is_optional=True,
                   token_choices=[
                       'IdentifierToken',
                       'WildcardToken'
                   ]),
             Child('SecondName', kind='Token',
                   is_optional=True,
                   token_choices=[
                       'IdentifierToken',
                       'WildcardToken'
                   ]),
             Child('Colon', kind='ColonToken',
                   is_optional=True),
             Child('Type', kind='Type'),
             Child('Ellipsis', kind='Token',
                   is_optional=True),
             Child('Initializer', kind='InitializerClause',
                   is_optional=True),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    # tuple-type-element-list -> tuple-type-element tuple-type-element-list?
    Node('TupleTypeElementList', kind='SyntaxCollection',
         element='TupleTypeElement'),

    # tuple-type -> '(' tuple-type-element-list ')'
    Node('TupleType', kind='Type',
         traits=['Parenthesized'],
         children=[
             Child('LeftParen', kind='LeftParenToken'),
             Child('Elements', kind='TupleTypeElementList'),
             Child('RightParen', kind='RightParenToken'),
         ]),

    # throwing-specifier -> 'throws' | 'rethrows'
    # function-type -> attribute-list '(' function-type-argument-list ')'
    #   throwing-specifier? '->'? type?
    Node('FunctionType', kind='Type',
         traits=['Parenthesized'],
         children=[
             Child('LeftParen', kind='LeftParenToken'),
             Child('Arguments', kind='TupleTypeElementList'),
             Child('RightParen', kind='RightParenToken'),
             Child('ThrowsOrRethrowsKeyword', kind='Token',
                   is_optional=True,
                   token_choices=[
                       'ThrowsToken',
                       'RethrowsToken',
                       # 'throw' is also accepted here -- presumably for
                       # diagnostics/recovery; confirm before removing.
                       'ThrowToken',
                   ]),
             Child('Arrow', kind='ArrowToken'),
             Child('ReturnType', kind='Type'),
         ]),

    # attributed-type -> type-specifier? attribute-list? type
    # type-specifier -> 'inout' | '__shared' | '__owned'
    Node('AttributedType', kind='Type',
         children=[
             Child('Specifier', kind='Token',
                   text_choices=['inout', '__shared', '__owned'],
                   is_optional=True),
             Child('Attributes', kind='AttributeList',
                   is_optional=True),
             Child('BaseType', kind='Type'),
         ]),

    # generic-argument-list -> generic-argument generic-argument-list?
    Node('GenericArgumentList', kind='SyntaxCollection',
         element='GenericArgument'),

    # A generic argument.
    # Dictionary<Int, String>
    #            ^~~~ ^~~~~~
    Node('GenericArgument', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('ArgumentType', kind='Type'),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    # generic-argument-clause -> '<' generic-argument-list '>'
    Node('GenericArgumentClause', kind='Syntax',
         children=[
             Child('LeftAngleBracket', kind='LeftAngleToken'),
             Child('Arguments', kind='GenericArgumentList'),
             Child('RightAngleBracket', kind='RightAngleToken'),
         ]),
]
|
{
"content_hash": "2002e9eec80933204100cbcde43da920",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 76,
"avg_line_length": 37.004926108374384,
"alnum_prop": 0.5090521831735889,
"repo_name": "huonw/swift",
"id": "45ffb4149f77b2f69d5a3f69bab4f4440889f8ad",
"size": "7512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/TypeNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "C",
"bytes": "200369"
},
{
"name": "C++",
"bytes": "29277385"
},
{
"name": "CMake",
"bytes": "462532"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57358"
},
{
"name": "LLVM",
"bytes": "67650"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "369708"
},
{
"name": "Objective-C++",
"bytes": "232830"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1180644"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "206022"
},
{
"name": "Swift",
"bytes": "24216920"
},
{
"name": "Vim script",
"bytes": "15654"
}
],
"symlink_target": ""
}
|
from plugins.contracts import ICartridgeAgentPlugin
import os
import subprocess
from modules.util.log import LogFactory
class ExtensionExecutor(ICartridgeAgentPlugin):
    """Plugin that runs the bash extension script named after an event."""
    def run_plugin(self, values):
        """Execute the <event>.sh extension for the event in *values*.

        Raises RuntimeError when the script is missing or writes to
        stderr.
        """
        log = LogFactory().get_log(__name__)
        event_name = values["EVENT"]
        log.debug("Running extension for %s" % event_name)
        # NOTE(review): extension_values is built here but never passed to
        # execute_script, which copies os.environ instead -- confirm the
        # STRATOS_* variables reach the script through the environment.
        extension_values = {}
        for key in values.keys():
            extension_values["STRATOS_" + key] = values[key]
            # log.debug("%s => %s" % ("STRATOS_" + key, extension_values["STRATOS_" + key]))
        try:
            output, errors = ExtensionExecutor.execute_script(event_name + ".sh")
        except OSError:
            raise RuntimeError("Could not find an extension file for event %s" % event_name)
        # Any stderr output is treated as failure, even if the exit code
        # was zero.
        if len(errors) > 0:
            raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))
        log.info("%s Extension executed. [output]: %s" % (event_name, output))
    @staticmethod
    def execute_script(bash_file):
        """ Execute the given bash files in the <PCA_HOME>/extensions/bash folder
        :param bash_file: name of the bash file to execute
        :return: tuple of (output, errors)
        """
        # Derive <PCA_HOME>/extensions/bash from this module's path:
        # strip everything from "modules" on, then drop the trailing
        # "py/" so the sibling "bash/" folder is addressed.  Fragile --
        # assumes the on-disk layout matches the repository layout.
        working_dir = os.path.abspath(os.path.dirname(__file__)).split("modules")[0]
        command = working_dir[:-2] + "bash/" + bash_file
        # The child inherits a copy of the agent's environment.
        extension_values = os.environ.copy()
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
        output, errors = p.communicate()
        return output, errors
|
{
"content_hash": "af18ca5366a565ba35536a9fe07ea16e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 107,
"avg_line_length": 39.146341463414636,
"alnum_prop": 0.6286604361370717,
"repo_name": "lasinducharith/stratos",
"id": "fbd315bc7c0906825cc61cf50fd796af29cf13f7",
"size": "2389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/extensions/py/ExtensionExecutor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27195"
},
{
"name": "CSS",
"bytes": "78779"
},
{
"name": "HTML",
"bytes": "42426"
},
{
"name": "Handlebars",
"bytes": "106999"
},
{
"name": "Java",
"bytes": "5887702"
},
{
"name": "JavaScript",
"bytes": "755689"
},
{
"name": "Python",
"bytes": "516805"
},
{
"name": "Ruby",
"bytes": "3546"
},
{
"name": "Shell",
"bytes": "159589"
}
],
"symlink_target": ""
}
|
import os
from flask import Blueprint
from flask import json, url_for, current_app
from flask import g, request, abort, send_file
from flask.ext.login import current_user, login_required
from ..utils import as_resource, as_collection
from ..errors import InvalidFieldError, AuthorizationError
from ..services import parameters, components
from ..core import deserialize_request
parameters_page = Blueprint('parameters', __name__)
@parameters_page.route('/')
def show():
    """Return the full parameter collection as JSON.

    Honors optional ``sort`` (default ``id``) and ``order``
    (default ``asc``) query-string arguments.
    """
    sort_field = request.args.get('sort', 'id')
    sort_direction = request.args.get('order', 'asc')
    records = parameters.all(sort=sort_field, order=sort_direction)
    return parameters.jsonify_collection(records)
@parameters_page.route('/', methods=['POST'])
def new():
    """Create a parameter from the posted JSON body and return it."""
    accepted = ['key', 'description', 'component_id']
    attrs = deserialize_request(request, fields=accepted)
    return parameters.create(**attrs).jsonify()
@parameters_page.route('/<int:id>', methods=['PATCH'])
def edit(id):
    """Partially update a parameter; any subset of the fields may be sent."""
    data = deserialize_request(request, fields=['key', 'description',
                                                'component'], require='some')
    param = parameters.get_or_404(id)
    if 'component' in data:
        # NOTE(review): this stores whatever components.get_or_404 returns
        # under 'component_id' — presumably update() resolves it, but if it
        # returns a model object this may need '.id'; confirm.
        data['component_id'] = components.get_or_404(data.pop('component'))
    parameters.update(param, **data)
    return param.jsonify()
@parameters_page.route('/<int:id>')
def parameter(id):
    """Fetch a single parameter by primary key; 404 when absent."""
    record = parameters.get_or_404(id)
    return record.jsonify()
@parameters_page.route('/<int:id>', methods=['DELETE'])
def delete(id):
    """Remove a parameter and respond with 204 No Content."""
    record = parameters.get_or_404(id)
    parameters.delete(record)
    return "", 204
|
{
"content_hash": "5ea8445eddd2affb9bb26a5923e71ef0",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 30.18,
"alnum_prop": 0.6805831676607025,
"repo_name": "mcflugen/wmt-rest",
"id": "24b6b05f4d019ad7cdd97501436b311afd8a1c52",
"size": "1509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wmt/flask/api/parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169966"
},
{
"name": "Shell",
"bytes": "24"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter as ADHF
from sys import stderr, stdout, exit
import logging
import csv
import re
# Extract the chromosome name from a GFF3 attribute column (column 9).
PAT_CHR = re.compile('^(?:.*;)?chromosome=([^;]+)(?:;.*)?$', re.I)
# Extract the Name attribute from a GFF3 attribute column.
PAT_NAME = re.compile('^(?:.*;)?Name=([^;]+)(?:;.*)?$', re.I)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
# supported annotation formats
ANN_FORMAT = ['ENSEMBLE', 'GFF3']
def readEnsembleData(data):
    """Parse an Ensembl-style CSV annotation file.

    :param data: iterable of comma-separated lines; the first line is a
        header and is skipped
    :return: sorted list of unique (chr_id, start, end, gene_id) tuples
    """
    res = set()
    isHeader = True
    for line in csv.reader(data, delimiter=','):
        if isHeader:
            isHeader = False
            continue
        # chr_id, start, end, gene_id
        # BUGFIX: the original called list.append() on a set, which raises
        # AttributeError on the first data row; set.add() keeps the implied
        # de-duplication intent.
        res.add((line[5].strip(), int(line[6]), int(line[7]), line[3].strip()))
    return sorted(res)
def readGFF3Data(data):
    """Parse a GFF3 annotation file.

    'region' records build a map from sequence id to chromosome name;
    'gene' records are collected, duplicate/contained gene spans are pruned
    (keeping the wider one), and sequence ids are translated through the
    chromosome map.

    :param data: iterable of tab-separated GFF3 lines
    :return: sorted list of (chromosome, start, end, gene_name) tuples
    """
    chrMap = dict()
    anns = set()
    for line in csv.reader(data, delimiter='\t'):
        if line[0].startswith('#'):
            continue
        if line[2] == 'region':
            m = PAT_CHR.match(line[8])
            if m:
                chrMap[line[0]] = m.group(1)
            else:
                # Fall back to the Name attribute, but never overwrite a
                # chromosome name that was already recorded.
                m = PAT_NAME.match(line[8])
                # BUGFIX: dict.has_key() was removed in Python 3; the 'in'
                # operator is equivalent on both Python 2 and 3.
                if m and line[0] not in chrMap:
                    chrMap[line[0]] = m.group(1)
        elif line[2] == 'gene':
            name = line[0]
            m = PAT_NAME.match(line[8])
            if m:
                name = m.group(1)
            anns.add((line[0], int(line[3]), int(line[4]), name))
    anns = list(anns)
    anns.sort()
    i = 0
    while i < len(anns):
        # Translate the sequence id to a chromosome name when known.
        anns[i] = (chrMap.get(anns[i][0], anns[i][0]), ) + anns[i][1:]
        # check for overlaps and remove smaller gene annotation
        if i > 0 and anns[i-1][0] == anns[i][0]:
            if anns[i-1][1] == anns[i][1]:
                del anns[i-1]
                continue
            elif anns[i-1][2] >= anns[i][2]:
                del anns[i]
                continue
        i += 1
    # inefficient, but hey, data is not that large
    anns.sort()
    return anns
if __name__ == '__main__':
    # setting up the argument parser
    parser = ArgumentParser(description='Reads annotations in Ensemble or ' + \
            'GFF3 format and produces an annotation file that will be used' + \
            ' in constructing Hi-C graphs', formatter_class=ADHF)
    parser.add_argument('-f', '--format', default=ANN_FORMAT[0], type=str,
            choices=ANN_FORMAT,
            help='Supported formats of input annotation file ')
    parser.add_argument('annotation_file', type=str, help='Annoation file')
    args = parser.parse_args()

    # setup logging: errors and above go to stderr
    ch = logging.StreamHandler(stderr)
    ch.setLevel(logging.ERROR)
    ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    LOG.addHandler(ch)

    annotations = None
    if args.format == 'ENSEMBLE':
        annotations = readEnsembleData(open(args.annotation_file))
    elif args.format == 'GFF3':
        annotations = readGFF3Data(open(args.annotation_file))
    else:
        # Unreachable in practice: argparse restricts --format to ANN_FORMAT.
        LOG.fatal('Unknown annotation file format. Exiting')
        exit(1)

    # output annotations as tab-separated rows
    # BUGFIX: 'print >> out' is Python-2-only syntax; out.write() produces
    # the same output on both Python 2 and 3.
    out = stdout
    for ann in annotations:
        out.write('\t'.join(map(str, ann)) + '\n')
|
{
"content_hash": "018c5ed517bc43a4b30f97c1c5a32204",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 82,
"avg_line_length": 28.730434782608697,
"alnum_prop": 0.5493341404358354,
"repo_name": "danydoerr/GraphTeams",
"id": "94753604a1d76902d90e8d0c93c414f9ad80a80f",
"size": "3328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/process_annotations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79819"
}
],
"symlink_target": ""
}
|
"""Module for common statistic tests metadata providers."""
from abc import ABC, abstractmethod
from typing import Dict
from .criteria import CriteriaFactory
from .function import FunctionFactory
from .types import MeasurementDef, StatisticDef
from .baseline import Provider as BaselineProvider
# pylint: disable=R0903
class Provider(ABC):
    """Abstract backend supplying statistic-test metadata.

    Metadata consists of measurement and statistic definitions; concrete
    subclasses decide where those definitions come from.
    """

    def __init__(self, baseline_provider: BaselineProvider):
        """Remember the baseline provider used when building criteria."""
        self._baseline_provider = baseline_provider

    @property
    @abstractmethod
    def measurements(self) -> Dict[str, MeasurementDef]:
        """Return the measurement definitions, keyed by measurement name."""

    @property
    def baseline_provider(self) -> BaselineProvider:
        """Return the baseline provider this backend was built with."""
        return self._baseline_provider
# pylint: disable=R0903
class DictProvider(Provider):
    """Backend for test metadata retrieval, fed from a plain dictionary."""

    # Required keys inside each measurement definition.
    UNIT_KEY = "unit"
    STATISTICS_KEY = "statistics"

    def __init__(self, measurements: dict, baseline_provider: BaselineProvider):
        """
        Initialize the metadata provider.

        :param measurements: mapping of measurement name to definition. Each
            definition must contain a 'unit' string and a 'statistics' list of
            statistic definitions of the form
            ``{"function": <FunctionFactory class name>,
               "name": <optional custom name>,
               "criteria": <optional CriteriaFactory class name>}``,
            with only 'function' mandatory.
        :param baseline_provider: source of baselines; a criteria object is
            built only for (measurement, statistic) pairs that have one.
        :raises AssertionError: on missing keys or unknown class names.
        """
        super().__init__(baseline_provider)
        self._measurements = {}
        for ms_name in measurements:
            assert DictProvider.UNIT_KEY in measurements[ms_name], (
                f"'{DictProvider.UNIT_KEY}' field is required for '"
                f"{ms_name}' measurement definition."
            )
            assert DictProvider.STATISTICS_KEY in measurements[ms_name], (
                f"'{DictProvider.STATISTICS_KEY}' field is required for '"
                f"{ms_name}' measurement definition."
            )
            unit = measurements[ms_name][DictProvider.UNIT_KEY]
            st_defs = measurements[ms_name][DictProvider.STATISTICS_KEY]
            st_list = []
            for st_def in st_defs:
                # Mandatory.
                func_cls_name = st_def.get("function")
                assert func_cls_name, (
                    f"Error in '{ms_name}' "
                    "measurement definition: "
                    "'function' field is required for "
                    "measurement statistics definitions."
                )
                func_cls = FunctionFactory.get(func_cls_name)
                # BUGFIX: the original re-asserted `func_cls_name` here
                # (always truthy at this point); the factory lookup result
                # is what must be validated.
                assert func_cls, (
                    f"Error in '{ms_name}' "
                    "measurement definition: "
                    f"'{func_cls_name}' is not a valid "
                    f"statistic function."
                )
                name = st_def.get("name")
                func = func_cls()
                if name:
                    # Optional custom statistic name.
                    func = func_cls(name)
                criteria = None
                criteria_cls_name = st_def.get("criteria")
                baseline = baseline_provider.get(ms_name, func.name)
                # A criteria is built only when both a criteria class and a
                # baseline for this statistic exist.
                if criteria_cls_name and baseline:
                    criteria_cls = CriteriaFactory.get(criteria_cls_name)
                    assert criteria_cls, (
                        f"{criteria_cls_name} is not a " f"valid criteria."
                    )
                    criteria = criteria_cls(baseline)
                st_list.append(StatisticDef(func, criteria))
            self._measurements[ms_name] = MeasurementDef(ms_name, unit, st_list)

    @property
    def measurements(self):
        """Return measurement dictionary."""
        return self._measurements
|
{
"content_hash": "fa51a174030b97fecd2069759ab8a983",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 80,
"avg_line_length": 35.98203592814371,
"alnum_prop": 0.4504909302712598,
"repo_name": "firecracker-microvm/firecracker",
"id": "fcd37f86bee30f6a60aaeee2fcb290744d6208cb",
"size": "6121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/framework/stats/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14327"
},
{
"name": "Python",
"bytes": "833110"
},
{
"name": "Ruby",
"bytes": "387"
},
{
"name": "Rust",
"bytes": "2944085"
},
{
"name": "Shell",
"bytes": "100226"
}
],
"symlink_target": ""
}
|
import os
import time
import requests
import re
import json
import io
import subprocess
import base64
import audioread
from flask import Flask, request
from google.cloud import storage
from google.cloud.storage.blob import Blob
from google.cloud import speech
from google.cloud import pubsub_v1
app = Flask(__name__)

# Load env variables
# (Terraform-injected configuration: project id, dropzone buckets, topic.)
project_id = os.environ['TF_VAR_GCP_PROJECT_ID']
gcs_bucket_audio_short = os.environ['TF_VAR_GCS_BUCKET_AUDIO_DROPZONE_SHORT']
gcs_bucket_audio_long = os.environ['TF_VAR_GCS_BUCKET_AUDIO_DROPZONE_LONG']
gcs_bucket_text = os.environ['TF_VAR_GCS_BUCKET_TEXT_DROPZONE']
pubsub_topic = os.environ['TF_VAR_PUBSUB_TOPIC_TEXT_INPUT']

# Ensure that ENV variables do not have extra quotes
project_id = project_id.replace('"','')
gcs_bucket_audio_short = gcs_bucket_audio_short.replace('"','')
gcs_bucket_audio_long = gcs_bucket_audio_long.replace('"','')
gcs_bucket_text = gcs_bucket_text.replace('"','')
pubsub_topic = pubsub_topic.replace('"','')

# Initialize Clients
# (module-level singletons shared by all request handlers)
speech_client = speech.SpeechClient()
pubsub_publisher = pubsub_v1.PublisherClient()
storage_client = storage.Client()
def gcp_storage_upload(source_type, source, bucket_name, blob_name, ):
    '''
    Upload data to a Google Cloud Storage blob.

    source_type: Either "string" or "filename"
    source: String to upload, or filename containing the data to upload.
    bucket_name: Name of the Google Cloud Storage bucket
    blob_name: Name of the Google Cloud Storage blob
    '''
    try:
        target_blob = storage_client.get_bucket(bucket_name).blob(blob_name)
        kind = source_type.lower()
        if kind == 'string':
            target_blob.upload_from_string(source)
            print(f'[ INFO ] Uploaded {blob_name} as string to GCS bucket {bucket_name}')
        elif kind == 'filename':
            target_blob.upload_from_filename(source)
            print(f'[ INFO ] Uploaded file {blob_name} to GCS bucket {bucket_name}')
    except Exception as e:
        # Upload is best-effort: failures are logged, never raised.
        print(f'[ ERROR ] gcp_storage_upload. Failed to upload to GCS. {e}')
def speech_to_text_short(gcs_uri):
    '''
    Google Cloud Speech-to-Text (short audio)

    Transcribes the flac file at *gcs_uri* synchronously and returns a
    list of sentences (the top alternative of each result).
    '''
    recognition_config = speech.RecognitionConfig(
        language_code="en-US",
        enable_automatic_punctuation=True,
    )
    recognition_audio = speech.RecognitionAudio(uri=gcs_uri)
    response = speech_client.recognize(config=recognition_config, audio=recognition_audio)
    return [result.alternatives[0].transcript for result in response.results]
def pubsub_publish( pubsub_publisher, project_id, pubsub_topic, message ):
    '''
    Publish a message to a Pub/Sub topic.

    pubsub_publisher: an initialized pubsub_v1.PublisherClient()
    project_id:       GCP project id
    pubsub_topic:     topic name within the project
    message:          dict (serialized to JSON) or string payload

    Notes:
    - When using JSON over REST, message data must be base64-encoded
    - Messages must be smaller than 10MB (after decoding)
    - The message payload must not be empty
    - Attributes can also be added to the publisher payload
    '''
    try:
        def pubsub_callback( message_future ):
            # When timeout is unspecified, the exception method waits indefinitely.
            if message_future.exception(timeout=30):
                # BUGFIX: the original referenced an undefined name
                # 'topic_name' here, raising NameError in the failure path.
                print('[ ERROR ] Publishing message on {} threw an Exception {}.'.format(pubsub_topic, message_future.exception()))
            else:
                print('[ INFO ] Result: {}'.format(message_future.result()))

        # Initialize PubSub Path
        pubsub_topic_path = pubsub_publisher.topic_path( project_id, pubsub_topic )

        # If message is JSON, then dump to json string
        if type( message ) is dict:
            message = json.dumps( message )

        # When you publish a message, the client returns a Future.
        message_future = pubsub_publisher.publish(pubsub_topic_path, data=message.encode('utf-8') )
        message_future.add_done_callback( pubsub_callback )
        print(f'[ DEBUG ] Pubsub message_future.result(): {message_future.result()}')
    except Exception as e:
        print('[ ERROR ] {}'.format(e))
def download_remote_file(response, saved_filename):
    '''
    Stream a completed HTTP response body to disk.

    "response" comes from requests.get or request.post response
    '''
    if response.status_code != 200:
        # Nothing to save for non-200 responses.
        return None
    print(f'[ INFO ] Saving {response.url} as {saved_filename}')
    with open(saved_filename, 'wb') as out_file:
        # Stream in 1 KiB chunks; empty keep-alive chunks are skipped.
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                out_file.write(chunk)
    return None
def get_audio_duration(audio_file):
    """Return the duration of an audio file in seconds, or None on failure."""
    try:
        with audioread.audio_open(audio_file) as decoded:
            duration_secs = decoded.duration
        print(f'[ INFO ] Audio file duration: {duration_secs} seconds')
        return duration_secs
    except Exception as e:
        # Any decode/open problem is logged and reported as "unknown".
        print(f'[ EXCEPTION ] At get_audio_duration. {e}')
        return None
def generate_filename(url):
    """Derive a safe local filename from an audio URL.

    Lowercases the URL, extracts a .wav/.mp3/.flac basename, and replaces
    unsafe characters with underscores; leading non-letters are stripped.
    Falls back to a timestamped 'noid_<epoch>.mp3' name when no audio
    filename can be extracted.
    """
    try:
        # Lowcase
        url = url.lower()
        # Extract filename from URL
        filename = re.search('[a-zA-Z0-9\-\_\ \(\)]+\.(wav|mp3|flac)', url).group()
        # Cleanup URL
        filename = re.sub('[^a-zA-Z0-9\.]','_',filename)
        # Remove leading character(s) if not a letter
        filename = re.sub('^[^a-zA-Z]+','', filename)
    except Exception as e:
        print(f'[ EXCEPTION ] At generate_filename. {e}')
        filename = f'noid_{int(time.time())}.mp3'
    # BUGFIX: the debug line printed the literal "(unknown)" instead of the
    # generated filename.
    print(f'[ DEBUG ] generate_filename = {filename}')
    return filename
#############################################################
#
# Routes
#
#############################################################
# Test Endpoint.
# Used for testing, debugging, or could
# even be used for service up-time checks.
@app.route("/test", methods = ['GET'])
def test():
    # Lightweight liveness probe; always answers HTTP 200.
    return f'Test Successful!', 200
# Audio Endpoint.
# Accepts audio payloads as a POST message,
# with the following structure:
#
# {
#     'userid': 'user123',
#     'audio_uri': 'https://mypath/audio.wav',
# }
@app.route("/audio", methods = ['POST'])
def audio():
    # Downloads the referenced audio, converts it to mono flac, and drops
    # both the audio and its JSON metadata into the short- or long-audio
    # GCS dropzone (split at 60 seconds).
    if request.method == 'POST':
        try:
            payload = request.get_json()
            print(f'''[ INFO ] User-provided payload: {payload}''')

            # Add timestamp to payload if it does not exist
            timestamp_int = int(time.time())
            if 'timestamp' not in payload:
                payload['timestamp'] = timestamp_int

            # Fetch the remote audio file referenced by the payload.
            audio_uri = payload['audio_uri']
            print(f'[ INFO ] /audio requesting audio file from {audio_uri}')
            response = requests.get(audio_uri)
            print(f'[ INFO ] Requested audio file. Status code: {response.status_code}')

            if response.status_code == 200:
                audio_filename = generate_filename(audio_uri)
                # Write audio to GCS so that STT can be ran against this file.
                if True: # re.search('\.mp3$',audio_filename):
                    # Save audio file locally for duration probing / ffmpeg.
                    download_remote_file(response=response, saved_filename=audio_filename)
                    # Get Audio file length; short (<60s) and long audio go
                    # to different dropzone buckets.
                    audio_file_duration_in_secs = get_audio_duration(audio_file=audio_filename)
                    if audio_file_duration_in_secs < 60:
                        bucket_name = gcs_bucket_audio_short
                    else:
                        bucket_name = gcs_bucket_audio_long
                    print(f"[ INFO ] bucket_name: {bucket_name}")
                    # Upload raw (initial) audio file
                    print(f'[ INFO ] Processing audio file called {audio_filename}')
                    gcp_storage_upload(source_type='string', source=response.content, bucket_name=bucket_name, blob_name=audio_filename)
                    # Convert mp3 to flac (single channel) for Speech-to-Text.
                    audio_filename_flac = re.sub('\.[a-z0-9]+$','.flac',audio_filename.lower())
                    print(f'[ INFO ] Running {audio_filename} through FFMPEG to generate {audio_filename_flac}')
                    subprocess.call(['ffmpeg', '-i', audio_filename, '-ac', '1', audio_filename_flac])
                    print(f'[ INFO ] Uploading processed audio file {audio_filename_flac} (as flac) to gs://{bucket_name}')
                    gcp_storage_upload(source_type='filename', source=audio_filename_flac, bucket_name=bucket_name, blob_name=audio_filename_flac)
                    # GCS Path
                    # NOTE(review): gcs_uri is computed but unused here —
                    # presumably downstream consumers derive it themselves;
                    # confirm.
                    gcs_uri = f'gs://{bucket_name}/{audio_filename_flac}'
                    # Write audio payload/metadata to GCS
                    audio_payload_filename = re.sub('\.[a-z0-9]+$', '.json', audio_filename)
                    print(f'[ INFO ] Writing {audio_payload_filename} to GCS')
                    gcp_storage_upload(source_type='string', source=json.dumps(payload), bucket_name=bucket_name, blob_name=audio_payload_filename)
                    # Send Response - Short Audio file (less than 60 seconds)
                    if audio_file_duration_in_secs < 60:
                        msg = f'''{audio_uri} has been processed as a short audio file.'''
                        print(f'''[ INFO ] {msg}''')
                        return msg, 201
                    # Send Response - Long Audio file (over 60 seconds)
                    else:
                        msg = f'''{audio_uri} is being process as a long audio file.'''
                        print(f'''[ INFO ] {msg}''')
                        return msg, 201
            else:
                msg = f'''Failed to get {audio_uri}. Status Code: {response.status_code}. {response.content}'''
                print(f'''[ ERROR ] {msg}''')
                return msg, response.status_code
        except Exception as e:
            print(f'[ EXCEPTION ] At /audio. {e}')
            return '', 401
# Text Chat Endpoint.
# Accepts text chat payloads as a POST message,
# with the following structure:
#
# {
#     'userid': 'user123',
#     'text': 'test text message'
# }
@app.route("/text", methods = ['POST'])
def text():
    # Stamp the payload and drop it into the GCS text dropzone as JSON.
    if request.method == 'POST':
        try:
            payload = request.get_json()
            print(f'''[ INFO ] Request payload: {payload}''')

            # Add the arrival time when the client omitted it.
            timestamp_int = int(time.time())
            if 'timestamp' not in payload:
                payload['timestamp'] = timestamp_int

            # Object name is prefixed with the (lowercased) userid if given.
            if 'userid' in payload:
                payload_filename = f"{payload['userid'].lower()}_{timestamp_int}.json"
            else:
                payload_filename = f"{timestamp_int}.json"

            # Write payload to Google Cloud Storage
            gcp_storage_upload(source_type='string', source=json.dumps(payload), bucket_name=gcs_bucket_text, blob_name=payload_filename)
            return 'Success', 201
        except Exception as e:
            print(f'[ EXCEPTION ] At /chat. {e}')
            return '', 401
# Callback send a scored message to the
# specified callback uri
@app.route("/callback", methods = ['POST'])
def callback():
    # Decode a Pub/Sub push envelope and forward the scored message to the
    # callback_url embedded in it.
    if request.method == 'POST':
        try:
            print('[ INFO ] Starting Callback')
            payload = request.get_json()
            print(f'[ INFO ] callback payload: {payload}')
            # Pub/Sub push payloads carry the message data base64-encoded.
            decoded = json.loads(base64.b64decode(payload['message']['data']).decode('utf-8'))
            forwarded = requests.post(decoded['callback_url'], json=decoded)
            print(f'[ INFO ] Status code from callback_url: {forwarded.status_code}')
            return 'Success', 201
        except Exception as e:
            print(f'[ EXCEPTION ] At /callback. {e}')
            return 'Bad Request', 401
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=8080)
|
{
"content_hash": "7f0a868a6930867d31a684b2f8815256",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 147,
"avg_line_length": 40.814332247557005,
"alnum_prop": 0.5660814046288907,
"repo_name": "googleforgames/clean-chat",
"id": "749b5befffad4f8f3dcc747f5c7fd09c68150699",
"size": "13126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/api/backend_python/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6717"
},
{
"name": "HCL",
"bytes": "17964"
},
{
"name": "Makefile",
"bytes": "7246"
},
{
"name": "Python",
"bytes": "103012"
},
{
"name": "Shell",
"bytes": "10587"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.contrib.auth.models import User
#from .models import User
class MyUserChangeForm(UserChangeForm):
    """Stock user-change form, bound to this project's User model."""
    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """User-creation form that re-validates username uniqueness."""

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Return the username if unused; otherwise raise a validation error."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            # No existing user with this name: it is free to take.
            return username
        else:
            raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
    # Swap in the project-specific change/creation forms.
    form = MyUserChangeForm
    add_form = MyUserCreationForm
|
{
"content_hash": "41c28a2341656c8ef266d56bad5bde9d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 28.151515151515152,
"alnum_prop": 0.7373519913885899,
"repo_name": "LABETE/TestYourProject",
"id": "0167a5412004c184f32abde4f57e601a92a873de",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestYourProject/users/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "70188"
},
{
"name": "HTML",
"bytes": "99145"
},
{
"name": "JavaScript",
"bytes": "917905"
},
{
"name": "Python",
"bytes": "62518"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
import pygame, sys, time
from pygame.locals import *
import random
import math
#constants
#player and ball sizes are radius
PLAYER_SIZE=22      # player radius, in pixels
BALL_SIZE=10        # ball radius, in pixels
FIELD_LENGTH=1000   # field extent along x, in pixels
FIELD_WIDTH=500     # field extent along y, in pixels
GOAL_SIZE=280       # goal opening height, in pixels
#Classes
#ball has velocity and position
class ball():
    """The game ball: a 2-D position plus a 2-D velocity vector."""

    def __init__(self, pos=None):
        """Start at *pos* ([x, y]) or a random on-field point, at rest."""
        if pos is None:
            pos = [random.randint(20, FIELD_LENGTH - 20),
                   random.randint(10, FIELD_WIDTH - 10)]
        self.pos = pos
        self.velocity = [0, 0]

    def slow(self):
        """Apply friction: shrink both velocity components slightly."""
        self.velocity = [self.velocity[0] / 1.0015,
                         self.velocity[1] / 1.0015]

    #used for when ball is contacted by something
    def bump(self, direction, length):
        """Kick the ball away along *direction*, scaled by *length*."""
        self.velocity[0] -= direction[0] * length * 1.2
        self.velocity[1] -= direction[1] * length * 1.2

    #stop the ball
    def stop(self):
        """Bring the ball to rest."""
        self.velocity = [0, 0]
#houses the decision-making of a player i.e. neural net
class instinct():
    """A tiny feed-forward net (4 inputs -> 3 hidden -> 2 outputs) mapping
    field observations to the player's two wheel speeds."""

    def __init__(self, weights):
        #inputs for each node + bias, currently 3 lists of 5 + 2 lists of 4
        #sort of like this: [[5],[5],[5],[4],[4]]
        self.weights = weights
        #three layers of network
        self.inputs = []
        self.outputs = []
        self.hidden_layer = []

    #takes necessary information and determines wheel speeds via neural net
    def findmove(self, player_pos, player_direction, ball_pos):
        """Return [left, right] wheel speeds, each in (-1, 1)."""
        #takes the center of the goal and the ball positions as input
        self.inputs = [0,
                       FIELD_WIDTH/2,
                       ball_pos[0],
                       ball_pos[1]]
        #centers coordinates on the player's position and direction
        self.orient(player_pos, player_direction)
        #map input to hidden, using first 3 sets of weights
        self.hidden_layer = self.map_weights(self.inputs, self.weights[0:3],
                                             self.activation_function)
        #map hidden to output, using last 2 sets of weights
        self.output_layer = self.map_weights(self.hidden_layer, self.weights[3:5],
                                             self.output_function)
        return [self.output_layer[0], self.output_layer[1]]

    def map_weights(self, input_layer, weights, fnc):
        """Apply one fully-connected layer: node[:-1] dot inputs + bias."""
        ret = []
        #iterate over weight sets for each node in layer we map to
        for node in weights:
            total = 0
            #for each weight set, iterate over all but the last weight
            #and multiply by the corresponding value from previous layer
            for input_number in range(len(node)-1):
                total += node[input_number]*input_layer[input_number]
            #use last weight to determine bias
            total += node[len(node)-1]
            #put through output function to determine final value
            ret.append(fnc(total))
        return ret

    #from -1 to 1
    def activation_function(self, total):
        """Clipped sigmoid-like squash onto (-1, 1)."""
        if total > 50:
            return 1
        if total < -50:
            return -1
        return 2/(1+2.718**(-3*total))-1

    def output_function(self, total):
        """Same squash as activation_function (kept separate for tuning)."""
        if total > 50:
            return 1
        if total < -50:
            return -1
        return 2/(1+2.718**(-3*total))-1

    #first center on position of player, then
    #use rotation matrix to rotate so y-axis along player's direction
    def orient(self, p, direction):
        for i in range(0, len(self.inputs), 2):
            x = self.inputs[i]-p[0]
            y = p[1]-self.inputs[i+1]
            d = 90-direction
            self.inputs[i] = cos(d)*x-sin(d)*y
            self.inputs[i+1] = sin(d)*x+cos(d)*y
            #reduce each coordinate to its sign; epsilon avoids div-by-zero
            self.inputs[i] = self.inputs[i]/abs(self.inputs[i]+.0001)
            self.inputs[i+1] = self.inputs[i+1]/abs(self.inputs[i+1]+.0001)

    #change every weight by normal distribution with .03 std deviation
    def mutate(self):
        """Return a copy of all 5 weight rows with Gaussian noise added."""
        weights = []
        for i in range(3):
            node = []
            for j in range(5):
                node.append(random.normalvariate(0, .03)+self.weights[i][j])
            weights.append(node)
        for i in range(2):
            node = []
            for j in range(4):
                # BUGFIX: the original read self.weights[i][j] here, so the
                # mutated output rows were derived from the *hidden* rows'
                # values; the output rows live at indices 3 and 4.
                node.append(random.normalvariate(0, .03)+self.weights[i+3][j])
            weights.append(node)
        return weights
#for player, has position and instinct and direction
class player():
    """A robot player: position, heading in degrees, and a neural-net brain."""

    def __init__(self, pos, weights, direction=0):
        self.pos = pos
        self.direction = direction
        #initialize brain
        self.instinct = instinct(weights)

    #use instinct to determine move
    def makemove(self, ball):
        """Ask the brain for wheel speeds and integrate one motion step."""
        wheel_speeds = self.instinct.findmove(self.pos, self.direction, ball.pos)
        # Differential drive: the wheel mean drives forward, the
        # difference turns the player.
        forward = (wheel_speeds[0] + wheel_speeds[1]) / 2
        turn = (wheel_speeds[1] - wheel_speeds[0]) * 5
        self.pos[0] += cos(self.direction) * forward
        self.pos[1] -= sin(self.direction) * forward
        self.direction += turn
        if self.direction > 360:
            self.direction -= 360

    #reset to random position and mutate instinct
    def mutate(self):
        """Spawn a child at a random field spot with perturbed weights."""
        spawn = [random.randint(0, FIELD_LENGTH), random.randint(0, FIELD_WIDTH)]
        return player(spawn, self.instinct.mutate())

    def distance(self, other):
        """Euclidean distance between this and *other*'s centers."""
        dx = self.pos[0] - other.pos[0]
        dy = self.pos[1] - other.pos[1]
        return (dx ** 2 + dy ** 2) ** (.5)

    #reset to random position
    def random_pos(self):
        self.pos = [random.randint(0, FIELD_LENGTH), random.randint(0, FIELD_WIDTH)]

    def stop(self):
        """Players ignore boundary stops (interface shared with ball)."""
        pass
#Initialization and Helper Functions
#scale vector
def scale(x, y, length):
    """Rescale (x, y) so its magnitude is (approximately) *length*."""
    # The +.001 keeps the division defined for the zero vector.
    factor = (length ** 2 / (x ** 2 + y ** 2 + .001)) ** (.5)
    return (factor * x, factor * y)
def sin(x):
    """Sine of an angle *x* given in degrees."""
    return math.sin(math.radians(x))
def cos(x):
    """Cosine of an angle *x* given in degrees."""
    return math.cos(math.radians(x))
def random_game():
    """Spawn one randomly-weighted player and run a visual match."""
    roster = []
    for _ in range(1):
        start = [random.randint(0, FIELD_LENGTH), random.randint(0, FIELD_WIDTH)]
        roster.append(player(start, random_instinct(), random.randint(0, 360)))
    play(roster, ball())
def demonstration():
    # Show off a single pre-trained player (weights saved from a past
    # evolution run) against a random ball, with graphics.
    players=[]
    for i in range(1):
        player_pos = [random.randint(0,FIELD_LENGTH),random.randint(0,FIELD_WIDTH)]
        players.append(player(player_pos, [[-1.098435474656182, 0.6589064295051409, -0.7390573375911145, -0.6989480706606246, -1.0322066785193227], [-0.5615227913298615, -0.11591445532422427, -0.15676586775256468, 0.6172299160162027, 0.8983686257605342], [0.5591761812956685, -0.6749809133882746, 0.026847750609672514, -0.8616799960725277, 0.1940568944057437], [-1.1773188370604548, 0.6282954708562554, -0.751484119068484, -0.6732556314057755], [-0.5103879833774485, 0.02035234500389063, -0.2663091274743478, 0.5839138375464418]]
        , random.randint(0,360)))
    play(players, ball())
def random_instinct():
    """Build a fresh random weight set: three 5-weight hidden rows followed
    by two 4-weight output rows, all drawn uniformly from [-1, 1]."""
    hidden_rows = [[random.uniform(-1, 1) for _ in range(5)] for _ in range(3)]
    output_rows = [[random.uniform(-1, 1) for _ in range(4)] for _ in range(2)]
    return hidden_rows + output_rows
#Game Functions
def update(players, ball):
    # Advance the simulation one frame: move the ball, move every player,
    # resolve collisions, and report "goal", "out", or "" (still playing).
    #move ball
    ball.pos[0]+=ball.velocity[0]
    ball.pos[1]+=ball.velocity[1]
    ball.slow()
    #check for goal: ball crossed the left edge inside the goal mouth
    bottom=(FIELD_WIDTH+GOAL_SIZE)/2
    top=(FIELD_WIDTH-GOAL_SIZE)/2
    if ball.pos[0]<=0 and ball.pos[1]<bottom and ball.pos[1]>top:
        return "goal"
    isout=0
    for p in players:
        #players move
        p.makemove(ball)
        #collisions with other players: push both apart by half the overlap
        for p2 in players:
            if(p!=p2 and p.distance(p2)<PLAYER_SIZE):
                direction=(p.pos[0]-p2.pos[0],p.pos[1]-p2.pos[1])
                length=(PLAYER_SIZE-((direction[0]**2)+(direction[1]**2))**(.5))/2
                direction=scale(direction[0], direction[1], length)
                p.pos=[p.pos[0]+direction[0],p.pos[1]+direction[1]]
                p2.pos=[p2.pos[0]-direction[0],p2.pos[1]-direction[1]]
        #collsion with ball: displace the ball and kick it
        if(p.distance(ball)<PLAYER_SIZE+BALL_SIZE):
            direction=(p.pos[0]-ball.pos[0],p.pos[1]-ball.pos[1])
            length=((PLAYER_SIZE+BALL_SIZE)-((direction[0]**2)+(direction[1]**2))**(.5))
            direction=scale(direction[0], direction[1], length)
            ball.pos=[ball.pos[0]-direction[0]*2,ball.pos[1]-direction[1]*2]
            ball.bump(direction, length)
        #isout+=boundary(p)
        # NOTE(review): source indentation was ambiguous — the ball boundary
        # check sits with the (commented) per-player one; either placement
        # yields the same "out" result.
        isout+=boundary(ball)
    #determine if ball or player is out
    if(isout>0):
        return "out"
    return ""
#keep things in field, return 1 if anything was out.
def boundary(agent):
    """Clamp *agent* back onto the field; return 1 if it was out, else 0.

    Only the first violated edge (checked right, bottom, left, top, in that
    order) is clamped per call, matching the original early-return behavior.
    """
    x, y = agent.pos
    if x > FIELD_LENGTH:
        agent.pos = [FIELD_LENGTH, y]
    elif y > FIELD_WIDTH:
        agent.pos = [x, FIELD_WIDTH]
    elif x < 0:
        agent.pos = [0, y]
    elif y < 0:
        agent.pos = [x, 0]
    else:
        return 0
    agent.stop()
    return(1)
def draw(players, ball, screen):
    # Render one frame: white background, players, ball, and the goal.
    #add reference bar for color to stat comparison at the bottom
    screen.fill((255,255,255))
    #draw players: black disc with a red dot marking the heading
    for p in players:
        head_x=p.pos[0]+cos(p.direction)*PLAYER_SIZE
        head_y=p.pos[1]-sin(p.direction)*PLAYER_SIZE
        pygame.draw.circle(screen,(0,0,0),(int(p.pos[0]),int(p.pos[1])),PLAYER_SIZE)
        pygame.draw.circle(screen,(255,0,0),(int(head_x),int(head_y)),5)
    #ball: purple disc
    pygame.draw.circle(screen,(100,0,100),(int(ball.pos[0]),int(ball.pos[1])),BALL_SIZE)
    #goals: blue outline centered on the left edge
    top=(FIELD_WIDTH-GOAL_SIZE)/2
    pygame.draw.rect(screen, (0,0,255), Rect(0,top,5,GOAL_SIZE),3)
    pygame.display.update()
#Simulation functions
#take a starting generation and fill up to 1000 with randos
def init_generation(old=None):
    """Top up *old* with randomly-weighted players until it holds 1000.

    Mutates and returns the supplied list, as before; passing nothing
    yields a fresh all-random generation.
    """
    # BUGFIX: the original used a mutable default argument (old=[]); the
    # shared default list grew across calls, silently carrying players over
    # between runs.
    generation = old if old is not None else []
    for i in range(1000-len(generation)):
        player_pos = [random.randint(0,FIELD_LENGTH),random.randint(0,FIELD_WIDTH)]
        generation.append(player(player_pos, random_instinct(), random.randint(0,360)))
    return generation
#run simulation on a generation to create a new set
def select(generation):
    # Fitness-proportional selection: anyone who can score once survives
    # and is re-tested 10 times; faster, more reliable scorers get more
    # mutated offspring in the returned generation.
    #put ones who scored in candidates
    candidates=[]
    new=[]
    for player in generation:
        frames=0
        b = ball()
        status=""
        while(frames < 3000 and status!="out"):
            status=update([player], b)
            if status=="goal":
                new.append(player)
                candidates.append(player)
                break
            frames+=1
    print(len(candidates))
    best_fitness=0
    best_weights=[]
    #test candidates for performance and put in babies accordingly
    for player in candidates:
        fitness=0
        totalframes=0
        for i in range(10):
            player.random_pos()
            b = ball()
            frames=0
            status=""
            while(frames < 3000):
                status=update([player], b)
                if status=="goal":
                    fitness+=1
                    totalframes+=frames
                    break
                frames+=1
            if status!="goal":
                totalframes+=3000
        # NOTE(review): totalframes can be <= 10000 (fast scorers), making
        # speedbonus negative or raising ZeroDivisionError at exactly
        # 10000 — confirm this is acceptable for the intended weight ranges.
        speedbonus=50000/(totalframes-10000)
        if speedbonus*fitness>best_fitness:
            best_fitness=speedbonus*fitness
            best_weights=player.instinct.weights
        # Offspring count grows with fitness^2 and shrinks as the candidate
        # pool grows.
        for i in range(1+int(10*speedbonus/(len(candidates)**1.5)*fitness**2)):
            new.append(player.mutate())
    print(best_weights)
    print(len(new))
    return new
#run a simulation for g generations
def evolve(g):
    # Seed the run with ten copies of a known-good weight set, then loop:
    # pad the generation to size, select/breed, repeat g times.
    old = []
    for i in range(10):
        old.append(player([0,0],[[-1.098435474656182, 0.6589064295051409, -0.7390573375911145, -0.6989480706606246, -1.0322066785193227], [-0.5615227913298615, -0.11591445532422427, -0.15676586775256468, 0.6172299160162027, 0.8983686257605342], [0.5591761812956685, -0.6749809133882746, 0.026847750609672514, -0.8616799960725277, 0.1940568944057437], [-1.1773188370604548, 0.6282954708562554, -0.751484119068484, -0.6732556314057755], [-0.5103879833774485, 0.02035234500389063, -0.2663091274743478, 0.5839138375464418]]))
    for i in range(g):
        print("generation: " + str(i))
        generation=init_generation(old)
        old=select(generation)
    return old
#takes a presumable final evolved generation and grades the performance of the members
#into tiers, displaying these tiers and letting us watch them run from best to worst.
#also prints the brains as it goes so we know what they are.
def display(generation):
    best=[]  # NOTE(review): never used — confirm it can be removed
    generation_fitness={0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[]}
    #test candidates for performance and put in babies accordingly
    for player in generation:
        fitness=0
        totalframes=0
        for i in range(10):
            player.random_pos()
            b = ball()
            frames=0
            status=""
            while(frames < 5000 and status!="out"):
                status=update([player], b)
                if status=="goal":
                    fitness+=1
                    totalframes+=frames
                    break
                frames+=1
            if status!="goal":
                # NOTE(review): this bumps the loop counter, not totalframes;
                # presumably 'totalframes+=5000' was intended — confirm.
                frames+=5000
        # Scale fitness down by total time spent chasing goals.
        fitness*=1-(totalframes/50000)
        generation_fitness[int(fitness)].append(player)
    for i in range(10,0,-1):
        print(str(i)+":"+str(len(generation_fitness[i])))
        for player in generation_fitness[i]:
            print(player.instinct.weights)
            # NOTE: the inner 'i' shadows the tier index; harmless because
            # the outer for reassigns it each iteration, but confusing.
            for i in range(10):
                player.random_pos()
                play([player],ball())
#Play with graphics
def play(players, ball):
    """Run the match with graphics until the window closes or Esc is released."""
    pygame.init()
    screen = pygame.display.set_mode((FIELD_LENGTH, FIELD_WIDTH))
    pygame.display.set_caption("SoccerLand!")
    screen.fill((0, 0, 0))
    time.sleep(2)
    frame_count = 0
    running = True
    while running:
        update(players, ball)
        draw(players, ball, screen)
        for event in pygame.event.get():
            wants_quit = event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE)
            if wants_quit:
                running = False
        time.sleep(0)  # yield the CPU without adding a real delay
        frame_count += 1
    pygame.quit()
# Run three demonstration matches back to back when the script loads.
demonstration()
demonstration()
demonstration()
# Alternative entry point: evolve for one generation and review the result.
#display(evolve(1))
|
{
"content_hash": "66745f4ed1d46879d9f00294331e96c8",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 529,
"avg_line_length": 36.48,
"alnum_prop": 0.5775767543859649,
"repo_name": "akshitharamachandran/soccergame",
"id": "128898266fb3ac83388fc1894f8ae31c77a29907",
"size": "14611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/finalproject50.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52113"
},
{
"name": "HTML",
"bytes": "23102"
},
{
"name": "JavaScript",
"bytes": "54955"
},
{
"name": "Python",
"bytes": "29112"
}
],
"symlink_target": ""
}
|
import logging
import requests
from osf.metrics import PreprintSummaryReport
from website import settings
from ._base import DailyReporter
# Module-level logger; basicConfig also configures the root logger at INFO.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# NOTE(review): LOG_THRESHOLD is never referenced in this module — confirm it
# is consumed elsewhere before removing.
LOG_THRESHOLD = 11
def get_elastic_query(date, provider):
    """Build the SHARE/Elasticsearch query counting preprints for ``provider``
    with a date no later than ``date`` (inclusive, day-rounded)."""
    type_clause = {
        'match': {
            'type': 'preprint'
        }
    }
    # Some providers index under a distinct SHARE source name.
    source_clause = {
        'match': {
            'sources': provider.share_source or provider.name,
        }
    }
    date_clause = {
        'range': {
            'date': {
                # "||/d" rounds the comparison down to day granularity.
                'lte': '{}||/d'.format(date.strftime('%Y-%m-%d'))
            }
        }
    }
    return {
        'query': {
            'bool': {
                'must': [type_clause, source_clause],
                'filter': [date_clause]
            }
        }
    }
class PreprintCountReporter(DailyReporter):
    """Daily reporter that counts preprints per provider via the SHARE search API."""

    def report(self, date):
        """Return one PreprintSummaryReport per provider for ``date``."""
        from osf.models import PreprintProvider
        search_url = f'{settings.SHARE_URL}api/v2/search/creativeworks/_search'
        summaries = []
        for provider in PreprintProvider.objects.all():
            response = requests.post(search_url, json=get_elastic_query(date, provider)).json()
            total = response['hits']['total']
            summaries.append(
                PreprintSummaryReport(
                    report_date=date,
                    provider_key=provider._id,
                    preprint_count=total,
                )
            )
            logger.info('{} Preprints counted for the provider {}'.format(total, provider.name))
        return summaries

    def keen_events_from_report(self, report):
        """Translate a summary report into the keen event payload shape."""
        return {
            'preprint_summary': [{
                'provider': {
                    'name': report.provider_key,
                    'total': report.preprint_count,
                },
            }],
        }
|
{
"content_hash": "01c6e9606ba15952cbeef3497bfc1ee4",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 121,
"avg_line_length": 30.391304347826086,
"alnum_prop": 0.4444444444444444,
"repo_name": "CenterForOpenScience/osf.io",
"id": "319f72ae319e7d49719be88aaed560b57e850909",
"size": "2097",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "osf/metrics/reporters/preprint_count.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373895"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11640855"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
import re
import json
from urlparse import urlparse
from scrapy.selector import Selector
try:
from scrapy.spider import Spider
except:
from scrapy.spider import BaseSpider as Spider
from scrapy.utils.response import get_base_url
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor as sle
from .log import *
'''
1. 默认取sel.css()[0],如否则需要'__unique':False or __list:True
2. 默认字典均为css解析,如否则需要'__use':'dump'表明是用于dump数据
'''
class CommonSpider(CrawlSpider):
    """Base spider that applies a nested dict of CSS-selector rules to a page.

    Rule dicts map CSS selectors to either selector strings (leaf values) or
    nested rule dicts. Two control keys steer extraction: '__use' marks a node
    whose leaves should be dumped into the current item, and '__list' makes
    that node emit one item per match instead of merging.
    """
    # When True, leaf '::text' selections are joined into a single string
    # (used by traversal_dict below).
    auto_join_text = True
    ''' # css rule example:
    all_css_rules = {
        '.zm-profile-header': {
            '.zm-profile-header-main': {
                '__use':'dump',
                'name':'.title-section .name::text',
                'sign':'.title-section .bio::text',
                'location':'.location.item::text',
                'business':'.business.item::text',
                'employment':'.employment.item::text',
                'position':'.position.item::text',
                'education':'.education.item::text',
                'education_extra':'.education-extra.item::text',
            }, '.zm-profile-header-operation': {
                '__use':'dump',
                'agree':'.zm-profile-header-user-agree strong::text',
                'thanks':'.zm-profile-header-user-thanks strong::text',
            }, '.profile-navbar': {
                '__use':'dump',
                'asks':'a[href*=asks] .num::text',
                'answers':'a[href*=answers] .num::text',
                'posts':'a[href*=posts] .num::text',
                'collections':'a[href*=collections] .num::text',
                'logs':'a[href*=logs] .num::text',
            },
        }, '.zm-profile-side-following': {
            '__use':'dump',
            'followees':'a.item[href*=followees] strong::text',
            'followers':'a.item[href*=followers] strong::text',
        }
    }
    '''
    # Extract content without any extra spaces.
    # NOTE: If content only has spaces, then it would be ignored.
    def extract_item(self, sels):
        # Collapse whitespace runs to single spaces; drop all-blank extractions.
        contents = []
        for i in sels:
            content = re.sub(r'\s+', ' ', i.extract())
            if content != ' ':
                contents.append(content)
        return contents
    def extract_items(self, sel, rules, item):
        # Fill `item` in place: one key per leaf rule, skipping control keys.
        # A selector with no match resets the key to an empty list.
        for nk, nv in rules.items():
            if nk in ('__use', '__list'):
                continue
            if nk not in item:
                item[nk] = []
            if sel.css(nv):
                # item[nk] += [i.extract() for i in sel.css(nv)]
                # Without any extra spaces:
                item[nk] += self.extract_item(sel.css(nv))
            else:
                item[nk] = []
    # 1. `item` is a single item that all extracted data is merged into (*merge)
    # 2. when an item list is produced, every item is appended to `items`
    def traversal(self, sel, rules, item_class, item, items):
        # print 'traversal:', sel, rules.keys()
        if item is None:
            item = item_class()
        if '__use' in rules:
            if '__list' in rules:
                unique_item = item_class()
                self.extract_items(sel, rules, unique_item)
                items.append(unique_item)
            else:
                self.extract_items(sel, rules, item)
        else:
            # Not a dump node: recurse into each nested selector match.
            for nk, nv in rules.items():
                for i in sel.css(nk):
                    self.traversal(i, nv, item_class, item, items)
    DEBUG=True
    # NOTE(review): defined without `self`, and DEBUG above is a class attribute
    # (no module-level DEBUG is visible here), so calling this as written would
    # raise NameError — confirm it is unused before relying on it.
    def debug(sth):
        if DEBUG == True:
            print(sth)
    # Control keys that never map to CSS selectors.
    keywords = set(['__use', '__list'])
    def traversal_dict(self, sel, rules, item_class, item, items):
        # Dict-building variant of traversal(): leaves become string values,
        # nested rule dicts become lists of child dicts.
        #import pdb; pdb.set_trace()
        item = {}
        for k, v in rules.items():
            if type(v) != dict:
                if k in self.keywords:
                    continue
                #import pdb;pdb.set_trace()
                if v.endswith('::text') and self.auto_join_text:
                    item[k] = ' '.join(self.extract_item(sel.css(v)))
                else:
                    _items = self.extract_item(sel.css(v))
                    item[k] = _items[0] if len(_items) >= 1 else ''
            else:
                item[k] = []
                for i in sel.css(k):
                    #print(k, v)
                    self.traversal_dict(i, v, item_class, item, item[k])
        items.append(item)
    def dfs(self, sel, rules, item_class):
        """Depth-first walk of `rules` over `sel`; returns the collected items."""
        if sel is None:
            return []
        items = []
        if item_class != dict:
            self.traversal(sel, rules, item_class, None, items)
        else:
            self.traversal_dict(sel, rules, item_class, None, items)
        return items
    def parse_with_rules(self, response, rules, item_class):
        """Entry point: apply a rule dict to a whole response."""
        return self.dfs(Selector(response), rules, item_class)
    ''' # use parse_with_rules example:
    def parse_people_with_rules(self, response):
        item = self.parse_with_rules(response, self.all_css_rules, ZhihuPeopleItem)
        item['id'] = urlparse(response.url).path.split('/')[-1]
        info('Parsed '+response.url) # +' to '+str(item))
        return item
    '''
|
{
"content_hash": "54f7c8802bfa258a6b2a123170bb713b",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 83,
"avg_line_length": 34.369127516778526,
"alnum_prop": 0.5127904706112087,
"repo_name": "geekan/google-scholar-crawler",
"id": "c9c2c1fe143c5d87c459210fb1d644bbe96d8928",
"size": "5249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65610"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class InnerError(Model):
    """Inner error details.

    :param exceptiontype: The exception type.
    :type exceptiontype: str
    :param errordetail: The internal error message or exception dump.
    :type errordetail: str
    """

    # Serialization map consumed by the msrest Model machinery.
    _attribute_map = {
        'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
        'errordetail': {'key': 'errordetail', 'type': 'str'},
    }

    def __init__(self, exceptiontype=None, errordetail=None):
        # Store both fields verbatim; serialization is driven by _attribute_map.
        self.exceptiontype, self.errordetail = exceptiontype, errordetail
|
{
"content_hash": "cbd480af604a43f16559fee64e605c44",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 28.9,
"alnum_prop": 0.6505190311418685,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "48d4db17e2a252fcde48809d2e8870b1a5226367",
"size": "1052",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/compute/v2016_04_30_preview/models/inner_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from celery import task
from celery import Celery
from celery import app
import pymongo
import json
from bson import json_util,ObjectId
from pymongo import MongoClient
# from pymongo import find_many
from bson.dbref import DBRef
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from pymongo.read_preferences import ReadPreference
from operator import itemgetter
from random import randint
import bisect
import collections
# from pymongo.objectid import ObjectId
#client = MongoClient()
# Connect to the local three-node replica set "socsDBset"; reads prefer the
# primary but may fall back to a secondary when it is unavailable.
client = MongoReplicaSetClient(
    'localhost:27017,localhost:27018,localhost:27019',
    replicaSet='socsDBset')
client.readPreference = 'primaryPreferred'
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that renders BSON ObjectIds as plain strings."""

    def default(self, o):
        # Fall back to the stock encoder for everything except ObjectId.
        if not isinstance(o, ObjectId):
            return json.JSONEncoder.default(self, o)
        return str(o)
# {u'course_id': u'cse220', u'blocks': {u'start_period': u'1', u'days': [u'M', u'W'], u'end_period': u'2'}, u'instructor': u'wong', u'course_name': u'systems', u'block_key_value': u'13'}
class DayItemSort(object):
    """Sortable wrapper around one course entry.

    The ordering key is ``block_key_value`` (a digit string built from the
    course's meeting days); indexing the object indexes that key.
    """
    def __init__(self, course_id, blocks, instructor, course_name, block_key_value):
        self.course_id = course_id
        self.blocks = blocks
        self.instructor = instructor
        self.course_name = course_name
        self.block_key_value = block_key_value
    def __repr__(self):
        # Double space after course_id preserved from the original format.
        return '{}: {}  {} {} {} {}'.format(self.__class__.__name__,
                                            self.course_id,
                                            self.blocks,
                                            self.instructor,
                                            self.course_name,
                                            self.block_key_value)
    def __cmp__(self, other):
        # Python 2 ordering hook; yields None when `other` has no getKey.
        if hasattr(other, 'getKey'):
            return self.getKey().__cmp__(other.getKey())
    def getKey(self):
        return self.block_key_value
    def __getitem__(self, key):
        return self.block_key_value[key]
# @task(bind = True, queue = 'read_tasks')
# def create_desired_schedule(self, data):
# data = [ {
# 'course_id' : "cse220",
# 'blocks' : {
# 'start_period' : "1",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "2"
# },
# 'instructor' : "wong",
# 'course_name' : "systems",
# 'preferred': False
# },
# {
# 'course_id' : "cse114",
# 'blocks' : {
# 'start_period' : "5",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "6"
# },
# 'instructor' : "skiena",
# 'course_name' : "intro",
# 'preferred': True
# },
# {
# 'course_id' : "cse110",
# 'blocks' : {
# 'start_period' : "5",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "6"
# },
# 'instructor' : "bach",
# 'course_name' : "android",
# 'preferred': False
# }
# ]
# # data.append()
# db = client.students
# student_collection = db.students
# assigned_schedule = db.assigned_schedule
# email = 'peter@gmail.com'
# who_i_am =student_collection.find_one({'email':email})
# friends_loc = str(who_i_am['friendslist'])
# friends_loc = friends_loc.split(",",1)
# friends_loc = friends_loc[1]
# friends_loc = friends_loc.split("'",2)
# friends_loc = friends_loc[1]
# list_of_stuff= db.friends_list.find_one({'_id':ObjectId(friends_loc)})
# list_of_stuff= list_of_stuff['list']
# day_map= {'M':"1",'Tu':"2",'W':"3",'Th':"4",'F':"5",'S':"6",'Su':"7"}
# # num_friends_in_classes_hash = {}
# friends_overlap = []
# course_hash_map={}
# current_blocks =[]
# sort_day_value = ""
# for courses_in_data in data:
# # course_hash_map[courses_in_data['course_name']] = 0
# courses_in_data['count'] = 0
# for fr in list_of_stuff:
# assigned_schedule_friends =assigned_schedule.find_one({'email':fr['email']})
# friends_class_array = assigned_schedule_friends['classes']
# for classes in data:
# for fclasses in friends_class_array:
# if fclasses['course_name']==classes['course_name'] and fclasses['instructor']== classes['instructor'] and fclasses['course_id']==classes['course_id']:
# classes['count']=classes['count']+1
# for classes in data:
# current_blocks = classes['blocks']
# for day in current_blocks['days']:
# sort_day_value = sort_day_value + day_map[day]
# classes['block_key_value'] = sort_day_value
# classes['dif'] = int(current_blocks['end_period'])- int(current_blocks['start_period'])
# sort_day_value = ""
# for da in data:
# da['weight'] = 0.01
# if da['preferred']== True:
# da['weight'] = da['weight']+.6
# da['weight'] = (da['count'] *.1) + da['weight']
# new_list = sorted(data, key=itemgetter('block_key_value', 'dif'))
# start = []
# finish = []
# for datas in new_list:
# this_block = datas['blocks']
# start.append(this_block['start_period'])
# finish.append(this_block['end_period'])
# p = []
# for j in xrange(len(new_list)):
# i = bisect.bisect_right(finish, start[j]) - 1 # rightmost interval f_i <= s_j
# p.append(i)
# OPT = collections.defaultdict(int)
# OPT[-1] = 0
# OPT[0] = 0
# for j in xrange(1, len(new_list)):
# dats = new_list[j]
# print(dats)
# OPT[j] = max(dats['weight'] + OPT[p[j]], OPT[j - 1])
# # given OPT and p, find actual solution intervals in O(n)
# O = []
# def compute_solution(j):
# if j >= 0: # will halt on OPT[-1]
# dats = new_list[j]
# if dats['weight'] + OPT[p[j]] > OPT[j - 1]:
# O.append(new_list[j])
# compute_solution(p[j])
# else:
# compute_solution(j - 1)
# compute_solution(len(new_list) - 1)
# return O
@task(bind=True, queue='read_tasks')
def find_school_two(self, data):
    """Look up the school record matching the requesting student's own
    school name and address; return it as JSON (without its _id)."""
    db = client.students
    me = db.students.find_one({'email': data['email']})
    my_school_name = me['school']
    my_school_address = me['address']
    print("PPOOOOOOOOOOOOOOOOOOOOOODLE")
    print(my_school_name)
    print(my_school_address)
    school = db.school_list.find_one(
        {'$and': [{'name': my_school_name}, {'address': my_school_address}]})
    # Strip the ObjectId so the document serializes as plain JSON.
    del school['_id']
    return json_util.dumps(school)
@task(bind=True, queue='read_tasks')
def get_overlapping_friends_by_specific_course_two(self, data):
    """Return the course ids that the requester shares with one friend.

    ``data['email']`` is the requester, ``data['target']`` the friend; two
    classes overlap when course name, instructor and course id all agree.
    """
    schedules = client.students.assigned_schedule
    my_classes = schedules.find_one({'email': data['email']})['classes']
    friend_classes = schedules.find_one({'email': data['target']})['classes']
    shared_ids = [
        theirs['course_id']
        for mine in my_classes
        for theirs in friend_classes
        if theirs['course_name'] == mine['course_name']
        and theirs['instructor'] == mine['instructor']
        and theirs['course_id'] == mine['course_id']
    ]
    return {'friend': data['target'], 'courses': shared_ids}
@task(bind=True, queue='write_tasks')
def add_students_to_database_two(self, data):
    """Create a student document plus an empty friends list linked via DBRef."""
    db = client.students
    friends_doc = {
        'first_name': data['first_name'],
        'last_name': data['last_name'],
        'list': []}
    # insert_one fills in friends_doc['_id'], which the DBRef below needs.
    db.friends_list.insert_one(friends_doc)
    student_doc = {
        'first_name': data['first_name'],
        'last_name': data['last_name'],
        'email': data['email'],
        'school': data['school'],
        'address': data['address'],
        'friendslist': DBRef(
            'friends_list',
            friends_doc["_id"])}
    print (student_doc)
    db.students.insert_one(student_doc)
    # str() after insert so the returned repr includes the generated _id.
    return str(student_doc)
@task(bind=True, queue='write_tasks')
def remove_school(self, data):
    """Delete one school matching both name and address; return its repr."""
    schools = client.students.school_list
    query = {'$and': [{'name': data['school_name']},
                      {'address': data['school_address']}]}
    removed = schools.find_one_and_delete(query)
    return str(removed)
@task(bind = True, queue = 'read_tasks')
def create_desired_schedule(self, email_address, data):
    """Pick a non-overlapping, maximum-weight subset of the desired courses.

    ``data`` is a list of course dicts ({'course_id', 'course_name',
    'instructor', 'preferred', 'blocks': {'days', 'start_period',
    'end_period'}}). Each course is weighted by a small base, a preference
    bonus, and how many of the student's friends already take it; a weighted
    interval-scheduling DP then selects the best conflict-free subset.
    """
    db = client.students
    student_collection = db.students
    assigned_schedule = db.assigned_schedule
    # Bug fix: the requester was hard-coded to 'peter@gmail.com'; use the
    # email_address argument the caller supplies.
    who_i_am = student_collection.find_one({'email': email_address})
    list_of_stuff = []
    if who_i_am is not None:
        # 'friendslist' is a DBRef; its repr is parsed to recover the ObjectId.
        friends_loc = str(who_i_am['friendslist'])
        friends_loc = friends_loc.split(",", 1)[1]
        friends_loc = friends_loc.split("'", 2)[1]
        friends_doc = db.friends_list.find_one({'_id': ObjectId(friends_loc)})
        if friends_doc is not None:
            list_of_stuff = friends_doc['list']
    day_map = {'M': "1", 'Tu': "2", 'W': "3", 'Th': "4", 'F': "5", 'S': "6", 'Su': "7"}
    # Count, for every desired course, how many friends are enrolled in it.
    for courses_in_data in data:
        courses_in_data['count'] = 0
    for fr in list_of_stuff:
        friend_doc = assigned_schedule.find_one({'email': fr['email']})
        # Bug fix: the original read 'friend_class_array' here, an undefined
        # name (NameError). Also guard against a friend with no schedule.
        friends_class_array = (friend_doc or {}).get('classes') or []
        for classes in data:
            for fclasses in friends_class_array:
                if (fclasses['course_name'] == classes['course_name']
                        and fclasses['instructor'] == classes['instructor']
                        and fclasses['course_id'] == classes['course_id']):
                    classes['count'] = classes['count'] + 1
    # Build a sortable key from the meeting days, plus the block length.
    for classes in data:
        current_blocks = classes['blocks']
        sort_day_value = ""
        for day in current_blocks['days']:
            sort_day_value = sort_day_value + day_map[day]
        classes['block_key_value'] = sort_day_value
        classes['dif'] = int(current_blocks['end_period']) - int(current_blocks['start_period'])
    # Weight: small base, +0.6 if preferred, +0.1 per enrolled friend.
    for da in data:
        da['weight'] = 0.01
        if da['preferred'] == True:
            da['weight'] = da['weight'] + .6
        da['weight'] = (da['count'] * .1) + da['weight']
    new_list = sorted(data, key=itemgetter('block_key_value', 'dif'))
    start = []
    finish = []
    for datas in new_list:
        this_block = datas['blocks']
        # NOTE(review): periods are compared as strings by bisect below, which
        # is only correct while they are single characters — confirm the range.
        start.append(this_block['start_period'])
        finish.append(this_block['end_period'])
    # p[j] = rightmost interval i with finish[i] <= start[j].
    p = []
    for j in range(len(new_list)):
        i = bisect.bisect_right(finish, start[j]) - 1
        p.append(i)
    OPT = collections.defaultdict(int)
    OPT[-1] = 0
    OPT[0] = 0
    for j in range(1, len(new_list)):
        dats = new_list[j]
        OPT[j] = max(dats['weight'] + OPT[p[j]], OPT[j - 1])
    # Given OPT and p, recover the actual solution intervals in O(n).
    O = []
    def compute_solution(j):
        if j >= 0:  # halts on OPT[-1]
            dats = new_list[j]
            if dats['weight'] + OPT[p[j]] > OPT[j - 1]:
                O.append(new_list[j])
                compute_solution(p[j])
            else:
                compute_solution(j - 1)
    compute_solution(len(new_list) - 1)
    return O
@task(bind=True, queue='write_tasks')
def remove_a_class_from_assigned_two(self, data, days_array):
    """Pull one class (matched by name, id and instructor) from a student's
    assigned schedule; return the pre-update document as JSON."""
    assigned_schedule = client.students.assigned_schedule
    email = data['email']
    name = data['course_name']
    course_id = data['course_id']
    instructor = data['instructor']
    print(data)
    print(days_array)
    # Assembled only for the debug output below; the $pull query ignores
    # periods and days entirely.
    blocks = {'start_period': data['start_period'],
              'end_period': data['end_period'],
              'days': days_array}
    print(" ")
    print(blocks)
    match = {'email': email,
             'classes.course_name': name,
             'classes.course_id': course_id,
             'classes.instructor': instructor}
    pull = {'$pull': {'classes': {'course_name': name,
                                  'course_id': course_id,
                                  'instructor': instructor}}}
    val = assigned_schedule.find_one_and_update(match, pull)
    print(val)
    return json_util.dumps(val)
@task(bind = True,queue='read_tasks')
def get_course_offerings_two(self,email,year):
    """Return every course offered in any semester of ``year`` at the school
    the student with ``email`` attends (id, instructor, name, blocks and
    semester name per course)."""
    db = client.students
    student_collection = db.students
    school_collection = db.school_list
    course_offerings =db.semester_courses_ref
    course_list = db.course_list
    # print(email)
    who_i_am =student_collection.find_one({'email':email})
    school_i_go_to = who_i_am['school']
    school_address = who_i_am['address']
    # print(school_i_go_to)
    my_school =school_collection.find_one({'$and': [{'address': school_address}, {'name': school_i_go_to}]})
    # year is missing
    output = []
    for yr in my_school['year']:
        if yr['year_name']== year:
            all_semesters = yr['semesters']
            for als in all_semesters:
                # 'semester_courses_ref' stores a stringified ObjectId pointing
                # at the semester's course-list document.
                semester_ref = als['semester_courses_ref']
                semester_name = als['semester_name']
                course_ref_list = course_offerings.find_one({'_id':ObjectId(semester_ref)})
                courses_held = course_ref_list['courses_held']
                for cor in courses_held:
                    # prepare to trim the stuff we dont need
                    setup_course = {}
                    id_of_this_course = str(cor['course_id'])
                    print(id_of_this_course)
                    found_course = course_list.find_one({'_id':ObjectId(id_of_this_course)})
                    print(found_course)
                    setup_course['course_id'] = found_course['course_id']
                    setup_course['instructor'] = found_course['instructor']
                    setup_course['course_name']= found_course['course_name']
                    setup_course['blocks'] = found_course['blocks']
                    setup_course['semester_name']=semester_name
                    output.append(setup_course)
    return output
@task(bind = True,queue='read_tasks')
def get_course_offerings_by_semester_two(self,email,year,semester):
    """Like get_course_offerings_two, but restricted to one semester of
    ``year``; note this variant omits the 'blocks' field from each course."""
    db = client.students
    student_collection = db.students
    school_collection = db.school_list
    course_offerings =db.semester_courses_ref
    course_list = db.course_list
    # print(email)
    who_i_am =student_collection.find_one({'email':email})
    school_i_go_to = who_i_am['school']
    school_address = who_i_am['address']
    # print(school_i_go_to)
    my_school =school_collection.find_one({'$and': [{'address': school_address}, {'name': school_i_go_to}]})
    # year is missing
    output = []
    for yr in my_school['year']:
        if yr['year_name']== year:
            all_semesters = yr['semesters']
            for als in all_semesters:
                if als['semester_name'] == semester:
                    # 'semester_courses_ref' is a stringified ObjectId pointing
                    # at the semester's course-list document.
                    semester_ref = als['semester_courses_ref']
                    semester_name = als['semester_name']
                    course_ref_list = course_offerings.find_one({'_id':ObjectId(semester_ref)})
                    courses_held = course_ref_list['courses_held']
                    for cor in courses_held:
                        # prepare to trim the stuff we dont need
                        setup_course = {}
                        id_of_this_course = str(cor['course_id'])
                        print(id_of_this_course)
                        found_course = course_list.find_one({'_id':ObjectId(id_of_this_course)})
                        print(found_course)
                        setup_course['course_id'] = found_course['course_id']
                        setup_course['instructor'] = found_course['instructor']
                        setup_course['course_name']= found_course['course_name']
                        setup_course['semester_name']=semester_name
                        output.append(setup_course)
    return output
@task(bind = True, queue='write_tasks')
def get_normal_schedule_two(self, data):
    """Return the stored class list for a student, or the literal string
    "null" when no schedule document exists (contract kept from the caller)."""
    schedule_doc = client.students.assigned_schedule.find_one({'email': data['email']})
    return "null" if schedule_doc is None else schedule_doc['classes']
@task(bind=True, queue='read_tasks')
def add_classes_to_database_two(self, data):
    """Register a course in the school's catalog for a year/semester and add it
    to the requesting student's assigned schedule.

    NOTE(review): the queue is 'read_tasks' although this task writes — confirm.
    The is_new_year branch has several apparent defects flagged inline; it
    looks unfinished and would raise if taken.
    """
    db = client.students
    students_collection = db.students
    school_collection = db.school_list
    course_list = db.course_list
    course_offerings =db.semester_courses_ref
    assigned = db.assigned_schedule
    # {'username': 't1@t1.com',
    #  'year': '2015', 'course_id': 'CSE 201',
    #  'days': ['M', 'Tu', 'W'], 'course_name': 'Comp Sci',
    #  'semester': 'Fall', 'new_year_flag': False,
    #  'instructor': 'Poodle', 'start_period': '0', 'end_period': '3'}
    username= data['username']
    course_id=data['course_id']
    course_name=data['course_name']
    instructor=data['instructor']
    # data['school'] = ''
    blocks={}
    blocks['days']=data['days']
    blocks['start_period']= data['start_period']
    blocks['end_period']= data['end_period']
    # days=data['days'] #= ['','']
    #start_period=data['start_period']
    #end_period=data['end_period']
    year=data['year']
    semester=data['semester']
    myself = students_collection.find_one({'email': username})
    address_of_school = myself['address']
    school_name = myself['school']
    is_new_year=data['new_year_flag']
    #the_school_info = school_collection.find_one({'name':school_name, 'address': address_of_school})
    # info doesnt exist in the schools
    # create info
    # if newyear and not already in the database
    if(is_new_year):
        # create year object
        # NOTE(review): rebinding course_list/semester here shadows the Mongo
        # collection and the semester *name*, and 'semester_courses_ref' /
        # 'semester_temp' are undefined names below — this branch raises
        # NameError as written. Confirm intent before use.
        course_list= []
        courses = []
        course_obj_ids=[]
        semester = []
        #for x in range len(name_of_semesters)
        #course_listing_and_semster += {None, name_of_semesters[x]}
        year_obj = {'year_name':year,'num_periods_in_a_day': 0,'blocks':[],'semesters':[]}
        #school_collection.update_one({'$addToSet': {'year': year_obj}})
        course_list.append({'year':year, 'sem_name':semester, 'courses_held':courses})
        course_obj_ids.append(semester_courses_ref.insert_one(semester_temp).inserted_id)
        #semester.append({'semester_name': semester,'semester_courses_ref': DBRef('semester_courses_ref':ObjectId(course_obj_ids[0])}))
        semester.append({'semester_name': semester,'semester_courses_ref': str(course_obj_ids[0])})
        # NOTE(review): stored under 'semester' but read back as 'semesters'
        # elsewhere in this module — confirm the intended key.
        year_obj['semester']=semester
        # return str(course_obj_ids)
        #for index, g in enumerate(name_of_semesters):
        # for i in range(len(name_of_semesters)):
        # semester+={'semester_name': i,'course_listing': DBRef('course_offerings',course_obj_ids[i])}
        school_collection.find_one_and_update({'name':school_name, 'address': address_of_school}, {'$addToSet': {'year': year_obj}})
    else:
        pass
    # Locate the matching year, then the matching semester (case-insensitive).
    temp_school = school_collection.find_one({'name':school_name, 'address': address_of_school})
    year_sem = None
    current_semester = None
    # print(temp_school['year'])
    for y in temp_school['year']:
        if year == y['year_name']:
            year_sem = y
            break
    # print("*******************")
    # print(year_sem)
    for s in year_sem['semesters']:
        print("*******************")
        print(semester +"=="+ s['semester_name'])
        if semester.lower() == s['semester_name'].lower():
            current_semester = s
    ref_number = current_semester['semester_courses_ref']
    # print(ref_number)
    # Upsert the course document, then link its id into the semester's list.
    course_data = {'course_id':course_id,'course_name':course_name,'instructor':instructor,'blocks':blocks}
    # deference(s['semester_courses_ref'])course_id=data['course_id']
    course_name=data['course_name']
    instructor=data['instructor']
    # update({}, course_data, {upsert:true})
    # id_of_course = course_list.insert_one(course_data).inserted_id
    course_list.update(course_data, course_data, True)
    id_of_inserted_course = course_list.find_one(course_data)
    # print(id_of_inserted_course)
    id_of_inserted_course = id_of_inserted_course['_id']
    # print(id_of_inserted_course)
    id_to_insert= {'course_id':ObjectId(id_of_inserted_course)}
    course_offerings.update({'_id':ObjectId(ref_number)},{ '$addToSet': {'courses_held': id_to_insert} },True)
    # add it the schedule now
    # assigned
    # insert_into_schedule
    course_id=data['course_id']
    course_name=data['course_name']
    instructor=data['instructor']
    # data['school'] = ''
    days=data['days'] #= ['','']
    #start_period=data['start_period']
    #end_period=data['end_period']
    ##PUT BLOCK INFORMATION HERE
    # Finally, upsert the class into the student's own assigned schedule.
    set_add = {'course_id':course_id, 'course_name': course_name, 'instructor': instructor,'blocks':blocks}
    assigned.update({'email':username},{'$addToSet':{'classes':set_add}},True)
    # .inserted_id
    return
@task(bind=True, queue='write_tasks')
def send_a_friend_request_two(self, data):
    """Record a pending friend request between sender and sendee."""
    # Field names are kept exactly as stored, including the asymmetric
    # 'first_of_emailee' key.
    request_doc = {
        "email_of_requester": data['email_of_sender'],
        "first_name_of_requester": data['first_name_emailer'],
        "last_name_of_requester": data['last_name'],
        "email_of_emailee": data['email_of_sendee'],
        "first_of_emailee": data['first_name_emailee'],
        'last_name_emailee': data['last_name_emailee']}
    client.students.friend_requests.insert_one(request_doc)
@task(bind=True, queue='read_tasks')
def get_friends_list_two(self,data):
    """Return the friends list for the student with email ``data['email']``.

    The student's 'friendslist' field is a DBRef; the ObjectId is recovered by
    splitting the DBRef's repr string (fragile — depends on the exact repr
    format), then used to fetch the friends_list document.
    """
    db = client.students
    # dat_base_var = "students"
    # first_name_var = data['first_name']
    # last_name_var = data['last_name']
    email_stuff = data['email']
    # original_id_2=db.students.insert(info2)
    value = db.students.find_one({'email':email_stuff})
    # Parse the ObjectId out of the DBRef repr: take the text after the first
    # comma, then the first quoted token.
    friends_loc = str(value['friendslist'])
    friends_loc = friends_loc.split(",",1)
    friends_loc = friends_loc[1]
    friends_loc = friends_loc.split("'",2)
    friends_loc = friends_loc[1]
    # friends_loc = friends_loc.split("'",1)
    # friends_loc = friends_loc[:-1]
    # friends_loc = friends_loc[1:]
    # print(friends_loc)
    list_of_stuff= db.friends_list.find_one({'_id':ObjectId(friends_loc)})
    # print(list_of_stuff)
    list_of_stuff= list_of_stuff['list']
    print(list_of_stuff)
    # html = "<html><body> string: "+""+"</body></html>"
    # print(list_of_stuff)
    return list_of_stuff
@task(bind=True, queue='write_tasks')
def delete_a_student_from_database_two(self, email):
    """Remove the student document with the given email (first match only)."""
    client.students.students.find_one_and_delete({'email': email})
@task(bind=True, queue='write_tasks')
def delete_friend_from_friends_list_two(self,data):
    """Remove a friendship in both directions: pull me from the friend's list
    and the friend from mine.

    Both 'friendslist' DBRefs are resolved by parsing their repr strings
    (fragile). NOTE(review): the entry pulled from the friend's list uses MY
    names from `value` with MY email, while `value_two` pairs the caller-
    supplied names with the FRIEND's email — confirm this mirrors how entries
    were originally stored before relying on it.
    """
    db = client.students
    # self
    email_stuff = data['email']
    first_name = data['first_name']
    last_name =data['last_name']
    f_email= data['friend_email']
    value = db.students.find_one({'email':email_stuff})
    # value_two = db.students.find_one({'email':f_stuff})
    # Recover my friends_list ObjectId from the DBRef repr.
    friends_loc = str(value['friendslist'])
    friends_loc = friends_loc.split(",",1)
    friends_loc = friends_loc[1]
    friends_loc = friends_loc.split("'",2)
    friends_loc = friends_loc[1]
    first_name_two=value['first_name']
    last_name_two=value['last_name']
    friend_ob = db.students.find_one({'email':f_email})
    friends_loc_two = str(friend_ob['friendslist'])
    # strip the info we dont need
    friends_loc_two = friends_loc_two.split(",",1)
    friends_loc_two = friends_loc_two[1]
    friends_loc_two = friends_loc_two.split("'",2)
    friends_loc_two = friends_loc_two[1]
    # first_name_two=friend_ob['first_name']
    # last_name_two=friend_ob['last_name']
    print(first_name_two)
    print(last_name_two)
    value_two = {'first_name':first_name,'last_name':last_name,'email':f_email}
    print(value)
    value = {'first_name':first_name_two,'last_name':last_name_two,'email':email_stuff}
    print(value_two)
    # {'$addToSet': {'year': year_obj}}
    # Pull my entry from the friend's list, then their entry from mine.
    list_of_stuff= db.friends_list.find_one_and_update({'_id':ObjectId(friends_loc_two)},{ '$pull': {'list': value} })
    list_of_stuff= db.friends_list.find_one_and_update({'_id':ObjectId(friends_loc)},{ '$pull': {'list': value_two} })
    # return list_of_stuff
#dont use this yet
@task(bind=True, queue='read_tasks')
def get_schools_address_two(self, data=None):
    """Return (as JSON) all schools matching a name+address pair.

    Bug fixes: the original body referenced ``data`` but the signature had no
    such parameter (NameError on first use), and it iterated a ``find_one``
    result — a single document — as if it were a cursor. ``data`` defaults to
    None to stay call-compatible; when provided it must contain 'school_name'
    and 'address'.
    """
    db = client.students
    school_collection = db.school_list
    if data is None:
        return json_util.dumps([])
    name_of_school = data['school_name']
    address_of_school = data['address']
    # find() (not find_one) so multiple matching campuses are all returned.
    schools = school_collection.find({'name': name_of_school, 'address': address_of_school})
    array_of_schools = [school for school in schools]
    return json_util.dumps(array_of_schools)
#unfinished
@task(bind=True, queue='write_tasks')
def delete_school_from_database_two(self, data):
    # not done
    # NOTE(review): stub — performs no deletion, and `student_dict` is not
    # defined here, so calling this raises NameError. Finish or remove.
    db = client.students
    school_collection = db.school_list
    return str(student_dict)
@task(bind=True, queue='read_tasks')
def search_all_students_two(self):
    """Serialize every student document to JSON."""
    cursor = client.students.students.find({})
    return json_util.dumps([doc for doc in cursor])
@task(bind=True, queue='read_tasks')
def search_school_from_database_two(self, data=None):
    """Search schools by name, or list all schools when no data is given.

    data: optional dict with key 'school_name'; when falsy, every school
    matches.
    Returns the json_util-encoded list of matching school documents.
    """
    db = client.students
    school_collection = db.school_list
    if data:
        schools = school_collection.find({'name': data['school_name']})
    else:
        schools = school_collection.find()
    # Materialize the cursor directly instead of a manual append loop.
    return json_util.dumps(list(schools))
@task(bind=True, queue='write_tasks')
def edit_school_to_database_two(self, data, address_of_edit):
    """Replace the school document stored at `address_of_edit` with new data.

    data: dict with keys 'name', 'num_days', 'address', 'num_sem',
          'num_days_in_schedule', 'semester_names' and 'year_obj'
          (year_obj must contain 'year_name').
    address_of_edit: address of the existing school document to replace.

    For each semester name a fresh (empty) course-list document is inserted
    into `semester_courses_ref`; the rebuilt school document stores the
    stringified ObjectIds of those course lists.
    """
    db = client.students
    school_collection = db.school_list
    semester_courses_ref = db.semester_courses_ref

    semester_names = data['semester_names']
    year = data['year_obj']

    # One course-list document per semester; insertion order matches
    # semester_names so the ids line up positionally with the names below.
    course_docs = [{'year': year['year_name'],
                    'sem_name': sem_name,
                    'courses_held': []}
                   for sem_name in semester_names]
    course_obj_ids = [semester_courses_ref.insert_one(doc).inserted_id
                      for doc in course_docs]

    year['semesters'] = [
        {'semester_name': sem_name, 'semester_courses_ref': str(obj_id)}
        for sem_name, obj_id in zip(semester_names, course_obj_ids)
    ]

    data_input = {'name': data['name'],
                  'days_in_a_year': data['num_days'],
                  'address': data['address'],
                  'semesters_in_year': data['num_sem'],
                  'num_days_in_a_schedule': data['num_days_in_schedule'],
                  'name_of_semesters': semester_names,
                  'year': [year]}
    school_collection.find_one_and_replace({'address': address_of_edit},
                                           data_input)
    return
@task(bind=True, queue='write_tasks')
def add_school_to_database_two(self, data):
    """Insert a new school document built from the given form data.

    data: dict with keys 'name', 'num_days', 'address', 'num_sem',
          'num_days_in_schedule', 'semester_names' and 'year_obj'
          (year_obj must contain 'year_name').

    For each semester name a fresh (empty) course-list document is inserted
    into `semester_courses_ref`; the school document stores the stringified
    ObjectIds of those course lists.
    """
    db = client.students
    school_collection = db.school_list
    semester_courses_ref = db.semester_courses_ref

    semester_names = data['semester_names']
    year = data['year_obj']

    # One course-list document per semester; insertion order matches
    # semester_names so the ids line up positionally with the names below.
    course_docs = [{'year': year['year_name'],
                    'sem_name': sem_name,
                    'courses_held': []}
                   for sem_name in semester_names]
    course_obj_ids = [semester_courses_ref.insert_one(doc).inserted_id
                      for doc in course_docs]

    year['semesters'] = [
        {'semester_name': sem_name, 'semester_courses_ref': str(obj_id)}
        for sem_name, obj_id in zip(semester_names, course_obj_ids)
    ]

    data_input = {'name': data['name'],
                  'days_in_a_year': data['num_days'],
                  'address': data['address'],
                  'semesters_in_year': data['num_sem'],
                  'num_days_in_a_schedule': data['num_days_in_schedule'],
                  'name_of_semesters': semester_names,
                  'year': [year]}
    school_collection.insert_one(data_input)
    return
@task(bind = True, queue='write_tasks')
def copy_and_modify_school_two(self, data):
    # TODO: unimplemented stub -- presumably intended to clone an existing
    # school document and apply modifications driven by `data`; confirm the
    # intended behavior before wiring up any callers.
    pass
def _friends_list_id_from_dbref(friendslist_value):
    """Extract the hex ObjectId string from a student's `friendslist` field.

    The field's str() form looks like "DBRef('friends_list', ObjectId('...'))",
    so we split off everything before the comma and take the first quoted
    token after it.
    NOTE(review): string-parsing the repr is fragile -- if `friendslist` is
    always a DBRef, `friendslist_value.id` would be more robust; confirm.
    """
    text = str(friendslist_value)
    text = text.split(",", 1)[1]
    return text.split("'", 2)[1]


@task(bind = True, queue='write_tasks')
def accept_friend_request_two(self, data):
    """Accept a pending friend request and link both students as friends.

    data: dict with keys 'email_of_sendee' and 'email_of_requester'.
    Deletes the pending request document, then adds each party to the
    other's friends list.
    """
    db = client.students
    friend_requests = db.friend_requests
    student_collection = db.students
    friends_collection = db.friends_list

    emailee = data['email_of_sendee']
    emailer = data['email_of_requester']

    # Consume (delete) the pending request; it carries both parties' names.
    value = friend_requests.find_one_and_delete(
        {'email_of_emailee': emailee, 'email_of_requester': emailer})
    sendee_first_name = value['first_of_emailee']
    sendee_last_name = value['last_name_emailee']
    sender_first_name = value['first_name_of_requester']
    sender_last_name = value['last_name_of_requester']

    sender_info = student_collection.find_one({'email': emailer})
    sender_friends_id = _friends_list_id_from_dbref(sender_info['friendslist'])
    sendee_info = student_collection.find_one({'email': emailee})
    sendee_friends_id = _friends_list_id_from_dbref(sendee_info['friendslist'])

    send_to_sender_friends = {'first_name': sendee_first_name,
                              'last_name': sendee_last_name,
                              'email': emailee}
    send_to_sendee_friends = {'first_name': sender_first_name,
                              'last_name': sender_last_name,
                              'email': emailer}
    # $addToSet keeps each friend entry unique in the stored list.
    friends_collection.find_one_and_update(
        {'_id': ObjectId(sender_friends_id)},
        {'$addToSet': {'list': send_to_sender_friends}})
    friends_collection.find_one_and_update(
        {'_id': ObjectId(sendee_friends_id)},
        {'$addToSet': {'list': send_to_sendee_friends}})
@task(bind = True, queue='write_tasks')
def deny_friend_request_two(self, data):
    """Deny a pending friend request by deleting the request document.

    data: dict with keys 'email_of_sendee' and 'email_of_requester'.
    """
    sendee_email = data['email_of_sendee']
    requester_email = data['email_of_requester']
    print(sendee_email)
    print(requester_email)
    requests_collection = client.students.friend_requests
    requests_collection.find_one_and_delete(
        {'email_of_emailee': sendee_email,
         'email_of_requester': requester_email})
@task(bind = True, queue='read_tasks')
def get_friend_request_two(self, data):
    """Return all pending friend requests addressed to the given student.

    data: dict with key 'email_of_sendee' (the recipient's email).
    Returns the json_util-encoded list of matching request documents.
    """
    db = client.students
    email = data['email_of_sendee']
    # Materialize the cursor directly; the old first/last-name locals were
    # read from `data` but never used.
    requests = db.friend_requests.find({'email_of_emailee': email})
    return json_util.dumps(list(requests))
@task(bind=True, queue='read_tasks')
def possible_friends(self, username, first_name):
    """Return students the user could send a friend request to.

    username: the requesting user's email.
    first_name: first name to search for.

    Candidates are students at the user's school with the given first name,
    excluding existing friends, anyone already involved in a pending
    request with the user (either direction), and -- because the user's own
    email appears in every such request pair -- the user themselves.

    BUGFIX: the original removed entries from `people` while iterating the
    same list, which silently skips the element after each removal. We now
    collect an exclusion set and filter in a single pass.
    """
    db = client.students
    students_temp = db.students
    friend_requests = db.friend_requests
    friends_list = db.friends_list

    print(username)
    myself = students_temp.find_one({'email': username})
    print(myself)
    school_i_go_to = myself['school']

    # Same first name + same school; strip fields the caller doesn't need
    # before returning.
    people = []
    for person in students_temp.find({'$and': [{'first_name': first_name},
                                               {'school': school_i_go_to}]}):
        del person['friendslist']
        del person['_id']
        del person['school']
        print(person)
        people.append(person)

    # Emails to exclude: existing friends plus both parties of every pending
    # request involving this user.
    excluded_emails = set()
    all_my_friends_complete = friends_list.find_one(myself['friendslist'])
    if all_my_friends_complete and all_my_friends_complete['list']:
        excluded_emails.update(friend['email']
                               for friend in all_my_friends_complete['list'])
    for req in friend_requests.find({'$or': [{'email_of_requester': username},
                                             {'email_of_emailee': username}]}):
        excluded_emails.add(req['email_of_requester'])
        excluded_emails.add(req['email_of_emailee'])

    people = [p for p in people if p['email'] not in excluded_emails]
    print(people)
    return people
@task(bind=True, queue='read_tasks')
def get_a_person_two(self, data):
    """Look up a single student by email and return the document as JSON.

    data: dict with key 'email'.
    """
    student_doc = client.students.students.find_one({'email': data['email']})
    return json_util.dumps(student_doc)
@task
def mul(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
@task
def xsum(numbers):
    """Return the sum of an iterable of numbers."""
    total = 0
    for value in numbers:
        total += value
    return total
|
{
"content_hash": "f92f23634d5b8d399f7ac28531835922",
"timestamp": "",
"source": "github",
"line_count": 1135,
"max_line_length": 193,
"avg_line_length": 35.459030837004406,
"alnum_prop": 0.5814242409183521,
"repo_name": "karlfloersch/socs",
"id": "2f361521811e263721644eaf65571d3d49994282",
"size": "40246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schedules/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3167"
},
{
"name": "HTML",
"bytes": "10285"
},
{
"name": "JavaScript",
"bytes": "47612"
},
{
"name": "Python",
"bytes": "96079"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.